Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig5
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/acpi/Kconfig23
-rw-r--r--drivers/acpi/battery.c673
-rw-r--r--drivers/acpi/bay.c19
-rw-r--r--drivers/acpi/bus.c4
-rw-r--r--drivers/acpi/dock.c127
-rw-r--r--drivers/acpi/ec.c247
-rw-r--r--drivers/acpi/event.c153
-rw-r--r--drivers/acpi/events/evgpeblk.c4
-rw-r--r--drivers/acpi/events/evrgnini.c1
-rw-r--r--drivers/acpi/glue.c2
-rw-r--r--drivers/acpi/numa.c31
-rw-r--r--drivers/acpi/osl.c42
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/processor_core.c13
-rw-r--r--drivers/acpi/processor_idle.c24
-rw-r--r--drivers/acpi/processor_throttling.c410
-rw-r--r--drivers/acpi/sbs.c33
-rw-r--r--drivers/acpi/sleep/main.c19
-rw-r--r--drivers/acpi/sleep/poweroff.c41
-rw-r--r--drivers/acpi/system.c165
-rw-r--r--drivers/acpi/tables/tbfadt.c44
-rw-r--r--drivers/acpi/thermal.c24
-rw-r--r--drivers/acpi/utilities/uteval.c17
-rw-r--r--drivers/acpi/video.c124
-rw-r--r--drivers/ata/Kconfig10
-rw-r--r--drivers/ata/ahci.c255
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libata-core.c153
-rw-r--r--drivers/ata/libata-eh.c300
-rw-r--r--drivers/ata/libata-scsi.c63
-rw-r--r--drivers/ata/libata-sff.c292
-rw-r--r--drivers/ata/libata.h3
-rw-r--r--drivers/ata/pata_ali.c34
-rw-r--r--drivers/ata/pata_amd.c4
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5530.c2
-rw-r--r--drivers/ata/pata_hpt3x3.c93
-rw-r--r--drivers/ata/pata_it821x.c3
-rw-r--r--drivers/ata/pata_mpc52xx.c18
-rw-r--r--drivers/ata/pata_pcmcia.c1
-rw-r--r--drivers/ata/pata_platform.c5
-rw-r--r--drivers/ata/pata_scc.c63
-rw-r--r--drivers/ata/pata_serverworks.c5
-rw-r--r--drivers/ata/pata_sis.c11
-rw-r--r--drivers/ata/pata_sl82c105.c5
-rw-r--r--drivers/ata/pata_via.c6
-rw-r--r--drivers/ata/sata_inic162x.c16
-rw-r--r--drivers/ata/sata_mv.c1044
-rw-r--r--drivers/ata/sata_nv.c40
-rw-r--r--drivers/ata/sata_promise.c66
-rw-r--r--drivers/ata/sata_qstor.c18
-rw-r--r--drivers/ata/sata_sil.c25
-rw-r--r--drivers/ata/sata_sil24.c139
-rw-r--r--drivers/ata/sata_sis.c24
-rw-r--r--drivers/ata/sata_svw.c13
-rw-r--r--drivers/ata/sata_uli.c18
-rw-r--r--drivers/ata/sata_via.c29
-rw-r--r--drivers/ata/sata_vsc.c13
-rw-r--r--drivers/atm/Kconfig10
-rw-r--r--drivers/atm/ambassador.c4
-rw-r--r--drivers/atm/eni.c28
-rw-r--r--drivers/atm/firestream.c14
-rw-r--r--drivers/atm/idt77252.c19
-rw-r--r--drivers/atm/iphase.c11
-rw-r--r--drivers/atm/lanai.c31
-rw-r--r--drivers/atm/nicstarmac.c2
-rw-r--r--drivers/atm/zatm.c10
-rw-r--r--drivers/auxdisplay/Kconfig8
-rw-r--r--drivers/base/attribute_container.c1
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/bus.c24
-rw-r--r--drivers/base/class.c46
-rw-r--r--drivers/base/core.c226
-rw-r--r--drivers/base/dd.c21
-rw-r--r--drivers/base/devres.c2
-rw-r--r--drivers/base/dmapool.c2
-rw-r--r--drivers/base/firmware_class.c6
-rw-r--r--drivers/base/power/Makefile4
-rw-r--r--drivers/base/power/main.c44
-rw-r--r--drivers/base/power/power.h9
-rw-r--r--drivers/base/power/resume.c23
-rw-r--r--drivers/base/power/runtime.c85
-rw-r--r--drivers/base/power/suspend.c72
-rw-r--r--drivers/base/power/sysfs.c66
-rw-r--r--drivers/base/power/trace.c5
-rw-r--r--drivers/base/sys.c24
-rw-r--r--drivers/block/Kconfig35
-rw-r--r--drivers/block/Makefile6
-rw-r--r--drivers/block/acsi_slm.c1032
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/cciss_scsi.c75
-rw-r--r--drivers/block/lguest_blk.c275
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/nbd.c22
-rw-r--r--drivers/block/pktcdvd.c7
-rw-r--r--drivers/block/ps3disk.c630
-rw-r--r--drivers/block/sunvdc.c887
-rw-r--r--drivers/block/sx8.c3
-rw-r--r--drivers/block/ub.c14
-rw-r--r--drivers/block/umem.c58
-rw-r--r--drivers/block/viodasd.c4
-rw-r--r--drivers/block/xen-blkfront.c988
-rw-r--r--drivers/block/xsysace.c1164
-rw-r--r--drivers/block/z2ram.c7
-rw-r--r--drivers/bluetooth/hci_usb.c88
-rw-r--r--drivers/bluetooth/hci_usb.h5
-rw-r--r--drivers/bluetooth/hci_vhci.c6
-rw-r--r--drivers/cdrom/cdrom.c3
-rw-r--r--drivers/char/Kconfig82
-rw-r--r--drivers/char/Makefile5
-rw-r--r--drivers/char/agp/amd-k7-agp.c4
-rw-r--r--drivers/char/agp/amd64-agp.c6
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/amiserial.c10
-rw-r--r--drivers/char/apm-emulation.c12
-rw-r--r--drivers/char/briq_panel.c10
-rw-r--r--drivers/char/consolemap.c78
-rw-r--r--drivers/char/cyclades.c371
-rw-r--r--drivers/char/decserial.c67
-rw-r--r--drivers/char/drm/ati_pcigart.c8
-rw-r--r--drivers/char/drm/drm.h329
-rw-r--r--drivers/char/drm/drmP.h356
-rw-r--r--drivers/char/drm/drm_agpsupport.c116
-rw-r--r--drivers/char/drm/drm_auth.c40
-rw-r--r--drivers/char/drm/drm_bufs.c209
-rw-r--r--drivers/char/drm/drm_context.c226
-rw-r--r--drivers/char/drm/drm_dma.c12
-rw-r--r--drivers/char/drm/drm_drawable.c270
-rw-r--r--drivers/char/drm/drm_drv.c87
-rw-r--r--drivers/char/drm/drm_fops.c68
-rw-r--r--drivers/char/drm/drm_hashtab.c34
-rw-r--r--drivers/char/drm/drm_hashtab.h24
-rw-r--r--drivers/char/drm/drm_ioc32.c82
-rw-r--r--drivers/char/drm/drm_ioctl.c68
-rw-r--r--drivers/char/drm/drm_irq.c58
-rw-r--r--drivers/char/drm/drm_lock.c28
-rw-r--r--drivers/char/drm/drm_memory.c8
-rw-r--r--drivers/char/drm/drm_mm.c66
-rw-r--r--drivers/char/drm/drm_os_linux.h22
-rw-r--r--drivers/char/drm/drm_pci.c6
-rw-r--r--drivers/char/drm/drm_proc.c50
-rw-r--r--drivers/char/drm/drm_sarea.h26
-rw-r--r--drivers/char/drm/drm_scatter.c22
-rw-r--r--drivers/char/drm/drm_sman.c93
-rw-r--r--drivers/char/drm/drm_sman.h50
-rw-r--r--drivers/char/drm/drm_stub.c32
-rw-r--r--drivers/char/drm/drm_sysfs.c4
-rw-r--r--drivers/char/drm/drm_vm.c106
-rw-r--r--drivers/char/drm/i810_dma.c164
-rw-r--r--drivers/char/drm/i810_drm.h2
-rw-r--r--drivers/char/drm/i810_drv.h18
-rw-r--r--drivers/char/drm/i830_dma.c157
-rw-r--r--drivers/char/drm/i830_drm.h2
-rw-r--r--drivers/char/drm/i830_drv.h24
-rw-r--r--drivers/char/drm/i830_irq.c20
-rw-r--r--drivers/char/drm/i915_dma.c44
-rw-r--r--drivers/char/drm/i915_drm.h8
-rw-r--r--drivers/char/drm/i915_drv.h22
-rw-r--r--drivers/char/drm/i915_irq.c28
-rw-r--r--drivers/char/drm/i915_mem.c6
-rw-r--r--drivers/char/drm/mga_dma.c79
-rw-r--r--drivers/char/drm/mga_drm.h6
-rw-r--r--drivers/char/drm/mga_drv.c4
-rw-r--r--drivers/char/drm/mga_drv.h22
-rw-r--r--drivers/char/drm/mga_irq.c12
-rw-r--r--drivers/char/drm/mga_state.c36
-rw-r--r--drivers/char/drm/r128_cce.c41
-rw-r--r--drivers/char/drm/r128_drm.h4
-rw-r--r--drivers/char/drm/r128_drv.h20
-rw-r--r--drivers/char/drm/r128_irq.c10
-rw-r--r--drivers/char/drm/r128_state.c60
-rw-r--r--drivers/char/drm/r300_cmdbuf.c53
-rw-r--r--drivers/char/drm/r300_reg.h1163
-rw-r--r--drivers/char/drm/radeon_cp.c54
-rw-r--r--drivers/char/drm/radeon_drm.h12
-rw-r--r--drivers/char/drm/radeon_drv.c3
-rw-r--r--drivers/char/drm/radeon_drv.h45
-rw-r--r--drivers/char/drm/radeon_irq.c118
-rw-r--r--drivers/char/drm/radeon_state.c108
-rw-r--r--drivers/char/drm/savage_bci.c44
-rw-r--r--drivers/char/drm/savage_drm.h4
-rw-r--r--drivers/char/drm/savage_drv.h20
-rw-r--r--drivers/char/drm/savage_state.c28
-rw-r--r--drivers/char/drm/sis_drv.c4
-rw-r--r--drivers/char/drm/sis_drv.h9
-rw-r--r--drivers/char/drm/sis_mm.c18
-rw-r--r--drivers/char/drm/via_dma.c10
-rw-r--r--drivers/char/drm/via_dmablit.c23
-rw-r--r--drivers/char/drm/via_dmablit.h2
-rw-r--r--drivers/char/drm/via_drm.h4
-rw-r--r--drivers/char/drm/via_drv.h32
-rw-r--r--drivers/char/drm/via_irq.c12
-rw-r--r--drivers/char/drm/via_map.c10
-rw-r--r--drivers/char/drm/via_mm.c6
-rw-r--r--drivers/char/drm/via_verifier.c12
-rw-r--r--drivers/char/drm/via_verifier.h6
-rw-r--r--drivers/char/esp.c13
-rw-r--r--drivers/char/generic_serial.c120
-rw-r--r--drivers/char/genrtc.c22
-rw-r--r--drivers/char/hpet.c70
-rw-r--r--drivers/char/hvc_console.c3
-rw-r--r--drivers/char/hvc_iseries.c8
-rw-r--r--drivers/char/hvc_lguest.c102
-rw-r--r--drivers/char/hvc_rtas.c2
-rw-r--r--drivers/char/hvc_xen.c159
-rw-r--r--drivers/char/hvcs.c12
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/hw_random/intel-rng.c10
-rw-r--r--drivers/char/ip2/i2ellis.c44
-rw-r--r--drivers/char/ip2/ip2main.c4
-rw-r--r--drivers/char/ipmi/Kconfig15
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c13
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/isicom.c93
-rw-r--r--drivers/char/istallion.c12
-rw-r--r--drivers/char/keyboard.c26
-rw-r--r--drivers/char/mbcs.c27
-rw-r--r--drivers/char/mbcs.h10
-rw-r--r--drivers/char/misc.c18
-rw-r--r--drivers/char/moxa.c37
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/mxser.c16
-rw-r--r--drivers/char/mxser_new.c22
-rw-r--r--drivers/char/n_hdlc.c16
-rw-r--r--drivers/char/n_r3964.c14
-rw-r--r--drivers/char/n_tty.c23
-rw-r--r--drivers/char/nvram.c192
-rw-r--r--drivers/char/pcmcia/synclink_cs.c3
-rw-r--r--drivers/char/ps3flash.c440
-rw-r--r--drivers/char/random.c9
-rw-r--r--drivers/char/rio/rio_linux.c4
-rw-r--r--drivers/char/rio/riocmd.c4
-rw-r--r--drivers/char/rio/riotable.c3
-rw-r--r--drivers/char/riscom8.c12
-rw-r--r--drivers/char/rocket.c6
-rw-r--r--drivers/char/rtc.c35
-rw-r--r--drivers/char/selection.c48
-rw-r--r--drivers/char/serial167.c6
-rw-r--r--drivers/char/sonypi.c47
-rw-r--r--drivers/char/specialix.c16
-rw-r--r--drivers/char/stallion.c10
-rw-r--r--drivers/char/synclink.c11
-rw-r--r--drivers/char/synclink_gt.c11
-rw-r--r--drivers/char/synclinkmp.c11
-rw-r--r--drivers/char/tpm/Kconfig18
-rw-r--r--drivers/char/tpm/tpm_bios.c22
-rw-r--r--drivers/char/tty_audit.c345
-rw-r--r--drivers/char/tty_io.c65
-rw-r--r--drivers/char/tty_ioctl.c32
-rw-r--r--drivers/char/viotape.c19
-rw-r--r--drivers/char/vme_scc.c8
-rw-r--r--drivers/char/vr41xx_giu.c132
-rw-r--r--drivers/char/vt.c35
-rw-r--r--drivers/char/watchdog/Kconfig27
-rw-r--r--drivers/char/watchdog/Makefile4
-rw-r--r--drivers/char/watchdog/at32ap700x_wdt.c386
-rw-r--r--drivers/char/watchdog/ep93xx_wdt.c4
-rw-r--r--drivers/char/watchdog/iop_wdt.c262
-rw-r--r--drivers/char/watchdog/mixcomwd.c127
-rw-r--r--drivers/char/watchdog/mpcore_wdt.c3
-rw-r--r--drivers/char/watchdog/pcwd_usb.c3
-rw-r--r--drivers/char/watchdog/pnx4008_wdt.c4
-rw-r--r--drivers/char/watchdog/s3c2410_wdt.c41
-rw-r--r--drivers/clocksource/acpi_pm.c7
-rw-r--r--drivers/connector/Kconfig7
-rw-r--r--drivers/cpufreq/cpufreq.c47
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c30
-rw-r--r--drivers/cpufreq/cpufreq_stats.c3
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c25
-rw-r--r--drivers/cpufreq/freq_table.c1
-rw-r--r--drivers/crypto/Kconfig9
-rw-r--r--drivers/dma/Kconfig14
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/dmaengine.c419
-rw-r--r--drivers/dma/ioatdma.c369
-rw-r--r--drivers/dma/ioatdma.h16
-rw-r--r--drivers/dma/ioatdma_io.h118
-rw-r--r--drivers/dma/iop-adma.c1467
-rw-r--r--drivers/edac/Kconfig65
-rw-r--r--drivers/edac/Makefile17
-rw-r--r--drivers/edac/amd76x_edac.c75
-rw-r--r--drivers/edac/e752x_edac.c320
-rw-r--r--drivers/edac/e7xxx_edac.c125
-rw-r--r--drivers/edac/edac_core.h (renamed from drivers/edac/edac_mc.h)506
-rw-r--r--drivers/edac/edac_device.c746
-rw-r--r--drivers/edac/edac_device_sysfs.c896
-rw-r--r--drivers/edac/edac_mc.c1675
-rw-r--r--drivers/edac/edac_mc_sysfs.c1024
-rw-r--r--drivers/edac/edac_module.c222
-rw-r--r--drivers/edac/edac_module.h77
-rw-r--r--drivers/edac/edac_pci.c433
-rw-r--r--drivers/edac/edac_pci_sysfs.c620
-rw-r--r--drivers/edac/edac_stub.c46
-rw-r--r--drivers/edac/i3000_edac.c506
-rw-r--r--drivers/edac/i5000_edac.c1505
-rw-r--r--drivers/edac/i82443bxgx_edac.c402
-rw-r--r--drivers/edac/i82860_edac.c56
-rw-r--r--drivers/edac/i82875p_edac.c92
-rw-r--r--drivers/edac/i82975x_edac.c666
-rw-r--r--drivers/edac/pasemi_edac.c299
-rw-r--r--drivers/edac/r82600_edac.c77
-rw-r--r--drivers/firewire/fw-ohci.c3
-rw-r--r--drivers/firewire/fw-sbp2.c18
-rw-r--r--drivers/firewire/fw-transaction.c9
-rw-r--r--drivers/firewire/fw-transaction.h4
-rw-r--r--drivers/firmware/Kconfig9
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/dcdbas.c10
-rw-r--r--drivers/firmware/dcdbas.h3
-rw-r--r--drivers/firmware/dell_rbu.c31
-rw-r--r--drivers/firmware/dmi-id.c222
-rw-r--r--drivers/firmware/dmi_scan.c73
-rw-r--r--drivers/firmware/edd.c9
-rw-r--r--drivers/firmware/efivars.c6
-rw-r--r--drivers/firmware/pcdp.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c9
-rw-r--r--drivers/hwmon/Kconfig82
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/abituguru.c45
-rw-r--r--drivers/hwmon/abituguru3.c1140
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/dme1737.c2080
-rw-r--r--drivers/hwmon/ds1621.c161
-rw-r--r--drivers/hwmon/f71805f.c178
-rw-r--r--drivers/hwmon/it87.c497
-rw-r--r--drivers/hwmon/lm63.c4
-rw-r--r--drivers/hwmon/lm70.c4
-rw-r--r--drivers/hwmon/lm83.c12
-rw-r--r--drivers/hwmon/lm90.c78
-rw-r--r--drivers/hwmon/lm93.c2655
-rw-r--r--drivers/hwmon/pc87360.c232
-rw-r--r--drivers/hwmon/pc87427.c2
-rw-r--r--drivers/hwmon/sis5595.c510
-rw-r--r--drivers/hwmon/smsc47b397.c7
-rw-r--r--drivers/hwmon/smsc47m1.c12
-rw-r--r--drivers/hwmon/smsc47m192.c37
-rw-r--r--drivers/hwmon/via686a.c538
-rw-r--r--drivers/hwmon/vt8231.c318
-rw-r--r--drivers/hwmon/w83627ehf.c615
-rw-r--r--drivers/hwmon/w83627hf.c153
-rw-r--r--drivers/i2c/algos/Kconfig4
-rw-r--r--drivers/i2c/busses/Kconfig36
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-acorn.c2
-rw-r--r--drivers/i2c/busses/i2c-gpio.c12
-rw-r--r--drivers/i2c/busses/i2c-i801.c249
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c3
-rw-r--r--drivers/i2c/busses/i2c-isa.c192
-rw-r--r--drivers/i2c/busses/i2c-mpc.c26
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c3
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c44
-rw-r--r--drivers/i2c/busses/i2c-piix4.c4
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c653
-rw-r--r--drivers/i2c/busses/i2c-powermac.c3
-rw-r--r--drivers/i2c/busses/i2c-pxa.c9
-rw-r--r--drivers/i2c/busses/i2c-rpx.c101
-rw-r--r--drivers/i2c/busses/i2c-savage4.c21
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c27
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c330
-rw-r--r--drivers/i2c/busses/i2c-viapro.c5
-rw-r--r--drivers/i2c/busses/scx200_acb.c16
-rw-r--r--drivers/i2c/chips/Kconfig45
-rw-r--r--drivers/i2c/chips/Makefile3
-rw-r--r--drivers/i2c/chips/ds1682.c259
-rw-r--r--drivers/i2c/chips/eeprom.c10
-rw-r--r--drivers/i2c/chips/max6875.c7
-rw-r--r--drivers/i2c/chips/menelaus.c1281
-rw-r--r--drivers/i2c/chips/tsl2550.c460
-rw-r--r--drivers/i2c/i2c-core.c27
-rw-r--r--drivers/i2c/i2c-dev.c9
-rw-r--r--drivers/ide/arm/icside.c2
-rw-r--r--drivers/ide/cris/ide-cris.c9
-rw-r--r--drivers/ide/ide-floppy.c47
-rw-r--r--drivers/ide/ide-io.c22
-rw-r--r--drivers/ide/ide-lib.c72
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide-timing.h20
-rw-r--r--drivers/ide/ide.c86
-rw-r--r--drivers/ide/legacy/ali14xx.c7
-rw-r--r--drivers/ide/legacy/dtc2278.c3
-rw-r--r--drivers/ide/legacy/falconide.c2
-rw-r--r--drivers/ide/legacy/hd.c73
-rw-r--r--drivers/ide/legacy/ht6560b.c12
-rw-r--r--drivers/ide/legacy/ide-cs.c1
-rw-r--r--drivers/ide/legacy/qd65xx.c21
-rw-r--r--drivers/ide/legacy/umc8672.c4
-rw-r--r--drivers/ide/mips/au1xxx-ide.c4
-rw-r--r--drivers/ide/mips/swarm.c3
-rw-r--r--drivers/ide/pci/aec62xx.c18
-rw-r--r--drivers/ide/pci/alim15x3.c7
-rw-r--r--drivers/ide/pci/amd74xx.c32
-rw-r--r--drivers/ide/pci/atiixp.c37
-rw-r--r--drivers/ide/pci/cmd640.c19
-rw-r--r--drivers/ide/pci/cmd64x.c30
-rw-r--r--drivers/ide/pci/cs5520.c6
-rw-r--r--drivers/ide/pci/cs5530.c6
-rw-r--r--drivers/ide/pci/cs5535.c9
-rw-r--r--drivers/ide/pci/cy82c693.c5
-rw-r--r--drivers/ide/pci/generic.c15
-rw-r--r--drivers/ide/pci/hpt34x.c17
-rw-r--r--drivers/ide/pci/hpt366.c56
-rw-r--r--drivers/ide/pci/it8213.c7
-rw-r--r--drivers/ide/pci/it821x.c8
-rw-r--r--drivers/ide/pci/jmicron.c4
-rw-r--r--drivers/ide/pci/ns87415.c1
-rw-r--r--drivers/ide/pci/opti621.c8
-rw-r--r--drivers/ide/pci/pdc202xx_new.c23
-rw-r--r--drivers/ide/pci/pdc202xx_old.c20
-rw-r--r--drivers/ide/pci/piix.c14
-rw-r--r--drivers/ide/pci/rz1000.c1
-rw-r--r--drivers/ide/pci/sc1200.c43
-rw-r--r--drivers/ide/pci/scc_pata.c76
-rw-r--r--drivers/ide/pci/serverworks.c118
-rw-r--r--drivers/ide/pci/sgiioc4.c3
-rw-r--r--drivers/ide/pci/siimage.c139
-rw-r--r--drivers/ide/pci/sis5513.c12
-rw-r--r--drivers/ide/pci/sl82c105.c26
-rw-r--r--drivers/ide/pci/slc90e66.c4
-rw-r--r--drivers/ide/pci/tc86c001.c7
-rw-r--r--drivers/ide/pci/triflex.c4
-rw-r--r--drivers/ide/pci/trm290.c1
-rw-r--r--drivers/ide/pci/via82cxxx.c35
-rw-r--r--drivers/ide/ppc/mpc8xx.c5
-rw-r--r--drivers/ide/ppc/pmac.c16
-rw-r--r--drivers/ide/setup-pci.c18
-rw-r--r--drivers/ieee1394/eth1394.c2
-rw-r--r--drivers/ieee1394/ieee1394_core.c3
-rw-r--r--drivers/ieee1394/nodemgr.c3
-rw-r--r--drivers/ieee1394/sbp2.c75
-rw-r--r--drivers/infiniband/Kconfig15
-rw-r--r--drivers/infiniband/core/addr.c3
-rw-r--r--drivers/infiniband/core/agent.c19
-rw-r--r--drivers/infiniband/core/cm.c247
-rw-r--r--drivers/infiniband/core/cm_msgs.h1
-rw-r--r--drivers/infiniband/core/cma.c3
-rw-r--r--drivers/infiniband/core/mad.c51
-rw-r--r--drivers/infiniband/core/multicast.c2
-rw-r--r--drivers/infiniband/core/sa.h2
-rw-r--r--drivers/infiniband/core/sa_query.c87
-rw-r--r--drivers/infiniband/core/smi.c16
-rw-r--r--drivers/infiniband/core/smi.h2
-rw-r--r--drivers/infiniband/core/sysfs.c3
-rw-r--r--drivers/infiniband/core/ucm.c1
-rw-r--r--drivers/infiniband/core/umem.c1
-rw-r--r--drivers/infiniband/hw/amso1100/Kconfig2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c112
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c7
-rw-r--r--drivers/infiniband/hw/ehca/Kconfig2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_av.c10
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h163
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes_pSeries.h156
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c62
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c11
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c87
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c232
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h25
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c181
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c1354
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.h23
-rw-r--r--drivers/infiniband/hw/ehca/ehca_pd.c27
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qes.h22
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c874
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c100
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h45
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c25
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c112
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h1
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.c2
-rw-r--r--drivers/infiniband/hw/ehca/hipz_fns_core.h4
-rw-r--r--drivers/infiniband/hw/ehca/hipz_hw.h41
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c222
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.h58
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig2
-rw-r--r--drivers/infiniband/hw/ipath/Makefile1
-rw-r--r--drivers/infiniband/hw/ipath/ipath_common.h33
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_debug.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c41
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c196
-rw-r--r--drivers/infiniband/hw/ipath/ipath_eeprom.c303
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c205
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c101
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c92
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c26
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c143
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h87
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.c365
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.h71
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mmap.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c19
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c116
-rw-r--r--drivers/infiniband/hw/ipath/ipath_registers.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c38
-rw-r--r--drivers/infiniband/hw/ipath/ipath_srq.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_stats.c25
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c43
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c28
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c31
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs_mcast.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_ppc64.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_x86_64.c29
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h4
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c250
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c18
-rw-r--r--drivers/infiniband/hw/mthca/Kconfig2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c221
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c28
-rw-r--r--drivers/infiniband/hw/mthca/mthca_wqe.h15
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c33
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c42
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h9
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c14
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c51
-rw-r--r--drivers/infiniband/ulp/srp/Kconfig2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c63
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h5
-rw-r--r--drivers/input/gameport/gameport.c1
-rw-r--r--drivers/input/input.c29
-rw-r--r--drivers/input/joystick/Kconfig7
-rw-r--r--drivers/input/joystick/xpad.c190
-rw-r--r--drivers/input/misc/pcspkr.c11
-rw-r--r--drivers/input/mouse/appletouch.c111
-rw-r--r--drivers/input/mouse/lifebook.c2
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/serio/ambakmi.c6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h36
-rw-r--r--drivers/input/serio/pcips2.c6
-rw-r--r--drivers/input/serio/sa1111ps2.c6
-rw-r--r--drivers/input/serio/serio.c1
-rw-r--r--drivers/input/touchscreen/Kconfig13
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ads7846.c80
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c189
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c1
-rw-r--r--drivers/isdn/Kconfig27
-rw-r--r--drivers/isdn/act2000/Kconfig2
-rw-r--r--drivers/isdn/capi/Kconfig7
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/capi/kcapi.c6
-rw-r--r--drivers/isdn/capi/kcapi_proc.c28
-rw-r--r--drivers/isdn/gigaset/Kconfig10
-rw-r--r--drivers/isdn/hardware/Kconfig1
-rw-r--r--drivers/isdn/hardware/avm/Kconfig23
-rw-r--r--drivers/isdn/hardware/eicon/Kconfig22
-rw-r--r--drivers/isdn/hardware/eicon/idifunc.c1
-rw-r--r--drivers/isdn/hisax/Kconfig1
-rw-r--r--drivers/isdn/hisax/bkm_a4t.c108
-rw-r--r--drivers/isdn/hisax/bkm_a8.c4
-rw-r--r--drivers/isdn/hisax/config.c245
-rw-r--r--drivers/isdn/hisax/enternow_pci.c165
-rw-r--r--drivers/isdn/hisax/hfc_pci.c191
-rw-r--r--drivers/isdn/hisax/nj_s.c194
-rw-r--r--drivers/isdn/hisax/nj_u.c167
-rw-r--r--drivers/isdn/hisax/sedlbauer.c8
-rw-r--r--drivers/isdn/i4l/Kconfig4
-rw-r--r--drivers/isdn/icn/Kconfig2
-rw-r--r--drivers/isdn/pcbit/Kconfig2
-rw-r--r--drivers/isdn/sc/Kconfig2
-rw-r--r--drivers/isdn/sc/card.h2
-rw-r--r--drivers/isdn/sc/command.c2
-rw-r--r--drivers/isdn/sc/timer.c2
-rw-r--r--drivers/kvm/Kconfig9
-rw-r--r--drivers/kvm/kvm.h126
-rw-r--r--drivers/kvm/kvm_main.c530
-rw-r--r--drivers/kvm/mmu.c396
-rw-r--r--drivers/kvm/paging_tmpl.h275
-rw-r--r--drivers/kvm/svm.c59
-rw-r--r--drivers/kvm/svm.h3
-rw-r--r--drivers/kvm/vmx.c652
-rw-r--r--drivers/kvm/x86_emulate.c70
-rw-r--r--drivers/leds/Kconfig22
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class.c49
-rw-r--r--drivers/leds/led-triggers.c27
-rw-r--r--drivers/leds/leds-gpio.c199
-rw-r--r--drivers/leds/leds-locomo.c2
-rw-r--r--drivers/leds/leds.h8
-rw-r--r--drivers/leds/ledtrig-timer.c49
-rw-r--r--drivers/lguest/Kconfig20
-rw-r--r--drivers/lguest/Makefile7
-rw-r--r--drivers/lguest/core.c462
-rw-r--r--drivers/lguest/hypercalls.c192
-rw-r--r--drivers/lguest/interrupts_and_traps.c268
-rw-r--r--drivers/lguest/io.c399
-rw-r--r--drivers/lguest/lg.h261
-rw-r--r--drivers/lguest/lguest.c630
-rw-r--r--drivers/lguest/lguest_asm.S54
-rw-r--r--drivers/lguest/lguest_bus.c148
-rw-r--r--drivers/lguest/lguest_user.c236
-rw-r--r--drivers/lguest/page_tables.c411
-rw-r--r--drivers/lguest/segments.c125
-rw-r--r--drivers/lguest/switcher.S159
-rw-r--r--drivers/macintosh/adb.c10
-rw-r--r--drivers/macintosh/macio_asic.c3
-rw-r--r--drivers/macintosh/rack-meter.c1
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/macintosh/therm_adt746x.c1
-rw-r--r--drivers/macintosh/therm_pm72.c6
-rw-r--r--drivers/macintosh/therm_windtunnel.c3
-rw-r--r--drivers/macintosh/windfarm_core.c6
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c3
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c28
-rw-r--r--drivers/md/Kconfig23
-rw-r--r--drivers/md/Makefile6
-rw-r--r--drivers/md/bitmap.c169
-rw-r--r--drivers/md/dm-bio-list.h4
-rw-r--r--drivers/md/dm-crypt.c35
-rw-r--r--drivers/md/dm-delay.c23
-rw-r--r--drivers/md/dm-exception-store.c77
-rw-r--r--drivers/md/dm-io.c5
-rw-r--r--drivers/md/dm-mpath-rdac.c700
-rw-r--r--drivers/md/dm-mpath.c34
-rw-r--r--drivers/md/dm-raid1.c78
-rw-r--r--drivers/md/dm-round-robin.c2
-rw-r--r--drivers/md/dm-snap.c116
-rw-r--r--drivers/md/dm-snap.h6
-rw-r--r--drivers/md/dm.c37
-rw-r--r--drivers/md/dm.h40
-rw-r--r--drivers/md/kcopyd.c11
-rw-r--r--drivers/md/md.c73
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5.c2731
-rw-r--r--drivers/md/xor.c154
-rw-r--r--drivers/media/Kconfig4
-rw-r--r--drivers/media/common/ir-functions.c27
-rw-r--r--drivers/media/common/saa7146_core.c8
-rw-r--r--drivers/media/common/saa7146_video.c8
-rw-r--r--drivers/media/dvb/b2c2/Kconfig2
-rw-r--r--drivers/media/dvb/b2c2/Makefile2
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c4
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig2
-rw-r--r--drivers/media/dvb/bt8xx/Makefile2
-rw-r--r--drivers/media/dvb/bt8xx/dst.c2
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c5
-rw-r--r--drivers/media/dvb/cinergyT2/Makefile2
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c22
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c12
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c5
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c10
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig29
-rw-r--r--drivers/media/dvb/dvb-usb/Makefile8
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-fe.c1503
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-remote.c157
-rw-r--r--drivers/media/dvb/dvb-usb/af9005-script.h203
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c1141
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h3496
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c18
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c53
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb.h1
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c21
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.h4
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-i2c.c79
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h6
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h11
-rw-r--r--drivers/media/dvb/dvb-usb/gl861.c7
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c127
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.h5
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c25
-rw-r--r--drivers/media/dvb/dvb-usb/umt-010.c8
-rw-r--r--drivers/media/dvb/frontends/Makefile2
-rw-r--r--drivers/media/dvb/frontends/cx22702.c1
-rw-r--r--drivers/media/dvb/frontends/cx24123.c2
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c256
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.h73
-rw-r--r--drivers/media/dvb/frontends/nxt200x.c23
-rw-r--r--drivers/media/dvb/frontends/nxt200x.h3
-rw-r--r--drivers/media/dvb/frontends/or51132.c1
-rw-r--r--drivers/media/dvb/frontends/or51211.c31
-rw-r--r--drivers/media/dvb/frontends/stv0299.c2
-rw-r--r--drivers/media/dvb/frontends/tda10023.c2
-rw-r--r--drivers/media/dvb/pluto2/Makefile2
-rw-r--r--drivers/media/dvb/ttpci/Kconfig2
-rw-r--r--drivers/media/dvb/ttpci/Makefile2
-rw-r--r--drivers/media/dvb/ttpci/av7110.c15
-rw-r--r--drivers/media/dvb/ttpci/av7110.h1
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c20
-rw-r--r--drivers/media/dvb/ttpci/av7110_ca.c4
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.c8
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.h2
-rw-r--r--drivers/media/dvb/ttpci/av7110_ir.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c31
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c2
-rw-r--r--drivers/media/dvb/ttusb-budget/Makefile2
-rw-r--r--drivers/media/dvb/ttusb-dec/Makefile2
-rw-r--r--drivers/media/radio/Kconfig4
-rw-r--r--drivers/media/radio/radio-aimslab.c3
-rw-r--r--drivers/media/radio/radio-aztech.c1
-rw-r--r--drivers/media/radio/radio-cadet.c4
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c8
-rw-r--r--drivers/media/radio/radio-gemtek.c1
-rw-r--r--drivers/media/radio/radio-rtrack2.c1
-rw-r--r--drivers/media/radio/radio-sf16fmi.c1
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c1
-rw-r--r--drivers/media/radio/radio-terratec.c1
-rw-r--r--drivers/media/radio/radio-trust.c1
-rw-r--r--drivers/media/radio/radio-typhoon.c1
-rw-r--r--drivers/media/video/Kconfig9
-rw-r--r--drivers/media/video/Makefile6
-rw-r--r--drivers/media/video/adv7170.c8
-rw-r--r--drivers/media/video/adv7175.c8
-rw-r--r--drivers/media/video/bt819.c9
-rw-r--r--drivers/media/video/bt856.c8
-rw-r--r--drivers/media/video/bt866.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c444
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c34
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/video/bt8xx/bttv.h2
-rw-r--r--drivers/media/video/bt8xx/bttvp.h6
-rw-r--r--drivers/media/video/c-qcam.c4
-rw-r--r--drivers/media/video/cpia2/cpia2_core.c13
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c8
-rw-r--r--drivers/media/video/cx88/Kconfig2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c14
-rw-r--r--drivers/media/video/cx88/cx88-cards.c24
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c122
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c25
-rw-r--r--drivers/media/video/cx88/cx88-input.c25
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c2
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c1
-rw-r--r--drivers/media/video/cx88/cx88-video.c8
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.c12
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.h7
-rw-r--r--drivers/media/video/cx88/cx88.h8
-rw-r--r--drivers/media/video/et61x251/Kconfig2
-rw-r--r--drivers/media/video/et61x251/et61x251.h23
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c189
-rw-r--r--drivers/media/video/et61x251/et61x251_sensor.h8
-rw-r--r--drivers/media/video/et61x251/et61x251_tas5130d1b.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c53
-rw-r--r--drivers/media/video/ivtv/Kconfig5
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c54
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h21
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c14
-rw-r--r--drivers/media/video/ivtv/ivtv-firmware.c29
-rw-r--r--drivers/media/video/ivtv/ivtv-gpio.c11
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c20
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c47
-rw-r--r--drivers/media/video/ivtv/ivtv-mailbox.c33
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c45
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c31
-rw-r--r--drivers/media/video/meye.c4
-rw-r--r--drivers/media/video/msp3400-driver.c9
-rw-r--r--drivers/media/video/msp3400-kthreads.c6
-rw-r--r--drivers/media/video/mt20xx.c80
-rw-r--r--drivers/media/video/ov7670.c4
-rw-r--r--drivers/media/video/planb.c3
-rw-r--r--drivers/media/video/pwc/pwc-if.c12
-rw-r--r--drivers/media/video/pwc/pwc.h4
-rw-r--r--drivers/media/video/saa5249.c8
-rw-r--r--drivers/media/video/saa7110.c4
-rw-r--r--drivers/media/video/saa7111.c8
-rw-r--r--drivers/media/video/saa7114.c10
-rw-r--r--drivers/media/video/saa7134/Kconfig2
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c16
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c41
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c169
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c8
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c20
-rw-r--r--drivers/media/video/saa7134/saa7134-tvaudio.c42
-rw-r--r--drivers/media/video/saa7134/saa7134.h8
-rw-r--r--drivers/media/video/saa7185.c8
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h9
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c173
-rw-r--r--drivers/media/video/sn9c102/sn9c102_ov7630.c214
-rw-r--r--drivers/media/video/sn9c102/sn9c102_ov7660.c88
-rw-r--r--drivers/media/video/stradis.c2
-rw-r--r--drivers/media/video/stv680.c7
-rw-r--r--drivers/media/video/tda8290.c129
-rw-r--r--drivers/media/video/tda9887.c57
-rw-r--r--drivers/media/video/tea5761.c243
-rw-r--r--drivers/media/video/tea5767.c16
-rw-r--r--drivers/media/video/tuner-core.c95
-rw-r--r--drivers/media/video/tuner-driver.h107
-rw-r--r--drivers/media/video/tuner-simple.c27
-rw-r--r--drivers/media/video/tuner-types.c22
-rw-r--r--drivers/media/video/tvaudio.c6
-rw-r--r--drivers/media/video/tveeprom.c8
-rw-r--r--drivers/media/video/tvp5150.c2
-rw-r--r--drivers/media/video/usbvideo/konicawc.c2
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c4
-rw-r--r--drivers/media/video/usbvideo/vicam.c184
-rw-r--r--drivers/media/video/usbvision/usbvision-cards.c8
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c43
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c1561
-rw-r--r--drivers/media/video/usbvision/usbvision.h13
-rw-r--r--drivers/media/video/v4l2-common.c19
-rw-r--r--drivers/media/video/video-buf-dvb.c1
-rw-r--r--drivers/media/video/vino.c6
-rw-r--r--drivers/media/video/vivi.c179
-rw-r--r--drivers/media/video/wm8739.c2
-rw-r--r--drivers/media/video/wm8775.c2
-rw-r--r--drivers/media/video/zc0301/Kconfig2
-rw-r--r--drivers/media/video/zc0301/zc0301.h21
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c147
-rw-r--r--drivers/media/video/zc0301/zc0301_pas202bcb.c1
-rw-r--r--drivers/media/video/zc0301/zc0301_pb0330.c1
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h2
-rw-r--r--drivers/media/video/zoran_driver.c63
-rw-r--r--drivers/media/video/zr364xx.c18
-rw-r--r--drivers/message/fusion/Kconfig1
-rw-r--r--drivers/message/fusion/linux_compat.h9
-rw-r--r--drivers/message/fusion/lsi/mpi.h7
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h61
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt66
-rw-r--r--drivers/message/fusion/lsi/mpi_inb.h221
-rw-r--r--drivers/message/fusion/lsi/mpi_init.h10
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h40
-rw-r--r--drivers/message/fusion/lsi/mpi_raid.h11
-rw-r--r--drivers/message/fusion/mptbase.c422
-rw-r--r--drivers/message/fusion/mptbase.h15
-rw-r--r--drivers/message/fusion/mptctl.c2
-rw-r--r--drivers/message/fusion/mptctl.h2
-rw-r--r--drivers/message/fusion/mptfc.c6
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/message/fusion/mptlan.h2
-rw-r--r--drivers/message/fusion/mptsas.c74
-rw-r--r--drivers/message/fusion/mptscsih.c417
-rw-r--r--drivers/message/fusion/mptscsih.h3
-rw-r--r--drivers/message/fusion/mptspi.c12
-rw-r--r--drivers/message/i2o/Kconfig22
-rw-r--r--drivers/message/i2o/debug.c134
-rw-r--r--drivers/message/i2o/device.c18
-rw-r--r--drivers/message/i2o/exec-osm.c10
-rw-r--r--drivers/message/i2o/i2o_block.c5
-rw-r--r--drivers/message/i2o/i2o_config.c62
-rw-r--r--drivers/message/i2o/i2o_scsi.c24
-rw-r--r--drivers/message/i2o/iop.c2
-rw-r--r--drivers/mfd/mcp-core.c3
-rw-r--r--drivers/mfd/ucb1x00-core.c3
-rw-r--r--drivers/mfd/ucb1x00-ts.c1
-rw-r--r--drivers/misc/Kconfig24
-rw-r--r--drivers/misc/asus-laptop.c6
-rw-r--r--drivers/misc/ibmasm/command.c20
-rw-r--r--drivers/misc/ibmasm/dot_command.c10
-rw-r--r--drivers/misc/ibmasm/dot_command.h2
-rw-r--r--drivers/misc/ibmasm/event.c8
-rw-r--r--drivers/misc/ibmasm/heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/i2o.h10
-rw-r--r--drivers/misc/ibmasm/ibmasm.h70
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c27
-rw-r--r--drivers/misc/ibmasm/lowlevel.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.h16
-rw-r--r--drivers/misc/ibmasm/module.c13
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c10
-rw-r--r--drivers/misc/ibmasm/remote.c37
-rw-r--r--drivers/misc/ibmasm/remote.h8
-rw-r--r--drivers/misc/ibmasm/uart.c2
-rw-r--r--drivers/misc/msi-laptop.c44
-rw-r--r--drivers/misc/sony-laptop.c371
-rw-r--r--drivers/misc/thinkpad_acpi.c602
-rw-r--r--drivers/misc/thinkpad_acpi.h42
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/queue.c7
-rw-r--r--drivers/mmc/host/at91_mci.c13
-rw-r--r--drivers/mmc/host/pxamci.h22
-rw-r--r--drivers/mmc/host/sdhci.c2
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/ubi/build.c25
-rw-r--r--drivers/mtd/ubi/cdev.c49
-rw-r--r--drivers/mtd/ubi/debug.c44
-rw-r--r--drivers/mtd/ubi/debug.h2
-rw-r--r--drivers/mtd/ubi/eba.c104
-rw-r--r--drivers/mtd/ubi/gluebi.c27
-rw-r--r--drivers/mtd/ubi/io.c65
-rw-r--r--drivers/mtd/ubi/kapi.c19
-rw-r--r--drivers/mtd/ubi/misc.c4
-rw-r--r--drivers/mtd/ubi/scan.c127
-rw-r--r--drivers/mtd/ubi/scan.h2
-rw-r--r--drivers/mtd/ubi/ubi.h3
-rw-r--r--drivers/mtd/ubi/upd.c4
-rw-r--r--drivers/mtd/ubi/vmt.c53
-rw-r--r--drivers/mtd/ubi/vtbl.c85
-rw-r--r--drivers/mtd/ubi/wl.c96
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/7990.c4
-rw-r--r--drivers/net/8139cp.c34
-rw-r--r--drivers/net/8139too.c9
-rw-r--r--drivers/net/Kconfig114
-rw-r--r--drivers/net/Makefile9
-rw-r--r--drivers/net/Space.c8
-rw-r--r--drivers/net/a2065.c4
-rw-r--r--drivers/net/ariadne.c2
-rw-r--r--drivers/net/arm/Kconfig1
-rw-r--r--drivers/net/arm/ep93xx_eth.c2
-rw-r--r--drivers/net/arm/ether1.c3
-rw-r--r--drivers/net/arm/ether3.c5
-rw-r--r--drivers/net/arm/etherh.c1
-rw-r--r--drivers/net/atari_bionet.c675
-rw-r--r--drivers/net/atari_pamsnet.c878
-rw-r--r--drivers/net/atl1/atl1.h157
-rw-r--r--drivers/net/atl1/atl1_main.c2181
-rw-r--r--drivers/net/au1000_eth.c4
-rw-r--r--drivers/net/ax88796.c4
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/bfin_mac.c1009
-rw-r--r--drivers/net/bfin_mac.h132
-rw-r--r--drivers/net/bnx2.c608
-rw-r--r--drivers/net/bnx2.h76
-rw-r--r--drivers/net/bsd_comp.c3
-rw-r--r--drivers/net/cassini.c12
-rw-r--r--drivers/net/cxgb3/version.h2
-rw-r--r--drivers/net/dl2k.c7
-rw-r--r--drivers/net/dl2k.h1
-rw-r--r--drivers/net/dm9000.c17
-rw-r--r--drivers/net/dummy.c82
-rw-r--r--drivers/net/e100.c8
-rw-r--r--drivers/net/e1000/e1000_main.c3
-rw-r--r--drivers/net/eepro100.c9
-rw-r--r--drivers/net/ehea/ehea.h23
-rw-r--r--drivers/net/ehea/ehea_main.c181
-rw-r--r--drivers/net/ehea/ehea_phyp.h3
-rw-r--r--drivers/net/ehea/ehea_qmr.c156
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/fealnx.c4
-rw-r--r--drivers/net/fec.c2
-rw-r--r--drivers/net/forcedeth.c172
-rw-r--r--drivers/net/gianfar.c12
-rw-r--r--drivers/net/gianfar_mii.c1
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/dmascc.c6
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/ifb.c78
-rw-r--r--drivers/net/irda/Kconfig11
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ep7211-sir.c89
-rw-r--r--drivers/net/irda/irport.c3
-rw-r--r--drivers/net/irda/irtty-sir.c3
-rw-r--r--drivers/net/irda/kingsun-sir.c4
-rw-r--r--drivers/net/irda/vlsi_ir.c27
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/lance.c7
-rw-r--r--drivers/net/lguest_net.c354
-rw-r--r--drivers/net/mac89x0.c2
-rw-r--r--drivers/net/macb.c563
-rw-r--r--drivers/net/macb.h10
-rw-r--r--drivers/net/macvlan.c496
-rw-r--r--drivers/net/mlx4/catas.c106
-rw-r--r--drivers/net/mlx4/cmd.c3
-rw-r--r--drivers/net/mlx4/eq.c56
-rw-r--r--drivers/net/mlx4/fw.c3
-rw-r--r--drivers/net/mlx4/fw.h1
-rw-r--r--drivers/net/mlx4/intf.c2
-rw-r--r--drivers/net/mlx4/main.c27
-rw-r--r--drivers/net/mlx4/mlx4.h14
-rw-r--r--drivers/net/mlx4/qp.c21
-rw-r--r--drivers/net/mlx4/srq.c30
-rw-r--r--drivers/net/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/natsemi.c6
-rw-r--r--drivers/net/ne2k-pci.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c7
-rw-r--r--drivers/net/ni5010.c6
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ni65.c4
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/com20020_cs.c3
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c3
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/phy/vitesse.c46
-rw-r--r--drivers/net/ppp_async.c3
-rw-r--r--drivers/net/ppp_deflate.c6
-rw-r--r--drivers/net/ppp_generic.c3
-rw-r--r--drivers/net/ppp_mppe.c3
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/pppol2tp.c2496
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/s2io.c29
-rw-r--r--drivers/net/s2io.h1
-rw-r--r--drivers/net/saa9730.c13
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/shaper.c3
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sky2.c7
-rw-r--r--drivers/net/starfire.c4
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sun3lance.c5
-rw-r--r--drivers/net/sunbmac.c2
-rw-r--r--drivers/net/sundance.c9
-rw-r--r--drivers/net/sunhme.c8
-rw-r--r--drivers/net/sunlance.c4
-rw-r--r--drivers/net/sunqe.c4
-rw-r--r--drivers/net/sunvnet.c1295
-rw-r--r--drivers/net/sunvnet.h83
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tg3.c258
-rw-r--r--drivers/net/tg3.h10
-rw-r--r--drivers/net/tlan.c5
-rw-r--r--drivers/net/tokenring/smctr.c6
-rw-r--r--drivers/net/tulip/de4x5.c8
-rw-r--r--drivers/net/tulip/dmfe.c26
-rw-r--r--drivers/net/tulip/interrupt.c8
-rw-r--r--drivers/net/tulip/tulip_core.c15
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c7
-rw-r--r--drivers/net/tulip/xircom_tulip_cb.c8
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/typhoon.c9
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_subset.c3
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/via-rhine.c17
-rw-r--r--drivers/net/via-velocity.c3
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/c101.c3
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wan/cycx_main.c4
-rw-r--r--drivers/net/wan/cycx_x25.c3
-rw-r--r--drivers/net/wan/dscc4.c6
-rw-r--r--drivers/net/wan/farsync.c3
-rw-r--r--drivers/net/wan/hostess_sv11.c3
-rw-r--r--drivers/net/wan/n2.c3
-rw-r--r--drivers/net/wan/pc300_drv.c9
-rw-r--r--drivers/net/wan/pc300too.c5
-rw-r--r--drivers/net/wan/pci200syn.c5
-rw-r--r--drivers/net/wan/sbni.c7
-rw-r--r--drivers/net/wan/sdla.c3
-rw-r--r--drivers/net/wan/sealevel.c3
-rw-r--r--drivers/net/wan/wanxl.c3
-rw-r--r--drivers/net/wan/x25_asy.c4
-rw-r--r--drivers/net/wireless/airo.c211
-rw-r--r--drivers/net/wireless/bcm43xx/bcm43xx_main.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c3
-rw-r--r--drivers/net/wireless/ipw2100.c17
-rw-r--r--drivers/net/wireless/ipw2200.c23
-rw-r--r--drivers/net/wireless/libertas/cmd.c2
-rw-r--r--drivers/net/wireless/libertas/main.c1
-rw-r--r--drivers/net/wireless/libertas/rx.c1
-rw-r--r--drivers/net/wireless/libertas/version.h1
-rw-r--r--drivers/net/wireless/libertas/wext.c3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c22
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c4
-rw-r--r--drivers/net/wireless/rtl8187_rtl8225.c4
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c88
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c59
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al2230.c12
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_al7230b.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_rf2959.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c102
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h3
-rw-r--r--drivers/net/xen-netfront.c1863
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/nubus/nubus.c6
-rw-r--r--drivers/of/Kconfig3
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/base.c275
-rw-r--r--drivers/of/device.c131
-rw-r--r--drivers/of/platform.c96
-rw-r--r--drivers/oprofile/buffer_sync.c3
-rw-r--r--drivers/oprofile/event_buffer.h20
-rw-r--r--drivers/oprofile/oprof.c28
-rw-r--r--drivers/parisc/hppb.c1
-rw-r--r--drivers/parisc/pdc_stable.c4
-rw-r--r--drivers/parisc/superio.c1
-rw-r--r--drivers/parport/Kconfig26
-rw-r--r--drivers/parport/parport_cs.c3
-rw-r--r--drivers/parport/parport_pc.c5
-rw-r--r--drivers/parport/parport_serial.c3
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/hotplug/acpiphp.h1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c6
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c53
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c7
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c66
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c12
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c12
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c191
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c6
-rw-r--r--drivers/pci/pci-acpi.c8
-rw-r--r--drivers/pci/pci-sysfs.c35
-rw-r--r--drivers/pci/pci.c200
-rw-r--r--drivers/pci/pcie/aer/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c5
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h14
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c36
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c34
-rw-r--r--drivers/pci/probe.c17
-rw-r--r--drivers/pci/proc.c3
-rw-r--r--drivers/pci/quirks.c30
-rw-r--r--drivers/pci/rom.c73
-rw-r--r--drivers/pci/search.c11
-rw-r--r--drivers/pci/setup-bus.c8
-rw-r--r--drivers/pci/syscall.c20
-rw-r--r--drivers/pcmcia/Kconfig17
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/ds.c40
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c829
-rw-r--r--drivers/pcmcia/socket_sysfs.c10
-rw-r--r--drivers/pnp/Kconfig13
-rw-r--r--drivers/pnp/core.c3
-rw-r--r--drivers/pnp/isapnp/Kconfig2
-rw-r--r--drivers/pnp/isapnp/core.c2
-rw-r--r--drivers/pnp/pnpbios/Kconfig2
-rw-r--r--drivers/pnp/pnpbios/core.c3
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/apm_power.c4
-rw-r--r--drivers/power/ds2760_battery.c7
-rw-r--r--drivers/power/olpc_battery.c1
-rw-r--r--drivers/power/pda_power.c22
-rw-r--r--drivers/power/pmu_battery.c2
-rw-r--r--drivers/power/power_supply_core.c6
-rw-r--r--drivers/power/power_supply_leds.c8
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/ps3/Makefile5
-rw-r--r--drivers/ps3/ps3av.c372
-rw-r--r--drivers/ps3/ps3av_cmd.c51
-rw-r--r--drivers/ps3/ps3stor_lib.c302
-rw-r--r--drivers/ps3/sys-manager-core.c68
-rw-r--r--drivers/ps3/sys-manager.c290
-rw-r--r--drivers/ps3/vuart.c817
-rw-r--r--drivers/ps3/vuart.h71
-rw-r--r--drivers/rapidio/rio-scan.c6
-rw-r--r--drivers/rapidio/rio-sysfs.c7
-rw-r--r--drivers/rtc/Kconfig81
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/rtc-at32ap700x.c317
-rw-r--r--drivers/rtc/rtc-cmos.c33
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/rtc/rtc-ds1216.c226
-rw-r--r--drivers/rtc/rtc-ds1307.c300
-rw-r--r--drivers/rtc/rtc-ds1553.c13
-rw-r--r--drivers/rtc/rtc-ds1742.c13
-rw-r--r--drivers/rtc/rtc-m41t80.c917
-rw-r--r--drivers/rtc/rtc-m48t59.c491
-rw-r--r--drivers/rtc/rtc-max6900.c96
-rw-r--r--drivers/rtc/rtc-rs5c372.c95
-rw-r--r--drivers/rtc/rtc-stk17ta8.c420
-rw-r--r--drivers/rtc/rtc-vr41xx.c186
-rw-r--r--drivers/rtc/rtc-x1205.c5
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/s390/char/Kconfig7
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/tape_34xx.c3
-rw-r--r--drivers/s390/char/vmcp.c89
-rw-r--r--drivers/s390/char/vmcp.h4
-rw-r--r--drivers/s390/char/vmur.c906
-rw-r--r--drivers/s390/char/vmur.h104
-rw-r--r--drivers/s390/cio/chp.c12
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/cio/qdio.c16
-rw-r--r--drivers/s390/net/claw.c9
-rw-r--r--drivers/s390/net/qeth.h9
-rw-r--r--drivers/s390/net/qeth_main.c183
-rw-r--r--drivers/s390/net/qeth_proc.c6
-rw-r--r--drivers/s390/net/qeth_sys.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c35
-rw-r--r--drivers/s390/scsi/zfcp_def.h1
-rw-r--r--drivers/s390/scsi/zfcp_erp.c10
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c113
-rw-r--r--drivers/sbus/char/bbc_envctrl.c5
-rw-r--r--drivers/sbus/char/bbc_i2c.c3
-rw-r--r--drivers/sbus/char/cpwatchdog.c1
-rw-r--r--drivers/sbus/char/envctrl.c7
-rw-r--r--drivers/sbus/char/jsflash.c3
-rw-r--r--drivers/sbus/char/vfc_dev.c5
-rw-r--r--drivers/sbus/sbus.c5
-rw-r--r--drivers/scsi/3w-9xxx.c205
-rw-r--r--drivers/scsi/3w-9xxx.h5
-rw-r--r--drivers/scsi/3w-xxxx.c104
-rw-r--r--drivers/scsi/53c700.c77
-rw-r--r--drivers/scsi/53c700.h5
-rw-r--r--drivers/scsi/53c7xx.c6102
-rw-r--r--drivers/scsi/53c7xx.h1608
-rw-r--r--drivers/scsi/53c7xx.scr1591
-rw-r--r--drivers/scsi/53c7xx_d.h_shipped2874
-rw-r--r--drivers/scsi/53c7xx_u.h_shipped102
-rw-r--r--drivers/scsi/BusLogic.c51
-rw-r--r--drivers/scsi/Kconfig70
-rw-r--r--drivers/scsi/Makefile24
-rw-r--r--drivers/scsi/NCR5380.c14
-rw-r--r--drivers/scsi/NCR5380.h6
-rw-r--r--drivers/scsi/NCR53C9x.c10
-rw-r--r--drivers/scsi/NCR53c406a.c45
-rw-r--r--drivers/scsi/NCR_D700.c3
-rw-r--r--drivers/scsi/NCR_Q720.c3
-rw-r--r--drivers/scsi/a100u2w.c1239
-rw-r--r--drivers/scsi/a100u2w.h297
-rw-r--r--drivers/scsi/a4000t.c144
-rw-r--r--drivers/scsi/aacraid/aachba.c462
-rw-r--r--drivers/scsi/aacraid/aacraid.h54
-rw-r--r--drivers/scsi/aacraid/commsup.c226
-rw-r--r--drivers/scsi/aacraid/linit.c104
-rw-r--r--drivers/scsi/aacraid/rx.c33
-rw-r--r--drivers/scsi/advansys.c101
-rw-r--r--drivers/scsi/advansys.h36
-rw-r--r--drivers/scsi/aha152x.c50
-rw-r--r--drivers/scsi/aha1740.c48
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c51
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c59
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h4
-rw-r--r--drivers/scsi/aic7xxx_old.c57
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c16
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c20
-rw-r--r--drivers/scsi/amiga7xx.c138
-rw-r--r--drivers/scsi/amiga7xx.h23
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c18
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c559
-rw-r--r--drivers/scsi/arm/cumana_1.c207
-rw-r--r--drivers/scsi/arm/ecoscsi.c152
-rw-r--r--drivers/scsi/arm/oak.c74
-rw-r--r--drivers/scsi/bvme6000.c76
-rw-r--r--drivers/scsi/bvme6000.h24
-rw-r--r--drivers/scsi/bvme6000_scsi.c136
-rw-r--r--drivers/scsi/dc395x.c163
-rw-r--r--drivers/scsi/dpt_i2o.c33
-rw-r--r--drivers/scsi/eata.c48
-rw-r--r--drivers/scsi/esp_scsi.c30
-rw-r--r--drivers/scsi/esp_scsi.h4
-rw-r--r--drivers/scsi/fdomain.c70
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/hptiop.c76
-rw-r--r--drivers/scsi/ibmmca.c1267
-rw-r--r--drivers/scsi/ibmmca.h21
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c463
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h2
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c20
-rw-r--r--drivers/scsi/imm.c3
-rw-r--r--drivers/scsi/initio.c3819
-rw-r--r--drivers/scsi/initio.h313
-rw-r--r--drivers/scsi/ipr.c184
-rw-r--r--drivers/scsi/ips.c413
-rw-r--r--drivers/scsi/ips.h44
-rw-r--r--drivers/scsi/iscsi_tcp.c606
-rw-r--r--drivers/scsi/iscsi_tcp.h9
-rw-r--r--drivers/scsi/jazz_esp.c4
-rw-r--r--drivers/scsi/lasi700.c3
-rw-r--r--drivers/scsi/libiscsi.c650
-rw-r--r--drivers/scsi/libsas/Kconfig7
-rw-r--r--drivers/scsi/libsas/Makefile1
-rw-r--r--drivers/scsi/libsas/sas_ata.c817
-rw-r--r--drivers/scsi/libsas/sas_discover.c402
-rw-r--r--drivers/scsi/libsas/sas_expander.c227
-rw-r--r--drivers/scsi/libsas/sas_init.c3
-rw-r--r--drivers/scsi/libsas/sas_internal.h3
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c87
-rw-r--r--drivers/scsi/lpfc/Makefile5
-rw-r--r--drivers/scsi/lpfc/lpfc.h358
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c774
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h182
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c971
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c508
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h50
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c3377
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2262
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h558
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c956
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c306
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c101
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c1325
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c557
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c2047
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h47
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c523
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h113
-rw-r--r--drivers/scsi/mac53c94.c62
-rw-r--r--drivers/scsi/megaraid.c141
-rw-r--r--drivers/scsi/megaraid/mega_common.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c185
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c116
-rw-r--r--drivers/scsi/mesh.c46
-rw-r--r--drivers/scsi/mvme16x.c78
-rw-r--r--drivers/scsi/mvme16x.h24
-rw-r--r--drivers/scsi/mvme16x_scsi.c159
-rw-r--r--drivers/scsi/ncr53c8xx.c70
-rw-r--r--drivers/scsi/nsp32.c204
-rw-r--r--drivers/scsi/pcmcia/Kconfig7
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c3
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c3
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c3
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c45
-rw-r--r--drivers/scsi/ppa.c60
-rw-r--r--drivers/scsi/ps3rom.c533
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c253
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c1118
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h48
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h105
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h127
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h45
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c304
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c252
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c454
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c497
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c619
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c174
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h78
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h426
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h7
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c105
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c101
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c114
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c274
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c98
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/qlogicfas408.c30
-rw-r--r--drivers/scsi/scsi.c50
-rw-r--r--drivers/scsi/scsi_debug.c4
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_error.c29
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_lib_dma.c50
-rw-r--r--drivers/scsi/scsi_scan.c67
-rw-r--r--drivers/scsi/scsi_sysctl.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c41
-rw-r--r--drivers/scsi/scsi_tgt_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c833
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c138
-rw-r--r--drivers/scsi/scsi_transport_sas.c125
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/seagate.c2
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sim710.c6
-rw-r--r--drivers/scsi/sni_53c710.c10
-rw-r--r--drivers/scsi/sr.c8
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/stex.c111
-rw-r--r--drivers/scsi/sun_esp.c2
-rw-r--r--drivers/scsi/sym53c416.c44
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c83
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h2
-rw-r--r--drivers/scsi/tmscsim.c88
-rw-r--r--drivers/scsi/tmscsim.h10
-rw-r--r--drivers/scsi/u14-34f.c60
-rw-r--r--drivers/scsi/ultrastor.c19
-rw-r--r--drivers/scsi/wd33c93.c4
-rw-r--r--drivers/scsi/wd7000.c20
-rw-r--r--drivers/scsi/zorro7xx.c181
-rw-r--r--drivers/serial/68360serial.c7
-rw-r--r--drivers/serial/8250.c28
-rw-r--r--drivers/serial/8250_early.c117
-rw-r--r--drivers/serial/8250_hp300.c1
-rw-r--r--drivers/serial/Kconfig120
-rw-r--r--drivers/serial/Makefile2
-rw-r--r--drivers/serial/amba-pl011.c3
-rw-r--r--drivers/serial/atmel_serial.c32
-rw-r--r--drivers/serial/bfin_5xx.c182
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/imx.c2
-rw-r--r--drivers/serial/ip22zilog.c3
-rw-r--r--drivers/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/serial/of_serial.c33
-rw-r--r--drivers/serial/s3c2410.c2
-rw-r--r--drivers/serial/sb1250-duart.c972
-rw-r--r--drivers/serial/serial_core.c6
-rw-r--r--drivers/serial/sh-sci.c4
-rw-r--r--drivers/serial/sh-sci.h50
-rw-r--r--drivers/serial/sn_console.c4
-rw-r--r--drivers/serial/suncore.c123
-rw-r--r--drivers/serial/suncore.h2
-rw-r--r--drivers/serial/sunhv.c55
-rw-r--r--drivers/serial/sunsab.c41
-rw-r--r--drivers/serial/sunsu.c37
-rw-r--r--drivers/serial/sunzilog.c41
-rw-r--r--drivers/serial/vr41xx_siu.c143
-rw-r--r--drivers/serial/zs.c1287
-rw-r--r--drivers/serial/zs.h284
-rw-r--r--drivers/sh/superhyway/superhyway.c3
-rw-r--r--drivers/sn/ioc3.c3
-rw-r--r--drivers/spi/Kconfig45
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/at25.c7
-rw-r--r--drivers/spi/atmel_spi.c185
-rw-r--r--drivers/spi/au1550_spi.c9
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c9
-rw-r--r--drivers/spi/omap2_mcspi.c1081
-rw-r--r--drivers/spi/omap_uwire.c9
-rw-r--r--drivers/spi/pxa2xx_spi.c9
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spi/spi_bitbang.c8
-rw-r--r--drivers/spi/spi_imx.c24
-rw-r--r--drivers/spi/spi_lm70llp.c361
-rw-r--r--drivers/spi/spi_mpc83xx.c47
-rw-r--r--drivers/spi/spi_s3c24xx.c8
-rw-r--r--drivers/spi/spi_txx9.c474
-rw-r--r--drivers/spi/spidev.c6
-rw-r--r--drivers/spi/tle62x0.c328
-rw-r--r--drivers/spi/xilinx_spi.c434
-rw-r--r--drivers/tc/Makefile1
-rw-r--r--drivers/tc/zs.c2203
-rw-r--r--drivers/tc/zs.h404
-rw-r--r--drivers/telephony/Kconfig13
-rw-r--r--drivers/telephony/ixj.c7
-rw-r--r--drivers/telephony/ixj_pcmcia.c3
-rw-r--r--drivers/uio/Kconfig29
-rw-r--r--drivers/uio/Makefile2
-rw-r--r--drivers/uio/uio.c701
-rw-r--r--drivers/uio/uio_cif.c156
-rw-r--r--drivers/usb/Kconfig10
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/cxacru.c76
-rw-r--r--drivers/usb/atm/speedtch.c7
-rw-r--r--drivers/usb/atm/ueagle-atm.c7
-rw-r--r--drivers/usb/atm/usbatm.c11
-rw-r--r--drivers/usb/class/cdc-acm.c21
-rw-r--r--drivers/usb/class/usblp.c621
-rw-r--r--drivers/usb/core/Kconfig25
-rw-r--r--drivers/usb/core/config.c42
-rw-r--r--drivers/usb/core/devices.c26
-rw-r--r--drivers/usb/core/driver.c155
-rw-r--r--drivers/usb/core/file.c29
-rw-r--r--drivers/usb/core/generic.c29
-rw-r--r--drivers/usb/core/hcd-pci.c3
-rw-r--r--drivers/usb/core/hcd.c247
-rw-r--r--drivers/usb/core/hcd.h14
-rw-r--r--drivers/usb/core/hub.c661
-rw-r--r--drivers/usb/core/message.c72
-rw-r--r--drivers/usb/core/quirks.c18
-rw-r--r--drivers/usb/core/sysfs.c162
-rw-r--r--drivers/usb/core/urb.c193
-rw-r--r--drivers/usb/core/usb.c12
-rw-r--r--drivers/usb/core/usb.h14
-rw-r--r--drivers/usb/gadget/Kconfig74
-rw-r--r--drivers/usb/gadget/Makefile7
-rw-r--r--drivers/usb/gadget/amd5536udc.c3454
-rw-r--r--drivers/usb/gadget/amd5536udc.h626
-rw-r--r--drivers/usb/gadget/at91_udc.c21
-rw-r--r--drivers/usb/gadget/dummy_hcd.c39
-rw-r--r--drivers/usb/gadget/ether.c12
-rw-r--r--drivers/usb/gadget/file_storage.c25
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.c99
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h4
-rw-r--r--drivers/usb/gadget/gadget_chips.h35
-rw-r--r--drivers/usb/gadget/gmidi.c8
-rw-r--r--drivers/usb/gadget/goku_udc.c87
-rw-r--r--drivers/usb/gadget/goku_udc.h10
-rw-r--r--drivers/usb/gadget/inode.c8
-rw-r--r--drivers/usb/gadget/lh7a40x_udc.c27
-rw-r--r--drivers/usb/gadget/m66592-udc.c1633
-rw-r--r--drivers/usb/gadget/m66592-udc.h575
-rw-r--r--drivers/usb/gadget/net2280.c99
-rw-r--r--drivers/usb/gadget/omap_udc.c108
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c473
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.h48
-rw-r--r--drivers/usb/gadget/rndis.c164
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2045
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h110
-rw-r--r--drivers/usb/gadget/serial.c42
-rw-r--r--drivers/usb/gadget/zero.c9
-rw-r--r--drivers/usb/host/Kconfig28
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-dbg.c183
-rw-r--r--drivers/usb/host/ehci-fsl.c10
-rw-r--r--drivers/usb/host/ehci-hcd.c118
-rw-r--r--drivers/usb/host/ehci-hub.c122
-rw-r--r--drivers/usb/host/ehci-mem.c27
-rw-r--r--drivers/usb/host/ehci-pci.c10
-rw-r--r--drivers/usb/host/ehci-ppc-soc.c182
-rw-r--r--drivers/usb/host/ehci-ps3.c86
-rw-r--r--drivers/usb/host/ehci-q.c96
-rw-r--r--drivers/usb/host/ehci-sched.c339
-rw-r--r--drivers/usb/host/ehci.h234
-rw-r--r--drivers/usb/host/isp116x-hcd.c187
-rw-r--r--drivers/usb/host/ohci-dbg.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c95
-rw-r--r--drivers/usb/host/ohci-hub.c5
-rw-r--r--drivers/usb/host/ohci-mem.c1
-rw-r--r--drivers/usb/host/ohci-pci.c57
-rw-r--r--drivers/usb/host/ohci-pnx4008.c2
-rw-r--r--drivers/usb/host/ohci-ps3.c87
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c2242
-rw-r--r--drivers/usb/host/r8a66597.h631
-rw-r--r--drivers/usb/host/sl811_cs.c3
-rw-r--r--drivers/usb/host/u132-hcd.c17
-rw-r--r--drivers/usb/host/uhci-hcd.c7
-rw-r--r--drivers/usb/host/uhci-q.c59
-rw-r--r--drivers/usb/image/mdc800.c45
-rw-r--r--drivers/usb/image/microtek.c19
-rw-r--r--drivers/usb/misc/adutux.c86
-rw-r--r--drivers/usb/misc/appledisplay.c13
-rw-r--r--drivers/usb/misc/auerswald.c35
-rw-r--r--drivers/usb/misc/berry_charge.c35
-rw-r--r--drivers/usb/misc/ftdi-elan.c21
-rw-r--r--drivers/usb/misc/idmouse.c54
-rw-r--r--drivers/usb/misc/iowarrior.c47
-rw-r--r--drivers/usb/misc/ldusb.c53
-rw-r--r--drivers/usb/misc/legousbtower.c52
-rw-r--r--drivers/usb/misc/phidgetkit.c13
-rw-r--r--drivers/usb/misc/phidgetmotorcontrol.c13
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c38
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c25
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h2
-rw-r--r--drivers/usb/misc/usblcd.c78
-rw-r--r--drivers/usb/misc/usbtest.c4
-rw-r--r--drivers/usb/misc/uss720.c5
-rw-r--r--drivers/usb/mon/mon_bin.c34
-rw-r--r--drivers/usb/mon/mon_main.c14
-rw-r--r--drivers/usb/mon/mon_text.c31
-rw-r--r--drivers/usb/mon/usb_mon.h7
-rw-r--r--drivers/usb/serial/Kconfig10
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/aircable.c16
-rw-r--r--drivers/usb/serial/airprime.c10
-rw-r--r--drivers/usb/serial/ark3116.c61
-rw-r--r--drivers/usb/serial/belkin_sa.c78
-rw-r--r--drivers/usb/serial/cyberjack.c17
-rw-r--r--drivers/usb/serial/cypress_m8.c18
-rw-r--r--drivers/usb/serial/digi_acceleport.c99
-rw-r--r--drivers/usb/serial/empeg.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.c119
-rw-r--r--drivers/usb/serial/garmin_gps.c47
-rw-r--r--drivers/usb/serial/generic.c97
-rw-r--r--drivers/usb/serial/io_edgeport.c40
-rw-r--r--drivers/usb/serial/io_fw_down3.h1460
-rw-r--r--drivers/usb/serial/io_ti.c134
-rw-r--r--drivers/usb/serial/io_usbvend.h12
-rw-r--r--drivers/usb/serial/ipaq.c14
-rw-r--r--drivers/usb/serial/ipw.c12
-rw-r--r--drivers/usb/serial/ir-usb.c154
-rw-r--r--drivers/usb/serial/keyspan.c456
-rw-r--r--drivers/usb/serial/keyspan.h74
-rw-r--r--drivers/usb/serial/keyspan_pda.c17
-rw-r--r--drivers/usb/serial/keyspan_usa67msg.h254
-rw-r--r--drivers/usb/serial/kl5kusb105.c22
-rw-r--r--drivers/usb/serial/kobil_sct.c26
-rw-r--r--drivers/usb/serial/mct_u232.c149
-rw-r--r--drivers/usb/serial/mct_u232.h15
-rw-r--r--drivers/usb/serial/mos7720.c48
-rw-r--r--drivers/usb/serial/mos7840.c125
-rw-r--r--drivers/usb/serial/navman.c7
-rw-r--r--drivers/usb/serial/omninet.c20
-rw-r--r--drivers/usb/serial/option.c34
-rw-r--r--drivers/usb/serial/oti6858.c1342
-rw-r--r--drivers/usb/serial/oti6858.h15
-rw-r--r--drivers/usb/serial/pl2303.c90
-rw-r--r--drivers/usb/serial/safe_serial.c6
-rw-r--r--drivers/usb/serial/sierra.c490
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c46
-rw-r--r--drivers/usb/serial/usb-serial.c35
-rw-r--r--drivers/usb/serial/visor.c64
-rw-r--r--drivers/usb/serial/whiteheat.c122
-rw-r--r--drivers/usb/storage/dpcm.c56
-rw-r--r--drivers/usb/storage/onetouch.c13
-rw-r--r--drivers/usb/storage/scsiglue.c21
-rw-r--r--drivers/usb/storage/unusual_devs.h206
-rw-r--r--drivers/usb/storage/usb.c94
-rw-r--r--drivers/usb/storage/usb.h1
-rw-r--r--drivers/usb/usb-skeleton.c151
-rw-r--r--drivers/video/68328fb.c2
-rw-r--r--drivers/video/Kconfig30
-rw-r--r--drivers/video/Makefile4
-rw-r--r--drivers/video/amba-clcd.c3
-rw-r--r--drivers/video/atmel_lcdfb.c67
-rw-r--r--drivers/video/aty/ati_ids.h1
-rw-r--r--drivers/video/aty/aty128fb.c2
-rw-r--r--drivers/video/aty/atyfb_base.c11
-rw-r--r--drivers/video/aty/radeon_backlight.c4
-rw-r--r--drivers/video/aty/radeon_base.c12
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/au1200fb.c3
-rw-r--r--drivers/video/backlight/Kconfig26
-rw-r--r--drivers/video/backlight/backlight.c125
-rw-r--r--drivers/video/backlight/cr_bllcd.c4
-rw-r--r--drivers/video/backlight/lcd.c112
-rw-r--r--drivers/video/clps711xfb.c3
-rw-r--r--drivers/video/console/Kconfig18
-rw-r--r--drivers/video/console/fbcon.c366
-rw-r--r--drivers/video/console/vgacon.c6
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/cyber2000fb.c3
-rw-r--r--drivers/video/cyblafb.c21
-rw-r--r--drivers/video/epson1355fb.c21
-rw-r--r--drivers/video/fbmem.c299
-rw-r--r--drivers/video/fm2fb.c16
-rw-r--r--drivers/video/gbefb.c41
-rw-r--r--drivers/video/i810/i810.h2
-rw-r--r--drivers/video/igafb.c4
-rw-r--r--drivers/video/intelfb/intelfb.h2
-rw-r--r--drivers/video/kyro/STG4000InitDevice.c5
-rw-r--r--drivers/video/logo/Kconfig5
-rw-r--r--drivers/video/logo/Makefile2
-rw-r--r--drivers/video/logo/logo.c7
-rw-r--r--drivers/video/logo/logo_spe_clut224.ppm283
-rw-r--r--drivers/video/macfb.c93
-rw-r--r--drivers/video/macmodes.c5
-rw-r--r--drivers/video/macmodes.h8
-rw-r--r--drivers/video/matrox/matroxfb_accel.c11
-rw-r--r--drivers/video/matrox/matroxfb_base.c8
-rw-r--r--drivers/video/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c6
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.h4
-rw-r--r--drivers/video/matrox/matroxfb_maven.c9
-rw-r--r--drivers/video/nvidia/nv_backlight.c2
-rw-r--r--drivers/video/nvidia/nv_hw.c62
-rw-r--r--drivers/video/nvidia/nv_setup.c12
-rw-r--r--drivers/video/nvidia/nv_type.h1
-rw-r--r--drivers/video/nvidia/nvidia.c9
-rw-r--r--drivers/video/offb.c2
-rw-r--r--drivers/video/omap/Kconfig58
-rw-r--r--drivers/video/omap/Makefile29
-rw-r--r--drivers/video/omap/blizzard.c1568
-rw-r--r--drivers/video/omap/dispc.c1502
-rw-r--r--drivers/video/omap/dispc.h43
-rw-r--r--drivers/video/omap/hwa742.c1077
-rw-r--r--drivers/video/omap/lcd_h3.c141
-rw-r--r--drivers/video/omap/lcd_h4.c117
-rw-r--r--drivers/video/omap/lcd_inn1510.c124
-rw-r--r--drivers/video/omap/lcd_inn1610.c150
-rw-r--r--drivers/video/omap/lcd_osk.c144
-rw-r--r--drivers/video/omap/lcd_palmte.c123
-rw-r--r--drivers/video/omap/lcd_palmtt.c127
-rw-r--r--drivers/video/omap/lcd_palmz71.c123
-rw-r--r--drivers/video/omap/lcd_sx1.c334
-rw-r--r--drivers/video/omap/lcdc.c893
-rw-r--r--drivers/video/omap/lcdc.h7
-rw-r--r--drivers/video/omap/omapfb_main.c1941
-rw-r--r--drivers/video/omap/rfbi.c588
-rw-r--r--drivers/video/omap/sossi.c686
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pm2fb.c202
-rw-r--r--drivers/video/pm3fb.c270
-rw-r--r--drivers/video/ps3fb.c293
-rw-r--r--drivers/video/pvr2fb.c111
-rw-r--r--drivers/video/q40fb.c2
-rw-r--r--drivers/video/riva/fbdev.c4
-rw-r--r--drivers/video/riva/riva_hw.c7
-rw-r--r--drivers/video/savage/savagefb_driver.c3
-rw-r--r--drivers/video/sgivwfb.c2
-rw-r--r--drivers/video/sis/sis.h2
-rw-r--r--drivers/video/sis/sis_main.c8
-rw-r--r--drivers/video/sstfb.c2
-rw-r--r--drivers/video/tgafb.c5
-rw-r--r--drivers/video/tridentfb.c30
-rw-r--r--drivers/video/tx3912fb.c2
-rw-r--r--drivers/video/valkyriefb.c3
-rw-r--r--drivers/video/vt8623fb.c42
-rw-r--r--drivers/w1/Kconfig12
-rw-r--r--drivers/w1/masters/Kconfig7
-rw-r--r--drivers/w1/masters/matrox_w1.c3
-rw-r--r--drivers/w1/slaves/Kconfig4
-rw-r--r--drivers/w1/slaves/w1_ds2433.c14
-rw-r--r--drivers/w1/slaves/w1_ds2760.c9
-rw-r--r--drivers/w1/slaves/w1_therm.c8
-rw-r--r--drivers/w1/w1.c18
-rw-r--r--drivers/w1/w1_int.c3
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/grant-table.c582
-rw-r--r--drivers/xen/xenbus/Makefile7
-rw-r--r--drivers/xen/xenbus/xenbus_client.c569
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c233
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h46
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c935
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h74
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c861
-rw-r--r--drivers/zorro/zorro-sysfs.c6
1713 files changed, 145651 insertions, 65163 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7916f4b86d23..3e1c442deff9 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -8,6 +8,8 @@ source "drivers/connector/Kconfig"
source "drivers/mtd/Kconfig"
+source "drivers/of/Kconfig"
+
source "drivers/parport/Kconfig"
source "drivers/pnp/Kconfig"
@@ -84,4 +86,7 @@ source "drivers/auxdisplay/Kconfig"
source "drivers/kvm/Kconfig"
+source "drivers/uio/Kconfig"
+
+source "drivers/lguest/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 503d82569449..a9e4c5f922a0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_PNP) += pnp/
obj-$(CONFIG_ARM_AMBA) += amba/
+obj-$(CONFIG_XEN) += xen/
+
# char/ comes before serial/ etc so that the VT console is the boot-time
# default.
obj-y += char/
@@ -38,6 +40,7 @@ obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_FUSION) += message/
obj-$(CONFIG_FIREWIRE) += firewire/
obj-$(CONFIG_IEEE1394) += ieee1394/
+obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
obj-y += auxdisplay/
obj-$(CONFIG_MTD) += mtd/
@@ -70,6 +73,7 @@ obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
+obj-$(CONFIG_LGUEST_GUEST) += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_NEW_LEDS) += leds/
@@ -82,3 +86,4 @@ obj-$(CONFIG_GENERIC_TIME) += clocksource/
obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_HID) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
+obj-$(CONFIG_OF) += of/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 139f41f033d8..408b45168aba 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -2,16 +2,12 @@
# ACPI Configuration
#
-menu "ACPI (Advanced Configuration and Power Interface) Support"
+menuconfig ACPI
+ bool "ACPI Support (Advanced Configuration and Power Interface) Support"
depends on !X86_NUMAQ
depends on !X86_VISWS
depends on !IA64_HP_SIM
depends on IA64 || X86
- depends on PM
-
-config ACPI
- bool "ACPI Support"
- depends on IA64 || X86
depends on PCI
depends on PM
select PNP
@@ -49,7 +45,6 @@ if ACPI
config ACPI_SLEEP
bool "Sleep States"
depends on X86 && (!SMP || SUSPEND_SMP)
- depends on PM
default y
---help---
This option adds support for ACPI suspend states.
@@ -82,7 +77,6 @@ config ACPI_SLEEP_PROC_SLEEP
config ACPI_PROCFS
bool "Procfs interface (deprecated)"
- depends on ACPI
default y
---help---
The Procfs interface for ACPI is made optional for backward compatibility.
@@ -124,7 +118,7 @@ config ACPI_BUTTON
config ACPI_VIDEO
tristate "Video"
- depends on X86 && BACKLIGHT_CLASS_DEVICE
+ depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
help
This driver implement the ACPI Extensions For Display Adapters
for integrated graphics devices on motherboard, as specified in
@@ -280,6 +274,14 @@ config ACPI_DEBUG
of verbosity. Saying Y enables these statements. This will increase
your kernel size by around 50K.
+config ACPI_DEBUG_FUNC_TRACE
+ bool "Additionally enable ACPI function tracing"
+ default n
+ depends on ACPI_DEBUG
+ help
+ ACPI Debug Statements slow down ACPI processing. Function trace
+ is about half of the penalty and is rarely useful.
+
config ACPI_EC
bool
default y
@@ -330,7 +332,6 @@ config ACPI_CONTAINER
config ACPI_HOTPLUG_MEMORY
tristate "Memory Hotplug"
- depends on ACPI
depends on MEMORY_HOTPLUG
default n
help
@@ -359,5 +360,3 @@ config ACPI_SBS
to today's ACPI "Control Method" battery.
endif # ACPI
-
-endmenu
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index e64c76c8b726..cad932de383d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -43,21 +43,30 @@
#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_HID "PNP0C0A"
#define ACPI_BATTERY_DEVICE_NAME "Battery"
-#define ACPI_BATTERY_FILE_INFO "info"
-#define ACPI_BATTERY_FILE_STATUS "state"
-#define ACPI_BATTERY_FILE_ALARM "alarm"
#define ACPI_BATTERY_NOTIFY_STATUS 0x80
#define ACPI_BATTERY_NOTIFY_INFO 0x81
#define ACPI_BATTERY_UNITS_WATTS "mW"
#define ACPI_BATTERY_UNITS_AMPS "mA"
#define _COMPONENT ACPI_BATTERY_COMPONENT
+
+#define ACPI_BATTERY_UPDATE_TIME 0
+
+#define ACPI_BATTERY_NONE_UPDATE 0
+#define ACPI_BATTERY_EASY_UPDATE 1
+#define ACPI_BATTERY_INIT_UPDATE 2
+
ACPI_MODULE_NAME("battery");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
+static unsigned int update_time = ACPI_BATTERY_UPDATE_TIME;
+
+/* 0 - every time, > 0 - by update_time */
+module_param(update_time, uint, 0644);
+
extern struct proc_dir_entry *acpi_lock_battery_dir(void);
extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
@@ -76,7 +85,7 @@ static struct acpi_driver acpi_battery_driver = {
},
};
-struct acpi_battery_status {
+struct acpi_battery_state {
acpi_integer state;
acpi_integer present_rate;
acpi_integer remaining_capacity;
@@ -99,33 +108,111 @@ struct acpi_battery_info {
acpi_string oem_info;
};
-struct acpi_battery_flags {
- u8 present:1; /* Bay occupied? */
- u8 power_unit:1; /* 0=watts, 1=apms */
- u8 alarm:1; /* _BTP present? */
- u8 reserved:5;
+enum acpi_battery_files{
+ ACPI_BATTERY_INFO = 0,
+ ACPI_BATTERY_STATE,
+ ACPI_BATTERY_ALARM,
+ ACPI_BATTERY_NUMFILES,
};
-struct acpi_battery_trips {
- unsigned long warning;
- unsigned long low;
+struct acpi_battery_flags {
+ u8 battery_present_prev;
+ u8 alarm_present;
+ u8 init_update;
+ u8 update[ACPI_BATTERY_NUMFILES];
+ u8 power_unit;
};
struct acpi_battery {
- struct acpi_device * device;
+ struct mutex mutex;
+ struct acpi_device *device;
struct acpi_battery_flags flags;
- struct acpi_battery_trips trips;
+ struct acpi_buffer bif_data;
+ struct acpi_buffer bst_data;
unsigned long alarm;
- struct acpi_battery_info *info;
+ unsigned long update_time[ACPI_BATTERY_NUMFILES];
};
+inline int acpi_battery_present(struct acpi_battery *battery)
+{
+ return battery->device->status.battery_present;
+}
+inline char *acpi_battery_power_units(struct acpi_battery *battery)
+{
+ if (battery->flags.power_unit)
+ return ACPI_BATTERY_UNITS_AMPS;
+ else
+ return ACPI_BATTERY_UNITS_WATTS;
+}
+
+inline acpi_handle acpi_battery_handle(struct acpi_battery *battery)
+{
+ return battery->device->handle;
+}
+
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
-static int
-acpi_battery_get_info(struct acpi_battery *battery,
- struct acpi_battery_info **bif)
+static void acpi_battery_check_result(struct acpi_battery *battery, int result)
+{
+ if (!battery)
+ return;
+
+ if (result) {
+ battery->flags.init_update = 1;
+ }
+}
+
+static int acpi_battery_extract_package(struct acpi_battery *battery,
+ union acpi_object *package,
+ struct acpi_buffer *format,
+ struct acpi_buffer *data,
+ char *package_name)
+{
+ acpi_status status = AE_OK;
+ struct acpi_buffer data_null = { 0, NULL };
+
+ status = acpi_extract_package(package, format, &data_null);
+ if (status != AE_BUFFER_OVERFLOW) {
+ ACPI_EXCEPTION((AE_INFO, status, "Extracting size %s",
+ package_name));
+ return -ENODEV;
+ }
+
+ if (data_null.length != data->length) {
+ kfree(data->pointer);
+ data->pointer = kzalloc(data_null.length, GFP_KERNEL);
+ if (!data->pointer) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "kzalloc()"));
+ return -ENOMEM;
+ }
+ data->length = data_null.length;
+ }
+
+ status = acpi_extract_package(package, format, data);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Extracting %s",
+ package_name));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int acpi_battery_get_status(struct acpi_battery *battery)
+{
+ int result = 0;
+
+ result = acpi_bus_get_status(battery->device);
+ if (result) {
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA"));
+ return -ENODEV;
+ }
+ return result;
+}
+
+static int acpi_battery_get_info(struct acpi_battery *battery)
{
int result = 0;
acpi_status status = 0;
@@ -133,16 +220,20 @@ acpi_battery_get_info(struct acpi_battery *battery,
struct acpi_buffer format = { sizeof(ACPI_BATTERY_FORMAT_BIF),
ACPI_BATTERY_FORMAT_BIF
};
- struct acpi_buffer data = { 0, NULL };
union acpi_object *package = NULL;
+ struct acpi_buffer *data = NULL;
+ struct acpi_battery_info *bif = NULL;
+ battery->update_time[ACPI_BATTERY_INFO] = get_seconds();
- if (!battery || !bif)
- return -EINVAL;
+ if (!acpi_battery_present(battery))
+ return 0;
- /* Evalute _BIF */
+ /* Evaluate _BIF */
- status = acpi_evaluate_object(battery->device->handle, "_BIF", NULL, &buffer);
+ status =
+ acpi_evaluate_object(acpi_battery_handle(battery), "_BIF", NULL,
+ &buffer);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BIF"));
return -ENODEV;
@@ -150,41 +241,29 @@ acpi_battery_get_info(struct acpi_battery *battery,
package = buffer.pointer;
- /* Extract Package Data */
-
- status = acpi_extract_package(package, &format, &data);
- if (status != AE_BUFFER_OVERFLOW) {
- ACPI_EXCEPTION((AE_INFO, status, "Extracting _BIF"));
- result = -ENODEV;
- goto end;
- }
+ data = &battery->bif_data;
- data.pointer = kzalloc(data.length, GFP_KERNEL);
- if (!data.pointer) {
- result = -ENOMEM;
- goto end;
- }
+ /* Extract Package Data */
- status = acpi_extract_package(package, &format, &data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Extracting _BIF"));
- kfree(data.pointer);
- result = -ENODEV;
+ result =
+ acpi_battery_extract_package(battery, package, &format, data,
+ "_BIF");
+ if (result)
goto end;
- }
end:
+
kfree(buffer.pointer);
- if (!result)
- (*bif) = data.pointer;
+ if (!result) {
+ bif = data->pointer;
+ battery->flags.power_unit = bif->power_unit;
+ }
return result;
}
-static int
-acpi_battery_get_status(struct acpi_battery *battery,
- struct acpi_battery_status **bst)
+static int acpi_battery_get_state(struct acpi_battery *battery)
{
int result = 0;
acpi_status status = 0;
@@ -192,16 +271,19 @@ acpi_battery_get_status(struct acpi_battery *battery,
struct acpi_buffer format = { sizeof(ACPI_BATTERY_FORMAT_BST),
ACPI_BATTERY_FORMAT_BST
};
- struct acpi_buffer data = { 0, NULL };
union acpi_object *package = NULL;
+ struct acpi_buffer *data = NULL;
+ battery->update_time[ACPI_BATTERY_STATE] = get_seconds();
- if (!battery || !bst)
- return -EINVAL;
+ if (!acpi_battery_present(battery))
+ return 0;
- /* Evalute _BST */
+ /* Evaluate _BST */
- status = acpi_evaluate_object(battery->device->handle, "_BST", NULL, &buffer);
+ status =
+ acpi_evaluate_object(acpi_battery_handle(battery), "_BST", NULL,
+ &buffer);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
return -ENODEV;
@@ -209,55 +291,49 @@ acpi_battery_get_status(struct acpi_battery *battery,
package = buffer.pointer;
- /* Extract Package Data */
-
- status = acpi_extract_package(package, &format, &data);
- if (status != AE_BUFFER_OVERFLOW) {
- ACPI_EXCEPTION((AE_INFO, status, "Extracting _BST"));
- result = -ENODEV;
- goto end;
- }
+ data = &battery->bst_data;
- data.pointer = kzalloc(data.length, GFP_KERNEL);
- if (!data.pointer) {
- result = -ENOMEM;
- goto end;
- }
+ /* Extract Package Data */
- status = acpi_extract_package(package, &format, &data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Extracting _BST"));
- kfree(data.pointer);
- result = -ENODEV;
+ result =
+ acpi_battery_extract_package(battery, package, &format, data,
+ "_BST");
+ if (result)
goto end;
- }
end:
kfree(buffer.pointer);
- if (!result)
- (*bst) = data.pointer;
-
return result;
}
-static int
-acpi_battery_set_alarm(struct acpi_battery *battery, unsigned long alarm)
+static int acpi_battery_get_alarm(struct acpi_battery *battery)
+{
+ battery->update_time[ACPI_BATTERY_ALARM] = get_seconds();
+
+ return 0;
+}
+
+static int acpi_battery_set_alarm(struct acpi_battery *battery,
+ unsigned long alarm)
{
acpi_status status = 0;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list arg_list = { 1, &arg0 };
+ battery->update_time[ACPI_BATTERY_ALARM] = get_seconds();
- if (!battery)
- return -EINVAL;
+ if (!acpi_battery_present(battery))
+ return -ENODEV;
- if (!battery->flags.alarm)
+ if (!battery->flags.alarm_present)
return -ENODEV;
arg0.integer.value = alarm;
- status = acpi_evaluate_object(battery->device->handle, "_BTP", &arg_list, NULL);
+ status =
+ acpi_evaluate_object(acpi_battery_handle(battery), "_BTP",
+ &arg_list, NULL);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -268,65 +344,114 @@ acpi_battery_set_alarm(struct acpi_battery *battery, unsigned long alarm)
return 0;
}
-static int acpi_battery_check(struct acpi_battery *battery)
+static int acpi_battery_init_alarm(struct acpi_battery *battery)
{
int result = 0;
acpi_status status = AE_OK;
acpi_handle handle = NULL;
- struct acpi_device *device = NULL;
- struct acpi_battery_info *bif = NULL;
+ struct acpi_battery_info *bif = battery->bif_data.pointer;
+ unsigned long alarm = battery->alarm;
+ /* See if alarms are supported, and if so, set default */
- if (!battery)
- return -EINVAL;
-
- device = battery->device;
+ status = acpi_get_handle(acpi_battery_handle(battery), "_BTP", &handle);
+ if (ACPI_SUCCESS(status)) {
+ battery->flags.alarm_present = 1;
+ if (!alarm && bif) {
+ alarm = bif->design_capacity_warning;
+ }
+ result = acpi_battery_set_alarm(battery, alarm);
+ if (result)
+ goto end;
+ } else {
+ battery->flags.alarm_present = 0;
+ }
- result = acpi_bus_get_status(device);
- if (result)
- return result;
+ end:
- /* Insertion? */
+ return result;
+}
- if (!battery->flags.present && device->status.battery_present) {
+static int acpi_battery_init_update(struct acpi_battery *battery)
+{
+ int result = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Battery inserted\n"));
+ result = acpi_battery_get_status(battery);
+ if (result)
+ return result;
- /* Evalute _BIF to get certain static information */
+ battery->flags.battery_present_prev = acpi_battery_present(battery);
- result = acpi_battery_get_info(battery, &bif);
+ if (acpi_battery_present(battery)) {
+ result = acpi_battery_get_info(battery);
+ if (result)
+ return result;
+ result = acpi_battery_get_state(battery);
if (result)
return result;
- battery->flags.power_unit = bif->power_unit;
- battery->trips.warning = bif->design_capacity_warning;
- battery->trips.low = bif->design_capacity_low;
- kfree(bif);
+ acpi_battery_init_alarm(battery);
+ }
+
+ return result;
+}
- /* See if alarms are supported, and if so, set default */
+static int acpi_battery_update(struct acpi_battery *battery,
+ int update, int *update_result_ptr)
+{
+ int result = 0;
+ int update_result = ACPI_BATTERY_NONE_UPDATE;
+
+ if (!acpi_battery_present(battery)) {
+ update = 1;
+ }
- status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
- if (ACPI_SUCCESS(status)) {
- battery->flags.alarm = 1;
- acpi_battery_set_alarm(battery, battery->trips.warning);
+ if (battery->flags.init_update) {
+ result = acpi_battery_init_update(battery);
+ if (result)
+ goto end;
+ update_result = ACPI_BATTERY_INIT_UPDATE;
+ } else if (update) {
+ result = acpi_battery_get_status(battery);
+ if (result)
+ goto end;
+ if ((!battery->flags.battery_present_prev & acpi_battery_present(battery))
+ || (battery->flags.battery_present_prev & !acpi_battery_present(battery))) {
+ result = acpi_battery_init_update(battery);
+ if (result)
+ goto end;
+ update_result = ACPI_BATTERY_INIT_UPDATE;
+ } else {
+ update_result = ACPI_BATTERY_EASY_UPDATE;
}
}
- /* Removal? */
+ end:
- else if (battery->flags.present && !device->status.battery_present) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Battery removed\n"));
- }
+ battery->flags.init_update = (result != 0);
- battery->flags.present = device->status.battery_present;
+ *update_result_ptr = update_result;
return result;
}
-static void acpi_battery_check_present(struct acpi_battery *battery)
+static void acpi_battery_notify_update(struct acpi_battery *battery)
{
- if (!battery->flags.present) {
- acpi_battery_check(battery);
+ acpi_battery_get_status(battery);
+
+ if (battery->flags.init_update) {
+ return;
+ }
+
+ if ((!battery->flags.battery_present_prev &
+ acpi_battery_present(battery)) ||
+ (battery->flags.battery_present_prev &
+ !acpi_battery_present(battery))) {
+ battery->flags.init_update = 1;
+ } else {
+ battery->flags.update[ACPI_BATTERY_INFO] = 1;
+ battery->flags.update[ACPI_BATTERY_STATE] = 1;
+ battery->flags.update[ACPI_BATTERY_ALARM] = 1;
}
}
@@ -335,37 +460,33 @@ static void acpi_battery_check_present(struct acpi_battery *battery)
-------------------------------------------------------------------------- */
static struct proc_dir_entry *acpi_battery_dir;
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
{
- int result = 0;
struct acpi_battery *battery = seq->private;
struct acpi_battery_info *bif = NULL;
char *units = "?";
-
- if (!battery)
+ if (result)
goto end;
- acpi_battery_check_present(battery);
-
- if (battery->flags.present)
+ if (acpi_battery_present(battery))
seq_printf(seq, "present: yes\n");
else {
seq_printf(seq, "present: no\n");
goto end;
}
- /* Battery Info (_BIF) */
-
- result = acpi_battery_get_info(battery, &bif);
- if (result || !bif) {
- seq_printf(seq, "ERROR: Unable to read battery information\n");
+ bif = battery->bif_data.pointer;
+ if (!bif) {
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, "BIF buffer is NULL"));
+ result = -ENODEV;
goto end;
}
- units =
- bif->
- power_unit ? ACPI_BATTERY_UNITS_AMPS : ACPI_BATTERY_UNITS_WATTS;
+ /* Battery Units */
+
+ units = acpi_battery_power_units(battery);
if (bif->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "design capacity: unknown\n");
@@ -396,7 +517,6 @@ static int acpi_battery_read_info(struct seq_file *seq, void *offset)
else
seq_printf(seq, "design voltage: %d mV\n",
(u32) bif->design_voltage);
-
seq_printf(seq, "design capacity warning: %d %sh\n",
(u32) bif->design_capacity_warning, units);
seq_printf(seq, "design capacity low: %d %sh\n",
@@ -411,50 +531,40 @@ static int acpi_battery_read_info(struct seq_file *seq, void *offset)
seq_printf(seq, "OEM info: %s\n", bif->oem_info);
end:
- kfree(bif);
- return 0;
-}
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery info\n");
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_info, PDE(inode)->data);
+ return result;
}
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
+static int acpi_battery_print_state(struct seq_file *seq, int result)
{
- int result = 0;
struct acpi_battery *battery = seq->private;
- struct acpi_battery_status *bst = NULL;
+ struct acpi_battery_state *bst = NULL;
char *units = "?";
-
- if (!battery)
+ if (result)
goto end;
- acpi_battery_check_present(battery);
-
- if (battery->flags.present)
+ if (acpi_battery_present(battery))
seq_printf(seq, "present: yes\n");
else {
seq_printf(seq, "present: no\n");
goto end;
}
- /* Battery Units */
-
- units =
- battery->flags.
- power_unit ? ACPI_BATTERY_UNITS_AMPS : ACPI_BATTERY_UNITS_WATTS;
-
- /* Battery Status (_BST) */
-
- result = acpi_battery_get_status(battery, &bst);
- if (result || !bst) {
- seq_printf(seq, "ERROR: Unable to read battery status\n");
+ bst = battery->bst_data.pointer;
+ if (!bst) {
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, "BST buffer is NULL"));
+ result = -ENODEV;
goto end;
}
+ /* Battery Units */
+
+ units = acpi_battery_power_units(battery);
+
if (!(bst->state & 0x04))
seq_printf(seq, "capacity state: ok\n");
else
@@ -490,48 +600,43 @@ static int acpi_battery_read_state(struct seq_file *seq, void *offset)
(u32) bst->present_voltage);
end:
- kfree(bst);
- return 0;
-}
+ if (result) {
+ seq_printf(seq, "ERROR: Unable to read battery state\n");
+ }
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
- return single_open(file, acpi_battery_read_state, PDE(inode)->data);
+ return result;
}
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
{
struct acpi_battery *battery = seq->private;
char *units = "?";
-
- if (!battery)
+ if (result)
goto end;
- acpi_battery_check_present(battery);
-
- if (!battery->flags.present) {
+ if (!acpi_battery_present(battery)) {
seq_printf(seq, "present: no\n");
goto end;
}
/* Battery Units */
- units =
- battery->flags.
- power_unit ? ACPI_BATTERY_UNITS_AMPS : ACPI_BATTERY_UNITS_WATTS;
-
- /* Battery Alarm */
+ units = acpi_battery_power_units(battery);
seq_printf(seq, "alarm: ");
if (!battery->alarm)
seq_printf(seq, "unsupported\n");
else
- seq_printf(seq, "%d %sh\n", (u32) battery->alarm, units);
+ seq_printf(seq, "%lu %sh\n", battery->alarm, units);
end:
- return 0;
+
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+
+ return result;
}
static ssize_t
@@ -543,27 +648,113 @@ acpi_battery_write_alarm(struct file *file,
char alarm_string[12] = { '\0' };
struct seq_file *m = file->private_data;
struct acpi_battery *battery = m->private;
-
+ int update_result = ACPI_BATTERY_NONE_UPDATE;
if (!battery || (count > sizeof(alarm_string) - 1))
return -EINVAL;
- acpi_battery_check_present(battery);
+ mutex_lock(&battery->mutex);
- if (!battery->flags.present)
- return -ENODEV;
+ result = acpi_battery_update(battery, 1, &update_result);
+ if (result) {
+ result = -ENODEV;
+ goto end;
+ }
+
+ if (!acpi_battery_present(battery)) {
+ result = -ENODEV;
+ goto end;
+ }
- if (copy_from_user(alarm_string, buffer, count))
- return -EFAULT;
+ if (copy_from_user(alarm_string, buffer, count)) {
+ result = -EFAULT;
+ goto end;
+ }
alarm_string[count] = '\0';
result = acpi_battery_set_alarm(battery,
simple_strtoul(alarm_string, NULL, 0));
if (result)
- return result;
+ goto end;
+
+ end:
- return count;
+ acpi_battery_check_result(battery, result);
+
+ if (!result)
+ result = count;
+
+ mutex_unlock(&battery->mutex);
+
+ return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+typedef int(*get_func)(struct acpi_battery *battery);
+
+static struct acpi_read_mux {
+ print_func print;
+ get_func get;
+} acpi_read_funcs[ACPI_BATTERY_NUMFILES] = {
+ {.get = acpi_battery_get_info, .print = acpi_battery_print_info},
+ {.get = acpi_battery_get_state, .print = acpi_battery_print_state},
+ {.get = acpi_battery_get_alarm, .print = acpi_battery_print_alarm},
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+ struct acpi_battery *battery = seq->private;
+ int result = 0;
+ int update_result = ACPI_BATTERY_NONE_UPDATE;
+ int update = 0;
+
+ mutex_lock(&battery->mutex);
+
+ update = (get_seconds() - battery->update_time[fid] >= update_time);
+ update = (update | battery->flags.update[fid]);
+
+ result = acpi_battery_update(battery, update, &update_result);
+ if (result)
+ goto end;
+
+ if (update_result == ACPI_BATTERY_EASY_UPDATE) {
+ result = acpi_read_funcs[fid].get(battery);
+ if (result)
+ goto end;
+ }
+
+ end:
+ result = acpi_read_funcs[fid].print(seq, result);
+ acpi_battery_check_result(battery, result);
+ battery->flags.update[fid] = result;
+ mutex_unlock(&battery->mutex);
+ return result;
+}
+
+static int acpi_battery_read_info(struct seq_file *seq, void *offset)
+{
+ return acpi_battery_read(ACPI_BATTERY_INFO, seq);
+}
+
+static int acpi_battery_read_state(struct seq_file *seq, void *offset)
+{
+ return acpi_battery_read(ACPI_BATTERY_STATE, seq);
+}
+
+static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
+{
+ return acpi_battery_read(ACPI_BATTERY_ALARM, seq);
+}
+
+static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_battery_read_info, PDE(inode)->data);
+}
+
+static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, acpi_battery_read_state, PDE(inode)->data);
}
static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
@@ -571,35 +762,51 @@ static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
return single_open(file, acpi_battery_read_alarm, PDE(inode)->data);
}
-static const struct file_operations acpi_battery_info_ops = {
+static struct battery_file {
+ struct file_operations ops;
+ mode_t mode;
+ char *name;
+} acpi_battery_file[] = {
+ {
+ .name = "info",
+ .mode = S_IRUGO,
+ .ops = {
.open = acpi_battery_info_open_fs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_ops = {
+ },
+ },
+ {
+ .name = "state",
+ .mode = S_IRUGO,
+ .ops = {
.open = acpi_battery_state_open_fs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_ops = {
+ },
+ },
+ {
+ .name = "alarm",
+ .mode = S_IFREG | S_IRUGO | S_IWUSR,
+ .ops = {
.open = acpi_battery_alarm_open_fs,
.read = seq_read,
.write = acpi_battery_write_alarm,
.llseek = seq_lseek,
.release = single_release,
.owner = THIS_MODULE,
+ },
+ },
};
static int acpi_battery_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *entry = NULL;
-
+ int i;
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
@@ -609,38 +816,16 @@ static int acpi_battery_add_fs(struct acpi_device *device)
acpi_device_dir(device)->owner = THIS_MODULE;
}
- /* 'info' [R] */
- entry = create_proc_entry(ACPI_BATTERY_FILE_INFO,
- S_IRUGO, acpi_device_dir(device));
- if (!entry)
- return -ENODEV;
- else {
- entry->proc_fops = &acpi_battery_info_ops;
- entry->data = acpi_driver_data(device);
- entry->owner = THIS_MODULE;
- }
-
- /* 'status' [R] */
- entry = create_proc_entry(ACPI_BATTERY_FILE_STATUS,
- S_IRUGO, acpi_device_dir(device));
- if (!entry)
- return -ENODEV;
- else {
- entry->proc_fops = &acpi_battery_state_ops;
- entry->data = acpi_driver_data(device);
- entry->owner = THIS_MODULE;
- }
-
- /* 'alarm' [R/W] */
- entry = create_proc_entry(ACPI_BATTERY_FILE_ALARM,
- S_IFREG | S_IRUGO | S_IWUSR,
- acpi_device_dir(device));
- if (!entry)
- return -ENODEV;
- else {
- entry->proc_fops = &acpi_battery_alarm_ops;
- entry->data = acpi_driver_data(device);
- entry->owner = THIS_MODULE;
+ for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+ entry = create_proc_entry(acpi_battery_file[i].name,
+ acpi_battery_file[i].mode, acpi_device_dir(device));
+ if (!entry)
+ return -ENODEV;
+ else {
+ entry->proc_fops = &acpi_battery_file[i].ops;
+ entry->data = acpi_driver_data(device);
+ entry->owner = THIS_MODULE;
+ }
}
return 0;
@@ -648,15 +833,12 @@ static int acpi_battery_add_fs(struct acpi_device *device)
static int acpi_battery_remove_fs(struct acpi_device *device)
{
-
+ int i;
if (acpi_device_dir(device)) {
- remove_proc_entry(ACPI_BATTERY_FILE_ALARM,
+ for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+ remove_proc_entry(acpi_battery_file[i].name,
acpi_device_dir(device));
- remove_proc_entry(ACPI_BATTERY_FILE_STATUS,
- acpi_device_dir(device));
- remove_proc_entry(ACPI_BATTERY_FILE_INFO,
- acpi_device_dir(device));
-
+ }
remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
acpi_device_dir(device) = NULL;
}
@@ -673,7 +855,6 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
struct acpi_battery *battery = data;
struct acpi_device *device = NULL;
-
if (!battery)
return;
@@ -684,8 +865,10 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
case ACPI_BATTERY_NOTIFY_INFO:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- acpi_battery_check(battery);
- acpi_bus_generate_event(device, event, battery->flags.present);
+ device = battery->device;
+ acpi_battery_notify_update(battery);
+ acpi_bus_generate_event(device, event,
+ acpi_battery_present(battery));
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -702,7 +885,6 @@ static int acpi_battery_add(struct acpi_device *device)
acpi_status status = 0;
struct acpi_battery *battery = NULL;
-
if (!device)
return -EINVAL;
@@ -710,15 +892,21 @@ static int acpi_battery_add(struct acpi_device *device)
if (!battery)
return -ENOMEM;
+ mutex_init(&battery->mutex);
+
+ mutex_lock(&battery->mutex);
+
battery->device = device;
strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
acpi_driver_data(device) = battery;
- result = acpi_battery_check(battery);
+ result = acpi_battery_get_status(battery);
if (result)
goto end;
+ battery->flags.init_update = 1;
+
result = acpi_battery_add_fs(device);
if (result)
goto end;
@@ -727,6 +915,7 @@ static int acpi_battery_add(struct acpi_device *device)
ACPI_ALL_NOTIFY,
acpi_battery_notify, battery);
if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
result = -ENODEV;
goto end;
}
@@ -736,11 +925,14 @@ static int acpi_battery_add(struct acpi_device *device)
device->status.battery_present ? "present" : "absent");
end:
+
if (result) {
acpi_battery_remove_fs(device);
kfree(battery);
}
+ mutex_unlock(&battery->mutex);
+
return result;
}
@@ -749,18 +941,27 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
acpi_status status = 0;
struct acpi_battery *battery = NULL;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
battery = acpi_driver_data(device);
+ mutex_lock(&battery->mutex);
+
status = acpi_remove_notify_handler(device->handle,
ACPI_ALL_NOTIFY,
acpi_battery_notify);
acpi_battery_remove_fs(device);
+ kfree(battery->bif_data.pointer);
+
+ kfree(battery->bst_data.pointer);
+
+ mutex_unlock(&battery->mutex);
+
+ mutex_destroy(&battery->mutex);
+
kfree(battery);
return 0;
@@ -775,7 +976,10 @@ static int acpi_battery_resume(struct acpi_device *device)
return -EINVAL;
battery = device->driver_data;
- return acpi_battery_check(battery);
+
+ battery->flags.init_update = 1;
+
+ return 0;
}
static int __init acpi_battery_init(void)
@@ -800,7 +1004,6 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
-
acpi_bus_unregister_driver(&acpi_battery_driver);
acpi_unlock_battery_dir(acpi_battery_dir);
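Note on the battery.c rework above: the three hand-coded proc entries are replaced by the acpi_battery_file[] table, and acpi_battery_add_fs()/acpi_battery_remove_fs() simply loop over it. A minimal userspace sketch of that table-driven dispatch pattern (the names and handlers below are illustrative only, not taken from the patch):

#include <stdio.h>

struct file_entry {
        const char *name;
        int (*show)(void);              /* stands in for the per-file print handler */
};

static int show_info(void)  { return printf("present: yes\n"); }
static int show_state(void) { return printf("charging state: charging\n"); }
static int show_alarm(void) { return printf("alarm: unsupported\n"); }

static const struct file_entry files[] = {
        { "info",  show_info  },
        { "state", show_state },
        { "alarm", show_alarm },
};

int main(void)
{
        /* one loop handles every entry, mirroring the create_proc_entry()
         * and remove_proc_entry() loops introduced by the patch */
        for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                printf("-- %s --\n", files[i].name);
                files[i].show();
        }
        return 0;
}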
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index fb3f31b5e69f..56a5b3fffeb3 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -288,6 +288,11 @@ static int bay_add(acpi_handle handle, int id)
new_bay->pdev = pdev;
platform_set_drvdata(pdev, new_bay);
+ /*
+ * we want the bay driver to be able to send uevents
+ */
+ pdev->dev.uevent_suppress = 0;
+
if (acpi_bay_add_fs(new_bay)) {
platform_device_unregister(new_bay->pdev);
goto bay_add_err;
@@ -328,18 +333,12 @@ static void bay_notify(acpi_handle handle, u32 event, void *data)
{
struct bay *bay_dev = (struct bay *)data;
struct device *dev = &bay_dev->pdev->dev;
+ char event_string[12];
+ char *envp[] = { event_string, NULL };
bay_dprintk(handle, "Bay event");
-
- switch(event) {
- case ACPI_NOTIFY_BUS_CHECK:
- case ACPI_NOTIFY_DEVICE_CHECK:
- case ACPI_NOTIFY_EJECT_REQUEST:
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
- break;
- default:
- printk(KERN_ERR PREFIX "Bay: unknown event %d\n", event);
- }
+ sprintf(event_string, "BAY_EVENT=%d\n", event);
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
static acpi_status
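With bay_notify() now passing the raw notify value to userspace as a BAY_EVENT=<n> uevent variable instead of filtering events in the kernel, a listener can pick the variable out of the uevent broadcast. A rough sketch of such a listener over the standard NETLINK_KOBJECT_UEVENT socket (not part of the patch; error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
        struct sockaddr_nl nl = {
                .nl_family = AF_NETLINK,
                .nl_pid    = getpid(),
                .nl_groups = 1,                 /* kernel uevent multicast group */
        };
        char buf[4096];
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

        if (fd < 0 || bind(fd, (struct sockaddr *)&nl, sizeof(nl)) < 0)
                return 1;

        for (;;) {
                ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
                if (len <= 0)
                        break;
                buf[len] = '\0';
                /* the payload is a NUL-separated list of KEY=VALUE strings */
                for (char *p = buf; p < buf + len; p += strlen(p) + 1)
                        if (strncmp(p, "BAY_EVENT=", 10) == 0)
                                printf("uevent var: %s\n", p);
        }
        return 0;
}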
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e5084ececb6f..6b2658c96242 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -292,6 +292,10 @@ int acpi_bus_generate_event(struct acpi_device *device, u8 type, int data)
if (!device)
return -EINVAL;
+ if (acpi_bus_generate_genetlink_event(device, type, data))
+ printk(KERN_WARNING PREFIX
+ "Failed to generate an ACPI event via genetlink!\n");
+
/* drop event on the floor if no one's listening */
if (!event_is_open)
return 0;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 4546bf873aea..6192c8be66df 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -40,8 +40,15 @@ MODULE_AUTHOR("Kristen Carlson Accardi");
MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
+static int immediate_undock = 1;
+module_param(immediate_undock, bool, 0644);
+MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
+ "undock immediately when the undock button is pressed, 0 will cause"
+ " the driver to wait for userspace to write the undock sysfs file "
+ " before undocking");
+
static struct atomic_notifier_head dock_notifier_list;
-static struct platform_device dock_device;
+static struct platform_device *dock_device;
static char dock_device_name[] = "dock";
struct dock_station {
@@ -63,6 +70,7 @@ struct dock_dependent_device {
};
#define DOCK_DOCKING 0x00000001
+#define DOCK_UNDOCKING 0x00000002
#define DOCK_EVENT 3
#define UNDOCK_EVENT 2
@@ -327,12 +335,20 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
static void dock_event(struct dock_station *ds, u32 event, int num)
{
- struct device *dev = &dock_device.dev;
+ struct device *dev = &dock_device->dev;
+ char event_string[7];
+ char *envp[] = { event_string, NULL };
+
+ if (num == UNDOCK_EVENT)
+ sprintf(event_string, "UNDOCK");
+ else
+ sprintf(event_string, "DOCK");
+
/*
* Indicate that the status of the dock station has
* changed.
*/
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
/**
@@ -380,12 +396,11 @@ static void handle_dock(struct dock_station *ds, int dock)
union acpi_object arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer);
- obj = name_buffer.pointer;
- printk(KERN_INFO PREFIX "%s\n", dock ? "docking" : "undocking");
+ printk(KERN_INFO PREFIX "%s - %s\n",
+ (char *)name_buffer.pointer, dock ? "docking" : "undocking");
/* _DCK method has one argument */
arg_list.count = 1;
@@ -394,7 +409,8 @@ static void handle_dock(struct dock_station *ds, int dock)
arg.integer.value = dock;
status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
if (ACPI_FAILURE(status))
- pr_debug("%s: failed to execute _DCK\n", obj->string.pointer);
+ printk(KERN_ERR PREFIX "%s - failed to execute _DCK\n",
+ (char *)name_buffer.pointer);
kfree(buffer.pointer);
kfree(name_buffer.pointer);
}
@@ -420,6 +436,16 @@ static inline void complete_dock(struct dock_station *ds)
ds->last_dock_time = jiffies;
}
+static inline void begin_undock(struct dock_station *ds)
+{
+ ds->flags |= DOCK_UNDOCKING;
+}
+
+static inline void complete_undock(struct dock_station *ds)
+{
+ ds->flags &= ~(DOCK_UNDOCKING);
+}
+
/**
* dock_in_progress - see if we are in the middle of handling a dock event
* @ds: the dock station
@@ -550,7 +576,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
printk(KERN_ERR PREFIX "Unable to undock!\n");
return -EBUSY;
}
-
+ complete_undock(ds);
return 0;
}
@@ -594,7 +620,11 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
* to the driver who wish to hotplug.
*/
case ACPI_NOTIFY_EJECT_REQUEST:
- handle_eject_request(ds, event);
+ begin_undock(ds);
+ if (immediate_undock)
+ handle_eject_request(ds, event);
+ else
+ dock_event(ds, event, UNDOCK_EVENT);
break;
default:
printk(KERN_ERR PREFIX "Unknown dock event %d\n", event);
@@ -653,6 +683,17 @@ static ssize_t show_docked(struct device *dev,
DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
/*
+ * show_flags - read method for flags file in sysfs
+ */
+static ssize_t show_flags(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
+
+}
+DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+
+/*
* write_undock - write method for "undock" file in sysfs
*/
static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
@@ -675,16 +716,15 @@ static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long lbuf;
- acpi_status status = acpi_evaluate_integer(dock_station->handle, "_UID", NULL, &lbuf);
- if(ACPI_FAILURE(status)) {
+ acpi_status status = acpi_evaluate_integer(dock_station->handle,
+ "_UID", NULL, &lbuf);
+ if (ACPI_FAILURE(status))
return 0;
- }
+
return snprintf(buf, PAGE_SIZE, "%lx\n", lbuf);
}
DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
-
-
/**
* dock_add - add a new dock station
* @handle: the dock station handle
@@ -711,33 +751,53 @@ static int dock_add(acpi_handle handle)
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
/* initialize platform device stuff */
- dock_device.name = dock_device_name;
- ret = platform_device_register(&dock_device);
+ dock_device =
+ platform_device_register_simple(dock_device_name, 0, NULL, 0);
+ if (IS_ERR(dock_device)) {
+ kfree(dock_station);
+ dock_station = NULL;
+ return PTR_ERR(dock_device);
+ }
+
+ /* we want the dock device to send uevents */
+ dock_device->dev.uevent_suppress = 0;
+
+ ret = device_create_file(&dock_device->dev, &dev_attr_docked);
if (ret) {
- printk(KERN_ERR PREFIX "Error %d registering dock device\n", ret);
+ printk("Error %d adding sysfs file\n", ret);
+ platform_device_unregister(dock_device);
kfree(dock_station);
+ dock_station = NULL;
return ret;
}
- ret = device_create_file(&dock_device.dev, &dev_attr_docked);
+ ret = device_create_file(&dock_device->dev, &dev_attr_undock);
if (ret) {
printk("Error %d adding sysfs file\n", ret);
- platform_device_unregister(&dock_device);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ platform_device_unregister(dock_device);
kfree(dock_station);
+ dock_station = NULL;
return ret;
}
- ret = device_create_file(&dock_device.dev, &dev_attr_undock);
+ ret = device_create_file(&dock_device->dev, &dev_attr_uid);
if (ret) {
printk("Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device.dev, &dev_attr_docked);
- platform_device_unregister(&dock_device);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ platform_device_unregister(dock_device);
kfree(dock_station);
+ dock_station = NULL;
return ret;
}
- ret = device_create_file(&dock_device.dev, &dev_attr_uid);
+ ret = device_create_file(&dock_device->dev, &dev_attr_flags);
if (ret) {
printk("Error %d adding sysfs file\n", ret);
- platform_device_unregister(&dock_device);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ device_remove_file(&dock_device->dev, &dev_attr_uid);
+ platform_device_unregister(dock_device);
kfree(dock_station);
+ dock_station = NULL;
return ret;
}
@@ -750,6 +810,7 @@ static int dock_add(acpi_handle handle)
dd = alloc_dock_dependent_device(handle);
if (!dd) {
kfree(dock_station);
+ dock_station = NULL;
ret = -ENOMEM;
goto dock_add_err_unregister;
}
@@ -773,10 +834,13 @@ static int dock_add(acpi_handle handle)
dock_add_err:
kfree(dd);
dock_add_err_unregister:
- device_remove_file(&dock_device.dev, &dev_attr_docked);
- device_remove_file(&dock_device.dev, &dev_attr_undock);
- platform_device_unregister(&dock_device);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ device_remove_file(&dock_device->dev, &dev_attr_uid);
+ device_remove_file(&dock_device->dev, &dev_attr_flags);
+ platform_device_unregister(dock_device);
kfree(dock_station);
+ dock_station = NULL;
return ret;
}
@@ -804,12 +868,15 @@ static int dock_remove(void)
printk(KERN_ERR "Error removing notify handler\n");
/* cleanup sysfs */
- device_remove_file(&dock_device.dev, &dev_attr_docked);
- device_remove_file(&dock_device.dev, &dev_attr_undock);
- platform_device_unregister(&dock_device);
+ device_remove_file(&dock_device->dev, &dev_attr_docked);
+ device_remove_file(&dock_device->dev, &dev_attr_undock);
+ device_remove_file(&dock_device->dev, &dev_attr_uid);
+ device_remove_file(&dock_device->dev, &dev_attr_flags);
+ platform_device_unregister(dock_device);
/* free dock station memory */
kfree(dock_station);
+ dock_station = NULL;
return 0;
}
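When the new immediate_undock parameter is 0, dock_notify() only flags DOCK_UNDOCKING and emits the UNDOCK uevent, leaving the actual eject to userspace via the undock and flags attributes added above. A hypothetical helper illustrating that flow; the /sys/devices/platform/dock.0/ path is an assumption based on platform_device_register_simple(dock_device_name, 0, ...) and may differ on a given system:

#include <stdio.h>

#define DOCK_SYSFS     "/sys/devices/platform/dock.0/"   /* assumed device name */
#define DOCK_UNDOCKING 0x00000002                        /* matches the new flag bit */

int main(void)
{
        unsigned int flags = 0;
        FILE *f = fopen(DOCK_SYSFS "flags", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%u", &flags) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);

        if (flags & DOCK_UNDOCKING) {
                f = fopen(DOCK_SYSFS "undock", "w");
                if (!f)
                        return 1;
                fputs("1\n", f);        /* a write here asks the driver to finish the eject */
                fclose(f);
        }
        return 0;
}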
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 82f496c07675..10e851021eca 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -34,25 +34,26 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/list.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/actypes.h>
-#define _COMPONENT ACPI_EC_COMPONENT
-ACPI_MODULE_NAME("ec");
-#define ACPI_EC_COMPONENT 0x00100000
#define ACPI_EC_CLASS "embedded_controller"
#define ACPI_EC_HID "PNP0C09"
#define ACPI_EC_DEVICE_NAME "Embedded Controller"
#define ACPI_EC_FILE_INFO "info"
+
#undef PREFIX
#define PREFIX "ACPI: EC: "
+
/* EC status register */
#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
+
/* EC commands */
enum ec_command {
ACPI_EC_COMMAND_READ = 0x80,
@@ -61,6 +62,7 @@ enum ec_command {
ACPI_EC_BURST_DISABLE = 0x83,
ACPI_EC_COMMAND_QUERY = 0x84,
};
+
/* EC events */
enum ec_event {
ACPI_EC_EVENT_OBF_1 = 1, /* Output buffer full */
@@ -94,6 +96,16 @@ static struct acpi_driver acpi_ec_driver = {
/* If we find an EC via the ECDT, we need to keep a ptr to its context */
/* External interfaces use first EC only, so remember */
+typedef int (*acpi_ec_query_func) (void *data);
+
+struct acpi_ec_query_handler {
+ struct list_head node;
+ acpi_ec_query_func func;
+ acpi_handle handle;
+ void *data;
+ u8 query_bit;
+};
+
static struct acpi_ec {
acpi_handle handle;
unsigned long gpe;
@@ -104,6 +116,7 @@ static struct acpi_ec {
atomic_t query_pending;
atomic_t event_count;
wait_queue_head_t wait;
+ struct list_head list;
} *boot_ec, *first_ec;
/* --------------------------------------------------------------------------
@@ -245,7 +258,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
if (status) {
- printk(KERN_DEBUG PREFIX
+ printk(KERN_ERR PREFIX
"input buffer is not empty, aborting transaction\n");
goto end;
}
@@ -394,21 +407,67 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
+int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data)
+{
+ struct acpi_ec_query_handler *handler =
+ kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+
+ handler->query_bit = query_bit;
+ handler->handle = handle;
+ handler->func = func;
+ handler->data = data;
+ mutex_lock(&ec->lock);
+ list_add_tail(&handler->node, &ec->list);
+ mutex_unlock(&ec->lock);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
+
+void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
+{
+ struct acpi_ec_query_handler *handler;
+ mutex_lock(&ec->lock);
+ list_for_each_entry(handler, &ec->list, node) {
+ if (query_bit == handler->query_bit) {
+ list_del(&handler->node);
+ kfree(handler);
+ break;
+ }
+ }
+ mutex_unlock(&ec->lock);
+}
+
+EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
static void acpi_ec_gpe_query(void *ec_cxt)
{
struct acpi_ec *ec = ec_cxt;
u8 value = 0;
- char object_name[8];
+ struct acpi_ec_query_handler *handler, copy;
if (!ec || acpi_ec_query(ec, &value))
return;
-
- snprintf(object_name, 8, "_Q%2.2X", value);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s", object_name));
-
- acpi_evaluate_object(ec->handle, object_name, NULL, NULL);
+ mutex_lock(&ec->lock);
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+ /* have custom handler for this bit */
+ memcpy(&copy, handler, sizeof(copy));
+ mutex_unlock(&ec->lock);
+ if (copy.func) {
+ copy.func(copy.data);
+ } else if (copy.handle) {
+ acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
+ }
+ return;
+ }
+ }
+ mutex_unlock(&ec->lock);
+ printk(KERN_ERR PREFIX "Handler for query 0x%x is not found!\n", value);
}
static u32 acpi_ec_gpe_handler(void *data)
@@ -427,8 +486,7 @@ static u32 acpi_ec_gpe_handler(void *data)
if ((value & ACPI_EC_FLAG_SCI) && !atomic_read(&ec->query_pending)) {
atomic_set(&ec->query_pending, 1);
status =
- acpi_os_execute(OSL_EC_BURST_HANDLER, acpi_ec_gpe_query,
- ec);
+ acpi_os_execute(OSL_EC_BURST_HANDLER, acpi_ec_gpe_query, ec);
}
return status == AE_OK ?
@@ -454,57 +512,35 @@ acpi_ec_space_setup(acpi_handle region_handle,
}
static acpi_status
-acpi_ec_space_handler(u32 function,
- acpi_physical_address address,
- u32 bit_width,
- acpi_integer * value,
+acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, acpi_integer *value,
void *handler_context, void *region_context)
{
- int result = 0;
struct acpi_ec *ec = handler_context;
- u64 temp = *value;
- acpi_integer f_v = 0;
- int i = 0;
+ int result = 0, i = 0;
+ u8 temp = 0;
if ((address > 0xFF) || !value || !handler_context)
return AE_BAD_PARAMETER;
- if (bit_width != 8 && acpi_strict) {
+ if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
- }
-
- next_byte:
- switch (function) {
- case ACPI_READ:
- temp = 0;
- result = acpi_ec_read(ec, (u8) address, (u8 *) & temp);
- break;
- case ACPI_WRITE:
- result = acpi_ec_write(ec, (u8) address, (u8) temp);
- break;
- default:
- result = -EINVAL;
- goto out;
- break;
- }
- bit_width -= 8;
- if (bit_width) {
- if (function == ACPI_READ)
- f_v |= temp << 8 * i;
- if (function == ACPI_WRITE)
- temp >>= 8;
- i++;
- address++;
- goto next_byte;
- }
+ if (bits != 8 && acpi_strict)
+ return AE_BAD_PARAMETER;
- if (function == ACPI_READ) {
- f_v |= temp << 8 * i;
- *value = f_v;
+ while (bits - i > 0) {
+ if (function == ACPI_READ) {
+ result = acpi_ec_read(ec, address, &temp);
+ (*value) |= ((acpi_integer)temp) << i;
+ } else {
+ temp = 0xff & ((*value) >> i);
+ result = acpi_ec_write(ec, address, temp);
+ }
+ i += 8;
+ ++address;
}
- out:
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
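Sketch (not part of the patch): the byte-at-a-time assembly performed by the rewritten region handler, restated as a standalone routine. ec_read_byte() is an invented stand-in for acpi_ec_read().

    #include <stdint.h>

    int ec_read_byte(uint8_t addr, uint8_t *val);   /* stand-in, assumed */

    /* Read 'bits' bits (a multiple of 8) starting at EC offset 'addr'. */
    static int ec_region_read(uint8_t addr, uint32_t bits, uint64_t *value)
    {
            uint32_t i;
            uint8_t byte;
            int ret = 0;

            *value = 0;
            for (i = 0; i < bits; i += 8, addr++) {
                    ret = ec_read_byte(addr, &byte);
                    if (ret)
                            break;
                    *value |= (uint64_t)byte << i;  /* LSB first, as in the handler */
            }
            return ret;
    }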
@@ -597,9 +633,6 @@ static int acpi_ec_remove_fs(struct acpi_device *device)
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);
-static acpi_status
-ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval);
-
static struct acpi_ec *make_acpi_ec(void)
{
struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
@@ -610,13 +643,52 @@ static struct acpi_ec *make_acpi_ec(void)
atomic_set(&ec->event_count, 1);
mutex_init(&ec->lock);
init_waitqueue_head(&ec->wait);
+ INIT_LIST_HEAD(&ec->list);
return ec;
}
+static acpi_status
+acpi_ec_register_query_methods(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ struct acpi_namespace_node *node = handle;
+ struct acpi_ec *ec = context;
+ int value = 0;
+ if (sscanf(node->name.ascii, "_Q%x", &value) == 1) {
+ acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
+ }
+ return AE_OK;
+}
+
+static int ec_parse_device(struct acpi_ec *ec, acpi_handle handle)
+{
+ if (ACPI_FAILURE(acpi_walk_resources(handle, METHOD_NAME__CRS,
+ ec_parse_io_ports, ec)))
+ return -EINVAL;
+
+ /* Get GPE bit assignment (EC events). */
+ /* TODO: Add support for _GPE returning a package */
+ if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe)))
+ return -EINVAL;
+
+ /* Use the global lock for all EC transactions? */
+ acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
+
+ /* Find and register all query methods */
+ acpi_walk_namespace(ACPI_TYPE_METHOD, handle, 1,
+ acpi_ec_register_query_methods, ec, NULL);
+
+ ec->handle = handle;
+
+ printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+ ec->gpe, ec->command_addr, ec->data_addr);
+
+ return 0;
+}
+
static int acpi_ec_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
struct acpi_ec *ec = NULL;
if (!device)
@@ -629,8 +701,7 @@ static int acpi_ec_add(struct acpi_device *device)
if (!ec)
return -ENOMEM;
- status = ec_parse_device(device->handle, 0, ec, NULL);
- if (status != AE_CTRL_TERMINATE) {
+ if (ec_parse_device(ec, device->handle)) {
kfree(ec);
return -EINVAL;
}
@@ -641,6 +712,8 @@ static int acpi_ec_add(struct acpi_device *device)
/* We might have incorrect info for GL at boot time */
mutex_lock(&boot_ec->lock);
boot_ec->global_lock = ec->global_lock;
+ /* Copy handlers from new ec into boot ec */
+ list_splice(&ec->list, &boot_ec->list);
mutex_unlock(&boot_ec->lock);
kfree(ec);
ec = boot_ec;
@@ -651,22 +724,24 @@ static int acpi_ec_add(struct acpi_device *device)
acpi_driver_data(device) = ec;
acpi_ec_add_fs(device);
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s [%s] (gpe %d) interrupt mode.",
- acpi_device_name(device), acpi_device_bid(device),
- (u32) ec->gpe));
-
return 0;
}
static int acpi_ec_remove(struct acpi_device *device, int type)
{
struct acpi_ec *ec;
+ struct acpi_ec_query_handler *handler;
if (!device)
return -EINVAL;
ec = acpi_driver_data(device);
+ mutex_lock(&ec->lock);
+ list_for_each_entry(handler, &ec->list, node) {
+ list_del(&handler->node);
+ kfree(handler);
+ }
+ mutex_unlock(&ec->lock);
acpi_ec_remove_fs(device);
acpi_driver_data(device) = NULL;
if (ec == first_ec)
@@ -722,15 +797,13 @@ static int ec_install_handlers(struct acpi_ec *ec)
return -ENODEV;
}
- /* EC is fully operational, allow queries */
- atomic_set(&ec->query_pending, 0);
-
return 0;
}
static int acpi_ec_start(struct acpi_device *device)
{
struct acpi_ec *ec;
+ int ret = 0;
if (!device)
return -EINVAL;
@@ -740,14 +813,14 @@ static int acpi_ec_start(struct acpi_device *device)
if (!ec)
return -EINVAL;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02lx, ports=0x%2lx,0x%2lx",
- ec->gpe, ec->command_addr, ec->data_addr));
-
/* Boot EC is already working */
- if (ec == boot_ec)
- return 0;
+ if (ec != boot_ec)
+ ret = ec_install_handlers(ec);
+
+ /* EC is fully operational, allow queries */
+ atomic_set(&ec->query_pending, 0);
- return ec_install_handlers(ec);
+ return ret;
}
static int acpi_ec_stop(struct acpi_device *device, int type)
@@ -779,34 +852,6 @@ static int acpi_ec_stop(struct acpi_device *device, int type)
return 0;
}
-static acpi_status
-ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
-{
- acpi_status status;
-
- struct acpi_ec *ec = context;
- status = acpi_walk_resources(handle, METHOD_NAME__CRS,
- ec_parse_io_ports, ec);
- if (ACPI_FAILURE(status))
- return status;
-
- /* Get GPE bit assignment (EC events). */
- /* TODO: Add support for _GPE returning a package */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe);
- if (ACPI_FAILURE(status))
- return status;
-
- /* Use the global lock for all EC transactions? */
- acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
-
- ec->handle = handle;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "GPE=0x%02lx, ports=0x%2lx, 0x%2lx",
- ec->gpe, ec->command_addr, ec->data_addr));
-
- return AE_CTRL_TERMINATE;
-}
-
int __init acpi_ec_ecdt_probe(void)
{
int ret;
@@ -825,7 +870,7 @@ int __init acpi_ec_ecdt_probe(void)
if (ACPI_FAILURE(status))
goto error;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found ECDT"));
+ printk(KERN_INFO PREFIX "EC description table is found, configuring boot EC\n");
boot_ec->command_addr = ecdt_ptr->control.address;
boot_ec->data_addr = ecdt_ptr->data.address;
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 3b23562e6f92..dfa5853b17f0 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -11,6 +11,8 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <acpi/acpi_drivers.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("event");
@@ -48,7 +50,6 @@ acpi_system_read_event(struct file *file, char __user * buffer, size_t count,
static int chars_remaining = 0;
static char *ptr;
-
if (!chars_remaining) {
memset(&event, 0, sizeof(struct acpi_bus_event));
@@ -106,23 +107,161 @@ static const struct file_operations acpi_system_event_ops = {
.poll = acpi_system_poll_event,
};
+#ifdef CONFIG_NET
+unsigned int acpi_event_seqnum;
+struct acpi_genl_event {
+ acpi_device_class device_class;
+ char bus_id[15];
+ u32 type;
+ u32 data;
+};
+
+/* attributes of acpi_genl_family */
+enum {
+ ACPI_GENL_ATTR_UNSPEC,
+ ACPI_GENL_ATTR_EVENT, /* ACPI event info needed by user space */
+ __ACPI_GENL_ATTR_MAX,
+};
+#define ACPI_GENL_ATTR_MAX (__ACPI_GENL_ATTR_MAX - 1)
+
+/* commands supported by the acpi_genl_family */
+enum {
+ ACPI_GENL_CMD_UNSPEC,
+ ACPI_GENL_CMD_EVENT, /* kernel->user notifications for ACPI events */
+ __ACPI_GENL_CMD_MAX,
+};
+#define ACPI_GENL_CMD_MAX (__ACPI_GENL_CMD_MAX - 1)
+
+#define ACPI_GENL_FAMILY_NAME "acpi_event"
+#define ACPI_GENL_VERSION 0x01
+#define ACPI_GENL_MCAST_GROUP_NAME "acpi_mc_group"
+
+static struct genl_family acpi_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = ACPI_GENL_FAMILY_NAME,
+ .version = ACPI_GENL_VERSION,
+ .maxattr = ACPI_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group acpi_event_mcgrp = {
+ .name = ACPI_GENL_MCAST_GROUP_NAME,
+};
+
+int acpi_bus_generate_genetlink_event(struct acpi_device *device,
+ u8 type, int data)
+{
+ struct sk_buff *skb;
+ struct nlattr *attr;
+ struct acpi_genl_event *event;
+ void *msg_header;
+ int size;
+ int result;
+
+ /* allocate memory */
+ size = nla_total_size(sizeof(struct acpi_genl_event)) +
+ nla_total_size(0);
+
+ skb = genlmsg_new(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ /* add the genetlink message header */
+ msg_header = genlmsg_put(skb, 0, acpi_event_seqnum++,
+ &acpi_event_genl_family, 0,
+ ACPI_GENL_CMD_EVENT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ return -ENOMEM;
+ }
+
+ /* fill the data */
+ attr =
+ nla_reserve(skb, ACPI_GENL_ATTR_EVENT,
+ sizeof(struct acpi_genl_event));
+ if (!attr) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ event = nla_data(attr);
+ if (!event) {
+ nlmsg_free(skb);
+ return -EINVAL;
+ }
+
+ memset(event, 0, sizeof(struct acpi_genl_event));
+
+ strcpy(event->device_class, device->pnp.device_class);
+ strcpy(event->bus_id, device->dev.bus_id);
+ event->type = type;
+ event->data = data;
+
+ /* send multicast genetlink message */
+ result = genlmsg_end(skb, msg_header);
+ if (result < 0) {
+ nlmsg_free(skb);
+ return result;
+ }
+
+ result =
+ genlmsg_multicast(skb, 0, acpi_event_mcgrp.id, GFP_ATOMIC);
+ if (result)
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Failed to send a Genetlink message!\n"));
+ return 0;
+}
+
+static int acpi_event_genetlink_init(void)
+{
+ int result;
+
+ result = genl_register_family(&acpi_event_genl_family);
+ if (result)
+ return result;
+
+ result = genl_register_mc_group(&acpi_event_genl_family,
+ &acpi_event_mcgrp);
+ if (result)
+ genl_unregister_family(&acpi_event_genl_family);
+
+ return result;
+}
+
+#else
+int acpi_bus_generate_genetlink_event(struct acpi_device *device, u8 type,
+ int data)
+{
+ return 0;
+}
+
+static int acpi_event_genetlink_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
static int __init acpi_event_init(void)
{
struct proc_dir_entry *entry;
int error = 0;
-
if (acpi_disabled)
return 0;
+ /* create genetlink for acpi event */
+ error = acpi_event_genetlink_init();
+ if (error)
+ printk(KERN_WARNING PREFIX
+ "Failed to create genetlink family for ACPI event\n");
+
/* 'event' [R] */
entry = create_proc_entry("event", S_IRUSR, acpi_root_dir);
if (entry)
entry->proc_fops = &acpi_system_event_ops;
- else {
- error = -ENODEV;
- }
- return error;
+ else
+ return -ENODEV;
+
+ return 0;
}
-subsys_initcall(acpi_event_init);
+fs_initcall(acpi_event_init);
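Sketch (not part of the patch): an ACPI driver's notify handler reporting an event both to the legacy /proc/acpi/event reader and to listeners on the "acpi_event" genetlink family added above. example_notify() and its wiring are invented for illustration.

    static void example_notify(acpi_handle handle, u32 event, void *data)
    {
            struct acpi_device *device = data;

            /* legacy consumers polling /proc/acpi/event */
            acpi_bus_generate_event(device, event, 0);

            /* netlink listeners joined to the "acpi_mc_group" multicast group */
            acpi_bus_generate_genetlink_event(device, event, 0);
    }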
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 902c287b3a4f..361ebe6c4a6f 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -586,6 +586,10 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
if (gpe_xrupt->previous) {
gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
}
if (gpe_xrupt->next) {
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 400d90fca966..23ee7bc4a705 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -284,6 +284,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
}
if (!pci_device_node) {
+ ACPI_FREE(pci_id);
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 41427a41f620..4893e256e399 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -16,7 +16,7 @@
#if ACPI_GLUE_DEBUG
#define DBG(x...) printk(PREFIX x)
#else
-#define DBG(x...)
+#define DBG(x...) do { } while(0)
#endif
static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 0c9f15c54e8c..ab04d848b19d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -36,13 +36,11 @@
ACPI_MODULE_NAME("numa");
static nodemask_t nodes_found_map = NODE_MASK_NONE;
-#define PXM_INVAL -1
-#define NID_INVAL -1
/* maps to convert between proximity domain and logical node ID */
-static int pxm_to_node_map[MAX_PXM_DOMAINS]
+static int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
= { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-static int node_to_pxm_map[MAX_NUMNODES]
+static int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
int pxm_to_node(int pxm)
@@ -59,6 +57,12 @@ int node_to_pxm(int node)
return node_to_pxm_map[node];
}
+void __acpi_map_pxm_to_node(int pxm, int node)
+{
+ pxm_to_node_map[pxm] = node;
+ node_to_pxm_map[node] = pxm;
+}
+
int acpi_map_pxm_to_node(int pxm)
{
int node = pxm_to_node_map[pxm];
@@ -67,8 +71,7 @@ int acpi_map_pxm_to_node(int pxm)
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
return NID_INVAL;
node = first_unset_node(nodes_found_map);
- pxm_to_node_map[pxm] = node;
- node_to_pxm_map[node] = pxm;
+ __acpi_map_pxm_to_node(pxm, node);
node_set(node, nodes_found_map);
}
@@ -83,7 +86,8 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
node_clear(node, nodes_found_map);
}
-void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header)
+static void __init
+acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
@@ -200,7 +204,7 @@ static int __init acpi_parse_srat(struct acpi_table_header *table)
return 0;
}
-int __init
+static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
acpi_table_entry_handler handler, unsigned int max_entries)
{
@@ -211,14 +215,13 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
- int result;
-
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- result = acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity,
- NR_CPUS);
- result = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific
+ acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
+ acpi_parse_processor_affinity, NR_CPUS);
+ acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ acpi_parse_memory_affinity,
+ NR_NODE_MEMBLKS);
}
/* SLIT: System Locality Information Table */
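Sketch (not part of the patch): how SRAT parsing uses the mapping helpers above; the first time a proximity domain is seen, acpi_map_pxm_to_node() hands out the next free logical node id. The function name and printk are invented, and NID_INVAL is assumed to remain visible from the ACPI NUMA headers.

    static void example_register_cpu_affinity(int pxm, int apic_id)
    {
            int node = acpi_map_pxm_to_node(pxm);  /* allocates a node id on first use */

            if (node == NID_INVAL) {
                    printk(KERN_WARNING "SRAT: out of logical nodes\n");
                    return;
            }
            /* arch code would now record the apic_id -> node relation */
    }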
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 2e7ba615d760..12c09fafce9a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -77,13 +77,7 @@ static struct workqueue_struct *kacpi_notify_wq;
#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
static char osi_additional_string[OSI_STRING_LENGTH_MAX];
-#define OSI_LINUX_ENABLED
-#ifdef OSI_LINUX_ENABLED
-int osi_linux = 1; /* enable _OSI(Linux) by default */
-#else
-int osi_linux; /* disable _OSI(Linux) by default */
-#endif
-
+static int osi_linux; /* disable _OSI(Linux) by default */
#ifdef CONFIG_DMI
static struct __initdata dmi_system_id acpi_osl_dmi_table[];
@@ -1098,7 +1092,7 @@ void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
- *cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
+ *cache = kmem_cache_create(name, size, 0, 0, NULL);
if (*cache == NULL)
return AE_ERROR;
else
@@ -1183,17 +1177,10 @@ acpi_os_validate_interface (char *interface)
if (!strcmp("Linux", interface)) {
printk(KERN_WARNING PREFIX
"System BIOS is requesting _OSI(Linux)\n");
-#ifdef OSI_LINUX_ENABLED
- printk(KERN_WARNING PREFIX
- "Please test with \"acpi_osi=!Linux\"\n"
- "Please send dmidecode "
- "to linux-acpi@vger.kernel.org\n");
-#else
printk(KERN_WARNING PREFIX
"If \"acpi_osi=Linux\" works better,\n"
"Please send dmidecode "
"to linux-acpi@vger.kernel.org\n");
-#endif
if(osi_linux)
return AE_OK;
}
@@ -1227,36 +1214,14 @@ acpi_os_validate_address (
}
#ifdef CONFIG_DMI
-#ifdef OSI_LINUX_ENABLED
-static int dmi_osi_not_linux(struct dmi_system_id *d)
-{
- printk(KERN_NOTICE "%s detected: requires not _OSI(Linux)\n", d->ident);
- enable_osi_linux(0);
- return 0;
-}
-#else
static int dmi_osi_linux(struct dmi_system_id *d)
{
- printk(KERN_NOTICE "%s detected: requires _OSI(Linux)\n", d->ident);
+ printk(KERN_NOTICE "%s detected: enabling _OSI(Linux)\n", d->ident);
enable_osi_linux(1);
return 0;
}
-#endif
static struct dmi_system_id acpi_osl_dmi_table[] __initdata = {
-#ifdef OSI_LINUX_ENABLED
- /*
- * Boxes that need NOT _OSI(Linux)
- */
- {
- .callback = dmi_osi_not_linux,
- .ident = "Toshiba Satellite P100",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_BOARD_NAME, "Satellite P100"),
- },
- },
-#else
/*
* Boxes that need _OSI(Linux)
*/
@@ -1268,7 +1233,6 @@ static struct dmi_system_id acpi_osl_dmi_table[] __initdata = {
DMI_MATCH(DMI_BOARD_NAME, "MPAD-MSAE Customer Reference Boards"),
},
},
-#endif
{}
};
#endif /* CONFIG_DMI */
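Sketch (not part of the patch): the shape of an additional DMI quirk table entry matching the one above; the vendor and board strings are placeholders, not a real quirk.

    #include <linux/dmi.h>

    static struct dmi_system_id example_osi_dmi_table[] __initdata = {
            {
                    .callback = dmi_osi_linux,
                    .ident = "Example Vendor Notebook",
                    .matches = {
                            DMI_MATCH(DMI_BOARD_VENDOR, "EXAMPLE"),
                            DMI_MATCH(DMI_BOARD_NAME, "Model X"),
                    },
            },
            {}
    };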
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index acc594771379..3448edd61dc4 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -733,7 +733,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
/* query and set link->irq.active */
acpi_pci_link_get_current(link);
- printk(PREFIX "%s [%s] (IRQs", acpi_device_name(device),
+ printk(KERN_INFO PREFIX "%s [%s] (IRQs", acpi_device_name(device),
acpi_device_bid(device));
for (i = 0; i < link->irq.possible_count; i++) {
if (link->irq.active == link->irq.possible[i]) {
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index f7de02a6f497..81aceb5da7c7 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -66,6 +66,7 @@
#define ACPI_PROCESSOR_FILE_LIMIT "limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
+#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
#define ACPI_PROCESSOR_LIMIT_USER 0
#define ACPI_PROCESSOR_LIMIT_THERMAL 1
@@ -84,6 +85,8 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
+extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
+
static struct acpi_driver acpi_processor_driver = {
.name = "processor",
@@ -115,7 +118,6 @@ struct acpi_processor_errata errata __read_mostly;
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
- u8 rev = 0;
u8 value1 = 0;
u8 value2 = 0;
@@ -127,9 +129,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
* Note that 'dev' references the PIIX4 ACPI Controller.
*/
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
-
- switch (rev) {
+ switch (dev->revision) {
case 0:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
break;
@@ -147,7 +147,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
break;
}
- switch (rev) {
+ switch (dev->revision) {
case 0: /* PIIX4 A-step */
case 1: /* PIIX4 B-step */
@@ -699,6 +699,9 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
acpi_processor_cst_has_changed(pr);
acpi_bus_generate_event(device, event, 0);
break;
+ case ACPI_PROCESSOR_NOTIFY_THROTTLING:
+ acpi_processor_tstate_has_changed(pr);
+ acpi_bus_generate_event(device, event, 0);
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 80ffc7829916..a898991f77cb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -475,7 +475,7 @@ static void acpi_processor_idle(void)
/* Get end time (ticks) */
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-#ifdef CONFIG_GENERIC_TIME
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
/* TSC halts in C2, so notify users */
mark_tsc_unstable("possible TSC halt in C2");
#endif
@@ -490,7 +490,17 @@ static void acpi_processor_idle(void)
case ACPI_STATE_C3:
- if (pr->flags.bm_check) {
+ /*
+ * disable bus master
+ * bm_check implies we need ARB_DIS
+ * !bm_check implies we need cache flush
+ * bm_control implies whether we can do ARB_DIS
+ *
+ * That leaves a case where bm_check is set and bm_control is
+ * not set. In that case we cannot do much, we enter C3
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
if (atomic_inc_return(&c3_cpu_count) ==
num_online_cpus()) {
/*
@@ -499,7 +509,7 @@ static void acpi_processor_idle(void)
*/
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
- } else {
+ } else if (!pr->flags.bm_check) {
/* SMP with no shared cache... Invalidate cache */
ACPI_FLUSH_CPU_CACHE();
}
@@ -511,13 +521,13 @@ static void acpi_processor_idle(void)
acpi_cstate_enter(cx);
/* Get end time (ticks) */
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
- if (pr->flags.bm_check) {
+ if (pr->flags.bm_check && pr->flags.bm_control) {
/* Enable bus master arbitration */
atomic_dec(&c3_cpu_count);
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
-#ifdef CONFIG_GENERIC_TIME
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
/* TSC halts in C3, so notify users */
mark_tsc_unstable("TSC halts in C3");
#endif
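Sketch (not part of the patch): the C3 preparation policy described in the comment above, written out as a small decision helper; the enum and function names are invented.

    enum c3_prep { C3_ARB_DISABLE, C3_FLUSH_CACHE, C3_NO_PREP };

    /* bm_check && bm_control   -> disable bus-master arbitration (ARB_DIS)
     * bm_check && !bm_control  -> enter C3 without any preparation
     * !bm_check                -> flush CPU caches (no shared cache)      */
    static enum c3_prep c3_preparation(int bm_check, int bm_control)
    {
            if (bm_check)
                    return bm_control ? C3_ARB_DISABLE : C3_NO_PREP;
            return C3_FLUSH_CACHE;
    }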
@@ -961,9 +971,9 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
if (pr->flags.bm_check) {
/* bus mastering control is necessary */
if (!pr->flags.bm_control) {
+ /* In this case we enter C3 without bus mastering */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "C3 support requires bus mastering control\n"));
- return;
+ "C3 support without bus mastering control\n"));
}
} else {
/*
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index b33486009f41..3f55d1f90c11 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -44,17 +44,231 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
+static int acpi_processor_get_throttling(struct acpi_processor *pr);
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+
+static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+{
+ acpi_status status = 0;
+ unsigned long tpc = 0;
+
+ if (!pr)
+ return -EINVAL;
+ status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
+ return -ENODEV;
+ }
+ pr->throttling_platform_limit = (int)tpc;
+ return 0;
+}
+
+int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+ return acpi_processor_get_platform_limit(pr);
+}
+
+/* --------------------------------------------------------------------------
+ _PTC, _TSS, _TSD support
+ -------------------------------------------------------------------------- */
+static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = 0;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *ptc = NULL;
+ union acpi_object obj = { 0 };
+
+ status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
+ return -ENODEV;
+ }
+
+ ptc = (union acpi_object *)buffer.pointer;
+ if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
+ || (ptc->package.count != 2)) {
+ printk(KERN_ERR PREFIX "Invalid _PTC data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ /*
+ * control_register
+ */
+
+ obj = ptc->package.elements[0];
+
+ if ((obj.type != ACPI_TYPE_BUFFER)
+ || (obj.buffer.length < sizeof(struct acpi_ptc_register))
+ || (obj.buffer.pointer == NULL)) {
+ printk(KERN_ERR PREFIX
+ "Invalid _PTC data (control_register)\n");
+ result = -EFAULT;
+ goto end;
+ }
+ memcpy(&pr->throttling.control_register, obj.buffer.pointer,
+ sizeof(struct acpi_ptc_register));
+
+ /*
+ * status_register
+ */
+
+ obj = ptc->package.elements[1];
+
+ if ((obj.type != ACPI_TYPE_BUFFER)
+ || (obj.buffer.length < sizeof(struct acpi_ptc_register))
+ || (obj.buffer.pointer == NULL)) {
+ printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ memcpy(&pr->throttling.status_register, obj.buffer.pointer,
+ sizeof(struct acpi_ptc_register));
+
+ end:
+ kfree(buffer.pointer);
+
+ return result;
+}
+static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
+ struct acpi_buffer state = { 0, NULL };
+ union acpi_object *tss = NULL;
+ int i;
+
+ status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
+ return -ENODEV;
+ }
+
+ tss = buffer.pointer;
+ if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
+ printk(KERN_ERR PREFIX "Invalid _TSS data\n");
+ result = -EFAULT;
+ goto end;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
+ tss->package.count));
+
+ pr->throttling.state_count = tss->package.count;
+ pr->throttling.states_tss =
+ kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
+ GFP_KERNEL);
+ if (!pr->throttling.states_tss) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ for (i = 0; i < pr->throttling.state_count; i++) {
+
+ struct acpi_processor_tx_tss *tx =
+ (struct acpi_processor_tx_tss *)&(pr->throttling.
+ states_tss[i]);
+
+ state.length = sizeof(struct acpi_processor_tx_tss);
+ state.pointer = tx;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
+
+ status = acpi_extract_package(&(tss->package.elements[i]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
+ result = -EFAULT;
+ kfree(pr->throttling.states_tss);
+ goto end;
+ }
+
+ if (!tx->freqpercentage) {
+ printk(KERN_ERR PREFIX
+ "Invalid _TSS data: freq is zero\n");
+ result = -EFAULT;
+ kfree(pr->throttling.states_tss);
+ goto end;
+ }
+ }
+
+ end:
+ kfree(buffer.pointer);
+
+ return result;
+}
+static int acpi_processor_get_tsd(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
+ struct acpi_buffer state = { 0, NULL };
+ union acpi_object *tsd = NULL;
+ struct acpi_tsd_package *pdomain;
+
+ status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return -ENODEV;
+ }
+
+ tsd = buffer.pointer;
+ if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (tsd->package.count != 1) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ pdomain = &(pr->throttling.domain_info);
+
+ state.length = sizeof(struct acpi_tsd_package);
+ state.pointer = pdomain;
+
+ status = acpi_extract_package(&(tsd->package.elements[0]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ end:
+ kfree(buffer.pointer);
+ return result;
+}
+
/* --------------------------------------------------------------------------
Throttling Control
-------------------------------------------------------------------------- */
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
{
int state = 0;
u32 value = 0;
u32 duty_mask = 0;
u32 duty_value = 0;
-
if (!pr)
return -EINVAL;
@@ -94,13 +308,115 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
return 0;
}
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+static int acpi_read_throttling_status(struct acpi_processor_throttling
+ *throttling)
+{
+ int value = -1;
+ switch (throttling->status_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ acpi_os_read_port((acpi_io_address) throttling->status_register.
+ address, &value,
+ (u32) throttling->status_register.bit_width *
+ 8);
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ printk(KERN_ERR PREFIX
+ "HARDWARE addr space, not supported yet\n");
+ break;
+ default:
+ printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+ (u32) (throttling->status_register.space_id));
+ }
+ return value;
+}
+
+static int acpi_write_throttling_state(struct acpi_processor_throttling
+ *throttling, int value)
+{
+ int ret = -1;
+
+ switch (throttling->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ acpi_os_write_port((acpi_io_address) throttling->
+ control_register.address, value,
+ (u32) throttling->control_register.
+ bit_width * 8);
+ ret = 0;
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ printk(KERN_ERR PREFIX
+ "HARDWARE addr space, not supported yet\n");
+ break;
+ default:
+ printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+ (u32) (throttling->control_register.space_id));
+ }
+ return ret;
+}
+
+static int acpi_get_throttling_state(struct acpi_processor *pr, int value)
+{
+ int i;
+
+ for (i = 0; i < pr->throttling.state_count; i++) {
+ struct acpi_processor_tx_tss *tx =
+ (struct acpi_processor_tx_tss *)&(pr->throttling.
+ states_tss[i]);
+ if (tx->control == value)
+ break;
+ }
+ if (i > pr->throttling.state_count)
+ i = -1;
+ return i;
+}
+
+static int acpi_get_throttling_value(struct acpi_processor *pr, int state)
+{
+ int value = -1;
+ if (state >= 0 && state <= pr->throttling.state_count) {
+ struct acpi_processor_tx_tss *tx =
+ (struct acpi_processor_tx_tss *)&(pr->throttling.
+ states_tss[state]);
+ value = tx->control;
+ }
+ return value;
+}
+
+static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
+{
+ int state = 0;
+ u32 value = 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ pr->throttling.state = 0;
+ local_irq_disable();
+ value = acpi_read_throttling_status(&pr->throttling);
+ if (value >= 0) {
+ state = acpi_get_throttling_state(pr, value);
+ pr->throttling.state = state;
+ }
+ local_irq_enable();
+
+ return 0;
+}
+
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
+ return pr->throttling.acpi_processor_get_throttling(pr);
+}
+
+static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+ int state)
{
u32 value = 0;
u32 duty_mask = 0;
u32 duty_value = 0;
-
if (!pr)
return -EINVAL;
@@ -113,6 +429,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
if (state == pr->throttling.state)
return 0;
+ if (state < pr->throttling_platform_limit)
+ return -EPERM;
/*
* Calculate the duty_value and duty_mask.
*/
@@ -165,12 +483,51 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
return 0;
}
+static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+ int state)
+{
+ u32 value = 0;
+
+ if (!pr)
+ return -EINVAL;
+
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ if (state == pr->throttling.state)
+ return 0;
+
+ if (state < pr->throttling_platform_limit)
+ return -EPERM;
+
+ local_irq_disable();
+
+ value = acpi_get_throttling_value(pr, state);
+ if (value >= 0) {
+ acpi_write_throttling_state(&pr->throttling, value);
+ pr->throttling.state = state;
+ }
+ local_irq_enable();
+
+ return 0;
+}
+
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+{
+ return pr->throttling.acpi_processor_set_throttling(pr, state);
+}
+
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
int step = 0;
int i = 0;
-
+ int no_ptc = 0;
+ int no_tss = 0;
+ int no_tsd = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
@@ -182,6 +539,21 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
return -EINVAL;
/* TBD: Support ACPI 2.0 objects */
+ no_ptc = acpi_processor_get_throttling_control(pr);
+ no_tss = acpi_processor_get_throttling_states(pr);
+ no_tsd = acpi_processor_get_tsd(pr);
+
+ if (no_ptc || no_tss) {
+ pr->throttling.acpi_processor_get_throttling =
+ &acpi_processor_get_throttling_fadt;
+ pr->throttling.acpi_processor_set_throttling =
+ &acpi_processor_set_throttling_fadt;
+ } else {
+ pr->throttling.acpi_processor_get_throttling =
+ &acpi_processor_get_throttling_ptc;
+ pr->throttling.acpi_processor_set_throttling =
+ &acpi_processor_set_throttling_ptc;
+ }
if (!pr->throttling.address) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
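Sketch (not part of the patch): a policy helper on top of the _TSS data parsed above, picking the deepest T-state that still keeps at least min_percent of full speed while honouring the _TPC platform limit; the function name is invented.

    static int example_pick_tstate(struct acpi_processor *pr, int min_percent)
    {
            int i, state = pr->throttling_platform_limit;  /* fastest allowed state */

            for (i = state; i < pr->throttling.state_count; i++) {
                    if (pr->throttling.states_tss[i].freqpercentage < min_percent)
                            break;
                    state = i;      /* still fast enough, try one step deeper */
            }
            return state;
    }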
@@ -262,7 +634,6 @@ static int acpi_processor_throttling_seq_show(struct seq_file *seq,
int i = 0;
int result = 0;
-
if (!pr)
goto end;
@@ -280,15 +651,25 @@ static int acpi_processor_throttling_seq_show(struct seq_file *seq,
}
seq_printf(seq, "state count: %d\n"
- "active state: T%d\n",
- pr->throttling.state_count, pr->throttling.state);
+ "active state: T%d\n"
+ "state available: T%d to T%d\n",
+ pr->throttling.state_count, pr->throttling.state,
+ pr->throttling_platform_limit,
+ pr->throttling.state_count - 1);
seq_puts(seq, "states:\n");
- for (i = 0; i < pr->throttling.state_count; i++)
- seq_printf(seq, " %cT%d: %02d%%\n",
- (i == pr->throttling.state ? '*' : ' '), i,
- (pr->throttling.states[i].performance ? pr->
- throttling.states[i].performance / 10 : 0));
+ if (acpi_processor_get_throttling == acpi_processor_get_throttling_fadt)
+ for (i = 0; i < pr->throttling.state_count; i++)
+ seq_printf(seq, " %cT%d: %02d%%\n",
+ (i == pr->throttling.state ? '*' : ' '), i,
+ (pr->throttling.states[i].performance ? pr->
+ throttling.states[i].performance / 10 : 0));
+ else
+ for (i = 0; i < pr->throttling.state_count; i++)
+ seq_printf(seq, " %cT%d: %02d%%\n",
+ (i == pr->throttling.state ? '*' : ' '), i,
+ (int)pr->throttling.states_tss[i].
+ freqpercentage);
end:
return 0;
@@ -301,7 +682,7 @@ static int acpi_processor_throttling_open_fs(struct inode *inode,
PDE(inode)->data);
}
-static ssize_t acpi_processor_write_throttling(struct file * file,
+static ssize_t acpi_processor_write_throttling(struct file *file,
const char __user * buffer,
size_t count, loff_t * data)
{
@@ -310,7 +691,6 @@ static ssize_t acpi_processor_write_throttling(struct file * file,
struct acpi_processor *pr = m->private;
char state_string[12] = { '\0' };
-
if (!pr || (count > sizeof(state_string) - 1))
return -EINVAL;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index c1bae106833c..974d00ccfe84 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -127,7 +127,7 @@ static int acpi_sbs_resume(struct acpi_device *device);
static struct acpi_driver acpi_sbs_driver = {
.name = "sbs",
.class = ACPI_SBS_CLASS,
- .ids = ACPI_SBS_HID,
+ .ids = "ACPI0001,ACPI0005",
.ops = {
.add = acpi_sbs_add,
.remove = acpi_sbs_remove,
@@ -176,10 +176,8 @@ struct acpi_battery {
};
struct acpi_sbs {
- acpi_handle handle;
int base;
struct acpi_device *device;
- struct acpi_ec_smbus *smbus;
struct mutex mutex;
int sbsm_present;
int sbsm_batteries_supported;
@@ -511,7 +509,7 @@ static int acpi_sbsm_get_info(struct acpi_sbs *sbs)
"acpi_sbs_read_word() failed"));
goto end;
}
-
+ sbs->sbsm_present = 1;
sbs->sbsm_batteries_supported = battery_system_info & 0x000f;
end:
@@ -1630,13 +1628,12 @@ static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs = NULL;
int result = 0, remove_result = 0;
- unsigned long sbs_obj;
int id;
acpi_status status = AE_OK;
unsigned long val;
status =
- acpi_evaluate_integer(device->parent->handle, "_EC", NULL, &val);
+ acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Error obtaining _EC"));
return -EIO;
@@ -1653,7 +1650,7 @@ static int acpi_sbs_add(struct acpi_device *device)
sbs_mutex_lock(sbs);
- sbs->base = (val & 0xff00ull) >> 8;
+ sbs->base = 0xff & (val >> 8);
sbs->device = device;
strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
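Sketch (not part of the patch): what the simplified expression above extracts from the _EC return value. By the SBS convention, bits 15:8 give the SMBus host controller offset inside EC space and bits 7:0 give the EC query value; the helper name is invented.

    static void example_decode_ec(unsigned long val, u8 *smbus_base, u8 *query)
    {
            *smbus_base = (val >> 8) & 0xff;   /* what sbs->base stores above */
            *query      = val & 0xff;
    }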
@@ -1665,24 +1662,10 @@ static int acpi_sbs_add(struct acpi_device *device)
ACPI_EXCEPTION((AE_INFO, AE_ERROR, "acpi_ac_add() failed"));
goto end;
}
- status = acpi_evaluate_integer(device->handle, "_SBS", NULL, &sbs_obj);
- if (status) {
- ACPI_EXCEPTION((AE_INFO, status,
- "acpi_evaluate_integer() failed"));
- result = -EIO;
- goto end;
- }
- if (sbs_obj > 0) {
- result = acpi_sbsm_get_info(sbs);
- if (result) {
- ACPI_EXCEPTION((AE_INFO, AE_ERROR,
- "acpi_sbsm_get_info() failed"));
- goto end;
- }
- sbs->sbsm_present = 1;
- }
- if (sbs->sbsm_present == 0) {
+ acpi_sbsm_get_info(sbs);
+
+ if (!sbs->sbsm_present) {
result = acpi_battery_add(sbs, 0);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_ERROR,
@@ -1702,8 +1685,6 @@ static int acpi_sbs_add(struct acpi_device *device)
}
}
- sbs->handle = device->handle;
-
init_timer(&sbs->update_timer);
result = acpi_check_update_proc(sbs);
if (result)
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index bc7e16ec8393..3279e72a94f8 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -210,17 +210,28 @@ static void acpi_hibernation_finish(void)
/* reset firmware waking vector */
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
+}
- if (init_8259A_after_S1) {
- printk("Broken toshiba laptop -> kicking interrupts\n");
- init_8259A(0);
- }
+static int acpi_hibernation_pre_restore(void)
+{
+ acpi_status status;
+
+ status = acpi_hw_disable_all_gpes();
+
+ return ACPI_SUCCESS(status) ? 0 : -EFAULT;
+}
+
+static void acpi_hibernation_restore_cleanup(void)
+{
+ acpi_hw_enable_all_runtime_gpes();
}
static struct hibernation_ops acpi_hibernation_ops = {
.prepare = acpi_hibernation_prepare,
.enter = acpi_hibernation_enter,
.finish = acpi_hibernation_finish,
+ .pre_restore = acpi_hibernation_pre_restore,
+ .restore_cleanup = acpi_hibernation_restore_cleanup,
};
#endif /* CONFIG_SOFTWARE_SUSPEND */
diff --git a/drivers/acpi/sleep/poweroff.c b/drivers/acpi/sleep/poweroff.c
index d9801eff6489..39e40d56b034 100644
--- a/drivers/acpi/sleep/poweroff.c
+++ b/drivers/acpi/sleep/poweroff.c
@@ -39,7 +39,13 @@ int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_PM
-void acpi_power_off(void)
+static void acpi_power_off_prepare(void)
+{
+ /* Prepare to power off the system */
+ acpi_sleep_prepare(ACPI_STATE_S5);
+}
+
+static void acpi_power_off(void)
{
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk("%s called\n", __FUNCTION__);
@@ -48,30 +54,6 @@ void acpi_power_off(void)
acpi_enter_sleep_state(ACPI_STATE_S5);
}
-static int acpi_shutdown(struct sys_device *x)
-{
- switch (system_state) {
- case SYSTEM_POWER_OFF:
- /* Prepare to power off the system */
- return acpi_sleep_prepare(ACPI_STATE_S5);
- case SYSTEM_SUSPEND_DISK:
- /* Prepare to suspend the system to disk */
- return acpi_sleep_prepare(ACPI_STATE_S4);
- default:
- return 0;
- }
-}
-
-static struct sysdev_class acpi_sysclass = {
- set_kset_name("acpi"),
- .shutdown = acpi_shutdown
-};
-
-static struct sys_device device_acpi = {
- .id = 0,
- .cls = &acpi_sysclass,
-};
-
static int acpi_poweroff_init(void)
{
if (!acpi_disabled) {
@@ -81,13 +63,8 @@ static int acpi_poweroff_init(void)
status =
acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
if (ACPI_SUCCESS(status)) {
- int error;
- error = sysdev_class_register(&acpi_sysclass);
- if (!error)
- error = sysdev_register(&device_acpi);
- if (!error)
- pm_power_off = acpi_power_off;
- return error;
+ pm_power_off_prepare = acpi_power_off_prepare;
+ pm_power_off = acpi_power_off;
}
}
return 0;
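Sketch (not part of the patch): roughly how the generic power-off path consumes the two hooks installed above, simplified from kernel_power_off(); the function name is invented.

    void example_power_off_sequence(void)
    {
            if (pm_power_off_prepare)
                    pm_power_off_prepare();   /* acpi_sleep_prepare(ACPI_STATE_S5) */
            /* ... remaining devices and CPUs are shut down here ... */
            if (pm_power_off)
                    pm_power_off();           /* acpi_enter_sleep_state(ACPI_STATE_S5) */
    }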
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 83a8d3097904..edee2806e37b 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -39,15 +39,12 @@ ACPI_MODULE_NAME("system");
#define ACPI_SYSTEM_CLASS "system"
#define ACPI_SYSTEM_DEVICE_NAME "System"
-#define ACPI_SYSTEM_FILE_INFO "info"
-#define ACPI_SYSTEM_FILE_EVENT "event"
-#define ACPI_SYSTEM_FILE_DSDT "dsdt"
-#define ACPI_SYSTEM_FILE_FADT "fadt"
/*
* Make ACPICA version work as module param
*/
-static int param_get_acpica_version(char *buffer, struct kernel_param *kp) {
+static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
+{
int result;
result = sprintf(buffer, "%x", ACPI_CA_VERSION);
@@ -58,9 +55,126 @@ static int param_get_acpica_version(char *buffer, struct kernel_param *kp) {
module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/* --------------------------------------------------------------------------
+ FS Interface (/sys)
+ -------------------------------------------------------------------------- */
+static LIST_HEAD(acpi_table_attr_list);
+static struct kobject tables_kobj;
+
+struct acpi_table_attr {
+ struct bin_attribute attr;
+ char name[8];
+ int instance;
+ struct list_head node;
+};
+
+static ssize_t acpi_table_show(struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct acpi_table_attr *table_attr =
+ container_of(bin_attr, struct acpi_table_attr, attr);
+ struct acpi_table_header *table_header = NULL;
+ acpi_status status;
+ ssize_t ret_count = count;
+
+ status =
+ acpi_get_table(table_attr->name, table_attr->instance,
+ &table_header);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (offset >= table_header->length) {
+ ret_count = 0;
+ goto end;
+ }
+
+ if (offset + ret_count > table_header->length)
+ ret_count = table_header->length - offset;
+
+ memcpy(buf, ((char *)table_header) + offset, ret_count);
+
+ end:
+ return ret_count;
+}
+
+static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
+ struct acpi_table_header *table_header)
+{
+ struct acpi_table_header *header = NULL;
+ struct acpi_table_attr *attr = NULL;
+
+ memcpy(table_attr->name, table_header->signature, ACPI_NAME_SIZE);
+
+ list_for_each_entry(attr, &acpi_table_attr_list, node) {
+ if (!memcmp(table_header->signature, attr->name,
+ ACPI_NAME_SIZE))
+ if (table_attr->instance < attr->instance)
+ table_attr->instance = attr->instance;
+ }
+ table_attr->instance++;
+
+ if (table_attr->instance > 1 || (table_attr->instance == 1 &&
+ !acpi_get_table(table_header->
+ signature, 2,
+ &header)))
+ sprintf(table_attr->name + 4, "%d", table_attr->instance);
+
+ table_attr->attr.size = 0;
+ table_attr->attr.read = acpi_table_show;
+ table_attr->attr.attr.name = table_attr->name;
+ table_attr->attr.attr.mode = 0444;
+ table_attr->attr.attr.owner = THIS_MODULE;
+
+ return;
+}
+
+static int acpi_system_sysfs_init(void)
+{
+ struct acpi_table_attr *table_attr;
+ struct acpi_table_header *table_header = NULL;
+ int table_index = 0;
+ int result;
+
+ tables_kobj.parent = &acpi_subsys.kobj;
+ kobject_set_name(&tables_kobj, "tables");
+ result = kobject_register(&tables_kobj);
+ if (result)
+ return result;
+
+ do {
+ result = acpi_get_table_by_index(table_index, &table_header);
+ if (!result) {
+ table_index++;
+ table_attr = NULL;
+ table_attr =
+ kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
+ if (!table_attr)
+ return -ENOMEM;
+
+ acpi_table_attr_init(table_attr, table_header);
+ result =
+ sysfs_create_bin_file(&tables_kobj,
+ &table_attr->attr);
+ if (result) {
+ kfree(table_attr);
+ return result;
+ } else
+ list_add_tail(&table_attr->node,
+ &acpi_table_attr_list);
+ }
+ } while (!result);
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_PROCFS
+#define ACPI_SYSTEM_FILE_INFO "info"
+#define ACPI_SYSTEM_FILE_EVENT "event"
+#define ACPI_SYSTEM_FILE_DSDT "dsdt"
+#define ACPI_SYSTEM_FILE_FADT "fadt"
static int acpi_system_read_info(struct seq_file *seq, void *offset)
{
@@ -80,7 +194,6 @@ static const struct file_operations acpi_system_info_ops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif
static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
loff_t *);
@@ -97,13 +210,11 @@ acpi_system_read_dsdt(struct file *file,
struct acpi_table_header *dsdt = NULL;
ssize_t res;
-
status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
return -ENODEV;
- res = simple_read_from_buffer(buffer, count, ppos,
- dsdt, dsdt->length);
+ res = simple_read_from_buffer(buffer, count, ppos, dsdt, dsdt->length);
return res;
}
@@ -123,28 +234,21 @@ acpi_system_read_fadt(struct file *file,
struct acpi_table_header *fadt = NULL;
ssize_t res;
-
status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
if (ACPI_FAILURE(status))
return -ENODEV;
- res = simple_read_from_buffer(buffer, count, ppos,
- fadt, fadt->length);
+ res = simple_read_from_buffer(buffer, count, ppos, fadt, fadt->length);
return res;
}
-static int __init acpi_system_init(void)
+static int acpi_system_procfs_init(void)
{
struct proc_dir_entry *entry;
int error = 0;
char *name;
-
- if (acpi_disabled)
- return 0;
-
-#ifdef CONFIG_ACPI_PROCFS
/* 'info' [R] */
name = ACPI_SYSTEM_FILE_INFO;
entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
@@ -153,7 +257,6 @@ static int __init acpi_system_init(void)
else {
entry->proc_fops = &acpi_system_info_ops;
}
-#endif
/* 'dsdt' [R] */
name = ACPI_SYSTEM_FILE_DSDT;
@@ -177,12 +280,32 @@ static int __init acpi_system_init(void)
Error:
remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir);
remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir);
-#ifdef CONFIG_ACPI_PROCFS
remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir);
-#endif
error = -EFAULT;
goto Done;
}
+#else
+static int acpi_system_procfs_init(void)
+{
+ return 0;
+}
+#endif
+
+static int __init acpi_system_init(void)
+{
+ int result = 0;
+
+ if (acpi_disabled)
+ return 0;
+
+ result = acpi_system_procfs_init();
+ if (result)
+ return result;
+
+ result = acpi_system_sysfs_init();
+
+ return result;
+}
subsys_initcall(acpi_system_init);
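Sketch (not part of the patch): a user-space reader for the new sysfs table files; the path assumes acpi_subsys sits under /sys/firmware/acpi, so the DSDT would appear as /sys/firmware/acpi/tables/DSDT.

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/firmware/acpi/tables/DSDT", "rb");
            char buf[4096];
            size_t n;

            if (!f) {
                    perror("DSDT");
                    return 1;
            }
            while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                    fwrite(buf, 1, n, stdout);   /* dump the raw AML to stdout */
            fclose(f);
            return 0;
    }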
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index 1285e91474fb..002bb33003af 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -211,14 +211,17 @@ void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags)
* DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
* Performs validation on some important FADT fields.
*
+ * NOTE: We create a local copy of the FADT regardless of the version.
+ *
******************************************************************************/
void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
- * Check if the FADT is larger than what we know about (ACPI 2.0 version).
- * Truncate the table, but make some noise.
+ * Check if the FADT is larger than the largest table that we expect
+ * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+ * a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
@@ -227,10 +230,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
sizeof(struct acpi_table_fadt)));
}
- /* Copy the entire FADT locally. Zero first for tb_convert_fadt */
+ /* Clear the entire local FADT */
ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
+ /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
+
ACPI_MEMCPY(&acpi_gbl_FADT, table,
ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
@@ -251,7 +256,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
* RETURN: None
*
* DESCRIPTION: Converts all versions of the FADT to a common internal format.
- * -> Expand all 32-bit addresses to 64-bit.
+ * Expand all 32-bit addresses to 64-bit.
*
* NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
* and must contain a copy of the actual FADT.
@@ -292,8 +297,23 @@ static void acpi_tb_convert_fadt(void)
}
/*
- * Expand the 32-bit V1.0 addresses to the 64-bit "X" generic address
- * structures as necessary.
+ * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
+ * should be zero are indeed zero. This will workaround BIOSs that
+ * inadvertently place values in these fields.
+ *
+ * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
+ * offset 45, 55, 95, and the word located at offset 109, 110.
+ */
+ if (acpi_gbl_FADT.header.revision < 3) {
+ acpi_gbl_FADT.preferred_profile = 0;
+ acpi_gbl_FADT.pstate_control = 0;
+ acpi_gbl_FADT.cst_control = 0;
+ acpi_gbl_FADT.boot_flags = 0;
+ }
+
+ /*
+ * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X"
+ * generic address structures as necessary.
*/
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
target =
@@ -349,18 +369,6 @@ static void acpi_tb_convert_fadt(void)
acpi_gbl_FADT.xpm1a_event_block.space_id;
}
-
- /*
- * For ACPI 1.0 FADTs, ensure that reserved fields (which should be zero)
- * are indeed zero. This will workaround BIOSs that inadvertently placed
- * values in these fields.
- */
- if (acpi_gbl_FADT.header.revision < 3) {
- acpi_gbl_FADT.preferred_profile = 0;
- acpi_gbl_FADT.pstate_control = 0;
- acpi_gbl_FADT.cst_control = 0;
- acpi_gbl_FADT.boot_flags = 0;
- }
}
/******************************************************************************
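Sketch (not part of the patch): the 32-bit to 64-bit expansion the conversion loop above performs for each legacy FADT register block, spelled out for a single system-I/O address; field names follow struct acpi_generic_address, and the helper name is invented.

    static void example_expand_address(struct acpi_generic_address *gas,
                                       u8 bit_width, u32 legacy_address)
    {
            gas->space_id     = ACPI_ADR_SPACE_SYSTEM_IO;
            gas->bit_width    = bit_width;
            gas->bit_offset   = 0;
            gas->access_width = 0;            /* any access size */
            gas->address      = (u64)legacy_address;
    }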
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 88a6fc7fd271..58f1338981bc 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -40,6 +40,7 @@
#include <linux/jiffies.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
+#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
@@ -59,7 +60,6 @@
#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
#define ACPI_THERMAL_NOTIFY_HOT 0xF1
#define ACPI_THERMAL_MODE_ACTIVE 0x00
-#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff"
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
@@ -419,26 +419,6 @@ static int acpi_thermal_get_devices(struct acpi_thermal *tz)
return 0;
}
-static int acpi_thermal_call_usermode(char *path)
-{
- char *argv[2] = { NULL, NULL };
- char *envp[3] = { NULL, NULL, NULL };
-
-
- if (!path)
- return -EINVAL;
-
- argv[0] = path;
-
- /* minimal command environment */
- envp[0] = "HOME=/";
- envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-
- call_usermodehelper(argv[0], argv, envp, 0);
-
- return 0;
-}
-
static int acpi_thermal_critical(struct acpi_thermal *tz)
{
if (!tz || !tz->trips.critical.flags.valid)
@@ -456,7 +436,7 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
tz->trips.critical.flags.enabled);
- acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF);
+ orderly_poweroff(true);
return 0;
}
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index 8ec6f8e48138..f112af433e36 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -62,16 +62,13 @@ acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
static char *acpi_interfaces_supported[] = {
/* Operating System Vendor Strings */
- "Windows 2000",
- "Windows 2001",
- "Windows 2001 SP0",
- "Windows 2001 SP1",
- "Windows 2001 SP2",
- "Windows 2001 SP3",
- "Windows 2001 SP4",
- "Windows 2001.1",
- "Windows 2001.1 SP1", /* Added 03/2006 */
- "Windows 2006", /* Added 03/2006 */
+ "Windows 2000", /* Windows 2000 */
+ "Windows 2001", /* Windows XP */
+ "Windows 2001 SP1", /* Windows XP SP1 */
+ "Windows 2001 SP2", /* Windows XP SP2 */
+ "Windows 2001.1", /* Windows Server 2003 */
+ "Windows 2001.1 SP1", /* Windows Server 2003 SP1 - Added 03/2006 */
+ "Windows 2006", /* Windows Vista - Added 03/2006 */
/* Feature Group Strings */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 00d25b347255..04ea697f72bf 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -33,6 +33,7 @@
#include <linux/seq_file.h>
#include <linux/backlight.h>
+#include <linux/video_output.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
@@ -169,6 +170,7 @@ struct acpi_video_device {
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
+ struct output_device *output_dev;
};
/* bus */
@@ -272,13 +274,17 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
static void acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static int acpi_video_device_get_state(struct acpi_video_device *device,
+ unsigned long *state);
+static int acpi_video_output_get(struct output_device *od);
+static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
/*backlight device sysfs support*/
static int acpi_video_get_brightness(struct backlight_device *bd)
{
unsigned long cur_level;
struct acpi_video_device *vd =
- (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
+ (struct acpi_video_device *)bl_get_data(bd);
acpi_video_device_lcd_get_level_current(vd, &cur_level);
return (int) cur_level;
}
@@ -287,7 +293,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
{
int request_level = bd->props.brightness;
struct acpi_video_device *vd =
- (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
+ (struct acpi_video_device *)bl_get_data(bd);
acpi_video_device_lcd_set_level(vd, request_level);
return 0;
}
@@ -297,6 +303,28 @@ static struct backlight_ops acpi_backlight_ops = {
.update_status = acpi_video_set_brightness,
};
+/*video output device sysfs support*/
+static int acpi_video_output_get(struct output_device *od)
+{
+ unsigned long state;
+ struct acpi_video_device *vd =
+ (struct acpi_video_device *)class_get_devdata(&od->class_dev);
+ acpi_video_device_get_state(vd, &state);
+ return (int)state;
+}
+
+static int acpi_video_output_set(struct output_device *od)
+{
+ unsigned long state = od->request_state;
+ struct acpi_video_device *vd=
+ (struct acpi_video_device *)class_get_devdata(&od->class_dev);
+ return acpi_video_device_set_state(vd, state);
+}
+
+static struct output_properties acpi_output_properties = {
+ .set_state = acpi_video_output_set,
+ .get_status = acpi_video_output_get,
+};
/* --------------------------------------------------------------------------
Video Management
-------------------------------------------------------------------------- */
@@ -531,7 +559,6 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
static void acpi_video_device_find_cap(struct acpi_video_device *device)
{
- acpi_integer status;
acpi_handle h_dummy1;
int i;
u32 max_level = 0;
@@ -565,50 +592,55 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
device->cap._DSS = 1;
}
- status = acpi_video_device_lcd_query_levels(device, &obj);
-
- if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count >= 2) {
- int count = 0;
- union acpi_object *o;
-
- br = kzalloc(sizeof(*br), GFP_KERNEL);
- if (!br) {
- printk(KERN_ERR "can't allocate memory\n");
- } else {
- br->levels = kmalloc(obj->package.count *
- sizeof *(br->levels), GFP_KERNEL);
- if (!br->levels)
- goto out;
-
- for (i = 0; i < obj->package.count; i++) {
- o = (union acpi_object *)&obj->package.
- elements[i];
- if (o->type != ACPI_TYPE_INTEGER) {
- printk(KERN_ERR PREFIX "Invalid data\n");
- continue;
- }
- br->levels[count] = (u32) o->integer.value;
- if (br->levels[count] > max_level)
- max_level = br->levels[count];
- count++;
- }
- out:
- if (count < 2) {
- kfree(br->levels);
- kfree(br);
+ if (ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+
+ if (obj->package.count >= 2) {
+ int count = 0;
+ union acpi_object *o;
+
+ br = kzalloc(sizeof(*br), GFP_KERNEL);
+ if (!br) {
+ printk(KERN_ERR "can't allocate memory\n");
} else {
- br->count = count;
- device->brightness = br;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n",
- count));
+ br->levels = kmalloc(obj->package.count *
+ sizeof *(br->levels), GFP_KERNEL);
+ if (!br->levels)
+ goto out;
+
+ for (i = 0; i < obj->package.count; i++) {
+ o = (union acpi_object *)&obj->package.
+ elements[i];
+ if (o->type != ACPI_TYPE_INTEGER) {
+ printk(KERN_ERR PREFIX "Invalid data\n");
+ continue;
+ }
+ br->levels[count] = (u32) o->integer.value;
+
+ if (br->levels[count] > max_level)
+ max_level = br->levels[count];
+ count++;
+ }
+ out:
+ if (count < 2) {
+ kfree(br->levels);
+ kfree(br);
+ } else {
+ br->count = count;
+ device->brightness = br;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "found %d brightness levels\n",
+ count));
+ }
}
}
+
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available LCD brightness level\n"));
}
kfree(obj);
- if (device->cap._BCL && device->cap._BCM && device->cap._BQC){
+ if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){
unsigned long tmp;
static int count = 0;
char *name;
@@ -626,6 +658,17 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
kfree(name);
}
+ if (device->cap._DCS && device->cap._DSS){
+ static int count = 0;
+ char *name;
+ name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
+ if (!name)
+ return;
+ sprintf(name, "acpi_video%d", count++);
+ device->output_dev = video_output_register(name,
+ NULL, device, &acpi_output_properties);
+ kfree(name);
+ }
return;
}
@@ -1669,6 +1712,7 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
backlight_device_unregister(device->backlight);
+ video_output_unregister(device->output_dev);
return 0;
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ad8675f5a16..d8046a113c37 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -309,7 +309,7 @@ config PATA_HPT3X2N
If unsure, say N.
config PATA_HPT3X3
- tristate "HPT 343/363 PATA support (Experimental)"
+ tristate "HPT 343/363 PATA support"
depends on PCI
help
This option enables support for the HPT 343/363
@@ -317,6 +317,14 @@ config PATA_HPT3X3
If unsure, say N.
+config PATA_HPT3X3_DMA
+ bool "HPT 343/363 DMA support (Experimental)"
+ depends on PATA_HPT3X3
+ help
+ This option enables DMA support for the HPT343/363
+ controllers. Enable with care as there are still some
+ problems with DMA on this chipset.
+
config PATA_ISAPNP
tristate "ISA Plug and Play PATA support (Experimental)"
depends on EXPERIMENTAL && ISAPNP
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 11e4eb9f304e..06f212ff2b4f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -99,6 +99,7 @@ enum {
HOST_CAP_SSC = (1 << 14), /* Slumber capable */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
+ HOST_CAP_SNTF = (1 << 29), /* SNotification register */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
@@ -113,11 +114,11 @@ enum {
PORT_TFDATA = 0x20, /* taskfile data */
PORT_SIG = 0x24, /* device TF signature */
PORT_CMD_ISSUE = 0x38, /* command issue */
- PORT_SCR = 0x28, /* SATA phy register block */
PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
+ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
/* PORT_IRQ_{STAT,MASK} bits */
PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
@@ -216,8 +217,8 @@ struct ahci_port_priv {
unsigned int ncq_saw_sdb:1;
};
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static void ahci_irq_clear(struct ata_port *ap);
@@ -417,7 +418,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
- { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 */
+ { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */
+ { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */
+ { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */
+ { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -545,13 +549,19 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
- /* some chips lie about 64bit support */
+ /* some chips have errata preventing 64bit use */
if ((cap & HOST_CAP_64) && (pi->flags & AHCI_FLAG_32BIT_ONLY)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can't do 64bit DMA, forcing 32bit\n");
cap &= ~HOST_CAP_64;
}
+ if ((cap & HOST_CAP_NCQ) && (pi->flags & AHCI_FLAG_NO_NCQ)) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "controller can't do NCQ, turning off CAP_NCQ\n");
+ cap &= ~HOST_CAP_NCQ;
+ }
+
/* fixup zero port_map */
if (!port_map) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
@@ -625,38 +635,45 @@ static void ahci_restore_initial_config(struct ata_host *host)
(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
}
-static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
+static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
- unsigned int sc_reg;
-
- switch (sc_reg_in) {
- case SCR_STATUS: sc_reg = 0; break;
- case SCR_CONTROL: sc_reg = 1; break;
- case SCR_ERROR: sc_reg = 2; break;
- case SCR_ACTIVE: sc_reg = 3; break;
- default:
- return 0xffffffffU;
- }
+ static const int offset[] = {
+ [SCR_STATUS] = PORT_SCR_STAT,
+ [SCR_CONTROL] = PORT_SCR_CTL,
+ [SCR_ERROR] = PORT_SCR_ERR,
+ [SCR_ACTIVE] = PORT_SCR_ACT,
+ [SCR_NOTIFICATION] = PORT_SCR_NTF,
+ };
+ struct ahci_host_priv *hpriv = ap->host->private_data;
- return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ if (sc_reg < ARRAY_SIZE(offset) &&
+ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
+ return offset[sc_reg];
+ return 0;
}
-
-static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
- u32 val)
+static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
- unsigned int sc_reg;
-
- switch (sc_reg_in) {
- case SCR_STATUS: sc_reg = 0; break;
- case SCR_CONTROL: sc_reg = 1; break;
- case SCR_ERROR: sc_reg = 2; break;
- case SCR_ACTIVE: sc_reg = 3; break;
- default:
- return;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ int offset = ahci_scr_offset(ap, sc_reg);
+
+ if (offset) {
+ *val = readl(port_mmio + offset);
+ return 0;
}
+ return -EINVAL;
+}
- writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ int offset = ahci_scr_offset(ap, sc_reg);
+
+ if (offset) {
+ writel(val, port_mmio + offset);
+ return 0;
+ }
+ return -EINVAL;
}
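
The new ahci_scr_offset() replaces the old switch statement with a designated-initializer lookup table and only exposes SCR_NOTIFICATION when the controller advertises HOST_CAP_SNTF, which is what lets both accessors report -EINVAL for registers the hardware does not implement. A minimal standalone sketch of that lookup follows; the port offsets and the SNTF capability bit are copied from this hunk, while the SCR_* index values and the main() harness are illustrative only, not the libata definitions.

#include <stdio.h>

enum { SCR_STATUS, SCR_ERROR, SCR_CONTROL, SCR_ACTIVE, SCR_NOTIFICATION };

enum {
	PORT_SCR_STAT = 0x28,
	PORT_SCR_CTL  = 0x2c,
	PORT_SCR_ERR  = 0x30,
	PORT_SCR_ACT  = 0x34,
	PORT_SCR_NTF  = 0x3c,
};

#define HOST_CAP_SNTF	(1u << 29)

/* 0 means "no such register on this controller"; callers map that to -EINVAL */
static unsigned int scr_offset(unsigned int cap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS]		= PORT_SCR_STAT,
		[SCR_CONTROL]		= PORT_SCR_CTL,
		[SCR_ERROR]		= PORT_SCR_ERR,
		[SCR_ACTIVE]		= PORT_SCR_ACT,
		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
	};

	if (sc_reg < sizeof(offset) / sizeof(offset[0]) &&
	    (sc_reg != SCR_NOTIFICATION || (cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

int main(void)
{
	printf("SError offset:               0x%02x\n", scr_offset(0, SCR_ERROR));
	printf("SNotification, no SNTF cap:  0x%02x\n", scr_offset(0, SCR_NOTIFICATION));
	printf("SNotification, SNTF cap set: 0x%02x\n",
	       scr_offset(HOST_CAP_SNTF, SCR_NOTIFICATION));
	return 0;
}
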
static void ahci_start_engine(struct ata_port *ap)
@@ -948,37 +965,87 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
-static int ahci_clo(struct ata_port *ap)
+static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
void __iomem *port_mmio = ap->ioaddr.cmd_addr;
struct ahci_host_priv *hpriv = ap->host->private_data;
u32 tmp;
+ int busy, rc;
+
+ /* do we need to kick the port? */
+ busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
+ if (!busy && !force_restart)
+ return 0;
+
+ /* stop engine */
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ goto out_restart;
- if (!(hpriv->cap & HOST_CAP_CLO))
- return -EOPNOTSUPP;
+ /* need to do CLO? */
+ if (!busy) {
+ rc = 0;
+ goto out_restart;
+ }
+ if (!(hpriv->cap & HOST_CAP_CLO)) {
+ rc = -EOPNOTSUPP;
+ goto out_restart;
+ }
+
+ /* perform CLO */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_CLO;
writel(tmp, port_mmio + PORT_CMD);
+ rc = 0;
tmp = ata_wait_register(port_mmio + PORT_CMD,
PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
if (tmp & PORT_CMD_CLO)
- return -EIO;
+ rc = -EIO;
- return 0;
+ /* restart engine */
+ out_restart:
+ ahci_start_engine(ap);
+ return rc;
}
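
ahci_kick_engine() folds the stop-engine / CLO / start-engine sequence that ahci_softreset() used to open-code into one helper: the engine is stopped, Command List Override is issued only when the port still shows BSY or DRQ and the controller supports it, and the engine is restarted on every exit path. A small standalone model of that decision flow, assuming the status and capability values below stand in for the real AHCI register reads:

#include <stdio.h>
#include <errno.h>

#define ATA_BUSY	0x80
#define ATA_DRQ		0x08
#define HOST_CAP_CLO	(1u << 24)

/*
 * Decision-flow model of ahci_kick_engine(): returns 0 on success,
 * -EOPNOTSUPP when CLO would be needed but the controller lacks it.
 * The engine is conceptually restarted on every path, mirroring the
 * out_restart label in the real code.
 */
static int kick_engine_model(unsigned int status, unsigned int cap,
			     int force_restart)
{
	int busy = status & (ATA_BUSY | ATA_DRQ);

	if (!busy && !force_restart)
		return 0;		/* nothing to do, engine untouched */

	/* "stop engine" would happen here */

	if (!busy)
		return 0;		/* a plain restart is all that's needed */

	if (!(cap & HOST_CAP_CLO))
		return -EOPNOTSUPP;	/* port stuck but CLO unavailable */

	/* "perform CLO, then restart engine" */
	return 0;
}

int main(void)
{
	printf("idle, no force:    %d\n", kick_engine_model(0, 0, 0));
	printf("idle, force:       %d\n", kick_engine_model(0, 0, 1));
	printf("busy, CLO capable: %d\n", kick_engine_model(ATA_BUSY, HOST_CAP_CLO, 0));
	printf("busy, no CLO:      %d\n", kick_engine_model(ATA_BUSY, 0, 0));
	return 0;
}
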
-static int ahci_softreset(struct ata_port *ap, unsigned int *class,
- unsigned long deadline)
+static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
+ struct ata_taskfile *tf, int is_cmd, u16 flags,
+ unsigned long timeout_msec)
{
+ const u32 cmd_fis_len = 5; /* five dwords */
struct ahci_port_priv *pp = ap->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
- const u32 cmd_fis_len = 5; /* five dwords */
+ u8 *fis = pp->cmd_tbl;
+ u32 tmp;
+
+ /* prep the command */
+ ata_tf_to_fis(tf, pmp, is_cmd, fis);
+ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
+
+ /* issue & wait */
+ writel(1, port_mmio + PORT_CMD_ISSUE);
+
+ if (timeout_msec) {
+ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
+ 1, timeout_msec);
+ if (tmp & 0x1) {
+ ahci_kick_engine(ap, 1);
+ return -EBUSY;
+ }
+ } else
+ readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+
+ return 0;
+}
+
+static int ahci_do_softreset(struct ata_port *ap, unsigned int *class,
+ int pmp, unsigned long deadline)
+{
const char *reason = NULL;
+ unsigned long now, msecs;
struct ata_taskfile tf;
- u32 tmp;
- u8 *fis;
int rc;
DPRINTK("ENTER\n");
@@ -990,43 +1057,22 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
}
/* prepare for SRST (AHCI-1.1 10.4.1) */
- rc = ahci_stop_engine(ap);
- if (rc) {
- reason = "failed to stop engine";
- goto fail_restart;
- }
-
- /* check BUSY/DRQ, perform Command List Override if necessary */
- if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
- rc = ahci_clo(ap);
-
- if (rc == -EOPNOTSUPP) {
- reason = "port busy but CLO unavailable";
- goto fail_restart;
- } else if (rc) {
- reason = "port busy but CLO failed";
- goto fail_restart;
- }
- }
-
- /* restart engine */
- ahci_start_engine(ap);
+ rc = ahci_kick_engine(ap, 1);
+ if (rc)
+ ata_port_printk(ap, KERN_WARNING,
+ "failed to reset engine (errno=%d)", rc);
ata_tf_init(ap->device, &tf);
- fis = pp->cmd_tbl;
/* issue the first D2H Register FIS */
- ahci_fill_cmd_slot(pp, 0,
- cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
+ msecs = 0;
+ now = jiffies;
+ if (time_after(now, deadline))
+ msecs = jiffies_to_msecs(deadline - now);
tf.ctl |= ATA_SRST;
- ata_tf_to_fis(&tf, fis, 0);
- fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
-
- writel(1, port_mmio + PORT_CMD_ISSUE);
-
- tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
- if (tmp & 0x1) {
+ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
+ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
rc = -EIO;
reason = "1st FIS failed";
goto fail;
@@ -1036,14 +1082,8 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
msleep(1);
/* issue the second D2H Register FIS */
- ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
-
tf.ctl &= ~ATA_SRST;
- ata_tf_to_fis(&tf, fis, 0);
- fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
-
- writel(1, port_mmio + PORT_CMD_ISSUE);
- readl(port_mmio + PORT_CMD_ISSUE); /* flush */
+ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
/* spec mandates ">= 2ms" before checking status.
* We wait 150ms, because that was the magic delay used for
@@ -1066,13 +1106,17 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class,
DPRINTK("EXIT, class=%u\n", *class);
return 0;
- fail_restart:
- ahci_start_engine(ap);
fail:
ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
return rc;
}
+static int ahci_softreset(struct ata_port *ap, unsigned int *class,
+ unsigned long deadline)
+{
+ return ahci_do_softreset(ap, class, 0, deadline);
+}
+
static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
unsigned long deadline)
{
@@ -1088,7 +1132,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(ap->device, &tf);
tf.command = 0x80;
- ata_tf_to_fis(&tf, d2h_fis, 0);
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
rc = sata_std_hardreset(ap, class, deadline);
@@ -1106,6 +1150,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class,
static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
unsigned long deadline)
{
+ u32 serror;
int rc;
DPRINTK("ENTER\n");
@@ -1116,7 +1161,8 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class,
deadline);
/* vt8251 needs SError cleared for the port to operate */
- ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
+ ahci_scr_read(ap, SCR_ERROR, &serror);
+ ahci_scr_write(ap, SCR_ERROR, serror);
ahci_start_engine(ap);
@@ -1205,7 +1251,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
*/
cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
- ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
+ ata_tf_to_fis(&qc->tf, 0, 1, cmd_tbl);
if (is_atapi) {
memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
@@ -1238,7 +1284,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_ehi_clear_desc(ehi);
/* AHCI needs SError cleared; otherwise, it might lock up */
- serror = ahci_scr_read(ap, SCR_ERROR);
+ ahci_scr_read(ap, SCR_ERROR, &serror);
ahci_scr_write(ap, SCR_ERROR, serror);
/* analyze @irq_stat */
@@ -1262,12 +1308,12 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
if (irq_stat & PORT_IRQ_IF_ERR) {
err_mask |= AC_ERR_ATA_BUS;
action |= ATA_EH_SOFTRESET;
- ata_ehi_push_desc(ehi, ", interface fatal error");
+ ata_ehi_push_desc(ehi, "interface fatal error");
}
if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
+ ata_ehi_push_desc(ehi, "%s", irq_stat & PORT_IRQ_CONNECT ?
"connection status changed" : "PHY RDY changed");
}
@@ -1276,7 +1322,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
err_mask |= AC_ERR_HSM;
action |= ATA_EH_SOFTRESET;
- ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
+ ata_ehi_push_desc(ehi, "unknown FIS %08x %08x %08x %08x",
unk[0], unk[1], unk[2], unk[3]);
}
@@ -1512,11 +1558,17 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- if (qc->flags & ATA_QCFLAG_FAILED) {
- /* make DMA engine forget about the failed command */
- ahci_stop_engine(ap);
- ahci_start_engine(ap);
- }
+ /* make DMA engine forget about the failed command */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ahci_kick_engine(ap, 1);
+}
+
+static int ahci_port_resume(struct ata_port *ap)
+{
+ ahci_power_up(ap);
+ ahci_start_port(ap);
+
+ return 0;
}
#ifdef CONFIG_PM
@@ -1536,14 +1588,6 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
return rc;
}
-static int ahci_port_resume(struct ata_port *ap)
-{
- ahci_power_up(ap);
- ahci_start_port(ap);
-
- return 0;
-}
-
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
@@ -1734,12 +1778,13 @@ static void ahci_print_info(struct ata_host *host)
dev_printk(KERN_INFO, &pdev->dev,
"flags: "
- "%s%s%s%s%s%s"
- "%s%s%s%s%s%s%s\n"
+ "%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s\n"
,
cap & (1 << 31) ? "64bit " : "",
cap & (1 << 30) ? "ncq " : "",
+ cap & (1 << 29) ? "sntf " : "",
cap & (1 << 28) ? "ilck " : "",
cap & (1 << 27) ? "stag " : "",
cap & (1 << 26) ? "pm " : "",
@@ -1794,7 +1839,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_save_initial_config(pdev, &pi, hpriv);
/* prepare host */
- if (!(pi.flags & AHCI_FLAG_NO_NCQ) && (hpriv->cap & HOST_CAP_NCQ))
+ if (hpriv->cap & HOST_CAP_NCQ)
pi.flags |= ATA_FLAG_NCQ;
host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
@@ -1808,10 +1853,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *port_mmio = ahci_port_base(ap);
/* standard SATA port setup */
- if (hpriv->port_map & (1 << i)) {
+ if (hpriv->port_map & (1 << i))
ap->ioaddr.cmd_addr = port_mmio;
- ap->ioaddr.scr_addr = port_mmio + PORT_SCR;
- }
/* disabled/not-implemented port */
else
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6a3bfef58e13..d9fa329fd157 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -414,7 +414,7 @@ static const struct piix_map_db ich6m_map_db = {
*/
.map = {
/* PM PS SM SS MAP */
- { P0, P2, RV, RV }, /* 00b */
+ { P0, P2, NA, NA }, /* 00b */
{ IDE, IDE, P1, P3 }, /* 01b */
{ P0, P2, IDE, IDE }, /* 10b */
{ RV, RV, RV, RV },
@@ -928,20 +928,18 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
{
struct pci_dev *pdev = NULL;
u16 cfg;
- u8 rev;
int no_piix_dma = 0;
while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL)
{
/* Look for 450NX PXB. Check for problem configurations
A PCI quirk checks bit 6 already */
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
pci_read_config_word(pdev, 0x41, &cfg);
/* Only on the original revision: IDE DMA can hang */
- if (rev == 0x00)
+ if (pdev->revision == 0x00)
no_piix_dma = 1;
/* On all revisions below 5 PXB bus lock must be disabled for IDE */
- else if (cfg & (1<<14) && rev < 5)
+ else if (cfg & (1<<14) && pdev->revision < 5)
no_piix_dma = 2;
}
if (no_piix_dma)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5b25311ba885..6001aae0b884 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -71,6 +71,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;
@@ -110,8 +111,9 @@ MODULE_VERSION(DRV_VERSION);
/**
* ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
* @tf: Taskfile to convert
- * @fis: Buffer into which data will output
* @pmp: Port multiplier port
+ * @is_cmd: This FIS is for command
+ * @fis: Buffer into which data will output
*
* Converts a standard ATA taskfile to a Serial ATA
* FIS structure (Register - Host to Device).
@@ -119,12 +121,13 @@ MODULE_VERSION(DRV_VERSION);
* LOCKING:
* Inherited from caller.
*/
-
-void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
- fis[0] = 0x27; /* Register - Host to Device FIS */
- fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
- bit 7 indicates Command FIS */
+ fis[0] = 0x27; /* Register - Host to Device FIS */
+ fis[1] = pmp & 0xf; /* Port multiplier number*/
+ if (is_cmd)
+ fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
+
fis[2] = tf->command;
fis[3] = tf->feature;
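
With the new prototype the caller decides whether bit 7 of the second FIS byte (the Command bit) is set, rather than it being hard-coded, which is what allows ahci_exec_polled_cmd() above to build both command and control FISes. A standalone sketch of just that header encoding, assuming only the first two FIS bytes matter for the example:

#include <stdio.h>

typedef unsigned char u8;

/* Encode the first two bytes of a Register - Host to Device FIS. */
static void fis_header(u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* FIS type: Register H2D */
	fis[1] = pmp & 0xf;		/* port multiplier port number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7: this FIS carries a command */
}

int main(void)
{
	u8 fis[2];

	fis_header(0, 1, fis);		/* normal command to PMP port 0 */
	printf("cmd, pmp 0:     %02x %02x\n", fis[0], fis[1]);	/* 27 80 */

	fis_header(3, 1, fis);		/* command routed to PMP port 3 */
	printf("cmd, pmp 3:     %02x %02x\n", fis[0], fis[1]);	/* 27 83 */

	fis_header(0, 0, fis);		/* control FIS, e.g. the SRST sequence */
	printf("non-cmd, pmp 0: %02x %02x\n", fis[0], fis[1]);	/* 27 00 */
	return 0;
}
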
@@ -1283,18 +1286,11 @@ static unsigned int ata_id_xfermask(const u16 *id)
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
unsigned long delay)
{
- int rc;
-
- if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
- return;
-
PREPARE_DELAYED_WORK(&ap->port_task, fn);
ap->port_task_data = data;
- rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
-
- /* rc == 0 means that another user is using port task */
- WARN_ON(rc == 0);
+ /* may fail if ata_port_flush_task() in progress */
+ queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
@@ -1309,32 +1305,9 @@ void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
*/
void ata_port_flush_task(struct ata_port *ap)
{
- unsigned long flags;
-
DPRINTK("ENTER\n");
- spin_lock_irqsave(ap->lock, flags);
- ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
- spin_unlock_irqrestore(ap->lock, flags);
-
- DPRINTK("flush #1\n");
- cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
-
- /*
- * At this point, if a task is running, it's guaranteed to see
- * the FLUSH flag; thus, it will never queue pio tasks again.
- * Cancel and flush.
- */
- if (!cancel_delayed_work(&ap->port_task)) {
- if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
- __FUNCTION__);
- cancel_work_sync(&ap->port_task.work);
- }
-
- spin_lock_irqsave(ap->lock, flags);
- ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
- spin_unlock_irqrestore(ap->lock, flags);
+ cancel_rearming_delayed_work(&ap->port_task);
if (ata_msg_ctl(ap))
ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
@@ -1814,7 +1787,7 @@ static void ata_dev_config_ncq(struct ata_device *dev,
desc[0] = '\0';
return;
}
- if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
+ if (dev->horkage & ATA_HORKAGE_NONCQ) {
snprintf(desc, desc_sz, "NCQ (not used)");
return;
}
@@ -1863,6 +1836,9 @@ int ata_dev_configure(struct ata_device *dev)
if (ata_msg_probe(ap))
ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
+ /* set horkage */
+ dev->horkage |= ata_dev_blacklisted(dev);
+
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -2038,7 +2014,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = ATA_MAX_SECTORS;
}
- if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
+ if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
@@ -2413,21 +2389,35 @@ int sata_down_spd_limit(struct ata_port *ap)
u32 sstatus, spd, mask;
int rc, highbit;
+ if (!sata_scr_valid(ap))
+ return -EOPNOTSUPP;
+
+ /* If SCR can be read, use it to determine the current SPD.
+ * If not, use cached value in ap->sata_spd.
+ */
rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
- if (rc)
- return rc;
+ if (rc == 0)
+ spd = (sstatus >> 4) & 0xf;
+ else
+ spd = ap->sata_spd;
mask = ap->sata_spd_limit;
if (mask <= 1)
return -EINVAL;
+
+ /* unconditionally mask off the highest bit */
highbit = fls(mask) - 1;
mask &= ~(1 << highbit);
- spd = (sstatus >> 4) & 0xf;
- if (spd <= 1)
- return -EINVAL;
- spd--;
- mask &= (1 << spd) - 1;
+ /* Mask off all speeds higher than or equal to the current
+ * one. Force 1.5Gbps if current SPD is not available.
+ */
+ if (spd > 1)
+ mask &= (1 << (spd - 1)) - 1;
+ else
+ mask &= 1;
+
+ /* were we already at the bottom? */
if (!mask)
return -EINVAL;
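
The new masking logic is easiest to follow with numbers: with a limit mask of 0x7 and a link currently at 3.0 Gbps (spd == 2), the top bit is dropped and the mask is then intersected with (1 << (spd - 1)) - 1 == 0x1, leaving only 1.5 Gbps; if SStatus cannot be read, the cached speed is used, and an unknown speed forces 1.5 Gbps. A standalone sketch of that arithmetic, assuming the same one-bit-per-generation encoding as sata_spd_limit:

#include <stdio.h>

/*
 * Model of the speed-lowering arithmetic: mask has one bit per SATA
 * generation (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps, ...), spd is the
 * current SStatus SPD field (0 = unknown). Returns the new mask, or 0
 * when there is nothing left to lower to.
 */
static unsigned int lower_spd_mask(unsigned int mask, unsigned int spd)
{
	int highbit;

	if (mask <= 1)
		return 0;

	/* unconditionally drop the highest allowed speed */
	highbit = 31;
	while (!(mask & (1u << highbit)))
		highbit--;
	mask &= ~(1u << highbit);

	/* drop everything at or above the current speed; force 1.5 Gbps
	 * if the current speed could not be determined */
	if (spd > 1)
		mask &= (1u << (spd - 1)) - 1;
	else
		mask &= 1;

	return mask;
}

int main(void)
{
	printf("mask 0x7, link at 3.0 Gbps -> 0x%x\n", lower_spd_mask(0x7, 2));
	printf("mask 0x3, link at 1.5 Gbps -> 0x%x\n", lower_spd_mask(0x3, 1));
	printf("mask 0x7, SPD unreadable   -> 0x%x\n", lower_spd_mask(0x7, 0));
	return 0;
}
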
@@ -3190,9 +3180,6 @@ void ata_bus_reset(struct ata_port *ap)
if ((slave_possible) && (err != 0x81))
ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
- /* re-enable interrupts */
- ap->ops->irq_on(ap);
-
/* is double-select really necessary? */
if (ap->device[1].class != ATA_DEV_NONE)
ap->ops->dev_select(ap, 1);
@@ -3280,9 +3267,11 @@ int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
last = cur;
last_jiffies = jiffies;
- /* check deadline */
+ /* Check deadline. If debouncing failed, return
+ * -EPIPE to tell upper layer to lower link speed.
+ */
if (time_after(jiffies, deadline))
- return -EBUSY;
+ return -EPIPE;
}
}
@@ -3577,10 +3566,6 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
sata_scr_write(ap, SCR_ERROR, serror);
- /* re-enable interrupts */
- if (!ap->ops->error_handler)
- ap->ops->irq_on(ap);
-
/* is double-select really necessary? */
if (classes[0] != ATA_DEV_NONE)
ap->ops->dev_select(ap, 1);
@@ -3770,6 +3755,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
{ "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
+ { "IOMEGA ZIP 250 ATAPI Floppy",
+ NULL, ATA_HORKAGE_NODMA },
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
@@ -3783,7 +3770,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
{ "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
+ { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ },
{ "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
+ { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
+ ATA_HORKAGE_NONCQ },
/* NCQ hard hangs device under heavier load, needs hard power cycle */
{ "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
/* Blacklist entries taken from Silicon Image 3124/3132
@@ -3796,6 +3786,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
{ "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
+ { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
+ { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
/* Devices with NCQ limits */
@@ -3803,7 +3795,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ }
};
-unsigned long ata_device_blacklisted(const struct ata_device *dev)
+static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
@@ -3833,7 +3825,7 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
(dev->flags & ATA_DFLAG_CDB_INTR))
return 1;
- return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
+ return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
/**
@@ -5756,10 +5748,8 @@ int sata_scr_valid(struct ata_port *ap)
*/
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
- if (sata_scr_valid(ap)) {
- *val = ap->ops->scr_read(ap, reg);
- return 0;
- }
+ if (sata_scr_valid(ap))
+ return ap->ops->scr_read(ap, reg, val);
return -EOPNOTSUPP;
}
@@ -5781,10 +5771,8 @@ int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
*/
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
- if (sata_scr_valid(ap)) {
- ap->ops->scr_write(ap, reg, val);
- return 0;
- }
+ if (sata_scr_valid(ap))
+ return ap->ops->scr_write(ap, reg, val);
return -EOPNOTSUPP;
}
@@ -5805,10 +5793,13 @@ int sata_scr_write(struct ata_port *ap, int reg, u32 val)
*/
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
+ int rc;
+
if (sata_scr_valid(ap)) {
- ap->ops->scr_write(ap, reg, val);
- ap->ops->scr_read(ap, reg);
- return 0;
+ rc = ap->ops->scr_write(ap, reg, val);
+ if (rc == 0)
+ rc = ap->ops->scr_read(ap, reg, &val);
+ return rc;
}
return -EOPNOTSUPP;
}
@@ -6020,6 +6011,7 @@ void ata_dev_init(struct ata_device *dev)
/* SATA spd limit is bound to the first device */
ap->sata_spd_limit = ap->hw_sata_spd_limit;
+ ap->sata_spd = 0;
/* High bits of dev->flags are used to record warm plug
* requests which occur asynchronously. Synchronize using
@@ -6085,6 +6077,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
+ init_timer_deferrable(&ap->fastdrain_timer);
+ ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
+ ap->fastdrain_timer.data = (unsigned long)ap;
ap->cbl = ATA_CBL_NONE;
@@ -6461,7 +6456,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- ata_scsi_scan_host(ap);
+ ata_scsi_scan_host(ap, 1);
}
return 0;
@@ -6557,13 +6552,7 @@ void ata_port_detach(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
-
- /* Flush hotplug task. The sequence is similar to
- * ata_port_flush_task().
- */
- cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
- cancel_delayed_work(&ap->hotplug_task);
- cancel_work_sync(&ap->hotplug_task.work);
+ cancel_rearming_delayed_work(&ap->hotplug_task);
skip_eh:
/* remove the associated SCSI host */
@@ -6952,7 +6941,6 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
-EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
@@ -6961,9 +6949,9 @@ EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
-EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
+EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
@@ -6976,6 +6964,9 @@ EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
+EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
+EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9ee0a8c08d96..ac6ceed4bb60 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -56,6 +56,7 @@ enum {
*/
enum {
ATA_EH_PRERESET_TIMEOUT = 10 * HZ,
+ ATA_EH_FASTDRAIN_INTERVAL = 3 * HZ,
};
/* The following table determines how we sequence resets. Each entry
@@ -85,6 +86,71 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
+static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
+ va_list args)
+{
+ ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
+ ATA_EH_DESC_LEN - ehi->desc_len,
+ fmt, args);
+}
+
+/**
+ * __ata_ehi_push_desc - push error description without adding separator
+ * @ehi: target EHI
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to @ehi->desc.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __ata_ehi_pushv_desc(ehi, fmt, args);
+ va_end(args);
+}
+
+/**
+ * ata_ehi_push_desc - push error description with separator
+ * @ehi: target EHI
+ * @fmt: printf format string
+ *
+ * Format string according to @fmt and append it to @ehi->desc.
+ * If @ehi->desc is not empty, ", " is added in-between.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
+{
+ va_list args;
+
+ if (ehi->desc_len)
+ __ata_ehi_push_desc(ehi, ", ");
+
+ va_start(args, fmt);
+ __ata_ehi_pushv_desc(ehi, fmt, args);
+ va_end(args);
+}
+
+/**
+ * ata_ehi_clear_desc - clean error description
+ * @ehi: target EHI
+ *
+ * Clear @ehi->desc.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_ehi_clear_desc(struct ata_eh_info *ehi)
+{
+ ehi->desc[0] = '\0';
+ ehi->desc_len = 0;
+}
+
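
All callers now push plain descriptions (see the ahci.c hunks above, where the leading ", " literals were dropped) and the separator handling lives in ata_ehi_push_desc() alone. A small standalone sketch of that append-with-separator behaviour, assuming an 80-byte buffer in place of ATA_EH_DESC_LEN and plain vsnprintf() where the kernel uses vscnprintf():

#include <stdio.h>
#include <stdarg.h>

#define DESC_LEN 80

struct eh_info {
	char desc[DESC_LEN];
	int desc_len;
};

/* note: the kernel's vscnprintf() caps the returned length at the buffer
 * size; this sketch assumes the strings stay well under DESC_LEN */
static void pushv_desc(struct eh_info *ehi, const char *fmt, va_list args)
{
	ehi->desc_len += vsnprintf(ehi->desc + ehi->desc_len,
				   DESC_LEN - ehi->desc_len, fmt, args);
}

/* append without separator */
static void push_desc_raw(struct eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	pushv_desc(ehi, fmt, args);
	va_end(args);
}

/* append, inserting ", " between entries automatically */
static void push_desc(struct eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		push_desc_raw(ehi, ", ");

	va_start(args, fmt);
	pushv_desc(ehi, fmt, args);
	va_end(args);
}

int main(void)
{
	struct eh_info ehi = { .desc_len = 0 };

	push_desc(&ehi, "interface fatal error");
	push_desc(&ehi, "unknown FIS %08x", 0xdeadbeefu);
	printf("%s\n", ehi.desc);  /* interface fatal error, unknown FIS deadbeef */
	return 0;
}
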
static void ata_ering_record(struct ata_ering *ering, int is_io,
unsigned int err_mask)
{
@@ -296,6 +362,9 @@ void ata_scsi_error(struct Scsi_Host *host)
repeat:
/* invoke error handler */
if (ap->ops->error_handler) {
+ /* kill fast drain timer */
+ del_timer_sync(&ap->fastdrain_timer);
+
/* process port resume request */
ata_eh_handle_port_resume(ap);
@@ -511,6 +580,94 @@ void ata_eng_timeout(struct ata_port *ap)
DPRINTK("EXIT\n");
}
+static int ata_eh_nr_in_flight(struct ata_port *ap)
+{
+ unsigned int tag;
+ int nr = 0;
+
+ /* count only non-internal commands */
+ for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
+ if (ata_qc_from_tag(ap, tag))
+ nr++;
+
+ return nr;
+}
+
+void ata_eh_fastdrain_timerfn(unsigned long arg)
+{
+ struct ata_port *ap = (void *)arg;
+ unsigned long flags;
+ int cnt;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ cnt = ata_eh_nr_in_flight(ap);
+
+ /* are we done? */
+ if (!cnt)
+ goto out_unlock;
+
+ if (cnt == ap->fastdrain_cnt) {
+ unsigned int tag;
+
+ /* No progress during the last interval, tag all
+ * in-flight qcs as timed out and freeze the port.
+ */
+ for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+ if (qc)
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ }
+
+ ata_port_freeze(ap);
+ } else {
+ /* some qcs have finished, give it another chance */
+ ap->fastdrain_cnt = cnt;
+ ap->fastdrain_timer.expires =
+ jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+ add_timer(&ap->fastdrain_timer);
+ }
+
+ out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+/**
+ * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
+ * @ap: target ATA port
+ * @fastdrain: activate fast drain
+ *
+ * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
+ * is non-zero and EH wasn't pending before. Fast drain ensures
+ * that EH kicks in in a timely manner.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+{
+ int cnt;
+
+ /* already scheduled? */
+ if (ap->pflags & ATA_PFLAG_EH_PENDING)
+ return;
+
+ ap->pflags |= ATA_PFLAG_EH_PENDING;
+
+ if (!fastdrain)
+ return;
+
+ /* do we have in-flight qcs? */
+ cnt = ata_eh_nr_in_flight(ap);
+ if (!cnt)
+ return;
+
+ /* activate fast drain */
+ ap->fastdrain_cnt = cnt;
+ ap->fastdrain_timer.expires = jiffies + ATA_EH_FASTDRAIN_INTERVAL;
+ add_timer(&ap->fastdrain_timer);
+}
+
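
Fast drain reduces to a per-interval progress check: when EH becomes pending with commands still in flight, the in-flight count is recorded and a timer armed; if the timer fires and the count has not moved, the stragglers are timed out and the port frozen, otherwise the new count is recorded and the timer re-armed. A standalone model of that check, with the timer, qc iteration and locking abstracted into a plain counter:

#include <stdio.h>

struct port_model {
	int fastdrain_cnt;	/* snapshot taken when the timer was armed */
	int frozen;
	int timer_armed;
};

/*
 * Model of ata_eh_fastdrain_timerfn(): compare the current number of
 * in-flight commands against the snapshot from the previous interval.
 */
static void fastdrain_tick(struct port_model *ap, int in_flight_now)
{
	ap->timer_armed = 0;

	if (!in_flight_now)
		return;				/* drained, nothing to do */

	if (in_flight_now == ap->fastdrain_cnt) {
		/* no progress: time out the stragglers, freeze the port */
		ap->frozen = 1;
	} else {
		/* some commands completed: give it another interval */
		ap->fastdrain_cnt = in_flight_now;
		ap->timer_armed = 1;
	}
}

int main(void)
{
	struct port_model ap = { .fastdrain_cnt = 4 };

	fastdrain_tick(&ap, 2);		/* progress: 4 -> 2, re-arm */
	printf("after tick 1: frozen=%d armed=%d cnt=%d\n",
	       ap.frozen, ap.timer_armed, ap.fastdrain_cnt);

	fastdrain_tick(&ap, 2);		/* stalled at 2: freeze */
	printf("after tick 2: frozen=%d armed=%d cnt=%d\n",
	       ap.frozen, ap.timer_armed, ap.fastdrain_cnt);
	return 0;
}
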
/**
* ata_qc_schedule_eh - schedule qc for error handling
* @qc: command to schedule error handling for
@@ -528,7 +685,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
WARN_ON(!ap->ops->error_handler);
qc->flags |= ATA_QCFLAG_FAILED;
- qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
+ ata_eh_set_pending(ap, 1);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
@@ -555,7 +712,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
- ap->pflags |= ATA_PFLAG_EH_PENDING;
+ ata_eh_set_pending(ap, 1);
scsi_schedule_eh(ap->scsi_host);
DPRINTK("port EH scheduled\n");
@@ -579,6 +736,9 @@ int ata_port_abort(struct ata_port *ap)
WARN_ON(!ap->ops->error_handler);
+ /* we're gonna abort all commands, no need for fast drain */
+ ata_eh_set_pending(ap, 0);
+
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
@@ -1130,7 +1290,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
/* we've got the perpetrator, condemn it */
qc = __ata_qc_from_tag(ap, tag);
memcpy(&qc->result_tf, &tf, sizeof(tf));
- qc->err_mask |= AC_ERR_DEV;
+ qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1413,8 +1573,12 @@ static void ata_eh_autopsy(struct ata_port *ap)
if (rc == 0) {
ehc->i.serror |= serror;
ata_eh_analyze_serror(ap);
- } else if (rc != -EOPNOTSUPP)
+ } else if (rc != -EOPNOTSUPP) {
+ /* SError read failed, force hardreset and probing */
+ ata_ehi_schedule_probe(&ehc->i);
ehc->i.action |= ATA_EH_HARDRESET;
+ ehc->i.err_mask |= AC_ERR_OTHER;
+ }
/* analyze NCQ failure */
ata_eh_analyze_ncq_error(ap);
@@ -1524,14 +1688,14 @@ static void ata_eh_report(struct ata_port *ap)
ehc->i.err_mask, ap->sactive, ehc->i.serror,
ehc->i.action, frozen);
if (desc)
- ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
+ ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
} else {
ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
"SAct 0x%x SErr 0x%x action 0x%x%s\n",
ehc->i.err_mask, ap->sactive, ehc->i.serror,
ehc->i.action, frozen);
if (desc)
- ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
+ ata_port_printk(ap, KERN_ERR, "%s\n", desc);
}
for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
@@ -1551,7 +1715,7 @@ static void ata_eh_report(struct ata_port *ap)
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
"tag %d cdb 0x%x data %u %s\n "
"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
- "Emask 0x%x (%s)\n",
+ "Emask 0x%x (%s)%s\n",
cmd->command, cmd->feature, cmd->nsect,
cmd->lbal, cmd->lbam, cmd->lbah,
cmd->hob_feature, cmd->hob_nsect,
@@ -1562,7 +1726,8 @@ static void ata_eh_report(struct ata_port *ap)
res->lbal, res->lbam, res->lbah,
res->hob_feature, res->hob_nsect,
res->hob_lbal, res->hob_lbam, res->hob_lbah,
- res->device, qc->err_mask, ata_err_string(qc->err_mask));
+ res->device, qc->err_mask, ata_err_string(qc->err_mask),
+ qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
}
}
@@ -1648,7 +1813,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
} else
ata_port_printk(ap, KERN_ERR,
"prereset failed (errno=%d)\n", rc);
- return rc;
+ goto out;
}
}
@@ -1661,7 +1826,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
/* prereset told us not to reset, bang classes and return */
for (i = 0; i < ATA_MAX_DEVICES; i++)
classes[i] = ATA_DEV_NONE;
- return 0;
+ rc = 0;
+ goto out;
}
/* did prereset() screw up? if so, fix up to avoid oopsing */
@@ -1697,7 +1863,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
ata_port_printk(ap, KERN_ERR,
"follow-up softreset required "
"but no softreset avaliable\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
@@ -1707,7 +1874,8 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
classes[0] == ATA_DEV_UNKNOWN) {
ata_port_printk(ap, KERN_ERR,
"classification failed\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
}
@@ -1724,7 +1892,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
schedule_timeout_uninterruptible(delta);
}
- if (reset == hardreset &&
+ if (rc == -EPIPE ||
try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1)
sata_down_spd_limit(ap);
if (hardreset)
@@ -1733,12 +1901,18 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
}
if (rc == 0) {
+ u32 sstatus;
+
/* After the reset, the device state is PIO 0 and the
* controller state is undefined. Record the mode.
*/
for (i = 0; i < ATA_MAX_DEVICES; i++)
ap->device[i].pio_mode = XFER_PIO_0;
+ /* record current link speed */
+ if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
+ ap->sata_spd = (sstatus >> 4) & 0xf;
+
if (postreset)
postreset(ap, classes);
@@ -1746,7 +1920,9 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
ehc->i.action |= ATA_EH_REVALIDATE;
}
-
+ out:
+ /* clear hotplug flag */
+ ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
return rc;
}
@@ -1897,6 +2073,57 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
return 1;
}
+static void ata_eh_handle_dev_fail(struct ata_device *dev, int err)
+{
+ struct ata_port *ap = dev->ap;
+ struct ata_eh_context *ehc = &ap->eh_context;
+
+ ehc->tries[dev->devno]--;
+
+ switch (err) {
+ case -ENODEV:
+ /* device missing or wrong IDENTIFY data, schedule probing */
+ ehc->i.probe_mask |= (1 << dev->devno);
+ case -EINVAL:
+ /* give it just one more chance */
+ ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+ case -EIO:
+ if (ehc->tries[dev->devno] == 1) {
+ /* This is the last chance, better to slow
+ * down than lose it.
+ */
+ sata_down_spd_limit(ap);
+ ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
+ }
+ }
+
+ if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
+ /* disable device if it has used up all its chances */
+ ata_dev_disable(dev);
+
+ /* detach if offline */
+ if (ata_port_offline(ap))
+ ata_eh_detach_dev(dev);
+
+ /* probe if requested */
+ if ((ehc->i.probe_mask & (1 << dev->devno)) &&
+ !(ehc->did_probe_mask & (1 << dev->devno))) {
+ ata_eh_detach_dev(dev);
+ ata_dev_init(dev);
+
+ ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
+ ehc->did_probe_mask |= (1 << dev->devno);
+ ehc->i.action |= ATA_EH_SOFTRESET;
+ }
+ } else {
+ /* soft didn't work? be haaaaard */
+ if (ehc->i.flags & ATA_EHI_DID_RESET)
+ ehc->i.action |= ATA_EH_HARDRESET;
+ else
+ ehc->i.action |= ATA_EH_SOFTRESET;
+ }
+}
+
/**
* ata_eh_recover - recover host port after error
* @ap: host port to recover
@@ -1997,50 +2224,7 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
goto out;
dev_fail:
- ehc->tries[dev->devno]--;
-
- switch (rc) {
- case -ENODEV:
- /* device missing or wrong IDENTIFY data, schedule probing */
- ehc->i.probe_mask |= (1 << dev->devno);
- case -EINVAL:
- /* give it just one more chance */
- ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
- case -EIO:
- if (ehc->tries[dev->devno] == 1) {
- /* This is the last chance, better to slow
- * down than lose it.
- */
- sata_down_spd_limit(ap);
- ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
- }
- }
-
- if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
- /* disable device if it has used up all its chances */
- ata_dev_disable(dev);
-
- /* detach if offline */
- if (ata_port_offline(ap))
- ata_eh_detach_dev(dev);
-
- /* probe if requested */
- if ((ehc->i.probe_mask & (1 << dev->devno)) &&
- !(ehc->did_probe_mask & (1 << dev->devno))) {
- ata_eh_detach_dev(dev);
- ata_dev_init(dev);
-
- ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
- ehc->did_probe_mask |= (1 << dev->devno);
- ehc->i.action |= ATA_EH_SOFTRESET;
- }
- } else {
- /* soft didn't work? be haaaaard */
- if (ehc->i.flags & ATA_EHI_DID_RESET)
- ehc->i.action |= ATA_EH_HARDRESET;
- else
- ehc->i.action |= ATA_EH_SOFTRESET;
- }
+ ata_eh_handle_dev_fail(dev, rc);
if (ata_port_nr_enabled(ap)) {
ata_port_printk(ap, KERN_WARNING, "failed to recover some "
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cfde22da07ac..12ac0b511f79 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2947,17 +2947,22 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
return rc;
}
-void ata_scsi_scan_host(struct ata_port *ap)
+void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
+ int tries = 5;
+ struct ata_device *last_failed_dev = NULL;
+ struct ata_device *dev;
unsigned int i;
if (ap->flags & ATA_FLAG_DISABLED)
return;
+ repeat:
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
struct scsi_device *sdev;
+ dev = &ap->device[i];
+
if (!ata_dev_enabled(dev) || dev->sdev)
continue;
@@ -2967,6 +2972,45 @@ void ata_scsi_scan_host(struct ata_port *ap)
scsi_device_put(sdev);
}
}
+
+ /* If we scanned while EH was in progress or allocation
+ * failure occurred, scan would have failed silently. Check
+ * whether all devices are attached.
+ */
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ dev = &ap->device[i];
+ if (ata_dev_enabled(dev) && !dev->sdev)
+ break;
+ }
+ if (i == ATA_MAX_DEVICES)
+ return;
+
+ /* we're missing some SCSI devices */
+ if (sync) {
+		/* If caller requested synchronous scan && we've made
+ * any progress, sleep briefly and repeat.
+ */
+ if (dev != last_failed_dev) {
+ msleep(100);
+ last_failed_dev = dev;
+ goto repeat;
+ }
+
+ /* We might be failing to detect boot device, give it
+ * a few more chances.
+ */
+ if (--tries) {
+ msleep(100);
+ goto repeat;
+ }
+
+ ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
+ "failed without making any progress,\n"
+ " switching to async\n");
+ }
+
+ queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+ round_jiffies_relative(HZ));
}
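
The synchronous-scan fallback amounts to a retry policy: keep rescanning while each pass attaches at least one more device, allow a bounded number of extra attempts in case a boot device is slow to appear, and otherwise hand the leftovers to the asynchronous hotplug path. A standalone sketch of that policy, assuming a scan callback that simply reports how many enabled devices still lack an sdev (the per-device progress tracking and msleep() of the real code are simplified away):

#include <stdio.h>

/*
 * Retry-policy model for the synchronous scan: scan() returns the number
 * of devices still unattached after one pass. Returns 0 when everything
 * attached, 1 when the caller should fall back to the async rescan.
 */
static int sync_scan_model(int (*scan)(void *), void *ctx)
{
	int tries = 5;
	int last_missing = -1;
	int missing;

	for (;;) {
		missing = scan(ctx);
		if (!missing)
			return 0;

		if (missing != last_missing) {	/* still making progress */
			last_missing = missing;
			continue;
		}

		if (--tries)			/* maybe a slow boot device */
			continue;

		return 1;			/* give up, go asynchronous */
	}
}

/* toy scan: two devices attach on the first two passes, one never does */
static int toy_scan(void *ctx)
{
	int *remaining = ctx;

	if (*remaining > 1)
		(*remaining)--;
	return *remaining;
}

int main(void)
{
	int remaining = 3;

	printf("fall back to async? %d\n", sync_scan_model(toy_scan, &remaining));
	return 0;
}
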
/**
@@ -3093,20 +3137,7 @@ void ata_scsi_hotplug(struct work_struct *work)
}
/* scan for new ones */
- ata_scsi_scan_host(ap);
-
- /* If we scanned while EH was in progress, scan would have
- * failed silently. Requeue if there are enabled but
- * unattached devices.
- */
- for (i = 0; i < ATA_MAX_DEVICES; i++) {
- struct ata_device *dev = &ap->device[i];
- if (ata_dev_enabled(dev) && !dev->sdev) {
- queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
- round_jiffies_relative(HZ));
- break;
- }
- }
+ ata_scsi_scan_host(ap, 0);
DPRINTK("EXIT\n");
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index fa1c22c7b38f..6c289c7b1322 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1,5 +1,5 @@
/*
- * libata-bmdma.c - helper library for PCI IDE BMDMA
+ * libata-sff.c - helper library for PCI IDE BMDMA
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Please ALWAYS copy linux-ide@vger.kernel.org
@@ -211,6 +211,8 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
tf->hob_lbal = ioread8(ioaddr->lbal_addr);
tf->hob_lbam = ioread8(ioaddr->lbam_addr);
tf->hob_lbah = ioread8(ioaddr->lbah_addr);
+ iowrite8(tf->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = tf->ctl;
}
}
@@ -604,13 +606,17 @@ int ata_pci_init_bmdma(struct ata_host *host)
}
/**
- * ata_pci_init_native_host - acquire native ATA resources and init host
+ * ata_pci_init_sff_host - acquire native PCI ATA resources and init host
* @host: target ATA host
*
* Acquire native PCI ATA resources for @host and initialize the
* first two ports of @host accordingly. Ports marked dummy are
* skipped and allocation failure makes the port dummy.
*
+ * Note that native PCI resources are valid even for legacy hosts
+ * as we fix up pdev resources array early in boot, so this
+ * function can be used for both native and legacy SFF hosts.
+ *
* LOCKING:
* Inherited from calling layer (may sleep).
*
@@ -618,7 +624,7 @@ int ata_pci_init_bmdma(struct ata_host *host)
* 0 if at least one port is initialized, -ENODEV if no port is
* available.
*/
-int ata_pci_init_native_host(struct ata_host *host)
+int ata_pci_init_sff_host(struct ata_host *host)
{
struct device *gdev = host->dev;
struct pci_dev *pdev = to_pci_dev(gdev);
@@ -673,7 +679,7 @@ int ata_pci_init_native_host(struct ata_host *host)
}
/**
- * ata_pci_prepare_native_host - helper to prepare native PCI ATA host
+ * ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
* @pdev: target PCI device
* @ppi: array of port_info, must be enough for two ports
* @r_host: out argument for the initialized ATA host
@@ -687,9 +693,9 @@ int ata_pci_init_native_host(struct ata_host *host)
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int ata_pci_prepare_native_host(struct pci_dev *pdev,
- const struct ata_port_info * const * ppi,
- struct ata_host **r_host)
+int ata_pci_prepare_sff_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host)
{
struct ata_host *host;
int rc;
@@ -705,7 +711,7 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev,
goto err_out;
}
- rc = ata_pci_init_native_host(host);
+ rc = ata_pci_init_sff_host(host);
if (rc)
goto err_out;
@@ -730,221 +736,6 @@ int ata_pci_prepare_native_host(struct pci_dev *pdev,
return rc;
}
-struct ata_legacy_devres {
- unsigned int mask;
- unsigned long cmd_port[2];
- void __iomem * cmd_addr[2];
- void __iomem * ctl_addr[2];
- unsigned int irq[2];
- void * irq_dev_id[2];
-};
-
-static void ata_legacy_free_irqs(struct ata_legacy_devres *legacy_dr)
-{
- int i;
-
- for (i = 0; i < 2; i++) {
- if (!legacy_dr->irq[i])
- continue;
-
- free_irq(legacy_dr->irq[i], legacy_dr->irq_dev_id[i]);
- legacy_dr->irq[i] = 0;
- legacy_dr->irq_dev_id[i] = NULL;
- }
-}
-
-static void ata_legacy_release(struct device *gdev, void *res)
-{
- struct ata_legacy_devres *this = res;
- int i;
-
- ata_legacy_free_irqs(this);
-
- for (i = 0; i < 2; i++) {
- if (this->cmd_addr[i])
- ioport_unmap(this->cmd_addr[i]);
- if (this->ctl_addr[i])
- ioport_unmap(this->ctl_addr[i]);
- if (this->cmd_port[i])
- release_region(this->cmd_port[i], 8);
- }
-}
-
-static int ata_init_legacy_port(struct ata_port *ap,
- struct ata_legacy_devres *legacy_dr)
-{
- struct ata_host *host = ap->host;
- int port_no = ap->port_no;
- unsigned long cmd_port, ctl_port;
-
- if (port_no == 0) {
- cmd_port = ATA_PRIMARY_CMD;
- ctl_port = ATA_PRIMARY_CTL;
- } else {
- cmd_port = ATA_SECONDARY_CMD;
- ctl_port = ATA_SECONDARY_CTL;
- }
-
- /* request cmd_port */
- if (request_region(cmd_port, 8, "libata"))
- legacy_dr->cmd_port[port_no] = cmd_port;
- else {
- dev_printk(KERN_WARNING, host->dev,
- "0x%0lX IDE port busy\n", cmd_port);
- return -EBUSY;
- }
-
- /* iomap cmd and ctl ports */
- legacy_dr->cmd_addr[port_no] = ioport_map(cmd_port, 8);
- legacy_dr->ctl_addr[port_no] = ioport_map(ctl_port, 1);
- if (!legacy_dr->cmd_addr[port_no] || !legacy_dr->ctl_addr[port_no]) {
- dev_printk(KERN_WARNING, host->dev,
- "failed to map cmd/ctl ports\n");
- return -ENOMEM;
- }
-
- /* init IO addresses */
- ap->ioaddr.cmd_addr = legacy_dr->cmd_addr[port_no];
- ap->ioaddr.altstatus_addr = legacy_dr->ctl_addr[port_no];
- ap->ioaddr.ctl_addr = legacy_dr->ctl_addr[port_no];
- ata_std_ports(&ap->ioaddr);
-
- return 0;
-}
-
-/**
- * ata_init_legacy_host - acquire legacy ATA resources and init ATA host
- * @host: target ATA host
- * @was_busy: out parameter, indicates whether any port was busy
- *
- * Acquire legacy ATA resources for the first two ports of @host
- * and initialize it accordingly. Ports marked dummy are skipped
- * and resource acquistion failure makes the port dummy.
- *
- * LOCKING:
- * Inherited from calling layer (may sleep).
- *
- * RETURNS:
- * 0 if at least one port is initialized, -ENODEV if no port is
- * available.
- */
-static int ata_init_legacy_host(struct ata_host *host, int *was_busy)
-{
- struct device *gdev = host->dev;
- struct ata_legacy_devres *legacy_dr;
- int i, rc;
-
- if (!devres_open_group(gdev, NULL, GFP_KERNEL))
- return -ENOMEM;
-
- rc = -ENOMEM;
- legacy_dr = devres_alloc(ata_legacy_release, sizeof(*legacy_dr),
- GFP_KERNEL);
- if (!legacy_dr)
- goto err_out;
- devres_add(gdev, legacy_dr);
-
- for (i = 0; i < 2; i++) {
- if (ata_port_is_dummy(host->ports[i]))
- continue;
-
- rc = ata_init_legacy_port(host->ports[i], legacy_dr);
- if (rc == 0)
- legacy_dr->mask |= 1 << i;
- else {
- if (rc == -EBUSY)
- (*was_busy)++;
- host->ports[i]->ops = &ata_dummy_port_ops;
- }
- }
-
- if (!legacy_dr->mask) {
- dev_printk(KERN_ERR, gdev, "no available legacy port\n");
- return -ENODEV;
- }
-
- devres_remove_group(gdev, NULL);
- return 0;
-
- err_out:
- devres_release_group(gdev, NULL);
- return rc;
-}
-
-/**
- * ata_request_legacy_irqs - request legacy ATA IRQs
- * @host: target ATA host
- * @handler: array of IRQ handlers
- * @irq_flags: array of IRQ flags
- * @dev_id: array of IRQ dev_ids
- *
- * Request legacy IRQs for non-dummy legacy ports in @host. All
- * IRQ parameters are passed as array to allow ports to have
- * separate IRQ handlers.
- *
- * LOCKING:
- * Inherited from calling layer (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-static int ata_request_legacy_irqs(struct ata_host *host,
- irq_handler_t const *handler,
- const unsigned int *irq_flags,
- void * const *dev_id)
-{
- struct device *gdev = host->dev;
- struct ata_legacy_devres *legacy_dr;
- int i, rc;
-
- legacy_dr = devres_find(host->dev, ata_legacy_release, NULL, NULL);
- BUG_ON(!legacy_dr);
-
- for (i = 0; i < 2; i++) {
- unsigned int irq;
-
- /* FIXME: ATA_*_IRQ() should take generic device not pci_dev */
- if (i == 0)
- irq = ATA_PRIMARY_IRQ(to_pci_dev(gdev));
- else
- irq = ATA_SECONDARY_IRQ(to_pci_dev(gdev));
-
- if (!(legacy_dr->mask & (1 << i)))
- continue;
-
- if (!handler[i]) {
- dev_printk(KERN_ERR, gdev,
- "NULL handler specified for port %d\n", i);
- rc = -EINVAL;
- goto err_out;
- }
-
- rc = request_irq(irq, handler[i], irq_flags[i], DRV_NAME,
- dev_id[i]);
- if (rc) {
- dev_printk(KERN_ERR, gdev,
- "irq %u request failed (errno=%d)\n", irq, rc);
- goto err_out;
- }
-
- /* record irq allocation in legacy_dr */
- legacy_dr->irq[i] = irq;
- legacy_dr->irq_dev_id[i] = dev_id[i];
-
- /* only used to print info */
- if (i == 0)
- host->irq = irq;
- else
- host->irq2 = irq;
- }
-
- return 0;
-
- err_out:
- ata_legacy_free_irqs(legacy_dr);
- return rc;
-}
-
/**
* ata_pci_init_one - Initialize/register PCI IDE host controller
* @pdev: Controller to be initialized
@@ -1029,35 +820,11 @@ int ata_pci_init_one(struct pci_dev *pdev,
#endif
}
- /* alloc and init host */
- host = ata_host_alloc_pinfo(dev, ppi, 2);
- if (!host) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to allocate ATA host\n");
- rc = -ENOMEM;
+ /* prepare host */
+ rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
+ if (rc)
goto err_out;
- }
- if (!legacy_mode) {
- rc = ata_pci_init_native_host(host);
- if (rc)
- goto err_out;
- } else {
- int was_busy = 0;
-
- rc = ata_init_legacy_host(host, &was_busy);
- if (was_busy)
- pcim_pin_device(pdev);
- if (rc)
- goto err_out;
-
- /* request respective PCI regions, may fail */
- rc = pci_request_region(pdev, 1, DRV_NAME);
- rc = pci_request_region(pdev, 3, DRV_NAME);
- }
-
- /* init BMDMA, may fail */
- ata_pci_init_bmdma(host);
pci_set_master(pdev);
/* start host and request IRQ */
@@ -1068,17 +835,28 @@ int ata_pci_init_one(struct pci_dev *pdev,
if (!legacy_mode) {
rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
IRQF_SHARED, DRV_NAME, host);
+ if (rc)
+ goto err_out;
host->irq = pdev->irq;
} else {
- irq_handler_t handler[2] = { host->ops->irq_handler,
- host->ops->irq_handler };
- unsigned int irq_flags[2] = { IRQF_SHARED, IRQF_SHARED };
- void *dev_id[2] = { host, host };
+ if (!ata_port_is_dummy(host->ports[0])) {
+ host->irq = ATA_PRIMARY_IRQ(pdev);
+ rc = devm_request_irq(dev, host->irq,
+ pi->port_ops->irq_handler,
+ IRQF_SHARED, DRV_NAME, host);
+ if (rc)
+ goto err_out;
+ }
- rc = ata_request_legacy_irqs(host, handler, irq_flags, dev_id);
+ if (!ata_port_is_dummy(host->ports[1])) {
+ host->irq2 = ATA_SECONDARY_IRQ(pdev);
+ rc = devm_request_irq(dev, host->irq2,
+ pi->port_ops->irq_handler,
+ IRQF_SHARED, DRV_NAME, host);
+ if (rc)
+ goto err_out;
+ }
}
- if (rc)
- goto err_out;
/* register */
rc = ata_host_register(host, pi->sht);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ba17fc5f2e99..564cd234c805 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -112,7 +112,7 @@ static inline int ata_acpi_on_devcfg(struct ata_device *adev) { return 0; }
/* libata-scsi.c */
extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
-extern void ata_scsi_scan_host(struct ata_port *ap);
+extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -151,6 +151,7 @@ extern int ata_bus_probe(struct ata_port *ap);
extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_port_wait_eh(struct ata_port *ap);
+extern void ata_eh_fastdrain_timerfn(unsigned long arg);
extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
/* libata-sff.c */
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 30c4276ec882..010436795d20 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -455,23 +455,21 @@ static struct ata_port_operations ali_c5_port_ops = {
static void ali_init_chipset(struct pci_dev *pdev)
{
- u8 rev, tmp;
+ u8 tmp;
struct pci_dev *north, *isa_bridge;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-
/*
* The chipset revision selects the driver operations and
* mode data.
*/
- if (rev >= 0x20 && rev < 0xC2) {
+ if (pdev->revision >= 0x20 && pdev->revision < 0xC2) {
/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
pci_read_config_byte(pdev, 0x4B, &tmp);
/* Clear CD-ROM DMA write bit */
tmp &= 0x7F;
pci_write_config_byte(pdev, 0x4B, tmp);
- } else if (rev >= 0xC2) {
+ } else if (pdev->revision >= 0xC2) {
/* Enable cable detection logic */
pci_read_config_byte(pdev, 0x4B, &tmp);
pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
@@ -483,21 +481,21 @@ static void ali_init_chipset(struct pci_dev *pdev)
/* Configure the ALi bridge logic. For non ALi rely on BIOS.
Set the south bridge enable bit */
pci_read_config_byte(isa_bridge, 0x79, &tmp);
- if (rev == 0xC2)
+ if (pdev->revision == 0xC2)
pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
- else if (rev > 0xC2 && rev < 0xC5)
+ else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
}
- if (rev >= 0x20) {
+ if (pdev->revision >= 0x20) {
/*
* CD_ROM DMA on (0x53 bit 0). Enable this even if we want
* to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
* via 0x54/55.
*/
pci_read_config_byte(pdev, 0x53, &tmp);
- if (rev <= 0x20)
+ if (pdev->revision <= 0x20)
tmp &= ~0x02;
- if (rev >= 0xc7)
+ if (pdev->revision >= 0xc7)
tmp |= 0x03;
else
tmp |= 0x01; /* CD_ROM enable for DMA */
@@ -579,25 +577,23 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { NULL, NULL };
- u8 rev, tmp;
+ u8 tmp;
struct pci_dev *isa_bridge;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-
/*
* The chipset revision selects the driver operations and
* mode data.
*/
- if (rev < 0x20) {
+ if (pdev->revision < 0x20) {
ppi[0] = &info_early;
- } else if (rev < 0xC2) {
+ } else if (pdev->revision < 0xC2) {
ppi[0] = &info_20;
- } else if (rev == 0xC2) {
+ } else if (pdev->revision == 0xC2) {
ppi[0] = &info_c2;
- } else if (rev == 0xC3) {
+ } else if (pdev->revision == 0xC3) {
ppi[0] = &info_c3;
- } else if (rev == 0xC4) {
+ } else if (pdev->revision == 0xC4) {
ppi[0] = &info_c4;
} else
ppi[0] = &info_c5;
@@ -605,7 +601,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ali_init_chipset(pdev);
isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
- if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
+ if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
/* Are we paired with a UDMA capable chip */
pci_read_config_byte(isa_bridge, 0x5E, &tmp);
if ((tmp & 0x1E) == 0x12)
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index b9c44c575ce3..b09facad63e1 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -623,17 +623,15 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { NULL, NULL };
static int printed_version;
int type = id->driver_data;
- u8 rev;
u8 fifo;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
pci_read_config_byte(pdev, 0x41, &fifo);
/* Check for AMD7409 without swdma errata and if found adjust type */
- if (type == 1 && rev > 0x7)
+ if (type == 1 && pdev->revision > 0x7)
type = 2;
/* Check for AMD7411 */
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 6bf037d82b5a..7dc76e71bd55 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -275,7 +275,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
for (i = 0; i < 2; i++) {
static const int irq[] = { 14, 15 };
- struct ata_port *ap = host->ports[0];
+ struct ata_port *ap = host->ports[i];
if (ata_port_is_dummy(ap))
continue;
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 3fca5898642b..68f150a1e2f4 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -266,7 +266,7 @@ static int cs5530_init_chip(void)
}
pci_set_master(cs5530_0);
- pci_set_mwi(cs5530_0);
+ pci_try_set_mwi(cs5530_0);
/*
* Set PCI CacheLineSize to 16-bytes:
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index d928c9105034..be0f05efac6d 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -23,7 +23,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x3"
-#define DRV_VERSION "0.4.3"
+#define DRV_VERSION "0.5.3"
/**
* hpt3x3_set_piomode - PIO setup
@@ -52,6 +52,7 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
pci_write_config_dword(pdev, 0x48, r2);
}
+#if defined(CONFIG_PATA_HPT3X3_DMA)
/**
* hpt3x3_set_dmamode - DMA timing setup
* @ap: ATA interface
@@ -59,6 +60,9 @@ static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
*
* Set up the channel for MWDMA or UDMA modes. Much the same as with
* PIO, load the mode number and then set MWDMA or UDMA flag.
+ *
+ * 0x44 : bit 0-2 master mode, 3-5 slave mode, etc
+ * 0x48 : bit 4/0 DMA/UDMA bit 5/1 for slave etc
*/
static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
@@ -76,13 +80,26 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
if (adev->dma_mode >= XFER_UDMA_0)
- r2 |= 0x01 << dn; /* Ultra mode */
+ r2 |= (0x10 << dn); /* Ultra mode */
else
- r2 |= 0x10 << dn; /* MWDMA */
+ r2 |= (0x01 << dn); /* MWDMA */
pci_write_config_dword(pdev, 0x44, r1);
pci_write_config_dword(pdev, 0x48, r2);
}
+#endif /* CONFIG_PATA_HPT3X3_DMA */
+
+/**
+ * hpt3x3_atapi_dma - ATAPI DMA check
+ * @qc: Queued command
+ *
+ * Just say no - we don't do ATAPI DMA
+ */
+
+static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc)
+{
+ return 1;
+}
static struct scsi_host_template hpt3x3_sht = {
.module = THIS_MODULE,
@@ -105,7 +122,9 @@ static struct scsi_host_template hpt3x3_sht = {
static struct ata_port_operations hpt3x3_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = hpt3x3_set_piomode,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
.set_dmamode = hpt3x3_set_dmamode,
+#endif
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
@@ -124,6 +143,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
+ .check_atapi_dma= hpt3x3_atapi_dma,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
@@ -158,32 +178,79 @@ static void hpt3x3_init_chipset(struct pci_dev *dev)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
}
-
/**
* hpt3x3_init_one - Initialise an HPT343/363
- * @dev: PCI device
+ * @pdev: PCI device
* @id: Entry in match table
*
- * Perform basic initialisation. The chip has a quirk that it won't
- * function unless it is at XX00. The old ATA driver touched this up
- * but we leave it for pci quirks to do properly.
+ * Perform basic initialisation. We set the device up so we access all
+ * ports via BAR4. This is necessary to work around errata.
*/
-static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ static int printed_version;
static const struct ata_port_info info = {
.sht = &hpt3x3_sht,
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f,
+#if defined(CONFIG_PATA_HPT3X3_DMA)
+ /* Further debug needed */
.mwdma_mask = 0x07,
.udma_mask = 0x07,
+#endif
.port_ops = &hpt3x3_port_ops
};
+ /* Register offsets of taskfiles in BAR4 area */
+ static const u8 offset_cmd[2] = { 0x20, 0x28 };
+ static const u8 offset_ctl[2] = { 0x36, 0x3E };
const struct ata_port_info *ppi[] = { &info, NULL };
-
- hpt3x3_init_chipset(dev);
- /* Now kick off ATA set up */
- return ata_pci_init_one(dev, ppi);
+ struct ata_host *host;
+ int i, rc;
+ void __iomem *base;
+
+ hpt3x3_init_chipset(pdev);
+
+ if (!printed_version++)
+ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
+ if (!host)
+ return -ENOMEM;
+ /* acquire resources and fill host */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ /* Everything is relative to BAR4 if we set up this way */
+ rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+ host->iomap = pcim_iomap_table(pdev);
+ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+ if (rc)
+ return rc;
+
+ base = host->iomap[4]; /* Bus mastering base */
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_ioports *ioaddr = &host->ports[i]->ioaddr;
+
+ ioaddr->cmd_addr = base + offset_cmd[i];
+ ioaddr->altstatus_addr =
+ ioaddr->ctl_addr = base + offset_ctl[i];
+ ioaddr->scr_addr = NULL;
+ ata_std_ports(ioaddr);
+ ioaddr->bmdma_addr = base + 8 * i;
+ }
+ pci_set_master(pdev);
+ return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
+ &hpt3x3_sht);
}
#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index b67bbf6516ba..430673be1df7 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -587,8 +587,7 @@ static int it821x_port_start(struct ata_port *ap)
itdev->want[1][1] = ATA_ANY;
itdev->last_device = -1;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &conf);
- if (conf == 0x10) {
+ if (pdev->revision == 0x10) {
itdev->timing10 = 1;
/* Need to disable ATAPI DMA for this case */
if (!itdev->smart)
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 368fac7d168b..182e83c9047b 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -467,13 +467,27 @@ mpc52xx_ata_remove(struct of_device *op)
static int
mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
{
- return 0; /* FIXME : What to do here ? */
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+
+ return ata_host_suspend(host, state);
}
static int
mpc52xx_ata_resume(struct of_device *op)
{
- return 0; /* FIXME : What to do here ? */
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ struct mpc52xx_ata_priv *priv = host->private_data;
+ int rv;
+
+ rv = mpc52xx_ata_hw_init(priv);
+ if (rv) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ return rv;
+ }
+
+ ata_host_resume(host);
+
+ return 0;
}
#endif
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index a56257c98fe5..6da23feed039 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -382,6 +382,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 79f841bca593..a909f793ffc1 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -213,8 +213,9 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
pata_platform_setup_port(&ap->ioaddr, pp_info);
/* activate */
- return ata_host_activate(host, platform_get_irq(pdev, 0), ata_interrupt,
- pp_info->irq_flags, &pata_platform_sht);
+ return ata_host_activate(host, platform_get_irq(pdev, 0),
+ ata_interrupt, pp_info ? pp_info->irq_flags
+ : 0, &pata_platform_sht);
}
/**
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 61502bc7bf1d..36cdbd2b0bd5 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -258,6 +258,17 @@ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
}
+unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+ /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
+ if (adev->class == ATA_DEV_ATAPI &&
+ (mask & (0xE0 << ATA_SHIFT_UDMA))) {
+ printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
+ mask &= ~(0xE0 << ATA_SHIFT_UDMA);
+ }
+ return ata_pci_default_filter(adev, mask);
+}
+
/**
* scc_tf_load - send taskfile registers to host controller
* @ap: Port to which output is sent
@@ -352,6 +363,8 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
tf->hob_lbal = in_be32(ioaddr->lbal_addr);
tf->hob_lbam = in_be32(ioaddr->lbam_addr);
tf->hob_lbah = in_be32(ioaddr->lbah_addr);
+ out_be32(ioaddr->ctl_addr, tf->ctl);
+ ap->last_ctl = tf->ctl;
}
}
@@ -724,22 +737,36 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
static u8 scc_bmdma_status (struct ata_port *ap)
{
- u8 host_stat;
void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
- host_stat = in_be32(mmio + SCC_DMA_STATUS);
-
- /* Workaround for PTERADD: emulate DMA_INTR when
- * - IDE_STATUS[ERR] = 1
- * - INT_STATUS[INTRQ] = 1
- * - DMA_STATUS[IORACTA] = 1
- */
- if (!(host_stat & ATA_DMA_INTR)) {
- u32 int_status = in_be32(mmio + SCC_DMA_INTST);
- if (ata_altstatus(ap) & ATA_ERR &&
- int_status & INTSTS_INTRQ &&
- host_stat & ATA_DMA_ACTIVE)
- host_stat |= ATA_DMA_INTR;
+ u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
+ u32 int_status = in_be32(mmio + SCC_DMA_INTST);
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ static int retry = 0;
+
+ /* return if IOS_SS is cleared */
+ if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
+ return host_stat;
+
+ /* errata A252,A308 workaround: Step4 */
+ if ((ata_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
+ return (host_stat | ATA_DMA_INTR);
+
+ /* errata A308 workaround Step5 */
+ if (int_status & INTSTS_IOIRQS) {
+ host_stat |= ATA_DMA_INTR;
+
+ /* We don't check ATAPI DMA because it is limited to UDMA4 */
+ if ((qc->tf.protocol == ATA_PROT_DMA &&
+ qc->dev->xfer_mode > XFER_UDMA_4)) {
+ if (!(int_status & INTSTS_ACTEINT)) {
+ printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
+ ap->print_id);
+ host_stat |= ATA_DMA_ERR;
+ if (retry++)
+ ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
+ } else
+ retry = 0;
+ }
}
return host_stat;
@@ -892,10 +919,6 @@ static void scc_std_postreset (struct ata_port *ap, unsigned int *classes)
{
DPRINTK("ENTER\n");
- /* re-enable interrupts */
- if (!ap->ops->error_handler)
- ap->ops->irq_on(ap);
-
/* is double-select really necessary? */
if (classes[0] != ATA_DEV_NONE)
ap->ops->dev_select(ap, 1);
@@ -1000,7 +1023,7 @@ static const struct ata_port_operations scc_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = scc_set_piomode,
.set_dmamode = scc_set_dmamode,
- .mode_filter = ata_pci_default_filter,
+ .mode_filter = scc_mode_filter,
.tf_load = scc_tf_load,
.tf_read = scc_tf_read,
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 0231aba51ca4..89691541fe59 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -410,11 +410,8 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev)
static int serverworks_fixup_csb(struct pci_dev *pdev)
{
- u8 rev;
u8 btr;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-
/* Third Channel Test */
if (!(PCI_FUNC(pdev->devfn) & 1)) {
struct pci_dev * findev = NULL;
@@ -456,7 +453,7 @@ static int serverworks_fixup_csb(struct pci_dev *pdev)
if (!(PCI_FUNC(pdev->devfn) & 1))
btr |= 0x2;
else
- btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
+ btr |= (pdev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
pci_write_config_byte(pdev, 0x5A, btr);
return btr;
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 2b4508206a6c..9a829a7cbc60 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -149,6 +149,9 @@ static int sis_pre_reset(struct ata_port *ap, unsigned long deadline)
if (!pci_test_config_bits(pdev, &sis_enable_bits[ap->port_no]))
return -ENOENT;
+ /* Clear the FIFO settings. We can't enable the FIFO until
+ we know we are poking at a disk */
+ pci_write_config_byte(pdev, 0x4B, 0);
return ata_std_prereset(ap, deadline);
}
@@ -928,9 +931,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (host != NULL) {
chipset = sets; /* Match found */
if (sets->device == 0x630) { /* SIS630 */
- u8 host_rev;
- pci_read_config_byte(host, PCI_REVISION_ID, &host_rev);
- if (host_rev >= 0x30) /* 630 ET */
+ if (host->revision >= 0x30) /* 630 ET */
chipset = &sis100_early;
}
break;
@@ -974,7 +975,6 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
u16 trueid;
u8 prefctl;
u8 idecfg;
- u8 sbrev;
/* Try the second unmasking technique */
pci_read_config_byte(pdev, 0x4a, &idecfg);
@@ -987,11 +987,10 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
lpc_bridge = pci_get_slot(pdev->bus, 0x10); /* Bus 0 Dev 2 Fn 0 */
if (lpc_bridge == NULL)
break;
- pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev);
pci_read_config_byte(pdev, 0x49, &prefctl);
pci_dev_put(lpc_bridge);
- if (sbrev == 0x10 && (prefctl & 0x80)) {
+ if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) {
chipset = &sis133_early;
break;
}
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index bde734189623..8c2813aa6cdb 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -270,7 +270,6 @@ static struct ata_port_operations sl82c105_port_ops = {
static int sl82c105_bridge_revision(struct pci_dev *pdev)
{
struct pci_dev *bridge;
- u8 rev;
/*
* The bridge should be part of the same device, but function 0.
@@ -292,10 +291,8 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev)
/*
* We need to find function 0's revision, not function 1
*/
- pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
-
pci_dev_put(bridge);
- return rev;
+ return bridge->revision;
}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index f0cadbe6aa11..f645fe22cd1e 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -506,7 +506,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct pci_dev *isa = NULL;
const struct via_isa_bridge *config;
static int printed_version;
- u8 t;
u8 enable;
u32 timing;
@@ -520,9 +519,8 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
!!(config->flags & VIA_BAD_ID),
config->id, NULL))) {
- pci_read_config_byte(isa, PCI_REVISION_ID, &t);
- if (t >= config->rev_min &&
- t <= config->rev_max)
+ if (isa->revision >= config->rev_min &&
+ isa->revision <= config->rev_max)
break;
pci_dev_put(isa);
}
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 3de183461c3c..a9c948d7604a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -190,34 +190,34 @@ static void inic_reset_port(void __iomem *port_base)
writew(ctl, idma_ctl);
}
-static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
+static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = ap->ioaddr.scr_addr;
void __iomem *addr;
- u32 val;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
- return 0xffffffffU;
+ return -EINVAL;
addr = scr_addr + scr_map[sc_reg] * 4;
- val = readl(scr_addr + scr_map[sc_reg] * 4);
+ *val = readl(scr_addr + scr_map[sc_reg] * 4);
/* this controller has stuck DIAG.N, ignore it */
if (sc_reg == SCR_ERROR)
- val &= ~SERR_PHYRDY_CHG;
- return val;
+ *val &= ~SERR_PHYRDY_CHG;
+ return 0;
}
-static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
void __iomem *scr_addr = ap->ioaddr.scr_addr;
void __iomem *addr;
if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
- return;
+ return -EINVAL;
addr = scr_addr + scr_map[sc_reg] * 4;
writel(val, scr_addr + scr_map[sc_reg] * 4);
+ return 0;
}
/*
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3873b29c80d6..8ec520885b95 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -29,19 +29,12 @@
I distinctly remember a couple workarounds (one related to PCI-X)
are still needed.
- 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane
- probing/error handling in general. MUST HAVE.
-
- 3) Add hotplug support (easy, once new-EH support appears)
-
4) Add NCQ support (easy to intermediate, once new-EH support appears)
5) Investigate problems with PCI Message Signalled Interrupts (MSI).
6) Add port multiplier support (intermediate)
- 7) Test and verify 3.0 Gbps support
-
8) Develop a low-power-consumption strategy, and implement it.
9) [Experiment, low priority] See if ATAPI can be supported using
@@ -108,8 +101,6 @@ enum {
MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
- MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
-
MV_MAX_Q_DEPTH = 32,
MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
@@ -133,18 +124,22 @@ enum {
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
- MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
- ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
+ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
+ ATA_FLAG_PIO_POLLING,
MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
CRQB_FLAG_READ = (1 << 0),
CRQB_TAG_SHIFT = 1,
+ CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
+ CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
CRQB_CMD_ADDR_SHIFT = 8,
CRQB_CMD_CS = (0x2 << 11),
CRQB_CMD_LAST = (1 << 15),
CRPB_FLAG_STATUS_SHIFT = 8,
+ CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
+ CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
EPRD_FLAG_END_OF_TBL = (1 << 31),
@@ -230,31 +225,53 @@ enum {
EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
EDMA_ERR_IRQ_MASK_OFS = 0xc,
- EDMA_ERR_D_PAR = (1 << 0),
- EDMA_ERR_PRD_PAR = (1 << 1),
- EDMA_ERR_DEV = (1 << 2),
- EDMA_ERR_DEV_DCON = (1 << 3),
- EDMA_ERR_DEV_CON = (1 << 4),
- EDMA_ERR_SERR = (1 << 5),
- EDMA_ERR_SELF_DIS = (1 << 7),
- EDMA_ERR_BIST_ASYNC = (1 << 8),
- EDMA_ERR_CRBQ_PAR = (1 << 9),
- EDMA_ERR_CRPB_PAR = (1 << 10),
- EDMA_ERR_INTRL_PAR = (1 << 11),
- EDMA_ERR_IORDY = (1 << 12),
- EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
+ EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
+ EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
+ EDMA_ERR_DEV = (1 << 2), /* device error */
+ EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
+ EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
+ EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
+ EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
+ EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
+ EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
+ EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
+ EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
+ EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
+ EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
+ EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
+ EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
- EDMA_ERR_LNK_DATA_RX = (0xf << 17),
- EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
- EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
- EDMA_ERR_TRANS_PROTO = (1 << 31),
- EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
- EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
- EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
- EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
- EDMA_ERR_LNK_DATA_RX |
- EDMA_ERR_LNK_DATA_TX |
- EDMA_ERR_TRANS_PROTO),
+ EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
+ EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
+ EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
+ EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
+ EDMA_ERR_OVERRUN_5 = (1 << 5),
+ EDMA_ERR_UNDERRUN_5 = (1 << 6),
+ EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
+ EDMA_ERR_PRD_PAR |
+ EDMA_ERR_DEV_DCON |
+ EDMA_ERR_DEV_CON |
+ EDMA_ERR_SERR |
+ EDMA_ERR_SELF_DIS |
+ EDMA_ERR_CRQB_PAR |
+ EDMA_ERR_CRPB_PAR |
+ EDMA_ERR_INTRL_PAR |
+ EDMA_ERR_IORDY |
+ EDMA_ERR_LNK_CTRL_RX_2 |
+ EDMA_ERR_LNK_DATA_RX |
+ EDMA_ERR_LNK_DATA_TX |
+ EDMA_ERR_TRANS_PROTO,
+ EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
+ EDMA_ERR_PRD_PAR |
+ EDMA_ERR_DEV_DCON |
+ EDMA_ERR_DEV_CON |
+ EDMA_ERR_OVERRUN_5 |
+ EDMA_ERR_UNDERRUN_5 |
+ EDMA_ERR_SELF_DIS_5 |
+ EDMA_ERR_CRQB_PAR |
+ EDMA_ERR_CRPB_PAR |
+ EDMA_ERR_INTRL_PAR |
+ EDMA_ERR_IORDY,
EDMA_REQ_Q_BASE_HI_OFS = 0x10,
EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
@@ -267,10 +284,10 @@ enum {
EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
EDMA_RSP_Q_PTR_SHIFT = 3,
- EDMA_CMD_OFS = 0x28,
- EDMA_EN = (1 << 0),
- EDMA_DS = (1 << 1),
- ATA_RST = (1 << 2),
+ EDMA_CMD_OFS = 0x28, /* EDMA command register */
+ EDMA_EN = (1 << 0), /* enable EDMA */
+ EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
+ ATA_RST = (1 << 2), /* reset trans/link/phy */
EDMA_IORDY_TMOUT = 0x34,
EDMA_ARB_CFG = 0x38,
@@ -282,25 +299,28 @@ enum {
MV_HP_ERRATA_60X1B2 = (1 << 3),
MV_HP_ERRATA_60X1C0 = (1 << 4),
MV_HP_ERRATA_XX42A0 = (1 << 5),
- MV_HP_50XX = (1 << 6),
- MV_HP_GEN_IIE = (1 << 7),
+ MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
+ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
+ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
/* Port private flags (pp_flags) */
- MV_PP_FLAG_EDMA_EN = (1 << 0),
- MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
+ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
+ MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
};
-#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
-#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
-#define IS_GEN_I(hpriv) IS_50XX(hpriv)
-#define IS_GEN_II(hpriv) IS_60XX(hpriv)
+#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
+#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
MV_DMA_BOUNDARY = 0xffffffffU,
+ /* mask of register bits containing lower 32 bits
+ * of EDMA request queue DMA address
+ */
EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
+ /* ditto, for response queue */
EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
@@ -352,6 +372,10 @@ struct mv_port_priv {
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl;
dma_addr_t sg_tbl_dma;
+
+ unsigned int req_idx;
+ unsigned int resp_idx;
+
u32 pp_flags;
};
@@ -380,18 +404,19 @@ struct mv_host_priv {
};
static void mv_irq_clear(struct ata_port *ap);
-static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
-static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
-static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
-static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
-static void mv_phy_reset(struct ata_port *ap);
-static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
+static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
+static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
+static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
+static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_eng_timeout(struct ata_port *ap);
+static void mv_error_handler(struct ata_port *ap);
+static void mv_post_int_cmd(struct ata_queued_cmd *qc);
+static void mv_eh_freeze(struct ata_port *ap);
+static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
@@ -415,14 +440,31 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
-static void mv_stop_and_reset(struct ata_port *ap);
-static struct scsi_host_template mv_sht = {
+static struct scsi_host_template mv5_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
- .can_queue = MV_USE_Q_DEPTH,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = MV_MAX_SG_CT,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = 1,
+ .proc_name = DRV_NAME,
+ .dma_boundary = MV_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+ .slave_destroy = ata_scsi_slave_destroy,
+ .bios_param = ata_std_bios_param,
+};
+
+static struct scsi_host_template mv6_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .ioctl = ata_scsi_ioctl,
+ .queuecommand = ata_scsi_queuecmd,
+ .can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = MV_MAX_SG_CT,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
@@ -444,19 +486,21 @@ static const struct ata_port_operations mv5_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
- .phy_reset = mv_phy_reset,
.cable_detect = ata_cable_sata,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.data_xfer = ata_data_xfer,
- .eng_timeout = mv_eng_timeout,
-
.irq_clear = mv_irq_clear,
.irq_on = ata_irq_on,
.irq_ack = ata_irq_ack,
+ .error_handler = mv_error_handler,
+ .post_internal_cmd = mv_post_int_cmd,
+ .freeze = mv_eh_freeze,
+ .thaw = mv_eh_thaw,
+
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -473,19 +517,21 @@ static const struct ata_port_operations mv6_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
- .phy_reset = mv_phy_reset,
.cable_detect = ata_cable_sata,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.data_xfer = ata_data_xfer,
- .eng_timeout = mv_eng_timeout,
-
.irq_clear = mv_irq_clear,
.irq_on = ata_irq_on,
.irq_ack = ata_irq_ack,
+ .error_handler = mv_error_handler,
+ .post_internal_cmd = mv_post_int_cmd,
+ .freeze = mv_eh_freeze,
+ .thaw = mv_eh_thaw,
+
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
@@ -502,19 +548,21 @@ static const struct ata_port_operations mv_iie_ops = {
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
- .phy_reset = mv_phy_reset,
.cable_detect = ata_cable_sata,
.qc_prep = mv_qc_prep_iie,
.qc_issue = mv_qc_issue,
.data_xfer = ata_data_xfer,
- .eng_timeout = mv_eng_timeout,
-
.irq_clear = mv_irq_clear,
.irq_on = ata_irq_on,
.irq_ack = ata_irq_ack,
+ .error_handler = mv_error_handler,
+ .post_internal_cmd = mv_post_int_cmd,
+ .freeze = mv_eh_freeze,
+ .thaw = mv_eh_thaw,
+
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
@@ -530,38 +578,38 @@ static const struct ata_port_info mv_port_info[] = {
.port_ops = &mv5_ops,
},
{ /* chip_508x */
- .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+ .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_5080 */
- .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+ .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv5_ops,
},
{ /* chip_604x */
- .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_608x */
- .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
- MV_FLAG_DUAL_HC),
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ MV_FLAG_DUAL_HC,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_6042 */
- .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
- .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
@@ -709,6 +757,46 @@ static void mv_irq_clear(struct ata_port *ap)
{
}
+static void mv_set_edma_ptrs(void __iomem *port_mmio,
+ struct mv_host_priv *hpriv,
+ struct mv_port_priv *pp)
+{
+ u32 index;
+
+ /*
+ * initialize request queue
+ */
+ index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
+
+ WARN_ON(pp->crqb_dma & 0x3ff);
+ writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
+ writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
+ port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+
+ if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+ writelfl((pp->crqb_dma & 0xffffffff) | index,
+ port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+ else
+ writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+
+ /*
+ * initialize response queue
+ */
+ index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
+
+ WARN_ON(pp->crpb_dma & 0xff);
+ writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+
+ if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
+ writelfl((pp->crpb_dma & 0xffffffff) | index,
+ port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+ else
+ writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+
+ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
+ port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+}
+
/**
* mv_start_dma - Enable eDMA engine
* @base: port base address
@@ -720,9 +808,15 @@ static void mv_irq_clear(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
-static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
+static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
+ struct mv_port_priv *pp)
{
- if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+ /* clear EDMA event indicators, if any */
+ writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ mv_set_edma_ptrs(base, hpriv, pp);
+
writelfl(EDMA_EN, base + EDMA_CMD_OFS);
pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
}
@@ -730,7 +824,7 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
}
/**
- * mv_stop_dma - Disable eDMA engine
+ * __mv_stop_dma - Disable eDMA engine
* @ap: ATA channel to manipulate
*
* Verify the local cache of the eDMA state is accurate with a
@@ -739,14 +833,14 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
* LOCKING:
* Inherited from caller.
*/
-static void mv_stop_dma(struct ata_port *ap)
+static int __mv_stop_dma(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
u32 reg;
- int i;
+ int i, err = 0;
- if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
+ if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
/* Disable EDMA if active. The disable bit auto clears.
*/
writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
@@ -758,16 +852,30 @@ static void mv_stop_dma(struct ata_port *ap)
/* now properly wait for the eDMA to stop */
for (i = 1000; i > 0; i--) {
reg = readl(port_mmio + EDMA_CMD_OFS);
- if (!(EDMA_EN & reg)) {
+ if (!(reg & EDMA_EN))
break;
- }
+
udelay(100);
}
- if (EDMA_EN & reg) {
+ if (reg & EDMA_EN) {
ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
- /* FIXME: Consider doing a reset here to recover */
+ err = -EIO;
}
+
+ return err;
+}
+
+static int mv_stop_dma(struct ata_port *ap)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ rc = __mv_stop_dma(ap);
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+
+ return rc;
}
#ifdef ATA_DEBUG
@@ -866,30 +974,35 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
return ofs;
}
-static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
+static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
- if (0xffffffffU != ofs)
- return readl(mv_ap_base(ap) + ofs);
- else
- return (u32) ofs;
+ if (ofs != 0xffffffffU) {
+ *val = readl(mv_ap_base(ap) + ofs);
+ return 0;
+ } else
+ return -EINVAL;
}
-static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
+static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
unsigned int ofs = mv_scr_offset(sc_reg_in);
- if (0xffffffffU != ofs)
+ if (ofs != 0xffffffffU) {
writelfl(val, mv_ap_base(ap) + ofs);
+ return 0;
+ } else
+ return -EINVAL;
}
-static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
+static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
+ void __iomem *port_mmio)
{
u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
/* set up non-NCQ EDMA configuration */
- cfg &= ~(1 << 9); /* disable equeue */
+ cfg &= ~(1 << 9); /* disable eQue */
if (IS_GEN_I(hpriv)) {
cfg &= ~0x1f; /* clear queue depth */
@@ -909,7 +1022,7 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
cfg |= (1 << 18); /* enab early completion */
cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
- cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
+ cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
}
writelfl(cfg, port_mmio + EDMA_CFG_OFS);
@@ -933,6 +1046,7 @@ static int mv_port_start(struct ata_port *ap)
void __iomem *port_mmio = mv_ap_base(ap);
void *mem;
dma_addr_t mem_dma;
+ unsigned long flags;
int rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
@@ -971,28 +1085,13 @@ static int mv_port_start(struct ata_port *ap)
pp->sg_tbl = mem;
pp->sg_tbl_dma = mem_dma;
- mv_edma_cfg(hpriv, port_mmio);
-
- writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
- writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
- port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-
- if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
- writelfl(pp->crqb_dma & 0xffffffff,
- port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
- else
- writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+ spin_lock_irqsave(&ap->host->lock, flags);
- writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
+ mv_edma_cfg(ap, hpriv, port_mmio);
- if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
- writelfl(pp->crpb_dma & 0xffffffff,
- port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
- else
- writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
+ mv_set_edma_ptrs(port_mmio, hpriv, pp);
- writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
- port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+ spin_unlock_irqrestore(&ap->host->lock, flags);
/* Don't turn on EDMA here...do it before DMA commands only. Else
* we'll be unable to send non-data, PIO, etc due to restricted access
@@ -1013,11 +1112,7 @@ static int mv_port_start(struct ata_port *ap)
*/
static void mv_port_stop(struct ata_port *ap)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ap->host->lock, flags);
mv_stop_dma(ap);
- spin_unlock_irqrestore(&ap->host->lock, flags);
}
/**
@@ -1055,11 +1150,6 @@ static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
return n_sg;
}
-static inline unsigned mv_inc_q_index(unsigned index)
-{
- return (index + 1) & MV_MAX_Q_DEPTH_MASK;
-}
-
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
@@ -1088,7 +1178,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
u16 flags = 0;
unsigned in_index;
- if (ATA_PROT_DMA != qc->tf.protocol)
+ if (qc->tf.protocol != ATA_PROT_DMA)
return;
/* Fill in command request block
@@ -1097,10 +1187,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
+ flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
- /* get current queue index from hardware */
- in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
- >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ /* get current queue index from software */
+ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
pp->crqb[in_index].sg_addr =
cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
@@ -1180,7 +1270,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
unsigned in_index;
u32 flags = 0;
- if (ATA_PROT_DMA != qc->tf.protocol)
+ if (qc->tf.protocol != ATA_PROT_DMA)
return;
/* Fill in Gen IIE command request block
@@ -1190,10 +1280,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
+ flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
+ what we use as our tag */
- /* get current queue index from hardware */
- in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
- >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ /* get current queue index from software */
+ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
@@ -1241,83 +1332,41 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
*/
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
- void __iomem *port_mmio = mv_ap_base(qc->ap);
- struct mv_port_priv *pp = qc->ap->private_data;
- unsigned in_index;
- u32 in_ptr;
+ struct ata_port *ap = qc->ap;
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ u32 in_index;
- if (ATA_PROT_DMA != qc->tf.protocol) {
+ if (qc->tf.protocol != ATA_PROT_DMA) {
/* We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
* shadow block, etc registers.
*/
- mv_stop_dma(qc->ap);
+ __mv_stop_dma(ap);
return ata_qc_issue_prot(qc);
}
- in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
- in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ mv_start_dma(port_mmio, hpriv, pp);
+
+ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* until we do queuing, the queue should be empty at this point */
WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
- in_index = mv_inc_q_index(in_index); /* now incr producer index */
+ pp->req_idx++;
- mv_start_dma(port_mmio, pp);
+ in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
/* and write the request in pointer to kick the EDMA to life */
- in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
- in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
- writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+ writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
+ port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
return 0;
}
/**
- * mv_get_crpb_status - get status from most recently completed cmd
- * @ap: ATA channel to manipulate
- *
- * This routine is for use when the port is in DMA mode, when it
- * will be using the CRPB (command response block) method of
- * returning command completion information. We check indices
- * are good, grab status, and bump the response consumer index to
- * prove that we're up to date.
- *
- * LOCKING:
- * Inherited from caller.
- */
-static u8 mv_get_crpb_status(struct ata_port *ap)
-{
- void __iomem *port_mmio = mv_ap_base(ap);
- struct mv_port_priv *pp = ap->private_data;
- unsigned out_index;
- u32 out_ptr;
- u8 ata_status;
-
- out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
- out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
-
- ata_status = le16_to_cpu(pp->crpb[out_index].flags)
- >> CRPB_FLAG_STATUS_SHIFT;
-
- /* increment our consumer index... */
- out_index = mv_inc_q_index(out_index);
-
- /* and, until we do NCQ, there should only be 1 CRPB waiting */
- WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
- >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
-
- /* write out our inc'd consumer index so EDMA knows we're caught up */
- out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
- out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
- writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
-
- /* Return ATA status register for completed CRPB */
- return ata_status;
-}
-
-/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
* @reset_allowed: bool: 0 == don't trigger from reset here
@@ -1331,30 +1380,188 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
-static void mv_err_intr(struct ata_port *ap, int reset_allowed)
+static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
void __iomem *port_mmio = mv_ap_base(ap);
- u32 edma_err_cause, serr = 0;
+ u32 edma_err_cause, eh_freeze_mask, serr = 0;
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+ unsigned int action = 0, err_mask = 0;
+ struct ata_eh_info *ehi = &ap->eh_info;
- edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+ ata_ehi_clear_desc(ehi);
- if (EDMA_ERR_SERR & edma_err_cause) {
+ if (!edma_enabled) {
+ /* just a guess: do we need to do this? should we
+ * expand this, and do it in all cases?
+ */
sata_scr_read(ap, SCR_ERROR, &serr);
sata_scr_write_flush(ap, SCR_ERROR, serr);
}
- if (EDMA_ERR_SELF_DIS & edma_err_cause) {
- struct mv_port_priv *pp = ap->private_data;
- pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+
+ edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
+
+ /*
+ * all generations share these EDMA error cause bits
+ */
+
+ if (edma_err_cause & EDMA_ERR_DEV)
+ err_mask |= AC_ERR_DEV;
+ if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
+ EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
+ EDMA_ERR_INTRL_PAR)) {
+ err_mask |= AC_ERR_ATA_BUS;
+ action |= ATA_EH_HARDRESET;
+ ata_ehi_push_desc(ehi, "parity error");
+ }
+ if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
+ "dev disconnect" : "dev connect");
+ }
+
+ if (IS_GEN_I(hpriv)) {
+ eh_freeze_mask = EDMA_EH_FREEZE_5;
+
+ if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
+ struct mv_port_priv *pp = ap->private_data;
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ ata_ehi_push_desc(ehi, "EDMA self-disable");
+ }
+ } else {
+ eh_freeze_mask = EDMA_EH_FREEZE;
+
+ if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+ struct mv_port_priv *pp = ap->private_data;
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ ata_ehi_push_desc(ehi, "EDMA self-disable");
+ }
+
+ if (edma_err_cause & EDMA_ERR_SERR) {
+ sata_scr_read(ap, SCR_ERROR, &serr);
+ sata_scr_write_flush(ap, SCR_ERROR, serr);
+ err_mask = AC_ERR_ATA_BUS;
+ action |= ATA_EH_HARDRESET;
+ }
}
- DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
- "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
/* Clear EDMA now that SERR cleanup done */
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
- /* check for fatal here and recover if needed */
- if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
- mv_stop_and_reset(ap);
+ if (!err_mask) {
+ err_mask = AC_ERR_OTHER;
+ action |= ATA_EH_HARDRESET;
+ }
+
+ ehi->serror |= serr;
+ ehi->action |= action;
+
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ if (edma_err_cause & eh_freeze_mask)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+}
+
+static void mv_intr_pio(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ u8 ata_status;
+
+ /* ignore spurious intr if drive still BUSY */
+ ata_status = readb(ap->ioaddr.status_addr);
+ if (unlikely(ata_status & ATA_BUSY))
+ return;
+
+ /* get active ATA command */
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (unlikely(!qc)) /* no active tag */
+ return;
+ if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
+ return;
+
+ /* and finally, complete the ATA command */
+ qc->err_mask |= ac_err_mask(ata_status);
+ ata_qc_complete(qc);
+}
+
+static void mv_intr_edma(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+ u32 out_index, in_index;
+ bool work_done = false;
+
+ /* get h/w response queue pointer */
+ in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
+ >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+
+ while (1) {
+ u16 status;
+ unsigned int tag;
+
+ /* get s/w response queue last-read pointer, and compare */
+ out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
+ if (in_index == out_index)
+ break;
+
+ /* 50xx: get active ATA command */
+ if (IS_GEN_I(hpriv))
+ tag = ap->active_tag;
+
+ /* Gen II/IIE: get active ATA command via tag, to enable
+ * support for queueing. this works transparently for
+ * queued and non-queued modes.
+ */
+ else if (IS_GEN_II(hpriv))
+ tag = (le16_to_cpu(pp->crpb[out_index].id)
+ >> CRPB_IOID_SHIFT_6) & 0x3f;
+
+ else /* IS_GEN_IIE */
+ tag = (le16_to_cpu(pp->crpb[out_index].id)
+ >> CRPB_IOID_SHIFT_7) & 0x3f;
+
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
+ * bits (WARNING: might not necessarily be associated
+ * with this command), which -should- be clear
+ * if all is well
+ */
+ status = le16_to_cpu(pp->crpb[out_index].flags);
+ if (unlikely(status & 0xff)) {
+ mv_err_intr(ap, qc);
+ return;
+ }
+
+ /* and finally, complete the ATA command */
+ if (qc) {
+ qc->err_mask |=
+ ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
+ ata_qc_complete(qc);
+ }
+
+ /* advance software response queue pointer, to
+ * indicate (after the loop completes) to hardware
+ * that we have consumed a response queue entry.
+ */
+ work_done = true;
+ pp->resp_idx++;
+ }
+
+ if (work_done)
+ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
+ (out_index << EDMA_RSP_Q_PTR_SHIFT),
+ port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
@@ -1377,10 +1584,8 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
- struct ata_queued_cmd *qc;
u32 hc_irq_cause;
- int shift, port, port0, hard_port, handled;
- unsigned int err_mask;
+ int port, port0;
if (hc == 0)
port0 = 0;
@@ -1389,79 +1594,95 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
/* we'll need the HC success int register in most cases */
hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
- if (hc_irq_cause)
- writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+ if (!hc_irq_cause)
+ return;
+
+ writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
hc,relevant,hc_irq_cause);
for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
- u8 ata_status = 0;
struct ata_port *ap = host->ports[port];
struct mv_port_priv *pp = ap->private_data;
+ int have_err_bits, hard_port, shift;
+
+ if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
+ continue;
+
+ shift = port << 1; /* (port * 2) */
+ if (port >= MV_PORTS_PER_HC) {
+ shift++; /* skip bit 8 in the HC Main IRQ reg */
+ }
+ have_err_bits = ((PORT0_ERR << shift) & relevant);
+
+ if (unlikely(have_err_bits)) {
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
+ continue;
+
+ mv_err_intr(ap, qc);
+ continue;
+ }
hard_port = mv_hardport_from_port(port); /* range 0..3 */
- handled = 0; /* ensure ata_status is set if handled++ */
- /* Note that DEV_IRQ might happen spuriously during EDMA,
- * and should be ignored in such cases.
- * The cause of this is still under investigation.
- */
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- /* EDMA: check for response queue interrupt */
- if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
- ata_status = mv_get_crpb_status(ap);
- handled = 1;
- }
+ if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
+ mv_intr_edma(ap);
} else {
- /* PIO: check for device (drive) interrupt */
- if ((DEV_IRQ << hard_port) & hc_irq_cause) {
- ata_status = readb(ap->ioaddr.status_addr);
- handled = 1;
- /* ignore spurious intr if drive still BUSY */
- if (ata_status & ATA_BUSY) {
- ata_status = 0;
- handled = 0;
- }
- }
+ if ((DEV_IRQ << hard_port) & hc_irq_cause)
+ mv_intr_pio(ap);
}
+ }
+ VPRINTK("EXIT\n");
+}
- if (ap && (ap->flags & ATA_FLAG_DISABLED))
- continue;
+static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
+{
+ struct ata_port *ap;
+ struct ata_queued_cmd *qc;
+ struct ata_eh_info *ehi;
+ unsigned int i, err_mask, printed = 0;
+ u32 err_cause;
- err_mask = ac_err_mask(ata_status);
+ err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
- shift = port << 1; /* (port * 2) */
- if (port >= MV_PORTS_PER_HC) {
- shift++; /* skip bit 8 in the HC Main IRQ reg */
- }
- if ((PORT0_ERR << shift) & relevant) {
- mv_err_intr(ap, 1);
- err_mask |= AC_ERR_OTHER;
- handled = 1;
- }
+ dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
+ err_cause);
+
+ DPRINTK("All regs @ PCI error\n");
+ mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
- if (handled) {
+ writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
+
+ for (i = 0; i < host->n_ports; i++) {
+ ap = host->ports[i];
+ if (!ata_port_offline(ap)) {
+ ehi = &ap->eh_info;
+ ata_ehi_clear_desc(ehi);
+ if (!printed++)
+ ata_ehi_push_desc(ehi,
+ "PCI err cause 0x%08x", err_cause);
+ err_mask = AC_ERR_HOST_BUS;
+ ehi->action = ATA_EH_HARDRESET;
qc = ata_qc_from_tag(ap, ap->active_tag);
- if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
- VPRINTK("port %u IRQ found for qc, "
- "ata_status 0x%x\n", port,ata_status);
- /* mark qc status appropriately */
- if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
- qc->err_mask |= err_mask;
- ata_qc_complete(qc);
- }
- }
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ ata_port_freeze(ap);
}
}
- VPRINTK("EXIT\n");
}
/**
- * mv_interrupt -
+ * mv_interrupt - Main interrupt event handler
* @irq: unused
* @dev_instance: private data; in this case the host structure
- * @regs: unused
*
* Read the read only register to determine if any host
* controllers have pending interrupts. If so, call lower level
@@ -1477,7 +1698,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
struct ata_host *host = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
- struct mv_host_priv *hpriv;
u32 irq_stat;
irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
@@ -1491,34 +1711,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
n_hcs = mv_get_hc_count(host->ports[0]->flags);
spin_lock(&host->lock);
+ if (unlikely(irq_stat & PCI_ERR)) {
+ mv_pci_error(host, mmio);
+ handled = 1;
+ goto out_unlock; /* skip all other HC irq handling */
+ }
+
for (hc = 0; hc < n_hcs; hc++) {
u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
if (relevant) {
mv_host_intr(host, relevant, hc);
- handled++;
- }
- }
-
- hpriv = host->private_data;
- if (IS_60XX(hpriv)) {
- /* deal with the interrupt coalescing bits */
- if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
- writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
- writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
- writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
+ handled = 1;
}
}
- if (PCI_ERR & irq_stat) {
- printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
- readl(mmio + PCI_IRQ_CAUSE_OFS));
-
- DPRINTK("All regs @ PCI error\n");
- mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
-
- writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
- handled++;
- }
+out_unlock:
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
@@ -1549,36 +1756,37 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
return ofs;
}
-static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
+static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
- if (ofs != 0xffffffffU)
- return readl(addr + ofs);
- else
- return (u32) ofs;
+ if (ofs != 0xffffffffU) {
+ *val = readl(addr + ofs);
+ return 0;
+ } else
+ return -EINVAL;
}
-static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
+static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
- if (ofs != 0xffffffffU)
+ if (ofs != 0xffffffffU) {
writelfl(val, addr + ofs);
+ return 0;
+ } else
+ return -EINVAL;
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
- u8 rev_id;
int early_5080;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
-
- early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
+ early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
if (!early_5080) {
u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
@@ -1907,7 +2115,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
- if (IS_60XX(hpriv)) {
+ if (IS_GEN_II(hpriv)) {
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
ifctl |= (1 << 7); /* enable gen2i speed */
ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
@@ -1923,32 +2131,12 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
hpriv->ops->phy_errata(hpriv, mmio, port_no);
- if (IS_50XX(hpriv))
+ if (IS_GEN_I(hpriv))
mdelay(1);
}
-static void mv_stop_and_reset(struct ata_port *ap)
-{
- struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
-
- mv_stop_dma(ap);
-
- mv_channel_reset(hpriv, mmio, ap->port_no);
-
- __mv_phy_reset(ap, 0);
-}
-
-static inline void __msleep(unsigned int msec, int can_sleep)
-{
- if (can_sleep)
- msleep(msec);
- else
- mdelay(msec);
-}
-
/**
- * __mv_phy_reset - Perform eDMA reset followed by COMRESET
+ * mv_phy_reset - Perform eDMA reset followed by COMRESET
* @ap: ATA channel to manipulate
*
* Part of this is taken from __sata_phy_reset and modified to
@@ -1958,57 +2146,65 @@ static inline void __msleep(unsigned int msec, int can_sleep)
* Inherited from caller. This is coded to safe to call at
* interrupt level, i.e. it does not sleep.
*/
-static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
+static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
+ unsigned long deadline)
{
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = mv_ap_base(ap);
- struct ata_taskfile tf;
- struct ata_device *dev = &ap->device[0];
- unsigned long timeout;
int retry = 5;
u32 sstatus;
VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
- DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
- "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
- mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
+#ifdef DEBUG
+ {
+ u32 sstatus, serror, scontrol;
+
+ mv_scr_read(ap, SCR_STATUS, &sstatus);
+ mv_scr_read(ap, SCR_ERROR, &serror);
+ mv_scr_read(ap, SCR_CONTROL, &scontrol);
+ DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
+ "SCtrl 0x%08x\n", sstatus, serror, scontrol);
+ }
+#endif
/* Issue COMRESET via SControl */
comreset_retry:
sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
- __msleep(1, can_sleep);
+ msleep(1);
sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
- __msleep(20, can_sleep);
+ msleep(20);
- timeout = jiffies + msecs_to_jiffies(200);
do {
sata_scr_read(ap, SCR_STATUS, &sstatus);
if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
break;
- __msleep(1, can_sleep);
- } while (time_before(jiffies, timeout));
+ msleep(1);
+ } while (time_before(jiffies, deadline));
/* work around errata */
- if (IS_60XX(hpriv) &&
+ if (IS_GEN_II(hpriv) &&
(sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
(retry-- > 0))
goto comreset_retry;
- DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
- "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
- mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
+#ifdef DEBUG
+ {
+ u32 sstatus, serror, scontrol;
- if (ata_port_online(ap)) {
- ata_port_probe(ap);
- } else {
- sata_scr_read(ap, SCR_STATUS, &sstatus);
- ata_port_printk(ap, KERN_INFO,
- "no device found (phy stat %08x)\n", sstatus);
- ata_port_disable(ap);
+ mv_scr_read(ap, SCR_STATUS, &sstatus);
+ mv_scr_read(ap, SCR_ERROR, &serror);
+ mv_scr_read(ap, SCR_CONTROL, &scontrol);
+ DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
+ "SCtrl 0x%08x\n", sstatus, serror, scontrol);
+ }
+#endif
+
+ if (ata_port_offline(ap)) {
+ *class = ATA_DEV_NONE;
return;
}
@@ -2022,68 +2218,152 @@ comreset_retry:
u8 drv_stat = ata_check_status(ap);
if ((drv_stat != 0x80) && (drv_stat != 0x7f))
break;
- __msleep(500, can_sleep);
+ msleep(500);
if (retry-- <= 0)
break;
+ if (time_after(jiffies, deadline))
+ break;
}
- tf.lbah = readb(ap->ioaddr.lbah_addr);
- tf.lbam = readb(ap->ioaddr.lbam_addr);
- tf.lbal = readb(ap->ioaddr.lbal_addr);
- tf.nsect = readb(ap->ioaddr.nsect_addr);
+ /* FIXME: if we passed the deadline, the following
+ * code probably produces an invalid result
+ */
- dev->class = ata_dev_classify(&tf);
- if (!ata_dev_enabled(dev)) {
- VPRINTK("Port disabled post-sig: No device present.\n");
- ata_port_disable(ap);
- }
+ /* finally, read device signature from TF registers */
+ *class = ata_dev_try_classify(ap, 0, NULL);
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
- pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
VPRINTK("EXIT\n");
}
-static void mv_phy_reset(struct ata_port *ap)
+static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
- __mv_phy_reset(ap, 1);
+ struct mv_port_priv *pp = ap->private_data;
+ struct ata_eh_context *ehc = &ap->eh_context;
+ int rc;
+
+ rc = mv_stop_dma(ap);
+ if (rc)
+ ehc->i.action |= ATA_EH_HARDRESET;
+
+ if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
+ pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
+ ehc->i.action |= ATA_EH_HARDRESET;
+ }
+
+ /* if we're about to do hardreset, nothing more to do */
+ if (ehc->i.action & ATA_EH_HARDRESET)
+ return 0;
+
+ if (ata_port_online(ap))
+ rc = ata_wait_ready(ap, deadline);
+ else
+ rc = -ENODEV;
+
+ return rc;
}
-/**
- * mv_eng_timeout - Routine called by libata when SCSI times out I/O
- * @ap: ATA channel to manipulate
- *
- * Intent is to clear all pending error conditions, reset the
- * chip/bus, fail the command, and move on.
- *
- * LOCKING:
- * This routine holds the host lock while failing the command.
- */
-static void mv_eng_timeout(struct ata_port *ap)
+static int mv_hardreset(struct ata_port *ap, unsigned int *class,
+ unsigned long deadline)
{
+ struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
- struct ata_queued_cmd *qc;
- unsigned long flags;
- ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
- DPRINTK("All regs @ start of eng_timeout\n");
- mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
+ mv_stop_dma(ap);
- qc = ata_qc_from_tag(ap, ap->active_tag);
- printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
- mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
+ mv_channel_reset(hpriv, mmio, ap->port_no);
- spin_lock_irqsave(&ap->host->lock, flags);
- mv_err_intr(ap, 0);
- mv_stop_and_reset(ap);
- spin_unlock_irqrestore(&ap->host->lock, flags);
+ mv_phy_reset(ap, class, deadline);
- WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
- if (qc->flags & ATA_QCFLAG_ACTIVE) {
- qc->err_mask |= AC_ERR_TIMEOUT;
- ata_eh_qc_complete(qc);
+ return 0;
+}
+
+static void mv_postreset(struct ata_port *ap, unsigned int *classes)
+{
+ u32 serr;
+
+ /* print link status */
+ sata_print_link_status(ap);
+
+ /* clear SError */
+ sata_scr_read(ap, SCR_ERROR, &serr);
+ sata_scr_write_flush(ap, SCR_ERROR, serr);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ DPRINTK("EXIT, no device\n");
+ return;
+ }
+
+ /* set up device control */
+ iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
+}
+
+static void mv_error_handler(struct ata_port *ap)
+{
+ ata_do_eh(ap, mv_prereset, ata_std_softreset,
+ mv_hardreset, mv_postreset);
+}
+
+static void mv_post_int_cmd(struct ata_queued_cmd *qc)
+{
+ mv_stop_dma(qc->ap);
+}
+
+static void mv_eh_freeze(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ unsigned int hc = (ap->port_no > 3) ? 1 : 0;
+ u32 tmp, mask;
+ unsigned int shift;
+
+ /* FIXME: handle coalescing completion events properly */
+
+ shift = ap->port_no * 2;
+ if (hc > 0)
+ shift++;
+
+ mask = 0x3 << shift;
+
+ /* disable assertion of portN err, done events */
+ tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+ writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+}
+
+static void mv_eh_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ unsigned int hc = (ap->port_no > 3) ? 1 : 0;
+ void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 tmp, mask, hc_irq_cause;
+ unsigned int shift, hc_port_no = ap->port_no;
+
+ /* FIXME: handle coalescing completion events properly */
+
+ shift = ap->port_no * 2;
+ if (hc > 0) {
+ shift++;
+ hc_port_no -= 4;
}
+
+ mask = 0x3 << shift;
+
+ /* clear EDMA errors on this port */
+ writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+ /* clear pending irq events */
+ hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+ hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
+ hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
+ writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+
+ /* enable assertion of portN err, done events */
+ tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+ writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
@@ -2139,17 +2419,14 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
- u8 rev_id;
u32 hp_flags = hpriv->hp_flags;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
-
switch(board_idx) {
case chip_5080:
hpriv->ops = &mv5xxx_ops;
- hp_flags |= MV_HP_50XX;
+ hp_flags |= MV_HP_GEN_I;
- switch (rev_id) {
+ switch (pdev->revision) {
case 0x1:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
@@ -2167,9 +2444,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
case chip_504x:
case chip_508x:
hpriv->ops = &mv5xxx_ops;
- hp_flags |= MV_HP_50XX;
+ hp_flags |= MV_HP_GEN_I;
- switch (rev_id) {
+ switch (pdev->revision) {
case 0x0:
hp_flags |= MV_HP_ERRATA_50XXB0;
break;
@@ -2187,8 +2464,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
case chip_604x:
case chip_608x:
hpriv->ops = &mv6xxx_ops;
+ hp_flags |= MV_HP_GEN_II;
- switch (rev_id) {
+ switch (pdev->revision) {
case 0x7:
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
@@ -2206,10 +2484,9 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
case chip_7042:
case chip_6042:
hpriv->ops = &mv6xxx_ops;
-
hp_flags |= MV_HP_GEN_IIE;
- switch (rev_id) {
+ switch (pdev->revision) {
case 0x0:
hp_flags |= MV_HP_ERRATA_XX42A0;
break;
@@ -2273,7 +2550,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
hpriv->ops->enable_leds(hpriv, mmio);
for (port = 0; port < host->n_ports; port++) {
- if (IS_60XX(hpriv)) {
+ if (IS_GEN_II(hpriv)) {
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
@@ -2308,7 +2585,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
/* and unmask interrupt generation for host regs */
writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
- if (IS_50XX(hpriv))
+ if (IS_GEN_I(hpriv))
writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
else
writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
@@ -2337,14 +2614,12 @@ static void mv_print_info(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
struct mv_host_priv *hpriv = host->private_data;
- u8 rev_id, scc;
+ u8 scc;
const char *scc_s, *gen;
/* Use this to determine the HW stepping of the chip so we know
* what errata to workaround
*/
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
-
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
if (scc == 0)
scc_s = "SCSI";
@@ -2426,8 +2701,9 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mv_print_info(host);
pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
- &mv_sht);
+ IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int __init mv_init(void)
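
A pattern repeated across every SATA driver in this series is visible in the sata_mv hunks above: the per-driver SCR accessors stop returning the register value (with 0xffffffffU doubling as an error marker) and instead return 0 or -EINVAL, handing the value back through a pointer. Below is a minimal sketch of the old and new shapes plus an adapted caller; my_scr_read/my_scr_write and the 4-byte register spacing are illustrative stand-ins, not any particular driver's layout.

/* Sketch of the SCR accessor conversion threaded through this series.
 * Names and register spacing are hypothetical.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/libata.h>

/* old style: the value is the return, 0xffffffffU doubles as "error" */
static u32 my_scr_read_old(struct ata_port *ap, unsigned int sc_reg)
{
        if (sc_reg > SCR_CONTROL)
                return 0xffffffffU;
        return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
}

/* new style: 0 or -EINVAL, value handed back through *val */
static int my_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;
        *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

static int my_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;
        writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

/* callers now check the return code instead of comparing against ~0 */
static void my_clear_serror(struct ata_port *ap)
{
        u32 serror;

        if (my_scr_read(ap, SCR_ERROR, &serror) == 0)
                my_scr_write(ap, SCR_ERROR, serror);
}

Callers in the remaining hunks (sil_host_intr, pdc_error_intr, vt6420_prereset) follow the same shape: read into a local, then use it.
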
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index b2656867c647..0b58c4df6fd2 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -236,8 +236,8 @@ static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
-static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
@@ -715,19 +715,20 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
int freeze = 0;
ata_ehi_clear_desc(ehi);
- ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
+ __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags );
if (flags & NV_CPB_RESP_ATA_ERR) {
- ata_ehi_push_desc(ehi, ": ATA error");
+ ata_ehi_push_desc(ehi, "ATA error");
ehi->err_mask |= AC_ERR_DEV;
} else if (flags & NV_CPB_RESP_CMD_ERR) {
- ata_ehi_push_desc(ehi, ": CMD error");
+ ata_ehi_push_desc(ehi, "CMD error");
ehi->err_mask |= AC_ERR_DEV;
} else if (flags & NV_CPB_RESP_CPB_ERR) {
- ata_ehi_push_desc(ehi, ": CPB error");
+ ata_ehi_push_desc(ehi, "CPB error");
ehi->err_mask |= AC_ERR_SYSTEM;
freeze = 1;
} else {
/* notifier error, but no error in CPB flags? */
+ ata_ehi_push_desc(ehi, "unknown");
ehi->err_mask |= AC_ERR_OTHER;
freeze = 1;
}
@@ -854,20 +855,21 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
struct ata_eh_info *ehi = &ap->eh_info;
ata_ehi_clear_desc(ehi);
- ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
+ __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status );
if (status & NV_ADMA_STAT_TIMEOUT) {
ehi->err_mask |= AC_ERR_SYSTEM;
- ata_ehi_push_desc(ehi, ": timeout");
+ ata_ehi_push_desc(ehi, "timeout");
} else if (status & NV_ADMA_STAT_HOTPLUG) {
ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, ": hotplug");
+ ata_ehi_push_desc(ehi, "hotplug");
} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, ": hot unplug");
+ ata_ehi_push_desc(ehi, "hot unplug");
} else if (status & NV_ADMA_STAT_SERROR) {
/* let libata analyze SError and figure out the cause */
- ata_ehi_push_desc(ehi, ": SError");
- }
+ ata_ehi_push_desc(ehi, "SError");
+ } else
+ ata_ehi_push_desc(ehi, "unknown");
ata_port_freeze(ap);
continue;
}
@@ -1391,20 +1393,22 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
return ret;
}
-static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
+ return -EINVAL;
- return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+ *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
-static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
static void nv_nf2_freeze(struct ata_port *ap)
@@ -1560,7 +1564,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
ppi[0] = &nv_port_info[type];
- rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+ rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
if (rc)
return rc;
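
The sata_nv hunks also track a libata change in how error-handling descriptions are assembled: the separator moves out of the individual messages, with the double-underscore variant used for the first, unseparated fragment. The sketch below illustrates that pattern under the assumption that __ata_ehi_push_desc() appends text verbatim while ata_ehi_push_desc() inserts a separator between consecutive fragments; the status bit tested is a placeholder.

/* Sketch of the EH-description pattern used in the hunks above.
 * Assumes __ata_ehi_push_desc() appends as-is and ata_ehi_push_desc()
 * separates fragments; the 0x1 "timeout" bit is a placeholder.
 */
#include <linux/libata.h>

static void my_report_adma_error(struct ata_port *ap, u32 status)
{
        struct ata_eh_info *ehi = &ap->eh_info;

        ata_ehi_clear_desc(ehi);
        __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);

        if (status & 0x1)                       /* placeholder timeout bit */
                ata_ehi_push_desc(ehi, "timeout");
        else
                ata_ehi_push_desc(ehi, "unknown");

        ehi->err_mask |= AC_ERR_OTHER;
        ata_port_freeze(ap);
}
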
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 2ad5872fe90c..d39ebc23c4a9 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -45,7 +45,7 @@
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
-#define DRV_VERSION "2.08"
+#define DRV_VERSION "2.09"
enum {
PDC_MAX_PORTS = 4,
@@ -128,8 +128,8 @@ struct pdc_port_priv {
dma_addr_t pkt_dma;
};
-static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
@@ -427,19 +427,20 @@ static int pdc_sata_cable_detect(struct ata_port *ap)
return ATA_CBL_SATA;
}
-static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
- return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return -EINVAL;
+ *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
-static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
- u32 val)
+static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
@@ -642,8 +643,12 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
| PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
ac_err_mask |= AC_ERR_HOST_BUS;
- if (sata_scr_valid(ap))
- ehi->serror |= pdc_sata_scr_read(ap, SCR_ERROR);
+ if (sata_scr_valid(ap)) {
+ u32 serror;
+
+ pdc_sata_scr_read(ap, SCR_ERROR, &serror);
+ ehi->serror |= serror;
+ }
qc->err_mask |= ac_err_mask;
@@ -716,6 +721,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
unsigned int i, tmp;
unsigned int handled = 0;
void __iomem *mmio_base;
+ unsigned int hotplug_offset, ata_no;
+ u32 hotplug_status;
+ int is_sataii_tx4;
VPRINTK("ENTER\n");
@@ -726,10 +734,20 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
mmio_base = host->iomap[PDC_MMIO_BAR];
+ /* read and clear hotplug flags for all ports */
+ if (host->ports[0]->flags & PDC_FLAG_GEN_II)
+ hotplug_offset = PDC2_SATA_PLUG_CSR;
+ else
+ hotplug_offset = PDC_SATA_PLUG_CSR;
+ hotplug_status = readl(mmio_base + hotplug_offset);
+ if (hotplug_status & 0xff)
+ writel(hotplug_status | 0xff, mmio_base + hotplug_offset);
+ hotplug_status &= 0xff; /* clear uninteresting bits */
+
/* reading should also clear interrupts */
mask = readl(mmio_base + PDC_INT_SEQMASK);
- if (mask == 0xffffffff) {
+ if (mask == 0xffffffff && hotplug_status == 0) {
VPRINTK("QUICK EXIT 2\n");
return IRQ_NONE;
}
@@ -737,16 +755,34 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
spin_lock(&host->lock);
mask &= 0xffff; /* only 16 tags possible */
- if (!mask) {
+ if (mask == 0 && hotplug_status == 0) {
VPRINTK("QUICK EXIT 3\n");
goto done_irq;
}
writel(mask, mmio_base + PDC_INT_SEQMASK);
+ is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
+
for (i = 0; i < host->n_ports; i++) {
VPRINTK("port %u\n", i);
ap = host->ports[i];
+
+ /* check for a plug or unplug event */
+ ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
+ tmp = hotplug_status & (0x11 << ata_no);
+ if (tmp && ap &&
+ !(ap->flags & ATA_FLAG_DISABLED)) {
+ struct ata_eh_info *ehi = &ap->eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
+ ata_port_freeze(ap);
+ ++handled;
+ continue;
+ }
+
+ /* check for a packet interrupt */
tmp = mask & (1 << (i + 1));
if (tmp && ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
@@ -902,9 +938,9 @@ static void pdc_host_init(struct ata_host *host)
tmp = readl(mmio + hotplug_offset);
writel(tmp | 0xff, mmio + hotplug_offset);
- /* mask plug/unplug ints */
+ /* unmask plug/unplug ints */
tmp = readl(mmio + hotplug_offset);
- writel(tmp | 0xff0000, mmio + hotplug_offset);
+ writel(tmp & ~0xff0000, mmio + hotplug_offset);
/* don't initialise TBG or SLEW on 2nd generation chips */
if (is_gen2)
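
pdc_interrupt() now reads and clears the hotplug control/status register before looking at the sequence mask, and freezes any port that reported a plug or unplug event. A reduced sketch of that read-clear-freeze flow follows; the register offset and the 0x11-per-port bit layout are placeholders rather than the Promise definitions.

/* Sketch of the read-and-clear hotplug handling added above.
 * MY_PLUG_CSR and the per-port bit mapping are placeholders.
 */
#include <linux/io.h>
#include <linux/libata.h>

#define MY_PLUG_CSR     0x400                   /* placeholder offset */

static void my_handle_hotplug(struct ata_host *host, void __iomem *mmio)
{
        u32 hotplug_status;
        int i;

        hotplug_status = readl(mmio + MY_PLUG_CSR);
        if (hotplug_status & 0xff)              /* write back to ack events */
                writel(hotplug_status | 0xff, mmio + MY_PLUG_CSR);
        hotplug_status &= 0xff;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                u32 bits = hotplug_status & (0x11 << i);   /* placeholder map */
                struct ata_eh_info *ehi;

                if (!ap || !bits)
                        continue;

                ehi = &ap->eh_info;
                ata_ehi_clear_desc(ehi);
                ata_ehi_hotplugged(ehi);
                ata_ehi_push_desc(ehi, "hotplug_status %#x", bits);
                ata_port_freeze(ap);
        }
}
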
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 9ab554da89bf..c8f9242e7f44 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -111,8 +111,8 @@ struct qs_port_priv {
qs_state_t state;
};
-static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
@@ -255,18 +255,20 @@ static void qs_eng_timeout(struct ata_port *ap)
ata_eng_timeout(ap);
}
-static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return ~0U;
- return readl(ap->ioaddr.scr_addr + (sc_reg * 8));
+ return -EINVAL;
+ *val = readl(ap->ioaddr.scr_addr + (sc_reg * 8));
+ return 0;
}
-static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
writel(val, ap->ioaddr.scr_addr + (sc_reg * 8));
+ return 0;
}
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
@@ -337,7 +339,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
buf[28] = dflags;
/* frame information structure (FIS) */
- ata_tf_to_fis(&qc->tf, &buf[32], 0);
+ ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 2a86dc4598d0..db6763758952 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -115,8 +115,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_device *dev);
-static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int sil_set_mode (struct ata_port *ap, struct ata_device **r_failed);
static void sil_freeze(struct ata_port *ap);
static void sil_thaw(struct ata_port *ap);
@@ -350,19 +350,26 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_re
return NULL;
}
-static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
void __iomem *mmio = sil_scr_addr(ap, sc_reg);
- if (mmio)
- return readl(mmio);
- return 0xffffffffU;
+
+ if (mmio) {
+ *val = readl(mmio);
+ return 0;
+ }
+ return -EINVAL;
}
-static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
void __iomem *mmio = sil_scr_addr(ap, sc_reg);
- if (mmio)
+
+ if (mmio) {
writel(val, mmio);
+ return 0;
+ }
+ return -EINVAL;
}
static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
@@ -378,7 +385,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
* controllers continue to assert IRQ as long as
* SError bits are pending. Clear SError immediately.
*/
- serror = sil_scr_read(ap, SCR_ERROR);
+ sil_scr_read(ap, SCR_ERROR, &serror);
sil_scr_write(ap, SCR_ERROR, serror);
/* Trigger hotplug and accumulate SError only if the
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index ac43a30ebe29..46fbbe7f121c 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -326,8 +326,8 @@ struct sil24_port_priv {
static void sil24_dev_config(struct ata_device *dev);
static u8 sil24_check_status(struct ata_port *ap);
-static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
-static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
+static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val);
+static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
@@ -464,15 +464,15 @@ static void sil24_dev_config(struct ata_device *dev)
writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}
-static inline void sil24_update_tf(struct ata_port *ap)
+static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
- struct sil24_port_priv *pp = ap->private_data;
void __iomem *port = ap->ioaddr.cmd_addr;
- struct sil24_prb __iomem *prb = port;
+ struct sil24_prb __iomem *prb;
u8 fis[6 * 4];
- memcpy_fromio(fis, prb->fis, 6 * 4);
- ata_tf_from_fis(fis, &pp->tf);
+ prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
+ memcpy_fromio(fis, prb->fis, sizeof(fis));
+ ata_tf_from_fis(fis, tf);
}
static u8 sil24_check_status(struct ata_port *ap)
@@ -488,25 +488,30 @@ static int sil24_scr_map[] = {
[SCR_ACTIVE] = 3,
};
-static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
+static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
void __iomem *scr_addr = ap->ioaddr.scr_addr;
+
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
void __iomem *addr;
addr = scr_addr + sil24_scr_map[sc_reg] * 4;
- return readl(scr_addr + sil24_scr_map[sc_reg] * 4);
+ *val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
+ return 0;
}
- return 0xffffffffU;
+ return -EINVAL;
}
-static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
void __iomem *scr_addr = ap->ioaddr.scr_addr;
+
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
void __iomem *addr;
addr = scr_addr + sil24_scr_map[sc_reg] * 4;
writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
+ return 0;
}
+ return -EINVAL;
}
static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
@@ -531,15 +536,60 @@ static int sil24_init_port(struct ata_port *ap)
return 0;
}
-static int sil24_softreset(struct ata_port *ap, unsigned int *class,
- unsigned long deadline)
+static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
+ const struct ata_taskfile *tf,
+ int is_cmd, u32 ctrl,
+ unsigned long timeout_msec)
{
void __iomem *port = ap->ioaddr.cmd_addr;
struct sil24_port_priv *pp = ap->private_data;
struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
dma_addr_t paddr = pp->cmd_block_dma;
- u32 mask, irq_stat;
+ u32 irq_enabled, irq_mask, irq_stat;
+ int rc;
+
+ prb->ctrl = cpu_to_le16(ctrl);
+ ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);
+
+ /* temporarily plug completion and error interrupts */
+ irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
+ writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);
+
+ writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+ writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
+
+ irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
+ irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0,
+ 10, timeout_msec);
+
+ writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
+ irq_stat >>= PORT_IRQ_RAW_SHIFT;
+
+ if (irq_stat & PORT_IRQ_COMPLETE)
+ rc = 0;
+ else {
+ /* force port into known state */
+ sil24_init_port(ap);
+
+ if (irq_stat & PORT_IRQ_ERROR)
+ rc = -EIO;
+ else
+ rc = -EBUSY;
+ }
+
+ /* restore IRQ enabled */
+ writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);
+
+ return rc;
+}
+
+static int sil24_do_softreset(struct ata_port *ap, unsigned int *class,
+ int pmp, unsigned long deadline)
+{
+ unsigned long timeout_msec = 0;
+ struct ata_taskfile tf;
const char *reason;
+ int rc;
DPRINTK("ENTER\n");
@@ -556,29 +606,22 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class,
}
/* do SRST */
- prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
- prb->fis[1] = 0; /* no PMP yet */
-
- writel((u32)paddr, port + PORT_CMD_ACTIVATE);
- writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);
-
- mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
- irq_stat = ata_wait_register(port + PORT_IRQ_STAT, mask, 0x0,
- 100, jiffies_to_msecs(deadline - jiffies));
-
- writel(irq_stat, port + PORT_IRQ_STAT); /* clear IRQs */
- irq_stat >>= PORT_IRQ_RAW_SHIFT;
-
- if (!(irq_stat & PORT_IRQ_COMPLETE)) {
- if (irq_stat & PORT_IRQ_ERROR)
- reason = "SRST command error";
- else
- reason = "timeout";
+ if (time_after(deadline, jiffies))
+ timeout_msec = jiffies_to_msecs(deadline - jiffies);
+
+ ata_tf_init(ap->device, &tf); /* doesn't really matter */
+ rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
+ timeout_msec);
+ if (rc == -EBUSY) {
+ reason = "timeout";
+ goto err;
+ } else if (rc) {
+ reason = "SRST command error";
goto err;
}
- sil24_update_tf(ap);
- *class = ata_dev_classify(&pp->tf);
+ sil24_read_tf(ap, 0, &tf);
+ *class = ata_dev_classify(&tf);
if (*class == ATA_DEV_UNKNOWN)
*class = ATA_DEV_NONE;
@@ -592,6 +635,12 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class,
return -EIO;
}
+static int sil24_softreset(struct ata_port *ap, unsigned int *class,
+ unsigned long deadline)
+{
+ return sil24_do_softreset(ap, class, 0, deadline);
+}
+
static int sil24_hardreset(struct ata_port *ap, unsigned int *class,
unsigned long deadline)
{
@@ -699,7 +748,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
}
prb->ctrl = cpu_to_le16(ctrl);
- ata_tf_to_fis(&qc->tf, prb->fis, 0);
+ ata_tf_to_fis(&qc->tf, 0, 1, prb->fis);
if (qc->flags & ATA_QCFLAG_DMAMAP)
sil24_fill_sg(qc, sge);
@@ -754,6 +803,7 @@ static void sil24_thaw(struct ata_port *ap)
static void sil24_error_intr(struct ata_port *ap)
{
void __iomem *port = ap->ioaddr.cmd_addr;
+ struct sil24_port_priv *pp = ap->private_data;
struct ata_eh_info *ehi = &ap->eh_info;
int freeze = 0;
u32 irq_stat;
@@ -769,16 +819,16 @@ static void sil24_error_intr(struct ata_port *ap)
if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
ata_ehi_hotplugged(ehi);
- ata_ehi_push_desc(ehi, ", %s",
- irq_stat & PORT_IRQ_PHYRDY_CHG ?
- "PHY RDY changed" : "device exchanged");
+ ata_ehi_push_desc(ehi, "%s",
+ irq_stat & PORT_IRQ_PHYRDY_CHG ?
+ "PHY RDY changed" : "device exchanged");
freeze = 1;
}
if (irq_stat & PORT_IRQ_UNK_FIS) {
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
- ata_ehi_push_desc(ehi , ", unknown FIS");
+ ata_ehi_push_desc(ehi, "unknown FIS");
freeze = 1;
}
@@ -797,18 +847,18 @@ static void sil24_error_intr(struct ata_port *ap)
if (ci && ci->desc) {
err_mask |= ci->err_mask;
action |= ci->action;
- ata_ehi_push_desc(ehi, ", %s", ci->desc);
+ ata_ehi_push_desc(ehi, "%s", ci->desc);
} else {
err_mask |= AC_ERR_OTHER;
action |= ATA_EH_SOFTRESET;
- ata_ehi_push_desc(ehi, ", unknown command error %d",
+ ata_ehi_push_desc(ehi, "unknown command error %d",
cerr);
}
/* record error info */
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc) {
- sil24_update_tf(ap);
+ sil24_read_tf(ap, qc->tag, &pp->tf);
qc->err_mask |= err_mask;
} else
ehi->err_mask |= err_mask;
@@ -825,8 +875,11 @@ static void sil24_error_intr(struct ata_port *ap)
static void sil24_finish_qc(struct ata_queued_cmd *qc)
{
+ struct ata_port *ap = qc->ap;
+ struct sil24_port_priv *pp = ap->private_data;
+
if (qc->flags & ATA_QCFLAG_RESULT_TF)
- sil24_update_tf(qc->ap);
+ sil24_read_tf(ap, qc->tag, &pp->tf);
}
static inline void sil24_host_intr(struct ata_port *ap)
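
Both sata_qstor and sata_sil24 are adjusted for the wider ata_tf_to_fis() helper, which, as used in these hunks, takes the PMP number and an is_cmd flag ahead of the output buffer. A short sketch of building a command FIS versus a control FIS with that four-argument form (the prototype is inferred from the calls above):

/* Sketch: the four-argument ata_tf_to_fis() as called in these hunks
 * (taskfile, PMP number, is_cmd flag, output buffer).
 */
#include <linux/libata.h>

static void my_build_fises(struct ata_queued_cmd *qc,
                           const struct ata_taskfile *srst_tf,
                           u8 *cmd_fis, u8 *ctl_fis)
{
        /* command FIS: PMP port 0, is_cmd=1 sets the C bit */
        ata_tf_to_fis(&qc->tf, 0, 1, cmd_fis);

        /* control FIS (e.g. the SRST sequence): is_cmd=0 leaves C clear */
        ata_tf_to_fis(srst_tf, 0, 0, ctl_fis);
}
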
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fd80bcf1b236..31a2f55aae66 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -64,8 +64,8 @@ enum {
};
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sis_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static const struct pci_device_id sis_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
@@ -207,36 +207,37 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val
pci_write_config_dword(pdev, cfg_addr+0x10, val);
}
-static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 val, val2 = 0;
u8 pmr;
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
+ return -EINVAL;
if (ap->flags & SIS_FLAG_CFGSCR)
return sis_scr_cfg_read(ap, sc_reg);
pci_read_config_byte(pdev, SIS_PMR, &pmr);
- val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
+ *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
(pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
- val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
+ *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
+
+ *val &= 0xfffffffb;
- return (val | val2) & 0xfffffffb;
+ return 0;
}
-static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 pmr;
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
pci_read_config_byte(pdev, SIS_PMR, &pmr);
@@ -248,6 +249,7 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
(pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
}
+ return 0;
}
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -334,7 +336,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+ rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
if (rc)
return rc;
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 63fe99afd59f..92e877075037 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -103,20 +103,21 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
return 0;
}
-static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
- return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return -EINVAL;
+ *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
-static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
- u32 val)
+static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index aca71819f6e8..78c28512f01c 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -57,8 +57,8 @@ struct uli_priv {
};
static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static const struct pci_device_id uli_pci_tbl[] = {
{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
@@ -164,20 +164,22 @@ static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
pci_write_config_dword(pdev, cfg_addr, val);
}
-static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
+ return -EINVAL;
- return uli_scr_cfg_read(ap, sc_reg);
+ *val = uli_scr_cfg_read(ap, sc_reg);
+ return 0;
}
-static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
- return;
+ return -EINVAL;
uli_scr_cfg_write(ap, sc_reg, val);
+ return 0;
}
static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -213,7 +215,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
host->private_data = hpriv;
/* the first two ports are standard SFF */
- rc = ata_pci_init_native_host(host);
+ rc = ata_pci_init_sff_host(host);
if (rc)
return rc;
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index a4c0832033d8..86b7bfc17324 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -72,8 +72,8 @@ enum {
};
static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
-static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static void svia_noop_freeze(struct ata_port *ap);
static void vt6420_error_handler(struct ata_port *ap);
static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -249,18 +249,20 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
- return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
+ return -EINVAL;
+ *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
+ return 0;
}
-static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
+ return 0;
}
static void svia_noop_freeze(struct ata_port *ap)
@@ -305,18 +307,19 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
/* Resume phy. This is the old SATA resume sequence */
svia_scr_write(ap, SCR_CONTROL, 0x300);
- svia_scr_read(ap, SCR_CONTROL); /* flush */
+ svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */
/* wait for phy to become ready, if necessary */
do {
msleep(200);
- if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
+ svia_scr_read(ap, SCR_STATUS, &sstatus);
+ if ((sstatus & 0xf) != 1)
break;
} while (time_before(jiffies, timeout));
/* open code sata_print_link_status() */
- sstatus = svia_scr_read(ap, SCR_STATUS);
- scontrol = svia_scr_read(ap, SCR_CONTROL);
+ svia_scr_read(ap, SCR_STATUS, &sstatus);
+ svia_scr_read(ap, SCR_CONTROL, &scontrol);
online = (sstatus & 0xf) == 0x3;
@@ -325,7 +328,7 @@ static int vt6420_prereset(struct ata_port *ap, unsigned long deadline)
online ? "up" : "down", sstatus, scontrol);
/* SStatus is read one more time */
- svia_scr_read(ap, SCR_STATUS);
+ svia_scr_read(ap, SCR_STATUS, &sstatus);
if (!online) {
/* tell EH to bail */
@@ -412,7 +415,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int rc;
- rc = ata_pci_prepare_native_host(pdev, ppi, &host);
+ rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
if (rc)
return rc;
*r_host = host;
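
vt6420_prereset() above keeps its old phy-resume loop but adapts it to the pointer-returning SCR reads. Below is a sketch of the same resume-and-poll sequence written against the generic sata_scr_* helpers rather than the driver-private accessors; treating the two as equivalent here is an assumption.

/* Sketch of the phy-resume wait adapted above: write SControl, then
 * poll SStatus DET until the link leaves the DET=1 state or the
 * deadline (in jiffies) passes.
 */
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/libata.h>

static void my_resume_phy(struct ata_port *ap, unsigned long deadline)
{
        u32 sstatus;

        /* resume the phy: the old SATA resume sequence, SControl 0x300 */
        sata_scr_write(ap, SCR_CONTROL, 0x300);
        sata_scr_read(ap, SCR_CONTROL, &sstatus);       /* flush */

        /* wait for the phy to become ready, if necessary */
        do {
                msleep(200);
                sata_scr_read(ap, SCR_STATUS, &sstatus);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, deadline));
}
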
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 1b5d81faa102..24344d0d0575 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -98,20 +98,21 @@ enum {
VSC_SATA_INT_PHY_CHANGE),
};
-static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
+static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
- return 0xffffffffU;
- return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return -EINVAL;
+ *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
-static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
- u32 val)
+static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
- return;
+ return -EINVAL;
writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+ return 0;
}
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 5b4fab24155f..bed9f58c2d5a 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -142,7 +142,7 @@ config ATM_ENI_BURST_RX_2W
config ATM_FIRESTREAM
tristate "Fujitsu FireStream (FS50/FS155) "
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
help
Driver for the Fujitsu FireStream 155 (MB86697) and
FireStream 50 (MB86695) ATM PCI chips.
@@ -152,7 +152,7 @@ config ATM_FIRESTREAM
config ATM_ZATM
tristate "ZeitNet ZN1221/ZN1225"
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
help
Driver for the ZeitNet ZN1221 (MMF) and ZN1225 (UTP-5) 155 Mbps ATM
adapters.
@@ -172,7 +172,7 @@ config ATM_ZATM_DEBUG
config ATM_NICSTAR
tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
- depends on PCI && !64BIT
+ depends on PCI && !64BIT && VIRT_TO_BUS
help
The NICStAR chipset family is used in a large number of ATM NICs for
25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
@@ -240,7 +240,7 @@ config ATM_IDT77252_USE_SUNI
config ATM_AMBASSADOR
tristate "Madge Ambassador (Collage PCI 155 Server)"
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
select BITREVERSE
help
This is a driver for ATMizer-based ATM cards produced by Madge
@@ -265,7 +265,7 @@ config ATM_AMBASSADOR_DEBUG
config ATM_HORIZON
tristate "Madge Horizon [Ultra] (Collage PCI 25 and Collage PCI 155 Client)"
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
help
This is a driver for the Horizon chipset ATM adapter cards once
produced by Madge Networks Ltd. Say Y (or M to compile as a module
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 59651abfa4f8..b34b3829f6a9 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1040,7 +1040,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
struct atm_qos * qos;
struct atm_trafprm * txtp;
struct atm_trafprm * rxtp;
- u16 tx_rate_bits;
+ u16 tx_rate_bits = -1; // hush gcc
u16 tx_vc_bits = -1; // hush gcc
u16 tx_frame_bits = -1; // hush gcc
@@ -1096,6 +1096,8 @@ static int amb_open (struct atm_vcc * atm_vcc)
r = round_up;
}
error = make_rate (pcr, r, &tx_rate_bits, NULL);
+ if (error)
+ return error;
tx_vc_bits = TX_UBR_CAPPED;
tx_frame_bits = TX_FRAME_CAPPED;
}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 0d3a38b1cb0b..41b2204ebc6e 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1704,7 +1704,6 @@ static int __devinit eni_do_init(struct atm_dev *dev)
struct pci_dev *pci_dev;
unsigned long real_base;
void __iomem *base;
- unsigned char revision;
int error,i,last;
DPRINTK(">eni_init\n");
@@ -1715,12 +1714,6 @@ static int __devinit eni_do_init(struct atm_dev *dev)
pci_dev = eni_dev->pci_dev;
real_base = pci_resource_start(pci_dev, 0);
eni_dev->irq = pci_dev->irq;
- error = pci_read_config_byte(pci_dev,PCI_REVISION_ID,&revision);
- if (error) {
- printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
- dev->number,error);
- return -EINVAL;
- }
if ((error = pci_write_config_word(pci_dev,PCI_COMMAND,
PCI_COMMAND_MEMORY |
(eni_dev->asic ? PCI_COMMAND_PARITY | PCI_COMMAND_SERR : 0)))) {
@@ -1729,7 +1722,7 @@ static int __devinit eni_do_init(struct atm_dev *dev)
return -EIO;
}
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%lx,irq=%d,",
- dev->number,revision,real_base,eni_dev->irq);
+ dev->number,pci_dev->revision,real_base,eni_dev->irq);
if (!(base = ioremap_nocache(real_base,MAP_MAX_SIZE))) {
printk("\n");
printk(KERN_ERR DEV_LABEL "(itf %d): can't set up page "
@@ -1745,7 +1738,8 @@ static int __devinit eni_do_init(struct atm_dev *dev)
printk(KERN_ERR DEV_LABEL "(itf %d): bad "
"magic - expected 0x%x, got 0x%x\n",dev->number,
ENI155_MAGIC,(unsigned) readl(&eprom->magic));
- return -EINVAL;
+ error = -EINVAL;
+ goto unmap;
}
}
eni_dev->phy = base+PHY_BASE;
@@ -1772,17 +1766,27 @@ static int __devinit eni_do_init(struct atm_dev *dev)
printk(")\n");
printk(KERN_ERR DEV_LABEL "(itf %d): ERROR - wrong id 0x%x\n",
dev->number,(unsigned) eni_in(MID_RES_ID_MCON));
- return -EINVAL;
+ error = -EINVAL;
+ goto unmap;
}
error = eni_dev->asic ? get_esi_asic(dev) : get_esi_fpga(dev,base);
- if (error) return error;
+ if (error)
+ goto unmap;
for (i = 0; i < ESI_LEN; i++)
printk("%s%02X",i ? "-" : "",dev->esi[i]);
printk(")\n");
printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
- return suni_init(dev);
+
+ error = suni_init(dev);
+ if (error)
+ goto unmap;
+out:
+ return error;
+unmap:
+ iounmap(base);
+ goto out;
}
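
eni_do_init() above (and fs_init() in the next file) are reworked so that every failure after the ioremap funnels through a single unmap label instead of returning directly and leaking the mapping. A generic sketch of that unwind idiom, with my_check_magic()/my_start_hw() as stand-ins for the driver-specific steps:

/* Sketch of the single-exit unwind pattern adopted above: once the MMIO
 * region is mapped, every error path goes through the unmap label.
 * The helpers and the magic/enable values are placeholders.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/errno.h>

static bool my_check_magic(void __iomem *base)
{
        return readl(base) == 0xdeadbeef;       /* placeholder magic */
}

static int my_start_hw(void __iomem *base)
{
        writel(1, base + 4);                    /* placeholder enable bit */
        return 0;
}

static int my_probe_mmio(unsigned long phys, unsigned long size)
{
        void __iomem *base;
        int error;

        base = ioremap_nocache(phys, size);
        if (!base)
                return -ENOMEM;

        error = -EINVAL;
        if (!my_check_magic(base))
                goto unmap;

        error = my_start_hw(base);
        if (error)
                goto unmap;

        return 0;

unmap:
        iounmap(base);
        return error;
}
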
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 38b688f9f6a9..737cea49f872 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1710,7 +1710,7 @@ static int __devinit fs_init (struct fs_dev *dev)
/* This bit is documented as "RESERVED" */
if (isr & ISR_INIT_ERR) {
printk (KERN_ERR "Error initializing the FS... \n");
- return 1;
+ goto unmap;
}
if (isr & ISR_INIT) {
fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
@@ -1723,7 +1723,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (!to) {
printk (KERN_ERR "timeout initializing the FS... \n");
- return 1;
+ goto unmap;
}
/* XXX fix for fs155 */
@@ -1803,7 +1803,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (!dev->atm_vccs) {
printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
/* XXX Clean up..... */
- return 1;
+ goto unmap;
}
dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
@@ -1813,7 +1813,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (!dev->tx_inuse) {
printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
/* XXX Clean up..... */
- return 1;
+ goto unmap;
}
/* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
/* -- RAS2 : FS50 only: Default is OK. */
@@ -1840,7 +1840,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
/* XXX undo all previous stuff... */
- return 1;
+ goto unmap;
}
fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);
@@ -1890,6 +1890,9 @@ static int __devinit fs_init (struct fs_dev *dev)
func_exit ();
return 0;
+unmap:
+ iounmap(dev->base);
+ return 1;
}
static int __devinit firestream_init_one (struct pci_dev *pci_dev,
@@ -2012,6 +2015,7 @@ static void __devexit firestream_remove_one (struct pci_dev *pdev)
for (i=0;i < FS_NR_RX_QUEUES;i++)
free_queue (dev, &dev->rx_rq[i]);
+ iounmap(dev->base);
fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
nxtdev = dev->next;
kfree (dev);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 3800bc0cb2ef..f8b1700f4c16 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -65,7 +65,7 @@ static char const rcsid[] =
static unsigned int vpibits = 1;
-#define CONFIG_ATM_IDT77252_SEND_IDLE 1
+#define ATM_IDT77252_SEND_IDLE 1
/*
@@ -3404,7 +3404,7 @@ init_card(struct atm_dev *dev)
conf = SAR_CFG_TX_FIFO_SIZE_9 | /* Use maximum fifo size */
SAR_CFG_RXSTQ_SIZE_8k | /* Receive Status Queue is 8k */
SAR_CFG_IDLE_CLP | /* Set CLP on idle cells */
-#ifndef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifndef ATM_IDT77252_SEND_IDLE
SAR_CFG_NO_IDLE | /* Do not send idle cells */
#endif
0;
@@ -3541,7 +3541,7 @@ init_card(struct atm_dev *dev)
printk("%s: Linkrate on ATM line : %u bit/s, %u cell/s.\n",
card->name, linkrate, card->link_pcr);
-#ifdef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifdef ATM_IDT77252_SEND_IDLE
card->utopia_pcr = card->link_pcr;
#else
card->utopia_pcr = (160000000 / 8 / 54);
@@ -3679,7 +3679,6 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
unsigned long membase, srambase;
struct idt77252_dev *card;
struct atm_dev *dev;
- ushort revision = 0;
int i, err;
@@ -3688,19 +3687,13 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
return err;
}
- if (pci_read_config_word(pcidev, PCI_REVISION_ID, &revision)) {
- printk("idt77252-%d: can't read PCI_REVISION_ID\n", index);
- err = -ENODEV;
- goto err_out_disable_pdev;
- }
-
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
if (!card) {
printk("idt77252-%d: can't allocate private data\n", index);
err = -ENOMEM;
goto err_out_disable_pdev;
}
- card->revision = revision;
+ card->revision = pcidev->revision;
card->index = index;
card->pcidev = pcidev;
sprintf(card->name, "idt77252-%d", card->index);
@@ -3762,8 +3755,8 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
}
printk("%s: ABR SAR (Rev %c): MEM %08lx SRAM %08lx [%u KB]\n",
- card->name, ((revision > 1) && (revision < 25)) ?
- 'A' + revision - 1 : '?', membase, srambase,
+ card->name, ((card->revision > 1) && (card->revision < 25)) ?
+ 'A' + card->revision - 1 : '?', membase, srambase,
card->sramsize / 1024);
if (init_card(dev)) {
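
Several of the conversions in this series (sata_mv, eni, idt77252, iphase, lanai, zatm) drop the explicit PCI_REVISION_ID config-space read in favour of the revision byte the PCI core caches in struct pci_dev. A minimal before/after sketch, with a made-up erratum threshold:

/* Sketch: using the cached pdev->revision instead of re-reading
 * PCI_REVISION_ID from config space.  MY_ERRATA_REV is a placeholder.
 */
#include <linux/types.h>
#include <linux/pci.h>

#define MY_ERRATA_REV   0x02            /* placeholder stepping */

static bool my_needs_errata(struct pci_dev *pdev)
{
        /* old:
         *      u8 rev;
         *      pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
         * new: the core fills pdev->revision at enumeration time.
         */
        return pdev->revision <= MY_ERRATA_REV;
}
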
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index bb7ef570514c..a3b605a0ca17 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2290,7 +2290,6 @@ static int __devinit ia_init(struct atm_dev *dev)
unsigned long real_base;
void __iomem *base;
unsigned short command;
- unsigned char revision;
int error, i;
/* The device has been identified and registered. Now we read
@@ -2305,16 +2304,14 @@ static int __devinit ia_init(struct atm_dev *dev)
real_base = pci_resource_start (iadev->pci, 0);
iadev->irq = iadev->pci->irq;
- if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))
- || (error = pci_read_config_byte(iadev->pci,
- PCI_REVISION_ID,&revision)))
- {
+ error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
+ if (error) {
printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
dev->number,error);
return -EINVAL;
}
IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
- dev->number, revision, real_base, iadev->irq);)
+ dev->number, iadev->pci->revision, real_base, iadev->irq);)
/* find mapping size of board */
@@ -2353,7 +2350,7 @@ static int __devinit ia_init(struct atm_dev *dev)
return error;
}
IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
- dev->number, revision, base, iadev->irq);)
+ dev->number, iadev->pci->revision, base, iadev->irq);)
/* filling the iphase dev structure */
iadev->mem = iadev->pci_map_size /2;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 09f477d4237a..55fd1b4543fd 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -246,8 +246,8 @@ struct lanai_vcc {
};
enum lanai_type {
- lanai2 = PCI_VENDOR_ID_EF_ATM_LANAI2,
- lanaihb = PCI_VENDOR_ID_EF_ATM_LANAIHB
+ lanai2 = PCI_DEVICE_ID_EF_ATM_LANAI2,
+ lanaihb = PCI_DEVICE_ID_EF_ATM_LANAIHB
};
struct lanai_dev_stats {
@@ -293,7 +293,6 @@ struct lanai_dev {
struct atm_vcc *cbrvcc;
int number;
int board_rev;
- u8 pci_revision;
/* TODO - look at race conditions with maintenance of conf1/conf2 */
/* TODO - transmit locking: should we use _irq not _irqsave? */
/* TODO - organize above in some rational fashion (see <asm/cache.h>) */
@@ -553,8 +552,8 @@ static inline void sram_write(const struct lanai_dev *lanai,
writel(val, sram_addr(lanai, offset));
}
-static int __init sram_test_word(
- const struct lanai_dev *lanai, int offset, u32 pattern)
+static int __devinit sram_test_word(const struct lanai_dev *lanai,
+ int offset, u32 pattern)
{
u32 readback;
sram_write(lanai, pattern, offset);
@@ -1969,14 +1968,6 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai)
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
}
- /* Get the pci revision byte */
- result = pci_read_config_byte(pci, PCI_REVISION_ID,
- &lanai->pci_revision);
- if (result != PCIBIOS_SUCCESSFUL) {
- printk(KERN_ERR DEV_LABEL "(itf %d): can't read "
- "PCI_REVISION_ID: %d\n", lanai->number, result);
- return -EINVAL;
- }
result = pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &w);
if (result != PCIBIOS_SUCCESSFUL) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't read "
@@ -2254,7 +2245,7 @@ static int __devinit lanai_dev_open(struct atm_dev *atmdev)
lanai_timed_poll_start(lanai);
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
"(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number,
- (int) lanai->pci_revision, (unsigned long) lanai->base,
+ (int) lanai->pci->revision, (unsigned long) lanai->base,
lanai->pci->irq,
atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
@@ -2491,7 +2482,7 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
(unsigned int) lanai->magicno, lanai->num_vci);
if (left-- == 0)
return sprintf(page, "revision: board=%d, pci_if=%d\n",
- lanai->board_rev, (int) lanai->pci_revision);
+ lanai->board_rev, (int) lanai->pci->revision);
if (left-- == 0)
return sprintf(page, "EEPROM ESI: "
"%02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -2631,14 +2622,8 @@ static int __devinit lanai_init_one(struct pci_dev *pci,
}
static struct pci_device_id lanai_pci_tbl[] = {
- {
- PCI_VENDOR_ID_EF, PCI_VENDOR_ID_EF_ATM_LANAI2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
- },
- {
- PCI_VENDOR_ID_EF, PCI_VENDOR_ID_EF_ATM_LANAIHB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
- },
+ { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) },
+ { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) },
{ 0, } /* terminal entry */
};
MODULE_DEVICE_TABLE(pci, lanai_pci_tbl);
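
The lanai ID table is rewritten with the terse PCI_VDEVICE() initialiser, which fills in the PCI_ANY_ID subsystem fields itself. A small sketch of the same table shape; the device IDs here are placeholders, not the LANAI ones.

/* Sketch: a pci_device_id table built with PCI_VDEVICE(), as in the
 * lanai hunk above.  Device IDs are placeholders.
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id my_pci_tbl[] = {
        { PCI_VDEVICE(EF, 0x0003) },            /* placeholder device ID */
        { PCI_VDEVICE(EF, 0x0005) },            /* placeholder device ID */
        { 0, }                                  /* terminal entry */
};
MODULE_DEVICE_TABLE(pci, my_pci_tbl);
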
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index 480947f4e01e..842e26c45557 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -134,7 +134,7 @@ nicstar_read_eprom_status( virt_addr_t base )
/* Send read instruction */
val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
- for (i=0; i<sizeof rdsrtab/sizeof rdsrtab[0]; i++)
+ for (i=0; i<ARRAY_SIZE(rdsrtab); i++)
{
NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
(val | rdsrtab[i]) );
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 2ad2527cf5b3..58583c6ac5be 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -915,7 +915,7 @@ static int open_tx_first(struct atm_vcc *vcc)
unsigned long flags;
u32 *loop;
unsigned short chan;
- int pcr,unlimited;
+ int unlimited;
DPRINTK("open_tx_first\n");
zatm_dev = ZATM_DEV(vcc->dev);
@@ -936,6 +936,8 @@ static int open_tx_first(struct atm_vcc *vcc)
vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
else {
+ int uninitialized_var(pcr);
+
if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
@@ -1182,7 +1184,6 @@ static int __devinit zatm_init(struct atm_dev *dev)
struct zatm_dev *zatm_dev;
struct pci_dev *pci_dev;
unsigned short command;
- unsigned char revision;
int error,i,last;
unsigned long t0,t1,t2;
@@ -1192,8 +1193,7 @@ static int __devinit zatm_init(struct atm_dev *dev)
pci_dev = zatm_dev->pci_dev;
zatm_dev->base = pci_resource_start(pci_dev, 0);
zatm_dev->irq = pci_dev->irq;
- if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command)) ||
- (error = pci_read_config_byte(pci_dev,PCI_REVISION_ID,&revision))) {
+ if ((error = pci_read_config_word(pci_dev,PCI_COMMAND,&command))) {
printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%02x\n",
dev->number,error);
return -EINVAL;
@@ -1206,7 +1206,7 @@ static int __devinit zatm_init(struct atm_dev *dev)
}
eprom_get_esi(dev);
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d,base=0x%x,irq=%d,",
- dev->number,revision,zatm_dev->base,zatm_dev->irq);
+ dev->number,pci_dev->revision,zatm_dev->base,zatm_dev->irq);
/* reset uPD98401 */
zout(0,SWR);
while (!(zin(GSR) & uPD98401_INT_IND));
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index ea4fe3e48f33..de2fcce10ba5 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -5,8 +5,11 @@
# Auxiliary display drivers configuration.
#
-menu "Auxiliary Display support"
+menuconfig AUXDISPLAY
depends on PARPORT
+ bool "Auxiliary Display support"
+
+if AUXDISPLAY && PARPORT
config KS0108
tristate "KS0108 LCD Controller"
@@ -111,4 +114,5 @@ config CFAG12864B_RATE
If you compile this as a module, you can still override this
value using the module parameters.
-endmenu
+
+endif # AUXDISPLAY
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 1ec0654665cf..7370d7cf5988 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include "base.h"
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 5512d84452f2..47eb02d9f1af 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -44,6 +44,6 @@ struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
extern char *make_class_name(const char *name, struct kobject *kobj);
-extern void devres_release_all(struct device *dev);
+extern int devres_release_all(struct device *dev);
extern struct kset devices_subsys;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index dca734819e50..61c67526a656 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -138,12 +138,24 @@ void bus_remove_file(struct bus_type * bus, struct bus_attribute * attr)
}
}
-static struct kobj_type ktype_bus = {
+static struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
+};
+
+static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+ struct kobj_type *ktype = get_ktype(kobj);
+ if (ktype == &bus_ktype)
+ return 1;
+ return 0;
+}
+
+static struct kset_uevent_ops bus_uevent_ops = {
+ .filter = bus_uevent_filter,
};
-static decl_subsys(bus, &ktype_bus, NULL);
+static decl_subsys(bus, &bus_ktype, &bus_uevent_ops);
#ifdef CONFIG_HOTPLUG
@@ -562,7 +574,6 @@ static int add_probe_files(struct bus_type *bus)
bus->drivers_probe_attr.attr.name = "drivers_probe";
bus->drivers_probe_attr.attr.mode = S_IWUSR;
- bus->drivers_probe_attr.attr.owner = bus->owner;
bus->drivers_probe_attr.store = store_drivers_probe;
retval = bus_create_file(bus, &bus->drivers_probe_attr);
if (retval)
@@ -570,7 +581,6 @@ static int add_probe_files(struct bus_type *bus)
bus->drivers_autoprobe_attr.attr.name = "drivers_autoprobe";
bus->drivers_autoprobe_attr.attr.mode = S_IWUSR | S_IRUGO;
- bus->drivers_autoprobe_attr.attr.owner = bus->owner;
bus->drivers_autoprobe_attr.show = show_drivers_autoprobe;
bus->drivers_autoprobe_attr.store = store_drivers_autoprobe;
retval = bus_create_file(bus, &bus->drivers_autoprobe_attr);
@@ -610,7 +620,8 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_put_bus;
drv->kobj.kset = &bus->drivers;
- if ((error = kobject_register(&drv->kobj)))
+ error = kobject_register(&drv->kobj);
+ if (error)
goto out_put_bus;
if (drv->bus->drivers_autoprobe) {
@@ -760,7 +771,8 @@ static int bus_add_attrs(struct bus_type * bus)
if (bus->bus_attrs) {
for (i = 0; attr_name(bus->bus_attrs[i]); i++) {
- if ((error = bus_create_file(bus,&bus->bus_attrs[i])))
+ error = bus_create_file(bus,&bus->bus_attrs[i]);
+ if (error)
goto Err;
}
}
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8c506dbe3913..4d2222618b78 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -312,9 +312,6 @@ static void class_dev_release(struct kobject * kobj)
pr_debug("device class '%s': release.\n", cd->class_id);
- kfree(cd->devt_attr);
- cd->devt_attr = NULL;
-
if (cd->release)
cd->release(cd);
else if (cls->release)
@@ -547,6 +544,9 @@ static ssize_t show_dev(struct class_device *class_dev, char *buf)
return print_dev_t(buf, class_dev->devt);
}
+static struct class_device_attribute class_devt_attr =
+ __ATTR(dev, S_IRUGO, show_dev, NULL);
+
static ssize_t store_uevent(struct class_device *class_dev,
const char *buf, size_t count)
{
@@ -554,6 +554,9 @@ static ssize_t store_uevent(struct class_device *class_dev,
return count;
}
+static struct class_device_attribute class_uevent_attr =
+ __ATTR(uevent, S_IWUSR, NULL, store_uevent);
+
void class_device_initialize(struct class_device *class_dev)
{
kobj_set_kset_s(class_dev, class_obj_subsys);
@@ -603,32 +606,15 @@ int class_device_add(struct class_device *class_dev)
&parent_class->subsys.kobj, "subsystem");
if (error)
goto out3;
- class_dev->uevent_attr.attr.name = "uevent";
- class_dev->uevent_attr.attr.mode = S_IWUSR;
- class_dev->uevent_attr.attr.owner = parent_class->owner;
- class_dev->uevent_attr.store = store_uevent;
- error = class_device_create_file(class_dev, &class_dev->uevent_attr);
+
+ error = class_device_create_file(class_dev, &class_uevent_attr);
if (error)
goto out3;
if (MAJOR(class_dev->devt)) {
- struct class_device_attribute *attr;
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr) {
- error = -ENOMEM;
- goto out4;
- }
- attr->attr.name = "dev";
- attr->attr.mode = S_IRUGO;
- attr->attr.owner = parent_class->owner;
- attr->show = show_dev;
- error = class_device_create_file(class_dev, attr);
- if (error) {
- kfree(attr);
+ error = class_device_create_file(class_dev, &class_devt_attr);
+ if (error)
goto out4;
- }
-
- class_dev->devt_attr = attr;
}
error = class_device_add_attrs(class_dev);
@@ -671,10 +657,10 @@ int class_device_add(struct class_device *class_dev)
out6:
class_device_remove_attrs(class_dev);
out5:
- if (class_dev->devt_attr)
- class_device_remove_file(class_dev, class_dev->devt_attr);
+ if (MAJOR(class_dev->devt))
+ class_device_remove_file(class_dev, &class_devt_attr);
out4:
- class_device_remove_file(class_dev, &class_dev->uevent_attr);
+ class_device_remove_file(class_dev, &class_uevent_attr);
out3:
kobject_del(&class_dev->kobj);
out2:
@@ -774,9 +760,9 @@ void class_device_del(struct class_device *class_dev)
sysfs_remove_link(&class_dev->kobj, "device");
}
sysfs_remove_link(&class_dev->kobj, "subsystem");
- class_device_remove_file(class_dev, &class_dev->uevent_attr);
- if (class_dev->devt_attr)
- class_device_remove_file(class_dev, class_dev->devt_attr);
+ class_device_remove_file(class_dev, &class_uevent_attr);
+ if (MAJOR(class_dev->devt))
+ class_device_remove_file(class_dev, &class_devt_attr);
class_device_remove_attrs(class_dev);
class_device_remove_groups(class_dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dd40d78a023d..3599ab2506d2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -24,6 +24,8 @@
#include "base.h"
#include "power/power.h"
+extern const char *kobject_actions[];
+
int (*platform_notify)(struct device * dev) = NULL;
int (*platform_notify_remove)(struct device * dev) = NULL;
@@ -303,13 +305,31 @@ out:
static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (memcmp(buf, "add", 3) != 0)
- dev_err(dev, "uevent: unsupported action-string; this will "
- "be ignored in a future kernel version");
+ size_t len = count;
+ enum kobject_action action;
+
+ if (len && buf[len-1] == '\n')
+ len--;
+
+ for (action = 0; action < KOBJ_MAX; action++) {
+ if (strncmp(kobject_actions[action], buf, len) != 0)
+ continue;
+ if (kobject_actions[action][len] != '\0')
+ continue;
+ kobject_uevent(&dev->kobj, action);
+ goto out;
+ }
+
+ dev_err(dev, "uevent: unsupported action-string; this will "
+ "be ignored in a future kernel version\n");
kobject_uevent(&dev->kobj, KOBJ_ADD);
+out:
return count;
}
+static struct device_attribute uevent_attr =
+ __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);
+
static int device_add_attributes(struct device *dev,
struct device_attribute *attrs)
{
@@ -423,6 +443,9 @@ static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
return print_dev_t(buf, dev->devt);
}
+static struct device_attribute devt_attr =
+ __ATTR(dev, S_IRUGO, show_dev, NULL);
+
/*
* devices_subsys - structure to be registered with kobject core.
*/
@@ -637,6 +660,82 @@ static int setup_parent(struct device *dev, struct device *parent)
return 0;
}
+static int device_add_class_symlinks(struct device *dev)
+{
+ int error;
+
+ if (!dev->class)
+ return 0;
+ error = sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj,
+ "subsystem");
+ if (error)
+ goto out;
+ /*
+ * If this is not a "fake" compatible device, then create the
+ * symlink from the class to the device.
+ */
+ if (dev->kobj.parent != &dev->class->subsys.kobj) {
+ error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
+ dev->bus_id);
+ if (error)
+ goto out_subsys;
+ }
+ /* only bus-device parents get a "device"-link */
+ if (dev->parent && dev->parent->bus) {
+ error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
+ "device");
+ if (error)
+ goto out_busid;
+#ifdef CONFIG_SYSFS_DEPRECATED
+ {
+ char * class_name = make_class_name(dev->class->name,
+ &dev->kobj);
+ if (class_name)
+ error = sysfs_create_link(&dev->parent->kobj,
+ &dev->kobj, class_name);
+ kfree(class_name);
+ if (error)
+ goto out_device;
+ }
+#endif
+ }
+ return 0;
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+out_device:
+ if (dev->parent)
+ sysfs_remove_link(&dev->kobj, "device");
+#endif
+out_busid:
+ if (dev->kobj.parent != &dev->class->subsys.kobj)
+ sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
+out_subsys:
+ sysfs_remove_link(&dev->kobj, "subsystem");
+out:
+ return error;
+}
+
+static void device_remove_class_symlinks(struct device *dev)
+{
+ if (!dev->class)
+ return;
+ if (dev->parent) {
+#ifdef CONFIG_SYSFS_DEPRECATED
+ char *class_name;
+
+ class_name = make_class_name(dev->class->name, &dev->kobj);
+ if (class_name) {
+ sysfs_remove_link(&dev->parent->kobj, class_name);
+ kfree(class_name);
+ }
+#endif
+ sysfs_remove_link(&dev->kobj, "device");
+ }
+ if (dev->kobj.parent != &dev->class->subsys.kobj)
+ sysfs_remove_link(&dev->class->subsys.kobj, dev->bus_id);
+ sysfs_remove_link(&dev->kobj, "subsystem");
+}
+
/**
* device_add - add device to device hierarchy.
* @dev: device.
@@ -651,7 +750,6 @@ static int setup_parent(struct device *dev, struct device *parent)
int device_add(struct device *dev)
{
struct device *parent = NULL;
- char *class_name = NULL;
struct class_interface *class_intf;
int error = -EINVAL;
@@ -681,63 +779,27 @@ int device_add(struct device *dev)
blocking_notifier_call_chain(&dev->bus->bus_notifier,
BUS_NOTIFY_ADD_DEVICE, dev);
- dev->uevent_attr.attr.name = "uevent";
- dev->uevent_attr.attr.mode = S_IRUGO | S_IWUSR;
- if (dev->driver)
- dev->uevent_attr.attr.owner = dev->driver->owner;
- dev->uevent_attr.store = store_uevent;
- dev->uevent_attr.show = show_uevent;
- error = device_create_file(dev, &dev->uevent_attr);
+ error = device_create_file(dev, &uevent_attr);
if (error)
goto attrError;
if (MAJOR(dev->devt)) {
- struct device_attribute *attr;
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr) {
- error = -ENOMEM;
- goto ueventattrError;
- }
- attr->attr.name = "dev";
- attr->attr.mode = S_IRUGO;
- if (dev->driver)
- attr->attr.owner = dev->driver->owner;
- attr->show = show_dev;
- error = device_create_file(dev, attr);
- if (error) {
- kfree(attr);
+ error = device_create_file(dev, &devt_attr);
+ if (error)
goto ueventattrError;
- }
-
- dev->devt_attr = attr;
}
- if (dev->class) {
- sysfs_create_link(&dev->kobj, &dev->class->subsys.kobj,
- "subsystem");
- /* If this is not a "fake" compatible device, then create the
- * symlink from the class to the device. */
- if (dev->kobj.parent != &dev->class->subsys.kobj)
- sysfs_create_link(&dev->class->subsys.kobj,
- &dev->kobj, dev->bus_id);
- if (parent) {
- sysfs_create_link(&dev->kobj, &dev->parent->kobj,
- "device");
-#ifdef CONFIG_SYSFS_DEPRECATED
- class_name = make_class_name(dev->class->name,
- &dev->kobj);
- if (class_name)
- sysfs_create_link(&dev->parent->kobj,
- &dev->kobj, class_name);
-#endif
- }
- }
-
- if ((error = device_add_attrs(dev)))
+ error = device_add_class_symlinks(dev);
+ if (error)
+ goto SymlinkError;
+ error = device_add_attrs(dev);
+ if (error)
goto AttrsError;
- if ((error = device_pm_add(dev)))
+ error = device_pm_add(dev);
+ if (error)
goto PMError;
- if ((error = bus_add_device(dev)))
+ error = bus_add_device(dev);
+ if (error)
goto BusError;
kobject_uevent(&dev->kobj, KOBJ_ADD);
bus_attach_device(dev);
@@ -756,7 +818,6 @@ int device_add(struct device *dev)
up(&dev->class->sem);
}
Done:
- kfree(class_name);
put_device(dev);
return error;
BusError:
@@ -767,10 +828,10 @@ int device_add(struct device *dev)
BUS_NOTIFY_DEL_DEVICE, dev);
device_remove_attrs(dev);
AttrsError:
- if (dev->devt_attr) {
- device_remove_file(dev, dev->devt_attr);
- kfree(dev->devt_attr);
- }
+ device_remove_class_symlinks(dev);
+ SymlinkError:
+ if (MAJOR(dev->devt))
+ device_remove_file(dev, &devt_attr);
if (dev->class) {
sysfs_remove_link(&dev->kobj, "subsystem");
@@ -792,7 +853,7 @@ int device_add(struct device *dev)
}
}
ueventattrError:
- device_remove_file(dev, &dev->uevent_attr);
+ device_remove_file(dev, &uevent_attr);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
kobject_del(&dev->kobj);
@@ -869,10 +930,8 @@ void device_del(struct device * dev)
if (parent)
klist_del(&dev->knode_parent);
- if (dev->devt_attr) {
- device_remove_file(dev, dev->devt_attr);
- kfree(dev->devt_attr);
- }
+ if (MAJOR(dev->devt))
+ device_remove_file(dev, &devt_attr);
if (dev->class) {
sysfs_remove_link(&dev->kobj, "subsystem");
/* If this is not a "fake" compatible device, remove the
@@ -926,7 +985,7 @@ void device_del(struct device * dev)
up(&dev->class->sem);
}
}
- device_remove_file(dev, &dev->uevent_attr);
+ device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
bus_remove_device(dev);
@@ -1155,7 +1214,7 @@ int device_rename(struct device *dev, char *new_name)
{
char *old_class_name = NULL;
char *new_class_name = NULL;
- char *old_symlink_name = NULL;
+ char *old_device_name = NULL;
int error;
dev = get_device(dev);
@@ -1169,42 +1228,49 @@ int device_rename(struct device *dev, char *new_name)
old_class_name = make_class_name(dev->class->name, &dev->kobj);
#endif
- if (dev->class) {
- old_symlink_name = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
- if (!old_symlink_name) {
- error = -ENOMEM;
- goto out_free_old_class;
- }
- strlcpy(old_symlink_name, dev->bus_id, BUS_ID_SIZE);
+ old_device_name = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
+ if (!old_device_name) {
+ error = -ENOMEM;
+ goto out;
}
-
+ strlcpy(old_device_name, dev->bus_id, BUS_ID_SIZE);
strlcpy(dev->bus_id, new_name, BUS_ID_SIZE);
error = kobject_rename(&dev->kobj, new_name);
+ if (error) {
+ strlcpy(dev->bus_id, old_device_name, BUS_ID_SIZE);
+ goto out;
+ }
#ifdef CONFIG_SYSFS_DEPRECATED
if (old_class_name) {
new_class_name = make_class_name(dev->class->name, &dev->kobj);
if (new_class_name) {
- sysfs_create_link(&dev->parent->kobj, &dev->kobj,
- new_class_name);
+ error = sysfs_create_link(&dev->parent->kobj,
+ &dev->kobj, new_class_name);
+ if (error)
+ goto out;
sysfs_remove_link(&dev->parent->kobj, old_class_name);
}
}
#endif
if (dev->class) {
- sysfs_remove_link(&dev->class->subsys.kobj,
- old_symlink_name);
- sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
- dev->bus_id);
+ sysfs_remove_link(&dev->class->subsys.kobj, old_device_name);
+ error = sysfs_create_link(&dev->class->subsys.kobj, &dev->kobj,
+ dev->bus_id);
+ if (error) {
+ /* Uh... how to unravel this if restoring can fail? */
+ dev_err(dev, "%s: sysfs_create_symlink failed (%d)\n",
+ __FUNCTION__, error);
+ }
}
+out:
put_device(dev);
kfree(new_class_name);
- kfree(old_symlink_name);
- out_free_old_class:
kfree(old_class_name);
+ kfree(old_device_name);
return error;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b0088b0efecd..7ac474db88c5 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -281,24 +281,16 @@ int driver_attach(struct device_driver * drv)
return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
-/**
- * device_release_driver - manually detach device from driver.
- * @dev: device.
- *
- * Manually detach device from driver.
- *
+/*
* __device_release_driver() must be called with @dev->sem held.
- * When called for a USB interface, @dev->parent->sem must be held
- * as well.
+ * When called for a USB interface, @dev->parent->sem must be held as well.
*/
-
static void __device_release_driver(struct device * dev)
{
struct device_driver * drv;
- drv = dev->driver;
+ drv = get_driver(dev->driver);
if (drv) {
- get_driver(drv);
driver_sysfs_remove(dev);
sysfs_remove_link(&dev->kobj, "driver");
klist_remove(&dev->knode_driver);
@@ -318,6 +310,13 @@ static void __device_release_driver(struct device * dev)
}
}
+/**
+ * device_release_driver - manually detach device from driver.
+ * @dev: device.
+ *
+ * Manually detach device from driver.
+ * When called for a USB interface, @dev->parent->sem must be held.
+ */
void device_release_driver(struct device * dev)
{
/*
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e1c0730a3b99..e8beb8e5b626 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -10,6 +10,8 @@
#include <linux/device.h>
#include <linux/module.h>
+#include "base.h"
+
struct devres_node {
struct list_head entry;
dr_release_t release;
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index 91970e9bb05e..7647abfe1890 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -127,7 +127,7 @@ dma_pool_create (const char *name, struct device *dev,
} else if (allocation < size)
return NULL;
- if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
+ if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
return retval;
strlcpy (retval->name, name, sizeof retval->name);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 89a5f4a54913..53f0ee6f3016 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -175,7 +175,7 @@ static ssize_t firmware_loading_store(struct device *dev,
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
static ssize_t
-firmware_data_read(struct kobject *kobj,
+firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
@@ -240,7 +240,7 @@ fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
* the driver as a firmware image.
**/
static ssize_t
-firmware_data_write(struct kobject *kobj,
+firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = to_dev(kobj);
@@ -271,7 +271,7 @@ out:
}
static struct bin_attribute firmware_attr_data_tmpl = {
- .attr = {.name = "data", .mode = 0644, .owner = THIS_MODULE},
+ .attr = {.name = "data", .mode = 0644},
.size = 0,
.read = firmware_data_read,
.write = firmware_data_write,
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 91f230939c1e..966a5e287415 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,10 +1,10 @@
obj-y := shutdown.o
-obj-$(CONFIG_PM) += main.o suspend.o resume.o runtime.o sysfs.o
+obj-$(CONFIG_PM) += main.o suspend.o resume.o sysfs.o
obj-$(CONFIG_PM_TRACE) += trace.o
ifeq ($(CONFIG_DEBUG_DRIVER),y)
EXTRA_CFLAGS += -DDEBUG
endif
-ifeq ($(CONFIG_PM_DEBUG),y)
+ifeq ($(CONFIG_PM_VERBOSE),y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 05dc8764e765..eb9f38d0aa58 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -20,64 +20,44 @@
*/
#include <linux/device.h>
+#include <linux/mutex.h>
+
#include "power.h"
LIST_HEAD(dpm_active);
LIST_HEAD(dpm_off);
LIST_HEAD(dpm_off_irq);
-DECLARE_MUTEX(dpm_sem);
-DECLARE_MUTEX(dpm_list_sem);
+DEFINE_MUTEX(dpm_mtx);
+DEFINE_MUTEX(dpm_list_mtx);
int (*platform_enable_wakeup)(struct device *dev, int is_on);
-
-/**
- * device_pm_set_parent - Specify power dependency.
- * @dev: Device who needs power.
- * @parent: Device that supplies power.
- *
- * This function is used to manually describe a power-dependency
- * relationship. It may be used to specify a transversal relationship
- * (where the power supplier is not the physical (or electrical)
- * ancestor of a specific device.
- * The effect of this is that the supplier will not be powered down
- * before the power dependent.
- */
-
-void device_pm_set_parent(struct device * dev, struct device * parent)
-{
- put_device(dev->power.pm_parent);
- dev->power.pm_parent = get_device(parent);
-}
-EXPORT_SYMBOL_GPL(device_pm_set_parent);
-
-int device_pm_add(struct device * dev)
+int device_pm_add(struct device *dev)
{
int error;
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
- down(&dpm_list_sem);
+ mutex_lock(&dpm_list_mtx);
list_add_tail(&dev->power.entry, &dpm_active);
- device_pm_set_parent(dev, dev->parent);
- if ((error = dpm_sysfs_add(dev)))
+ error = dpm_sysfs_add(dev);
+ if (error)
list_del(&dev->power.entry);
- up(&dpm_list_sem);
+ mutex_unlock(&dpm_list_mtx);
return error;
}
-void device_pm_remove(struct device * dev)
+void device_pm_remove(struct device *dev)
{
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
- down(&dpm_list_sem);
+ mutex_lock(&dpm_list_mtx);
dpm_sysfs_remove(dev);
- put_device(dev->power.pm_parent);
list_del_init(&dev->power.entry);
- up(&dpm_list_sem);
+ mutex_unlock(&dpm_list_mtx);
}
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index fb3d35a9e101..591a0dd5deee 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -14,12 +14,12 @@ extern void device_shutdown(void);
/*
* Used to synchronize global power management operations.
*/
-extern struct semaphore dpm_sem;
+extern struct mutex dpm_mtx;
/*
* Used to serialize changes to the dpm_* lists.
*/
-extern struct semaphore dpm_list_sem;
+extern struct mutex dpm_list_mtx;
/*
* The PM lists.
@@ -62,11 +62,6 @@ extern int resume_device(struct device *);
*/
extern int suspend_device(struct device *, pm_message_t);
-
-/*
- * runtime.c
- */
-
#else /* CONFIG_PM */
diff --git a/drivers/base/power/resume.c b/drivers/base/power/resume.c
index a2c64188d713..00fd84ae6e66 100644
--- a/drivers/base/power/resume.c
+++ b/drivers/base/power/resume.c
@@ -29,14 +29,6 @@ int resume_device(struct device * dev)
down(&dev->sem);
- if (dev->power.pm_parent
- && dev->power.pm_parent->power.power_state.event) {
- dev_err(dev, "PM: resume from %d, parent %s still %d\n",
- dev->power.power_state.event,
- dev->power.pm_parent->bus_id,
- dev->power.pm_parent->power.power_state.event);
- }
-
if (dev->bus && dev->bus->resume) {
dev_dbg(dev,"resuming\n");
error = dev->bus->resume(dev);
@@ -80,7 +72,7 @@ static int resume_device_early(struct device * dev)
*/
void dpm_resume(void)
{
- down(&dpm_list_sem);
+ mutex_lock(&dpm_list_mtx);
while(!list_empty(&dpm_off)) {
struct list_head * entry = dpm_off.next;
struct device * dev = to_device(entry);
@@ -88,13 +80,12 @@ void dpm_resume(void)
get_device(dev);
list_move_tail(entry, &dpm_active);
- up(&dpm_list_sem);
- if (!dev->power.prev_state.event)
- resume_device(dev);
- down(&dpm_list_sem);
+ mutex_unlock(&dpm_list_mtx);
+ resume_device(dev);
+ mutex_lock(&dpm_list_mtx);
put_device(dev);
}
- up(&dpm_list_sem);
+ mutex_unlock(&dpm_list_mtx);
}
@@ -108,9 +99,9 @@ void dpm_resume(void)
void device_resume(void)
{
might_sleep();
- down(&dpm_sem);
+ mutex_lock(&dpm_mtx);
dpm_resume();
- up(&dpm_sem);
+ mutex_unlock(&dpm_mtx);
}
EXPORT_SYMBOL_GPL(device_resume);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
deleted file mode 100644
index 96370ec1d673..000000000000
--- a/drivers/base/power/runtime.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * drivers/base/power/runtime.c - Handling dynamic device power management.
- *
- * Copyright (c) 2003 Patrick Mochel
- * Copyright (c) 2003 Open Source Development Lab
- *
- */
-
-#include <linux/device.h>
-#include "power.h"
-
-
-static void runtime_resume(struct device * dev)
-{
- dev_dbg(dev, "resuming\n");
- if (!dev->power.power_state.event)
- return;
- if (!resume_device(dev))
- dev->power.power_state = PMSG_ON;
-}
-
-
-/**
- * dpm_runtime_resume - Power one device back on.
- * @dev: Device.
- *
- * Bring one device back to the on state by first powering it
- * on, then restoring state. We only operate on devices that aren't
- * already on.
- * FIXME: We need to handle devices that are in an unknown state.
- */
-
-void dpm_runtime_resume(struct device * dev)
-{
- down(&dpm_sem);
- runtime_resume(dev);
- up(&dpm_sem);
-}
-EXPORT_SYMBOL(dpm_runtime_resume);
-
-
-/**
- * dpm_runtime_suspend - Put one device in low-power state.
- * @dev: Device.
- * @state: State to enter.
- */
-
-int dpm_runtime_suspend(struct device * dev, pm_message_t state)
-{
- int error = 0;
-
- down(&dpm_sem);
- if (dev->power.power_state.event == state.event)
- goto Done;
-
- if (dev->power.power_state.event)
- runtime_resume(dev);
-
- if (!(error = suspend_device(dev, state)))
- dev->power.power_state = state;
- Done:
- up(&dpm_sem);
- return error;
-}
-EXPORT_SYMBOL(dpm_runtime_suspend);
-
-
-#if 0
-/**
- * dpm_set_power_state - Update power_state field.
- * @dev: Device.
- * @state: Power state device is in.
- *
- * This is an update mechanism for drivers to notify the core
- * what power state a device is in. Device probing code may not
- * always be able to tell, but we need accurate information to
- * work reliably.
- */
-void dpm_set_power_state(struct device * dev, pm_message_t state)
-{
- down(&dpm_sem);
- dev->power.power_state = state;
- up(&dpm_sem);
-}
-#endif /* 0 */
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index 42d2b86ba765..26df9b231737 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -40,6 +40,14 @@ static inline char *suspend_verb(u32 event)
}
+static void
+suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
+{
+ dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
+ ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
+ ", may wakeup" : "");
+}
+
/**
* suspend_device - Save state of one device.
* @dev: Device.
@@ -55,49 +63,21 @@ int suspend_device(struct device * dev, pm_message_t state)
dev_dbg(dev, "PM: suspend %d-->%d\n",
dev->power.power_state.event, state.event);
}
- if (dev->power.pm_parent
- && dev->power.pm_parent->power.power_state.event) {
- dev_err(dev,
- "PM: suspend %d->%d, parent %s already %d\n",
- dev->power.power_state.event, state.event,
- dev->power.pm_parent->bus_id,
- dev->power.pm_parent->power.power_state.event);
- }
-
- dev->power.prev_state = dev->power.power_state;
- if (dev->class && dev->class->suspend && !dev->power.power_state.event) {
- dev_dbg(dev, "class %s%s\n",
- suspend_verb(state.event),
- ((state.event == PM_EVENT_SUSPEND)
- && device_may_wakeup(dev))
- ? ", may wakeup"
- : ""
- );
+ if (dev->class && dev->class->suspend) {
+ suspend_device_dbg(dev, state, "class ");
error = dev->class->suspend(dev, state);
suspend_report_result(dev->class->suspend, error);
}
- if (!error && dev->type && dev->type->suspend && !dev->power.power_state.event) {
- dev_dbg(dev, "%s%s\n",
- suspend_verb(state.event),
- ((state.event == PM_EVENT_SUSPEND)
- && device_may_wakeup(dev))
- ? ", may wakeup"
- : ""
- );
+ if (!error && dev->type && dev->type->suspend) {
+ suspend_device_dbg(dev, state, "type ");
error = dev->type->suspend(dev, state);
suspend_report_result(dev->type->suspend, error);
}
- if (!error && dev->bus && dev->bus->suspend && !dev->power.power_state.event) {
- dev_dbg(dev, "%s%s\n",
- suspend_verb(state.event),
- ((state.event == PM_EVENT_SUSPEND)
- && device_may_wakeup(dev))
- ? ", may wakeup"
- : ""
- );
+ if (!error && dev->bus && dev->bus->suspend) {
+ suspend_device_dbg(dev, state, "");
error = dev->bus->suspend(dev, state);
suspend_report_result(dev->bus->suspend, error);
}
@@ -108,21 +88,15 @@ int suspend_device(struct device * dev, pm_message_t state)
/*
* This is called with interrupts off, only a single CPU
- * running. We can't do down() on a semaphore (and we don't
+ * running. We can't acquire a mutex or semaphore (and we don't
* need the protection)
*/
static int suspend_device_late(struct device *dev, pm_message_t state)
{
int error = 0;
- if (dev->bus && dev->bus->suspend_late && !dev->power.power_state.event) {
- dev_dbg(dev, "LATE %s%s\n",
- suspend_verb(state.event),
- ((state.event == PM_EVENT_SUSPEND)
- && device_may_wakeup(dev))
- ? ", may wakeup"
- : ""
- );
+ if (dev->bus && dev->bus->suspend_late) {
+ suspend_device_dbg(dev, state, "LATE ");
error = dev->bus->suspend_late(dev, state);
suspend_report_result(dev->bus->suspend_late, error);
}
@@ -153,18 +127,18 @@ int device_suspend(pm_message_t state)
int error = 0;
might_sleep();
- down(&dpm_sem);
- down(&dpm_list_sem);
+ mutex_lock(&dpm_mtx);
+ mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_active) && error == 0) {
struct list_head * entry = dpm_active.prev;
struct device * dev = to_device(entry);
get_device(dev);
- up(&dpm_list_sem);
+ mutex_unlock(&dpm_list_mtx);
error = suspend_device(dev, state);
- down(&dpm_list_sem);
+ mutex_lock(&dpm_list_mtx);
/* Check if the device got removed */
if (!list_empty(&dev->power.entry)) {
@@ -179,11 +153,11 @@ int device_suspend(pm_message_t state)
error == -EAGAIN ? " (please convert to suspend_late)" : "");
put_device(dev);
}
- up(&dpm_list_sem);
+ mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume();
- up(&dpm_sem);
+ mutex_unlock(&dpm_mtx);
return error;
}
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 2d47517dbe32..f2ed179cd695 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -7,69 +7,6 @@
#include "power.h"
-#ifdef CONFIG_PM_SYSFS_DEPRECATED
-
-/**
- * state - Control current power state of device
- *
- * show() returns the current power state of the device. '0' indicates
- * the device is on. Other values (2) indicate the device is in some low
- * power state.
- *
- * store() sets the current power state, which is an integer valued
- * 0, 2, or 3. Devices with bus.suspend_late(), or bus.resume_early()
- * methods fail this operation; those methods couldn't be called.
- * Otherwise,
- *
- * - If the recorded dev->power.power_state.event matches the
- * target value, nothing is done.
- * - If the recorded event code is nonzero, the device is reactivated
- * by calling bus.resume() and/or class.resume().
- * - If the target value is nonzero, the device is suspended by
- * calling class.suspend() and/or bus.suspend() with event code
- * PM_EVENT_SUSPEND.
- *
- * This mechanism is DEPRECATED and should only be used for testing.
- */
-
-static ssize_t state_show(struct device * dev, struct device_attribute *attr, char * buf)
-{
- if (dev->power.power_state.event)
- return sprintf(buf, "2\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t state_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n)
-{
- pm_message_t state;
- int error = -EINVAL;
-
- /* disallow incomplete suspend sequences */
- if (dev->bus && (dev->bus->suspend_late || dev->bus->resume_early))
- return error;
-
- state.event = PM_EVENT_SUSPEND;
- /* Older apps expected to write "3" here - confused with PCI D3 */
- if ((n == 1) && !strcmp(buf, "3"))
- error = dpm_runtime_suspend(dev, state);
-
- if ((n == 1) && !strcmp(buf, "2"))
- error = dpm_runtime_suspend(dev, state);
-
- if ((n == 1) && !strcmp(buf, "0")) {
- dpm_runtime_resume(dev);
- error = 0;
- }
-
- return error ? error : n;
-}
-
-static DEVICE_ATTR(state, 0644, state_show, state_store);
-
-
-#endif /* CONFIG_PM_SYSFS_DEPRECATED */
-
/*
* wakeup - Report/change current wakeup option for device
*
@@ -143,9 +80,6 @@ static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
static struct attribute * power_attrs[] = {
-#ifdef CONFIG_PM_SYSFS_DEPRECATED
- &dev_attr_state.attr,
-#endif
&dev_attr_wakeup.attr,
NULL,
};
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a9ab30fefffc..2b0c601e422e 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -142,6 +142,7 @@ void set_trace_device(struct device *dev)
{
dev_hash_value = hash_string(DEVSEED, dev->bus_id, DEVHASH);
}
+EXPORT_SYMBOL(set_trace_device);
/*
* We could just take the "tracedata" index into the .tracedata
@@ -162,6 +163,7 @@ void generate_resume_trace(void *tracedata, unsigned int user)
file_hash_value = hash_string(lineno, file, FILEHASH);
set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
}
+EXPORT_SYMBOL(generate_resume_trace);
extern char __tracedata_start, __tracedata_end;
static int show_file_hash(unsigned int value)
@@ -170,7 +172,8 @@ static int show_file_hash(unsigned int value)
char *tracedata;
match = 0;
- for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ; tracedata += 6) {
+ for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
+ tracedata += 2 + sizeof(unsigned long)) {
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
unsigned int hash = hash_string(lineno, file, FILEHASH);
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 29f1291966c1..18febe26caa1 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -21,7 +21,7 @@
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/device.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "base.h"
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(sysdev_class_unregister);
static LIST_HEAD(sysdev_drivers);
-static DECLARE_MUTEX(sysdev_drivers_lock);
+static DEFINE_MUTEX(sysdev_drivers_lock);
/**
 * sysdev_driver_register - Register auxiliary driver
@@ -172,7 +172,7 @@ static DECLARE_MUTEX(sysdev_drivers_lock);
int sysdev_driver_register(struct sysdev_class * cls,
struct sysdev_driver * drv)
{
- down(&sysdev_drivers_lock);
+ mutex_lock(&sysdev_drivers_lock);
if (cls && kset_get(&cls->kset)) {
list_add_tail(&drv->entry, &cls->drivers);
@@ -184,7 +184,7 @@ int sysdev_driver_register(struct sysdev_class * cls,
}
} else
list_add_tail(&drv->entry, &sysdev_drivers);
- up(&sysdev_drivers_lock);
+ mutex_unlock(&sysdev_drivers_lock);
return 0;
}
@@ -197,7 +197,7 @@ int sysdev_driver_register(struct sysdev_class * cls,
void sysdev_driver_unregister(struct sysdev_class * cls,
struct sysdev_driver * drv)
{
- down(&sysdev_drivers_lock);
+ mutex_lock(&sysdev_drivers_lock);
list_del_init(&drv->entry);
if (cls) {
if (drv->remove) {
@@ -207,7 +207,7 @@ void sysdev_driver_unregister(struct sysdev_class * cls,
}
kset_put(&cls->kset);
}
- up(&sysdev_drivers_lock);
+ mutex_unlock(&sysdev_drivers_lock);
}
EXPORT_SYMBOL_GPL(sysdev_driver_register);
@@ -246,7 +246,7 @@ int sysdev_register(struct sys_device * sysdev)
if (!error) {
struct sysdev_driver * drv;
- down(&sysdev_drivers_lock);
+ mutex_lock(&sysdev_drivers_lock);
/* Generic notification is implicit, because it's that
* code that should have called us.
*/
@@ -262,7 +262,7 @@ int sysdev_register(struct sys_device * sysdev)
if (drv->add)
drv->add(sysdev);
}
- up(&sysdev_drivers_lock);
+ mutex_unlock(&sysdev_drivers_lock);
}
return error;
}
@@ -271,7 +271,7 @@ void sysdev_unregister(struct sys_device * sysdev)
{
struct sysdev_driver * drv;
- down(&sysdev_drivers_lock);
+ mutex_lock(&sysdev_drivers_lock);
list_for_each_entry(drv, &sysdev_drivers, entry) {
if (drv->remove)
drv->remove(sysdev);
@@ -281,7 +281,7 @@ void sysdev_unregister(struct sys_device * sysdev)
if (drv->remove)
drv->remove(sysdev);
}
- up(&sysdev_drivers_lock);
+ mutex_unlock(&sysdev_drivers_lock);
kobject_unregister(&sysdev->kobj);
}
@@ -308,7 +308,7 @@ void sysdev_shutdown(void)
pr_debug("Shutting Down System Devices\n");
- down(&sysdev_drivers_lock);
+ mutex_lock(&sysdev_drivers_lock);
list_for_each_entry_reverse(cls, &system_subsys.list,
kset.kobj.entry) {
struct sys_device * sysdev;
@@ -337,7 +337,7 @@ void sysdev_shutdown(void)
cls->shutdown(sysdev);
}
}
- up(&sysdev_drivers_lock);
+ mutex_unlock(&sysdev_drivers_lock);
}
static void __sysdev_resume(struct sys_device *dev)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 6e23af1ecbdb..a4a311992408 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -59,17 +59,6 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
-config ATARI_SLM
- tristate "Atari SLM laser printer support"
- depends on ATARI
- help
- If you have an Atari SLM laser printer, say Y to include support for
- it in the kernel. Otherwise, say N. This driver is also available as
- a module ( = code which can be inserted in and removed from the
- running kernel whenever you want). The module will be called
- acsi_slm. Be warned: the driver needs much ST-RAM and can cause
- problems due to that fact!
-
config BLK_DEV_XD
tristate "XT hard disk support"
depends on ISA && ISA_DMA_API
@@ -113,7 +102,7 @@ source "drivers/block/paride/Kconfig"
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
help
This is the driver for Compaq Smart Array controllers. Everyone
using these boards should say Y here. See the file
@@ -423,6 +412,28 @@ config ATA_OVER_ETH
This driver provides Support for ATA over Ethernet block
devices like the Coraid EtherDrive (R) Storage Blade.
+config SUNVDC
+ tristate "Sun Virtual Disk Client support"
+ depends on SUN_LDOMS
+ help
+ Support for virtual disk devices as a client under Sun
+ Logical Domains.
+
source "drivers/s390/block/Kconfig"
+config XILINX_SYSACE
+ tristate "Xilinx SystemACE support"
+ depends on 4xx
+ help
+ Include support for the Xilinx SystemACE CompactFlash interface
+
+config XEN_BLKDEV_FRONTEND
+ tristate "Xen virtual block device support"
+ depends on XEN
+ default y
+ help
+ This driver implements the front-end of the Xen virtual
+ block device driver. It communicates with a back-end driver
+ in another domain which drives the actual block device.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index e5f98acc5d52..a7a099027fca 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -8,8 +8,8 @@
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
+obj-$(CONFIG_PS3_DISK) += ps3disk.o
obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
-obj-$(CONFIG_ATARI_SLM) += acsi_slm.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += rd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
@@ -18,7 +18,9 @@ obj-$(CONFIG_BLK_DEV_XD) += xd.o
obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
+obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
+obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
@@ -28,3 +30,5 @@ obj-$(CONFIG_VIODASD) += viodasd.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_BLK_DEV_UB) += ub.o
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
+obj-$(CONFIG_LGUEST_GUEST) += lguest_blk.o
diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c
deleted file mode 100644
index 1d9d9b4f48cc..000000000000
--- a/drivers/block/acsi_slm.c
+++ /dev/null
@@ -1,1032 +0,0 @@
-/*
- * acsi_slm.c -- Device driver for the Atari SLM laser printer
- *
- * Copyright 1995 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-/*
-
-Notes:
-
-The major number for SLM printers is 28 (like ACSI), but as a character
-device, not block device. The minor number is the number of the printer (if
-you have more than one SLM; currently max. 2 (#define-constant) SLMs are
-supported). The device can be opened for reading and writing. If reading it,
-you get some status infos (MODE SENSE data). Writing mode is used for the data
-to be printed. Some ioctls allow to get the printer status and to tune printer
-modes and some internal variables.
-
-A special problem of the SLM driver is the timing and thus the buffering of
-the print data. The problem is that all the data for one page must be present
-in memory when printing starts, else --when swapping occurs-- the timing could
-not be guaranteed. There are several ways to assure this:
-
- 1) Reserve a buffer of 1196k (maximum page size) statically by
- atari_stram_alloc(). The data are collected there until they're complete,
- and then printing starts. Since the buffer is reserved, no further
- considerations about memory and swapping are needed. So this is the
- simplest method, but it needs a lot of memory for just the SLM.
-
- An striking advantage of this method is (supposed the SLM_CONT_CNT_REPROG
- method works, see there), that there are no timing problems with the DMA
- anymore.
-
- 2) The other method would be to reserve the buffer dynamically each time
- printing is required. I could think of looking at mem_map where the
- largest unallocted ST-RAM area is, taking the area, and then extending it
- by swapping out the neighbored pages, until the needed size is reached.
- This requires some mm hacking, but seems possible. The only obstacle could
- be pages that cannot be swapped out (reserved pages)...
-
- 3) Another possibility would be to leave the real data in user space and to
- work with two dribble buffers of about 32k in the driver: While the one
- buffer is DMAed to the SLM, the other can be filled with new data. But
- to keep the timing, that requires that the user data remain in memory and
- are not swapped out. Requires mm hacking, too, but maybe not so bad as
- method 2).
-
-*/
-
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/fs.h>
-#include <linux/major.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_acsi.h>
-#include <asm/atari_stdma.h>
-#include <asm/atari_stram.h>
-#include <asm/atari_SLM.h>
-
-
-#undef DEBUG
-
-/* Define this if the page data are continuous in physical memory. That
- * requires less reprogramming of the ST-DMA */
-#define SLM_CONTINUOUS_DMA
-
-/* Use continuous reprogramming of the ST-DMA counter register. This is
- * --strictly speaking-- not allowed, Atari recommends not to look at the
- * counter register while a DMA is going on. But I don't know if that applies
- * only for reading the register, or also writing to it. Writing only works
- * fine for me... The advantage is that the timing becomes absolutely
- * uncritical: Just update each, say 200ms, the counter reg to its maximum,
- * and the DMA will work until the status byte interrupt occurs.
- */
-#define SLM_CONT_CNT_REPROG
-
-#define CMDSET_TARG_LUN(cmd,targ,lun) \
- do { \
- cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5; \
- cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5; \
- } while(0)
-
-#define START_TIMER(to) mod_timer(&slm_timer, jiffies + (to))
-#define STOP_TIMER() del_timer(&slm_timer)
-
-
-static char slmreqsense_cmd[6] = { 0x03, 0, 0, 0, 0, 0 };
-static char slmprint_cmd[6] = { 0x0a, 0, 0, 0, 0, 0 };
-static char slminquiry_cmd[6] = { 0x12, 0, 0, 0, 0, 0x80 };
-static char slmmsense_cmd[6] = { 0x1a, 0, 0, 0, 255, 0 };
-#if 0
-static char slmmselect_cmd[6] = { 0x15, 0, 0, 0, 0, 0 };
-#endif
-
-
-#define MAX_SLM 2
-
-static struct slm {
- unsigned target; /* target number */
- unsigned lun; /* LUN in target controller */
- atomic_t wr_ok; /* set to 0 if output part busy */
- atomic_t rd_ok; /* set to 0 if status part busy */
-} slm_info[MAX_SLM];
-
-int N_SLM_Printers = 0;
-
-/* printer buffer */
-static unsigned char *SLMBuffer; /* start of buffer */
-static unsigned char *BufferP; /* current position in buffer */
-static int BufferSize; /* length of buffer for page size */
-
-typedef enum { IDLE, FILLING, PRINTING } SLMSTATE;
-static SLMSTATE SLMState;
-static int SLMBufOwner; /* SLM# currently using the buffer */
-
-/* DMA variables */
-#ifndef SLM_CONT_CNT_REPROG
-static unsigned long SLMCurAddr; /* current base addr of DMA chunk */
-static unsigned long SLMEndAddr; /* expected end addr */
-static unsigned long SLMSliceSize; /* size of one DMA chunk */
-#endif
-static int SLMError;
-
-/* wait queues */
-static DECLARE_WAIT_QUEUE_HEAD(slm_wait); /* waiting for buffer */
-static DECLARE_WAIT_QUEUE_HEAD(print_wait); /* waiting for printing finished */
-
-/* status codes */
-#define SLMSTAT_OK 0x00
-#define SLMSTAT_ORNERY 0x02
-#define SLMSTAT_TONER 0x03
-#define SLMSTAT_WARMUP 0x04
-#define SLMSTAT_PAPER 0x05
-#define SLMSTAT_DRUM 0x06
-#define SLMSTAT_INJAM 0x07
-#define SLMSTAT_THRJAM 0x08
-#define SLMSTAT_OUTJAM 0x09
-#define SLMSTAT_COVER 0x0a
-#define SLMSTAT_FUSER 0x0b
-#define SLMSTAT_IMAGER 0x0c
-#define SLMSTAT_MOTOR 0x0d
-#define SLMSTAT_VIDEO 0x0e
-#define SLMSTAT_SYSTO 0x10
-#define SLMSTAT_OPCODE 0x12
-#define SLMSTAT_DEVNUM 0x15
-#define SLMSTAT_PARAM 0x1a
-#define SLMSTAT_ACSITO 0x1b /* driver defined */
-#define SLMSTAT_NOTALL 0x1c /* driver defined */
-
-static char *SLMErrors[] = {
- /* 0x00 */ "OK and ready",
- /* 0x01 */ NULL,
- /* 0x02 */ "ornery printer",
- /* 0x03 */ "toner empty",
- /* 0x04 */ "warming up",
- /* 0x05 */ "paper empty",
- /* 0x06 */ "drum empty",
- /* 0x07 */ "input jam",
- /* 0x08 */ "through jam",
- /* 0x09 */ "output jam",
- /* 0x0a */ "cover open",
- /* 0x0b */ "fuser malfunction",
- /* 0x0c */ "imager malfunction",
- /* 0x0d */ "motor malfunction",
- /* 0x0e */ "video malfunction",
- /* 0x0f */ NULL,
- /* 0x10 */ "printer system timeout",
- /* 0x11 */ NULL,
- /* 0x12 */ "invalid operation code",
- /* 0x13 */ NULL,
- /* 0x14 */ NULL,
- /* 0x15 */ "invalid device number",
- /* 0x16 */ NULL,
- /* 0x17 */ NULL,
- /* 0x18 */ NULL,
- /* 0x19 */ NULL,
- /* 0x1a */ "invalid parameter list",
- /* 0x1b */ "ACSI timeout",
- /* 0x1c */ "not all printed"
-};
-
-#define N_ERRORS (sizeof(SLMErrors)/sizeof(*SLMErrors))
-
-/* real (driver caused) error? */
-#define IS_REAL_ERROR(x) (x > 0x10)
-
-
-static struct {
- char *name;
- int w, h;
-} StdPageSize[] = {
- { "Letter", 2400, 3180 },
- { "Legal", 2400, 4080 },
- { "A4", 2336, 3386 },
- { "B5", 2016, 2914 }
-};
-
-#define N_STD_SIZES (sizeof(StdPageSize)/sizeof(*StdPageSize))
-
-#define SLM_BUFFER_SIZE (2336*3386/8) /* A4 for now */
-#define SLM_DMA_AMOUNT 255 /* #sectors to program the DMA for */
-
-#ifdef SLM_CONTINUOUS_DMA
-# define SLM_DMA_INT_OFFSET 0 /* DMA goes until seccnt 0, no offs */
-# define SLM_DMA_END_OFFSET 32 /* 32 Byte ST-DMA FIFO */
-# define SLM_SLICE_SIZE(w) (255*512)
-#else
-# define SLM_DMA_INT_OFFSET 32 /* 32 Byte ST-DMA FIFO */
-# define SLM_DMA_END_OFFSET 32 /* 32 Byte ST-DMA FIFO */
-# define SLM_SLICE_SIZE(w) ((254*512)/(w/8)*(w/8))
-#endif
-
-/* calculate the number of jiffies to wait for 'n' bytes */
-#ifdef SLM_CONT_CNT_REPROG
-#define DMA_TIME_FOR(n) 50
-#define DMA_STARTUP_TIME 0
-#else
-#define DMA_TIME_FOR(n) (n/1400-1)
-#define DMA_STARTUP_TIME 650
-#endif
-
-/***************************** Prototypes *****************************/
-
-static char *slm_errstr( int stat );
-static int slm_getstats( char *buffer, int device );
-static ssize_t slm_read( struct file* file, char *buf, size_t count, loff_t
- *ppos );
-static void start_print( int device );
-static irqreturn_t slm_interrupt(int irc, void *data);
-static void slm_test_ready( unsigned long dummy );
-static void set_dma_addr( unsigned long paddr );
-static unsigned long get_dma_addr( void );
-static ssize_t slm_write( struct file *file, const char *buf, size_t count,
- loff_t *ppos );
-static int slm_ioctl( struct inode *inode, struct file *file, unsigned int
- cmd, unsigned long arg );
-static int slm_open( struct inode *inode, struct file *file );
-static int slm_release( struct inode *inode, struct file *file );
-static int slm_req_sense( int device );
-static int slm_mode_sense( int device, char *buffer, int abs_flag );
-#if 0
-static int slm_mode_select( int device, char *buffer, int len, int
- default_flag );
-#endif
-static int slm_get_pagesize( int device, int *w, int *h );
-
-/************************* End of Prototypes **************************/
-
-
-static DEFINE_TIMER(slm_timer, slm_test_ready, 0, 0);
-
-static const struct file_operations slm_fops = {
- .owner = THIS_MODULE,
- .read = slm_read,
- .write = slm_write,
- .ioctl = slm_ioctl,
- .open = slm_open,
- .release = slm_release,
-};
-
-
-/* ---------------------------------------------------------------------- */
-/* Status Functions */
-
-
-static char *slm_errstr( int stat )
-
-{ char *p;
- static char str[22];
-
- stat &= 0x1f;
- if (stat >= 0 && stat < N_ERRORS && (p = SLMErrors[stat]))
- return( p );
- sprintf( str, "unknown status 0x%02x", stat );
- return( str );
-}
-
-
-static int slm_getstats( char *buffer, int device )
-
-{ int len = 0, stat, i, w, h;
- unsigned char buf[256];
-
- stat = slm_mode_sense( device, buf, 0 );
- if (IS_REAL_ERROR(stat))
- return( -EIO );
-
-#define SHORTDATA(i) ((buf[i] << 8) | buf[i+1])
-#define BOOLDATA(i,mask) ((buf[i] & mask) ? "on" : "off")
-
- w = SHORTDATA( 3 );
- h = SHORTDATA( 1 );
-
- len += sprintf( buffer+len, "Status\t\t%s\n",
- slm_errstr( stat ) );
- len += sprintf( buffer+len, "Page Size\t%dx%d",
- w, h );
-
- for( i = 0; i < N_STD_SIZES; ++i ) {
- if (w == StdPageSize[i].w && h == StdPageSize[i].h)
- break;
- }
- if (i < N_STD_SIZES)
- len += sprintf( buffer+len, " (%s)", StdPageSize[i].name );
- buffer[len++] = '\n';
-
- len += sprintf( buffer+len, "Top/Left Margin\t%d/%d\n",
- SHORTDATA( 5 ), SHORTDATA( 7 ) );
- len += sprintf( buffer+len, "Manual Feed\t%s\n",
- BOOLDATA( 9, 0x01 ) );
- len += sprintf( buffer+len, "Input Select\t%d\n",
- (buf[9] >> 1) & 7 );
- len += sprintf( buffer+len, "Auto Select\t%s\n",
- BOOLDATA( 9, 0x10 ) );
- len += sprintf( buffer+len, "Prefeed Paper\t%s\n",
- BOOLDATA( 9, 0x20 ) );
- len += sprintf( buffer+len, "Thick Pixels\t%s\n",
- BOOLDATA( 9, 0x40 ) );
- len += sprintf( buffer+len, "H/V Resol.\t%d/%d dpi\n",
- SHORTDATA( 12 ), SHORTDATA( 10 ) );
- len += sprintf( buffer+len, "System Timeout\t%d\n",
- buf[14] );
- len += sprintf( buffer+len, "Scan Time\t%d\n",
- SHORTDATA( 15 ) );
- len += sprintf( buffer+len, "Page Count\t%d\n",
- SHORTDATA( 17 ) );
- len += sprintf( buffer+len, "In/Out Cap.\t%d/%d\n",
- SHORTDATA( 19 ), SHORTDATA( 21 ) );
- len += sprintf( buffer+len, "Stagger Output\t%s\n",
- BOOLDATA( 23, 0x01 ) );
- len += sprintf( buffer+len, "Output Select\t%d\n",
- (buf[23] >> 1) & 7 );
- len += sprintf( buffer+len, "Duplex Print\t%s\n",
- BOOLDATA( 23, 0x10 ) );
- len += sprintf( buffer+len, "Color Sep.\t%s\n",
- BOOLDATA( 23, 0x20 ) );
-
- return( len );
-}
-
-
-static ssize_t slm_read( struct file *file, char *buf, size_t count,
- loff_t *ppos )
-
-{
- struct inode *node = file->f_path.dentry->d_inode;
- unsigned long page;
- int length;
- int end;
-
- if (!(page = __get_free_page( GFP_KERNEL )))
- return( -ENOMEM );
-
- length = slm_getstats( (char *)page, iminor(node) );
- if (length < 0) {
- count = length;
- goto out;
- }
- if (file->f_pos >= length) {
- count = 0;
- goto out;
- }
- if (count + file->f_pos > length)
- count = length - file->f_pos;
- end = count + file->f_pos;
- if (copy_to_user(buf, (char *)page + file->f_pos, count)) {
- count = -EFAULT;
- goto out;
- }
- file->f_pos = end;
-out: free_page( page );
- return( count );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* Printing */
-
-
-static void start_print( int device )
-
-{ struct slm *sip = &slm_info[device];
- unsigned char *cmd;
- unsigned long paddr;
- int i;
-
- stdma_lock( slm_interrupt, NULL );
-
- CMDSET_TARG_LUN( slmprint_cmd, sip->target, sip->lun );
- cmd = slmprint_cmd;
- paddr = virt_to_phys( SLMBuffer );
- dma_cache_maintenance( paddr, virt_to_phys(BufferP)-paddr, 1 );
- DISABLE_IRQ();
-
- /* Low on A1 */
- dma_wd.dma_mode_status = 0x88;
- MFPDELAY();
-
- /* send the command bytes except the last */
- for( i = 0; i < 5; ++i ) {
- DMA_LONG_WRITE( *cmd++, 0x8a );
- udelay(20);
- if (!acsi_wait_for_IRQ( HZ/2 )) {
- SLMError = 1;
- return; /* timeout */
- }
- }
- /* last command byte */
- DMA_LONG_WRITE( *cmd++, 0x82 );
- MFPDELAY();
- /* set DMA address */
- set_dma_addr( paddr );
- /* program DMA for write and select sector counter reg */
- dma_wd.dma_mode_status = 0x192;
- MFPDELAY();
- /* program for 255*512 bytes and start DMA */
- DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
-
-#ifndef SLM_CONT_CNT_REPROG
- SLMCurAddr = paddr;
- SLMEndAddr = paddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
-#endif
- START_TIMER( DMA_STARTUP_TIME + DMA_TIME_FOR( SLMSliceSize ));
-#if !defined(SLM_CONT_CNT_REPROG) && defined(DEBUG)
- printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
- SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
-#endif
-
- ENABLE_IRQ();
-}
-
-
-/* Only called when an error happened or at the end of a page */
-
-static irqreturn_t slm_interrupt(int irc, void *data)
-
-{ unsigned long addr;
- int stat;
-
- STOP_TIMER();
- addr = get_dma_addr();
- stat = acsi_getstatus();
- SLMError = (stat < 0) ? SLMSTAT_ACSITO :
- (addr < virt_to_phys(BufferP)) ? SLMSTAT_NOTALL :
- stat;
-
- dma_wd.dma_mode_status = 0x80;
- MFPDELAY();
-#ifdef DEBUG
- printk( "SLM: interrupt, addr=%#lx, error=%d\n", addr, SLMError );
-#endif
-
- wake_up( &print_wait );
- stdma_release();
- ENABLE_IRQ();
- return IRQ_HANDLED;
-}
-
-
-static void slm_test_ready( unsigned long dummy )
-
-{
-#ifdef SLM_CONT_CNT_REPROG
- /* program for 255*512 bytes again */
- dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
- START_TIMER( DMA_TIME_FOR(0) );
-#ifdef DEBUG
- printk( "SLM: reprogramming timer for %d jiffies, addr=%#lx\n",
- DMA_TIME_FOR(0), get_dma_addr() );
-#endif
-
-#else /* !SLM_CONT_CNT_REPROG */
-
- unsigned long flags, addr;
- int d, ti;
-#ifdef DEBUG
- struct timeval start_tm, end_tm;
- int did_wait = 0;
-#endif
-
- local_irq_save(flags);
-
- addr = get_dma_addr();
- if ((d = SLMEndAddr - addr) > 0) {
- local_irq_restore(flags);
-
- /* slice not yet finished, decide whether to start another timer or to
- * busy-wait */
- ti = DMA_TIME_FOR( d );
- if (ti > 0) {
-#ifdef DEBUG
- printk( "SLM: reprogramming timer for %d jiffies, rest %d bytes\n",
- ti, d );
-#endif
- START_TIMER( ti );
- return;
- }
- /* wait for desired end address to be reached */
-#ifdef DEBUG
- do_gettimeofday( &start_tm );
- did_wait = 1;
-#endif
- local_irq_disable();
- while( get_dma_addr() < SLMEndAddr )
- barrier();
- }
-
- /* slice finished, start next one */
- SLMCurAddr += SLMSliceSize;
-
-#ifdef SLM_CONTINUOUS_DMA
- /* program for 255*512 bytes again */
- dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
-#else
- /* set DMA address;
- * add 2 bytes for the ones in the SLM controller FIFO! */
- set_dma_addr( SLMCurAddr + 2 );
- /* toggle DMA to write and select sector counter reg */
- dma_wd.dma_mode_status = 0x92;
- MFPDELAY();
- dma_wd.dma_mode_status = 0x192;
- MFPDELAY();
- /* program for 255*512 bytes and start DMA */
- DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
-#endif
-
- local_irq_restore(flags);
-
-#ifdef DEBUG
- if (did_wait) {
- int ms;
- do_gettimeofday( &end_tm );
- ms = (end_tm.tv_sec*1000000+end_tm.tv_usec) -
- (start_tm.tv_sec*1000000+start_tm.tv_usec);
- printk( "SLM: did %ld.%ld ms busy waiting for %d bytes\n",
- ms/1000, ms%1000, d );
- }
- else
- printk( "SLM: didn't wait (!)\n" );
-#endif
-
- if ((unsigned char *)PTOV( SLMCurAddr + SLMSliceSize ) >= BufferP) {
- /* will be last slice, no timer necessary */
-#ifdef DEBUG
- printk( "SLM: CurAddr=%#lx EndAddr=%#lx last slice -> no timer\n",
- SLMCurAddr, SLMEndAddr );
-#endif
- }
- else {
- /* not last slice */
- SLMEndAddr = SLMCurAddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
- START_TIMER( DMA_TIME_FOR( SLMSliceSize ));
-#ifdef DEBUG
- printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
- SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
-#endif
- }
-#endif /* SLM_CONT_CNT_REPROG */
-}
-
-
-static void set_dma_addr( unsigned long paddr )
-
-{ unsigned long flags;
-
- local_irq_save(flags);
- dma_wd.dma_lo = (unsigned char)paddr;
- paddr >>= 8;
- MFPDELAY();
- dma_wd.dma_md = (unsigned char)paddr;
- paddr >>= 8;
- MFPDELAY();
- if (ATARIHW_PRESENT( EXTD_DMA ))
- st_dma_ext_dmahi = (unsigned short)paddr;
- else
- dma_wd.dma_hi = (unsigned char)paddr;
- MFPDELAY();
- local_irq_restore(flags);
-}
-
-
-static unsigned long get_dma_addr( void )
-
-{ unsigned long addr;
-
- addr = dma_wd.dma_lo & 0xff;
- MFPDELAY();
- addr |= (dma_wd.dma_md & 0xff) << 8;
- MFPDELAY();
- addr |= (dma_wd.dma_hi & 0xff) << 16;
- MFPDELAY();
-
- return( addr );
-}
-
-
-static ssize_t slm_write( struct file *file, const char *buf, size_t count,
- loff_t *ppos )
-
-{
- struct inode *node = file->f_path.dentry->d_inode;
- int device = iminor(node);
- int n, filled, w, h;
-
- while( SLMState == PRINTING ||
- (SLMState == FILLING && SLMBufOwner != device) ) {
- interruptible_sleep_on( &slm_wait );
- if (signal_pending(current))
- return( -ERESTARTSYS );
- }
- if (SLMState == IDLE) {
- /* first data of page: get current page size */
- if (slm_get_pagesize( device, &w, &h ))
- return( -EIO );
- BufferSize = w*h/8;
- if (BufferSize > SLM_BUFFER_SIZE)
- return( -ENOMEM );
-
- SLMState = FILLING;
- SLMBufOwner = device;
- }
-
- n = count;
- filled = BufferP - SLMBuffer;
- if (filled + n > BufferSize)
- n = BufferSize - filled;
-
- if (copy_from_user(BufferP, buf, n))
- return -EFAULT;
- BufferP += n;
- filled += n;
-
- if (filled == BufferSize) {
- /* Check the paper size again! The user may have switched it in the
- * time between starting the data and finishing them. Would end up in
- * a trashy page... */
- if (slm_get_pagesize( device, &w, &h ))
- return( -EIO );
- if (BufferSize != w*h/8) {
- printk( KERN_NOTICE "slm%d: page size changed while printing\n",
- device );
- return( -EAGAIN );
- }
-
- SLMState = PRINTING;
- /* choose a slice size that is a multiple of the line size */
-#ifndef SLM_CONT_CNT_REPROG
- SLMSliceSize = SLM_SLICE_SIZE(w);
-#endif
-
- start_print( device );
- sleep_on( &print_wait );
- if (SLMError && IS_REAL_ERROR(SLMError)) {
- printk( KERN_ERR "slm%d: %s\n", device, slm_errstr(SLMError) );
- n = -EIO;
- }
-
- SLMState = IDLE;
- BufferP = SLMBuffer;
- wake_up_interruptible( &slm_wait );
- }
-
- return( n );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* ioctl Functions */
-
-
-static int slm_ioctl( struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg )
-
-{ int device = iminor(inode), err;
-
- /* I can think of setting:
- * - manual feed
- * - paper format
- * - copy count
- * - ...
- * but haven't implemented that yet :-)
- * BTW, has anybody better docs about the MODE SENSE/MODE SELECT data?
- */
- switch( cmd ) {
-
- case SLMIORESET: /* reset buffer, i.e. empty the buffer */
- if (!(file->f_mode & 2))
- return( -EINVAL );
- if (SLMState == PRINTING)
- return( -EBUSY );
- SLMState = IDLE;
- BufferP = SLMBuffer;
- wake_up_interruptible( &slm_wait );
- return( 0 );
-
- case SLMIOGSTAT: { /* get status */
- int stat;
- char *str;
-
- stat = slm_req_sense( device );
- if (arg) {
- str = slm_errstr( stat );
- if (put_user(stat,
- (long *)&((struct SLM_status *)arg)->stat))
- return -EFAULT;
- if (copy_to_user( ((struct SLM_status *)arg)->str, str,
- strlen(str) + 1))
- return -EFAULT;
- }
- return( stat );
- }
-
- case SLMIOGPSIZE: { /* get paper size */
- int w, h;
-
- if ((err = slm_get_pagesize( device, &w, &h ))) return( err );
-
- if (put_user(w, (long *)&((struct SLM_paper_size *)arg)->width))
- return -EFAULT;
- if (put_user(h, (long *)&((struct SLM_paper_size *)arg)->height))
- return -EFAULT;
- return( 0 );
- }
-
- case SLMIOGMFEED: /* get manual feed */
- return( -EINVAL );
-
- case SLMIOSPSIZE: /* set paper size */
- return( -EINVAL );
-
- case SLMIOSMFEED: /* set manual feed */
- return( -EINVAL );
-
- }
- return( -EINVAL );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* Opening and Closing */
-
-
-static int slm_open( struct inode *inode, struct file *file )
-
-{ int device;
- struct slm *sip;
-
- device = iminor(inode);
- if (device >= N_SLM_Printers)
- return( -ENXIO );
- sip = &slm_info[device];
-
- if (file->f_mode & 2) {
- /* open for writing is exclusive */
- if ( !atomic_dec_and_test(&sip->wr_ok) ) {
- atomic_inc(&sip->wr_ok);
- return( -EBUSY );
- }
- }
- if (file->f_mode & 1) {
- /* open for reading is exclusive */
- if ( !atomic_dec_and_test(&sip->rd_ok) ) {
- atomic_inc(&sip->rd_ok);
- return( -EBUSY );
- }
- }
-
- return( 0 );
-}
-
-
-static int slm_release( struct inode *inode, struct file *file )
-
-{ int device;
- struct slm *sip;
-
- device = iminor(inode);
- sip = &slm_info[device];
-
- if (file->f_mode & 2)
- atomic_inc( &sip->wr_ok );
- if (file->f_mode & 1)
- atomic_inc( &sip->rd_ok );
-
- return( 0 );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* ACSI Primitives for the SLM */
-
-
-static int slm_req_sense( int device )
-
-{ int stat, rv;
- struct slm *sip = &slm_info[device];
-
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN( slmreqsense_cmd, sip->target, sip->lun );
- if (!acsicmd_nodma( slmreqsense_cmd, 0 ) ||
- (stat = acsi_getstatus()) < 0)
- rv = SLMSTAT_ACSITO;
- else
- rv = stat & 0x1f;
-
- ENABLE_IRQ();
- stdma_release();
- return( rv );
-}
-
-
-static int slm_mode_sense( int device, char *buffer, int abs_flag )
-
-{ unsigned char stat, len;
- int rv = 0;
- struct slm *sip = &slm_info[device];
-
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN( slmmsense_cmd, sip->target, sip->lun );
- slmmsense_cmd[5] = abs_flag ? 0x80 : 0;
- if (!acsicmd_nodma( slmmsense_cmd, 0 )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- if (!acsi_extstatus( &stat, 1 )) {
- acsi_end_extstatus();
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- if (!acsi_extstatus( &len, 1 )) {
- acsi_end_extstatus();
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
- buffer[0] = len;
- if (!acsi_extstatus( buffer+1, len )) {
- acsi_end_extstatus();
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- acsi_end_extstatus();
- rv = stat & 0x1f;
-
- the_end:
- ENABLE_IRQ();
- stdma_release();
- return( rv );
-}
-
-
-#if 0
-/* currently unused */
-static int slm_mode_select( int device, char *buffer, int len,
- int default_flag )
-
-{ int stat, rv;
- struct slm *sip = &slm_info[device];
-
- stdma_lock( NULL, NULL );
-
- CMDSET_TARG_LUN( slmmselect_cmd, sip->target, sip->lun );
- slmmselect_cmd[5] = default_flag ? 0x80 : 0;
- if (!acsicmd_nodma( slmmselect_cmd, 0 )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
-
- if (!default_flag) {
- unsigned char c = len;
- if (!acsi_extcmd( &c, 1 )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
- if (!acsi_extcmd( buffer, len )) {
- rv = SLMSTAT_ACSITO;
- goto the_end;
- }
- }
-
- stat = acsi_getstatus();
- rv = (stat < 0 ? SLMSTAT_ACSITO : stat);
-
- the_end:
- ENABLE_IRQ();
- stdma_release();
- return( rv );
-}
-#endif
-
-
-static int slm_get_pagesize( int device, int *w, int *h )
-
-{ char buf[256];
- int stat;
-
- stat = slm_mode_sense( device, buf, 0 );
- ENABLE_IRQ();
- stdma_release();
-
- if (stat != SLMSTAT_OK)
- return( -EIO );
-
- *w = (buf[3] << 8) | buf[4];
- *h = (buf[1] << 8) | buf[2];
- return( 0 );
-}
-
-
-/* ---------------------------------------------------------------------- */
-/* Initialization */
-
-
-int attach_slm( int target, int lun )
-
-{ static int did_register;
- int len;
-
- if (N_SLM_Printers >= MAX_SLM) {
- printk( KERN_WARNING "Too much SLMs\n" );
- return( 0 );
- }
-
- /* do an INQUIRY */
- udelay(100);
- CMDSET_TARG_LUN( slminquiry_cmd, target, lun );
- if (!acsicmd_nodma( slminquiry_cmd, 0 )) {
- inq_timeout:
- printk( KERN_ERR "SLM inquiry command timed out.\n" );
- inq_fail:
- acsi_end_extstatus();
- return( 0 );
- }
- /* read status and header of return data */
- if (!acsi_extstatus( SLMBuffer, 6 ))
- goto inq_timeout;
-
- if (SLMBuffer[1] != 2) { /* device type == printer? */
- printk( KERN_ERR "SLM inquiry returned device type != printer\n" );
- goto inq_fail;
- }
- len = SLMBuffer[5];
-
- /* read id string */
- if (!acsi_extstatus( SLMBuffer, len ))
- goto inq_timeout;
- acsi_end_extstatus();
- SLMBuffer[len] = 0;
-
- if (!did_register) {
- did_register = 1;
- }
-
- slm_info[N_SLM_Printers].target = target;
- slm_info[N_SLM_Printers].lun = lun;
- atomic_set(&slm_info[N_SLM_Printers].wr_ok, 1 );
- atomic_set(&slm_info[N_SLM_Printers].rd_ok, 1 );
-
- printk( KERN_INFO " Printer: %s\n", SLMBuffer );
- printk( KERN_INFO "Detected slm%d at id %d lun %d\n",
- N_SLM_Printers, target, lun );
- N_SLM_Printers++;
- return( 1 );
-}
-
-int slm_init( void )
-
-{
- int i;
- if (register_chrdev( ACSI_MAJOR, "slm", &slm_fops )) {
- printk( KERN_ERR "Unable to get major %d for ACSI SLM\n", ACSI_MAJOR );
- return -EBUSY;
- }
-
- if (!(SLMBuffer = atari_stram_alloc( SLM_BUFFER_SIZE, "SLM" ))) {
- printk( KERN_ERR "Unable to get SLM ST-Ram buffer.\n" );
- unregister_chrdev( ACSI_MAJOR, "slm" );
- return -ENOMEM;
- }
- BufferP = SLMBuffer;
- SLMState = IDLE;
-
- return 0;
-}
-
-#ifdef MODULE
-
-/* from acsi.c */
-void acsi_attach_SLMs( int (*attach_func)( int, int ) );
-
-int init_module(void)
-{
- int err;
-
- if ((err = slm_init()))
- return( err );
- /* This calls attach_slm() for every target/lun where acsi.c detected a
- * printer */
- acsi_attach_SLMs( attach_slm );
- return( 0 );
-}
-
-void cleanup_module(void)
-{
- if (unregister_chrdev( ACSI_MAJOR, "slm" ) != 0)
- printk( KERN_ERR "acsi_slm: cleanup_module failed\n");
- atari_stram_free( SLMBuffer );
-}
-#endif
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 478489c568a4..4f598270fa31 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -257,9 +257,9 @@ aoeblk_exit(void)
int __init
aoeblk_init(void)
{
- buf_pool_cache = kmem_cache_create("aoe_bufs",
+ buf_pool_cache = kmem_cache_create("aoe_bufs",
sizeof(struct buf),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (buf_pool_cache == NULL)
return -ENOMEM;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 0fcad430474e..a2d6612b80d2 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1170,7 +1170,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
case SG_EMULATED_HOST:
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_ioctl(filep, disk, cmd, argp);
+ return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
/* scsi_cmd_ioctl would normally handle these, below, but */
/* they aren't a good fit for cciss, as CD-ROMs are */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 90961a8ea895..4aca7ddfdddf 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -555,7 +555,6 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
{
struct scsi_cmnd *cmd;
ctlr_info_t *ctlr;
- u64bit addr64;
ErrorInfo_struct *ei;
ei = cp->err_info;
@@ -569,20 +568,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
ctlr = hba[cp->ctlr];
- /* undo the DMA mappings */
-
- if (cmd->use_sg) {
- pci_unmap_sg(ctlr->pdev,
- cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- }
- else if (cmd->request_bufflen) {
- addr64.val32.lower = cp->SG[0].Addr.lower;
- addr64.val32.upper = cp->SG[0].Addr.upper;
- pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
@@ -597,7 +583,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
SCSI_SENSE_BUFFERSIZE :
ei->SenseLen);
- cmd->resid = ei->ResidualCnt;
+ scsi_set_resid(cmd, ei->ResidualCnt);
if(ei->CommandStatus != 0)
{ /* an error has occurred */
@@ -1204,46 +1190,29 @@ cciss_scatter_gather(struct pci_dev *pdev,
CommandList_struct *cp,
struct scsi_cmnd *cmd)
{
- unsigned int use_sg, nsegs=0, len;
- struct scatterlist *scatter = (struct scatterlist *) cmd->request_buffer;
+ unsigned int len;
+ struct scatterlist *sg;
__u64 addr64;
-
- /* is it just one virtual address? */
- if (!cmd->use_sg) {
- if (cmd->request_bufflen) { /* anything to xfer? */
-
- addr64 = (__u64) pci_map_single(pdev,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
-
- cp->SG[0].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[0].Len = cmd->request_bufflen;
- nsegs=1;
- }
- } /* else, must be a list of virtual addresses.... */
- else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
-
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
-
- for (nsegs=0; nsegs < use_sg; nsegs++) {
- addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
- len = sg_dma_len(&scatter[nsegs]);
- cp->SG[nsegs].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- cp->SG[nsegs].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- cp->SG[nsegs].Len = len;
- cp->SG[nsegs].Ext = 0; // we are not chaining
+ int use_sg, i;
+
+ BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg) { /* not too many addrs? */
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (__u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ cp->SG[i].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Len = len;
+ cp->SG[i].Ext = 0; // we are not chaining
}
- } else BUG();
+ }
- cp->Header.SGList = (__u8) nsegs; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
+ cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
return;
}
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
new file mode 100644
index 000000000000..1634c2dd25ec
--- /dev/null
+++ b/drivers/block/lguest_blk.c
@@ -0,0 +1,275 @@
+/* A simple block driver for lguest.
+ *
+ * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+//#define DEBUG
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/lguest_bus.h>
+
+static char next_block_index = 'a';
+
+struct blockdev
+{
+ spinlock_t lock;
+
+ /* The disk structure for the kernel. */
+ struct gendisk *disk;
+
+ /* The major number for this disk. */
+ int major;
+ int irq;
+
+ unsigned long phys_addr;
+ /* The mapped block page. */
+ struct lguest_block_page *lb_page;
+
+ /* We only have a single request outstanding at a time. */
+ struct lguest_dma dma;
+ struct request *req;
+};
+
+/* Jens gave me this nice helper to end all chunks of a request. */
+static void end_entire_request(struct request *req, int uptodate)
+{
+ if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
+ BUG();
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, uptodate);
+}
+
+static irqreturn_t lgb_irq(int irq, void *_bd)
+{
+ struct blockdev *bd = _bd;
+ unsigned long flags;
+
+ if (!bd->req) {
+ pr_debug("No work!\n");
+ return IRQ_NONE;
+ }
+
+ if (!bd->lb_page->result) {
+ pr_debug("No result!\n");
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&bd->lock, flags);
+ end_entire_request(bd->req, bd->lb_page->result == 1);
+ bd->req = NULL;
+ bd->dma.used_len = 0;
+ blk_start_queue(bd->disk->queue);
+ spin_unlock_irqrestore(&bd->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
+{
+ unsigned int i = 0, idx, len = 0;
+ struct bio *bio;
+
+ rq_for_each_bio(bio, req) {
+ struct bio_vec *bvec;
+ bio_for_each_segment(bvec, bio, idx) {
+ BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+ BUG_ON(!bvec->bv_len);
+ dma->addr[i] = page_to_phys(bvec->bv_page)
+ + bvec->bv_offset;
+ dma->len[i] = bvec->bv_len;
+ len += bvec->bv_len;
+ i++;
+ }
+ }
+ if (i < LGUEST_MAX_DMA_SECTIONS)
+ dma->len[i] = 0;
+ return len;
+}
+
+static void empty_dma(struct lguest_dma *dma)
+{
+ dma->len[0] = 0;
+}
+
+static void setup_req(struct blockdev *bd,
+ int type, struct request *req, struct lguest_dma *dma)
+{
+ bd->lb_page->type = type;
+ bd->lb_page->sector = req->sector;
+ bd->lb_page->result = 0;
+ bd->req = req;
+ bd->lb_page->bytes = req_to_dma(req, dma);
+}
+
+static void do_write(struct blockdev *bd, struct request *req)
+{
+ struct lguest_dma send;
+
+ pr_debug("lgb: WRITE sector %li\n", (long)req->sector);
+ setup_req(bd, 1, req, &send);
+
+ lguest_send_dma(bd->phys_addr, &send);
+}
+
+static void do_read(struct blockdev *bd, struct request *req)
+{
+ struct lguest_dma ping;
+
+ pr_debug("lgb: READ sector %li\n", (long)req->sector);
+ setup_req(bd, 0, req, &bd->dma);
+
+ empty_dma(&ping);
+ lguest_send_dma(bd->phys_addr, &ping);
+}
+
+static void do_lgb_request(request_queue_t *q)
+{
+ struct blockdev *bd;
+ struct request *req;
+
+again:
+ req = elv_next_request(q);
+ if (!req)
+ return;
+
+ bd = req->rq_disk->private_data;
+ /* Sometimes we get repeated requests after blk_stop_queue. */
+ if (bd->req)
+ return;
+
+ if (!blk_fs_request(req)) {
+ pr_debug("Got non-command 0x%08x\n", req->cmd_type);
+ req->errors++;
+ end_entire_request(req, 0);
+ goto again;
+ }
+
+ if (rq_data_dir(req) == WRITE)
+ do_write(bd, req);
+ else
+ do_read(bd, req);
+
+ /* Wait for interrupt to tell us it's done. */
+ blk_stop_queue(q);
+}
+
+static struct block_device_operations lguestblk_fops = {
+ .owner = THIS_MODULE,
+};
+
+static int lguestblk_probe(struct lguest_device *lgdev)
+{
+ struct blockdev *bd;
+ int err;
+ int irqflags = IRQF_SHARED;
+
+ bd = kmalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+
+ spin_lock_init(&bd->lock);
+ bd->irq = lgdev_irq(lgdev);
+ bd->req = NULL;
+ bd->dma.used_len = 0;
+ bd->dma.len[0] = 0;
+ bd->phys_addr = (lguest_devices[lgdev->index].pfn << PAGE_SHIFT);
+
+ bd->lb_page = lguest_map(bd->phys_addr, 1);
+ if (!bd->lb_page) {
+ err = -ENOMEM;
+ goto out_free_bd;
+ }
+
+ bd->major = register_blkdev(0, "lguestblk");
+ if (bd->major < 0) {
+ err = bd->major;
+ goto out_unmap;
+ }
+
+ bd->disk = alloc_disk(1);
+ if (!bd->disk) {
+ err = -ENOMEM;
+ goto out_unregister_blkdev;
+ }
+
+ bd->disk->queue = blk_init_queue(do_lgb_request, &bd->lock);
+ if (!bd->disk->queue) {
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+
+ /* We can only handle a certain number of sg entries */
+ blk_queue_max_hw_segments(bd->disk->queue, LGUEST_MAX_DMA_SECTIONS);
+ /* Buffers must not cross page boundaries */
+ blk_queue_segment_boundary(bd->disk->queue, PAGE_SIZE-1);
+
+ sprintf(bd->disk->disk_name, "lgb%c", next_block_index++);
+ if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
+ irqflags |= IRQF_SAMPLE_RANDOM;
+ err = request_irq(bd->irq, lgb_irq, irqflags, bd->disk->disk_name, bd);
+ if (err)
+ goto out_cleanup_queue;
+
+ err = lguest_bind_dma(bd->phys_addr, &bd->dma, 1, bd->irq);
+ if (err)
+ goto out_free_irq;
+
+ bd->disk->major = bd->major;
+ bd->disk->first_minor = 0;
+ bd->disk->private_data = bd;
+ bd->disk->fops = &lguestblk_fops;
+ /* This is initialized to the disk size by the other end. */
+ set_capacity(bd->disk, bd->lb_page->num_sectors);
+ add_disk(bd->disk);
+
+ printk(KERN_INFO "%s: device %i at major %d\n",
+ bd->disk->disk_name, lgdev->index, bd->major);
+
+ lgdev->private = bd;
+ return 0;
+
+out_free_irq:
+ free_irq(bd->irq, bd);
+out_cleanup_queue:
+ blk_cleanup_queue(bd->disk->queue);
+out_put_disk:
+ put_disk(bd->disk);
+out_unregister_blkdev:
+ unregister_blkdev(bd->major, "lguestblk");
+out_unmap:
+ lguest_unmap(bd->lb_page);
+out_free_bd:
+ kfree(bd);
+ return err;
+}
+
+static struct lguest_driver lguestblk_drv = {
+ .name = "lguestblk",
+ .owner = THIS_MODULE,
+ .device_type = LGUEST_DEVICE_T_BLOCK,
+ .probe = lguestblk_probe,
+};
+
+static __init int lguestblk_init(void)
+{
+ return register_lguest_driver(&lguestblk_drv);
+}
+module_init(lguestblk_init);
+
+MODULE_DESCRIPTION("Lguest block driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4503290da407..e425daa1eac3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,6 +68,7 @@
#include <linux/loop.h>
#include <linux/compat.h>
#include <linux/suspend.h>
+#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
@@ -600,13 +601,6 @@ static int loop_thread(void *data)
struct loop_device *lo = data;
struct bio *bio;
- /*
- * loop can be used in an encrypted device,
- * hence, it mustn't be stopped at all
- * because it could be indirectly used during suspension
- */
- current->flags |= PF_NOFREEZE;
-
set_user_nice(current, -20);
while (!kthread_should_stop() || lo->lo_bio) {
@@ -1574,8 +1568,7 @@ static void __exit loop_exit(void)
loop_del_one(lo);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
- if (unregister_blkdev(LOOP_MAJOR, "loop"))
- printk(KERN_WARNING "loop: cannot unregister blkdev\n");
+ unregister_blkdev(LOOP_MAJOR, "loop");
}
module_init(loop_init);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c575fb1d585f..c12951024090 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -122,17 +122,12 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
int result;
struct msghdr msg;
struct kvec iov;
- unsigned long flags;
- sigset_t oldset;
+ sigset_t blocked, oldset;
/* Allow interception of SIGKILL only
* Don't allow other signals to interrupt the transmission */
- spin_lock_irqsave(&current->sighand->siglock, flags);
- oldset = current->blocked;
- sigfillset(&current->blocked);
- sigdelsetmask(&current->blocked, sigmask(SIGKILL));
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ siginitsetinv(&blocked, sigmask(SIGKILL));
+ sigprocmask(SIG_SETMASK, &blocked, &oldset);
do {
sock->sk->sk_allocation = GFP_NOIO;
@@ -151,11 +146,9 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
if (signal_pending(current)) {
siginfo_t info;
- spin_lock_irqsave(&current->sighand->siglock, flags);
printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
- current->pid, current->comm,
- dequeue_signal(current, &current->blocked, &info));
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ current->pid, current->comm,
+ dequeue_signal_lock(current, &current->blocked, &info));
result = -EINTR;
break;
}
@@ -169,10 +162,7 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
buf += result;
} while (size > 0);
- spin_lock_irqsave(&current->sighand->siglock, flags);
- current->blocked = oldset;
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ sigprocmask(SIG_SETMASK, &oldset, NULL);
return result;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f1b9dd7d47d6..31be33e4f119 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -146,8 +146,7 @@ static void pkt_kobj_release(struct kobject *kobj)
**********************************************************/
#define DEF_ATTR(_obj,_name,_mode) \
- static struct attribute _obj = { \
- .name = _name, .owner = THIS_MODULE, .mode = _mode }
+ static struct attribute _obj = { .name = _name, .mode = _mode }
/**********************************************************
/sys/class/pktcdvd/pktcdvd[0-7]/
@@ -1594,6 +1593,7 @@ static int kcdrwd(void *foobar)
long min_sleep_time, residue;
set_user_nice(current, -20);
+ set_freezable();
for (;;) {
DECLARE_WAITQUEUE(wait, current);
@@ -1653,9 +1653,6 @@ static int kcdrwd(void *foobar)
}
}
- if (signal_pending(current)) {
- flush_signals(current);
- }
if (kthread_should_stop())
break;
}
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
new file mode 100644
index 000000000000..170fb33dba97
--- /dev/null
+++ b/drivers/block/ps3disk.c
@@ -0,0 +1,630 @@
+/*
+ * PS3 Disk Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/ata.h>
+#include <linux/blkdev.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+#include <asm/firmware.h>
+
+
+#define DEVICE_NAME "ps3disk"
+
+#define BOUNCE_SIZE (64*1024)
+
+#define PS3DISK_MAX_DISKS 16
+#define PS3DISK_MINORS 16
+
+
+#define PS3DISK_NAME "ps3d%c"
+
+
+struct ps3disk_private {
+ spinlock_t lock; /* Request queue spinlock */
+ struct request_queue *queue;
+ struct gendisk *gendisk;
+ unsigned int blocking_factor;
+ struct request *req;
+ u64 raw_capacity;
+ unsigned char model[ATA_ID_PROD_LEN+1];
+};
+
+
+#define LV1_STORAGE_SEND_ATA_COMMAND (2)
+#define LV1_STORAGE_ATA_HDDOUT (0x23)
+
+struct lv1_ata_cmnd_block {
+ u16 features;
+ u16 sector_count;
+ u16 LBA_low;
+ u16 LBA_mid;
+ u16 LBA_high;
+ u8 device;
+ u8 command;
+ u32 is_ext;
+ u32 proto;
+ u32 in_out;
+ u32 size;
+ u64 buffer;
+ u32 arglen;
+};
+
+enum lv1_ata_proto {
+ NON_DATA_PROTO = 0,
+ PIO_DATA_IN_PROTO = 1,
+ PIO_DATA_OUT_PROTO = 2,
+ DMA_PROTO = 3
+};
+
+enum lv1_ata_in_out {
+ DIR_WRITE = 0, /* memory -> device */
+ DIR_READ = 1 /* device -> memory */
+};
+
+static int ps3disk_major;
+
+
+static struct block_device_operations ps3disk_fops = {
+ .owner = THIS_MODULE,
+};
+
+
+static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
+ struct request *req, int gather)
+{
+ unsigned int offset = 0;
+ struct bio *bio;
+ sector_t sector;
+ struct bio_vec *bvec;
+ unsigned int i = 0, j;
+ size_t size;
+ void *buf;
+
+ rq_for_each_bio(bio, req) {
+ sector = bio->bi_sector;
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: bio %u: %u segs %u sectors from %lu\n",
+ __func__, __LINE__, i, bio_segments(bio),
+ bio_sectors(bio), sector);
+ bio_for_each_segment(bvec, bio, j) {
+ size = bvec->bv_len;
+ buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
+ if (gather)
+ memcpy(dev->bounce_buf+offset, buf, size);
+ else
+ memcpy(buf, dev->bounce_buf+offset, size);
+ offset += size;
+ flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
+ __bio_kunmap_atomic(bio, KM_IRQ0);
+ }
+ i++;
+ }
+}
+
+static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
+ struct request *req)
+{
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ int write = rq_data_dir(req), res;
+ const char *op = write ? "write" : "read";
+ u64 start_sector, sectors;
+ unsigned int region_id = dev->regions[dev->region_idx].id;
+
+#ifdef DEBUG
+ unsigned int n = 0;
+ struct bio *bio;
+
+ rq_for_each_bio(bio, req)
+ n++;
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
+ __func__, __LINE__, op, n, req->nr_sectors,
+ req->hard_nr_sectors);
+#endif
+
+ start_sector = req->sector * priv->blocking_factor;
+ sectors = req->nr_sectors * priv->blocking_factor;
+ dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
+ __func__, __LINE__, op, sectors, start_sector);
+
+ if (write) {
+ ps3disk_scatter_gather(dev, req, 1);
+
+ res = lv1_storage_write(dev->sbd.dev_id, region_id,
+ start_sector, sectors, 0,
+ dev->bounce_lpar, &dev->tag);
+ } else {
+ res = lv1_storage_read(dev->sbd.dev_id, region_id,
+ start_sector, sectors, 0,
+ dev->bounce_lpar, &dev->tag);
+ }
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
+ __LINE__, op, res);
+ end_request(req, 0);
+ return 0;
+ }
+
+ priv->req = req;
+ return 1;
+}
+
+static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
+ struct request *req)
+{
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ u64 res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__);
+
+ res = lv1_storage_send_device_command(dev->sbd.dev_id,
+ LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
+ 0, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
+ __func__, __LINE__, res);
+ end_request(req, 0);
+ return 0;
+ }
+
+ priv->req = req;
+ return 1;
+}
+
+static void ps3disk_do_request(struct ps3_storage_device *dev,
+ request_queue_t *q)
+{
+ struct request *req;
+
+ dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
+
+ while ((req = elv_next_request(q))) {
+ if (blk_fs_request(req)) {
+ if (ps3disk_submit_request_sg(dev, req))
+ break;
+ } else if (req->cmd_type == REQ_TYPE_FLUSH) {
+ if (ps3disk_submit_flush_request(dev, req))
+ break;
+ } else {
+ blk_dump_rq_flags(req, DEVICE_NAME " bad request");
+ end_request(req, 0);
+ continue;
+ }
+ }
+}
+
+static void ps3disk_request(request_queue_t *q)
+{
+ struct ps3_storage_device *dev = q->queuedata;
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+
+ if (priv->req) {
+ dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
+ return;
+ }
+
+ ps3disk_do_request(dev, q);
+}
+
+static irqreturn_t ps3disk_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ struct ps3disk_private *priv;
+ struct request *req;
+ int res, read, uptodate;
+ u64 tag, status;
+ unsigned long num_sectors;
+ const char *op;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %lx, expected %lx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+ __func__, __LINE__, res, status);
+ return IRQ_HANDLED;
+ }
+
+ priv = dev->sbd.core.driver_data;
+ req = priv->req;
+ if (!req) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u non-block layer request completed\n", __func__,
+ __LINE__);
+ dev->lv1_status = status;
+ complete(&dev->done);
+ return IRQ_HANDLED;
+ }
+
+ if (req->cmd_type == REQ_TYPE_FLUSH) {
+ read = 0;
+ num_sectors = req->hard_cur_sectors;
+ op = "flush";
+ } else {
+ read = !rq_data_dir(req);
+ num_sectors = req->nr_sectors;
+ op = read ? "read" : "write";
+ }
+ if (status) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+ __LINE__, op, status);
+ uptodate = 0;
+ } else {
+ dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
+ __LINE__, op);
+ uptodate = 1;
+ if (read)
+ ps3disk_scatter_gather(dev, req, 0);
+ }
+
+ spin_lock(&priv->lock);
+ if (!end_that_request_first(req, uptodate, num_sectors)) {
+ add_disk_randomness(req->rq_disk);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req, uptodate);
+ }
+ priv->req = NULL;
+ ps3disk_do_request(dev, priv->queue);
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ps3disk_sync_cache(struct ps3_storage_device *dev)
+{
+ u64 res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: sync cache\n", __func__, __LINE__);
+
+ res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n",
+ __func__, __LINE__, res);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+/* ATA helpers copied from drivers/ata/libata-core.c */
+
+static void swap_buf_le16(u16 *buf, unsigned int buf_words)
+{
+#ifdef __BIG_ENDIAN
+ unsigned int i;
+
+ for (i = 0; i < buf_words; i++)
+ buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
+
+static u64 ata_id_n_sectors(const u16 *id)
+{
+ if (ata_id_has_lba(id)) {
+ if (ata_id_has_lba48(id))
+ return ata_id_u64(id, 100);
+ else
+ return ata_id_u32(id, 60);
+ } else {
+ if (ata_id_current_chs_valid(id))
+ return ata_id_u32(id, 57);
+ else
+ return id[1] * id[3] * id[6];
+ }
+}
+
+static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs,
+ unsigned int len)
+{
+ unsigned int c;
+
+ while (len > 0) {
+ c = id[ofs] >> 8;
+ *s = c;
+ s++;
+
+ c = id[ofs] & 0xff;
+ *s = c;
+ s++;
+
+ ofs++;
+ len -= 2;
+ }
+}
+
+static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs,
+ unsigned int len)
+{
+ unsigned char *p;
+
+ WARN_ON(!(len & 1));
+
+ ata_id_string(id, s, ofs, len - 1);
+
+ p = s + strnlen(s, len - 1);
+ while (p > s && p[-1] == ' ')
+ p--;
+ *p = '\0';
+}
+
+static int ps3disk_identify(struct ps3_storage_device *dev)
+{
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+ struct lv1_ata_cmnd_block ata_cmnd;
+ u16 *id = dev->bounce_buf;
+ u64 res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: identify disk\n", __func__, __LINE__);
+
+ memset(&ata_cmnd, 0, sizeof(struct lv1_ata_cmnd_block));
+ ata_cmnd.command = ATA_CMD_ID_ATA;
+ ata_cmnd.sector_count = 1;
+ ata_cmnd.size = ata_cmnd.arglen = ATA_ID_WORDS * 2;
+ ata_cmnd.buffer = dev->bounce_lpar;
+ ata_cmnd.proto = PIO_DATA_IN_PROTO;
+ ata_cmnd.in_out = DIR_READ;
+
+ res = ps3stor_send_command(dev, LV1_STORAGE_SEND_ATA_COMMAND,
+ ps3_mm_phys_to_lpar(__pa(&ata_cmnd)),
+ sizeof(ata_cmnd), ata_cmnd.buffer,
+ ata_cmnd.arglen);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%lx\n",
+ __func__, __LINE__, res);
+ return -EIO;
+ }
+
+ swap_buf_le16(id, ATA_ID_WORDS);
+
+ /* All we're interested in are raw capacity and model name */
+ priv->raw_capacity = ata_id_n_sectors(id);
+ ata_id_c_string(id, priv->model, ATA_ID_PROD, sizeof(priv->model));
+ return 0;
+}
+
+static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
+{
+ struct ps3_storage_device *dev = q->queuedata;
+
+ dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
+
+ memset(req->cmd, 0, sizeof(req->cmd));
+ req->cmd_type = REQ_TYPE_FLUSH;
+}
+
+static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
+ sector_t *sector)
+{
+ struct ps3_storage_device *dev = q->queuedata;
+ struct request *req;
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
+
+ req = blk_get_request(q, WRITE, __GFP_WAIT);
+ ps3disk_prepare_flush(q, req);
+ res = blk_execute_rq(q, gendisk, req, 0);
+ if (res)
+ dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
+ __func__, __LINE__, res);
+ blk_put_request(req);
+ return res;
+}
+
+
+static unsigned long ps3disk_mask;
+
+static DEFINE_MUTEX(ps3disk_mask_mutex);
+
+static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3disk_private *priv;
+ int error;
+ unsigned int devidx;
+ struct request_queue *queue;
+ struct gendisk *gendisk;
+
+ if (dev->blk_size < 512) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: cannot handle block size %lu\n", __func__,
+ __LINE__, dev->blk_size);
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(PS3DISK_MAX_DISKS > BITS_PER_LONG);
+ mutex_lock(&ps3disk_mask_mutex);
+ devidx = find_first_zero_bit(&ps3disk_mask, PS3DISK_MAX_DISKS);
+ if (devidx >= PS3DISK_MAX_DISKS) {
+ dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__,
+ __LINE__);
+ mutex_unlock(&ps3disk_mask_mutex);
+ return -ENOSPC;
+ }
+ __set_bit(devidx, &ps3disk_mask);
+ mutex_unlock(&ps3disk_mask_mutex);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dev->sbd.core.driver_data = priv;
+ spin_lock_init(&priv->lock);
+
+ dev->bounce_size = BOUNCE_SIZE;
+ dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
+ if (!dev->bounce_buf) {
+ error = -ENOMEM;
+ goto fail_free_priv;
+ }
+
+ error = ps3stor_setup(dev, ps3disk_interrupt);
+ if (error)
+ goto fail_free_bounce;
+
+ ps3disk_identify(dev);
+
+ queue = blk_init_queue(ps3disk_request, &priv->lock);
+ if (!queue) {
+ dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
+ __func__, __LINE__);
+ error = -ENOMEM;
+ goto fail_teardown;
+ }
+
+ priv->queue = queue;
+ queue->queuedata = dev;
+
+ blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);
+
+ blk_queue_max_sectors(queue, dev->bounce_size >> 9);
+ blk_queue_segment_boundary(queue, -1UL);
+ blk_queue_dma_alignment(queue, dev->blk_size-1);
+ blk_queue_hardsect_size(queue, dev->blk_size);
+
+ blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
+ blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ ps3disk_prepare_flush);
+
+ blk_queue_max_phys_segments(queue, -1);
+ blk_queue_max_hw_segments(queue, -1);
+ blk_queue_max_segment_size(queue, dev->bounce_size);
+
+ gendisk = alloc_disk(PS3DISK_MINORS);
+ if (!gendisk) {
+ dev_err(&dev->sbd.core, "%s:%u: alloc_disk failed\n", __func__,
+ __LINE__);
+ error = -ENOMEM;
+ goto fail_cleanup_queue;
+ }
+
+ priv->gendisk = gendisk;
+ gendisk->major = ps3disk_major;
+ gendisk->first_minor = devidx * PS3DISK_MINORS;
+ gendisk->fops = &ps3disk_fops;
+ gendisk->queue = queue;
+ gendisk->private_data = dev;
+ gendisk->driverfs_dev = &dev->sbd.core;
+ snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
+ devidx+'a');
+ priv->blocking_factor = dev->blk_size >> 9;
+ set_capacity(gendisk,
+ dev->regions[dev->region_idx].size*priv->blocking_factor);
+
+ dev_info(&dev->sbd.core,
+ "%s is a %s (%lu MiB total, %lu MiB for OtherOS)\n",
+ gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
+ get_capacity(gendisk) >> 11);
+
+ add_disk(gendisk);
+ return 0;
+
+fail_cleanup_queue:
+ blk_cleanup_queue(queue);
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_bounce:
+ kfree(dev->bounce_buf);
+fail_free_priv:
+ kfree(priv);
+ dev->sbd.core.driver_data = NULL;
+fail:
+ mutex_lock(&ps3disk_mask_mutex);
+ __clear_bit(devidx, &ps3disk_mask);
+ mutex_unlock(&ps3disk_mask_mutex);
+ return error;
+}
+
+static int ps3disk_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3disk_private *priv = dev->sbd.core.driver_data;
+
+ mutex_lock(&ps3disk_mask_mutex);
+ __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS,
+ &ps3disk_mask);
+ mutex_unlock(&ps3disk_mask_mutex);
+ del_gendisk(priv->gendisk);
+ blk_cleanup_queue(priv->queue);
+ put_disk(priv->gendisk);
+ dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
+ ps3disk_sync_cache(dev);
+ ps3stor_teardown(dev);
+ kfree(dev->bounce_buf);
+ kfree(priv);
+ dev->sbd.core.driver_data = NULL;
+ return 0;
+}
+
+static struct ps3_system_bus_driver ps3disk = {
+ .match_id = PS3_MATCH_ID_STOR_DISK,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3disk_probe,
+ .remove = ps3disk_remove,
+ .shutdown = ps3disk_remove,
+};
+
+
+static int __init ps3disk_init(void)
+{
+ int error;
+
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return -ENODEV;
+
+ error = register_blkdev(0, DEVICE_NAME);
+ if (error <= 0) {
+ printk(KERN_ERR "%s:%u: register_blkdev failed %d\n", __func__,
+ __LINE__, error);
+ return error;
+ }
+ ps3disk_major = error;
+
+ pr_info("%s:%u: registered block device major %d\n", __func__,
+ __LINE__, ps3disk_major);
+
+ error = ps3_system_bus_driver_register(&ps3disk);
+ if (error)
+ unregister_blkdev(ps3disk_major, DEVICE_NAME);
+
+ return error;
+}
+
+static void __exit ps3disk_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3disk);
+ unregister_blkdev(ps3disk_major, DEVICE_NAME);
+}
+
+module_init(ps3disk_init);
+module_exit(ps3disk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 Disk Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
new file mode 100644
index 000000000000..d50b82381155
--- /dev/null
+++ b/drivers/block/sunvdc.c
@@ -0,0 +1,887 @@
+/* sunvdc.c: Sun LDOM Virtual Disk Client.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+#define DRV_MODULE_NAME "sunvdc"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "June 25, 2007"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define VDC_TX_RING_SIZE 256
+
+#define WAITING_FOR_LINK_UP 0x01
+#define WAITING_FOR_TX_SPACE 0x02
+#define WAITING_FOR_GEN_CMD 0x04
+#define WAITING_FOR_ANY -1
+
+struct vdc_req_entry {
+ struct request *req;
+};
+
+struct vdc_port {
+ struct vio_driver_state vio;
+
+ struct gendisk *disk;
+
+ struct vdc_completion *cmp;
+
+ u64 req_id;
+ u64 seq;
+ struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];
+
+ unsigned long ring_cookies;
+
+ u64 max_xfer_size;
+ u32 vdisk_block_size;
+
+ /* The server fills these in for us in the disk attribute
+ * ACK packet.
+ */
+ u64 operations;
+ u32 vdisk_size;
+ u8 vdisk_type;
+
+ char disk_name[32];
+
+ struct vio_disk_geom geom;
+ struct vio_disk_vtoc label;
+};
+
+static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
+{
+ return container_of(vio, struct vdc_port, vio);
+}
+
+/* Ordered from largest major to lowest */
+static struct vio_version vdc_versions[] = {
+ { .major = 1, .minor = 0 },
+};
+
+#define VDCBLK_NAME "vdisk"
+static int vdc_major;
+#define PARTITION_SHIFT 3
+
+static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
+{
+ return vio_dring_avail(dr, VDC_TX_RING_SIZE);
+}
+
+static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct vdc_port *port = disk->private_data;
+
+ geo->heads = (u8) port->geom.num_hd;
+ geo->sectors = (u8) port->geom.num_sec;
+ geo->cylinders = port->geom.num_cyl;
+
+ return 0;
+}
+
+static struct block_device_operations vdc_fops = {
+ .owner = THIS_MODULE,
+ .getgeo = vdc_getgeo,
+};
+
+static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
+{
+ if (vio->cmp &&
+ (waiting_for == -1 ||
+ vio->cmp->waiting_for == waiting_for)) {
+ vio->cmp->err = err;
+ complete(&vio->cmp->com);
+ vio->cmp = NULL;
+ }
+}
+
+static void vdc_handshake_complete(struct vio_driver_state *vio)
+{
+ vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
+}
+
+static int vdc_handle_unknown(struct vdc_port *port, void *arg)
+{
+ struct vio_msg_tag *pkt = arg;
+
+ printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
+ printk(KERN_ERR PFX "Resetting connection.\n");
+
+ ldc_disconnect(port->vio.lp);
+
+ return -ECONNRESET;
+}
+
+static int vdc_send_attr(struct vio_driver_state *vio)
+{
+ struct vdc_port *port = to_vdc_port(vio);
+ struct vio_disk_attr_info pkt;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.tag.type = VIO_TYPE_CTRL;
+ pkt.tag.stype = VIO_SUBTYPE_INFO;
+ pkt.tag.stype_env = VIO_ATTR_INFO;
+ pkt.tag.sid = vio_send_sid(vio);
+
+ pkt.xfer_mode = VIO_DRING_MODE;
+ pkt.vdisk_block_size = port->vdisk_block_size;
+ pkt.max_xfer_size = port->max_xfer_size;
+
+ viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
+ pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
+
+ return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
+}
+
+static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
+{
+ struct vdc_port *port = to_vdc_port(vio);
+ struct vio_disk_attr_info *pkt = arg;
+
+ viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] "
+ "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n",
+ pkt->tag.stype, pkt->operations,
+ pkt->vdisk_size, pkt->vdisk_type,
+ pkt->xfer_mode, pkt->vdisk_block_size,
+ pkt->max_xfer_size);
+
+ if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
+ switch (pkt->vdisk_type) {
+ case VD_DISK_TYPE_DISK:
+ case VD_DISK_TYPE_SLICE:
+ break;
+
+ default:
+ printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
+ vio->name, pkt->vdisk_type);
+ return -ECONNRESET;
+ }
+
+ if (pkt->vdisk_block_size > port->vdisk_block_size) {
+ printk(KERN_ERR PFX "%s: BLOCK size increased "
+ "%u --> %u\n",
+ vio->name,
+ port->vdisk_block_size, pkt->vdisk_block_size);
+ return -ECONNRESET;
+ }
+
+ port->operations = pkt->operations;
+ port->vdisk_size = pkt->vdisk_size;
+ port->vdisk_type = pkt->vdisk_type;
+ if (pkt->max_xfer_size < port->max_xfer_size)
+ port->max_xfer_size = pkt->max_xfer_size;
+ port->vdisk_block_size = pkt->vdisk_block_size;
+ return 0;
+ } else {
+ printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
+
+ return -ECONNRESET;
+ }
+}
+
+static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
+{
+ int err = desc->status;
+
+ vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
+}
+
+static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
+{
+ if (end_that_request_first(req, uptodate, num_sectors))
+ return;
+ add_disk_randomness(req->rq_disk);
+ end_that_request_last(req, uptodate);
+}
+
+static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
+ unsigned int index)
+{
+ struct vio_disk_desc *desc = vio_dring_entry(dr, index);
+ struct vdc_req_entry *rqe = &port->rq_arr[index];
+ struct request *req;
+
+ if (unlikely(desc->hdr.state != VIO_DESC_DONE))
+ return;
+
+ ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
+ desc->hdr.state = VIO_DESC_FREE;
+ dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);
+
+ req = rqe->req;
+ if (req == NULL) {
+ vdc_end_special(port, desc);
+ return;
+ }
+
+ rqe->req = NULL;
+
+ vdc_end_request(req, !desc->status, desc->size >> 9);
+
+ if (blk_queue_stopped(port->disk->queue))
+ blk_start_queue(port->disk->queue);
+}
+
+static int vdc_ack(struct vdc_port *port, void *msgbuf)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data *pkt = msgbuf;
+
+ if (unlikely(pkt->dring_ident != dr->ident ||
+ pkt->start_idx != pkt->end_idx ||
+ pkt->start_idx >= VDC_TX_RING_SIZE))
+ return 0;
+
+ vdc_end_one(port, dr, pkt->start_idx);
+
+ return 0;
+}
+
+static int vdc_nack(struct vdc_port *port, void *msgbuf)
+{
+ /* XXX Implement me XXX */
+ return 0;
+}
+
+static void vdc_event(void *arg, int event)
+{
+ struct vdc_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&vio->lock, flags);
+
+ if (unlikely(event == LDC_EVENT_RESET ||
+ event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+ return;
+ }
+
+ if (unlikely(event != LDC_EVENT_DATA_READY)) {
+ printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+ return;
+ }
+
+ err = 0;
+ while (1) {
+ union {
+ struct vio_msg_tag tag;
+ u64 raw[8];
+ } msgbuf;
+
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
+
+ if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
+ if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
+ err = vdc_ack(port, &msgbuf);
+ else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
+ err = vdc_nack(port, &msgbuf);
+ else
+ err = vdc_handle_unknown(port, &msgbuf);
+ } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
+ err = vio_control_pkt_engine(vio, &msgbuf);
+ } else {
+ err = vdc_handle_unknown(port, &msgbuf);
+ }
+ if (err < 0)
+ break;
+ }
+ if (err < 0)
+ vdc_finish(&port->vio, err, WAITING_FOR_ANY);
+ spin_unlock_irqrestore(&vio->lock, flags);
+}
+
+static int __vdc_tx_trigger(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_INFO,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = dr->prod,
+ .end_idx = dr->prod,
+ };
+ int err, delay;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ } while (err == -EAGAIN);
+
+ return err;
+}
+
+static int __send_request(struct request *req)
+{
+ struct vdc_port *port = req->rq_disk->private_data;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct scatterlist sg[port->ring_cookies];
+ struct vdc_req_entry *rqe;
+ struct vio_disk_desc *desc;
+ unsigned int map_perm;
+ int nsg, err, i;
+ u64 len;
+ u8 op;
+
+ map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
+
+ if (rq_data_dir(req) == READ) {
+ map_perm |= LDC_MAP_W;
+ op = VD_OP_BREAD;
+ } else {
+ map_perm |= LDC_MAP_R;
+ op = VD_OP_BWRITE;
+ }
+
+ nsg = blk_rq_map_sg(req->q, req, sg);
+
+ len = 0;
+ for (i = 0; i < nsg; i++)
+ len += sg[i].length;
+
+ if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
+ blk_stop_queue(port->disk->queue);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ desc = vio_dring_cur(dr);
+
+ err = ldc_map_sg(port->vio.lp, sg, nsg,
+ desc->cookies, port->ring_cookies,
+ map_perm);
+ if (err < 0) {
+ printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
+ return err;
+ }
+
+ rqe = &port->rq_arr[dr->prod];
+ rqe->req = req;
+
+ desc->hdr.ack = VIO_ACK_ENABLE;
+ desc->req_id = port->req_id;
+ desc->operation = op;
+ if (port->vdisk_type == VD_DISK_TYPE_DISK) {
+ desc->slice = 2;
+ } else {
+ desc->slice = 0;
+ }
+ desc->status = ~0;
+ desc->offset = (req->sector << 9) / port->vdisk_block_size;
+ desc->size = len;
+ desc->ncookies = err;
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ wmb();
+ desc->hdr.state = VIO_DESC_READY;
+
+ err = __vdc_tx_trigger(port);
+ if (err < 0) {
+ printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
+ } else {
+ port->req_id++;
+ dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ }
+out:
+
+ return err;
+}
+
+static void do_vdc_request(request_queue_t *q)
+{
+ while (1) {
+ struct request *req = elv_next_request(q);
+
+ if (!req)
+ break;
+
+ blkdev_dequeue_request(req);
+ if (__send_request(req) < 0)
+ vdc_end_request(req, 0, req->hard_nr_sectors);
+ }
+}
+
+static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+{
+ struct vio_dring_state *dr;
+ struct vio_completion comp;
+ struct vio_disk_desc *desc;
+ unsigned int map_perm;
+ unsigned long flags;
+ int op_len, err;
+ void *req_buf;
+
+ if (!(((u64)1 << ((u64)op - 1)) & port->operations))
+ return -EOPNOTSUPP;
+
+ switch (op) {
+ case VD_OP_BREAD:
+ case VD_OP_BWRITE:
+ default:
+ return -EINVAL;
+
+ case VD_OP_FLUSH:
+ op_len = 0;
+ map_perm = 0;
+ break;
+
+ case VD_OP_GET_WCE:
+ op_len = sizeof(u32);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_SET_WCE:
+ op_len = sizeof(u32);
+ map_perm = LDC_MAP_R;
+ break;
+
+ case VD_OP_GET_VTOC:
+ op_len = sizeof(struct vio_disk_vtoc);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_SET_VTOC:
+ op_len = sizeof(struct vio_disk_vtoc);
+ map_perm = LDC_MAP_R;
+ break;
+
+ case VD_OP_GET_DISKGEOM:
+ op_len = sizeof(struct vio_disk_geom);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_SET_DISKGEOM:
+ op_len = sizeof(struct vio_disk_geom);
+ map_perm = LDC_MAP_R;
+ break;
+
+ case VD_OP_SCSICMD:
+ op_len = 16;
+ map_perm = LDC_MAP_RW;
+ break;
+
+ case VD_OP_GET_DEVID:
+ op_len = sizeof(struct vio_disk_devid);
+ map_perm = LDC_MAP_W;
+ break;
+
+ case VD_OP_GET_EFI:
+ case VD_OP_SET_EFI:
+ return -EOPNOTSUPP;
+ break;
+ };
+
+ map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
+
+ op_len = (op_len + 7) & ~7;
+ req_buf = kzalloc(op_len, GFP_KERNEL);
+ if (!req_buf)
+ return -ENOMEM;
+
+ if (len > op_len)
+ len = op_len;
+
+ if (map_perm & LDC_MAP_R)
+ memcpy(req_buf, buf, len);
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ /* XXX If we want to use this code generically we have to
+ * XXX handle TX ring exhaustion etc.
+ */
+ desc = vio_dring_cur(dr);
+
+ err = ldc_map_single(port->vio.lp, req_buf, op_len,
+ desc->cookies, port->ring_cookies,
+ map_perm);
+ if (err < 0) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ kfree(req_buf);
+ return err;
+ }
+
+ init_completion(&comp.com);
+ comp.waiting_for = WAITING_FOR_GEN_CMD;
+ port->vio.cmp = &comp;
+
+ desc->hdr.ack = VIO_ACK_ENABLE;
+ desc->req_id = port->req_id;
+ desc->operation = op;
+ desc->slice = 0;
+ desc->status = ~0;
+ desc->offset = 0;
+ desc->size = op_len;
+ desc->ncookies = err;
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ wmb();
+ desc->hdr.state = VIO_DESC_READY;
+
+ err = __vdc_tx_trigger(port);
+ if (err >= 0) {
+ port->req_id++;
+ dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ wait_for_completion(&comp.com);
+ err = comp.err;
+ } else {
+ port->vio.cmp = NULL;
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ }
+
+ if (map_perm & LDC_MAP_W)
+ memcpy(buf, req_buf, len);
+
+ kfree(req_buf);
+
+ return err;
+}
+
+static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ unsigned long len, entry_size;
+ int ncookies;
+ void *dring;
+
+ entry_size = sizeof(struct vio_disk_desc) +
+ (sizeof(struct ldc_trans_cookie) * port->ring_cookies);
+ len = (VDC_TX_RING_SIZE * entry_size);
+
+ ncookies = VIO_MAX_RING_COOKIES;
+ dring = ldc_alloc_exp_dring(port->vio.lp, len,
+ dr->cookies, &ncookies,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (IS_ERR(dring))
+ return PTR_ERR(dring);
+
+ dr->base = dring;
+ dr->entry_size = entry_size;
+ dr->num_entries = VDC_TX_RING_SIZE;
+ dr->prod = dr->cons = 0;
+ dr->pending = VDC_TX_RING_SIZE;
+ dr->ncookies = ncookies;
+
+ return 0;
+}
+
+static void vdc_free_tx_ring(struct vdc_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ if (dr->base) {
+ ldc_free_exp_dring(port->vio.lp, dr->base,
+ (dr->entry_size * dr->num_entries),
+ dr->cookies, dr->ncookies);
+ dr->base = NULL;
+ dr->entry_size = 0;
+ dr->num_entries = 0;
+ dr->pending = 0;
+ dr->ncookies = 0;
+ }
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+ struct vio_completion comp;
+ struct request_queue *q;
+ struct gendisk *g;
+ int err;
+
+ init_completion(&comp.com);
+ comp.err = 0;
+ comp.waiting_for = WAITING_FOR_LINK_UP;
+ port->vio.cmp = &comp;
+
+ vio_port_up(&port->vio);
+
+ wait_for_completion(&comp.com);
+ if (comp.err)
+ return comp.err;
+
+ err = generic_request(port, VD_OP_GET_VTOC,
+ &port->label, sizeof(port->label));
+ if (err < 0) {
+ printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
+ return err;
+ }
+
+ err = generic_request(port, VD_OP_GET_DISKGEOM,
+ &port->geom, sizeof(port->geom));
+ if (err < 0) {
+ printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+ "error %d\n", err);
+ return err;
+ }
+
+ port->vdisk_size = ((u64)port->geom.num_cyl *
+ (u64)port->geom.num_hd *
+ (u64)port->geom.num_sec);
+
+ q = blk_init_queue(do_vdc_request, &port->vio.lock);
+ if (!q) {
+ printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
+ port->vio.name);
+ return -ENOMEM;
+ }
+ g = alloc_disk(1 << PARTITION_SHIFT);
+ if (!g) {
+ printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
+ port->vio.name);
+ blk_cleanup_queue(q);
+ return -ENOMEM;
+ }
+
+ port->disk = g;
+
+ blk_queue_max_hw_segments(q, port->ring_cookies);
+ blk_queue_max_phys_segments(q, port->ring_cookies);
+ blk_queue_max_sectors(q, port->max_xfer_size);
+ g->major = vdc_major;
+ g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
+ strcpy(g->disk_name, port->disk_name);
+
+ g->fops = &vdc_fops;
+ g->queue = q;
+ g->private_data = port;
+ g->driverfs_dev = &port->vio.vdev->dev;
+
+ set_capacity(g, port->vdisk_size);
+
+ printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
+ g->disk_name,
+ port->vdisk_size, (port->vdisk_size >> (20 - 9)));
+
+ add_disk(g);
+
+ return 0;
+}
+
+static struct ldc_channel_config vdc_ldc_cfg = {
+ .event = vdc_event,
+ .mtu = 64,
+ .mode = LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vdc_vio_ops = {
+ .send_attr = vdc_send_attr,
+ .handle_attr = vdc_handle_attr,
+ .handshake_complete = vdc_handshake_complete,
+};
+
+static void print_version(void)
+{
+ static int version_printed;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+static int __devinit vdc_port_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct mdesc_handle *hp;
+ struct vdc_port *port;
+ int err;
+
+ print_version();
+
+ hp = mdesc_grab();
+
+ err = -ENODEV;
+ if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
+ printk(KERN_ERR PFX "Port id [%lu] too large.\n",
+ vdev->dev_no);
+ goto err_out_release_mdesc;
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!port) {
+ printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
+ goto err_out_release_mdesc;
+ }
+
+ if (vdev->dev_no >= 26)
+ snprintf(port->disk_name, sizeof(port->disk_name),
+ VDCBLK_NAME "%c%c",
+ 'a' + ((int)vdev->dev_no / 26) - 1,
+ 'a' + ((int)vdev->dev_no % 26));
+ else
+ snprintf(port->disk_name, sizeof(port->disk_name),
+ VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
+
+ err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
+ vdc_versions, ARRAY_SIZE(vdc_versions),
+ &vdc_vio_ops, port->disk_name);
+ if (err)
+ goto err_out_free_port;
+
+ port->vdisk_block_size = 512;
+ port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
+ port->ring_cookies = ((port->max_xfer_size *
+ port->vdisk_block_size) / PAGE_SIZE) + 2;
+
+ err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
+ if (err)
+ goto err_out_free_port;
+
+ err = vdc_alloc_tx_ring(port);
+ if (err)
+ goto err_out_free_ldc;
+
+ err = probe_disk(port);
+ if (err)
+ goto err_out_free_tx_ring;
+
+ dev_set_drvdata(&vdev->dev, port);
+
+ mdesc_release(hp);
+
+ return 0;
+
+err_out_free_tx_ring:
+ vdc_free_tx_ring(port);
+
+err_out_free_ldc:
+ vio_ldc_free(&port->vio);
+
+err_out_free_port:
+ kfree(port);
+
+err_out_release_mdesc:
+ mdesc_release(hp);
+ return err;
+}
+
+static int vdc_port_remove(struct vio_dev *vdev)
+{
+ struct vdc_port *port = dev_get_drvdata(&vdev->dev);
+
+ if (port) {
+ del_timer_sync(&port->vio.timer);
+
+ vdc_free_tx_ring(port);
+ vio_ldc_free(&port->vio);
+
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
+ }
+ return 0;
+}
+
+static struct vio_device_id vdc_port_match[] = {
+ {
+ .type = "vdc-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(vio, vdc_port_match);
+
+static struct vio_driver vdc_port_driver = {
+ .id_table = vdc_port_match,
+ .probe = vdc_port_probe,
+ .remove = vdc_port_remove,
+ .driver = {
+ .name = "vdc_port",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init vdc_init(void)
+{
+ int err;
+
+ err = register_blkdev(0, VDCBLK_NAME);
+ if (err < 0)
+ goto out_err;
+
+ vdc_major = err;
+
+ err = vio_register_driver(&vdc_port_driver);
+ if (err)
+ goto out_unregister_blkdev;
+
+ return 0;
+
+out_unregister_blkdev:
+ unregister_blkdev(vdc_major, VDCBLK_NAME);
+ vdc_major = 0;
+
+out_err:
+ return err;
+}
+
+static void __exit vdc_exit(void)
+{
+ vio_unregister_driver(&vdc_port_driver);
+ unregister_blkdev(vdc_major, VDCBLK_NAME);
+}
+
+module_init(vdc_init);
+module_exit(vdc_exit);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 54509eb3391b..949ae93499e5 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -1608,7 +1608,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
- host = kmalloc(sizeof(*host), GFP_KERNEL);
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host) {
printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
pci_name(pdev));
@@ -1616,7 +1616,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_regions;
}
- memset(host, 0, sizeof(*host));
host->pdev = pdev;
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 746a118a9b52..8b13d7d2cb63 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1547,10 +1547,8 @@ static void ub_reset_enter(struct ub_dev *sc, int try)
#endif
#if 0 /* We let them stop themselves. */
- struct list_head *p;
struct ub_lun *lun;
- list_for_each(p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
blk_stop_queue(lun->disk->queue);
}
#endif
@@ -1562,7 +1560,6 @@ static void ub_reset_task(struct work_struct *work)
{
struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
- struct list_head *p;
struct ub_lun *lun;
int lkr, rc;
@@ -1608,8 +1605,7 @@ static void ub_reset_task(struct work_struct *work)
spin_lock_irqsave(sc->lock, flags);
sc->reset = 0;
tasklet_schedule(&sc->tasklet);
- list_for_each(p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
blk_start_queue(lun->disk->queue);
}
wake_up(&sc->reset_wait);
@@ -1713,7 +1709,7 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp,
struct gendisk *disk = inode->i_bdev->bd_disk;
void __user *usermem = (void __user *) arg;
- return scsi_cmd_ioctl(filp, disk, cmd, usermem);
+ return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem);
}
/*
@@ -2348,7 +2344,6 @@ err_alloc:
static void ub_disconnect(struct usb_interface *intf)
{
struct ub_dev *sc = usb_get_intfdata(intf);
- struct list_head *p;
struct ub_lun *lun;
unsigned long flags;
@@ -2403,8 +2398,7 @@ static void ub_disconnect(struct usb_interface *intf)
/*
* Unregister the upper layer.
*/
- list_for_each (p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
+ list_for_each_entry(lun, &sc->luns, link) {
del_gendisk(lun->disk);
/*
* I wish I could do:
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 6f5d6203d725..dec74bd23496 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -105,12 +105,6 @@ struct cardinfo {
unsigned long csr_base;
unsigned char __iomem *csr_remap;
unsigned long csr_len;
-#ifdef CONFIG_MM_MAP_MEMORY
- unsigned long mem_base;
- unsigned char __iomem *mem_remap;
- unsigned long mem_len;
-#endif
-
unsigned int win_size; /* PCI window size */
unsigned int mm_size; /* size in kbytes */
@@ -872,10 +866,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
card->csr_base = pci_resource_start(dev, 0);
card->csr_len = pci_resource_len(dev, 0);
-#ifdef CONFIG_MM_MAP_MEMORY
- card->mem_base = pci_resource_start(dev, 1);
- card->mem_len = pci_resource_len(dev, 1);
-#endif
printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
card->card_number, dev->bus->number, dev->devfn);
@@ -903,27 +893,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number,
card->csr_base, card->csr_remap, card->csr_len);
-#ifdef CONFIG_MM_MAP_MEMORY
- if (!request_mem_region(card->mem_base, card->mem_len, "Micro Memory")) {
- printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
- ret = -ENOMEM;
-
- goto failed_req_mem;
- }
-
- if (!(card->mem_remap = ioremap(card->mem_base, cards->mem_len))) {
- printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
- ret = -ENOMEM;
-
- goto failed_remap_mem;
- }
-
- printk(KERN_INFO "MM%d: MEM 0x%8lx -> 0x%8lx (0x%lx)\n", card->card_number,
- card->mem_base, card->mem_remap, card->mem_len);
-#else
- printk(KERN_INFO "MM%d: MEM area not remapped (CONFIG_MM_MAP_MEMORY not set)\n",
- card->card_number);
-#endif
switch(card->dev->device) {
case 0x5415:
card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
@@ -1091,12 +1060,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
failed_magic:
-#ifdef CONFIG_MM_MAP_MEMORY
- iounmap(card->mem_remap);
- failed_remap_mem:
- release_mem_region(card->mem_base, card->mem_len);
- failed_req_mem:
-#endif
iounmap(card->csr_remap);
failed_remap_csr:
release_mem_region(card->csr_base, card->csr_len);
@@ -1116,10 +1079,6 @@ static void mm_pci_remove(struct pci_dev *dev)
tasklet_kill(&card->tasklet);
iounmap(card->csr_remap);
release_mem_region(card->csr_base, card->csr_len);
-#ifdef CONFIG_MM_MAP_MEMORY
- iounmap(card->mem_remap);
- release_mem_region(card->mem_base, card->mem_len);
-#endif
free_irq(card->irq, card);
if (card->mm_pages[0].desc)
@@ -1133,23 +1092,18 @@ static void mm_pci_remove(struct pci_dev *dev)
blk_cleanup_queue(card->queue);
}
-static const struct pci_device_id mm_pci_ids[] = { {
- .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
- .device = PCI_DEVICE_ID_MICRO_MEMORY_5415CN,
- }, {
- .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
- .device = PCI_DEVICE_ID_MICRO_MEMORY_5425CN,
- }, {
- .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
- .device = PCI_DEVICE_ID_MICRO_MEMORY_6155,
- }, {
+static const struct pci_device_id mm_pci_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_5415CN)},
+ {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_5425CN)},
+ {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY,PCI_DEVICE_ID_MICRO_MEMORY_6155)},
+ {
.vendor = 0x8086,
.device = 0xB555,
.subvendor= 0x1332,
.subdevice= 0x5460,
.class = 0x050000,
.class_mask= 0,
- }, { /* end: all zeroes */ }
+ }, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, mm_pci_ids);
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 68592c336011..dae39911a11d 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -252,10 +252,10 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct gendisk *disk = bdev->bd_disk;
struct viodasd_device *d = disk->private_data;
- geo->sectors = d->sectors ? d->sectors : 0;
+ geo->sectors = d->sectors ? d->sectors : 32;
geo->heads = d->tracks ? d->tracks : 64;
geo->cylinders = d->cylinders ? d->cylinders :
- get_capacity(disk) / (geo->cylinders * geo->heads);
+ get_capacity(disk) / (geo->sectors * geo->heads);
return 0;
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
new file mode 100644
index 000000000000..6746c29181f8
--- /dev/null
+++ b/drivers/block/xen-blkfront.c
@@ -0,0 +1,988 @@
+/*
+ * blkfront.c
+ *
+ * XenLinux virtual block device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * Copyright (c) 2004, Andrew Warfield
+ * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/blkif.h>
+
+#include <asm/xen/hypervisor.h>
+
+enum blkif_state {
+ BLKIF_STATE_DISCONNECTED,
+ BLKIF_STATE_CONNECTED,
+ BLKIF_STATE_SUSPENDED,
+};
+
+struct blk_shadow {
+ struct blkif_request req;
+ unsigned long request;
+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+static struct block_device_operations xlvbd_block_fops;
+
+#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'. They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct blkfront_info
+{
+ struct xenbus_device *xbdev;
+ dev_t dev;
+ struct gendisk *gd;
+ int vdevice;
+ blkif_vdev_t handle;
+ enum blkif_state connected;
+ int ring_ref;
+ struct blkif_front_ring ring;
+ unsigned int evtchn, irq;
+ struct request_queue *rq;
+ struct work_struct work;
+ struct gnttab_free_callback callback;
+ struct blk_shadow shadow[BLK_RING_SIZE];
+ unsigned long shadow_free;
+ int feature_barrier;
+
+ /**
+ * The number of people holding this device open. We won't allow a
+ * hot-unplug unless this is 0.
+ */
+ int users;
+};
+
+static DEFINE_SPINLOCK(blkif_io_lock);
+
+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
+#define GRANT_INVALID_REF 0
+
+#define PARTS_PER_DISK 16
+
+#define BLKIF_MAJOR(dev) ((dev)>>8)
+#define BLKIF_MINOR(dev) ((dev) & 0xff)
+
+#define DEV_NAME "xvd" /* name in /dev */
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static LIST_HEAD(vbds_list);
+
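+/* The shadow array doubles as a free list: unused entries are chained
+ * through their req.id fields, with shadow_free indexing the head.
+ */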
+static int get_id_from_freelist(struct blkfront_info *info)
+{
+ unsigned long free = info->shadow_free;
+ BUG_ON(free > BLK_RING_SIZE);
+ info->shadow_free = info->shadow[free].req.id;
+ info->shadow[free].req.id = 0x0fffffee; /* debug */
+ return free;
+}
+
+static void add_id_to_freelist(struct blkfront_info *info,
+ unsigned long id)
+{
+ info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].request = 0;
+ info->shadow_free = id;
+}
+
+static void blkif_restart_queue_callback(void *arg)
+{
+ struct blkfront_info *info = (struct blkfront_info *)arg;
+ schedule_work(&info->work);
+}
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ * virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+ struct blkfront_info *info = req->rq_disk->private_data;
+ unsigned long buffer_mfn;
+ struct blkif_request *ring_req;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int idx;
+ unsigned long id;
+ unsigned int fsect, lsect;
+ int ref;
+ grant_ref_t gref_head;
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+ return 1;
+
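+	/* Reserve grant references for a worst-case request up front; if
+	 * none are free, arm a callback that restarts the queue once
+	 * grants are returned.
+	 */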
+ if (gnttab_alloc_grant_references(
+ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
+ gnttab_request_free_callback(
+ &info->callback,
+ blkif_restart_queue_callback,
+ info,
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ return 1;
+ }
+
+ /* Fill out a communications ring structure. */
+ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ id = get_id_from_freelist(info);
+ info->shadow[id].request = (unsigned long)req;
+
+ ring_req->id = id;
+ ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->handle = info->handle;
+
+ ring_req->operation = rq_data_dir(req) ?
+ BLKIF_OP_WRITE : BLKIF_OP_READ;
+ if (blk_barrier_rq(req))
+ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+
+ ring_req->nr_segments = 0;
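+	/* Grant the backend access to every segment's page and remember
+	 * each frame so the grants can be re-issued after suspend/resume.
+	 */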
+ rq_for_each_bio (bio, req) {
+ bio_for_each_segment (bvec, bio, idx) {
+ BUG_ON(ring_req->nr_segments
+ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ info->xbdev->otherend_id,
+ buffer_mfn,
+ rq_data_dir(req) );
+
+ info->shadow[id].frame[ring_req->nr_segments] =
+ mfn_to_pfn(buffer_mfn);
+
+ ring_req->seg[ring_req->nr_segments] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+
+ ring_req->nr_segments++;
+ }
+ }
+
+ info->ring.req_prod_pvt++;
+
+ /* Keep a private copy so we can reissue requests when recovering. */
+ info->shadow[id].req = *ring_req;
+
+ gnttab_free_grant_references(gref_head);
+
+ return 0;
+}
+
+
+static inline void flush_requests(struct blkfront_info *info)
+{
+ int notify;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
+
+ if (notify)
+ notify_remote_via_irq(info->irq);
+}
+
+/*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+static void do_blkif_request(request_queue_t *rq)
+{
+ struct blkfront_info *info = NULL;
+ struct request *req;
+ int queued;
+
+ pr_debug("Entered do_blkif_request\n");
+
+ queued = 0;
+
+ while ((req = elv_next_request(rq)) != NULL) {
+ info = req->rq_disk->private_data;
+ if (!blk_fs_request(req)) {
+ end_request(req, 0);
+ continue;
+ }
+
+ if (RING_FULL(&info->ring))
+ goto wait;
+
+ pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+ "(%u/%li) buffer:%p [%s]\n",
+ req, req->cmd, (unsigned long)req->sector,
+ req->current_nr_sectors,
+ req->nr_sectors, req->buffer,
+ rq_data_dir(req) ? "write" : "read");
+
+
+ blkdev_dequeue_request(req);
+ if (blkif_queue_request(req)) {
+ blk_requeue_request(rq, req);
+wait:
+ /* Avoid pointless unplugs. */
+ blk_stop_queue(rq);
+ break;
+ }
+
+ queued++;
+ }
+
+ if (queued != 0)
+ flush_requests(info);
+}
+
+static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+{
+ request_queue_t *rq;
+
+ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ if (rq == NULL)
+ return -1;
+
+ elevator_init(rq, "noop");
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_hardsect_size(rq, sector_size);
+ blk_queue_max_sectors(rq, 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ gd->queue = rq;
+
+ return 0;
+}
+
+
+static int xlvbd_barrier(struct blkfront_info *info)
+{
+ int err;
+
+ err = blk_queue_ordered(info->rq,
+ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
+ NULL);
+
+ if (err)
+ return err;
+
+ printk(KERN_INFO "blkfront: %s: barriers %s\n",
+ info->gd->disk_name,
+ info->feature_barrier ? "enabled" : "disabled");
+ return 0;
+}
+
+
+static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
+ int vdevice, u16 vdisk_info, u16 sector_size,
+ struct blkfront_info *info)
+{
+ struct gendisk *gd;
+ int nr_minors = 1;
+ int err = -ENODEV;
+
+ BUG_ON(info->gd != NULL);
+ BUG_ON(info->rq != NULL);
+
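+	/* A minor at the start of a PARTS_PER_DISK range represents the
+	 * whole disk (e.g. xvda); other minors get a partition suffix.
+	 */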
+ if ((minor % PARTS_PER_DISK) == 0)
+ nr_minors = PARTS_PER_DISK;
+
+ gd = alloc_disk(nr_minors);
+ if (gd == NULL)
+ goto out;
+
+ if (nr_minors > 1)
+ sprintf(gd->disk_name, "%s%c", DEV_NAME,
+ 'a' + minor / PARTS_PER_DISK);
+ else
+ sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
+ 'a' + minor / PARTS_PER_DISK,
+ minor % PARTS_PER_DISK);
+
+ gd->major = XENVBD_MAJOR;
+ gd->first_minor = minor;
+ gd->fops = &xlvbd_block_fops;
+ gd->private_data = info;
+ gd->driverfs_dev = &(info->xbdev->dev);
+ set_capacity(gd, capacity);
+
+ if (xlvbd_init_blk_queue(gd, sector_size)) {
+ del_gendisk(gd);
+ goto out;
+ }
+
+ info->rq = gd->queue;
+ info->gd = gd;
+
+ if (info->feature_barrier)
+ xlvbd_barrier(info);
+
+ if (vdisk_info & VDISK_READONLY)
+ set_disk_ro(gd, 1);
+
+ if (vdisk_info & VDISK_REMOVABLE)
+ gd->flags |= GENHD_FL_REMOVABLE;
+
+ if (vdisk_info & VDISK_CDROM)
+ gd->flags |= GENHD_FL_CD;
+
+ return 0;
+
+ out:
+ return err;
+}
+
+static void kick_pending_request_queues(struct blkfront_info *info)
+{
+ if (!RING_FULL(&info->ring)) {
+ /* Re-enable calldowns. */
+ blk_start_queue(info->rq);
+ /* Kick things off immediately. */
+ do_blkif_request(info->rq);
+ }
+}
+
+static void blkif_restart_queue(struct work_struct *work)
+{
+ struct blkfront_info *info = container_of(work, struct blkfront_info, work);
+
+ spin_lock_irq(&blkif_io_lock);
+ if (info->connected == BLKIF_STATE_CONNECTED)
+ kick_pending_request_queues(info);
+ spin_unlock_irq(&blkif_io_lock);
+}
+
+static void blkif_free(struct blkfront_info *info, int suspend)
+{
+ /* Prevent new requests being issued until we fix things up. */
+ spin_lock_irq(&blkif_io_lock);
+ info->connected = suspend ?
+ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
+ /* No more blkif_request(). */
+ if (info->rq)
+ blk_stop_queue(info->rq);
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irq(&blkif_io_lock);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ /* Free resources associated with old device channel. */
+ if (info->ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ info->ring_ref = GRANT_INVALID_REF;
+ info->ring.sring = NULL;
+ }
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, info);
+ info->evtchn = info->irq = 0;
+
+}
+
+static void blkif_completion(struct blk_shadow *s)
+{
+ int i;
+ for (i = 0; i < s->req.nr_segments; i++)
+ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+}
+
+static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+{
+ struct request *req;
+ struct blkif_response *bret;
+ RING_IDX i, rp;
+ unsigned long flags;
+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int uptodate;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ again:
+ rp = info->ring.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for (i = info->ring.rsp_cons; i != rp; i++) {
+ unsigned long id;
+ int ret;
+
+ bret = RING_GET_RESPONSE(&info->ring, i);
+ id = bret->id;
+ req = (struct request *)info->shadow[id].request;
+
+ blkif_completion(&info->shadow[id]);
+
+ add_id_to_freelist(info, id);
+
+ uptodate = (bret->status == BLKIF_RSP_OKAY);
+ switch (bret->operation) {
+ case BLKIF_OP_WRITE_BARRIER:
+ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
+ info->gd->disk_name);
+ uptodate = -EOPNOTSUPP;
+ info->feature_barrier = 0;
+ xlvbd_barrier(info);
+ }
+ /* fall through */
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if (unlikely(bret->status != BLKIF_RSP_OKAY))
+ dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
+ "request: %x\n", bret->status);
+
+ ret = end_that_request_first(req, uptodate,
+ req->hard_nr_sectors);
+ BUG_ON(ret);
+ end_that_request_last(req, uptodate);
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ info->ring.rsp_cons = i;
+
+ if (i != info->ring.req_prod_pvt) {
+ int more_to_do;
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+ if (more_to_do)
+ goto again;
+ } else
+ info->ring.sring->rsp_event = i + 1;
+
+ kick_pending_request_queues(info);
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int setup_blkring(struct xenbus_device *dev,
+ struct blkfront_info *info)
+{
+ struct blkif_sring *sring;
+ int err;
+
+ info->ring_ref = GRANT_INVALID_REF;
+
+ sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
+ if (!sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+ return -ENOMEM;
+ }
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+ if (err < 0) {
+ free_page((unsigned long)sring);
+ info->ring.sring = NULL;
+ goto fail;
+ }
+ info->ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err)
+ goto fail;
+
+ err = bind_evtchn_to_irqhandler(info->evtchn,
+ blkif_interrupt,
+ IRQF_SAMPLE_RANDOM, "blkif", info);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err,
+ "bind_evtchn_to_irqhandler failed");
+ goto fail;
+ }
+ info->irq = err;
+
+ return 0;
+fail:
+ blkif_free(info, 0);
+ return err;
+}
+
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+ struct blkfront_info *info)
+{
+ const char *message = NULL;
+ struct xenbus_transaction xbt;
+ int err;
+
+ /* Create shared ring, alloc event channel. */
+ err = setup_blkring(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_blkring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename,
+ "ring-ref", "%u", info->ring_ref);
+ if (err) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_blkring;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_blkring:
+ blkif_free(info, 0);
+ out:
+ return err;
+}
+
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffer for communication with the backend, and
+ * inform the backend of the appropriate details for those. Switch to
+ * Initialised state.
+ */
+static int blkfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err, vdevice, i;
+ struct blkfront_info *info;
+
+ /* FIXME: Use dynamic device id if this is not set. */
+ err = xenbus_scanf(XBT_NIL, dev->nodename,
+ "virtual-device", "%i", &vdevice);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading virtual-device");
+ return err;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+ return -ENOMEM;
+ }
+
+ info->xbdev = dev;
+ info->vdevice = vdevice;
+ info->connected = BLKIF_STATE_DISCONNECTED;
+ INIT_WORK(&info->work, blkif_restart_queue);
+
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Front end dir is a number, which is used as the id. */
+ info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
+ dev->dev.driver_data = info;
+
+ err = talk_to_backend(dev, info);
+ if (err) {
+ kfree(info);
+ dev->dev.driver_data = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+
+static int blkif_recover(struct blkfront_info *info)
+{
+ int i;
+ struct blkif_request *req;
+ struct blk_shadow *copy;
+ int j;
+
+ /* Stage 1: Make a safe copy of the shadow state. */
+ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ memcpy(copy, info->shadow, sizeof(info->shadow));
+
+ /* Stage 2: Set up free list. */
+ memset(&info->shadow, 0, sizeof(info->shadow));
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+ info->shadow_free = info->ring.req_prod_pvt;
+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Stage 3: Find pending requests and requeue them. */
+ for (i = 0; i < BLK_RING_SIZE; i++) {
+ /* Not in use? */
+ if (copy[i].request == 0)
+ continue;
+
+ /* Grab a request slot and copy shadow state into it. */
+ req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ *req = copy[i].req;
+
+ /* We get a new request id, and must reset the shadow state. */
+ req->id = get_id_from_freelist(info);
+ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+
+ /* Rewrite any grant references invalidated by susp/resume. */
+ for (j = 0; j < req->nr_segments; j++)
+ gnttab_grant_foreign_access_ref(
+ req->seg[j].gref,
+ info->xbdev->otherend_id,
+ pfn_to_mfn(info->shadow[req->id].frame[j]),
+ rq_data_dir(
+ (struct request *)
+ info->shadow[req->id].request));
+ info->shadow[req->id].req = *req;
+
+ info->ring.req_prod_pvt++;
+ }
+
+ kfree(copy);
+
+ xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+ spin_lock_irq(&blkif_io_lock);
+
+ /* Now safe for us to use the shared ring */
+ info->connected = BLKIF_STATE_CONNECTED;
+
+ /* Send off requeued requests */
+ flush_requests(info);
+
+ /* Kick any other new requests queued since we resumed */
+ kick_pending_request_queues(info);
+
+ spin_unlock_irq(&blkif_io_lock);
+
+ return 0;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart. We tear down our blkif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int blkfront_resume(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ int err;
+
+ dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+
+ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+
+ err = talk_to_backend(dev, info);
+ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
+ err = blkif_recover(info);
+
+ return err;
+}
+
+
+/*
+ * Invoked when the backend is finally 'ready' (and has produced the
+ * details about the physical device - #sectors, size, etc).
+ */
+static void blkfront_connect(struct blkfront_info *info)
+{
+ unsigned long long sectors;
+ unsigned long sector_size;
+ unsigned int binfo;
+ int err;
+
+ if ((info->connected == BLKIF_STATE_CONNECTED) ||
+ (info->connected == BLKIF_STATE_SUSPENDED) )
+ return;
+
+ dev_dbg(&info->xbdev->dev, "%s:%s.\n",
+ __func__, info->xbdev->otherend);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "sectors", "%llu", &sectors,
+ "info", "%u", &binfo,
+ "sector-size", "%lu", &sector_size,
+ NULL);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err,
+ "reading backend fields at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%lu", &info->feature_barrier,
+ NULL);
+ if (err)
+ info->feature_barrier = 0;
+
+ err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
+ sectors, info->vdevice,
+ binfo, sector_size, info);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+ /* Kick pending requests. */
+ spin_lock_irq(&blkif_io_lock);
+ info->connected = BLKIF_STATE_CONNECTED;
+ kick_pending_request_queues(info);
+ spin_unlock_irq(&blkif_io_lock);
+
+ add_disk(info->gd);
+}
+
+/**
+ * Handle the change of state of the backend to Closing. We must delete our
+ * device-layer structures now, to ensure that writes are flushed through to
+ * the backend. Once this is done, we can switch to Closed in
+ * acknowledgement.
+ */
+static void blkfront_closing(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ unsigned long flags;
+
+ dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
+
+ if (info->rq == NULL)
+ goto out;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ del_gendisk(info->gd);
+
+ /* No more blkif_request(). */
+ blk_stop_queue(info->rq);
+
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ blk_cleanup_queue(info->rq);
+ info->rq = NULL;
+
+ out:
+ xenbus_frontend_closed(dev);
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ struct block_device *bd;
+
+ dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateConnected:
+ blkfront_connect(info);
+ break;
+
+ case XenbusStateClosing:
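+		/* Only tear the device down if nobody holds it open;
+		 * otherwise report -EBUSY and let the final release
+		 * finish the close.
+		 */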
+ bd = bdget(info->dev);
+ if (bd == NULL)
+ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
+
+ mutex_lock(&bd->bd_mutex);
+ if (info->users > 0)
+ xenbus_dev_error(dev, -EBUSY,
+ "Device in use; refusing to close");
+ else
+ blkfront_closing(dev);
+ mutex_unlock(&bd->bd_mutex);
+ bdput(bd);
+ break;
+ }
+}
+
+static int blkfront_remove(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+
+ blkif_free(info, 0);
+
+ kfree(info);
+
+ return 0;
+}
+
+static int blkif_open(struct inode *inode, struct file *filep)
+{
+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ info->users++;
+ return 0;
+}
+
+static int blkif_release(struct inode *inode, struct file *filep)
+{
+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ info->users--;
+ if (info->users == 0) {
+ /* Check whether we have been instructed to close. We will
+ have ignored this request initially, as the device was
+ still mounted. */
+ struct xenbus_device *dev = info->xbdev;
+ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
+
+ if (state == XenbusStateClosing)
+ blkfront_closing(dev);
+ }
+ return 0;
+}
+
+static struct block_device_operations xlvbd_block_fops =
+{
+ .owner = THIS_MODULE,
+ .open = blkif_open,
+ .release = blkif_release,
+};
+
+
+static struct xenbus_device_id blkfront_ids[] = {
+ { "vbd" },
+ { "" }
+};
+
+static struct xenbus_driver blkfront = {
+ .name = "vbd",
+ .owner = THIS_MODULE,
+ .ids = blkfront_ids,
+ .probe = blkfront_probe,
+ .remove = blkfront_remove,
+ .resume = blkfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init xlblk_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+ printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
+ XENVBD_MAJOR, DEV_NAME);
+ return -ENODEV;
+ }
+
+ return xenbus_register_frontend(&blkfront);
+}
+module_init(xlblk_init);
+
+
+static void xlblk_exit(void)
+{
+ return xenbus_unregister_driver(&blkfront);
+}
+module_exit(xlblk_exit);
+
+MODULE_DESCRIPTION("Xen virtual block device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 000000000000..732ec63b6e9c
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1164 @@
+/*
+ * Xilinx SystemACE device driver
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The SystemACE chip is designed to configure FPGAs by loading an FPGA
+ * bitstream from a file on a CF card and squirting it into FPGAs connected
+ * to the SystemACE JTAG chain. It also has the advantage of providing an
+ * MPU interface which can be used to control the FPGA configuration process
+ * and to use the attached CF card for general purpose storage.
+ *
+ * This driver is a block device driver for the SystemACE.
+ *
+ * Initialization:
+ * The driver registers itself as a platform_device driver at module
+ * load time. The platform bus will take care of calling the
+ * ace_probe() method for all SystemACE instances in the system. Any
+ * number of SystemACE instances are supported. ace_probe() calls
+ * ace_setup() which initializes all data structures, reads the CF
+ * id structure and registers the device.
+ *
+ * Processing:
+ * Just about all of the heavy lifting in this driver is performed by
+ * a Finite State Machine (FSM). The driver needs to wait on a number
+ * of events; some raised by interrupts, some which need to be polled
+ * for. Describing all of the behaviour in an FSM seems to be the
+ * easiest way to keep the complexity low and make it easy to
+ * understand what the driver is doing. If the block ops or the
+ * request function need to interact with the hardware, then they
+ * simply need to flag the request and kick off FSM processing.
+ *
+ * The FSM itself is atomic-safe code which can be run from any
+ * context. The general process flow is:
+ * 1. obtain the ace->lock spinlock.
+ * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
+ * cleared.
+ * 3. release the lock.
+ *
+ * Individual states do not sleep in any way. If a condition needs to
+ * be waited for, then the state must clear the fsm_continue flag and
+ * either schedule the FSM to be run again at a later time, or expect
+ * an interrupt to call the FSM when the desired condition is met.
+ *
+ * In normal operation, the FSM is processed at interrupt context
+ * either when the driver's tasklet is scheduled, or when an irq is
+ * raised by the hardware. The tasklet can be scheduled at any time.
+ * The request method in particular schedules the tasklet when a new
+ * request has been indicated by the block layer. Once started, the
+ * FSM proceeds as far as it can with the request until it needs to
+ * wait on a hardware event. At this point, it must yield execution.
+ *
+ * A state has two options when yielding execution:
+ * 1. ace_fsm_yield()
+ * - Call if need to poll for event.
+ * - clears the fsm_continue flag to exit the processing loop
+ * - reschedules the tasklet to run again as soon as possible
+ * 2. ace_fsm_yieldirq()
+ * - Call if an irq is expected from the HW
+ * - clears the fsm_continue flag to exit the processing loop
+ * - does not reschedule the tasklet so the FSM will not be processed
+ * again until an irq is received.
+ * After calling a yield function, the state must return control back
+ * to the FSM main loop.
+ *
+ * Additionally, the driver maintains a kernel timer which can process
+ * the FSM. If the FSM gets stalled, typically due to a missed
+ * interrupt, then the kernel timer will expire and the driver can
+ * continue where it left off.
+ *
+ * To Do:
+ * - Add FPGA configuration control interface.
+ * - Request major number from lanana
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/platform_device.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("Xilinx SystemACE device driver");
+MODULE_LICENSE("GPL");
+
+/* SystemACE register definitions */
+#define ACE_BUSMODE (0x00)
+
+#define ACE_STATUS (0x04)
+#define ACE_STATUS_CFGLOCK (0x00000001)
+#define ACE_STATUS_MPULOCK (0x00000002)
+#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */
+#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */
+#define ACE_STATUS_CFDETECT (0x00000010)
+#define ACE_STATUS_DATABUFRDY (0x00000020)
+#define ACE_STATUS_DATABUFMODE (0x00000040)
+#define ACE_STATUS_CFGDONE (0x00000080)
+#define ACE_STATUS_RDYFORCFCMD (0x00000100)
+#define ACE_STATUS_CFGMODEPIN (0x00000200)
+#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
+#define ACE_STATUS_CFBSY (0x00020000)
+#define ACE_STATUS_CFRDY (0x00040000)
+#define ACE_STATUS_CFDWF (0x00080000)
+#define ACE_STATUS_CFDSC (0x00100000)
+#define ACE_STATUS_CFDRQ (0x00200000)
+#define ACE_STATUS_CFCORR (0x00400000)
+#define ACE_STATUS_CFERR (0x00800000)
+
+#define ACE_ERROR (0x08)
+#define ACE_CFGLBA (0x0c)
+#define ACE_MPULBA (0x10)
+
+#define ACE_SECCNTCMD (0x14)
+#define ACE_SECCNTCMD_RESET (0x0100)
+#define ACE_SECCNTCMD_IDENTIFY (0x0200)
+#define ACE_SECCNTCMD_READ_DATA (0x0300)
+#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
+#define ACE_SECCNTCMD_ABORT (0x0600)
+
+#define ACE_VERSION (0x16)
+#define ACE_VERSION_REVISION_MASK (0x00FF)
+#define ACE_VERSION_MINOR_MASK (0x0F00)
+#define ACE_VERSION_MAJOR_MASK (0xF000)
+
+#define ACE_CTRL (0x18)
+#define ACE_CTRL_FORCELOCKREQ (0x0001)
+#define ACE_CTRL_LOCKREQ (0x0002)
+#define ACE_CTRL_FORCECFGADDR (0x0004)
+#define ACE_CTRL_FORCECFGMODE (0x0008)
+#define ACE_CTRL_CFGMODE (0x0010)
+#define ACE_CTRL_CFGSTART (0x0020)
+#define ACE_CTRL_CFGSEL (0x0040)
+#define ACE_CTRL_CFGRESET (0x0080)
+#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
+#define ACE_CTRL_ERRORIRQ (0x0200)
+#define ACE_CTRL_CFGDONEIRQ (0x0400)
+#define ACE_CTRL_RESETIRQ (0x0800)
+#define ACE_CTRL_CFGPROG (0x1000)
+#define ACE_CTRL_CFGADDR_MASK (0xe000)
+
+#define ACE_FATSTAT (0x1c)
+
+#define ACE_NUM_MINORS 16
+#define ACE_SECTOR_SIZE (512)
+#define ACE_FIFO_SIZE (32)
+#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
+
+struct ace_reg_ops;
+
+struct ace_device {
+ /* driver state data */
+ int id;
+ int media_change;
+ int users;
+ struct list_head list;
+
+ /* finite state machine data */
+ struct tasklet_struct fsm_tasklet;
+ uint fsm_task; /* Current activity (ACE_TASK_*) */
+ uint fsm_state; /* Current state (ACE_FSM_STATE_*) */
+ uint fsm_continue_flag; /* cleared to exit FSM mainloop */
+ uint fsm_iter_num;
+ struct timer_list stall_timer;
+
+ /* Transfer state/result, use for both id and block request */
+ struct request *req; /* request being processed */
+ void *data_ptr; /* pointer to I/O buffer */
+ int data_count; /* number of buffers remaining */
+ int data_result; /* Result of transfer; 0 := success */
+
+ int id_req_count; /* count of id requests */
+ int id_result;
+ struct completion id_completion; /* used when id req finishes */
+ int in_irq;
+
+ /* Details of hardware device */
+ unsigned long physaddr;
+ void *baseaddr;
+ int irq;
+ int bus_width; /* 0 := 8 bit; 1 := 16 bit */
+ struct ace_reg_ops *reg_ops;
+ int lock_count;
+
+ /* Block device data structures */
+ spinlock_t lock;
+ struct device *dev;
+ struct request_queue *queue;
+ struct gendisk *gd;
+
+ /* Inserted CF card parameters */
+ struct hd_driveid cf_id;
+};
+
+static int ace_major;
+
+/* ---------------------------------------------------------------------
+ * Low level register access
+ */
+
+struct ace_reg_ops {
+ u16(*in) (struct ace_device * ace, int reg);
+ void (*out) (struct ace_device * ace, int reg, u16 val);
+ void (*datain) (struct ace_device * ace);
+ void (*dataout) (struct ace_device * ace);
+};
+
+/* 8 Bit bus width */
+static u16 ace_in_8(struct ace_device *ace, int reg)
+{
+ void *r = ace->baseaddr + reg;
+ return in_8(r) | (in_8(r + 1) << 8);
+}
+
+static void ace_out_8(struct ace_device *ace, int reg, u16 val)
+{
+ void *r = ace->baseaddr + reg;
+ out_8(r, val);
+ out_8(r + 1, val >> 8);
+}
+
+static void ace_datain_8(struct ace_device *ace)
+{
+ void *r = ace->baseaddr + 0x40;
+ u8 *dst = ace->data_ptr;
+ int i = ACE_FIFO_SIZE;
+ while (i--)
+ *dst++ = in_8(r++);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_8(struct ace_device *ace)
+{
+ void *r = ace->baseaddr + 0x40;
+ u8 *src = ace->data_ptr;
+ int i = ACE_FIFO_SIZE;
+ while (i--)
+ out_8(r++, *src++);
+ ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_8_ops = {
+ .in = ace_in_8,
+ .out = ace_out_8,
+ .datain = ace_datain_8,
+ .dataout = ace_dataout_8,
+};
+
+/* 16 bit big endian bus attachment */
+static u16 ace_in_be16(struct ace_device *ace, int reg)
+{
+ return in_be16(ace->baseaddr + reg);
+}
+
+static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
+{
+ out_be16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_be16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *dst = ace->data_ptr;
+ while (i--)
+ *dst++ = in_le16(ace->baseaddr + 0x40);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_be16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *src = ace->data_ptr;
+ while (i--)
+ out_le16(ace->baseaddr + 0x40, *src++);
+ ace->data_ptr = src;
+}
+
+/* 16 bit little endian bus attachment */
+static u16 ace_in_le16(struct ace_device *ace, int reg)
+{
+ return in_le16(ace->baseaddr + reg);
+}
+
+static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
+{
+ out_le16(ace->baseaddr + reg, val);
+}
+
+static void ace_datain_le16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *dst = ace->data_ptr;
+ while (i--)
+ *dst++ = in_be16(ace->baseaddr + 0x40);
+ ace->data_ptr = dst;
+}
+
+static void ace_dataout_le16(struct ace_device *ace)
+{
+ int i = ACE_FIFO_SIZE / 2;
+ u16 *src = ace->data_ptr;
+ while (i--)
+ out_be16(ace->baseaddr + 0x40, *src++);
+ ace->data_ptr = src;
+}
+
+static struct ace_reg_ops ace_reg_be16_ops = {
+ .in = ace_in_be16,
+ .out = ace_out_be16,
+ .datain = ace_datain_be16,
+ .dataout = ace_dataout_be16,
+};
+
+static struct ace_reg_ops ace_reg_le16_ops = {
+ .in = ace_in_le16,
+ .out = ace_out_le16,
+ .datain = ace_datain_le16,
+ .dataout = ace_dataout_le16,
+};
+
+static inline u16 ace_in(struct ace_device *ace, int reg)
+{
+ return ace->reg_ops->in(ace, reg);
+}
+
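+/* 32-bit registers are accessed as two 16-bit halves: the low word at
+ * 'reg' and the high word at 'reg + 2'.
+ */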
+static inline u32 ace_in32(struct ace_device *ace, int reg)
+{
+ return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
+}
+
+static inline void ace_out(struct ace_device *ace, int reg, u16 val)
+{
+ ace->reg_ops->out(ace, reg, val);
+}
+
+static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
+{
+ ace_out(ace, reg, val);
+ ace_out(ace, reg + 2, val >> 16);
+}
+
+/* ---------------------------------------------------------------------
+ * Debug support functions
+ */
+
+#if defined(DEBUG)
+static void ace_dump_mem(void *base, int len)
+{
+ const char *ptr = base;
+ int i, j;
+
+ for (i = 0; i < len; i += 16) {
+ printk(KERN_INFO "%.8x:", i);
+ for (j = 0; j < 16; j++) {
+ if (!(j % 4))
+ printk(" ");
+ printk("%.2x", ptr[i + j]);
+ }
+ printk(" ");
+ for (j = 0; j < 16; j++)
+ printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
+ printk("\n");
+ }
+}
+#else
+static inline void ace_dump_mem(void *base, int len)
+{
+}
+#endif
+
+static void ace_dump_regs(struct ace_device *ace)
+{
+ dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
+ " status:%.8x mpu_lba:%.8x busmode:%4x\n"
+ " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
+ ace_in32(ace, ACE_CTRL),
+ ace_in(ace, ACE_SECCNTCMD),
+ ace_in(ace, ACE_VERSION),
+ ace_in32(ace, ACE_STATUS),
+ ace_in32(ace, ACE_MPULBA),
+ ace_in(ace, ACE_BUSMODE),
+ ace_in32(ace, ACE_ERROR),
+ ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
+}
+
+void ace_fix_driveid(struct hd_driveid *id)
+{
+#if defined(__BIG_ENDIAN)
+ u16 *buf = (void *)id;
+ int i;
+
+ /* All half words have wrong byte order; swap the bytes */
+ for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++)
+ *buf = le16_to_cpu(*buf);
+
+ /* Some of the data values are 32bit; swap the half words */
+ id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) |
+ ((id->lba_capacity << 16) & 0xFFFF0000);
+ id->spg = ((id->spg >> 16) & 0x0000FFFF) |
+ ((id->spg << 16) & 0xFFFF0000);
+#endif
+}
+
+/* ---------------------------------------------------------------------
+ * Finite State Machine (FSM) implementation
+ */
+
+/* FSM tasks; used to direct state transitions */
+#define ACE_TASK_IDLE 0
+#define ACE_TASK_IDENTIFY 1
+#define ACE_TASK_READ 2
+#define ACE_TASK_WRITE 3
+#define ACE_FSM_NUM_TASKS 4
+
+/* FSM state definitions */
+#define ACE_FSM_STATE_IDLE 0
+#define ACE_FSM_STATE_REQ_LOCK 1
+#define ACE_FSM_STATE_WAIT_LOCK 2
+#define ACE_FSM_STATE_WAIT_CFREADY 3
+#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
+#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
+#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
+#define ACE_FSM_STATE_REQ_PREPARE 7
+#define ACE_FSM_STATE_REQ_TRANSFER 8
+#define ACE_FSM_STATE_REQ_COMPLETE 9
+#define ACE_FSM_STATE_ERROR 10
+#define ACE_FSM_NUM_STATES 11
+
+/* Set flag to exit FSM loop and reschedule tasklet */
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "ace_fsm_yield()\n");
+ tasklet_schedule(&ace->fsm_tasklet);
+ ace->fsm_continue_flag = 0;
+}
+
+/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
+static inline void ace_fsm_yieldirq(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
+
+ if (ace->irq == NO_IRQ)
+ /* No IRQ assigned, so need to poll */
+ tasklet_schedule(&ace->fsm_tasklet);
+ ace->fsm_continue_flag = 0;
+}
+
+/* Get the next read/write request; ending requests that we don't handle */
+struct request *ace_get_next_request(request_queue_t * q)
+{
+ struct request *req;
+
+ while ((req = elv_next_request(q)) != NULL) {
+ if (blk_fs_request(req))
+ break;
+ end_request(req, 0);
+ }
+ return req;
+}
+
+static void ace_fsm_dostate(struct ace_device *ace)
+{
+ struct request *req;
+ u32 status;
+ u16 val;
+ int count;
+ int i;
+
+#if defined(DEBUG)
+ dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
+ ace->fsm_state, ace->id_req_count);
+#endif
+
+ switch (ace->fsm_state) {
+ case ACE_FSM_STATE_IDLE:
+ /* See if there is anything to do */
+ if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+ ace->fsm_iter_num++;
+ ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
+ mod_timer(&ace->stall_timer, jiffies + HZ);
+ if (!timer_pending(&ace->stall_timer))
+ add_timer(&ace->stall_timer);
+ break;
+ }
+ del_timer(&ace->stall_timer);
+ ace->fsm_continue_flag = 0;
+ break;
+
+ case ACE_FSM_STATE_REQ_LOCK:
+ if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+ /* Already have the lock, jump to next state */
+ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+ break;
+ }
+
+ /* Request the lock */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
+ ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
+ break;
+
+ case ACE_FSM_STATE_WAIT_LOCK:
+ if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+ /* got the lock; move to next state */
+ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+ break;
+ }
+
+ /* wait a bit for the lock */
+ ace_fsm_yield(ace);
+ break;
+
+ case ACE_FSM_STATE_WAIT_CFREADY:
+ status = ace_in32(ace, ACE_STATUS);
+ if (!(status & ACE_STATUS_RDYFORCFCMD) ||
+ (status & ACE_STATUS_CFBSY)) {
+ /* CF card isn't ready; it needs to be polled */
+ ace_fsm_yield(ace);
+ break;
+ }
+
+ /* Device is ready for command; determine what to do next */
+ if (ace->id_req_count)
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
+ else
+ ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_PREPARE:
+ /* Send identify command */
+ ace->fsm_task = ACE_TASK_IDENTIFY;
+ ace->data_ptr = &ace->cf_id;
+ ace->data_count = ACE_BUF_PER_SECTOR;
+ ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
+
+ /* As per datasheet, put config controller in reset */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+ /* irq handler takes over from this point; wait for the
+ * transfer to complete */
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
+ ace_fsm_yieldirq(ace);
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_TRANSFER:
+ /* Check that the sysace is ready to receive data */
+ status = ace_in32(ace, ACE_STATUS);
+ if (status & ACE_STATUS_CFBSY) {
+ dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->data_count);
+ ace_fsm_yield(ace);
+ break;
+ }
+ if (!(status & ACE_STATUS_DATABUFRDY)) {
+ ace_fsm_yield(ace);
+ break;
+ }
+
+ /* Transfer the next buffer */
+ ace->reg_ops->datain(ace);
+ ace->data_count--;
+
+		/* If there are still buffers to be transferred, jump out here */
+ if (ace->data_count != 0) {
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* transfer finished; kick state machine */
+ dev_dbg(ace->dev, "identify finished\n");
+ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
+ break;
+
+ case ACE_FSM_STATE_IDENTIFY_COMPLETE:
+ ace_fix_driveid(&ace->cf_id);
+ ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */
+
+ if (ace->data_result) {
+			/* Error occurred, disable the disk */
+ ace->media_change = 1;
+ set_capacity(ace->gd, 0);
+ dev_err(ace->dev, "error fetching CF id (%i)\n",
+ ace->data_result);
+ } else {
+ ace->media_change = 0;
+
+ /* Record disk parameters */
+ set_capacity(ace->gd, ace->cf_id.lba_capacity);
+ dev_info(ace->dev, "capacity: %i sectors\n",
+ ace->cf_id.lba_capacity);
+ }
+
+ /* We're done, drop to IDLE state and notify waiters */
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ ace->id_result = ace->data_result;
+ while (ace->id_req_count) {
+ complete(&ace->id_completion);
+ ace->id_req_count--;
+ }
+ break;
+
+ case ACE_FSM_STATE_REQ_PREPARE:
+ req = ace_get_next_request(ace->queue);
+ if (!req) {
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+ }
+
+ /* Okay, it's a data request, set it up for transfer */
+ dev_dbg(ace->dev,
+ "request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
+ req->sector, req->hard_nr_sectors,
+ req->current_nr_sectors, rq_data_dir(req));
+
+ ace->req = req;
+ ace->data_ptr = req->buffer;
+ ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
+ ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+
+ count = req->hard_nr_sectors;
+ if (rq_data_dir(req)) {
+ /* Kick off write request */
+ dev_dbg(ace->dev, "write data\n");
+ ace->fsm_task = ACE_TASK_WRITE;
+ ace_out(ace, ACE_SECCNTCMD,
+ count | ACE_SECCNTCMD_WRITE_DATA);
+ } else {
+ /* Kick off read request */
+ dev_dbg(ace->dev, "read data\n");
+ ace->fsm_task = ACE_TASK_READ;
+ ace_out(ace, ACE_SECCNTCMD,
+ count | ACE_SECCNTCMD_READ_DATA);
+ }
+
+ /* As per datasheet, put config controller in reset */
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+ /* Move to the transfer state. The systemace will raise
+ * an interrupt once there is something to do
+ */
+ ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
+ if (ace->fsm_task == ACE_TASK_READ)
+ ace_fsm_yieldirq(ace); /* wait for data ready */
+ break;
+
+ case ACE_FSM_STATE_REQ_TRANSFER:
+ /* Check that the sysace is ready to receive data */
+ status = ace_in32(ace, ACE_STATUS);
+ if (status & ACE_STATUS_CFBSY) {
+ dev_dbg(ace->dev,
+ "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->req->current_nr_sectors * 16,
+ ace->data_count, ace->in_irq);
+ ace_fsm_yield(ace); /* need to poll CFBSY bit */
+ break;
+ }
+ if (!(status & ACE_STATUS_DATABUFRDY)) {
+ dev_dbg(ace->dev,
+ "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+ ace->fsm_task, ace->fsm_iter_num,
+ ace->req->current_nr_sectors * 16,
+ ace->data_count, ace->in_irq);
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* Transfer the next buffer */
+ i = 16;
+ if (ace->fsm_task == ACE_TASK_WRITE)
+ ace->reg_ops->dataout(ace);
+ else
+ ace->reg_ops->datain(ace);
+ ace->data_count--;
+
+		/* If there are still buffers to be transferred, jump out here */
+ if (ace->data_count != 0) {
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ /* bio finished; is there another one? */
+ i = ace->req->current_nr_sectors;
+ if (end_that_request_first(ace->req, 1, i)) {
+ /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
+ * ace->req->hard_nr_sectors,
+ * ace->req->current_nr_sectors);
+ */
+ ace->data_ptr = ace->req->buffer;
+ ace->data_count = ace->req->current_nr_sectors * 16;
+ ace_fsm_yieldirq(ace);
+ break;
+ }
+
+ ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
+ break;
+
+ case ACE_FSM_STATE_REQ_COMPLETE:
+ /* Complete the block request */
+ blkdev_dequeue_request(ace->req);
+ end_that_request_last(ace->req, 1);
+ ace->req = NULL;
+
+ /* Finished request; go to idle state */
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+
+ default:
+ ace->fsm_state = ACE_FSM_STATE_IDLE;
+ break;
+ }
+}
+
+static void ace_fsm_tasklet(unsigned long data)
+{
+ struct ace_device *ace = (void *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ace->lock, flags);
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+static void ace_stall_timer(unsigned long data)
+{
+ struct ace_device *ace = (void *)data;
+ unsigned long flags;
+
+ dev_warn(ace->dev,
+ "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
+ ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
+ ace->data_count);
+ spin_lock_irqsave(&ace->lock, flags);
+
+ /* Rearm the stall timer *before* entering FSM (which may then
+ * delete the timer) */
+ mod_timer(&ace->stall_timer, jiffies + HZ);
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+/* ---------------------------------------------------------------------
+ * Interrupt handling routines
+ */
+static int ace_interrupt_checkstate(struct ace_device *ace)
+{
+ u32 sreg = ace_in32(ace, ACE_STATUS);
+ u16 creg = ace_in(ace, ACE_CTRL);
+
+	/* Check for error occurrence */
+ if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
+ (creg & ACE_CTRL_ERRORIRQ)) {
+ dev_err(ace->dev, "transfer failure\n");
+ ace_dump_regs(ace);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ace_interrupt(int irq, void *dev_id)
+{
+ u16 creg;
+ struct ace_device *ace = dev_id;
+
+ /* be safe and get the lock */
+ spin_lock(&ace->lock);
+ ace->in_irq = 1;
+
+ /* clear the interrupt */
+ creg = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
+ ace_out(ace, ACE_CTRL, creg);
+
+ /* check for IO failures */
+ if (ace_interrupt_checkstate(ace))
+ ace->data_result = -EIO;
+
+ if (ace->fsm_task == 0) {
+ dev_err(ace->dev,
+ "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
+ ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
+ ace_in(ace, ACE_SECCNTCMD));
+ dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
+ ace->fsm_task, ace->fsm_state, ace->data_count);
+ }
+
+ /* Loop over state machine until told to stop */
+ ace->fsm_continue_flag = 1;
+ while (ace->fsm_continue_flag)
+ ace_fsm_dostate(ace);
+
+ /* done with interrupt; drop the lock */
+ ace->in_irq = 0;
+ spin_unlock(&ace->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------
+ * Block ops
+ */
+static void ace_request(request_queue_t * q)
+{
+ struct request *req;
+ struct ace_device *ace;
+
+ req = ace_get_next_request(q);
+
+ if (req) {
+ ace = req->rq_disk->private_data;
+ tasklet_schedule(&ace->fsm_tasklet);
+ }
+}
+
+static int ace_media_changed(struct gendisk *gd)
+{
+ struct ace_device *ace = gd->private_data;
+ dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+
+ return ace->media_change;
+}
+
+static int ace_revalidate_disk(struct gendisk *gd)
+{
+ struct ace_device *ace = gd->private_data;
+ unsigned long flags;
+
+ dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+
+ if (ace->media_change) {
+ dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
+
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->id_req_count++;
+ spin_unlock_irqrestore(&ace->lock, flags);
+
+ tasklet_schedule(&ace->fsm_tasklet);
+ wait_for_completion(&ace->id_completion);
+ }
+
+ dev_dbg(ace->dev, "revalidate complete\n");
+ return ace->id_result;
+}
+
+static int ace_open(struct inode *inode, struct file *filp)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ unsigned long flags;
+
+ dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
+
+ filp->private_data = ace;
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->users++;
+ spin_unlock_irqrestore(&ace->lock, flags);
+
+ check_disk_change(inode->i_bdev);
+ return 0;
+}
+
+static int ace_release(struct inode *inode, struct file *filp)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ unsigned long flags;
+ u16 val;
+
+ dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
+
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->users--;
+ if (ace->users == 0) {
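+ /* last user is gone; drop the CompactFlash lock request bit */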
+ val = ace_in(ace, ACE_CTRL);
+ ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
+ }
+ spin_unlock_irqrestore(&ace->lock, flags);
+ return 0;
+}
+
+static int ace_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
+ struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
+ struct hd_geometry g;
+ dev_dbg(ace->dev, "ace_ioctl()\n");
+
+ switch (cmd) {
+ case HDIO_GETGEO:
+ g.heads = ace->cf_id.heads;
+ g.sectors = ace->cf_id.sectors;
+ g.cylinders = ace->cf_id.cyls;
+ g.start = 0;
+ return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
+
+ default:
+ return -ENOTTY;
+ }
+ return -ENOTTY;
+}
+
+static struct block_device_operations ace_fops = {
+ .owner = THIS_MODULE,
+ .open = ace_open,
+ .release = ace_release,
+ .media_changed = ace_media_changed,
+ .revalidate_disk = ace_revalidate_disk,
+ .ioctl = ace_ioctl,
+};
+
+/* --------------------------------------------------------------------
+ * SystemACE device setup/teardown code
+ */
+static int __devinit ace_setup(struct ace_device *ace)
+{
+ u16 version;
+ u16 val;
+
+ int rc;
+
+ spin_lock_init(&ace->lock);
+ init_completion(&ace->id_completion);
+
+ /*
+ * Map the device
+ */
+ ace->baseaddr = ioremap(ace->physaddr, 0x80);
+ if (!ace->baseaddr)
+ goto err_ioremap;
+
+ if (ace->irq != NO_IRQ) {
+ rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
+ if (rc) {
+ /* Failure - fall back to polled mode */
+ dev_err(ace->dev, "request_irq failed\n");
+ ace->irq = NO_IRQ;
+ }
+ }
+
+ /*
+ * Initialize the state machine tasklet and stall timer
+ */
+ tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
+ setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
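+ /* Note: the FSM is driven from three contexts (tasklet, stall timer
+ * and interrupt handler); each of them takes ace->lock first */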
+
+ /*
+ * Initialize the request queue
+ */
+ ace->queue = blk_init_queue(ace_request, &ace->lock);
+ if (ace->queue == NULL)
+ goto err_blk_initq;
+ blk_queue_hardsect_size(ace->queue, 512);
+
+ /*
+ * Allocate and initialize GD structure
+ */
+ ace->gd = alloc_disk(ACE_NUM_MINORS);
+ if (!ace->gd)
+ goto err_alloc_disk;
+
+ ace->gd->major = ace_major;
+ ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
+ ace->gd->fops = &ace_fops;
+ ace->gd->queue = ace->queue;
+ ace->gd->private_data = ace;
+ snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
+
+ /* set bus width */
+ if (ace->bus_width == 1) {
+ /* 0x0101 should work regardless of endianness */
+ ace_out_le16(ace, ACE_BUSMODE, 0x0101);
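+ /* (writing 0x0101 puts 0x01 on both byte lanes, so 16-bit mode is
+ * enabled whichever lane the BUSMODE register is wired to; the
+ * read-back below shows whether the lanes are swapped) */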
+
+ /* read it back to determine endianness */
+ if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
+ ace->reg_ops = &ace_reg_le16_ops;
+ else
+ ace->reg_ops = &ace_reg_be16_ops;
+ } else {
+ ace_out_8(ace, ACE_BUSMODE, 0x00);
+ ace->reg_ops = &ace_reg_8_ops;
+ }
+
+ /* Make sure version register is sane */
+ version = ace_in(ace, ACE_VERSION);
+ if ((version == 0) || (version == 0xFFFF))
+ goto err_read;
+
+ /* Put sysace in a sane state by clearing most control reg bits */
+ ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
+ ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
+
+ /* Enable interrupts */
+ val = ace_in(ace, ACE_CTRL);
+ val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
+ ace_out(ace, ACE_CTRL, val);
+
+ /* Print the identification */
+ dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
+ (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
+ dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n",
+ ace->physaddr, ace->baseaddr, ace->irq);
+
+ ace->media_change = 1;
+ ace_revalidate_disk(ace->gd);
+
+ /* Make the sysace device 'live' */
+ add_disk(ace->gd);
+
+ return 0;
+
+ err_read:
+ put_disk(ace->gd);
+ err_alloc_disk:
+ blk_cleanup_queue(ace->queue);
+ err_blk_initq:
+ iounmap(ace->baseaddr);
+ if (ace->irq != NO_IRQ)
+ free_irq(ace->irq, ace);
+ err_ioremap:
+ printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
+ ace->physaddr);
+ return -ENOMEM;
+}
+
+static void __devexit ace_teardown(struct ace_device *ace)
+{
+ if (ace->gd) {
+ del_gendisk(ace->gd);
+ put_disk(ace->gd);
+ }
+
+ if (ace->queue)
+ blk_cleanup_queue(ace->queue);
+
+ tasklet_kill(&ace->fsm_tasklet);
+
+ if (ace->irq != NO_IRQ)
+ free_irq(ace->irq, ace);
+
+ iounmap(ace->baseaddr);
+}
+
+/* ---------------------------------------------------------------------
+ * Platform Bus Support
+ */
+
+static int __devinit ace_probe(struct device *device)
+{
+ struct platform_device *dev = to_platform_device(device);
+ struct ace_device *ace;
+ int i;
+
+ dev_dbg(device, "ace_probe(%p)\n", device);
+
+ /*
+ * Allocate the ace device structure
+ */
+ ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
+ if (!ace)
+ goto err_alloc;
+
+ ace->dev = device;
+ ace->id = dev->id;
+ ace->irq = NO_IRQ;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ ace->physaddr = dev->resource[i].start;
+ if (dev->resource[i].flags & IORESOURCE_IRQ)
+ ace->irq = dev->resource[i].start;
+ }
+
+ /* FIXME: Should get bus_width from the platform_device struct */
+ ace->bus_width = 1;
+
+ dev_set_drvdata(&dev->dev, ace);
+
+ /* Call the bus-independent setup code */
+ if (ace_setup(ace) != 0)
+ goto err_setup;
+
+ return 0;
+
+ err_setup:
+ dev_set_drvdata(&dev->dev, NULL);
+ kfree(ace);
+ err_alloc:
+ printk(KERN_ERR "xsysace: could not initialize device\n");
+ return -ENOMEM;
+}
+
+/*
+ * Platform bus remove() method
+ */
+static int __devexit ace_remove(struct device *device)
+{
+ struct ace_device *ace = dev_get_drvdata(device);
+
+ dev_dbg(device, "ace_remove(%p)\n", device);
+
+ if (ace) {
+ ace_teardown(ace);
+ kfree(ace);
+ }
+
+ return 0;
+}
+
+static struct device_driver ace_driver = {
+ .name = "xsysace",
+ .bus = &platform_bus_type,
+ .probe = ace_probe,
+ .remove = __devexit_p(ace_remove),
+};
+
+/* ---------------------------------------------------------------------
+ * Module init/exit routines
+ */
+static int __init ace_init(void)
+{
+ ace_major = register_blkdev(ace_major, "xsysace");
+ if (ace_major <= 0) {
+ printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
+ return ace_major;
+ }
+
+ pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
+ return driver_register(&ace_driver);
+}
+
+static void __exit ace_exit(void)
+{
+ pr_debug("Unregistering Xilinx SystemACE driver\n");
+ driver_unregister(&ace_driver);
+ unregister_blkdev(ace_major, "xsysace");
+}
+
+module_init(ace_init);
+module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7cc2685ca84a..e40fa98842e5 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -44,9 +44,6 @@
extern int m68k_realnum_memory;
extern struct mem_info m68k_memory[NUM_MEMINFO];
-#define TRUE (1)
-#define FALSE (0)
-
#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)
@@ -374,9 +371,7 @@ static void __exit z2_exit(void)
{
int i, j;
blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
- if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
- printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
-
+ unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
del_gendisk(z2ram_gendisk);
put_disk(z2ram_gendisk);
blk_cleanup_queue(z2_queue);
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 7e04dd69f609..59b054810ed0 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -199,7 +199,6 @@ static void hci_usb_tx_complete(struct urb *urb);
#define __pending_q(husb, type) (&husb->pending_q[type-1])
#define __completed_q(husb, type) (&husb->completed_q[type-1])
#define __transmit_q(husb, type) (&husb->transmit_q[type-1])
-#define __reassembly(husb, type) (husb->reassembly[type-1])
static inline struct _urb *__get_completed(struct hci_usb *husb, int type)
{
@@ -429,12 +428,6 @@ static void hci_usb_unlink_urbs(struct hci_usb *husb)
kfree(urb->transfer_buffer);
_urb_free(_urb);
}
-
- /* Release reassembly buffers */
- if (husb->reassembly[i]) {
- kfree_skb(husb->reassembly[i]);
- husb->reassembly[i] = NULL;
- }
}
}
@@ -671,83 +664,6 @@ static int hci_usb_send_frame(struct sk_buff *skb)
return 0;
}
-static inline int __recv_frame(struct hci_usb *husb, int type, void *data, int count)
-{
- BT_DBG("%s type %d data %p count %d", husb->hdev->name, type, data, count);
-
- husb->hdev->stat.byte_rx += count;
-
- while (count) {
- struct sk_buff *skb = __reassembly(husb, type);
- struct { int expect; } *scb;
- int len = 0;
-
- if (!skb) {
- /* Start of the frame */
-
- switch (type) {
- case HCI_EVENT_PKT:
- if (count >= HCI_EVENT_HDR_SIZE) {
- struct hci_event_hdr *h = data;
- len = HCI_EVENT_HDR_SIZE + h->plen;
- } else
- return -EILSEQ;
- break;
-
- case HCI_ACLDATA_PKT:
- if (count >= HCI_ACL_HDR_SIZE) {
- struct hci_acl_hdr *h = data;
- len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
- } else
- return -EILSEQ;
- break;
-#ifdef CONFIG_BT_HCIUSB_SCO
- case HCI_SCODATA_PKT:
- if (count >= HCI_SCO_HDR_SIZE) {
- struct hci_sco_hdr *h = data;
- len = HCI_SCO_HDR_SIZE + h->dlen;
- } else
- return -EILSEQ;
- break;
-#endif
- }
- BT_DBG("new packet len %d", len);
-
- skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb) {
- BT_ERR("%s no memory for the packet", husb->hdev->name);
- return -ENOMEM;
- }
- skb->dev = (void *) husb->hdev;
- bt_cb(skb)->pkt_type = type;
-
- __reassembly(husb, type) = skb;
-
- scb = (void *) skb->cb;
- scb->expect = len;
- } else {
- /* Continuation */
- scb = (void *) skb->cb;
- len = scb->expect;
- }
-
- len = min(len, count);
-
- memcpy(skb_put(skb, len), data, len);
-
- scb->expect -= len;
- if (!scb->expect) {
- /* Complete frame */
- __reassembly(husb, type) = NULL;
- bt_cb(skb)->pkt_type = type;
- hci_recv_frame(skb);
- }
-
- count -= len; data += len;
- }
- return 0;
-}
-
static void hci_usb_rx_complete(struct urb *urb)
{
struct _urb *_urb = container_of(urb, struct _urb, urb);
@@ -776,7 +692,7 @@ static void hci_usb_rx_complete(struct urb *urb)
urb->iso_frame_desc[i].actual_length);
if (!urb->iso_frame_desc[i].status)
- __recv_frame(husb, _urb->type,
+ hci_recv_fragment(husb->hdev, _urb->type,
urb->transfer_buffer + urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].actual_length);
}
@@ -784,7 +700,7 @@ static void hci_usb_rx_complete(struct urb *urb)
;
#endif
} else {
- err = __recv_frame(husb, _urb->type, urb->transfer_buffer, count);
+ err = hci_recv_fragment(husb->hdev, _urb->type, urb->transfer_buffer, count);
if (err < 0) {
BT_ERR("%s corrupted packet: type %d count %d",
husb->hdev->name, _urb->type, count);
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 963fc55cdc85..56cd3a92ceca 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -102,9 +102,9 @@ struct hci_usb {
struct hci_dev *hdev;
unsigned long state;
-
+
struct usb_device *udev;
-
+
struct usb_host_endpoint *bulk_in_ep;
struct usb_host_endpoint *bulk_out_ep;
struct usb_host_endpoint *intr_in_ep;
@@ -116,7 +116,6 @@ struct hci_usb {
__u8 ctrl_req;
struct sk_buff_head transmit_q[4];
- struct sk_buff *reassembly[4]; /* Reassembly buffers */
rwlock_t completion_lock;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index b71a5ccc587f..0638730a4a19 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -180,11 +180,6 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
return total;
}
-static loff_t vhci_llseek(struct file *file, loff_t offset, int origin)
-{
- return -ESPIPE;
-}
-
static ssize_t vhci_read(struct file *file,
char __user *buf, size_t count, loff_t *pos)
{
@@ -334,7 +329,6 @@ static int vhci_fasync(int fd, struct file *file, int on)
static const struct file_operations vhci_fops = {
.owner = THIS_MODULE,
- .llseek = vhci_llseek,
.read = vhci_read,
.write = vhci_write,
.poll = vhci_poll,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index aa5468f487ba..499019bf8f40 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2695,11 +2695,12 @@ int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
{
void __user *argp = (void __user *)arg;
int ret;
+ struct gendisk *disk = ip->i_bdev->bd_disk;
/*
* Try the generic SCSI command ioctl's first.
*/
- ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, argp);
+ ret = scsi_cmd_ioctl(file, disk->queue, disk, cmd, argp);
if (ret != -ENOTTY)
return ret;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ef683ebd367c..c8dfd18bea44 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -114,7 +114,7 @@ config COMPUTONE
config ROCKETPORT
tristate "Comtrol RocketPort support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
help
This driver supports Comtrol RocketPort and RocketModem PCI boards.
These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
@@ -157,7 +157,7 @@ config CYZ_INTR
config DIGIEPCA
tristate "Digiboard Intelligent Async Support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
---help---
This is a driver for Digi International's Xx, Xeve, and Xem series
of cards which provide multiple serial ports. You would need
@@ -185,7 +185,7 @@ config ESPSERIAL
config MOXA_INTELLIO
tristate "Moxa Intellio support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
help
Say Y here if you have a Moxa Intellio multiport serial card.
@@ -213,8 +213,6 @@ config MOXA_SMARTIO_NEW
This is upgraded (1.9.1) driver from original Moxa drivers with
changes finally resulting in PCI probing.
- Use at your own risk.
-
This driver can also be built as a module. The module will be called
mxser_new. If you want to do that, say M here.
@@ -243,7 +241,7 @@ config SYNCLINK
config SYNCLINKMP
tristate "SyncLink Multiport support"
- depends on SERIAL_NONSTANDARD
+ depends on SERIAL_NONSTANDARD && PCI
help
Enable support for the SyncLink Multiport (2 or 4 ports)
serial adapter, running asynchronous and HDLC communications up
@@ -354,7 +352,7 @@ config STALDRV
config STALLION
tristate "Stallion EasyIO or EC8/32 support"
- depends on STALDRV && BROKEN_ON_SMP
+ depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
help
If you have an EasyIO or EasyConnection 8/32 multiport Stallion
card, then this is for you; say Y. Make sure to read
@@ -365,7 +363,7 @@ config STALLION
config ISTALLION
tristate "Stallion EC8/64, ONboard, Brumby support"
- depends on STALDRV && BROKEN_ON_SMP
+ depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
help
If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
serial multiport card, say Y here. Make sure to read
@@ -374,53 +372,6 @@ config ISTALLION
To compile this driver as a module, choose M here: the
module will be called istallion.
-config AU1000_UART
- bool "Enable Au1000 UART Support"
- depends on SERIAL_NONSTANDARD && MIPS
- help
- If you have an Alchemy AU1000 processor (MIPS based) and you want
- to use serial ports, say Y. Otherwise, say N.
-
-config AU1000_SERIAL_CONSOLE
- bool "Enable Au1000 serial console"
- depends on AU1000_UART
- help
- If you have an Alchemy AU1000 processor (MIPS based) and you want
- to use a console on a serial port, say Y. Otherwise, say N.
-
-config SERIAL_DEC
- bool "DECstation serial support"
- depends on MACH_DECSTATION
- default y
- help
- This selects whether you want to be asked about drivers for
- DECstation serial ports.
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about DECstation serial ports.
-
-config SERIAL_DEC_CONSOLE
- bool "Support for console on a DECstation serial port"
- depends on SERIAL_DEC
- default y
- help
- If you say Y here, it will be possible to use a serial port as the
- system console (the system console is the device which receives all
- kernel messages and warnings and which allows logins in single user
- mode). Note that the firmware uses ttyS0 as the serial console on
- the Maxine and ttyS2 on the others.
-
- If unsure, say Y.
-
-config ZS
- bool "Z85C30 Serial Support"
- depends on SERIAL_DEC
- default y
- help
- Documentation on the Zilog 85C350 serial communications controller
- is downloadable at <http://www.zilog.com/pdfs/serial/z85c30.pdf>
-
config A2232
tristate "Commodore A2232 serial support (EXPERIMENTAL)"
depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
@@ -653,6 +604,14 @@ config HVC_BEAT
help
Toshiba's Cell Reference Set Beat Console device driver
+config HVC_XEN
+ bool "Xen Hypervisor Console support"
+ depends on XEN
+ select HVC_DRIVER
+ default y
+ help
+ Xen virtual console device driver
+
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
depends on PPC_PSERIES
@@ -767,7 +726,7 @@ config NVRAM
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH && !S390
+ depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC64 && (!SPARC32 || PCI) && !FRV && !ARM && !SUPERH && !S390
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -815,7 +774,7 @@ config SGI_IP27_RTC
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV && !S390 && !SUPERH
+ depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -993,15 +952,14 @@ config GPIO_VR41XX
depends on CPU_VR41XX
config RAW_DRIVER
- tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)"
+ tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
help
- The raw driver permits block devices to be bound to /dev/raw/rawN.
- Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
+ The raw driver permits block devices to be bound to /dev/raw/rawN.
+ Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
See the raw(8) manpage for more details.
- The raw driver is deprecated and will be removed soon.
- Applications should simply open the device (eg /dev/hda1)
+ Applications should preferably open the device (eg /dev/hda1)
with the O_DIRECT flag.
config MAX_RAW_DEVS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 2f56ecc035aa..8fecaf4010b1 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -15,6 +15,7 @@ obj-y += misc.o
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o consolemap.o \
consolemap_deftbl.o selection.o keyboard.o
obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
+obj-$(CONFIG_AUDIT) += tty_audit.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_ESPSERIAL) += esp.o
obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
@@ -41,12 +42,14 @@ obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
+obj-$(CONFIG_LGUEST_GUEST) += hvc_lguest.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
+obj-$(CONFIG_HVC_XEN) += hvc_xen.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MSPEC) += mspec.o
@@ -104,6 +107,8 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/
obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
obj-$(CONFIG_TCG_TPM) += tpm/
+obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+
# Files generated that shall be removed upon make clean
clean-files := consolemap_deftbl.c defkeymap.c
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index e6c534e62846..df0ddf14b85c 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -462,9 +462,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
* erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
* With this lot disabled, we should prevent lockups. */
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
- u8 revision=0;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
- if (revision == 0x10 || revision == 0x11) {
+ if (pdev->revision == 0x10 || pdev->revision == 0x11) {
agp_bridge->flags = AGP_ERRATA_FASTWRITES;
agp_bridge->flags |= AGP_ERRATA_SBA;
agp_bridge->flags |= AGP_ERRATA_1X;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 801abdd29066..d95662e96326 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -367,10 +367,8 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
char *revstring;
- u8 rev_id;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
- switch (rev_id) {
+ switch (pdev->revision) {
case 0x01: revstring="A0"; break;
case 0x02: revstring="A1"; break;
case 0x11: revstring="B0"; break;
@@ -386,7 +384,7 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data
* Work around errata.
* Chips before B2 stepping incorrectly reporting v3.5
*/
- if (rev_id < 0x13) {
+ if (pdev->revision < 0x13) {
printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n");
bridge->major_version = 3;
bridge->minor_version = 0;
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index ebdd6dd66edb..1b47c89a1b99 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -321,7 +321,7 @@ EXPORT_SYMBOL(agp_try_unsupported_boot);
static int __init agp_init(void)
{
if (!agp_off)
- printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Dave Jones\n",
+ printk(KERN_INFO "Linux agpgart interface v%d.%d\n",
AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
return 0;
}
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 4eaceabd8cea..3d468f502d2d 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -738,6 +738,7 @@ static void change_speed(struct async_struct *info,
}
/* If the quotient is zero refuse the change */
if (!quot && old_termios) {
+ /* FIXME: Will need updating for new tty in the end */
info->tty->termios->c_cflag &= ~CBAUD;
info->tty->termios->c_cflag |= (old_termios->c_cflag & CBAUD);
baud = tty_get_baud_rate(info->tty);
@@ -783,7 +784,6 @@ static void change_speed(struct async_struct *info,
/*
* Set up parity check flag
*/
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
info->read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (I_INPCK(info->tty))
@@ -1367,11 +1367,6 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
unsigned long flags;
unsigned int cflag = tty->termios->c_cflag;
- if ( (cflag == old_termios->c_cflag)
- && ( RELEVANT_IFLAG(tty->termios->c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
change_speed(info, old_termios);
/* Handle transition to B0 status */
@@ -1726,12 +1721,11 @@ static int get_async_struct(int line, struct async_struct **ret_info)
*ret_info = sstate->info;
return 0;
}
- info = kmalloc(sizeof(struct async_struct), GFP_KERNEL);
+ info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
if (!info) {
sstate->count--;
return -ENOMEM;
}
- memset(info, 0, sizeof(struct async_struct));
#ifdef DECLARE_WAITQUEUE
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 179c7a3b6e75..ec116df919d9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/apm-emulation.h>
+#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -329,13 +330,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
/*
* Wait for the suspend/resume to complete. If there
* are pending acknowledges, we wait here for them.
- *
- * Note: we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the
- * threads.
*/
flags = current->flags;
- current->flags |= PF_NOFREEZE;
wait_event(apm_suspend_waitqueue,
as->suspend_state == SUSPEND_DONE);
@@ -365,13 +361,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
/*
* Wait for the suspend/resume to complete. If there
* are pending acknowledges, we wait here for them.
- *
- * Note: we need to ensure that the PM subsystem does
- * not kick us out of the wait when it suspends the
- * threads.
*/
flags = current->flags;
- current->flags |= PF_NOFREEZE;
wait_event_interruptible(apm_suspend_waitqueue,
as->suspend_state == SUSPEND_DONE);
@@ -598,7 +589,6 @@ static int __init apm_init(void)
kapmd_tsk = NULL;
return ret;
}
- kapmd_tsk->flags |= PF_NOFREEZE;
wake_up_process(kapmd_tsk);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
index ed53f541d9e8..b6f2639f903d 100644
--- a/drivers/char/briq_panel.c
+++ b/drivers/char/briq_panel.c
@@ -91,11 +91,6 @@ static ssize_t briq_panel_read(struct file *file, char __user *buf, size_t count
unsigned short c;
unsigned char cp;
-#if 0 /* Can't seek (pread) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-#endif
-
if (!vfd_is_open)
return -ENODEV;
@@ -139,11 +134,6 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
size_t indx = len;
int i, esc = 0;
-#if 0 /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-#endif
-
if (!vfd_is_open)
return -EBUSY;
diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c
index fd40b959afdd..4b3916f54909 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/char/consolemap.c
@@ -177,6 +177,7 @@ struct uni_pagedir {
unsigned long refcount;
unsigned long sum;
unsigned char *inverse_translations[4];
+ u16 *inverse_trans_unicode;
int readonly;
};
@@ -207,6 +208,41 @@ static void set_inverse_transl(struct vc_data *conp, struct uni_pagedir *p, int
}
}
+static void set_inverse_trans_unicode(struct vc_data *conp,
+ struct uni_pagedir *p)
+{
+ int i, j, k, glyph;
+ u16 **p1, *p2;
+ u16 *q;
+
+ if (!p) return;
+ q = p->inverse_trans_unicode;
+ if (!q) {
+ q = p->inverse_trans_unicode =
+ kmalloc(MAX_GLYPH * sizeof(u16), GFP_KERNEL);
+ if (!q)
+ return;
+ }
+ memset(q, 0, MAX_GLYPH * sizeof(u16));
+
+ for (i = 0; i < 32; i++) {
+ p1 = p->uni_pgdir[i];
+ if (!p1)
+ continue;
+ for (j = 0; j < 32; j++) {
+ p2 = p1[j];
+ if (!p2)
+ continue;
+ for (k = 0; k < 64; k++) {
+ glyph = p2[k];
+ if (glyph >= 0 && glyph < MAX_GLYPH
+ && q[glyph] < 32)
+ q[glyph] = (i << 11) + (j << 6) + k;
+ }
+ }
+ }
+}
+
unsigned short *set_translate(int m, struct vc_data *vc)
{
inv_translate[vc->vc_num] = m;
@@ -217,19 +253,29 @@ unsigned short *set_translate(int m, struct vc_data *vc)
* Inverse translation is impossible for several reasons:
* 1. The font<->character maps are not 1-1.
* 2. The text may have been written while a different translation map
- * was active, or using Unicode.
+ * was active.
* Still, it is now possible to a certain extent to cut and paste non-ASCII.
*/
-unsigned char inverse_translate(struct vc_data *conp, int glyph)
+u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
{
struct uni_pagedir *p;
+ int m;
if (glyph < 0 || glyph >= MAX_GLYPH)
return 0;
- else if (!(p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc) ||
- !p->inverse_translations[inv_translate[conp->vc_num]])
+ else if (!(p = (struct uni_pagedir *)*conp->vc_uni_pagedir_loc))
return glyph;
- else
- return p->inverse_translations[inv_translate[conp->vc_num]][glyph];
+ else if (use_unicode) {
+ if (!p->inverse_trans_unicode)
+ return glyph;
+ else
+ return p->inverse_trans_unicode[glyph];
+ } else {
+ m = inv_translate[conp->vc_num];
+ if (!p->inverse_translations[m])
+ return glyph;
+ else
+ return p->inverse_translations[m][glyph];
+ }
}
static void update_user_maps(void)
@@ -243,6 +289,7 @@ static void update_user_maps(void)
p = (struct uni_pagedir *)*vc_cons[i].d->vc_uni_pagedir_loc;
if (p && p != q) {
set_inverse_transl(vc_cons[i].d, p, USER_MAP);
+ set_inverse_trans_unicode(vc_cons[i].d, p);
q = p;
}
}
@@ -353,6 +400,10 @@ static void con_release_unimap(struct uni_pagedir *p)
kfree(p->inverse_translations[i]);
p->inverse_translations[i] = NULL;
}
+ if (p->inverse_trans_unicode) {
+ kfree(p->inverse_trans_unicode);
+ p->inverse_trans_unicode = NULL;
+ }
}
void con_free_unimap(struct vc_data *vc)
@@ -511,6 +562,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update all inverse translations */
+ set_inverse_trans_unicode(vc, p);
return err;
}
@@ -561,6 +613,7 @@ int con_set_default_unimap(struct vc_data *vc)
for (i = 0; i <= 3; i++)
set_inverse_transl(vc, p, i); /* Update all inverse translations */
+ set_inverse_trans_unicode(vc, p);
dflt = p;
return err;
}
@@ -617,6 +670,19 @@ void con_protect_unimap(struct vc_data *vc, int rdonly)
p->readonly = rdonly;
}
+/* may be called during an interrupt */
+u32 conv_8bit_to_uni(unsigned char c)
+{
+ /*
+ * Always use USER_MAP. This function is used by the keyboard,
+ * which shouldn't be affected by G0/G1 switching, etc.
+ * If the user map still contains default values, i.e. the
+ * direct-to-font mapping, then assume user is using Latin1.
+ */
+ unsigned short uni = translations[USER_MAP][c];
+ return uni == (0xf000 | c) ? c : uni;
+}
+
int
conv_uni_to_pc(struct vc_data *conp, long ucs)
{
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index ca376b92162c..9e0adfe27c12 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -646,6 +646,7 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
+#include <linux/firmware.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -680,6 +681,44 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
#define STD_COM_FLAGS (0)
+/* firmware stuff */
+#define ZL_MAX_BLOCKS 16
+#define DRIVER_VERSION 0x02010203
+#define RAM_SIZE 0x80000
+
+#define Z_FPGA_LOADED(X) ((readl(&(X)->init_ctrl) & (1<<17)) != 0)
+
+enum zblock_type {
+ ZBLOCK_PRG = 0,
+ ZBLOCK_FPGA = 1
+};
+
+struct zfile_header {
+ char name[64];
+ char date[32];
+ char aux[32];
+ u32 n_config;
+ u32 config_offset;
+ u32 n_blocks;
+ u32 block_offset;
+ u32 reserved[9];
+} __attribute__ ((packed));
+
+struct zfile_config {
+ char name[64];
+ u32 mailbox;
+ u32 function;
+ u32 n_blocks;
+ u32 block_list[ZL_MAX_BLOCKS];
+} __attribute__ ((packed));
+
+struct zfile_block {
+ u32 type;
+ u32 file_offset;
+ u32 ram_offset;
+ u32 size;
+} __attribute__ ((packed));
+
static struct tty_driver *cy_serial_driver;
#ifdef CONFIG_ISA
@@ -1851,11 +1890,11 @@ static void cyz_poll(unsigned long arg)
struct cyclades_card *cinfo;
struct cyclades_port *info;
struct tty_struct *tty;
- static struct FIRM_ID *firm_id;
- static struct ZFW_CTRL *zfw_ctrl;
- static struct BOARD_CTRL *board_ctrl;
- static struct CH_CTRL *ch_ctrl;
- static struct BUF_CTRL *buf_ctrl;
+ struct FIRM_ID __iomem *firm_id;
+ struct ZFW_CTRL __iomem *zfw_ctrl;
+ struct BOARD_CTRL __iomem *board_ctrl;
+ struct CH_CTRL __iomem *ch_ctrl;
+ struct BUF_CTRL __iomem *buf_ctrl;
unsigned long expires = jiffies + HZ;
int card, port;
@@ -1999,7 +2038,6 @@ static int startup(struct cyclades_port *info)
struct ZFW_CTRL __iomem *zfw_ctrl;
struct BOARD_CTRL __iomem *board_ctrl;
struct CH_CTRL __iomem *ch_ctrl;
- int retval;
base_addr = card->base_addr;
@@ -2371,7 +2409,6 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
struct ZFW_CTRL __iomem *zfw_ctrl;
struct BOARD_CTRL __iomem *board_ctrl;
struct CH_CTRL __iomem *ch_ctrl;
- int retval;
base_addr = cinfo->base_addr;
firm_id = base_addr + ID_ADDRESS;
@@ -4127,10 +4164,6 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
printk(KERN_DEBUG "cyc:cy_set_termios ttyC%d\n", info->line);
#endif
- if (tty->termios->c_cflag == old_termios->c_cflag &&
- (tty->termios->c_iflag & (IXON | IXANY)) ==
- (old_termios->c_iflag & (IXON | IXANY)))
- return;
set_line_char(info);
if ((old_termios->c_cflag & CRTSCTS) &&
@@ -4433,10 +4466,10 @@ static void cy_hangup(struct tty_struct *tty)
static int __devinit cy_init_card(struct cyclades_card *cinfo)
{
struct cyclades_port *info;
- u32 mailbox;
+ u32 uninitialized_var(mailbox);
unsigned int nports;
unsigned short chip_number;
- int index, port;
+ int uninitialized_var(index), port;
spin_lock_init(&cinfo->card_lock);
@@ -4739,17 +4772,295 @@ static int __init cy_detect_isa(void)
} /* cy_detect_isa */
#ifdef CONFIG_PCI
-static void __devinit plx_init(void __iomem * addr, __u32 initctl)
+static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
+{
+ unsigned int a;
+
+ for (a = 0; a < size && *str; a++, str++)
+ if (*str & 0x80)
+ return -EINVAL;
+
+ for (; a < size; a++, str++)
+ if (*str)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline void __devinit cyz_fpga_copy(void __iomem *fpga, u8 *data,
+ unsigned int size)
+{
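+ /* Clock the bitstream into the FPGA one byte at a time, pausing
+ * briefly between writes */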
+ for (; size > 0; size--) {
+ cy_writel(fpga, *data++);
+ udelay(10);
+ }
+}
+
+static void __devinit plx_init(struct pci_dev *pdev, int irq,
+ struct RUNTIME_9060 __iomem *addr)
{
/* Reset PLX */
- cy_writel(addr + initctl, readl(addr + initctl) | 0x40000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
udelay(100L);
- cy_writel(addr + initctl, readl(addr + initctl) & ~0x40000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
/* Reload Config. Registers from EEPROM */
- cy_writel(addr + initctl, readl(addr + initctl) | 0x20000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
udelay(100L);
- cy_writel(addr + initctl, readl(addr + initctl) & ~0x20000000);
+ cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
+
+ /* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
+ * the IRQ is lost and, thus, we have to re-write it to the PCI config.
+ * registers. This will remain here until we find a permanent fix.
+ */
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+}
+
+static int __devinit __cyz_load_fw(const struct firmware *fw,
+ const char *name, const u32 mailbox, void __iomem *base,
+ void __iomem *fpga)
+{
+ void *ptr = fw->data;
+ struct zfile_header *h = ptr;
+ struct zfile_config *c, *cs;
+ struct zfile_block *b, *bs;
+ unsigned int a, tmp, len = fw->size;
+#define BAD_FW KERN_ERR "Bad firmware: "
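+ /* Validate the image layout before touching the hardware: the header,
+ * the config table and the block table must all lie within the file */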
+ if (len < sizeof(*h)) {
+ printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
+ return -EINVAL;
+ }
+
+ cs = ptr + h->config_offset;
+ bs = ptr + h->block_offset;
+
+ if ((void *)(cs + h->n_config) > ptr + len ||
+ (void *)(bs + h->n_blocks) > ptr + len) {
+ printk(BAD_FW "too short");
+ return -EINVAL;
+ }
+
+ if (cyc_isfwstr(h->name, sizeof(h->name)) ||
+ cyc_isfwstr(h->date, sizeof(h->date))) {
+ printk(BAD_FW "bad formatted header string\n");
+ return -EINVAL;
+ }
+
+ if (strncmp(name, h->name, sizeof(h->name))) {
+ printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
+ return -EINVAL;
+ }
+
+ tmp = 0;
+ for (c = cs; c < cs + h->n_config; c++) {
+ for (a = 0; a < c->n_blocks; a++)
+ if (c->block_list[a] > h->n_blocks) {
+ printk(BAD_FW "bad block ref number in cfgs\n");
+ return -EINVAL;
+ }
+ if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
+ tmp++;
+ }
+ if (!tmp) {
+ printk(BAD_FW "nothing appropriate\n");
+ return -EINVAL;
+ }
+
+ for (b = bs; b < bs + h->n_blocks; b++)
+ if (b->file_offset + b->size > len) {
+ printk(BAD_FW "bad block data offset\n");
+ return -EINVAL;
+ }
+
+ /* everything is OK, let's seek'n'load it */
+ for (c = cs; c < cs + h->n_config; c++)
+ if (c->mailbox == mailbox && c->function == 0)
+ break;
+
+ for (a = 0; a < c->n_blocks; a++) {
+ b = &bs[c->block_list[a]];
+ if (b->type == ZBLOCK_FPGA) {
+ if (fpga != NULL)
+ cyz_fpga_copy(fpga, ptr + b->file_offset,
+ b->size);
+ } else {
+ if (base != NULL)
+ memcpy_toio(base + b->ram_offset,
+ ptr + b->file_offset, b->size);
+ }
+ }
+#undef BAD_FW
+ return 0;
+}
+
+static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
+ struct RUNTIME_9060 __iomem *ctl_addr, int irq)
+{
+ const struct firmware *fw;
+ struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
+ struct CUSTOM_REG __iomem *cust = base_addr;
+ struct ZFW_CTRL __iomem *pt_zfwctrl;
+ void __iomem *tmp;
+ u32 mailbox, status;
+ unsigned int i;
+ int retval;
+
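+ /* Boot sequence: check whether firmware is already running, reset the
+ * PLX bridge, load the FPGA bitstream if requested, clear the RAM
+ * window, copy the firmware image in and wait for it to announce
+ * itself by writing its signature word */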
+ retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
+ if (retval) {
+ dev_err(&pdev->dev, "can't get firmware\n");
+ goto err;
+ }
+
+ /* Check whether the firmware is already loaded and running. If
+ positive, skip this board */
+ if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
+ u32 cntval = readl(base_addr + 0x190);
+
+ udelay(100);
+ if (cntval != readl(base_addr + 0x190)) {
+ /* FW counter is working, FW is running */
+ dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
+ "Skipping board.\n");
+ retval = 0;
+ goto err_rel;
+ }
+ }
+
+ /* start boot */
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
+ ~0x00030800UL);
+
+ mailbox = readl(&ctl_addr->mail_box_0);
+
+ if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
+ /* stops CPU and set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_stop, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ udelay(100);
+ }
+
+ plx_init(pdev, irq, ctl_addr);
+
+ if (mailbox != 0) {
+ /* load FPGA */
+ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
+ base_addr);
+ if (retval)
+ goto err_rel;
+ if (!Z_FPGA_LOADED(ctl_addr)) {
+ dev_err(&pdev->dev, "fw upload successful, but fw is "
+ "not loaded\n");
+ goto err_rel;
+ }
+ }
+
+ /* stops CPU and set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_stop, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ udelay(100);
+
+ /* clear memory */
+ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+ cy_writeb(tmp, 255);
+ if (mailbox != 0) {
+ /* set window to last 512K of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
+ //sleep(1);
+ for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
+ cy_writeb(tmp, 255);
+ /* set window to beginning of RAM */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ //sleep(1);
+ }
+
+ retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
+ release_firmware(fw);
+ if (retval)
+ goto err;
+
+ /* finish boot and start boards */
+ cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
+ cy_writel(&cust->cpu_start, 0);
+ cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
+ i = 0;
+ while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
+ msleep(100);
+ if (status != ZFIRM_ID) {
+ if (status == ZFIRM_HLT) {
+ dev_err(&pdev->dev, "you need an external power supply "
+ "for this number of ports. Firmware halted and "
+ "board reset.\n");
+ retval = -EIO;
+ goto err;
+ }
+ dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
+ "some more time\n", status);
+ while ((status = readl(&fid->signature)) != ZFIRM_ID &&
+ i++ < 200)
+ msleep(100);
+ if (status != ZFIRM_ID) {
+ dev_err(&pdev->dev, "Board not started in 20 seconds! "
+ "Giving up. (fid->signature = 0x%x)\n",
+ status);
+ dev_info(&pdev->dev, "*** Warning ***: if you are "
+ "upgrading the FW, please power cycle the "
+ "system before loading the new FW to the "
+ "Cyclades-Z.\n");
+
+ if (Z_FPGA_LOADED(ctl_addr))
+ plx_init(pdev, irq, ctl_addr);
+
+ retval = -EIO;
+ goto err;
+ }
+ dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
+ i / 10);
+ }
+ pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
+
+ dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
+ base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
+ base_addr + readl(&fid->zfwctrl_addr));
+
+ dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
+ readl(&pt_zfwctrl->board_ctrl.fw_version),
+ readl(&pt_zfwctrl->board_ctrl.n_channel));
+
+ if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
+ dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
+ "check the connection between the Z host card and the "
+ "serial expanders.\n");
+
+ if (Z_FPGA_LOADED(ctl_addr))
+ plx_init(pdev, irq, ctl_addr);
+
+ dev_info(&pdev->dev, "Null number of ports detected. Board "
+ "reset.\n");
+ retval = 0;
+ goto err;
+ }
+
+ cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
+ cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
+
+ /*
+ Early firmware failed to start looking for commands.
+ This enables firmware interrupts for those commands.
+ */
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+ (1 << 17));
+ cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
+ 0x00030800UL);
+
+ plx_init(pdev, irq, ctl_addr);
+
+ return 0;
+err_rel:
+ release_firmware(fw);
+err:
+ return retval;
}
static int __devinit cy_pci_probe(struct pci_dev *pdev,
@@ -4831,16 +5142,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
}
/* Disable interrupts on the PLX before resetting it */
- cy_writew(addr0 + 0x68,
- readw(addr0 + 0x68) & ~0x0900);
+ cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
- plx_init(addr0, 0x6c);
- /* For some yet unknown reason, once the PLX9060 reloads
- the EEPROM, the IRQ is lost and, thus, we have to
- re-write it to the PCI config. registers.
- This will remain here until we find a permanent
- fix. */
- pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
+ plx_init(pdev, irq, addr0);
mailbox = (u32)readl(&ctl_addr->mail_box_0);
@@ -4881,6 +5185,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
cy_writel(addr2 + ID_ADDRESS, 0L);
+ retval = cyz_load_fw(pdev, addr2, addr0, irq);
+ if (retval)
+ goto err_unmap;
/* This must be a Cyclades-8Zo/PCI. The extendable
version will have a different device_id and will
be allocated its maximum number of ports. */
@@ -4957,15 +5264,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
case PLX_9060:
case PLX_9080:
default: /* Old boards, use PLX_9060 */
-
- plx_init(addr0, 0x6c);
- /* For some yet unknown reason, once the PLX9060 reloads
- the EEPROM, the IRQ is lost and, thus, we have to
- re-write it to the PCI config. registers.
- This will remain here until we find a permanent
- fix. */
- pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
-
+ plx_init(pdev, irq, addr0);
cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
break;
}
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c
deleted file mode 100644
index 8ea2bea2b183..000000000000
--- a/drivers/char/decserial.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * sercons.c
- * choose the right serial device at boot time
- *
- * triemer 6-SEP-1998
- * sercons.c is designed to allow the three different kinds
- * of serial devices under the decstation world to co-exist
- * in the same kernel. The idea here is to abstract
- * the pieces of the drivers that are common to this file
- * so that they do not clash at compile time and runtime.
- *
- * HK 16-SEP-1998 v0.002
- * removed the PROM console as this is not a real serial
- * device. Added support for PROM console in drivers/char/tty_io.c
- * instead. Although it may work to enable more than one
- * console device I strongly recommend to use only one.
- */
-
-#include <linux/init.h>
-#include <asm/dec/machtype.h>
-
-#ifdef CONFIG_ZS
-extern int zs_init(void);
-#endif
-
-#ifdef CONFIG_SERIAL_CONSOLE
-
-#ifdef CONFIG_ZS
-extern void zs_serial_console_init(void);
-#endif
-
-#endif
-
-/* rs_init - starts up the serial interface -
- handle normal case of starting up the serial interface */
-
-#ifdef CONFIG_SERIAL
-
-int __init rs_init(void)
-{
-#ifdef CONFIG_ZS
- if (IOASIC)
- return zs_init();
-#endif
- return -ENXIO;
-}
-
-__initcall(rs_init);
-
-#endif
-
-#ifdef CONFIG_SERIAL_CONSOLE
-
-/* serial_console_init handles the special case of starting
- * up the console on the serial port
- */
-static int __init decserial_console_init(void)
-{
-#ifdef CONFIG_ZS
- if (IOASIC)
- zs_serial_console_init();
-#endif
- return 0;
-}
-console_initcall(decserial_console_init);
-
-#endif
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 5b91bc04ea4e..3345641ff904 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -73,9 +73,9 @@ static void drm_ati_free_pcigart_table(void *address, int order)
free_pages((unsigned long)address, order);
}
-int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_sg_mem *entry = dev->sg;
unsigned long pages;
int i;
int order;
@@ -122,9 +122,9 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 089198491f16..2d6f2d0bd02b 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -109,31 +109,31 @@ typedef unsigned int drm_magic_t;
* \note KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
-typedef struct drm_clip_rect {
+struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
-} drm_clip_rect_t;
+};
/**
* Drawable information.
*/
-typedef struct drm_drawable_info {
+struct drm_drawable_info {
unsigned int num_rects;
- drm_clip_rect_t *rects;
-} drm_drawable_info_t;
+ struct drm_clip_rect *rects;
+};
/**
* Texture region,
*/
-typedef struct drm_tex_region {
+struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
-} drm_tex_region_t;
+};
/**
* Hardware lock.
@@ -142,17 +142,17 @@ typedef struct drm_tex_region {
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
-typedef struct drm_hw_lock {
+struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
-} drm_hw_lock_t;
+};
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
*/
-typedef struct drm_version {
+struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
@@ -162,33 +162,33 @@ typedef struct drm_version {
char __user *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
-} drm_version_t;
+};
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
*/
-typedef struct drm_unique {
+struct drm_unique {
size_t unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
-} drm_unique_t;
+};
-typedef struct drm_list {
+struct drm_list {
int count; /**< Length of user-space structures */
- drm_version_t __user *version;
-} drm_list_t;
+ struct drm_version __user *version;
+};
-typedef struct drm_block {
+struct drm_block {
int unused;
-} drm_block_t;
+};
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
*/
-typedef struct drm_control {
+struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
@@ -196,24 +196,24 @@ typedef struct drm_control {
DRM_UNINST_HANDLER
} func;
int irq;
-} drm_control_t;
+};
/**
* Type of memory to map.
*/
-typedef enum drm_map_type {
+enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
-} drm_map_type_t;
+};
/**
* Memory mapping flags.
*/
-typedef enum drm_map_flags {
+enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
@@ -221,12 +221,12 @@ typedef enum drm_map_flags {
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40 /**< Removable mapping */
-} drm_map_flags_t;
+};
-typedef struct drm_ctx_priv_map {
+struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
-} drm_ctx_priv_map_t;
+};
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
@@ -234,30 +234,30 @@ typedef struct drm_ctx_priv_map {
*
* \sa drmAddMap().
*/
-typedef struct drm_map {
+struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
- drm_map_type_t type; /**< Type of memory to map */
- drm_map_flags_t flags; /**< Flags */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
-} drm_map_t;
+};
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
-typedef struct drm_client {
+struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
-} drm_client_t;
+};
-typedef enum {
+enum drm_stat_type {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
@@ -275,23 +275,23 @@ typedef enum {
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
-} drm_stat_type_t;
+};
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
-typedef struct drm_stats {
+struct drm_stats {
unsigned long count;
struct {
unsigned long value;
- drm_stat_type_t type;
+ enum drm_stat_type type;
} data[15];
-} drm_stats_t;
+};
/**
* Hardware locking flags.
*/
-typedef enum drm_lock_flags {
+enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
@@ -301,17 +301,17 @@ typedef enum drm_lock_flags {
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
-} drm_lock_flags_t;
+};
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
-typedef struct drm_lock {
+struct drm_lock {
int context;
- drm_lock_flags_t flags;
-} drm_lock_t;
+ enum drm_lock_flags flags;
+};
/**
* DMA flags
@@ -321,7 +321,7 @@ typedef struct drm_lock {
*
* \sa drm_dma.
*/
-typedef enum drm_dma_flags {
+enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
@@ -340,14 +340,14 @@ typedef enum drm_dma_flags {
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
-} drm_dma_flags_t;
+};
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
-typedef struct drm_buf_desc {
+struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
@@ -363,44 +363,44 @@ typedef struct drm_buf_desc {
* Start address of where the AGP buffers are
* in the AGP aperture
*/
-} drm_buf_desc_t;
+};
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
-typedef struct drm_buf_info {
+struct drm_buf_info {
int count; /**< Entries in list */
- drm_buf_desc_t __user *list;
-} drm_buf_info_t;
+ struct drm_buf_desc __user *list;
+};
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
-typedef struct drm_buf_free {
+struct drm_buf_free {
int count;
int __user *list;
-} drm_buf_free_t;
+};
/**
* Buffer information
*
* \sa drm_buf_map.
*/
-typedef struct drm_buf_pub {
+struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
-} drm_buf_pub_t;
+};
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
-typedef struct drm_buf_map {
+struct drm_buf_map {
int count; /**< Length of the buffer list */
void __user *virtual; /**< Mmap'd area in user-virtual */
- drm_buf_pub_t __user *list; /**< Buffer information */
-} drm_buf_map_t;
+ struct drm_buf_pub __user *list; /**< Buffer information */
+};
/**
* DRM_IOCTL_DMA ioctl argument type.
@@ -409,48 +409,48 @@ typedef struct drm_buf_map {
*
* \sa drmDMA().
*/
-typedef struct drm_dma {
+struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
- drm_dma_flags_t flags; /**< Flags */
+ enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
-} drm_dma_t;
+};
-typedef enum {
+enum drm_ctx_flags {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
-} drm_ctx_flags_t;
+};
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
-typedef struct drm_ctx {
+struct drm_ctx {
drm_context_t handle;
- drm_ctx_flags_t flags;
-} drm_ctx_t;
+ enum drm_ctx_flags flags;
+};
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
-typedef struct drm_ctx_res {
+struct drm_ctx_res {
int count;
- drm_ctx_t __user *contexts;
-} drm_ctx_res_t;
+ struct drm_ctx __user *contexts;
+};
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
-typedef struct drm_draw {
+struct drm_draw {
drm_drawable_t handle;
-} drm_draw_t;
+};
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
@@ -459,52 +459,52 @@ typedef enum {
DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;
-typedef struct drm_update_draw {
+struct drm_update_draw {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
unsigned long long data;
-} drm_update_draw_t;
+};
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
-typedef struct drm_auth {
+struct drm_auth {
drm_magic_t magic;
-} drm_auth_t;
+};
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
*/
-typedef struct drm_irq_busid {
+struct drm_irq_busid {
int irq; /**< IRQ number */
int busnum; /**< bus number */
int devnum; /**< device number */
int funcnum; /**< function number */
-} drm_irq_busid_t;
+};
-typedef enum {
+enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
-} drm_vblank_seq_type_t;
+};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
_DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
long tval_sec;
long tval_usec;
@@ -515,41 +515,41 @@ struct drm_wait_vblank_reply {
*
* \sa drmWaitVBlank().
*/
-typedef union drm_wait_vblank {
+union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
-} drm_wait_vblank_t;
+};
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
*/
-typedef struct drm_agp_mode {
+struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
-} drm_agp_mode_t;
+};
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
*/
-typedef struct drm_agp_buffer {
+struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
-} drm_agp_buffer_t;
+};
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
-typedef struct drm_agp_binding {
+struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding_t;
+};
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
@@ -558,7 +558,7 @@ typedef struct drm_agp_binding {
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
* drmAgpVendorId() and drmAgpDeviceId().
*/
-typedef struct drm_agp_info {
+struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
@@ -570,25 +570,25 @@ typedef struct drm_agp_info {
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
-} drm_agp_info_t;
+};
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
-typedef struct drm_scatter_gather {
+struct drm_scatter_gather {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather_t;
+};
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
-typedef struct drm_set_version {
+struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
-} drm_set_version_t;
+};
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
@@ -596,61 +596,61 @@ typedef struct drm_set_version {
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
-#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
-#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
-#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
-#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
-#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
-
-#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
-
-#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
-#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
-
-#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
-#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
-#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
+#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
-#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
-#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
/**
* Device specific ioctls should only be in their respective headers
@@ -663,4 +663,49 @@ typedef struct drm_set_version {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
+/* typedef area */
+#ifndef __KERNEL__
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+#endif
+
#endif
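Although the kernel-side typedefs are gone after this hunk, the new #ifndef __KERNEL__ block keeps the old drm_*_t names available to userspace, and the ioctl request codes do not change: _IOR()/_IOW()/_IOWR() encode only the request number plus sizeof() of a type whose layout is untouched by the rename. A minimal userspace sketch under those assumptions; the /dev/dri/card0 path is illustrative, not something this header defines:

/* Hedged sketch: issue DRM_IOCTL_GET_MAGIC from userspace.  The old
 * typedef name drm_auth_t still compiles because drm.h re-creates it
 * when __KERNEL__ is not defined. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative device node */
	drm_auth_t auth = { 0 };			/* same layout as struct drm_auth */

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth) == 0)
		printf("magic: %u\n", auth.magic);
	return 0;
}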
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index d494315752a2..0df87fc3dcb2 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -75,6 +75,8 @@
#include <asm/pgalloc.h>
#include "drm.h"
+#include <linux/idr.h>
+
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@@ -274,32 +276,23 @@ typedef struct drm_ioctl_desc {
int flags;
} drm_ioctl_desc_t;
-typedef struct drm_devstate {
- pid_t owner; /**< X server pid holding x_lock */
-} drm_devstate_t;
-
-typedef struct drm_magic_entry {
- drm_hash_item_t hash_item;
+struct drm_magic_entry {
struct list_head head;
+ struct drm_hash_item hash_item;
struct drm_file *priv;
struct drm_magic_entry *next;
-} drm_magic_entry_t;
-
-typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
-} drm_magic_head_t;
+};
-typedef struct drm_vma_entry {
+struct drm_vma_entry {
+ struct list_head head;
struct vm_area_struct *vma;
- struct drm_vma_entry *next;
pid_t pid;
-} drm_vma_entry_t;
+};
/**
* DMA buffer.
*/
-typedef struct drm_buf {
+struct drm_buf {
int idx; /**< Index into master buflist */
int total; /**< Buffer size */
int order; /**< log-base-2(total) */
@@ -325,30 +318,30 @@ typedef struct drm_buf {
int dev_priv_size; /**< Size of buffer private storage */
void *dev_private; /**< Per-buffer private storage */
-} drm_buf_t;
+};
/** bufs is one longer than it has to be */
-typedef struct drm_waitlist {
+struct drm_waitlist {
int count; /**< Number of possible buffers */
- drm_buf_t **bufs; /**< List of pointers to buffers */
- drm_buf_t **rp; /**< Read pointer */
- drm_buf_t **wp; /**< Write pointer */
- drm_buf_t **end; /**< End pointer */
+ struct drm_buf **bufs; /**< List of pointers to buffers */
+ struct drm_buf **rp; /**< Read pointer */
+ struct drm_buf **wp; /**< Write pointer */
+ struct drm_buf **end; /**< End pointer */
spinlock_t read_lock;
spinlock_t write_lock;
-} drm_waitlist_t;
+};
-typedef struct drm_freelist {
+struct drm_freelist {
int initialized; /**< Freelist in use */
atomic_t count; /**< Number of free buffers */
- drm_buf_t *next; /**< End pointer */
+ struct drm_buf *next; /**< End pointer */
wait_queue_head_t waiting; /**< Processes waiting on free bufs */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
atomic_t wfh; /**< If waiting for high mark */
spinlock_t lock;
-} drm_freelist_t;
+};
typedef struct drm_dma_handle {
dma_addr_t busaddr;
@@ -359,19 +352,19 @@ typedef struct drm_dma_handle {
/**
* Buffer entry. There is one of this for each buffer size order.
*/
-typedef struct drm_buf_entry {
+struct drm_buf_entry {
int buf_size; /**< size */
int buf_count; /**< number of buffers */
- drm_buf_t *buflist; /**< buffer list */
+ struct drm_buf *buflist; /**< buffer list */
int seg_count;
int page_order;
- drm_dma_handle_t **seglist;
+ struct drm_dma_handle **seglist;
- drm_freelist_t freelist;
-} drm_buf_entry_t;
+ struct drm_freelist freelist;
+};
/** File private data */
-typedef struct drm_file {
+struct drm_file {
int authenticated;
int master;
int minor;
@@ -379,16 +372,15 @@ typedef struct drm_file {
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
- struct drm_file *next;
- struct drm_file *prev;
+ struct list_head lhead;
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
void *driver_priv;
-} drm_file_t;
+};
/** Wait queue */
-typedef struct drm_queue {
+struct drm_queue {
atomic_t use_count; /**< Outstanding uses (+1) */
atomic_t finalization; /**< Finalization in progress */
atomic_t block_count; /**< Count of processes waiting */
@@ -401,16 +393,16 @@ typedef struct drm_queue {
atomic_t total_flushed; /**< Total flushes statistic */
atomic_t total_locks; /**< Total locks statistics */
#endif
- drm_ctx_flags_t flags; /**< Context preserving and 2D-only */
- drm_waitlist_t waitlist; /**< Pending buffers */
+ enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
+ struct drm_waitlist waitlist; /**< Pending buffers */
wait_queue_head_t flush_queue; /**< Processes waiting until flush */
-} drm_queue_t;
+};
/**
* Lock data.
*/
-typedef struct drm_lock_data {
- drm_hw_lock_t *hw_lock; /**< Hardware lock */
+struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /**< Hardware lock */
struct file *filp; /**< File descr of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
@@ -418,16 +410,16 @@ typedef struct drm_lock_data {
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
-} drm_lock_data_t;
+};
/**
* DMA data.
*/
-typedef struct drm_device_dma {
+struct drm_device_dma {
- drm_buf_entry_t bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
int buf_count; /**< total number of buffers */
- drm_buf_t **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
int seg_count;
int page_count; /**< number of pages */
unsigned long *pagelist; /**< page list */
@@ -439,28 +431,27 @@ typedef struct drm_device_dma {
_DRM_DMA_USE_PCI_RO = 0x08
} flags;
-} drm_device_dma_t;
+};
/**
* AGP memory entry. Stored as a doubly linked list.
*/
-typedef struct drm_agp_mem {
+struct drm_agp_mem {
unsigned long handle; /**< handle */
DRM_AGP_MEM *memory;
unsigned long bound; /**< address */
int pages;
- struct drm_agp_mem *prev; /**< previous entry */
- struct drm_agp_mem *next; /**< next entry */
-} drm_agp_mem_t;
+ struct list_head head;
+};
/**
* AGP data.
*
* \sa drm_agp_init() and drm_device::agp.
*/
-typedef struct drm_agp_head {
+struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
- drm_agp_mem_t *memory; /**< memory entries */
+ struct list_head memory;
unsigned long mode; /**< AGP mode */
struct agp_bridge_data *bridge;
	int enabled; /**< whether the AGP bus has been enabled */
@@ -469,51 +460,51 @@ typedef struct drm_agp_head {
int agp_mtrr;
int cant_use_aperture;
unsigned long page_mask;
-} drm_agp_head_t;
+};
/**
* Scatter-gather memory.
*/
-typedef struct drm_sg_mem {
+struct drm_sg_mem {
unsigned long handle;
void *virtual;
int pages;
struct page **pagelist;
dma_addr_t *busaddr;
-} drm_sg_mem_t;
+};
-typedef struct drm_sigdata {
+struct drm_sigdata {
int context;
- drm_hw_lock_t *lock;
-} drm_sigdata_t;
+ struct drm_hw_lock *lock;
+};
/**
* Mappings list
*/
-typedef struct drm_map_list {
+struct drm_map_list {
struct list_head head; /**< list head */
- drm_hash_item_t hash;
- drm_map_t *map; /**< mapping */
+ struct drm_hash_item hash;
+ struct drm_map *map; /**< mapping */
unsigned int user_token;
-} drm_map_list_t;
+};
-typedef drm_map_t drm_local_map_t;
+typedef struct drm_map drm_local_map_t;
/**
* Context handle list
*/
-typedef struct drm_ctx_list {
+struct drm_ctx_list {
struct list_head head; /**< list head */
drm_context_t handle; /**< context handle */
- drm_file_t *tag; /**< associated fd private data */
-} drm_ctx_list_t;
+ struct drm_file *tag; /**< associated fd private data */
+};
-typedef struct drm_vbl_sig {
+struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
struct siginfo info;
struct task_struct *task;
-} drm_vbl_sig_t;
+};
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
@@ -523,19 +514,19 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
-typedef struct ati_pcigart_info {
+struct drm_ati_pcigart_info {
int gart_table_location;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
-} drm_ati_pcigart_info;
+};
/*
* Generic memory manager structs
*/
-typedef struct drm_mm_node {
+struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
@@ -543,12 +534,12 @@ typedef struct drm_mm_node {
unsigned long size;
struct drm_mm *mm;
void *private;
-} drm_mm_node_t;
+};
-typedef struct drm_mm {
+struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
-} drm_mm_t;
+};
/**
* DRM driver structure. This structure represent the common code for
@@ -560,21 +551,21 @@ struct drm_device;
struct drm_driver {
int (*load) (struct drm_device *, unsigned long flags);
int (*firstopen) (struct drm_device *);
- int (*open) (struct drm_device *, drm_file_t *);
+ int (*open) (struct drm_device *, struct drm_file *);
void (*preclose) (struct drm_device *, struct file * filp);
- void (*postclose) (struct drm_device *, drm_file_t *);
+ void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
int (*dma_ioctl) (DRM_IOCTL_ARGS);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
- int (*context_ctor) (struct drm_device * dev, int context);
- int (*context_dtor) (struct drm_device * dev, int context);
- int (*kernel_context_switch) (struct drm_device * dev, int old,
+ int (*context_ctor) (struct drm_device *dev, int context);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*kernel_context_switch) (struct drm_device *dev, int old,
int new);
- void (*kernel_context_switch_unlock) (struct drm_device * dev);
- int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
- int (*vblank_wait2) (struct drm_device * dev, unsigned int *sequence);
+ void (*kernel_context_switch_unlock) (struct drm_device *dev);
+ int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
+ int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
int (*dri_library_name) (struct drm_device *dev, char *buf);
/**
@@ -588,22 +579,23 @@ struct drm_driver {
* card is absolutely \b not AGP (return of 0), absolutely \b is AGP
* (return of 1), or may or may not be AGP (return of 2).
*/
- int (*device_is_agp) (struct drm_device * dev);
+ int (*device_is_agp) (struct drm_device *dev);
/* these have to be filled in */
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
- void (*irq_preinstall) (struct drm_device * dev);
- void (*irq_postinstall) (struct drm_device * dev);
- void (*irq_uninstall) (struct drm_device * dev);
- void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
+ void (*irq_preinstall) (struct drm_device *dev);
+ void (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+ void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct file *filp);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct file * filp);
- unsigned long (*get_map_ofs) (drm_map_t * map);
- unsigned long (*get_reg_ofs) (struct drm_device * dev);
- void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+ unsigned long (*get_map_ofs) (struct drm_map * map);
+ unsigned long (*get_reg_ofs) (struct drm_device *dev);
+ void (*set_version) (struct drm_device *dev,
+ struct drm_set_version *sv);
int major;
int minor;
@@ -625,19 +617,19 @@ struct drm_driver {
* that may contain multiple heads. Embed one per head of these in the
* private drm_device structure.
*/
-typedef struct drm_head {
+struct drm_head {
int minor; /**< Minor device number */
struct drm_device *dev;
struct proc_dir_entry *dev_root; /**< proc directory entry */
dev_t device; /**< Device number for mknod */
struct class_device *dev_class;
-} drm_head_t;
+};
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
*/
-typedef struct drm_device {
+struct drm_device {
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
@@ -663,35 +655,33 @@ typedef struct drm_device {
/** \name Performance counters */
/*@{ */
unsigned long counters;
- drm_stat_type_t types[15];
+ enum drm_stat_type types[15];
atomic_t counts[15];
/*@} */
/** \name Authentication */
/*@{ */
- drm_file_t *file_first; /**< file list head */
- drm_file_t *file_last; /**< file list tail */
- drm_open_hash_t magiclist; /**< magic hash table */
+ struct list_head filelist;
+ struct drm_open_hash magiclist; /**< magic hash table */
struct list_head magicfree;
/*@} */
/** \name Memory management */
/*@{ */
- drm_map_list_t *maplist; /**< Linked list of regions */
+ struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
- drm_open_hash_t map_hash; /**< User token hash table for maps */
+ struct drm_open_hash map_hash; /**< User token hash table for maps */
/** \name Context handle management */
/*@{ */
- drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
+ struct list_head ctxlist; /**< Linked list of context handles */
int ctx_count; /**< Number of context handles */
struct mutex ctxlist_mutex; /**< For ctxlist */
- drm_map_t **context_sareas; /**< per-context SAREA's */
- int max_context;
+ struct idr ctx_idr;
- drm_vma_entry_t *vmalist; /**< List of vmas (for debugging) */
- drm_lock_data_t lock; /**< Information on hardware lock */
+ struct list_head vmalist; /**< List of vmas (for debugging) */
+ struct drm_lock_data lock; /**< Information on hardware lock */
/*@} */
/** \name DMA queues (contexts) */
@@ -699,8 +689,8 @@ typedef struct drm_device {
int queue_count; /**< Number of active DMA queues */
int queue_reserved; /**< Number of reserved DMA queues */
int queue_slots; /**< Actual length of queuelist */
- drm_queue_t **queuelist; /**< Vector of pointers to DMA queues */
- drm_device_dma_t *dma; /**< Optional pointer for DMA support */
+ struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
/** \name Context support */
@@ -725,8 +715,8 @@ typedef struct drm_device {
atomic_t vbl_received;
atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
spinlock_t vbl_lock;
- drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */
- drm_vbl_sig_t vbl_sigs2; /**< signals to send on secondary VBLANK */
+ struct list_head vbl_sigs; /**< signal list to send on VBLANK */
+ struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@@ -739,7 +729,7 @@ typedef struct drm_device {
wait_queue_head_t buf_readers; /**< Processes waiting to read */
wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
- drm_agp_head_t *agp; /**< AGP data */
+ struct drm_agp_head *agp; /**< AGP data */
struct pci_dev *pdev; /**< PCI device structure */
int pci_vendor; /**< PCI vendor id */
@@ -747,26 +737,23 @@ typedef struct drm_device {
#ifdef __alpha__
struct pci_controller *hose;
#endif
- drm_sg_mem_t *sg; /**< Scatter gather memory */
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
- drm_sigdata_t sigdata; /**< For block_all_signals */
+ struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
struct drm_driver *driver;
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
- drm_head_t primary; /**< primary screen head */
+ struct drm_head primary; /**< primary screen head */
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
- unsigned int drw_bitfield_length;
- u32 *drw_bitfield;
- unsigned int drw_info_length;
- drm_drawable_info_t **drw_info;
+ struct idr drw_idr;
/*@} */
-} drm_device_t;
+};
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@@ -838,7 +825,7 @@ extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_lastclose(drm_device_t *dev);
+extern int drm_lastclose(struct drm_device *dev);
/* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
@@ -857,7 +844,7 @@ extern int drm_mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
-extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
@@ -896,9 +883,9 @@ extern int drm_newctx(struct inode *inode, struct file *filp,
extern int drm_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_ctxbitmap_init(drm_device_t * dev);
-extern void drm_ctxbitmap_cleanup(drm_device_t * dev);
-extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle);
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
extern int drm_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -912,8 +899,9 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
+extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
drm_drawable_t id);
+extern void drm_drawable_free_all(struct drm_device *dev);
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -926,10 +914,10 @@ extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
-extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
-extern void drm_idlelock_take(drm_lock_data_t *lock_data);
-extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
/*
* These are exported to drivers so that they can implement fencing using
@@ -940,15 +928,15 @@ extern int drm_i_have_hw_lock(struct file *filp);
extern int drm_kernel_take_hw_lock(struct file *filp);
/* Buffer management support (drm_bufs.h) */
-extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addmap(drm_device_t * dev, unsigned int offset,
- unsigned int size, drm_map_type_t type,
- drm_map_flags_t flags, drm_local_map_t ** map_ptr);
+extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addmap(struct drm_device *dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, drm_local_map_t ** map_ptr);
extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_rmmap(drm_device_t * dev, drm_local_map_t * map);
-extern int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t * map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t * map);
extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -963,56 +951,56 @@ extern int drm_freebufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern unsigned long drm_get_resource_start(drm_device_t * dev,
+extern unsigned long drm_get_resource_start(struct drm_device *dev,
unsigned int resource);
-extern unsigned long drm_get_resource_len(drm_device_t * dev,
+extern unsigned long drm_get_resource_len(struct drm_device *dev,
unsigned int resource);
/* DMA support (drm_dma.h) */
-extern int drm_dma_setup(drm_device_t * dev);
-extern void drm_dma_takedown(drm_device_t * dev);
-extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf);
-extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
+extern int drm_dma_setup(struct drm_device *dev);
+extern void drm_dma_takedown(struct drm_device *dev);
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp);
/* IRQ support (drm_irq.h) */
extern int drm_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
-extern int drm_irq_uninstall(drm_device_t * dev);
-extern void drm_driver_irq_preinstall(drm_device_t * dev);
-extern void drm_driver_irq_postinstall(drm_device_t * dev);
-extern void drm_driver_irq_uninstall(drm_device_t * dev);
+extern int drm_irq_uninstall(struct drm_device *dev);
+extern void drm_driver_irq_preinstall(struct drm_device *dev);
+extern void drm_driver_irq_postinstall(struct drm_device *dev);
+extern void drm_driver_irq_uninstall(struct drm_device *dev);
extern int drm_wait_vblank(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(drm_device_t * dev);
-extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*));
+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
/* AGP/GART support (drm_agpsupport.h) */
-extern drm_agp_head_t *drm_agp_init(drm_device_t * dev);
-extern int drm_agp_acquire(drm_device_t * dev);
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_release(drm_device_t * dev);
+extern int drm_agp_release(struct drm_device *dev);
extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(drm_device_t * dev, drm_agp_mode_t mode);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info * info);
extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
@@ -1024,16 +1012,18 @@ extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
-extern int drm_put_dev(drm_device_t * dev);
-extern int drm_put_head(drm_head_t * head);
+extern int drm_put_dev(struct drm_device *dev);
+extern int drm_put_head(struct drm_head *head);
extern unsigned int drm_debug;
extern unsigned int drm_cards_limit;
-extern drm_head_t **drm_heads;
+extern struct drm_head **drm_heads;
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
+extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
+
/* Proc support (drm_proc.h) */
-extern int drm_proc_init(drm_device_t * dev,
+extern int drm_proc_init(struct drm_device *dev,
int minor,
struct proc_dir_entry *root,
struct proc_dir_entry **dev_root);
@@ -1042,45 +1032,45 @@ extern int drm_proc_cleanup(int minor,
struct proc_dir_entry *dev_root);
/* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(drm_sg_mem_t * entry);
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
extern int drm_sg_alloc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_sg_free(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(drm_device_t * dev,
- drm_ati_pcigart_info * gart_info);
-extern int drm_ati_pcigart_cleanup(drm_device_t * dev,
- drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
-extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size,
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
-extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah);
-extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
/* sysfs support (drm_sysfs.c) */
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(struct class *cs);
extern struct class_device *drm_sysfs_device_add(struct class *cs,
- drm_head_t *head);
+ struct drm_head *head);
extern void drm_sysfs_device_remove(struct class_device *class_dev);
/*
* Basic memory manager support (drm_mm.c)
*/
-extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size,
unsigned alignment);
-void drm_mm_put_block(drm_mm_node_t * cur);
-extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
+void drm_mm_put_block(struct drm_mm_node * cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
unsigned alignment, int best_match);
-extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(drm_mm_t *mm);
-extern int drm_mm_clean(drm_mm_t *mm);
-extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
-extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
@@ -1088,14 +1078,14 @@ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
- drm_map_list_t *_entry;
- list_for_each_entry(_entry, &dev->maplist->head, head)
+ struct drm_map_list *_entry;
+ list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
}
-static __inline__ int drm_device_is_agp(drm_device_t * dev)
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
if (dev->driver->device_is_agp != NULL) {
int err = (*dev->driver->device_is_agp) (dev);
@@ -1108,7 +1098,7 @@ static __inline__ int drm_device_is_agp(drm_device_t * dev)
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}
-static __inline__ int drm_device_is_pcie(drm_device_t * dev)
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
{
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
@@ -1143,7 +1133,7 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area);
/*@}*/
-extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
#endif /* __KERNEL__ */
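The header conversion above also swaps every hand-rolled prev/next chain for an embedded struct list_head (drm_vma_entry, drm_agp_mem, the per-device filelist, vmalist and vbl_sigs), walked with list_for_each_entry() as drm_core_findmap() now does. A minimal sketch of that idiom, assuming illustrative helper names and a plain kmalloc() rather than whatever allocator the surrounding code actually uses:

/*
 * Hedged sketch of the embedded list_head idiom, e.g. struct
 * drm_vma_entry.head chained onto drm_device.vmalist.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "drmP.h"

static int example_track_vma(struct drm_device *dev, struct vm_area_struct *vma)
{
	struct drm_vma_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;
	entry->vma = vma;
	entry->pid = current->pid;
	list_add(&entry->head, &dev->vmalist);	/* replaces the old next pointer */
	return 0;
}

static void example_dump_vmas(struct drm_device *dev)
{
	struct drm_vma_entry *pt;

	/* list_for_each_entry() recovers each container from its list_head */
	list_for_each_entry(pt, &dev->vmalist, head)
		printk(KERN_DEBUG "vma tracked for pid %d\n", pt->pid);
}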
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 40bfd9b01e39..354f0e3674bf 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -48,7 +48,7 @@
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
-int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info)
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
DRM_AGP_KERN *kern;
@@ -74,16 +74,16 @@ EXPORT_SYMBOL(drm_agp_info);
int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_info_t info;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_agp_info info;
int err;
err = drm_agp_info(dev, &info);
if (err)
return err;
- if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
+ if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info)))
return -EFAULT;
return 0;
}
@@ -97,7 +97,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire(drm_device_t * dev)
+int drm_agp_acquire(struct drm_device * dev)
{
if (!dev->agp)
return -ENODEV;
@@ -126,9 +126,9 @@ EXPORT_SYMBOL(drm_agp_acquire);
int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
+ struct drm_file *priv = filp->private_data;
- return drm_agp_acquire((drm_device_t *) priv->head->dev);
+ return drm_agp_acquire((struct drm_device *) priv->head->dev);
}
/**
@@ -139,7 +139,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
-int drm_agp_release(drm_device_t * dev)
+int drm_agp_release(struct drm_device * dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -152,8 +152,8 @@ EXPORT_SYMBOL(drm_agp_release);
int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
return drm_agp_release(dev);
}
@@ -168,7 +168,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
-int drm_agp_enable(drm_device_t * dev, drm_agp_mode_t mode)
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -185,11 +185,11 @@ EXPORT_SYMBOL(drm_agp_enable);
int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_mode_t mode;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_agp_mode mode;
- if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+ if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode)))
return -EFAULT;
return drm_agp_enable(dev, mode);
@@ -207,9 +207,9 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device is present and has been acquired, allocates the
* memory via alloc_agp() and creates a drm_agp_mem entry for it.
*/
-int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
@@ -232,11 +232,7 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
- entry->prev = NULL;
- entry->next = dev->agp->memory;
- if (dev->agp->memory)
- dev->agp->memory->prev = entry;
- dev->agp->memory = entry;
+ list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
@@ -248,10 +244,10 @@ EXPORT_SYMBOL(drm_agp_alloc);
int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_buffer_t request;
- drm_agp_buffer_t __user *argp = (void __user *)arg;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_agp_buffer request;
+ struct drm_agp_buffer __user *argp = (void __user *)arg;
int err;
if (copy_from_user(&request, argp, sizeof(request)))
@@ -262,10 +258,12 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
return err;
if (copy_to_user(argp, &request, sizeof(request))) {
- drm_agp_mem_t *entry = dev->agp->memory;
-
- dev->agp->memory = entry->next;
- dev->agp->memory->prev = NULL;
+ struct drm_agp_mem *entry;
+ list_for_each_entry(entry, &dev->agp->memory, head) {
+ if (entry->handle == request.handle)
+ break;
+ }
+ list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -EFAULT;
@@ -283,12 +281,12 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
-static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
unsigned long handle)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
- for (entry = dev->agp->memory; entry; entry = entry->next) {
+ list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
@@ -307,9 +305,9 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
* Verifies the AGP device is present and acquired, looks-up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
-int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
@@ -328,12 +326,12 @@ EXPORT_SYMBOL(drm_agp_unbind);
int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_binding_t request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_agp_binding request;
if (copy_from_user
- (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+ (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
return -EFAULT;
return drm_agp_unbind(dev, &request);
@@ -352,9 +350,9 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
* is currently bound into the GATT. Looks-up the AGP memory entry and passes
* it to bind_agp() function.
*/
-int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
int retcode;
int page;
@@ -377,12 +375,12 @@ EXPORT_SYMBOL(drm_agp_bind);
int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_binding_t request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_agp_binding request;
if (copy_from_user
- (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+ (&request, (struct drm_agp_binding __user *) arg, sizeof(request)))
return -EFAULT;
return drm_agp_bind(dev, &request);
@@ -402,9 +400,9 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
-int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -413,13 +411,7 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
if (entry->bound)
drm_unbind_agp(entry->memory);
- if (entry->prev)
- entry->prev->next = entry->next;
- else
- dev->agp->memory = entry->next;
-
- if (entry->next)
- entry->next->prev = entry->prev;
+ list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@@ -430,12 +422,12 @@ EXPORT_SYMBOL(drm_agp_free);
int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_buffer_t request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_agp_buffer request;
if (copy_from_user
- (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
+ (&request, (struct drm_agp_buffer __user *) arg, sizeof(request)))
return -EFAULT;
return drm_agp_free(dev, &request);
@@ -450,9 +442,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*/
-drm_agp_head_t *drm_agp_init(drm_device_t * dev)
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
- drm_agp_head_t *head = NULL;
+ struct drm_agp_head *head = NULL;
if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
return NULL;
@@ -472,7 +464,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t * dev)
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
}
- head->memory = NULL;
+ INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
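With drm_agp_head::memory now a list_head initialized by INIT_LIST_HEAD(), allocation adds entries with list_add() and drm_agp_free() unlinks them with list_del(). Anything that tears the whole list down has to use the _safe iterator, since each entry is freed while walking. A hedged sketch of such a teardown loop, not part of this hunk, reusing the helpers visible above:

/* Hedged sketch: release every AGP entry on dev->agp->memory.  The
 * _safe variant is required because list_del() runs inside the loop. */
#include "drmP.h"

static void example_agp_clear(struct drm_device *dev)
{
	struct drm_agp_mem *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &dev->agp->memory, head) {
		if (entry->bound)
			drm_unbind_agp(entry->memory);
		drm_free_agp(entry->memory, entry->pages);
		list_del(&entry->head);
		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
	}
}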
diff --git a/drivers/char/drm/drm_auth.c b/drivers/char/drm/drm_auth.c
index c7b19d35bcd6..7f777da872cd 100644
--- a/drivers/char/drm/drm_auth.c
+++ b/drivers/char/drm/drm_auth.c
@@ -45,15 +45,15 @@
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
-static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
{
- drm_file_t *retval = NULL;
- drm_magic_entry_t *pt;
- drm_hash_item_t *hash;
+ struct drm_file *retval = NULL;
+ struct drm_magic_entry *pt;
+ struct drm_hash_item *hash;
mutex_lock(&dev->struct_mutex);
if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
- pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
mutex_unlock(&dev->struct_mutex);
@@ -71,10 +71,10 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
* associated the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
-static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
+static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
drm_magic_t magic)
{
- drm_magic_entry_t *entry;
+ struct drm_magic_entry *entry;
DRM_DEBUG("%d\n", magic);
@@ -102,10 +102,10 @@ static int drm_add_magic(drm_device_t * dev, drm_file_t * priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
-static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
{
- drm_magic_entry_t *pt;
- drm_hash_item_t *hash;
+ struct drm_magic_entry *pt;
+ struct drm_hash_item *hash;
DRM_DEBUG("%d\n", magic);
@@ -114,7 +114,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
drm_ht_remove_item(&dev->magiclist, hash);
list_del(&pt->head);
mutex_unlock(&dev->struct_mutex);
@@ -142,9 +142,9 @@ int drm_getmagic(struct inode *inode, struct file *filp,
{
static drm_magic_t sequence = 0;
static DEFINE_SPINLOCK(lock);
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_auth_t auth;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_auth auth;
/* Find unique magic */
if (priv->magic) {
@@ -162,7 +162,7 @@ int drm_getmagic(struct inode *inode, struct file *filp,
}
DRM_DEBUG("%u\n", auth.magic);
- if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth)))
+ if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth)))
return -EFAULT;
return 0;
}
@@ -181,12 +181,12 @@ int drm_getmagic(struct inode *inode, struct file *filp,
int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_auth_t auth;
- drm_file_t *file;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_auth auth;
+ struct drm_file *file;
- if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth)))
+ if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth)))
return -EFAULT;
DRM_DEBUG("%u\n", auth.magic);
if ((file = drm_find_file(dev, auth.magic))) {
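drm_find_file() above leans on the open hash table storing only the embedded drm_hash_item; drm_hash_entry() then maps that item back to the drm_magic_entry that contains it. The macro lives in drm_hashtab.h rather than in this hunk, but it behaves like container_of(), roughly as in this illustrative sketch:

/* Illustrative only: recover the owner of an embedded drm_hash_item,
 * the way drm_hash_entry(hash, struct drm_magic_entry, hash_item) does. */
#include <linux/kernel.h>	/* container_of() */
#include "drmP.h"

static struct drm_magic_entry *example_owner_of(struct drm_hash_item *hash)
{
	return container_of(hash, struct drm_magic_entry, hash_item);
}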
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index c11345856ffe..923174c54a1c 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,26 +36,24 @@
#include <linux/vmalloc.h>
#include "drmP.h"
-unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
-unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
-static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
drm_local_map_t *map)
{
- struct list_head *list;
-
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+ struct drm_map_list *entry;
+ list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
@@ -66,7 +64,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
return NULL;
}
-static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
unsigned long user_token, int hashed_handle)
{
int use_hashed_handle;
@@ -103,12 +101,13 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
-static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
- unsigned int size, drm_map_type_t type,
- drm_map_flags_t flags, drm_map_list_t ** maplist)
+static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags,
+ struct drm_map_list ** maplist)
{
- drm_map_t *map;
- drm_map_list_t *list;
+ struct drm_map *map;
+ struct drm_map_list *list;
drm_dma_handle_t *dmah;
unsigned long user_token;
int ret;
@@ -214,7 +213,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
}
break;
case _DRM_AGP: {
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
int valid = 0;
if (!drm_core_has_AGP(dev)) {
@@ -237,14 +236,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
- for (entry = dev->agp->memory; entry; entry = entry->next) {
+ list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
- if (dev->agp->memory && !valid) {
+ if (!list_empty(&dev->agp->memory) && !valid) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EPERM;
}
@@ -289,7 +288,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list->map = map;
mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist->head);
+ list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
/* We do it here so that dev->struct_mutex protects the increment */
@@ -312,11 +311,11 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
return 0;
}
-int drm_addmap(drm_device_t * dev, unsigned int offset,
- unsigned int size, drm_map_type_t type,
- drm_map_flags_t flags, drm_local_map_t ** map_ptr)
+int drm_addmap(struct drm_device * dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, drm_local_map_t ** map_ptr)
{
- drm_map_list_t *list;
+ struct drm_map_list *list;
int rc;
rc = drm_addmap_core(dev, offset, size, type, flags, &list);
@@ -330,11 +329,11 @@ EXPORT_SYMBOL(drm_addmap);
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t map;
- drm_map_list_t *maplist;
- drm_map_t __user *argp = (void __user *)arg;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map map;
+ struct drm_map_list *maplist;
+ struct drm_map __user *argp = (void __user *)arg;
int err;
if (!(filp->f_mode & 3))
@@ -353,7 +352,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
if (err)
return err;
- if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
+ if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
return -EFAULT;
/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set, so it's too late to change */
@@ -369,7 +368,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
- * \param arg pointer to a drm_map_t structure.
+ * \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*
* Searches the map on drm_device::maplist, removes it from the list, see if
@@ -378,31 +377,26 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*
* \sa drm_addmap
*/
-int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
{
- struct list_head *list;
- drm_map_list_t *r_list = NULL;
+ struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
+ int found = 0;
/* Find the list entry for the map and remove it */
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
-
+ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
- list_del(list);
+ list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
- drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+ drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+ found = 1;
break;
}
}
- /* List has wrapped around to the head pointer, or it's empty and we
- * didn't find anything.
- */
- if (list == (&dev->maplist->head)) {
+ if (!found)
return -EINVAL;
- }
switch (map->type) {
case _DRM_REGISTERS:
@@ -433,7 +427,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
return 0;
}
-int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
int ret;
@@ -456,21 +450,19 @@ int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map request;
drm_local_map_t *map = NULL;
- struct list_head *list;
+ struct drm_map_list *r_list;
int ret;
- if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
+ if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
return -EFAULT;
}
mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
-
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request.handle &&
r_list->map->flags & _DRM_REMOVABLE) {
@@ -482,7 +474,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
- if (list == (&dev->maplist->head)) {
+ if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@@ -513,7 +505,8 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
*
* Frees any pages and buffers associated with the given entry.
*/
-static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
+static void drm_cleanup_buf_error(struct drm_device * dev,
+ struct drm_buf_entry * entry)
{
int i;
@@ -550,20 +543,20 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
/**
* Add AGP buffers for DMA transfers.
*
- * \param dev drm_device_t to which the buffers are to be added.
- * \param request pointer to a drm_buf_desc_t describing the request.
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
* \return zero on success or a negative number on failure.
*
* After some sanity checks creates a drm_buf structure for each buffer and
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
-int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_agp_mem_t *agp_entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_agp_mem *agp_entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -574,7 +567,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int byte_count;
int i, valid;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!dma)
return -EINVAL;
@@ -606,14 +599,14 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
- for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+ list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
- if (dev->agp->memory && !valid) {
+ if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
@@ -728,24 +721,24 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
-int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int count;
int order;
int size;
int total;
int page_order;
- drm_buf_entry_t *entry;
+ struct drm_buf_entry *entry;
drm_dma_handle_t *dmah;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
unsigned long *temp_pagelist;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
return -EINVAL;
@@ -954,11 +947,11 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
}
EXPORT_SYMBOL(drm_addbufs_pci);
-static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -969,7 +962,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int byte_count;
int i;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -1116,11 +1109,11 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
return 0;
}
-static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -1131,7 +1124,7 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int byte_count;
int i;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
return -EINVAL;
@@ -1283,7 +1276,7 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param arg pointer to a struct drm_buf_desc request.
* \return zero on success or a negative number on failure.
*
* According to the memory type specified in drm_buf_desc::flags and the
@@ -1294,15 +1287,15 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
int drm_addbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_buf_desc_t request;
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_buf_desc request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int ret;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
- if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
+ if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
sizeof(request)))
return -EFAULT;
@@ -1346,11 +1339,11 @@ int drm_addbufs(struct inode *inode, struct file *filp,
int drm_infobufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_info_t request;
- drm_buf_info_t __user *argp = (void __user *)arg;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_info request;
+ struct drm_buf_info __user *argp = (void __user *)arg;
int i;
int count;
@@ -1381,10 +1374,10 @@ int drm_infobufs(struct inode *inode, struct file *filp,
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count) {
- drm_buf_desc_t __user *to =
+ struct drm_buf_desc __user *to =
&request.list[count];
- drm_buf_entry_t *from = &dma->bufs[i];
- drm_freelist_t *list = &dma->bufs[i].freelist;
+ struct drm_buf_entry *from = &dma->bufs[i];
+ struct drm_freelist *list = &dma->bufs[i].freelist;
if (copy_to_user(&to->count,
&from->buf_count,
sizeof(from->buf_count)) ||
@@ -1434,12 +1427,12 @@ int drm_infobufs(struct inode *inode, struct file *filp,
int drm_markbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_desc_t request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_desc request;
int order;
- drm_buf_entry_t *entry;
+ struct drm_buf_entry *entry;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1448,7 +1441,7 @@ int drm_markbufs(struct inode *inode, struct file *filp,
return -EINVAL;
if (copy_from_user(&request,
- (drm_buf_desc_t __user *) arg, sizeof(request)))
+ (struct drm_buf_desc __user *) arg, sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d, %d, %d\n",
@@ -1484,13 +1477,13 @@ int drm_markbufs(struct inode *inode, struct file *filp,
int drm_freebufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_free_t request;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_free request;
int i;
int idx;
- drm_buf_t *buf;
+ struct drm_buf *buf;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1499,7 +1492,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
return -EINVAL;
if (copy_from_user(&request,
- (drm_buf_free_t __user *) arg, sizeof(request)))
+ (struct drm_buf_free __user *) arg, sizeof(request)))
return -EFAULT;
DRM_DEBUG("%d\n", request.count);
@@ -1540,15 +1533,15 @@ int drm_freebufs(struct inode *inode, struct file *filp,
int drm_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_map_t __user *argp = (void __user *)arg;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_map __user *argp = (void __user *)arg;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
- drm_buf_map_t request;
+ struct drm_buf_map request;
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1574,7 +1567,7 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
&& (dma->flags & _DRM_DMA_USE_SG))
|| (drm_core_check_feature(dev, DRIVER_FB_DMA)
&& (dma->flags & _DRM_DMA_USE_FB))) {
- drm_map_t *map = dev->agp_buffer_map;
+ struct drm_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
if (!map) {
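The drm_bufs.c hunks above keep replacing open-coded list_for_each() plus list_entry() pairs (and the hand-rolled next pointers in struct drm_agp_mem) with list_for_each_entry() over list_heads embedded in the entry structs. The following is a rough userspace sketch of the idiom that macro wraps, not the kernel's <linux/list.h>; the names struct map_list and token are invented for the example. The node lives inside the payload and container_of() recovers the payload from the node:

/*
 * Minimal userspace illustration of the intrusive-list idiom behind
 * list_for_each_entry().  Illustration only, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                               \
        for (pos = container_of((head)->next, __typeof__(*pos), member);     \
             &pos->member != (head);                                         \
             pos = container_of(pos->member.next, __typeof__(*pos), member))

struct map_list {
        int token;                      /* stand-in for the real payload */
        struct list_head head;          /* node embedded in the entry */
};

/* Insert right after the list head, like the kernel's list_add(). */
static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

int main(void)
{
        struct list_head maplist = { &maplist, &maplist };
        struct map_list a = { .token = 1 }, b = { .token = 2 };
        struct map_list *entry;

        list_add(&a.head, &maplist);
        list_add(&b.head, &maplist);

        /* No separate struct list_head *cursor and no list_entry() calls. */
        list_for_each_entry(entry, &maplist, head)
                printf("token %d\n", entry->token);
        return 0;
}

This is why the patch can also turn dev->maplist from a pointer to a heap-allocated list container into a plain struct list_head embedded in the device.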
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 83094c73da67..61ad986baa8d 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -53,26 +53,14 @@
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_mutex
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- if (ctx_handle < 0)
- goto failed;
- if (!dev->ctx_bitmap)
- goto failed;
-
- if (ctx_handle < DRM_MAX_CTXBITMAP) {
- mutex_lock(&dev->struct_mutex);
- clear_bit(ctx_handle, dev->ctx_bitmap);
- dev->context_sareas[ctx_handle] = NULL;
- mutex_unlock(&dev->struct_mutex);
- return;
- }
- failed:
- DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
- return;
+ mutex_lock(&dev->struct_mutex);
+ idr_remove(&dev->ctx_idr, ctx_handle);
+ mutex_unlock(&dev->struct_mutex);
}
/**
@@ -81,62 +69,28 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
- * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
- * drm_device::context_sareas to accommodate the new entry while holding the
+ * Allocate a new id from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
-static int drm_ctxbitmap_next(drm_device_t * dev)
+static int drm_ctxbitmap_next(struct drm_device * dev)
{
- int bit;
-
- if (!dev->ctx_bitmap)
- return -1;
+ int new_id;
+ int ret;
+again:
+ if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Out of memory expanding drawable idr\n");
+ return -ENOMEM;
+ }
mutex_lock(&dev->struct_mutex);
- bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
- if (bit < DRM_MAX_CTXBITMAP) {
- set_bit(bit, dev->ctx_bitmap);
- DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
- if ((bit + 1) > dev->max_context) {
- dev->max_context = (bit + 1);
- if (dev->context_sareas) {
- drm_map_t **ctx_sareas;
-
- ctx_sareas = drm_realloc(dev->context_sareas,
- (dev->max_context -
- 1) *
- sizeof(*dev->
- context_sareas),
- dev->max_context *
- sizeof(*dev->
- context_sareas),
- DRM_MEM_MAPS);
- if (!ctx_sareas) {
- clear_bit(bit, dev->ctx_bitmap);
- mutex_unlock(&dev->struct_mutex);
- return -1;
- }
- dev->context_sareas = ctx_sareas;
- dev->context_sareas[bit] = NULL;
- } else {
- /* max_context == 1 at this point */
- dev->context_sareas =
- drm_alloc(dev->max_context *
- sizeof(*dev->context_sareas),
- DRM_MEM_MAPS);
- if (!dev->context_sareas) {
- clear_bit(bit, dev->ctx_bitmap);
- mutex_unlock(&dev->struct_mutex);
- return -1;
- }
- dev->context_sareas[bit] = NULL;
- }
- }
+ ret = idr_get_new_above(&dev->ctx_idr, NULL,
+ DRM_RESERVED_CONTEXTS, &new_id);
+ if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
- return bit;
+ goto again;
}
mutex_unlock(&dev->struct_mutex);
- return -1;
+ return new_id;
}
/**
@@ -144,31 +98,11 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
*
* \param dev DRM device.
*
- * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(drm_device_t * dev)
+int drm_ctxbitmap_init(struct drm_device * dev)
{
- int i;
- int temp;
-
- mutex_lock(&dev->struct_mutex);
- dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
- DRM_MEM_CTXBITMAP);
- if (dev->ctx_bitmap == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
- }
- memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
- dev->context_sareas = NULL;
- dev->max_context = -1;
- mutex_unlock(&dev->struct_mutex);
-
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- temp = drm_ctxbitmap_next(dev);
- DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
- }
-
+ idr_init(&dev->ctx_idr);
return 0;
}
@@ -177,17 +111,13 @@ int drm_ctxbitmap_init(drm_device_t * dev)
*
* \param dev DRM device.
*
- * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Free all idr members while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(drm_device_t * dev)
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
- if (dev->context_sareas)
- drm_free(dev->context_sareas,
- sizeof(*dev->context_sareas) *
- dev->max_context, DRM_MEM_MAPS);
- drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
+ idr_remove_all(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
@@ -206,34 +136,34 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
- * Gets the map from drm_device::context_sareas with the handle specified and
+ * Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_getsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_priv_map_t __user *argp = (void __user *)arg;
- drm_ctx_priv_map_t request;
- drm_map_t *map;
- drm_map_list_t *_entry;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_ctx_priv_map __user *argp = (void __user *)arg;
+ struct drm_ctx_priv_map request;
+ struct drm_map *map;
+ struct drm_map_list *_entry;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
- if (dev->max_context < 0
- || request.ctx_id >= (unsigned)dev->max_context) {
+
+ map = idr_find(&dev->ctx_idr, request.ctx_id);
+ if (!map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- map = dev->context_sareas[request.ctx_id];
mutex_unlock(&dev->struct_mutex);
request.handle = NULL;
- list_for_each_entry(_entry, &dev->maplist->head, head) {
+ list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request.handle =
(void *)(unsigned long)_entry->user_token;
@@ -258,25 +188,24 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
* \return zero on success or a negative number on failure.
*
* Searches the mapping specified in \p arg and update the entry in
- * drm_device::context_sareas with it.
+ * drm_device::ctx_idr with it.
*/
int drm_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_priv_map_t request;
- drm_map_t *map = NULL;
- drm_map_list_t *r_list = NULL;
- struct list_head *list;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_ctx_priv_map request;
+ struct drm_map *map = NULL;
+ struct drm_map_list *r_list = NULL;
if (copy_from_user(&request,
- (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
+ (struct drm_ctx_priv_map __user *) arg,
+ sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long)request.handle)
goto found;
@@ -289,11 +218,10 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
map = r_list->map;
if (!map)
goto bad;
- if (dev->max_context < 0)
- goto bad;
- if (request.ctx_id >= (unsigned)dev->max_context)
+
+ if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id)))
goto bad;
- dev->context_sareas[request.ctx_id] = map;
+
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -314,7 +242,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
*
* Attempt to set drm_device::context_flag.
*/
-static int drm_context_switch(drm_device_t * dev, int old, int new)
+static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
@@ -342,7 +270,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new)
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
-static int drm_context_switch_complete(drm_device_t * dev, int new)
+static int drm_context_switch_complete(struct drm_device * dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
@@ -372,9 +300,9 @@ static int drm_context_switch_complete(drm_device_t * dev, int new)
int drm_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_ctx_res_t res;
- drm_ctx_t __user *argp = (void __user *)arg;
- drm_ctx_t ctx;
+ struct drm_ctx_res res;
+ struct drm_ctx_res __user *argp = (void __user *)arg;
+ struct drm_ctx ctx;
int i;
if (copy_from_user(&res, argp, sizeof(res)))
@@ -409,11 +337,11 @@ int drm_resctx(struct inode *inode, struct file *filp,
int drm_addctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_list_t *ctx_entry;
- drm_ctx_t __user *argp = (void __user *)arg;
- drm_ctx_t ctx;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_ctx_list *ctx_entry;
+ struct drm_ctx __user *argp = (void __user *)arg;
+ struct drm_ctx ctx;
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
@@ -449,7 +377,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
ctx_entry->tag = priv;
mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist->head);
+ list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
@@ -477,8 +405,8 @@ int drm_modctx(struct inode *inode, struct file *filp,
int drm_getctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_ctx_t __user *argp = (void __user *)arg;
- drm_ctx_t ctx;
+ struct drm_ctx __user *argp = (void __user *)arg;
+ struct drm_ctx ctx;
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
@@ -505,11 +433,11 @@ int drm_getctx(struct inode *inode, struct file *filp,
int drm_switchctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_t ctx;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_ctx ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
@@ -530,11 +458,11 @@ int drm_switchctx(struct inode *inode, struct file *filp,
int drm_newctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_t ctx;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_ctx ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
@@ -557,11 +485,11 @@ int drm_newctx(struct inode *inode, struct file *filp,
int drm_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_t ctx;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_ctx ctx;
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
+ if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
@@ -575,10 +503,10 @@ int drm_rmctx(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist->head)) {
- drm_ctx_list_t *pos, *n;
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
- list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx.handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
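The context code above drops its private ctx_bitmap/context_sareas bookkeeping in favour of the kernel idr: idr_get_new_above() hands back the lowest free id at or above a floor and binds it to a pointer, idr_find() and idr_replace() read or swap that pointer, idr_remove() releases the id, and idr_pre_get() plus the -EAGAIN retry loop handle allocation. The sketch below is a deliberately simplified userspace stand-in for those semantics only (a toy fixed-size table, with invented names like toy_idr and MAX_IDS), not a description of how <linux/idr.h> is implemented:

/*
 * Toy userspace stand-in for the idr semantics the context code now uses.
 * The real <linux/idr.h> is a radix tree with a preload/retry protocol
 * (idr_pre_get + idr_get_new_above, retried on -EAGAIN); this table only
 * mimics the observable behaviour.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_IDS 64

struct toy_idr {
        void *slot[MAX_IDS];    /* payload stored under each id */
        int   used[MAX_IDS];    /* is the id allocated? */
};

/* Lowest free id at or above 'start', bound to 'ptr' (cf. idr_get_new_above). */
static int toy_idr_get_new_above(struct toy_idr *idr, void *ptr, int start, int *id)
{
        for (int i = start; i < MAX_IDS; i++) {
                if (!idr->used[i]) {
                        idr->used[i] = 1;
                        idr->slot[i] = ptr;
                        *id = i;
                        return 0;
                }
        }
        return -ENOSPC;
}

static void *toy_idr_find(struct toy_idr *idr, int id)        /* cf. idr_find */
{
        return (id >= 0 && id < MAX_IDS && idr->used[id]) ? idr->slot[id] : NULL;
}

/* Swap the pointer stored under an existing id (cf. idr_replace). */
static void *toy_idr_replace(struct toy_idr *idr, void *ptr, int id)
{
        void *old;

        if (id < 0 || id >= MAX_IDS || !idr->used[id])
                return NULL;    /* the kernel returns an ERR_PTR here */
        old = idr->slot[id];
        idr->slot[id] = ptr;
        return old;
}

static void toy_idr_remove(struct toy_idr *idr, int id)       /* cf. idr_remove */
{
        if (id >= 0 && id < MAX_IDS)
                idr->used[id] = 0;
}

int main(void)
{
        struct toy_idr ctx_idr = { { 0 }, { 0 } };
        int sarea = 42;         /* stands in for the context's drm_map */
        int id;

        /* drm_ctxbitmap_next(): allocate a handle above the reserved ids */
        toy_idr_get_new_above(&ctx_idr, NULL, 1, &id);
        /* drm_setsareactx(): bind the SAREA map to that handle */
        toy_idr_replace(&ctx_idr, &sarea, id);
        /* drm_getsareactx(): look it up again */
        printf("ctx %d -> %d\n", id, *(int *)toy_idr_find(&ctx_idr, id));
        /* drm_ctxbitmap_free(): drop the handle */
        toy_idr_remove(&ctx_idr, id);
        return 0;
}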
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 32ed19c9ec1c..802fbdbfe1b3 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -43,7 +43,7 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
-int drm_dma_setup(drm_device_t * dev)
+int drm_dma_setup(struct drm_device *dev)
{
int i;
@@ -67,9 +67,9 @@ int drm_dma_setup(drm_device_t * dev)
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
-void drm_dma_takedown(drm_device_t * dev)
+void drm_dma_takedown(struct drm_device *dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, j;
if (!dma)
@@ -129,7 +129,7 @@ void drm_dma_takedown(drm_device_t * dev)
*
* Resets the fields of \p buf.
*/
-void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
@@ -152,9 +152,9 @@ void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
*
* Frees each buffer associated with \p filp not already on the hardware.
*/
-void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp)
+void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index b33313be2547..d6cdba5644e2 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -44,83 +44,30 @@ int drm_adddraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
unsigned long irqflags;
- int i, j;
- u32 *bitfield = dev->drw_bitfield;
- unsigned int bitfield_length = dev->drw_bitfield_length;
- drm_drawable_info_t **info = dev->drw_info;
- unsigned int info_length = dev->drw_info_length;
- drm_draw_t draw;
-
- for (i = 0, j = 0; i < bitfield_length; i++) {
- if (bitfield[i] == ~0)
- continue;
-
- for (; j < 8 * sizeof(*bitfield); j++)
- if (!(bitfield[i] & (1 << j)))
- goto done;
+ struct drm_draw draw;
+ int new_id = 0;
+ int ret;
+
+again:
+ if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Out of memory expanding drawable idr\n");
+ return -ENOMEM;
}
-done:
-
- if (i == bitfield_length) {
- bitfield_length++;
-
- bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
- DRM_MEM_BUFS);
-
- if (!bitfield) {
- DRM_ERROR("Failed to allocate new drawable bitfield\n");
- return DRM_ERR(ENOMEM);
- }
-
- if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
- info_length += 8 * sizeof(*bitfield);
-
- info = drm_alloc(info_length * sizeof(*info),
- DRM_MEM_BUFS);
-
- if (!info) {
- DRM_ERROR("Failed to allocate new drawable info"
- " array\n");
-
- drm_free(bitfield,
- bitfield_length * sizeof(*bitfield),
- DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
- }
- }
-
- bitfield[i] = 0;
- }
-
- draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
- DRM_DEBUG("%d\n", draw.handle);
spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- bitfield[i] |= 1 << j;
- info[draw.handle - 1] = NULL;
-
- if (bitfield != dev->drw_bitfield) {
- memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
- sizeof(*bitfield));
- drm_free(dev->drw_bitfield, sizeof(*bitfield) *
- dev->drw_bitfield_length, DRM_MEM_BUFS);
- dev->drw_bitfield = bitfield;
- dev->drw_bitfield_length = bitfield_length;
- }
-
- if (info != dev->drw_info) {
- memcpy(info, dev->drw_info, dev->drw_info_length *
- sizeof(*info));
- drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
- DRM_MEM_BUFS);
- dev->drw_info = info;
- dev->drw_info_length = info_length;
+ ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+ if (ret == -EAGAIN) {
+ spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ goto again;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
+ draw.handle = new_id;
+
+ DRM_DEBUG("%d\n", draw.handle);
+
+ DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw));
return 0;
}
@@ -131,141 +78,52 @@ done:
int drm_rmdraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_draw_t draw;
- int id, idx;
- unsigned int shift;
+ struct drm_draw draw;
unsigned long irqflags;
- u32 *bitfield = dev->drw_bitfield;
- unsigned int bitfield_length = dev->drw_bitfield_length;
- drm_drawable_info_t **info = dev->drw_info;
- unsigned int info_length = dev->drw_info_length;
- DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data,
sizeof(draw));
- id = draw.handle - 1;
- idx = id / (8 * sizeof(*bitfield));
- shift = id % (8 * sizeof(*bitfield));
-
- if (idx < 0 || idx >= bitfield_length ||
- !(bitfield[idx] & (1 << shift))) {
- DRM_DEBUG("No such drawable %d\n", draw.handle);
- return 0;
- }
-
spin_lock_irqsave(&dev->drw_lock, irqflags);
- bitfield[idx] &= ~(1 << shift);
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- if (info[id]) {
- drm_free(info[id]->rects, info[id]->num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
- drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
- }
-
- /* Can we shrink the arrays? */
- if (idx == bitfield_length - 1) {
- while (idx >= 0 && !bitfield[idx])
- --idx;
-
- bitfield_length = idx + 1;
-
- bitfield = NULL;
-
- if (bitfield_length) {
- if (bitfield_length != dev->drw_bitfield_length)
- bitfield = drm_alloc(bitfield_length *
- sizeof(*bitfield),
- DRM_MEM_BUFS);
-
- if (!bitfield) {
- bitfield = dev->drw_bitfield;
- bitfield_length = dev->drw_bitfield_length;
- }
- }
- }
-
- if (bitfield != dev->drw_bitfield) {
- info_length = 8 * sizeof(*bitfield) * bitfield_length;
-
- if (info_length) {
- info = drm_alloc(info_length * sizeof(*info),
- DRM_MEM_BUFS);
-
- if (!info) {
- info = dev->drw_info;
- info_length = dev->drw_info_length;
- }
- } else
- info = NULL;
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
+ drm_free(drm_get_drawable_info(dev, draw.handle),
+ sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
- if (bitfield)
- memcpy(bitfield, dev->drw_bitfield, bitfield_length *
- sizeof(*bitfield));
- drm_free(dev->drw_bitfield, sizeof(*bitfield) *
- dev->drw_bitfield_length, DRM_MEM_BUFS);
- dev->drw_bitfield = bitfield;
- dev->drw_bitfield_length = bitfield_length;
-
- if (info != dev->drw_info) {
- if (info)
- memcpy(info, dev->drw_info, info_length *
- sizeof(*info));
- drm_free(dev->drw_info, sizeof(*info) *
- dev->drw_info_length, DRM_MEM_BUFS);
- dev->drw_info = info;
- dev->drw_info_length = info_length;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- }
+ idr_remove(&dev->drw_idr, draw.handle);
+ spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("%d\n", draw.handle);
return 0;
}
-int drm_update_drawable_info(DRM_IOCTL_ARGS) {
+int drm_update_drawable_info(DRM_IOCTL_ARGS)
+{
DRM_DEVICE;
- drm_update_draw_t update;
- unsigned int id, idx, shift;
- u32 *bitfield = dev->drw_bitfield;
- unsigned long irqflags, bitfield_length = dev->drw_bitfield_length;
- drm_drawable_info_t *info;
- drm_clip_rect_t *rects;
+ struct drm_update_draw update;
+ unsigned long irqflags;
+ struct drm_clip_rect *rects;
+ struct drm_drawable_info *info;
int err;
- DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data,
sizeof(update));
- id = update.handle - 1;
- idx = id / (8 * sizeof(*bitfield));
- shift = id % (8 * sizeof(*bitfield));
-
- if (idx < 0 || idx >= bitfield_length ||
- !(bitfield[idx] & (1 << shift))) {
- DRM_ERROR("No such drawable %d\n", update.handle);
- return DRM_ERR(EINVAL);
- }
-
- info = dev->drw_info[id];
-
+ info = idr_find(&dev->drw_idr, update.handle);
if (!info) {
- info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);
-
- if (!info) {
- DRM_ERROR("Failed to allocate drawable info memory\n");
- return DRM_ERR(ENOMEM);
+ info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
+ if (!info)
+ return -ENOMEM;
+ if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) {
+ DRM_ERROR("No such drawable %d\n", update.handle);
+ drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+ return -EINVAL;
}
}
switch (update.type) {
case DRM_DRAWABLE_CLIPRECTS:
if (update.num != info->num_rects) {
- rects = drm_alloc(update.num * sizeof(drm_clip_rect_t),
+ rects = drm_alloc(update.num * sizeof(struct drm_clip_rect),
DRM_MEM_BUFS);
} else
rects = info->rects;
@@ -277,7 +135,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
}
if (update.num && DRM_COPY_FROM_USER(rects,
- (drm_clip_rect_t __user *)
+ (struct drm_clip_rect __user *)
(unsigned long)update.data,
update.num *
sizeof(*rects))) {
@@ -290,17 +148,16 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
if (rects != info->rects) {
drm_free(info->rects, info->num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+ sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
}
info->rects = rects;
info->num_rects = update.num;
- dev->drw_info[id] = info;
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, id);
+ info->num_rects, update.handle);
break;
default:
DRM_ERROR("Invalid update type %d\n", update.type);
@@ -310,11 +167,9 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
return 0;
error:
- if (!dev->drw_info[id])
- drm_free(info, sizeof(*info), DRM_MEM_BUFS);
- else if (rects != dev->drw_info[id]->rects)
- drm_free(rects, update.num *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+ if (rects != info->rects)
+ drm_free(rects, update.num * sizeof(struct drm_clip_rect),
+ DRM_MEM_BUFS);
return err;
}
@@ -322,20 +177,27 @@ error:
/**
* Caller must hold the drawable spinlock!
*/
-drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
- u32 *bitfield = dev->drw_bitfield;
- unsigned int idx, shift;
-
- id--;
- idx = id / (8 * sizeof(*bitfield));
- shift = id % (8 * sizeof(*bitfield));
-
- if (idx < 0 || idx >= dev->drw_bitfield_length ||
- !(bitfield[idx] & (1 << shift))) {
- DRM_DEBUG("No such drawable %d\n", id);
- return NULL;
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+ return idr_find(&dev->drw_idr, id);
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+ struct drm_drawable_info *info = p;
+
+ if (info) {
+ drm_free(info->rects, info->num_rects *
+ sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+ drm_free(info, sizeof(*info), DRM_MEM_BUFS);
}
- return dev->drw_info[id];
+ return 0;
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+ idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+ idr_remove_all(&dev->drw_idr);
}
-EXPORT_SYMBOL(drm_get_drawable_info);
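drm_drawable_free_all() above introduces the idr_for_each() teardown pattern: a visitor callback (drm_drawable_free) is applied to every (id, pointer) pair, then idr_remove_all() empties the structure. Below is a small userspace sketch of that callback-per-entry idiom, with invented names (table, visit_fn, free_entry); it assumes nothing about the real idr internals:

/*
 * Userspace sketch of the callback-per-entry teardown idiom behind
 * drm_drawable_free_all(): walk every (id, pointer) pair, hand it to a
 * visitor, then empty the structure.  Illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_IDS 8

static void *table[MAX_IDS];

typedef int (*visit_fn)(int id, void *p, void *data);

static void for_each_entry(visit_fn fn, void *data)    /* cf. idr_for_each */
{
        for (int id = 0; id < MAX_IDS; id++)
                if (table[id])
                        fn(id, table[id], data);
}

static int free_entry(int id, void *p, void *data)     /* cf. drm_drawable_free */
{
        printf("freeing drawable %d\n", id);
        free(p);
        return 0;
}

int main(void)
{
        table[1] = malloc(16);
        table[5] = malloc(16);

        for_each_entry(free_entry, NULL);               /* free the payloads */
        for (int id = 0; id < MAX_IDS; id++)            /* cf. idr_remove_all */
                table[id] = NULL;
        return 0;
}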
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 8e77b7ed0f44..19994cd865de 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -129,11 +129,11 @@ static drm_ioctl_desc_t drm_ioctls[] = {
*
* \sa drm_device
*/
-int drm_lastclose(drm_device_t * dev)
+int drm_lastclose(struct drm_device * dev)
{
- drm_magic_entry_t *pt, *next;
- drm_map_list_t *r_list;
- drm_vma_entry_t *vma, *vma_next;
+ struct drm_magic_entry *pt, *next;
+ struct drm_map_list *r_list, *list_t;
+ struct drm_vma_entry *vma, *vma_temp;
int i;
DRM_DEBUG("\n");
@@ -151,19 +151,10 @@ int drm_lastclose(drm_device_t * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
- /* Free drawable information memory */
- for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
- i++) {
- drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
-
- if (info) {
- drm_free(info->rects, info->num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
- drm_free(info, sizeof(*info), DRM_MEM_BUFS);
- }
- }
-
mutex_lock(&dev->struct_mutex);
+
+ /* Free drawable information memory */
+ drm_drawable_free_all(dev);
del_timer(&dev->timer);
/* Clear pid list */
@@ -178,19 +169,17 @@ int drm_lastclose(drm_device_t * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
- drm_agp_mem_t *entry;
- drm_agp_mem_t *nexte;
+ struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
- for (entry = dev->agp->memory; entry; entry = nexte) {
- nexte = entry->next;
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
- dev->agp->memory = NULL;
+ INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_agp_release(dev);
@@ -204,20 +193,14 @@ int drm_lastclose(drm_device_t * dev)
}
/* Clear vma list (only built for debugging) */
- if (dev->vmalist) {
- for (vma = dev->vmalist; vma; vma = vma_next) {
- vma_next = vma->next;
- drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
- }
- dev->vmalist = NULL;
+ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+ list_del(&vma->head);
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
- if (dev->maplist) {
- while (!list_empty(&dev->maplist->head)) {
- struct list_head *list = dev->maplist->head.next;
- r_list = list_entry(list, drm_map_list_t, head);
- drm_rmmap_locked(dev, r_list->map);
- }
+ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@@ -298,7 +281,7 @@ EXPORT_SYMBOL(drm_init);
*
* \sa drm_init
*/
-static void drm_cleanup(drm_device_t * dev)
+static void drm_cleanup(struct drm_device * dev)
{
DRM_DEBUG("\n");
@@ -309,11 +292,7 @@ static void drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
- if (dev->maplist) {
- drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
- dev->maplist = NULL;
- drm_ht_remove(&dev->map_hash);
- }
+ drm_ht_remove(&dev->map_hash);
drm_ctxbitmap_cleanup(dev);
@@ -342,8 +321,8 @@ static void drm_cleanup(drm_device_t * dev)
void drm_exit(struct drm_driver *driver)
{
int i;
- drm_device_t *dev = NULL;
- drm_head_t *head;
+ struct drm_device *dev = NULL;
+ struct drm_head *head;
DRM_DEBUG("\n");
@@ -442,10 +421,10 @@ module_exit(drm_core_exit);
static int drm_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_version_t __user *argp = (void __user *)arg;
- drm_version_t version;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_version __user *argp = (void __user *)arg;
+ struct drm_version version;
int len;
if (copy_from_user(&version, argp, sizeof(version)))
@@ -478,8 +457,8 @@ static int drm_version(struct inode *inode, struct file *filp,
int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -529,3 +508,17 @@ int drm_ioctl(struct inode *inode, struct file *filp,
}
EXPORT_SYMBOL(drm_ioctl);
+
+drm_local_map_t *drm_getsarea(struct drm_device *dev)
+{
+ struct drm_map_list *entry;
+
+ list_for_each_entry(entry, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
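drm_lastclose() above tears down the AGP memory list, the vma list and the maplist with list_for_each_entry_safe(), because each loop body unlinks and frees the entry it is visiting. Here is a minimal userspace re-implementation of that iterator, for illustration only (struct vma_entry and the helpers are invented, not <linux/list.h>), showing why the next node has to be cached before the body runs:

/*
 * Why the _safe variant: the body frees 'vma', so the iterator must have
 * already cached the following node in 'vma_temp'.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_safe(pos, n, head, member)                        \
        for (pos = container_of((head)->next, __typeof__(*pos), member),      \
             n = container_of(pos->member.next, __typeof__(*pos), member);    \
             &pos->member != (head);                                          \
             pos = n, n = container_of(n->member.next, __typeof__(*n), member))

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

struct vma_entry { int id; struct list_head head; };

int main(void)
{
        struct list_head vmalist = { &vmalist, &vmalist };
        struct vma_entry *vma, *vma_temp;

        for (int i = 0; i < 3; i++) {
                vma = malloc(sizeof(*vma));
                vma->id = i;
                list_add(&vma->head, &vmalist);
        }

        /* Safe to free 'vma' here: 'vma_temp' already points at the next one. */
        list_for_each_entry_safe(vma, vma_temp, &vmalist, head) {
                printf("freeing vma %d\n", vma->id);
                list_del(&vma->head);
                free(vma);
        }
        return 0;
}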
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 3b159cab3bc8..7bc51bac450d 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -39,9 +39,9 @@
#include <linux/poll.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
- drm_device_t * dev);
+ struct drm_device * dev);
-static int drm_setup(drm_device_t * dev)
+static int drm_setup(struct drm_device * dev)
{
drm_local_map_t *map;
int i;
@@ -79,13 +79,6 @@ static int drm_setup(drm_device_t * dev)
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
- dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
- if (dev->ctxlist == NULL)
- return -ENOMEM;
- memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
- INIT_LIST_HEAD(&dev->ctxlist->head);
-
- dev->vmalist = NULL;
dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
@@ -135,7 +128,7 @@ static int drm_setup(drm_device_t * dev)
*/
int drm_open(struct inode *inode, struct file *filp)
{
- drm_device_t *dev = NULL;
+ struct drm_device *dev = NULL;
int minor = iminor(inode);
int retcode = 0;
@@ -174,7 +167,7 @@ EXPORT_SYMBOL(drm_open);
*/
int drm_stub_open(struct inode *inode, struct file *filp)
{
- drm_device_t *dev = NULL;
+ struct drm_device *dev = NULL;
int minor = iminor(inode);
int err = -ENODEV;
const struct file_operations *old_fops;
@@ -230,10 +223,10 @@ static int drm_cpu_valid(void)
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct inode *inode, struct file *filp,
- drm_device_t * dev)
+ struct drm_device * dev)
{
int minor = iminor(inode);
- drm_file_t *priv;
+ struct drm_file *priv;
int ret;
if (filp->f_flags & O_EXCL)
@@ -258,6 +251,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
+ INIT_LIST_HEAD(&priv->lhead);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -265,19 +260,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
- if (!dev->file_last) {
- priv->next = NULL;
- priv->prev = NULL;
- dev->file_first = priv;
- dev->file_last = priv;
- /* first opener automatically becomes master */
+ if (list_empty(&dev->filelist))
priv->master = 1;
- } else {
- priv->next = NULL;
- priv->prev = dev->file_last;
- dev->file_last->next = priv;
- dev->file_last = priv;
- }
+
+ list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
@@ -309,8 +295,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
/** No-op. */
int drm_fasync(int fd, struct file *filp, int on)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
@@ -336,8 +322,8 @@ EXPORT_SYMBOL(drm_fasync);
*/
int drm_release(struct inode *inode, struct file *filp)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev;
int retcode = 0;
lock_kernel();
@@ -414,10 +400,10 @@ int drm_release(struct inode *inode, struct file *filp)
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
- if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
- drm_ctx_list_t *pos, *n;
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
- list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
@@ -436,22 +422,12 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->struct_mutex);
if (priv->remove_auth_on_close == 1) {
- drm_file_t *temp = dev->file_first;
- while (temp) {
+ struct drm_file *temp;
+
+ list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
- temp = temp->next;
- }
- }
- if (priv->prev) {
- priv->prev->next = priv->next;
- } else {
- dev->file_first = priv->next;
- }
- if (priv->next) {
- priv->next->prev = priv->prev;
- } else {
- dev->file_last = priv->prev;
}
+ list_del(&priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index 31acb621dcce..3ad319070704 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -36,7 +36,7 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
-int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int i;
@@ -63,9 +63,9 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
return 0;
}
-void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
- drm_hash_item_t *entry;
+ struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
@@ -75,15 +75,15 @@ void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each(list, h_list) {
- entry = hlist_entry(list, drm_hash_item_t, head);
+ entry = hlist_entry(list, struct drm_hash_item, head);
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
}
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
- drm_hash_item_t *entry;
+ struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
@@ -91,7 +91,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each(list, h_list) {
- entry = hlist_entry(list, drm_hash_item_t, head);
+ entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return list;
if (entry->key > key)
@@ -101,9 +101,9 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
}
-int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
- drm_hash_item_t *entry;
+ struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list, *parent;
unsigned int hashed_key;
@@ -113,7 +113,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each(list, h_list) {
- entry = hlist_entry(list, drm_hash_item_t, head);
+ entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -132,7 +132,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
-int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
@@ -156,8 +156,8 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
return 0;
}
-int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
- drm_hash_item_t **item)
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+ struct drm_hash_item **item)
{
struct hlist_node *list;
@@ -165,11 +165,11 @@ int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
if (!list)
return -EINVAL;
- *item = hlist_entry(list, drm_hash_item_t, head);
+ *item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
-int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
@@ -182,14 +182,14 @@ int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
return -EINVAL;
}
-int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
-void drm_ht_remove(drm_open_hash_t *ht)
+void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
if (ht->use_vmalloc)
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 613091c970af..0f1376774168 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -37,31 +37,31 @@
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-typedef struct drm_hash_item{
+struct drm_hash_item {
struct hlist_node head;
unsigned long key;
-} drm_hash_item_t;
+};
-typedef struct drm_open_hash{
+struct drm_open_hash {
unsigned int size;
unsigned int order;
unsigned int fill;
struct hlist_head *table;
int use_vmalloc;
-} drm_open_hash_t;
+};
-extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order);
-extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item);
-extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add);
-extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key);
-extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key);
-extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item);
-extern void drm_ht_remove(drm_open_hash_t *ht);
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index fafeb34f89d5..462f46f2049a 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -82,7 +82,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_version32_t v32;
- drm_version_t __user *version;
+ struct drm_version __user *version;
int err;
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
@@ -129,7 +129,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
- drm_unique_t __user *u;
+ struct drm_unique __user *u;
int err;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
@@ -159,7 +159,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
- drm_unique_t __user *u;
+ struct drm_unique __user *u;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
@@ -179,8 +179,8 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
typedef struct drm_map32 {
u32 offset; /**< Requested physical address (0 for SAREA)*/
u32 size; /**< Requested physical size (bytes) */
- drm_map_type_t type; /**< Type of memory to map */
- drm_map_flags_t flags; /**< Flags */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
u32 handle; /**< User-space: "Handle" to pass to mmap() */
int mtrr; /**< MTRR slot used */
} drm_map32_t;
@@ -190,7 +190,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- drm_map_t __user *map;
+ struct drm_map __user *map;
int idx, err;
void *handle;
@@ -228,7 +228,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- drm_map_t __user *map;
+ struct drm_map __user *map;
int err;
void *handle;
@@ -270,7 +270,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
- drm_map_t __user *map;
+ struct drm_map __user *map;
u32 handle;
if (get_user(handle, &argp->handle))
@@ -300,7 +300,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
{
drm_client32_t c32;
drm_client32_t __user *argp = (void __user *)arg;
- drm_client_t __user *client;
+ struct drm_client __user *client;
int idx, err;
if (get_user(idx, &argp->idx))
@@ -333,7 +333,7 @@ typedef struct drm_stats32 {
u32 count;
struct {
u32 value;
- drm_stat_type_t type;
+ enum drm_stat_type type;
} data[15];
} drm_stats32_t;
@@ -342,7 +342,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
{
drm_stats32_t s32;
drm_stats32_t __user *argp = (void __user *)arg;
- drm_stats_t __user *stats;
+ struct drm_stats __user *stats;
int i, err;
stats = compat_alloc_user_space(sizeof(*stats));
@@ -379,7 +379,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc_t __user *buf;
+ struct drm_buf_desc __user *buf;
int err;
unsigned long agp_start;
@@ -411,7 +411,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
{
drm_buf_desc32_t b32;
drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc_t __user *buf;
+ struct drm_buf_desc __user *buf;
if (copy_from_user(&b32, argp, sizeof(b32)))
return -EFAULT;
@@ -440,8 +440,8 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
drm_buf_info32_t req32;
drm_buf_info32_t __user *argp = (void __user *)arg;
drm_buf_desc32_t __user *to;
- drm_buf_info_t __user *request;
- drm_buf_desc_t __user *list;
+ struct drm_buf_info __user *request;
+ struct drm_buf_desc __user *list;
size_t nbytes;
int i, err;
int count, actual;
@@ -457,11 +457,11 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
&& !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
return -EFAULT;
- nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t);
+ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
- list = (drm_buf_desc_t *) (request + 1);
+ list = (struct drm_buf_desc *) (request + 1);
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
@@ -477,7 +477,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
if (count >= actual)
for (i = 0; i < actual; ++i)
if (__copy_in_user(&to[i], &list[i],
- offsetof(drm_buf_desc_t, flags)))
+ offsetof(struct drm_buf_desc, flags)))
return -EFAULT;
if (__put_user(actual, &argp->count))
@@ -505,8 +505,8 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
drm_buf_map32_t __user *argp = (void __user *)arg;
drm_buf_map32_t req32;
drm_buf_pub32_t __user *list32;
- drm_buf_map_t __user *request;
- drm_buf_pub_t __user *list;
+ struct drm_buf_map __user *request;
+ struct drm_buf_pub __user *list;
int i, err;
int count, actual;
size_t nbytes;
@@ -519,11 +519,11 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
if (count < 0)
return -EINVAL;
- nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t);
+ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
- list = (drm_buf_pub_t *) (request + 1);
+ list = (struct drm_buf_pub *) (request + 1);
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
@@ -539,7 +539,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
if (count >= actual)
for (i = 0; i < actual; ++i)
if (__copy_in_user(&list32[i], &list[i],
- offsetof(drm_buf_pub_t, address))
+ offsetof(struct drm_buf_pub, address))
|| __get_user(addr, &list[i].address)
|| __put_user((unsigned long)addr,
&list32[i].address))
@@ -562,7 +562,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_free32_t req32;
- drm_buf_free_t __user *request;
+ struct drm_buf_free __user *request;
drm_buf_free32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -589,7 +589,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_priv_map32_t req32;
- drm_ctx_priv_map_t __user *request;
+ struct drm_ctx_priv_map __user *request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -610,7 +610,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_ctx_priv_map_t __user *request;
+ struct drm_ctx_priv_map __user *request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
int err;
unsigned int ctx_id;
@@ -648,7 +648,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
{
drm_ctx_res32_t __user *argp = (void __user *)arg;
drm_ctx_res32_t res32;
- drm_ctx_res_t __user *res;
+ struct drm_ctx_res __user *res;
int err;
if (copy_from_user(&res32, argp, sizeof(res32)))
@@ -658,7 +658,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
return -EFAULT;
if (__put_user(res32.count, &res->count)
- || __put_user((drm_ctx_t __user *) (unsigned long)res32.contexts,
+ || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
&res->contexts))
return -EFAULT;
@@ -679,7 +679,7 @@ typedef struct drm_dma32 {
int send_count; /**< Number of buffers to send */
u32 send_indices; /**< List of handles to buffers */
u32 send_sizes; /**< Lengths of data to send */
- drm_dma_flags_t flags; /**< Flags */
+ enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
u32 request_indices; /**< Buffer information */
@@ -692,7 +692,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
{
drm_dma32_t d32;
drm_dma32_t __user *argp = (void __user *)arg;
- drm_dma_t __user *d;
+ struct drm_dma __user *d;
int err;
if (copy_from_user(&d32, argp, sizeof(d32)))
@@ -740,7 +740,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
{
drm_agp_mode32_t __user *argp = (void __user *)arg;
drm_agp_mode32_t m32;
- drm_agp_mode_t __user *mode;
+ struct drm_agp_mode __user *mode;
if (get_user(m32.mode, &argp->mode))
return -EFAULT;
@@ -772,7 +772,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
{
drm_agp_info32_t __user *argp = (void __user *)arg;
drm_agp_info32_t i32;
- drm_agp_info_t __user *info;
+ struct drm_agp_info __user *info;
int err;
info = compat_alloc_user_space(sizeof(*info));
@@ -813,7 +813,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
drm_agp_buffer32_t req32;
- drm_agp_buffer_t __user *request;
+ struct drm_agp_buffer __user *request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -845,7 +845,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
- drm_agp_buffer_t __user *request;
+ struct drm_agp_buffer __user *request;
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
@@ -868,7 +868,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
drm_agp_binding32_t req32;
- drm_agp_binding_t __user *request;
+ struct drm_agp_binding __user *request;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
@@ -887,7 +887,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
- drm_agp_binding_t __user *request;
+ struct drm_agp_binding __user *request;
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
@@ -910,7 +910,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- drm_scatter_gather_t __user *request;
+ struct drm_scatter_gather __user *request;
int err;
unsigned long x;
@@ -938,7 +938,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- drm_scatter_gather_t __user *request;
+ struct drm_scatter_gather __user *request;
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
@@ -953,13 +953,13 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
}
struct drm_wait_vblank_request32 {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
u32 signal;
};
struct drm_wait_vblank_reply32 {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
s32 tval_sec;
s32 tval_usec;
@@ -975,7 +975,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
{
drm_wait_vblank32_t __user *argp = (void __user *)arg;
drm_wait_vblank32_t req32;
- drm_wait_vblank_t __user *request;
+ union drm_wait_vblank __user *request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 565895547d75..b195e102e737 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -52,10 +52,10 @@
int drm_getunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_unique_t __user *argp = (void __user *)arg;
- drm_unique_t u;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_unique __user *argp = (void __user *)arg;
+ struct drm_unique u;
if (copy_from_user(&u, argp, sizeof(u)))
return -EFAULT;
@@ -86,15 +86,15 @@ int drm_getunique(struct inode *inode, struct file *filp,
int drm_setunique(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_unique_t u;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_unique u;
int domain, bus, slot, func, ret;
if (dev->unique_len || dev->unique)
return -EBUSY;
- if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u)))
+ if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u)))
return -EFAULT;
if (!u.unique_len || u.unique_len > 1024)
@@ -136,7 +136,7 @@ int drm_setunique(struct inode *inode, struct file *filp,
return 0;
}
-static int drm_set_busid(drm_device_t * dev)
+static int drm_set_busid(struct drm_device * dev)
{
int len;
@@ -184,11 +184,11 @@ static int drm_set_busid(drm_device_t * dev)
int drm_getmap(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t __user *argp = (void __user *)arg;
- drm_map_t map;
- drm_map_list_t *r_list = NULL;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map __user *argp = (void __user *)arg;
+ struct drm_map map;
+ struct drm_map_list *r_list = NULL;
struct list_head *list;
int idx;
int i;
@@ -204,9 +204,9 @@ int drm_getmap(struct inode *inode, struct file *filp,
}
i = 0;
- list_for_each(list, &dev->maplist->head) {
+ list_for_each(list, &dev->maplist) {
if (i == idx) {
- r_list = list_entry(list, drm_map_list_t, head);
+ r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
@@ -245,11 +245,11 @@ int drm_getmap(struct inode *inode, struct file *filp,
int drm_getclient(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_client_t __user *argp = (drm_client_t __user *)arg;
- drm_client_t client;
- drm_file_t *pt;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_client __user *argp = (struct drm_client __user *)arg;
+ struct drm_client client;
+ struct drm_file *pt;
int idx;
int i;
@@ -257,12 +257,18 @@ int drm_getclient(struct inode *inode, struct file *filp,
return -EFAULT;
idx = client.idx;
mutex_lock(&dev->struct_mutex);
- for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
-
- if (!pt) {
+
+ if (list_empty(&dev->filelist)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
+
+ i = 0;
+ list_for_each_entry(pt, &dev->filelist, lhead) {
+ if (i++ >= idx)
+ break;
+ }
+
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;
@@ -288,9 +294,9 @@ int drm_getclient(struct inode *inode, struct file *filp,
int drm_getstats(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_stats_t stats;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_stats stats;
int i;
memset(&stats, 0, sizeof(stats));
@@ -310,7 +316,7 @@ int drm_getstats(struct inode *inode, struct file *filp,
mutex_unlock(&dev->struct_mutex);
- if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
+ if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
@@ -329,10 +335,10 @@ int drm_getstats(struct inode *inode, struct file *filp,
int drm_setversion(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_set_version_t sv;
- drm_set_version_t retv;
+ struct drm_set_version sv;
+ struct drm_set_version retv;
int if_version;
- drm_set_version_t __user *argp = (void __user *)data;
+ struct drm_set_version __user *argp = (void __user *)data;
int ret;
if (copy_from_user(&sv, argp, sizeof(sv)))
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 2e75331fd83e..871d2fde09b3 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -53,10 +53,10 @@
int drm_irq_by_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_irq_busid_t __user *argp = (void __user *)arg;
- drm_irq_busid_t p;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_irq_busid __user *argp = (void __user *)arg;
+ struct drm_irq_busid p;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
@@ -87,7 +87,7 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation.
*/
-static int drm_irq_install(drm_device_t * dev)
+static int drm_irq_install(struct drm_device * dev)
{
int ret;
unsigned long sh_flags = 0;
@@ -120,8 +120,8 @@ static int drm_irq_install(drm_device_t * dev)
spin_lock_init(&dev->vbl_lock);
- INIT_LIST_HEAD(&dev->vbl_sigs.head);
- INIT_LIST_HEAD(&dev->vbl_sigs2.head);
+ INIT_LIST_HEAD(&dev->vbl_sigs);
+ INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
@@ -155,7 +155,7 @@ static int drm_irq_install(drm_device_t * dev)
*
* Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
*/
-int drm_irq_uninstall(drm_device_t * dev)
+int drm_irq_uninstall(struct drm_device * dev)
{
int irq_enabled;
@@ -197,13 +197,13 @@ EXPORT_SYMBOL(drm_irq_uninstall);
int drm_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_control_t ctl;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_control ctl;
/* if we don't have an irq we fall back for compatibility reasons - this used to be a separate function in drm_dma.h */
- if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl)))
+ if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl)))
return -EFAULT;
switch (ctl.func) {
@@ -244,10 +244,10 @@ int drm_control(struct inode *inode, struct file *filp,
*/
int drm_wait_vblank(DRM_IOCTL_ARGS)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_wait_vblank_t __user *argp = (void __user *)data;
- drm_wait_vblank_t vblwait;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ union drm_wait_vblank __user *argp = (void __user *)data;
+ union drm_wait_vblank vblwait;
struct timeval now;
int ret = 0;
unsigned int flags, seq;
@@ -292,9 +292,9 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
- drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+ struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
- drm_vbl_sig_t *vbl_sig;
+ struct drm_vbl_sig *vbl_sig;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -302,7 +302,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
* for the same vblank sequence number; nothing to be done in
* that case
*/
- list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
+ list_for_each_entry(vbl_sig, vbl_sigs, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current) {
@@ -324,7 +324,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (!
(vbl_sig =
- drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) {
+ drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
return -ENOMEM;
}
@@ -336,7 +336,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
+ list_add_tail(&vbl_sig->head, vbl_sigs);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -371,7 +371,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
*
* If a signal is not requested, then calls vblank_wait().
*/
-void drm_vbl_send_signals(drm_device_t * dev)
+void drm_vbl_send_signals(struct drm_device * dev)
{
unsigned long flags;
int i;
@@ -379,20 +379,18 @@ void drm_vbl_send_signals(drm_device_t * dev)
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
- struct list_head *list, *tmp;
- drm_vbl_sig_t *vbl_sig;
- drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+ struct drm_vbl_sig *vbl_sig, *tmp;
+ struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
- list_for_each_safe(list, tmp, &vbl_sigs->head) {
- vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
- list_del(list);
+ list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
@@ -418,7 +416,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals);
*/
static void drm_locked_tasklet_func(unsigned long data)
{
- drm_device_t *dev = (drm_device_t*)data;
+ struct drm_device *dev = (struct drm_device *)data;
unsigned long irqflags;
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
@@ -455,7 +453,7 @@ static void drm_locked_tasklet_func(unsigned long data)
* context, it must not make any assumptions about this. Also, the HW lock will
* be held with the kernel context or any client context.
*/
-void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*))
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
unsigned long irqflags;
static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
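The drm_irq.c hunks above show the recurring list conversion in this patch: the old drm_vbl_sig_t lists carried their own embedded head element and were walked with list_for_each_safe() plus list_entry(), while the new code anchors the signals on a plain struct list_head and uses the typed helpers directly. A minimal sketch of the new drain loop, assuming only what the patch itself declares (struct drm_vbl_sig with a struct list_head member named head); it is illustrative, not part of the patch:

	/* Sketch: drain one vblank signal list the way drm_vbl_send_signals()
	 * now does. Locking and the sequence-number check are elided. */
	struct drm_vbl_sig *vbl_sig, *tmp;

	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
		send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task);
		list_del(&vbl_sig->head);		/* unlink before freeing */
		drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
	}

Because list_del() only touches the node being removed, the _safe iterator keeps the walk valid while entries are freed inside the loop.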
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index befd1af19dfe..c0534b5a8b78 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -51,15 +51,15 @@ static int drm_notifier(void *priv);
int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
DECLARE_WAITQUEUE(entry, current);
- drm_lock_t lock;
+ struct drm_lock lock;
int ret = 0;
++priv->lock_count;
- if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
+ if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
@@ -152,12 +152,12 @@ int drm_lock(struct inode *inode, struct file *filp,
int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_lock_t lock;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_lock lock;
unsigned long irqflags;
- if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
+ if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
@@ -202,7 +202,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
-int drm_lock_take(drm_lock_data_t *lock_data,
+int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
@@ -251,7 +251,7 @@ int drm_lock_take(drm_lock_data_t *lock_data,
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
-static int drm_lock_transfer(drm_lock_data_t *lock_data,
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
@@ -277,7 +277,7 @@ static int drm_lock_transfer(drm_lock_data_t *lock_data,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
-int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -319,7 +319,7 @@ int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
*/
static int drm_notifier(void *priv)
{
- drm_sigdata_t *s = (drm_sigdata_t *) priv;
+ struct drm_sigdata *s = (struct drm_sigdata *) priv;
unsigned int old, new, prev;
/* Allow signal delivery if lock isn't held */
@@ -350,7 +350,7 @@ static int drm_notifier(void *priv)
* having to worry about starvation.
*/
-void drm_idlelock_take(drm_lock_data_t *lock_data)
+void drm_idlelock_take(struct drm_lock_data *lock_data)
{
int ret = 0;
@@ -369,7 +369,7 @@ void drm_idlelock_take(drm_lock_data_t *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_take);
-void drm_idlelock_release(drm_lock_data_t *lock_data)
+void drm_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index 92a867082376..93019901bd30 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -80,7 +80,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
+ struct drm_device * dev)
{
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,7 +94,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
offset -= dev->hose->mem_space->start;
#endif
- for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+ list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
@@ -123,7 +123,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
}
/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
{
return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
}
@@ -148,7 +148,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
+ struct drm_device * dev)
{
return NULL;
}
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 2ec1d9f26264..3e6bc14f7441 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -44,26 +44,26 @@
#include "drmP.h"
#include <linux/slab.h>
-unsigned long drm_mm_tail_space(drm_mm_t *mm)
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
-int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
}
-static int drm_mm_create_tail_node(drm_mm_t *mm,
+static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size)
{
- drm_mm_node_t *child;
+ struct drm_mm_node *child;
- child = (drm_mm_node_t *)
+ child = (struct drm_mm_node *)
drm_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -98,13 +98,13 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
}
-int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
}
@@ -112,12 +112,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
return 0;
}
-static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size)
{
- drm_mm_node_t *child;
+ struct drm_mm_node *child;
- child = (drm_mm_node_t *)
+ child = (struct drm_mm_node *)
drm_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -139,12 +139,12 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
-drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size, unsigned alignment)
{
- drm_mm_node_t *align_splitoff = NULL;
- drm_mm_node_t *child;
+ struct drm_mm_node *align_splitoff = NULL;
+ struct drm_mm_node *child;
unsigned tmp = 0;
if (alignment)
@@ -175,26 +175,26 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(drm_mm_node_t * cur)
+void drm_mm_put_block(struct drm_mm_node * cur)
{
- drm_mm_t *mm = cur->mm;
+ struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
- drm_mm_node_t *prev_node = NULL;
- drm_mm_node_t *next_node;
+ struct drm_mm_node *prev_node = NULL;
+ struct drm_mm_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry);
+ prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry);
+ next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
@@ -218,14 +218,14 @@ void drm_mm_put_block(drm_mm_node_t * cur)
}
}
-drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
- drm_mm_node_t *entry;
- drm_mm_node_t *best;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
@@ -233,7 +233,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
best_size = ~0UL;
list_for_each(list, free_stack) {
- entry = list_entry(list, drm_mm_node_t, fl_entry);
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
@@ -259,14 +259,14 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
return best;
}
-int drm_mm_clean(drm_mm_t * mm)
+int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
-int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
@@ -275,12 +275,12 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
}
-void drm_mm_takedown(drm_mm_t * mm)
+void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
- entry = list_entry(bnode, drm_mm_node_t, fl_entry);
+ entry = list_entry(bnode, struct drm_mm_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index 0fe7b4497927..0b8d3433386d 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -34,8 +34,8 @@
/** Read/write memory barrier */
#define DRM_MEMORYBARRIER() mb()
/** DRM device local declaration */
-#define DRM_DEVICE drm_file_t *priv = filp->private_data; \
- drm_device_t *dev = priv->head->dev
+#define DRM_DEVICE struct drm_file *priv = filp->private_data; \
+ struct drm_device *dev = priv->head->dev
/** IRQ handler arguments and return type and values */
#define DRM_IRQ_ARGS int irq, void *arg
@@ -96,24 +96,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
-/**
- * Get the pointer to the SAREA.
- *
- * Searches the SAREA on the mapping lists and points drm_device::sarea to it.
- */
-#define DRM_GETSAREA() \
-do { \
- drm_map_list_t *entry; \
- list_for_each_entry( entry, &dev->maplist->head, head ) { \
- if ( entry->map && \
- entry->map->type == _DRM_SHM && \
- (entry->map->flags & _DRM_CONTAINS_LOCK) ) { \
- dev_priv->sarea = entry->map; \
- break; \
- } \
- } \
-} while (0)
-
#define DRM_HZ HZ
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
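The drm_os_linux.h change above drops the DRM_GETSAREA() helper at the same time that dev->maplist becomes a bare struct list_head (see the drm_stub.c hunk later in this series). Drivers that relied on the macro now open-code the lookup; a hedged sketch of the equivalent loop, using only names that appear elsewhere in this patch (dev_priv->sarea stands in for the driver's own private field):

	/* Sketch, not part of the patch: locate the SAREA map on the new
	 * plain-list maplist, as the removed DRM_GETSAREA() used to do. */
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map &&
		    entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			dev_priv->sarea = entry->map;
			break;
		}
	}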
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index 86a0f1c22091..e292bb0eaca2 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -47,7 +47,7 @@
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
@@ -126,7 +126,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*
* This function is for internal use in the Linux-specific DRM core code.
*/
-void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
#if 1
unsigned long addr;
@@ -172,7 +172,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
/**
* \brief Free a PCI consistent memory block
*/
-void drm_pci_free(drm_device_t * dev, drm_dma_handle_t * dmah)
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index b204498d1a28..12dfea89c7f3 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -87,7 +87,7 @@ static struct drm_proc_list {
* "/proc/dri/%minor%/", and each entry in proc_list as
* "/proc/dri/%minor%/%name%".
*/
-int drm_proc_init(drm_device_t * dev, int minor,
+int drm_proc_init(struct drm_device * dev, int minor,
struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
{
struct proc_dir_entry *ent;
@@ -163,7 +163,7 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
@@ -205,11 +205,10 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_map_t *map;
- drm_map_list_t *r_list;
- struct list_head *list;
+ struct drm_map *map;
+ struct drm_map_list *r_list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
@@ -229,9 +228,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
- if (dev->maplist != NULL)
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
@@ -242,14 +239,15 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
i,
map->offset,
- map->size, type, map->flags, r_list->user_token);
+ map->size, type, map->flags,
+ r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
- }
+ }
if (len > request + offset)
return request;
@@ -263,7 +261,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -286,10 +284,10 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
static int drm__queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
int i;
- drm_queue_t *q;
+ struct drm_queue *q;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -336,7 +334,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -359,9 +357,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma || offset > DRM_PROC_LIMIT) {
@@ -408,7 +406,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -431,9 +429,9 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
static int drm__clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_file_t *priv;
+ struct drm_file *priv;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -444,7 +442,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
- for (priv = dev->file_first; priv; priv = priv->next) {
+ list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
@@ -464,7 +462,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -478,9 +476,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_vma_entry_t *pt;
+ struct drm_vma_entry *pt;
struct vm_area_struct *vma;
#if defined(__i386__)
unsigned int pgprot;
@@ -497,7 +495,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
- for (pt = dev->vmalist; pt; pt = pt->next) {
+ list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
@@ -537,7 +535,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
static int drm_vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h
index e94297b751b8..f5466966081e 100644
--- a/drivers/char/drm/drm_sarea.h
+++ b/drivers/char/drm/drm_sarea.h
@@ -50,29 +50,35 @@
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
-typedef struct drm_sarea_drawable {
+struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
-} drm_sarea_drawable_t;
+};
/** SAREA frame */
-typedef struct drm_sarea_frame {
+struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
-} drm_sarea_frame_t;
+};
/** SAREA */
-typedef struct drm_sarea {
+struct drm_sarea {
/** first thing is always the DRM locking structure */
- drm_hw_lock_t lock;
+ struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
- drm_hw_lock_t drawable_lock;
- drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
- drm_sarea_frame_t frame; /**< frame */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
-} drm_sarea_t;
+};
+
+#ifndef __KERNEL__
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+#endif
#endif /* _DRM_SAREA_H_ */
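Because drm_sarea.h is exported to userspace, the typedef names are not deleted outright; they are re-established as aliases behind #ifndef __KERNEL__, so existing user-side code keeps compiling while the kernel switches to the struct tags. A tiny illustration (hypothetical declarations, not from the patch):

	#ifdef __KERNEL__
		struct drm_sarea *sarea;	/* kernel code now spells out the struct tag */
	#else
		drm_sarea_t *sarea;		/* unchanged userspace still sees the typedef */
	#endif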
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 06ef7ddbe67d..067d25daaf17 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -36,7 +36,7 @@
#define DEBUG_SCATTER 0
-void drm_sg_cleanup(drm_sg_mem_t * entry)
+void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
int i;
@@ -65,11 +65,11 @@ void drm_sg_cleanup(drm_sg_mem_t * entry)
int drm_sg_alloc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_scatter_gather_t __user *argp = (void __user *)arg;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_scatter_gather __user *argp = (void __user *)arg;
+ struct drm_scatter_gather request;
+ struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -201,16 +201,16 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
int drm_sg_free(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_scatter_gather request;
+ struct drm_sg_mem *entry;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
if (copy_from_user(&request,
- (drm_scatter_gather_t __user *) arg,
+ (struct drm_scatter_gather __user *) arg,
sizeof(request)))
return -EFAULT;
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index e15db6d6bea9..8421a93946d8 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -38,13 +38,13 @@
#include "drm_sman.h"
-typedef struct drm_owner_item {
- drm_hash_item_t owner_hash;
+struct drm_owner_item {
+ struct drm_hash_item owner_hash;
struct list_head sman_list;
struct list_head mem_blocks;
-} drm_owner_item_t;
+};
-void drm_sman_takedown(drm_sman_t * sman)
+void drm_sman_takedown(struct drm_sman * sman)
{
drm_ht_remove(&sman->user_hash_tab);
drm_ht_remove(&sman->owner_hash_tab);
@@ -56,12 +56,12 @@ void drm_sman_takedown(drm_sman_t * sman)
EXPORT_SYMBOL(drm_sman_takedown);
int
-drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order)
{
int ret = 0;
- sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm),
+ sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
DRM_MEM_MM);
if (!sman->mm) {
ret = -ENOMEM;
@@ -88,8 +88,8 @@ EXPORT_SYMBOL(drm_sman_init);
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
- drm_mm_t *mm = (drm_mm_t *) private;
- drm_mm_node_t *tmp;
+ struct drm_mm *mm = (struct drm_mm *) private;
+ struct drm_mm_node *tmp;
tmp = drm_mm_search_free(mm, size, alignment, 1);
if (!tmp) {
@@ -101,30 +101,30 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,
static void drm_sman_mm_free(void *private, void *ref)
{
- drm_mm_node_t *node = (drm_mm_node_t *) ref;
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
- drm_mm_t *mm = (drm_mm_t *) private;
+ struct drm_mm *mm = (struct drm_mm *) private;
drm_mm_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
- drm_mm_node_t *node = (drm_mm_node_t *) ref;
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
return node->start;
}
int
-drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
- drm_sman_mm_t *sman_mm;
- drm_mm_t *mm;
+ struct drm_sman_mm *sman_mm;
+ struct drm_mm *mm;
int ret;
BUG_ON(manager >= sman->num_managers);
@@ -153,8 +153,8 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
EXPORT_SYMBOL(drm_sman_set_range);
int
-drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
- drm_sman_mm_t * allocator)
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+ struct drm_sman_mm * allocator)
{
BUG_ON(manager >= sman->num_managers);
sman->mm[manager] = *allocator;
@@ -163,16 +163,16 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
}
EXPORT_SYMBOL(drm_sman_set_manager);
-static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
unsigned long owner)
{
int ret;
- drm_hash_item_t *owner_hash_item;
- drm_owner_item_t *owner_item;
+ struct drm_hash_item *owner_hash_item;
+ struct drm_owner_item *owner_item;
ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
if (!ret) {
- return drm_hash_entry(owner_hash_item, drm_owner_item_t,
+ return drm_hash_entry(owner_hash_item, struct drm_owner_item,
owner_hash);
}
@@ -194,14 +194,14 @@ out:
return NULL;
}
-drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager,
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
unsigned long size, unsigned alignment,
unsigned long owner)
{
void *tmp;
- drm_sman_mm_t *sman_mm;
- drm_owner_item_t *owner_item;
- drm_memblock_item_t *memblock;
+ struct drm_sman_mm *sman_mm;
+ struct drm_owner_item *owner_item;
+ struct drm_memblock_item *memblock;
BUG_ON(manager >= sman->num_managers);
@@ -246,9 +246,9 @@ out:
EXPORT_SYMBOL(drm_sman_alloc);
-static void drm_sman_free(drm_memblock_item_t *item)
+static void drm_sman_free(struct drm_memblock_item *item)
{
- drm_sman_t *sman = item->sman;
+ struct drm_sman *sman = item->sman;
list_del(&item->owner_list);
drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
@@ -256,40 +256,41 @@ static void drm_sman_free(drm_memblock_item_t *item)
drm_free(item, sizeof(*item), DRM_MEM_MM);
}
-int drm_sman_free_key(drm_sman_t *sman, unsigned int key)
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
- drm_hash_item_t *hash_item;
- drm_memblock_item_t *memblock_item;
+ struct drm_hash_item *hash_item;
+ struct drm_memblock_item *memblock_item;
if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
return -EINVAL;
- memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash);
+ memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+ user_hash);
drm_sman_free(memblock_item);
return 0;
}
EXPORT_SYMBOL(drm_sman_free_key);
-static void drm_sman_remove_owner(drm_sman_t *sman,
- drm_owner_item_t *owner_item)
+static void drm_sman_remove_owner(struct drm_sman *sman,
+ struct drm_owner_item *owner_item)
{
list_del(&owner_item->sman_list);
drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
}
-int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner)
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{
- drm_hash_item_t *hash_item;
- drm_owner_item_t *owner_item;
+ struct drm_hash_item *hash_item;
+ struct drm_owner_item *owner_item;
if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
return -1;
}
- owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
+ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
drm_sman_remove_owner(sman, owner_item);
return -1;
@@ -300,10 +301,10 @@ int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner)
EXPORT_SYMBOL(drm_sman_owner_clean);
-static void drm_sman_do_owner_cleanup(drm_sman_t *sman,
- drm_owner_item_t *owner_item)
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+ struct drm_owner_item *owner_item)
{
- drm_memblock_item_t *entry, *next;
+ struct drm_memblock_item *entry, *next;
list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
owner_list) {
@@ -312,28 +313,28 @@ static void drm_sman_do_owner_cleanup(drm_sman_t *sman,
drm_sman_remove_owner(sman, owner_item);
}
-void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner)
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{
- drm_hash_item_t *hash_item;
- drm_owner_item_t *owner_item;
+ struct drm_hash_item *hash_item;
+ struct drm_owner_item *owner_item;
if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
return;
}
- owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
+ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
drm_sman_do_owner_cleanup(sman, owner_item);
}
EXPORT_SYMBOL(drm_sman_owner_cleanup);
-void drm_sman_cleanup(drm_sman_t *sman)
+void drm_sman_cleanup(struct drm_sman *sman)
{
- drm_owner_item_t *entry, *next;
+ struct drm_owner_item *entry, *next;
unsigned int i;
- drm_sman_mm_t *sman_mm;
+ struct drm_sman_mm *sman_mm;
list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
drm_sman_do_owner_cleanup(sman, entry);
diff --git a/drivers/char/drm/drm_sman.h b/drivers/char/drm/drm_sman.h
index ddc732a1bf27..39a39fefeef1 100644
--- a/drivers/char/drm/drm_sman.h
+++ b/drivers/char/drm/drm_sman.h
@@ -50,7 +50,7 @@
* for memory management.
*/
-typedef struct drm_sman_mm {
+struct drm_sman_mm {
/* private info. If allocated, needs to be destroyed by the destroy
function */
void *private;
@@ -74,30 +74,30 @@ typedef struct drm_sman_mm {
"alloc" function */
unsigned long (*offset) (void *private, void *ref);
-} drm_sman_mm_t;
+};
-typedef struct drm_memblock_item {
+struct drm_memblock_item {
struct list_head owner_list;
- drm_hash_item_t user_hash;
+ struct drm_hash_item user_hash;
void *mm_info;
- drm_sman_mm_t *mm;
+ struct drm_sman_mm *mm;
struct drm_sman *sman;
-} drm_memblock_item_t;
+};
-typedef struct drm_sman {
- drm_sman_mm_t *mm;
+struct drm_sman {
+ struct drm_sman_mm *mm;
int num_managers;
- drm_open_hash_t owner_hash_tab;
- drm_open_hash_t user_hash_tab;
+ struct drm_open_hash owner_hash_tab;
+ struct drm_open_hash user_hash_tab;
struct list_head owner_items;
-} drm_sman_t;
+};
/*
* Take down a memory manager. This function should only be called after a
* successful init and after a call to drm_sman_cleanup.
*/
-extern void drm_sman_takedown(drm_sman_t * sman);
+extern void drm_sman_takedown(struct drm_sman * sman);
/*
* Allocate structures for a manager.
@@ -112,7 +112,7 @@ extern void drm_sman_takedown(drm_sman_t * sman);
*
*/
-extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
+extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
@@ -120,7 +120,7 @@ extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
* manager unless a customized allocator is used.
*/
-extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
+extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size);
/*
@@ -129,23 +129,23 @@ extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
* so it can be destroyed after this call.
*/
-extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger,
- drm_sman_mm_t * allocator);
+extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
+ struct drm_sman_mm * allocator);
/*
* Allocate a memory block. Alignment is not implemented yet.
*/
-extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman,
- unsigned int manager,
- unsigned long size,
- unsigned alignment,
- unsigned long owner);
+extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+ unsigned int manager,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long owner);
/*
* Free a memory block identified by its user hash key.
*/
-extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
+extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
/*
* returns 1 iff there are no stale memory blocks associated with this owner.
@@ -154,7 +154,7 @@ extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
* resources associated with owner.
*/
-extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner);
+extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with this owner. Note that this
@@ -164,13 +164,13 @@ extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner);
* is not going to be referenced anymore.
*/
-extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner);
+extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with the memory manager.
* See idling above.
*/
-extern void drm_sman_cleanup(drm_sman_t * sman);
+extern void drm_sman_cleanup(struct drm_sman * sman);
#endif
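The sman API keeps its call sequence through the rename; only the spelling of the types changes. A hedged usage sketch assembled from the prototypes above (error handling trimmed; the hash-table orders and the use of the file pointer as the owner cookie are illustrative assumptions, not requirements of the patch):

	/* Sketch, not part of the patch: one manager covering a single range. */
	struct drm_sman sman;
	struct drm_memblock_item *item;

	drm_sman_init(&sman, 1 /* managers */, 12, 8 /* hash orders */);
	drm_sman_set_range(&sman, 0, 0, mem_size);	/* manager 0 covers [0, mem_size) */

	item = drm_sman_alloc(&sman, 0, size, 0 /* alignment */,
			      (unsigned long) file_priv);
	if (item)	/* assumes drm_hash_item exposes the lookup key */
		drm_sman_free_key(&sman, item->user_hash.key);

	drm_sman_cleanup(&sman);
	drm_sman_takedown(&sman);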
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 19408adcc775..ee83ff9efed6 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -49,16 +49,21 @@ MODULE_PARM_DESC(debug, "Enable debug output");
module_param_named(cards_limit, drm_cards_limit, int, 0444);
module_param_named(debug, drm_debug, int, 0600);
-drm_head_t **drm_heads;
+struct drm_head **drm_heads;
struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
-static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
@@ -67,6 +72,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ idr_init(&dev->drw_idr);
+
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
@@ -76,12 +83,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
#endif
dev->irq = pdev->irq;
- dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
- if (dev->maplist == NULL)
- return -ENOMEM;
- INIT_LIST_HEAD(&dev->maplist->head);
if (drm_ht_create(&dev->map_hash, 12)) {
- drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
@@ -143,9 +145,9 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
* create the proc init entry via proc_init(). This routines assigns
* minor numbers to secondary heads of multi-headed cards
*/
-static int drm_get_head(drm_device_t * dev, drm_head_t * head)
+static int drm_get_head(struct drm_device * dev, struct drm_head * head)
{
- drm_head_t **heads = drm_heads;
+ struct drm_head **heads = drm_heads;
int ret;
int minor;
@@ -154,7 +156,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
for (minor = 0; minor < drm_cards_limit; minor++, heads++) {
if (!*heads) {
- *head = (drm_head_t) {
+ *head = (struct drm_head) {
.dev = dev,.device =
MKDEV(DRM_MAJOR, minor),.minor = minor,};
@@ -184,7 +186,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
err_g2:
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
err_g1:
- *head = (drm_head_t) {
+ *head = (struct drm_head) {
.dev = NULL};
return ret;
}
@@ -203,7 +205,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
{
- drm_device_t *dev;
+ struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
@@ -246,7 +248,7 @@ err_g1:
* "drm" data, otherwise unregisters the "drm" data, frees the dev list and
* unregisters the character device.
*/
-int drm_put_dev(drm_device_t * dev)
+int drm_put_dev(struct drm_device * dev)
{
DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
@@ -274,7 +276,7 @@ int drm_put_dev(drm_device_t * dev)
* last minor released.
*
*/
-int drm_put_head(drm_head_t * head)
+int drm_put_head(struct drm_head * head)
{
int minor = head->minor;
@@ -283,7 +285,7 @@ int drm_put_head(drm_head_t * head)
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
drm_sysfs_device_remove(head->dev_class);
- *head = (drm_head_t) {.dev = NULL};
+ *head = (struct drm_head) {.dev = NULL};
drm_heads[minor] = NULL;
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index cc8e2ebe128c..cf4349b00b07 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -80,7 +80,7 @@ void drm_sysfs_destroy(struct class *class)
static ssize_t show_dri(struct class_device *class_device, char *buf)
{
- drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
+ struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev;
if (dev->driver->dri_library_name)
return dev->driver->dri_library_name(dev, buf);
return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
@@ -104,7 +104,7 @@ static struct class_device_attribute class_device_attrs[] = {
* Note: the struct class passed to this function must have previously been
* created with a call to drm_sysfs_create().
*/
-struct class_device *drm_sysfs_device_add(struct class *cs, drm_head_t *head)
+struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head)
{
struct class_device *class_dev;
int i, j, err;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b5c5b9fa84c3..68e36e51ba0c 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -79,11 +79,11 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t *map = NULL;
- drm_map_list_t *r_list;
- drm_hash_item_t *hash;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map *map = NULL;
+ struct drm_map_list *r_list;
+ struct drm_hash_item *hash;
/*
* Find the right map
@@ -97,7 +97,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_nopage_error;
- r_list = drm_hash_entry(hash, drm_map_list_t, hash);
+ r_list = drm_hash_entry(hash, struct drm_map_list, hash);
map = r_list->map;
if (map && map->type == _DRM_AGP) {
@@ -116,7 +116,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
/*
* It's AGP memory - find the real physical page to map
*/
- for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+ list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
@@ -163,7 +163,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_map_t *map = (drm_map_t *) vma->vm_private_data;
+ struct drm_map *map = (struct drm_map *) vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
@@ -194,12 +194,11 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
*/
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *pt, *prev, *next;
- drm_map_t *map;
- drm_map_list_t *r_list;
- struct list_head *list;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_vma_entry *pt, *temp;
+ struct drm_map *map;
+ struct drm_map_list *r_list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -209,30 +208,22 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
- for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
- next = pt->next;
+ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
- if (prev) {
- prev->next = pt->next;
- } else {
- dev->vmalist = pt->next;
- }
+ list_del(&pt->head);
drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
- } else {
- prev = pt;
}
}
+
/* We were the only map that was found */
if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
/* Check to see if we are in the maplist; if we are not, then
* we delete this mapping's information.
*/
found_maps = 0;
- list = &dev->maplist->head;
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
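/*
 * Editorial sketch of the removal pattern above: deleting entries while
 * walking a list_head requires the _safe iterator, because list_del()
 * would otherwise invalidate the cursor.  Types and the kfree() cleanup
 * are illustrative; the driver itself uses drm_free() with its own
 * memory accounting.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct example_vma_entry {
	struct vm_area_struct *vma;
	struct list_head head;            /* linkage on the per-device vma list */
};

static void example_drop(struct list_head *vmalist, struct vm_area_struct *vma)
{
	struct example_vma_entry *pt, *tmp;

	list_for_each_entry_safe(pt, tmp, vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);      /* unlink before freeing */
			kfree(pt);
			break;
		}
	}
}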
@@ -283,9 +274,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
@@ -319,10 +310,10 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_map_t *map = (drm_map_t *) vma->vm_private_data;
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_sg_mem *entry = dev->sg;
unsigned long offset;
unsigned long map_offset;
unsigned long page_offset;
@@ -414,9 +405,9 @@ static struct vm_operations_struct drm_vm_sg_ops = {
*/
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *vma_entry;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@@ -425,16 +416,15 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
vma_entry->vma = vma;
- vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
- dev->vmalist = vma_entry;
+ list_add(&vma_entry->head, &dev->vmalist);
}
}
static void drm_vm_open(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_open_locked(vma);
@@ -451,22 +441,18 @@ static void drm_vm_open(struct vm_area_struct *vma)
*/
static void drm_vm_close(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *pt, *prev;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
- for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
+ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
- if (prev) {
- prev->next = pt->next;
- } else {
- dev->vmalist = pt->next;
- }
+ list_del(&pt->head);
drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
@@ -486,9 +472,9 @@ static void drm_vm_close(struct vm_area_struct *vma)
*/
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
- drm_device_dma_t *dma;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev;
+ struct drm_device_dma *dma;
unsigned long length = vma->vm_end - vma->vm_start;
dev = priv->head->dev;
@@ -526,7 +512,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-unsigned long drm_core_get_map_ofs(drm_map_t * map)
+unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
return map->offset;
}
@@ -559,11 +545,11 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
*/
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t *map = NULL;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map *map = NULL;
unsigned long offset = 0;
- drm_hash_item_t *hash;
+ struct drm_hash_item *hash;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -588,7 +574,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
- map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
+ map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
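/*
 * Editorial note on the hash lookup above: drm_hash_entry() is used like
 * container_of(), recovering the enclosing drm_map_list from a pointer to
 * its embedded drm_hash_item.  The definition shown here is an assumption
 * for illustration, not copied from the DRM headers.
 */
#include <linux/kernel.h>

#define example_hash_entry(ptr, type, member) container_of(ptr, type, member)

/* usage mirroring the hunk above:
 *   r_list = example_hash_entry(hash, struct drm_map_list, hash);
 */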
@@ -677,8 +663,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 603d17fd2d69..cb449999d0ef 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -45,16 +45,16 @@
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
-static drm_buf_t *i810_freelist_get(drm_device_t * dev)
+static struct drm_buf *i810_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
int used;
/* Linear search might not be the best solution */
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
@@ -70,7 +70,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t * dev)
* yet, the hardware updates in_use for us once it's on the ring buffer.
*/
-static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
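/*
 * Editorial sketch of the lockless freelist claim used by
 * i810_freelist_get()/i810_freelist_put(): cmpxchg() (from the arch
 * headers) installs the new state only when the slot still holds the
 * expected value and returns the previous contents, so exactly one caller
 * can win a buffer without taking a lock.  The state constants and helper
 * below are hypothetical, not the driver's I810_BUF_* values.
 */
#define EXAMPLE_BUF_FREE    2
#define EXAMPLE_BUF_CLAIMED 3

static int example_try_claim(int *in_use)
{
	/* non-zero only for the caller that saw the buffer still free */
	return cmpxchg(in_use, EXAMPLE_BUF_FREE, EXAMPLE_BUF_CLAIMED)
		== EXAMPLE_BUF_FREE;
}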
@@ -87,10 +87,10 @@ static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf)
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev;
drm_i810_private_t *dev_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
lock_kernel();
@@ -120,10 +120,10 @@ static const struct file_operations i810_buffer_fops = {
.fasync = drm_fasync,
};
-static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
+static int i810_map_buffer(struct drm_buf * buf, struct file *filp)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
@@ -152,7 +152,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
return retcode;
}
-static int i810_unmap_buffer(drm_buf_t * buf)
+static int i810_unmap_buffer(struct drm_buf * buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -172,10 +172,10 @@ static int i810_unmap_buffer(drm_buf_t * buf)
return retcode;
}
-static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d,
+static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
struct file *filp)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
int retcode = 0;
@@ -202,9 +202,9 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d,
return retcode;
}
-static int i810_dma_cleanup(drm_device_t * dev)
+static int i810_dma_cleanup(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
@@ -233,7 +233,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
dev->dev_private = NULL;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
@@ -243,7 +243,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
return 0;
}
-static int i810_wait_ring(drm_device_t * dev, int n)
+static int i810_wait_ring(struct drm_device * dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -276,7 +276,7 @@ static int i810_wait_ring(drm_device_t * dev, int n)
return iters;
}
-static void i810_kernel_lost_context(drm_device_t * dev)
+static void i810_kernel_lost_context(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -288,9 +288,9 @@ static void i810_kernel_lost_context(drm_device_t * dev)
ring->space += ring->Size;
}
-static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
+static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
int i;
@@ -301,7 +301,7 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
}
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->in_use = hw_status++;
@@ -323,16 +323,14 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
return 0;
}
-static int i810_dma_initialize(drm_device_t * dev,
+static int i810_dma_initialize(struct drm_device * dev,
drm_i810_private_t * dev_priv,
drm_i810_init_t * init)
{
- struct list_head *list;
-
+ struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {
@@ -478,8 +476,8 @@ static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg)
static int i810_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv;
drm_i810_init_t init;
int retcode = 0;
@@ -536,7 +534,7 @@ static int i810_dma_init(struct inode *inode, struct file *filp,
* Use 'volatile' & local var tmp to force the emitted values to be
* identical to the verified ones.
*/
-static void i810EmitContextVerified(drm_device_t * dev,
+static void i810EmitContextVerified(struct drm_device * dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -569,7 +567,7 @@ static void i810EmitContextVerified(drm_device_t * dev,
ADVANCE_LP_RING();
}
-static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code)
+static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -602,7 +600,7 @@ static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code)
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i810EmitDestVerified(drm_device_t * dev,
+static void i810EmitDestVerified(struct drm_device * dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -637,7 +635,7 @@ static void i810EmitDestVerified(drm_device_t * dev,
ADVANCE_LP_RING();
}
-static void i810EmitState(drm_device_t * dev)
+static void i810EmitState(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -668,14 +666,14 @@ static void i810EmitState(drm_device_t * dev)
/* need to verify
*/
-static void i810_dma_dispatch_clear(drm_device_t * dev, int flags,
+static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
@@ -743,12 +741,12 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags,
}
}
-static void i810_dma_dispatch_swap(drm_device_t * dev)
+static void i810_dma_dispatch_swap(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
@@ -789,13 +787,13 @@ static void i810_dma_dispatch_swap(drm_device_t * dev)
}
}
-static void i810_dma_dispatch_vertex(drm_device_t * dev,
- drm_buf_t * buf, int discard, int used)
+static void i810_dma_dispatch_vertex(struct drm_device * dev,
+ struct drm_buf * buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_clip_rect_t *box = sarea_priv->boxes;
+ struct drm_clip_rect *box = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
@@ -869,7 +867,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev,
}
}
-static void i810_dma_dispatch_flip(drm_device_t * dev)
+static void i810_dma_dispatch_flip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
@@ -916,7 +914,7 @@ static void i810_dma_dispatch_flip(drm_device_t * dev)
}
-static void i810_dma_quiescent(drm_device_t * dev)
+static void i810_dma_quiescent(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -935,10 +933,10 @@ static void i810_dma_quiescent(drm_device_t * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
-static int i810_flush_queue(drm_device_t * dev)
+static int i810_flush_queue(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, ret = 0;
RING_LOCALS;
@@ -954,7 +952,7 @@ static int i810_flush_queue(drm_device_t * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
@@ -970,9 +968,9 @@ static int i810_flush_queue(drm_device_t * dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(drm_device_t * dev, struct file *filp)
+static void i810_reclaim_buffers(struct drm_device * dev, struct file *filp)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
@@ -985,7 +983,7 @@ static void i810_reclaim_buffers(drm_device_t * dev, struct file *filp)
i810_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf->filp == filp && buf_priv) {
@@ -1003,8 +1001,8 @@ static void i810_reclaim_buffers(drm_device_t * dev, struct file *filp)
static int i810_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1015,9 +1013,9 @@ static int i810_flush_ioctl(struct inode *inode, struct file *filp,
static int i810_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1051,8 +1049,8 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp,
static int i810_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_clear_t clear;
if (copy_from_user
@@ -1074,8 +1072,8 @@ static int i810_clear_bufs(struct inode *inode, struct file *filp,
static int i810_swap_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
DRM_DEBUG("i810_swap_bufs\n");
@@ -1088,8 +1086,8 @@ static int i810_swap_bufs(struct inode *inode, struct file *filp,
static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1102,8 +1100,8 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int retcode = 0;
drm_i810_dma_t d;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
@@ -1123,7 +1121,7 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
- if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d)))
+ if (copy_to_user((void __user *) arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int)hw_status[5];
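/*
 * Editorial note on the cast change above: copy_to_user() takes a plain
 * void __user * destination, so the old cast to drm_dma_t __user * (a type
 * unrelated to the drm_i810_dma_t actually being copied) added only
 * confusion.  Prototype for reference:
 *
 *   unsigned long copy_to_user(void __user *to, const void *from,
 *                              unsigned long n);  /* returns bytes NOT copied */
 */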
@@ -1144,7 +1142,7 @@ static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
-static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
+static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1207,9 +1205,9 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
static int i810_dma_mc(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1238,8 +1236,8 @@ static int i810_dma_mc(struct inode *inode, struct file *filp,
static int i810_rstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
@@ -1248,8 +1246,8 @@ static int i810_rstatus(struct inode *inode, struct file *filp,
static int i810_ov0_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
drm_i810_overlay_t data;
@@ -1264,8 +1262,8 @@ static int i810_ov0_info(struct inode *inode, struct file *filp,
static int i810_fstatus(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1276,8 +1274,8 @@ static int i810_fstatus(struct inode *inode, struct file *filp,
static int i810_ov0_flip(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1290,7 +1288,7 @@ static int i810_ov0_flip(struct inode *inode, struct file *filp,
/* Not sure why this isn't set all the time:
*/
-static void i810_do_init_pageflip(drm_device_t * dev)
+static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1300,7 +1298,7 @@ static void i810_do_init_pageflip(drm_device_t * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i810_do_cleanup_pageflip(drm_device_t * dev)
+static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1315,8 +1313,8 @@ static int i810_do_cleanup_pageflip(drm_device_t * dev)
static int i810_flip_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -1330,7 +1328,7 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp,
return 0;
}
-int i810_driver_load(drm_device_t *dev, unsigned long flags)
+int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
/* i810 has 4 more counters */
dev->counters += 4;
@@ -1342,12 +1340,12 @@ int i810_driver_load(drm_device_t *dev, unsigned long flags)
return 0;
}
-void i810_driver_lastclose(drm_device_t * dev)
+void i810_driver_lastclose(struct drm_device * dev)
{
i810_dma_cleanup(dev);
}
-void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i810_driver_preclose(struct drm_device * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1357,12 +1355,12 @@ void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
-void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
{
i810_reclaim_buffers(dev, filp);
}
-int i810_driver_dma_quiescent(drm_device_t * dev)
+int i810_driver_dma_quiescent(struct drm_device * dev)
{
i810_dma_quiescent(dev);
return 0;
@@ -1399,7 +1397,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
* \returns
* A value of 1 is always returned to indicate every i810 is AGP.
*/
-int i810_driver_device_is_agp(drm_device_t * dev)
+int i810_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
diff --git a/drivers/char/drm/i810_drm.h b/drivers/char/drm/i810_drm.h
index 2deb925a94f3..614977dbce45 100644
--- a/drivers/char/drm/i810_drm.h
+++ b/drivers/char/drm/i810_drm.h
@@ -158,7 +158,7 @@ typedef struct _drm_i810_sarea {
unsigned int dirty;
unsigned int nbox;
- drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index e6df49f4928a..648833844c7f 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -77,8 +77,8 @@ typedef struct _drm_i810_ring_buffer {
} drm_i810_ring_buffer_t;
typedef struct drm_i810_private {
- drm_map_t *sarea_map;
- drm_map_t *mmio_map;
+ struct drm_map *sarea_map;
+ struct drm_map *mmio_map;
drm_i810_sarea_t *sarea_priv;
drm_i810_ring_buffer_t ring;
@@ -88,7 +88,7 @@ typedef struct drm_i810_private {
dma_addr_t dma_status_page;
- drm_buf_t *mmap_buffer;
+ struct drm_buf *mmap_buffer;
u32 front_di1, back_di1, zi1;
@@ -115,15 +115,15 @@ typedef struct drm_i810_private {
} drm_i810_private_t;
/* i810_dma.c */
-extern int i810_driver_dma_quiescent(drm_device_t * dev);
-extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+extern int i810_driver_dma_quiescent(struct drm_device * dev);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
struct file *filp);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(drm_device_t * dev);
-extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+extern void i810_driver_lastclose(struct drm_device * dev);
+extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
struct file *filp);
-extern int i810_driver_device_is_agp(drm_device_t * dev);
+extern int i810_driver_device_is_agp(struct drm_device * dev);
extern drm_ioctl_desc_t i810_ioctls[];
extern int i810_max_ioctl;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 3314a9fea9e5..dc20c1a7834e 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -47,16 +47,16 @@
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED 1
-static drm_buf_t *i830_freelist_get(drm_device_t * dev)
+static struct drm_buf *i830_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
int used;
/* Linear search might not be the best solution */
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
@@ -72,7 +72,7 @@ static drm_buf_t *i830_freelist_get(drm_device_t * dev)
* yet, the hardware updates in_use for us once it's on the ring buffer.
*/
-static int i830_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -89,10 +89,10 @@ static int i830_freelist_put(drm_device_t * dev, drm_buf_t * buf)
static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev;
drm_i830_private_t *dev_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_i830_buf_priv_t *buf_priv;
lock_kernel();
@@ -122,10 +122,10 @@ static const struct file_operations i830_buffer_fops = {
.fasync = drm_fasync,
};
-static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
+static int i830_map_buffer(struct drm_buf * buf, struct file *filp)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
@@ -156,7 +156,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
return retcode;
}
-static int i830_unmap_buffer(drm_buf_t * buf)
+static int i830_unmap_buffer(struct drm_buf * buf)
{
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -176,10 +176,10 @@ static int i830_unmap_buffer(drm_buf_t * buf)
return retcode;
}
-static int i830_dma_get_buffer(drm_device_t * dev, drm_i830_dma_t * d,
+static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
struct file *filp)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_i830_buf_priv_t *buf_priv;
int retcode = 0;
@@ -206,9 +206,9 @@ static int i830_dma_get_buffer(drm_device_t * dev, drm_i830_dma_t * d,
return retcode;
}
-static int i830_dma_cleanup(drm_device_t * dev)
+static int i830_dma_cleanup(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
@@ -238,7 +238,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
dev->dev_private = NULL;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
drm_core_ioremapfree(&buf_priv->map, dev);
@@ -247,7 +247,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
return 0;
}
-int i830_wait_ring(drm_device_t * dev, int n, const char *caller)
+int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -281,7 +281,7 @@ int i830_wait_ring(drm_device_t * dev, int n, const char *caller)
return iters;
}
-static void i830_kernel_lost_context(drm_device_t * dev)
+static void i830_kernel_lost_context(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -296,9 +296,9 @@ static void i830_kernel_lost_context(drm_device_t * dev)
dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
-static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
+static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int my_idx = 36;
u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
int i;
@@ -309,7 +309,7 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
}
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->in_use = hw_status++;
@@ -330,16 +330,15 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
return 0;
}
-static int i830_dma_initialize(drm_device_t * dev,
+static int i830_dma_initialize(struct drm_device * dev,
drm_i830_private_t * dev_priv,
drm_i830_init_t * init)
{
- struct list_head *list;
+ struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i830_private_t));
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {
@@ -455,8 +454,8 @@ static int i830_dma_initialize(drm_device_t * dev,
static int i830_dma_init(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv;
drm_i830_init_t init;
int retcode = 0;
@@ -490,7 +489,7 @@ static int i830_dma_init(struct inode *inode, struct file *filp,
/* Most efficient way to verify state for the i830 is as it is
* emitted. Non-conformant state is silently dropped.
*/
-static void i830EmitContextVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -535,7 +534,7 @@ static void i830EmitContextVerified(drm_device_t * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitTexVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -569,7 +568,7 @@ static void i830EmitTexVerified(drm_device_t * dev, unsigned int *code)
printk("rejected packet %x\n", code[0]);
}
-static void i830EmitTexBlendVerified(drm_device_t * dev,
+static void i830EmitTexBlendVerified(struct drm_device * dev,
unsigned int *code, unsigned int num)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -594,7 +593,7 @@ static void i830EmitTexBlendVerified(drm_device_t * dev,
ADVANCE_LP_RING();
}
-static void i830EmitTexPalette(drm_device_t * dev,
+static void i830EmitTexPalette(struct drm_device * dev,
unsigned int *palette, int number, int is_shared)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -621,7 +620,7 @@ static void i830EmitTexPalette(drm_device_t * dev,
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i830EmitDestVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
unsigned int tmp;
@@ -682,7 +681,7 @@ static void i830EmitDestVerified(drm_device_t * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitStippleVerified(drm_device_t * dev, unsigned int *code)
+static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -693,7 +692,7 @@ static void i830EmitStippleVerified(drm_device_t * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitState(drm_device_t * dev)
+static void i830EmitState(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -796,7 +795,7 @@ static void i830EmitState(drm_device_t * dev)
* Performance monitoring functions
*/
-static void i830_fill_box(drm_device_t * dev,
+static void i830_fill_box(struct drm_device * dev,
int x, int y, int w, int h, int r, int g, int b)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -834,7 +833,7 @@ static void i830_fill_box(drm_device_t * dev,
ADVANCE_LP_RING();
}
-static void i830_cp_performance_boxes(drm_device_t * dev)
+static void i830_cp_performance_boxes(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -879,7 +878,7 @@ static void i830_cp_performance_boxes(drm_device_t * dev)
dev_priv->sarea_priv->perf_boxes = 0;
}
-static void i830_dma_dispatch_clear(drm_device_t * dev, int flags,
+static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
unsigned int clear_color,
unsigned int clear_zval,
unsigned int clear_depthmask)
@@ -887,7 +886,7 @@ static void i830_dma_dispatch_clear(drm_device_t * dev, int flags,
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = dev_priv->cpp;
int i;
@@ -974,12 +973,12 @@ static void i830_dma_dispatch_clear(drm_device_t * dev, int flags,
}
}
-static void i830_dma_dispatch_swap(drm_device_t * dev)
+static void i830_dma_dispatch_swap(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = dev_priv->cpp;
int i;
@@ -1044,7 +1043,7 @@ static void i830_dma_dispatch_swap(drm_device_t * dev)
}
}
-static void i830_dma_dispatch_flip(drm_device_t * dev)
+static void i830_dma_dispatch_flip(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1087,13 +1086,13 @@ static void i830_dma_dispatch_flip(drm_device_t * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static void i830_dma_dispatch_vertex(drm_device_t * dev,
- drm_buf_t * buf, int discard, int used)
+static void i830_dma_dispatch_vertex(struct drm_device * dev,
+ struct drm_buf * buf, int discard, int used)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_clip_rect_t *box = sarea_priv->boxes;
+ struct drm_clip_rect *box = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
@@ -1199,7 +1198,7 @@ static void i830_dma_dispatch_vertex(drm_device_t * dev,
}
}
-static void i830_dma_quiescent(drm_device_t * dev)
+static void i830_dma_quiescent(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1216,10 +1215,10 @@ static void i830_dma_quiescent(drm_device_t * dev)
i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}
-static int i830_flush_queue(drm_device_t * dev)
+static int i830_flush_queue(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, ret = 0;
RING_LOCALS;
@@ -1233,7 +1232,7 @@ static int i830_flush_queue(drm_device_t * dev)
i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
@@ -1249,9 +1248,9 @@ static int i830_flush_queue(drm_device_t * dev)
}
/* Must be called with the lock held */
-static void i830_reclaim_buffers(drm_device_t * dev, struct file *filp)
+static void i830_reclaim_buffers(struct drm_device * dev, struct file *filp)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
@@ -1264,7 +1263,7 @@ static void i830_reclaim_buffers(drm_device_t * dev, struct file *filp)
i830_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
if (buf->filp == filp && buf_priv) {
@@ -1282,8 +1281,8 @@ static void i830_reclaim_buffers(drm_device_t * dev, struct file *filp)
static int i830_flush_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1294,9 +1293,9 @@ static int i830_flush_ioctl(struct inode *inode, struct file *filp,
static int i830_dma_vertex(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
@@ -1328,8 +1327,8 @@ static int i830_dma_vertex(struct inode *inode, struct file *filp,
static int i830_clear_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_clear_t clear;
if (copy_from_user
@@ -1352,8 +1351,8 @@ static int i830_clear_bufs(struct inode *inode, struct file *filp,
static int i830_swap_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
DRM_DEBUG("i830_swap_bufs\n");
@@ -1365,7 +1364,7 @@ static int i830_swap_bufs(struct inode *inode, struct file *filp,
/* Not sure why this isn't set all the time:
*/
-static void i830_do_init_pageflip(drm_device_t * dev)
+static void i830_do_init_pageflip(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1375,7 +1374,7 @@ static void i830_do_init_pageflip(drm_device_t * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i830_do_cleanup_pageflip(drm_device_t * dev)
+static int i830_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1390,8 +1389,8 @@ static int i830_do_cleanup_pageflip(drm_device_t * dev)
static int i830_flip_bufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -1408,8 +1407,8 @@ static int i830_flip_bufs(struct inode *inode, struct file *filp,
static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
@@ -1422,8 +1421,8 @@ static int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int retcode = 0;
drm_i830_dma_t d;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
@@ -1444,7 +1443,7 @@ static int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
current->pid, retcode, d.granted);
- if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d)))
+ if (copy_to_user((void __user *) arg, &d, sizeof(d)))
return -EFAULT;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1467,8 +1466,8 @@ static int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
static int i830_getparam(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_getparam_t param;
int value;
@@ -1501,8 +1500,8 @@ static int i830_getparam(struct inode *inode, struct file *filp,
static int i830_setparam(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_setparam_t param;
@@ -1526,7 +1525,7 @@ static int i830_setparam(struct inode *inode, struct file *filp,
return 0;
}
-int i830_driver_load(drm_device_t *dev, unsigned long flags)
+int i830_driver_load(struct drm_device *dev, unsigned long flags)
{
/* i830 has 4 more counters */
dev->counters += 4;
@@ -1538,12 +1537,12 @@ int i830_driver_load(drm_device_t *dev, unsigned long flags)
return 0;
}
-void i830_driver_lastclose(drm_device_t * dev)
+void i830_driver_lastclose(struct drm_device * dev)
{
i830_dma_cleanup(dev);
}
-void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i830_driver_preclose(struct drm_device * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1553,12 +1552,12 @@ void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
-void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
{
i830_reclaim_buffers(dev, filp);
}
-int i830_driver_dma_quiescent(drm_device_t * dev)
+int i830_driver_dma_quiescent(struct drm_device * dev)
{
i830_dma_quiescent(dev);
return 0;
@@ -1594,7 +1593,7 @@ int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
* \returns
* A value of 1 is always returned to indicate every i8xx is AGP.
*/
-int i830_driver_device_is_agp(drm_device_t * dev)
+int i830_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h
index 66dd75027967..968a6d9f9dcb 100644
--- a/drivers/char/drm/i830_drm.h
+++ b/drivers/char/drm/i830_drm.h
@@ -191,7 +191,7 @@ typedef struct _drm_i830_sarea {
unsigned int dirty;
unsigned int nbox;
- drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index e91f94afb4bb..ddda67956dea 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -84,8 +84,8 @@ typedef struct _drm_i830_ring_buffer {
} drm_i830_ring_buffer_t;
typedef struct drm_i830_private {
- drm_map_t *sarea_map;
- drm_map_t *mmio_map;
+ struct drm_map *sarea_map;
+ struct drm_map *mmio_map;
drm_i830_sarea_t *sarea_priv;
drm_i830_ring_buffer_t ring;
@@ -95,7 +95,7 @@ typedef struct drm_i830_private {
dma_addr_t dma_status_page;
- drm_buf_t *mmap_buffer;
+ struct drm_buf *mmap_buffer;
u32 front_di1, back_di1, zi1;
@@ -132,16 +132,16 @@ extern int i830_irq_wait(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(drm_device_t * dev);
-extern void i830_driver_irq_postinstall(drm_device_t * dev);
-extern void i830_driver_irq_uninstall(drm_device_t * dev);
+extern void i830_driver_irq_preinstall(struct drm_device * dev);
+extern void i830_driver_irq_postinstall(struct drm_device * dev);
+extern void i830_driver_irq_uninstall(struct drm_device * dev);
extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void i830_driver_lastclose(drm_device_t * dev);
-extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
+extern void i830_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern void i830_driver_lastclose(struct drm_device * dev);
+extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
struct file *filp);
-extern int i830_driver_dma_quiescent(drm_device_t * dev);
-extern int i830_driver_device_is_agp(drm_device_t * dev);
+extern int i830_driver_dma_quiescent(struct drm_device * dev);
+extern int i830_driver_device_is_agp(struct drm_device * dev);
#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
@@ -180,7 +180,7 @@ extern int i830_driver_device_is_agp(drm_device_t * dev);
I830_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-extern int i830_wait_ring(drm_device_t * dev, int n, const char *caller);
+extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index 5841f7674956..a1b5c63c3c3e 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -35,7 +35,7 @@
irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u16 temp;
@@ -53,7 +53,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int i830_emit_irq(drm_device_t * dev)
+static int i830_emit_irq(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -70,7 +70,7 @@ static int i830_emit_irq(drm_device_t * dev)
return atomic_read(&dev_priv->irq_emitted);
}
-static int i830_wait_irq(drm_device_t * dev, int irq_nr)
+static int i830_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
@@ -117,8 +117,8 @@ static int i830_wait_irq(drm_device_t * dev, int irq_nr)
int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_emit_t emit;
int result;
@@ -149,8 +149,8 @@ int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t irqwait;
@@ -168,7 +168,7 @@ int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd,
/* drm_dma.h hooks
*/
-void i830_driver_irq_preinstall(drm_device_t * dev)
+void i830_driver_irq_preinstall(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
@@ -180,14 +180,14 @@ void i830_driver_irq_preinstall(drm_device_t * dev)
init_waitqueue_head(&dev_priv->irq_queue);
}
-void i830_driver_irq_postinstall(drm_device_t * dev)
+void i830_driver_irq_postinstall(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
-void i830_driver_irq_uninstall(drm_device_t * dev)
+void i830_driver_irq_uninstall(struct drm_device * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index ea52740af4f6..3359cc2b9736 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -47,7 +47,7 @@
* the head pointer changes, so that EBUSY only happens if the ring
* actually stalls for (eg) 3 seconds.
*/
-int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
+int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
@@ -73,7 +73,7 @@ int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
return DRM_ERR(EBUSY);
}
-void i915_kernel_lost_context(drm_device_t * dev)
+void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
@@ -88,7 +88,7 @@ void i915_kernel_lost_context(drm_device_t * dev)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
-static int i915_dma_cleanup(drm_device_t * dev)
+static int i915_dma_cleanup(struct drm_device * dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
@@ -126,13 +126,13 @@ static int i915_dma_cleanup(drm_device_t * dev)
return 0;
}
-static int i915_initialize(drm_device_t * dev,
+static int i915_initialize(struct drm_device * dev,
drm_i915_private_t * dev_priv,
drm_i915_init_t * init)
{
memset(dev_priv, 0, sizeof(drm_i915_private_t));
- DRM_GETSAREA();
+ dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
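/*
 * Editorial sketch: DRM_GETSAREA() was an open-coded scan of dev->maplist
 * for the _DRM_SHM mapping flagged _DRM_CONTAINS_LOCK; drm_getsarea() is
 * assumed here to wrap the same search, mirroring the loops converted in
 * i810_dma_initialize()/i830_dma_initialize() earlier in this patch.  The
 * helper below is an illustrative equivalent, not the core implementation.
 */
static struct drm_map *example_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head)
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK))
			return entry->map;
	return NULL;
}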
@@ -211,7 +211,7 @@ static int i915_initialize(drm_device_t * dev,
return 0;
}
-static int i915_dma_resume(drm_device_t * dev)
+static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -357,7 +357,7 @@ static int validate_cmd(int cmd)
return ret;
}
-static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
@@ -396,12 +396,12 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
return 0;
}
-static int i915_emit_box(drm_device_t * dev,
- drm_clip_rect_t __user * boxes,
+static int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t box;
+ struct drm_clip_rect box;
RING_LOCALS;
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
@@ -439,7 +439,7 @@ static int i915_emit_box(drm_device_t * dev,
* emit. For now, do it in both places:
*/
-static void i915_emit_breadcrumb(drm_device_t *dev)
+static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -457,7 +457,7 @@ static void i915_emit_breadcrumb(drm_device_t *dev)
ADVANCE_LP_RING();
}
-static int i915_dispatch_cmdbuffer(drm_device_t * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
drm_i915_cmdbuffer_t * cmd)
{
int nbox = cmd->num_cliprects;
@@ -489,11 +489,11 @@ static int i915_dispatch_cmdbuffer(drm_device_t * dev,
return 0;
}
-static int i915_dispatch_batchbuffer(drm_device_t * dev,
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t __user *boxes = batch->cliprects;
+ struct drm_clip_rect __user *boxes = batch->cliprects;
int nbox = batch->num_cliprects;
int i = 0, count;
RING_LOCALS;
@@ -535,7 +535,7 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
return 0;
}
-static int i915_dispatch_flip(drm_device_t * dev)
+static int i915_dispatch_flip(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -583,7 +583,7 @@ static int i915_dispatch_flip(drm_device_t * dev)
return 0;
}
-static int i915_quiescent(drm_device_t * dev)
+static int i915_quiescent(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -625,7 +625,7 @@ static int i915_batchbuffer(DRM_IOCTL_ARGS)
if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
batch.num_cliprects *
- sizeof(drm_clip_rect_t)))
+ sizeof(struct drm_clip_rect)))
return DRM_ERR(EFAULT);
ret = i915_dispatch_batchbuffer(dev, &batch);
@@ -655,7 +655,7 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
if (cmdbuf.num_cliprects &&
DRM_VERIFYAREA_READ(cmdbuf.cliprects,
cmdbuf.num_cliprects *
- sizeof(drm_clip_rect_t))) {
+ sizeof(struct drm_clip_rect))) {
DRM_ERROR("Fault accessing cliprects\n");
return DRM_ERR(EFAULT);
}
@@ -792,7 +792,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
return 0;
}
-int i915_driver_load(drm_device_t *dev, unsigned long flags)
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
/* i915 has 4 more counters */
dev->counters += 4;
@@ -804,7 +804,7 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
return 0;
}
-void i915_driver_lastclose(drm_device_t * dev)
+void i915_driver_lastclose(struct drm_device * dev)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -813,7 +813,7 @@ void i915_driver_lastclose(drm_device_t * dev)
i915_dma_cleanup(dev);
}
-void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i915_driver_preclose(struct drm_device * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -854,7 +854,7 @@ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
* \returns
* A value of 1 is always returned to indicate that every i9x5 is AGP.
*/
-int i915_driver_device_is_agp(drm_device_t * dev)
+int i915_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
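Every hunk in i915_dma.c above is the same mechanical substitution: the deprecated drm_device_t and drm_clip_rect_t typedefs are spelled out as their struct tags, with no change in behavior. A minimal standalone sketch of the pattern (the hook names are hypothetical, not taken from this patch):

	struct drm_device;                        /* opaque, as in the DRM headers */
	typedef struct drm_device drm_device_t;   /* old spelling being phased out */

	/* Before the patch: handlers take the typedef. */
	static int old_style_hook(drm_device_t *dev)
	{
		return dev != 0;
	}

	/* After the patch: the same handler spelled with the struct tag. */
	static int new_style_hook(struct drm_device *dev)
	{
		return dev != 0;
	}

	int main(void)
	{
		/* Both prototypes name the same type, so callers are unaffected. */
		return old_style_hook(0) & new_style_hook(0);
	}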
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 7b7b68b96f31..05c66cf03a9e 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -64,7 +64,7 @@ typedef struct _drm_i915_init {
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
- drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1];
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
@@ -170,7 +170,7 @@ typedef struct _drm_i915_batchbuffer {
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
- drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
@@ -182,7 +182,7 @@ typedef struct _drm_i915_cmdbuffer {
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
- drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irq's:
@@ -259,7 +259,7 @@ typedef struct drm_i915_vblank_pipe {
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
- drm_vblank_seq_type_t seqtype;
+ enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 85e323acb95d..fd918565f4e5 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -120,11 +120,11 @@ extern drm_ioctl_desc_t i915_ioctls[];
extern int i915_max_ioctl;
/* i915_dma.c */
-extern void i915_kernel_lost_context(drm_device_t * dev);
+extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern void i915_driver_lastclose(drm_device_t * dev);
-extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern int i915_driver_device_is_agp(drm_device_t * dev);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -132,12 +132,12 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_irq_emit(DRM_IOCTL_ARGS);
extern int i915_irq_wait(DRM_IOCTL_ARGS);
-extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
-extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(drm_device_t * dev);
-extern void i915_driver_irq_postinstall(drm_device_t * dev);
-extern void i915_driver_irq_uninstall(drm_device_t * dev);
+extern void i915_driver_irq_preinstall(struct drm_device * dev);
+extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
extern int i915_vblank_swap(DRM_IOCTL_ARGS);
@@ -148,7 +148,7 @@ extern int i915_mem_free(DRM_IOCTL_ARGS);
extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(drm_device_t * dev,
+extern void i915_mem_release(struct drm_device * dev,
DRMFILE filp, struct mem_block *heap);
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
@@ -188,7 +188,7 @@ extern void i915_mem_release(drm_device_t * dev,
I915_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index b92062a239f1..4b4b2ce89863 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -42,7 +42,7 @@
*
* This function will be called with the HW lock held.
*/
-static void i915_vblank_tasklet(drm_device_t *dev)
+static void i915_vblank_tasklet(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -50,7 +50,7 @@ static void i915_vblank_tasklet(drm_device_t *dev)
int nhits, nrects, slice[2], upper[2], lower[2], i;
unsigned counter[2] = { atomic_read(&dev->vbl_received),
atomic_read(&dev->vbl_received2) };
- drm_drawable_info_t *drw;
+ struct drm_drawable_info *drw;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
u32 cpp = dev_priv->cpp;
u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
@@ -95,7 +95,7 @@ static void i915_vblank_tasklet(drm_device_t *dev)
list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_cmp =
list_entry(hit, drm_i915_vbl_swap_t, head);
- drm_drawable_info_t *drw_cmp =
+ struct drm_drawable_info *drw_cmp =
drm_get_drawable_info(dev, swap_cmp->drw_id);
if (drw_cmp &&
@@ -160,7 +160,7 @@ static void i915_vblank_tasklet(drm_device_t *dev)
list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
- drm_clip_rect_t *rect;
+ struct drm_clip_rect *rect;
int num_rects, pipe;
unsigned short top, bottom;
@@ -211,7 +211,7 @@ static void i915_vblank_tasklet(drm_device_t *dev)
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
@@ -257,7 +257,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int i915_emit_irq(drm_device_t * dev)
+static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -283,7 +283,7 @@ static int i915_emit_irq(drm_device_t * dev)
return dev_priv->counter;
}
-static int i915_wait_irq(drm_device_t * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
@@ -309,7 +309,7 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
return ret;
}
-static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
+static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
atomic_t *counter)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -331,12 +331,12 @@ static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
}
-int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
-int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
+int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
@@ -389,7 +389,7 @@ int i915_irq_wait(DRM_IOCTL_ARGS)
return i915_wait_irq(dev, irqwait.irq_seq);
}
-static void i915_enable_interrupt (drm_device_t *dev)
+static void i915_enable_interrupt (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 flag;
@@ -569,7 +569,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
/* drm_dma.h hooks
*/
-void i915_driver_irq_preinstall(drm_device_t * dev)
+void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -578,7 +578,7 @@ void i915_driver_irq_preinstall(drm_device_t * dev)
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
-void i915_driver_irq_postinstall(drm_device_t * dev)
+void i915_driver_irq_postinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -592,7 +592,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
-void i915_driver_irq_uninstall(drm_device_t * dev)
+void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 52c67324df58..50b4bacef0e0 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -43,11 +43,11 @@
* block to allocate, and the ring is drained prior to allocations --
* in other words allocation is expensive.
*/
-static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
+static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_tex_region_t *list;
+ struct drm_tex_region *list;
unsigned shift, nr;
unsigned start;
unsigned end;
@@ -208,7 +208,7 @@ static int init_heap(struct mem_block **heap, int start, int size)
/* Free all blocks associated with the releasing file.
*/
-void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
+void i915_mem_release(struct drm_device * dev, DRMFILE filp, struct mem_block *heap)
{
struct mem_block *p;
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index c2a4bac14521..9c73a6e3861b 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -46,7 +46,7 @@
#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
-static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup);
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
/* ================================================================
* Engine control
@@ -224,7 +224,7 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
-static void mga_freelist_print(drm_device_t * dev)
+static void mga_freelist_print(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -245,10 +245,10 @@ static void mga_freelist_print(drm_device_t * dev)
}
#endif
-static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
+static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_freelist_t *entry;
int i;
@@ -291,7 +291,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
return 0;
}
-static void mga_freelist_cleanup(drm_device_t * dev)
+static void mga_freelist_cleanup(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -311,10 +311,10 @@ static void mga_freelist_cleanup(drm_device_t * dev)
#if 0
/* FIXME: Still needed?
*/
-static void mga_freelist_reset(drm_device_t * dev)
+static void mga_freelist_reset(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
int i;
@@ -326,7 +326,7 @@ static void mga_freelist_reset(drm_device_t * dev)
}
#endif
-static drm_buf_t *mga_freelist_get(drm_device_t * dev)
+static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *next;
@@ -359,7 +359,7 @@ static drm_buf_t *mga_freelist_get(drm_device_t * dev)
return NULL;
}
-int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -393,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
* DMA initialization, cleanup
*/
-int mga_driver_load(drm_device_t * dev, unsigned long flags)
+int mga_driver_load(struct drm_device * dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
@@ -434,7 +434,7 @@ int mga_driver_load(drm_device_t * dev, unsigned long flags)
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
-static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
+static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t *const dev_priv =
@@ -445,11 +445,11 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
- drm_buf_desc_t req;
- drm_agp_mode_t mode;
- drm_agp_info_t info;
- drm_agp_buffer_t agp_req;
- drm_agp_binding_t bind_req;
+ struct drm_buf_desc req;
+ struct drm_agp_mode mode;
+ struct drm_agp_info info;
+ struct drm_agp_buffer agp_req;
+ struct drm_agp_binding bind_req;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
@@ -548,10 +548,10 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
}
{
- drm_map_list_t *_entry;
+ struct drm_map_list *_entry;
unsigned long agp_token = 0;
- list_for_each_entry(_entry, &dev->maplist->head, head) {
+ list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
}
@@ -588,7 +588,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
return 0;
}
#else
-static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
+static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
return -EINVAL;
@@ -609,7 +609,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
-static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
+static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t *const dev_priv =
@@ -618,7 +618,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
unsigned int primary_size;
unsigned int bin_count;
int err;
- drm_buf_desc_t req;
+ struct drm_buf_desc req;
if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
@@ -699,7 +699,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
return 0;
}
-static int mga_do_dma_bootstrap(drm_device_t * dev,
+static int mga_do_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
@@ -793,7 +793,7 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS)
return err;
}
-static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
+static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -823,8 +823,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
dev_priv->texture_offset = init->texture_offset[0];
dev_priv->texture_size = init->texture_size[0];
- DRM_GETSAREA();
-
+ dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
return DRM_ERR(EINVAL);
@@ -934,7 +933,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
return 0;
}
-static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
int err = 0;
DRM_DEBUG("\n");
@@ -963,8 +962,8 @@ static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
if (dev_priv->used_new_dma_init) {
#if __OS_HAS_AGP
if (dev_priv->agp_handle != 0) {
- drm_agp_binding_t unbind_req;
- drm_agp_buffer_t free_req;
+ struct drm_agp_binding unbind_req;
+ struct drm_agp_buffer free_req;
unbind_req.handle = dev_priv->agp_handle;
drm_agp_unbind(dev, &unbind_req);
@@ -1041,11 +1040,11 @@ int mga_dma_flush(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- drm_lock_t lock;
+ struct drm_lock lock;
LOCK_TEST_WITH_RETURN(dev, filp);
- DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data,
+ DRM_COPY_FROM_USER_IOCTL(lock, (struct drm_lock __user *) data,
sizeof(lock));
DRM_DEBUG("%s%s%s\n",
@@ -1087,9 +1086,9 @@ int mga_dma_reset(DRM_IOCTL_ARGS)
* DMA buffer management
*/
-static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
+static int mga_dma_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
@@ -1114,10 +1113,10 @@ static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
int mga_dma_buffers(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- drm_dma_t __user *argp = (void __user *)data;
- drm_dma_t d;
+ struct drm_dma __user *argp = (void __user *)data;
+ struct drm_dma d;
int ret = 0;
LOCK_TEST_WITH_RETURN(dev, filp);
@@ -1156,7 +1155,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
/**
* Called just before the module is unloaded.
*/
-int mga_driver_unload(drm_device_t * dev)
+int mga_driver_unload(struct drm_device * dev)
{
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
@@ -1167,12 +1166,12 @@ int mga_driver_unload(drm_device_t * dev)
/**
* Called when the last opener of the device is closed.
*/
-void mga_driver_lastclose(drm_device_t * dev)
+void mga_driver_lastclose(struct drm_device * dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
-int mga_driver_dma_quiescent(drm_device_t * dev)
+int mga_driver_dma_quiescent(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);
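Alongside the typedef conversion, mga_do_init_dma() above swaps the DRM_GETSAREA() macro for an explicit dev_priv->sarea = drm_getsarea(dev) lookup followed by a NULL check. A userland mock of that lookup-and-check shape (the types and drm_getsarea() here are stand-ins for the DRM ones, not the kernel implementation):

	#include <stdio.h>

	struct drm_map { void *handle; };
	struct drm_device { struct drm_map *sarea_map; };
	struct mock_private { struct drm_map *sarea; };

	/* Stand-in for drm_getsarea(): return the shared-memory SAREA map
	 * registered on the device, or NULL when none exists. */
	static struct drm_map *drm_getsarea(struct drm_device *dev)
	{
		return dev->sarea_map;
	}

	static int do_init(struct drm_device *dev, struct mock_private *dev_priv)
	{
		dev_priv->sarea = drm_getsarea(dev);
		if (!dev_priv->sarea) {
			fprintf(stderr, "failed to find sarea!\n");
			return -1;	/* the driver returns DRM_ERR(EINVAL) here */
		}
		return 0;
	}

	int main(void)
	{
		struct drm_map map = { &map };
		struct drm_device dev = { &map };
		struct mock_private priv;

		return do_init(&dev, &priv);
	}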
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
index 44d1293e2947..944b50a5ff24 100644
--- a/drivers/char/drm/mga_drm.h
+++ b/drivers/char/drm/mga_drm.h
@@ -181,7 +181,7 @@ typedef struct _drm_mga_sarea {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
@@ -202,7 +202,7 @@ typedef struct _drm_mga_sarea {
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
- drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
@@ -216,7 +216,7 @@ typedef struct _drm_mga_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
- drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
+ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index be49dbb9ec3f..5572939fc7d1 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -36,7 +36,7 @@
#include "drm_pciids.h"
-static int mga_driver_device_is_agp(drm_device_t * dev);
+static int mga_driver_device_is_agp(struct drm_device * dev);
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
@@ -118,7 +118,7 @@ MODULE_LICENSE("GPL and additional rights");
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
-static int mga_driver_device_is_agp(drm_device_t * dev)
+static int mga_driver_device_is_agp(struct drm_device * dev)
{
const struct pci_dev *const pdev = dev->pdev;
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 6b0c53193506..49253affa475 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -65,7 +65,7 @@ typedef struct drm_mga_freelist {
struct drm_mga_freelist *next;
struct drm_mga_freelist *prev;
drm_mga_age_t age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
} drm_mga_freelist_t;
typedef struct {
@@ -157,10 +157,10 @@ extern int mga_dma_init(DRM_IOCTL_ARGS);
extern int mga_dma_flush(DRM_IOCTL_ARGS);
extern int mga_dma_reset(DRM_IOCTL_ARGS);
extern int mga_dma_buffers(DRM_IOCTL_ARGS);
-extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
-extern int mga_driver_unload(drm_device_t * dev);
-extern void mga_driver_lastclose(drm_device_t * dev);
-extern int mga_driver_dma_quiescent(drm_device_t * dev);
+extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
+extern int mga_driver_unload(struct drm_device * dev);
+extern void mga_driver_lastclose(struct drm_device * dev);
+extern int mga_driver_dma_quiescent(struct drm_device * dev);
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
@@ -168,7 +168,7 @@ extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
-extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf);
+extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
/* mga_warp.c */
extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
@@ -176,12 +176,12 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
-extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mga_driver_irq_preinstall(drm_device_t * dev);
-extern void mga_driver_irq_postinstall(drm_device_t * dev);
-extern void mga_driver_irq_uninstall(drm_device_t * dev);
+extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index eb9644024172..9302cb8f0f83 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -37,7 +37,7 @@
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int status;
int handled = 0;
@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@@ -96,7 +96,7 @@ int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
+int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
@@ -115,7 +115,7 @@ int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-void mga_driver_irq_preinstall(drm_device_t * dev)
+void mga_driver_irq_preinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -125,7 +125,7 @@ void mga_driver_irq_preinstall(drm_device_t * dev)
MGA_WRITE(MGA_ICLEAR, ~0);
}
-void mga_driver_irq_postinstall(drm_device_t * dev)
+void mga_driver_irq_postinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -135,7 +135,7 @@ void mga_driver_irq_postinstall(drm_device_t * dev)
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
}
-void mga_driver_irq_uninstall(drm_device_t * dev)
+void mga_driver_irq_uninstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 2837e669183a..d448b0aef33c 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -42,7 +42,7 @@
*/
static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
- drm_clip_rect_t * box)
+ struct drm_clip_rect * box)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -480,12 +480,12 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
*
*/
-static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
+static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
int i;
DMA_LOCALS;
@@ -500,7 +500,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
ADVANCE_DMA();
for (i = 0; i < nbox; i++) {
- drm_clip_rect_t *box = &pbox[i];
+ struct drm_clip_rect *box = &pbox[i];
u32 height = box->y2 - box->y1;
DRM_DEBUG(" from=%d,%d to=%d,%d\n",
@@ -568,12 +568,12 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
FLUSH_DMA();
}
-static void mga_dma_dispatch_swap(drm_device_t * dev)
+static void mga_dma_dispatch_swap(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
int i;
DMA_LOCALS;
@@ -598,7 +598,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev)
MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);
for (i = 0; i < nbox; i++) {
- drm_clip_rect_t *box = &pbox[i];
+ struct drm_clip_rect *box = &pbox[i];
u32 height = box->y2 - box->y1;
u32 start = box->y1 * dev_priv->front_pitch;
@@ -622,7 +622,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev)
DRM_DEBUG("%s... done.\n", __FUNCTION__);
}
-static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -669,7 +669,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
FLUSH_DMA();
}
-static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
+static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
unsigned int start, unsigned int end)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -718,7 +718,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
/* This copies a 64 byte aligned agp region to the framebuffer with a
* standard blit, the ioctl needs to do checking.
*/
-static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
+static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
unsigned int dstorg, unsigned int length)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -766,12 +766,12 @@ static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
FLUSH_DMA();
}
-static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
+static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
u32 scandir = 0, i;
DMA_LOCALS;
@@ -880,8 +880,8 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_vertex_t vertex;
@@ -920,8 +920,8 @@ static int mga_dma_indices(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_indices_t indices;
@@ -959,9 +959,9 @@ static int mga_dma_indices(DRM_IOCTL_ARGS)
static int mga_dma_iload(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_iload_t iload;
DRM_DEBUG("\n");
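The dispatch helpers in mga_state.c (clear, swap, blit) all walk the SAREA clip rectangles with the new struct drm_clip_rect tag, emitting one hardware operation per box. A self-contained sketch of that loop shape, assuming the usual drm_clip_rect layout of four unsigned short coordinates; the printf stands in for the per-box DMA emission:

	#include <stdio.h>

	struct drm_clip_rect {
		unsigned short x1, y1, x2, y2;
	};

	static void dispatch_boxes(const struct drm_clip_rect *pbox, int nbox)
	{
		int i;

		for (i = 0; i < nbox; i++) {
			const struct drm_clip_rect *box = &pbox[i];
			unsigned int height = box->y2 - box->y1;

			/* the real driver emits a clear/blit per box here */
			printf("from=%u,%u to=%u,%u (height %u)\n",
			       box->x1, box->y1, box->x2, box->y2, height);
		}
	}

	int main(void)
	{
		struct drm_clip_rect boxes[2] = { { 0, 0, 640, 480 },
						  { 0, 480, 640, 600 } };

		dispatch_boxes(boxes, 2);
		return 0;
	}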
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 1014602c43a7..b163ed09bd81 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -81,7 +81,7 @@ static u32 r128_cce_microcode[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
-static int R128_READ_PLL(drm_device_t * dev, int addr)
+static int R128_READ_PLL(struct drm_device * dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -271,7 +271,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
/* Reset the engine. This will stop the CCE if it is running.
*/
-static int r128_do_engine_reset(drm_device_t * dev)
+static int r128_do_engine_reset(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@@ -308,7 +308,7 @@ static int r128_do_engine_reset(drm_device_t * dev)
return 0;
}
-static void r128_cce_init_ring_buffer(drm_device_t * dev,
+static void r128_cce_init_ring_buffer(struct drm_device * dev,
drm_r128_private_t * dev_priv)
{
u32 ring_start;
@@ -347,7 +347,7 @@ static void r128_cce_init_ring_buffer(drm_device_t * dev,
R128_WRITE(R128_BUS_CNTL, tmp);
}
-static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
+static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
{
drm_r128_private_t *dev_priv;
@@ -456,8 +456,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->span_offset >> 5));
- DRM_GETSAREA();
-
+ dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
@@ -585,7 +584,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
return 0;
}
-int r128_do_cleanup_cce(drm_device_t * dev)
+int r128_do_cleanup_cce(struct drm_device * dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -770,11 +769,11 @@ int r128_fullscreen(DRM_IOCTL_ARGS)
#define R128_BUFFER_FREE 0
#if 0
-static int r128_freelist_init(drm_device_t * dev)
+static int r128_freelist_init(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_freelist_t *entry;
int i;
@@ -816,12 +815,12 @@ static int r128_freelist_init(drm_device_t * dev)
}
#endif
-static drm_buf_t *r128_freelist_get(drm_device_t * dev)
+static struct drm_buf *r128_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i, t;
/* FIXME: Optimize -- use freelist code */
@@ -854,13 +853,13 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev)
return NULL;
}
-void r128_freelist_reset(drm_device_t * dev)
+void r128_freelist_reset(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
@@ -887,10 +886,10 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
return DRM_ERR(EBUSY);
}
-static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
+static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev, struct drm_dma * d)
{
int i;
- drm_buf_t *buf;
+ struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = r128_freelist_get(dev);
@@ -914,10 +913,10 @@ static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
int r128_cce_buffers(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
- drm_dma_t d;
+ struct drm_dma __user *argp = (void __user *)data;
+ struct drm_dma d;
LOCK_TEST_WITH_RETURN(dev, filp);
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 6e8af313f2b4..e94a39c6e327 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -153,7 +153,7 @@ typedef struct drm_r128_sarea {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@@ -161,7 +161,7 @@ typedef struct drm_r128_sarea {
unsigned int last_frame;
unsigned int last_dispatch;
- drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
+ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
unsigned int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 9086835686dc..72249fb2fd1c 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -57,7 +57,7 @@
typedef struct drm_r128_freelist {
unsigned int age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
} drm_r128_freelist_t;
@@ -118,7 +118,7 @@ typedef struct drm_r128_private {
drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr;
drm_local_map_t *agp_textures;
- drm_ati_pcigart_info gart_info;
+ struct drm_ati_pcigart_info gart_info;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
@@ -142,21 +142,21 @@ extern int r128_engine_reset(DRM_IOCTL_ARGS);
extern int r128_fullscreen(DRM_IOCTL_ARGS);
extern int r128_cce_buffers(DRM_IOCTL_ARGS);
-extern void r128_freelist_reset(drm_device_t * dev);
+extern void r128_freelist_reset(struct drm_device * dev);
extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
-extern int r128_do_cleanup_cce(drm_device_t * dev);
+extern int r128_do_cleanup_cce(struct drm_device * dev);
-extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
-extern void r128_driver_irq_preinstall(drm_device_t * dev);
-extern void r128_driver_irq_postinstall(drm_device_t * dev);
-extern void r128_driver_irq_uninstall(drm_device_t * dev);
-extern void r128_driver_lastclose(drm_device_t * dev);
-extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void r128_driver_irq_preinstall(struct drm_device * dev);
+extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern void r128_driver_irq_uninstall(struct drm_device * dev);
+extern void r128_driver_lastclose(struct drm_device * dev);
+extern void r128_driver_preclose(struct drm_device * dev, DRMFILE filp);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index 87f8ca2b0685..c76fdca7662d 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -37,7 +37,7 @@
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
int status;
@@ -54,7 +54,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@@ -72,7 +72,7 @@ int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-void r128_driver_irq_preinstall(drm_device_t * dev)
+void r128_driver_irq_preinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@@ -82,7 +82,7 @@ void r128_driver_irq_preinstall(drm_device_t * dev)
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
-void r128_driver_irq_postinstall(drm_device_t * dev)
+void r128_driver_irq_postinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@@ -90,7 +90,7 @@ void r128_driver_irq_postinstall(drm_device_t * dev)
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
}
-void r128_driver_irq_uninstall(drm_device_t * dev)
+void r128_driver_irq_uninstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index 17b11e7d8f32..7b334fb7d649 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -38,7 +38,7 @@
*/
static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
- drm_clip_rect_t * boxes, int count)
+ struct drm_clip_rect * boxes, int count)
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
@@ -352,13 +352,13 @@ static void r128_print_dirty(const char *msg, unsigned int flags)
(flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
-static void r128_cce_dispatch_clear(drm_device_t * dev,
+static void r128_cce_dispatch_clear(struct drm_device * dev,
drm_r128_clear_t * clear)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
unsigned int flags = clear->flags;
int i;
RING_LOCALS;
@@ -458,12 +458,12 @@ static void r128_cce_dispatch_clear(drm_device_t * dev,
}
}
-static void r128_cce_dispatch_swap(drm_device_t * dev)
+static void r128_cce_dispatch_swap(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -524,7 +524,7 @@ static void r128_cce_dispatch_swap(drm_device_t * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_flip(drm_device_t * dev)
+static void r128_cce_dispatch_flip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -567,7 +567,7 @@ static void r128_cce_dispatch_flip(drm_device_t * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -637,8 +637,8 @@ static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
sarea_priv->nbox = 0;
}
-static void r128_cce_dispatch_indirect(drm_device_t * dev,
- drm_buf_t * buf, int start, int end)
+static void r128_cce_dispatch_indirect(struct drm_device * dev,
+ struct drm_buf * buf, int start, int end)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -692,8 +692,8 @@ static void r128_cce_dispatch_indirect(drm_device_t * dev,
dev_priv->sarea_priv->last_dispatch++;
}
-static void r128_cce_dispatch_indices(drm_device_t * dev,
- drm_buf_t * buf,
+static void r128_cce_dispatch_indices(struct drm_device * dev,
+ struct drm_buf * buf,
int start, int end, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -777,11 +777,11 @@ static void r128_cce_dispatch_indices(drm_device_t * dev,
}
static int r128_cce_dispatch_blit(DRMFILE filp,
- drm_device_t * dev, drm_r128_blit_t * blit)
+ struct drm_device * dev, drm_r128_blit_t * blit)
{
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
u32 *data;
int dword_shift, dwords;
@@ -887,7 +887,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
* have hardware stencil support.
*/
-static int r128_cce_dispatch_write_span(drm_device_t * dev,
+static int r128_cce_dispatch_write_span(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -983,7 +983,7 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
return 0;
}
-static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
+static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1105,7 +1105,7 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
return 0;
}
-static int r128_cce_dispatch_read_span(drm_device_t * dev,
+static int r128_cce_dispatch_read_span(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1148,7 +1148,7 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev,
return 0;
}
-static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
+static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1220,7 +1220,7 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
* Polygon stipple
*/
-static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
+static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
@@ -1269,7 +1269,7 @@ static int r128_cce_clear(DRM_IOCTL_ARGS)
return 0;
}
-static int r128_do_init_pageflip(drm_device_t * dev)
+static int r128_do_init_pageflip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1288,7 +1288,7 @@ static int r128_do_init_pageflip(drm_device_t * dev)
return 0;
}
-static int r128_do_cleanup_pageflip(drm_device_t * dev)
+static int r128_do_cleanup_pageflip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1354,8 +1354,8 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_vertex_t vertex;
@@ -1413,8 +1413,8 @@ static int r128_cce_indices(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_indices_t elts;
int count;
@@ -1483,7 +1483,7 @@ static int r128_cce_indices(DRM_IOCTL_ARGS)
static int r128_cce_blit(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_blit_t blit;
int ret;
@@ -1571,8 +1571,8 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_indirect_t indirect;
#if 0
@@ -1675,7 +1675,7 @@ static int r128_getparam(DRM_IOCTL_ARGS)
return 0;
}
-void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void r128_driver_preclose(struct drm_device * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1685,7 +1685,7 @@ void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
-void r128_driver_lastclose(drm_device_t * dev)
+void r128_driver_lastclose(struct drm_device * dev)
{
r128_do_cleanup_cce(dev);
}
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 032a022ec6a8..4e5aca6ba59a 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -55,7 +55,7 @@ static const int r300_cliprect_cntl[4] = {
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
- drm_clip_rect_t box;
+ struct drm_clip_rect box;
int nr;
int i;
RING_LOCALS;
@@ -148,15 +148,16 @@ void r300_init_reg_flags(void)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
- ADD_RANGE(0x2080, 1);
+ ADD_RANGE(R300_VAP_CNTL, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
- ADD_RANGE(0x2140, 1);
+ ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
- ADD_RANGE(0x221C, 1);
- ADD_RANGE(0x2220, 4);
- ADD_RANGE(0x2288, 1);
+ ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
+ ADD_RANGE(R300_VAP_CLIP_X_0, 4);
+ ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
+ ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
@@ -168,13 +169,13 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
- ADD_RANGE(0x4238, 1);
+ ADD_RANGE(R300_RE_UNK4238, 1);
ADD_RANGE(0x4260, 3);
- ADD_RANGE(0x4274, 4);
- ADD_RANGE(0x4288, 5);
- ADD_RANGE(0x42A0, 1);
+ ADD_RANGE(R300_RE_SHADE, 4);
+ ADD_RANGE(R300_RE_POLYGON_MODE, 5);
+ ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
- ADD_RANGE(0x42B4, 1);
+ ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
@@ -190,22 +191,22 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
- ADD_RANGE(0x4BC0, 1);
- ADD_RANGE(0x4BC8, 3);
+ ADD_RANGE(R300_RE_FOG_STATE, 1);
+ ADD_RANGE(R300_FOG_COLOR_R, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
- ADD_RANGE(0x4E10, 3);
+ ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
- ADD_RANGE(0x4F10, 4);
+ ADD_RANGE(R300_RB3D_ZSTENCIL_FORMAT, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);
@@ -224,7 +225,7 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
- ADD_RANGE(0x4f18, 1);
+ ADD_RANGE(R300_RB3D_ZCACHE_CTLSTAT, 1);
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
@@ -692,9 +693,9 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
BEGIN_RING(6);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- OUT_RING(0xa);
- OUT_RING(CP_PACKET0(0x4f18, 0));
- OUT_RING(0x3);
+ OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
+ OUT_RING(CP_PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ OUT_RING(R300_RB3D_ZCACHE_UNKNOWN_03);
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
OUT_RING(0x0);
ADVANCE_RING();
@@ -705,7 +706,7 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
-static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@@ -766,8 +767,8 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(2);
- OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
- OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
+ OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
+ OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
ADVANCE_RING();
return 0;
@@ -778,14 +779,14 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
-int r300_do_cp_cmdbuf(drm_device_t *dev,
+int r300_do_cp_cmdbuf(struct drm_device *dev,
DRMFILE filp,
- drm_file_t *filp_priv,
+ struct drm_file *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = NULL;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
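The r300_init_reg_flags() hunks above replace raw register offsets in the ADD_RANGE() table with names introduced by the r300_reg.h hunk below, e.g. 0x2080 becomes R300_VAP_CNTL and 0x2140 becomes R300_VAP_CNTL_STATUS. A trivial standalone check of that correspondence, using only the two offsets visible in the header hunk (the other renamed offsets follow the same pattern):

	#include <assert.h>

	#define R300_VAP_CNTL		0x2080	/* replaces ADD_RANGE(0x2080, 1) */
	#define R300_VAP_CNTL_STATUS	0x2140	/* replaces ADD_RANGE(0x2140, 1) */

	int main(void)
	{
		assert(R300_VAP_CNTL == 0x2080);
		assert(R300_VAP_CNTL_STATUS == 0x2140);
		return 0;
	}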
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index ecda760ae8c0..3ae57ecc7afd 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -47,12 +47,12 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
/*
-This file contains registers and constants for the R300. They have been
-found mostly by examining command buffers captured using glxtest, as well
-as by extrapolating some known registers and constants from the R200.
-
-I am fairly certain that they are correct unless stated otherwise in comments.
-*/
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
#define R300_SE_VPORT_XSCALE 0x1D98
#define R300_SE_VPORT_XOFFSET 0x1D9C
@@ -61,49 +61,60 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_SE_VPORT_ZSCALE 0x1DA8
#define R300_SE_VPORT_ZOFFSET 0x1DAC
-/* This register is written directly and also starts data section in many 3d CP_PACKET3's */
-#define R300_VAP_VF_CNTL 0x2084
-# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
-# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
-# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
-# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
-# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
-# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
-# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
-# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
-# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
-# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
-# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
-# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
-
-# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
- /* State based - direct writes to registers trigger vertex generation */
-# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
-# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
-# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
-# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
-
- /* I don't think I saw these three used.. */
-# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
-# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
-# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
-
- /* index size - when not set the indices are assumed to be 16 bit */
-# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
- /* number of vertices */
-# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code from Christoph Brill (It's a guess!)
+ */
+#define R300_VAP_CNTL 0x2080
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
+#define R300_VAP_VF_CNTL 0x2084
+# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
+# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
+# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
+# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
+# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
+# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
+# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
+# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
+
+# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
+ /* State based - direct writes to registers trigger vertex
+ generation */
+# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
+# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
+
+ /* I don't think I saw these three used.. */
+# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
+# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
+# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
+
+ /* index size - when not set the indices are assumed to be 16 bit */
+# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
+ /* number of vertices */
+# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
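/* Illustration only (not part of this patch): a minimal sketch of how a
 * VAP_VF_CNTL word for a triangle draw might be composed from the bits above;
 * the helper name is invented and the PRIM_WALK choice is just one plausible
 * mode.
 */
static unsigned int r300_example_vf_cntl(unsigned int num_vertices)
{
	/* Independent triangles, vertex data embedded in the packet. */
	return R300_VAP_VF_CNTL__PRIM_TRIANGLES
	     | R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED
	     | (num_vertices << R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT);
}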
/* BEGIN: Wild guesses */
#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
-# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
-# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
-# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
-# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
+# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
+ /* each of the following is 3 bits wide, specifies number
+ of components */
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
@@ -112,7 +123,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
-/* END */
+/* END: Wild guesses */
#define R300_SE_VTE_CNTL 0x20b0
# define R300_VPORT_X_SCALE_ENA 0x00000001
@@ -128,43 +139,54 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_VTX_ST_DENORMALIZED 0x00001000
/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+#define R300_VAP_CNTL_STATUS 0x2140
+# define R300_VC_NO_SWAP (0 << 0)
+# define R300_VC_16BIT_SWAP (1 << 0)
+# define R300_VC_32BIT_SWAP (2 << 0)
+# define R300_VAP_TCL_BYPASS (1 << 8)
+
/* gap */
+
/* Where do we get our vertex data?
-//
-// Vertex data either comes either from immediate mode registers or from
-// vertex arrays.
-// There appears to be no mixed mode (though we can force the pitch of
-// vertex arrays to 0, effectively reusing the same element over and over
-// again).
-//
-// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
-// if these registers influence vertex array processing.
-//
-// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
-//
-// In both cases, vertex attributes are then passed through INPUT_ROUTE.
-
-// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
-// into the vertex processor's input registers.
-// The first word routes the first input, the second word the second, etc.
-// The corresponding input is routed into the register with the given index.
-// The list is ended by a word with INPUT_ROUTE_END set.
-//
-// Always set COMPONENTS_4 in immediate mode. */
+ *
+ * Vertex data comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
#define R300_VAP_INPUT_ROUTE_0_0 0x2150
# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
-# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
+# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
-# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
+# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
# define R300_VAP_INPUT_ROUTE_END (1 << 13)
-# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
-# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
-# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
-# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
+# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
#define R300_VAP_INPUT_ROUTE_0_1 0x2154
#define R300_VAP_INPUT_ROUTE_0_2 0x2158
#define R300_VAP_INPUT_ROUTE_0_3 0x215C
@@ -174,10 +196,12 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_VAP_INPUT_ROUTE_0_7 0x216C
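/* Illustration only: one plausible way to build an INPUT_ROUTE_0 word for a
 * four-component float attribute routed to vertex-processor input 'dst_reg',
 * optionally terminating the route list.  The helper is invented, and note
 * that R300_INPUT_ROUTE_FLOAT is itself marked as a guess above.
 */
static unsigned int r300_example_input_route_0(unsigned int dst_reg, int last)
{
	unsigned int route = R300_INPUT_ROUTE_COMPONENTS_4
			   | R300_INPUT_ROUTE_FLOAT
			   | (dst_reg << R300_VAP_INPUT_ROUTE_IDX_SHIFT);

	if (last)
		route |= R300_VAP_INPUT_ROUTE_END;
	return route;
}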
/* gap */
+
/* Notes:
-// - always set up to produce at least two attributes:
-// if vertex program uses only position, fglrx will set normal, too
-// - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
+ * - always set up to produce at least two attributes:
+ * if vertex program uses only position, fglrx will set normal, too
+ * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
#define R300_VAP_INPUT_CNTL_0 0x2180
# define R300_INPUT_CNTL_0_COLOR 0x00000001
#define R300_VAP_INPUT_CNTL_1 0x2184
@@ -186,20 +210,22 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_INPUT_CNTL_COLOR 0x00000004
# define R300_INPUT_CNTL_TC0 0x00000400
# define R300_INPUT_CNTL_TC1 0x00000800
-# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
-# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
-# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
-# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
-# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
-# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
+# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
+# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
+# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
+# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
+# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
+# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
/* gap */
+
/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
-// are set to a swizzling bit pattern, other words are 0.
-//
-// In immediate mode, the pattern is always set to xyzw. In vertex array
-// mode, the swizzling pattern is e.g. used to set zw components in texture
-// coordinates with only tweo components. */
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
# define R300_INPUT_ROUTE_SELECT_X 0
# define R300_INPUT_ROUTE_SELECT_Y 1
@@ -208,11 +234,11 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_INPUT_ROUTE_SELECT_ZERO 4
# define R300_INPUT_ROUTE_SELECT_ONE 5
# define R300_INPUT_ROUTE_SELECT_MASK 7
-# define R300_INPUT_ROUTE_X_SHIFT 0
-# define R300_INPUT_ROUTE_Y_SHIFT 3
-# define R300_INPUT_ROUTE_Z_SHIFT 6
-# define R300_INPUT_ROUTE_W_SHIFT 9
-# define R300_INPUT_ROUTE_ENABLE (15 << 12)
+# define R300_INPUT_ROUTE_X_SHIFT 0
+# define R300_INPUT_ROUTE_Y_SHIFT 3
+# define R300_INPUT_ROUTE_Z_SHIFT 6
+# define R300_INPUT_ROUTE_W_SHIFT 9
+# define R300_INPUT_ROUTE_ENABLE (15 << 12)
#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
@@ -221,79 +247,107 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
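/* Illustration only: the plain xyzw swizzle word described above for an
 * INPUT_ROUTE_1 slot.  R300_INPUT_ROUTE_SELECT_Z and _W (values 2 and 3) are
 * assumed from the surrounding SELECT_* list; they fall outside this hunk.
 */
static unsigned int r300_example_swizzle_xyzw(void)
{
	return (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT)
	     | (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT)
	     | (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT)
	     | (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT)
	     | R300_INPUT_ROUTE_ENABLE;
}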
-/* END */
+/* END: Vertex data assembly */
/* gap */
-/* BEGIN: Upload vertex program and data
-// The programmable vertex shader unit has a memory bank of unknown size
-// that can be written to in 16 byte units by writing the address into
-// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
-//
-// Pointers into the memory bank are always in multiples of 16 bytes.
-//
-// The memory bank is divided into areas with fixed meaning.
-//
-// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
-// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
-// whereas the difference between known addresses suggests size 512.
-//
-// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
-// Native reported limits and the VPI layout suggest size 256, whereas
-// difference between known addresses suggests size 512.
-//
-// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
-// floating point pointsize. The exact purpose of this state is uncertain,
-// as there is also the R300_RE_POINTSIZE register.
-//
-// Multiple vertex programs and parameter sets can be loaded at once,
-// which could explain the size discrepancy. */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
# define R300_PVS_UPLOAD_PROGRAM 0x00000000
# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
+
/* gap */
+
#define R300_VAP_PVS_UPLOAD_DATA 0x2208
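/* Illustration only: a rough sketch of the upload sequence described above
 * (set the bank address once, then stream DWORDs through UPLOAD_DATA).  The
 * write_reg callback stands in for whatever register write path the driver
 * really uses; instructions are 4 DWORDs each.
 */
static void r300_example_upload_pvs(void (*write_reg)(unsigned int reg,
						       unsigned int val),
				    const unsigned int *instr,
				    unsigned int ndwords)
{
	unsigned int i;

	write_reg(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM);
	for (i = 0; i < ndwords; i++)
		write_reg(R300_VAP_PVS_UPLOAD_DATA, instr[i]);
}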
-/* END */
+
+/* END: Upload vertex program and data */
/* gap */
+
/* I do not know the purpose of this register. However, I do know that
-// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
-// for normal rendering. */
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
#define R300_VAP_UNKNOWN_221C 0x221C
# define R300_221C_NORMAL 0x00000000
# define R300_221C_CLEAR 0x0001C000
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0 0x2220
+#define R300_VAP_CLIP_X_1 0x2224
+#define R300_VAP_CLIP_Y_0 0x2228
+#define R300_VAP_CLIP_Y_1 0x2230
+
/* gap */
+
/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
-// rendering commands and overwriting vertex program parameters.
-// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
-// avoids bugs caused by still running shaders reading bad data from memory. */
-#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
/* Absolutely no clue what this register is about. */
#define R300_VAP_UNKNOWN_2288 0x2288
-# define R300_2288_R300 0x00750000 /* -- nh */
-# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
+# define R300_2288_R300 0x00750000 /* -- nh */
+# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
/* gap */
+
/* Addresses are relative to the vertex program instruction area of the
-// memory bank. PROGRAM_END points to the last instruction of the active
-// program
-//
-// The meaning of the two UNKNOWN fields is obviously not known. However,
-// experiments so far have shown that both *must* point to an instruction
-// inside the vertex program, otherwise the GPU locks up.
-// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
-// CNTL_1_UNKNOWN points to instruction where last write to position takes place.
-// Most likely this is used to ignore rest of the program in cases where group of verts arent visible.
-// For some reason this "section" is sometimes accepted other instruction that have
-// no relationship with position calculations.
-*/
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last
+ * write to position takes place.
+ *
+ * Most likely this is used to ignore the rest of the program in cases
+ * where a group of verts isn't visible. For some reason this "section"
+ * sometimes also accepts other instructions that have no relationship
+ * with position calculations.
+ */
#define R300_VAP_PVS_CNTL_1 0x22D0
# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
# define R300_PVS_CNTL_1_POS_END_SHIFT 10
# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
-/* Addresses are relative to the vertex program parameters area. */
+/* Addresses are relative to the vertex program parameters area. */
#define R300_VAP_PVS_CNTL_2 0x22D4
# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
@@ -302,23 +356,26 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
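/* Illustration only, assuming the semantics guessed in the comments above:
 * packing PVS_CNTL_1/2 for a program that starts at instruction 0, ends at
 * 'prog_end' and writes position for the last time at 'pos_end'; the helper
 * and its parameter names are invented.
 */
static void r300_example_pvs_cntl(unsigned int *cntl1, unsigned int *cntl2,
				  unsigned int prog_end, unsigned int pos_end,
				  unsigned int param_count)
{
	*cntl1 = (0 << R300_PVS_CNTL_1_PROGRAM_START_SHIFT)
	       | (pos_end << R300_PVS_CNTL_1_POS_END_SHIFT)
	       | (prog_end << R300_PVS_CNTL_1_PROGRAM_END_SHIFT);
	*cntl2 = (0 << R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT)
	       | (param_count << R300_PVS_CNTL_2_PARAM_COUNT_SHIFT);
}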
/* The entire range from 0x2300 to 0x2AC inclusive seems to be used for
-// immediate vertices */
+ * immediate vertices
+ */
#define R300_VAP_VTX_COLOR_R 0x2464
#define R300_VAP_VTX_COLOR_G 0x2468
#define R300_VAP_VTX_COLOR_B 0x246C
-#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
#define R300_VAP_VTX_POS_0_Y_1 0x2494
-#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
-#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
#define R300_VAP_VTX_POS_0_Y_2 0x24A4
#define R300_VAP_VTX_POS_0_Z_2 0x24A8
-#define R300_VAP_VTX_END_OF_PKT 0x24AC /* write 0 to indicate end of packet? */
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT 0x24AC
/* gap */
/* These are values from r300_reg/r300_reg.h - they are known to be correct
- and are here so we can use one register file instead of several
- - Vladimir */
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
@@ -341,14 +398,16 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
/* UNK30 seems to enable point to quad transformation on textures
- (or something closely related to that).
- This bit is rather fatal at the time being due to lackings at pixel shader side */
+ * (or something closely related to that).
+ * This bit is rather fatal for the time being due to shortcomings on the
+ * pixel shader side
+ */
#define R300_GB_ENABLE 0x4008
# define R300_GB_POINT_STUFF_ENABLE (1<<0)
# define R300_GB_LINE_STUFF_ENABLE (1<<1)
# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
-# define R300_GB_UNK30 (1<<30)
+# define R300_GB_UNK31 (1<<31)
/* each of the following is 2 bits wide */
#define R300_GB_TEX_REPLICATE 0
#define R300_GB_TEX_ST 1
@@ -383,11 +442,13 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
# define R300_GB_MSPOS1__MSBD1 24
+
#define R300_GB_TILE_CONFIG 0x4018
# define R300_GB_TILE_ENABLE (1<<0)
# define R300_GB_TILE_PIPE_COUNT_RV300 0
# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
+# define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1)
# define R300_GB_TILE_SIZE_8 0
# define R300_GB_TILE_SIZE_16 (1<<4)
# define R300_GB_TILE_SIZE_32 (2<<4)
@@ -442,17 +503,18 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_GB_W_SELECT_1 (1<<4)
#define R300_GB_AA_CONFIG 0x4020
+# define R300_AA_DISABLE 0x00
# define R300_AA_ENABLE 0x01
# define R300_AA_SUBSAMPLES_2 0
# define R300_AA_SUBSAMPLES_3 (1<<1)
# define R300_AA_SUBSAMPLES_4 (2<<1)
# define R300_AA_SUBSAMPLES_6 (3<<1)
-/* END */
-
/* gap */
+
/* Zero to flush caches. */
#define R300_TX_CNTL 0x4100
+#define R300_TX_FLUSH 0x0
/* The upper enable bits are guessed, based on fglrx reported limits. */
#define R300_TX_ENABLE 0x4104
@@ -474,24 +536,25 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_ENABLE_15 (1 << 15)
/* The pointsize is given in multiples of 6. The pointsize can be
-// enormous: Clear() renders a single point that fills the entire
-// framebuffer. */
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
#define R300_RE_POINTSIZE 0x421C
# define R300_POINTSIZE_Y_SHIFT 0
-# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
+# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
# define R300_POINTSIZE_X_SHIFT 16
-# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
+# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
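/* Illustration only: encoding a square point of 'size' pixels in the
 * "multiples of 6" format noted above; whether both X and Y are normally
 * programmed to the same value is an assumption.
 */
static unsigned int r300_example_pointsize(unsigned int size)
{
	unsigned int fixed = size * 6;

	return (fixed << R300_POINTSIZE_X_SHIFT)
	     | (fixed << R300_POINTSIZE_Y_SHIFT);
}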
/* The line width is given in multiples of 6.
- In default mode lines are classified as vertical lines.
- HO: horizontal
- VE: vertical or horizontal
- HO & VE: no classification
-*/
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
#define R300_RE_LINE_CNT 0x4234
# define R300_LINESIZE_SHIFT 0
-# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
+# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
# define R300_LINE_CNT_HO (1 << 16)
# define R300_LINE_CNT_VE (1 << 17)
@@ -499,6 +562,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
/* Some sort of scale or clamp value for texcoordless textures. */
#define R300_RE_UNK4238 0x4238
+/* Something shade related */
+#define R300_RE_SHADE 0x4274
+
#define R300_RE_SHADE_MODEL 0x4278
# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
# define R300_RE_SHADE_MODEL_FLAT 0x39595
@@ -513,24 +579,31 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PM_BACK_LINE (1 << 7)
# define R300_PM_BACK_FILL (1 << 8)
+/* Fog parameters */
+#define R300_RE_FOG_SCALE 0x4294
+#define R300_RE_FOG_START 0x4298
+
/* Not sure why there are duplicates of the factor and constant values.
- My best guess so far is that there are seperate zbiases for test and write.
- Ordering might be wrong.
- Some of the tests indicate that fgl has a fallback implementation of zbias
- via pixel shaders. */
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */
#define R300_RE_ZBIAS_T_FACTOR 0x42A4
#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
#define R300_RE_ZBIAS_W_FACTOR 0x42AC
#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
/* This register needs to be set to (1<<1) for RV350 to correctly
- perform depth test (see --vb-triangles in r300_demo)
- Don't know about other chips. - Vladimir
- This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
- My guess is that there are two bits for each zbias primitive (FILL, LINE, POINT).
- One to enable depth test and one for depth write.
- Yet this doesnt explain why depth writes work ...
- */
+ * perform depth test (see --vb-triangles in r300_demo)
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT).
+ * One to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
#define R300_RE_OCCLUSION_CNTL 0x42B4
# define R300_OCCLUSION_ON (1<<1)
@@ -540,30 +613,38 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FRONT_FACE_CCW (0 << 2)
# define R300_FRONT_FACE_CW (1 << 2)
-/* BEGIN: Rasterization / Interpolators - many guesses
-// 0_UNKNOWN_18 has always been set except for clear operations.
-// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
-// on the vertex program, *not* the fragment program) */
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
#define R300_RS_CNTL_0 0x4300
# define R300_RS_CNTL_TC_CNT_SHIFT 2
# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
-# define R300_RS_CNTL_CI_CNT_SHIFT 7 /* number of color interpolators used */
+ /* number of color interpolators used */
+# define R300_RS_CNTL_CI_CNT_SHIFT 7
# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
-/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
+ /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+ register. */
#define R300_RS_CNTL_1 0x4304
/* gap */
+
/* Only used for texture coordinates.
-// Use the source field to route texture coordinate input from the vertex program
-// to the desired interpolator. Note that the source field is relative to the
-// outputs the vertex program *actually* writes. If a vertex program only writes
-// texcoord[1], this will be source index 0.
-// Set INTERP_USED on all interpolators that produce data used by the
-// fragment program. INTERP_USED looks like a swizzling mask, but
-// I haven't seen it used that way.
-//
-// Note: The _UNKNOWN constants are always set in their respective register.
-// I don't know if this is necessary. */
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
#define R300_RS_INTERP_0 0x4310
#define R300_RS_INTERP_1 0x4314
# define R300_RS_INTERP_1_UNKNOWN 0x40
@@ -580,54 +661,63 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_RS_INTERP_USED 0x00D10000
/* These DWORDs control how vertex data is routed into fragment program
-// registers, after interpolators. */
+ * registers, after interpolators.
+ */
#define R300_RS_ROUTE_0 0x4330
#define R300_RS_ROUTE_1 0x4334
#define R300_RS_ROUTE_2 0x4338
-#define R300_RS_ROUTE_3 0x433C /* GUESS */
-#define R300_RS_ROUTE_4 0x4340 /* GUESS */
-#define R300_RS_ROUTE_5 0x4344 /* GUESS */
-#define R300_RS_ROUTE_6 0x4348 /* GUESS */
-#define R300_RS_ROUTE_7 0x434C /* GUESS */
+#define R300_RS_ROUTE_3 0x433C /* GUESS */
+#define R300_RS_ROUTE_4 0x4340 /* GUESS */
+#define R300_RS_ROUTE_5 0x4344 /* GUESS */
+#define R300_RS_ROUTE_6 0x4348 /* GUESS */
+#define R300_RS_ROUTE_7 0x434C /* GUESS */
# define R300_RS_ROUTE_SOURCE_INTERP_0 0
# define R300_RS_ROUTE_SOURCE_INTERP_1 1
# define R300_RS_ROUTE_SOURCE_INTERP_2 2
# define R300_RS_ROUTE_SOURCE_INTERP_3 3
# define R300_RS_ROUTE_SOURCE_INTERP_4 4
-# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
-# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
-# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
-# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
+# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
+# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
+# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
+# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
# define R300_RS_ROUTE_DEST_SHIFT 6
-# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
+# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
/* Special handling for color: When the fragment program uses color,
-// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
-// color register index. */
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
# define R300_RS_ROUTE_0_COLOR (1 << 14)
# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
-# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
+# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
/* As above, but for secondary color */
# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
-/* END */
-
-/* BEGIN: Scissors and cliprects
-// There are four clipping rectangles. Their corner coordinates are inclusive.
-// Every pixel is assigned a number from 0 and 15 by setting bits 0-3 depending
-// on whether the pixel is inside cliprects 0-3, respectively. For example,
-// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
-// the number 3 (binary 0011).
-// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
-// the pixel is rasterized.
-//
-// In addition to this, there is a scissors rectangle. Only pixels inside the
-// scissors rectangle are drawn. (coordinates are inclusive)
-//
-// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
-// for the purpose of clipping and scissors. */
+/* END: Rasterization / Interpolators - many guesses */
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
#define R300_RE_CLIPRECT_TL_0 0x43B0
#define R300_RE_CLIPRECT_BR_0 0x43B4
#define R300_RE_CLIPRECT_TL_1 0x43B8
@@ -661,6 +751,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_CLIP_3210 (1 << 15)
/* gap */
+
#define R300_RE_SCISSORS_TL 0x43E0
#define R300_RE_SCISSORS_BR 0x43E4
# define R300_SCISSORS_OFFSET 1440
@@ -668,12 +759,15 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_SCISSORS_X_MASK (0x1FFF << 0)
# define R300_SCISSORS_Y_SHIFT 13
# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
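/* Illustration only: packing one scissors corner, applying the 1440
 * framebuffer offset mentioned above; the helper is hypothetical.
 */
static unsigned int r300_example_scissors_corner(unsigned int x, unsigned int y)
{
	x += R300_SCISSORS_OFFSET;
	y += R300_SCISSORS_OFFSET;
	return ((x << R300_SCISSORS_X_SHIFT) & R300_SCISSORS_X_MASK)
	     | ((y << R300_SCISSORS_Y_SHIFT) & R300_SCISSORS_Y_MASK);
}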
-/* END */
+/* END: Scissors and cliprects */
-/* BEGIN: Texture specification
-// The texture specification dwords are grouped by meaning and not by texture unit.
-// This means that e.g. the offset for texture image unit N is found in register
-// TX_OFFSET_0 + (4*N) */
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N)
+ */
#define R300_TX_FILTER_0 0x4400
# define R300_TX_REPEAT 0
# define R300_TX_MIRRORED 1
@@ -697,13 +791,14 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)
/* NOTE: NEAREST doesn't seem to exist.
- Im not seting MAG_FILTER_MASK and (3 << 11) on for all
- anisotropy modes because that would void selected mag filter */
-# define R300_TX_MIN_FILTER_ANISO_NEAREST ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-# define R300_TX_MIN_FILTER_ANISO_LINEAR ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
-# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
+ * anisotropy modes because that would void the selected mag filter
+ */
+# define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13)
+# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
+# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
+# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
@@ -734,10 +829,10 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_HEIGHTMASK_SHIFT 11
# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
# define R300_TX_UNK23 (1 << 23)
-# define R300_TX_SIZE_SHIFT 26 /* largest of width, height */
-# define R300_TX_SIZE_MASK (15 << 26)
-# define R300_TX_SIZE_PROJECTED (1<<30)
-# define R300_TX_SIZE_TXPITCH_EN (1<<31)
+# define R300_TX_MAX_MIP_LEVEL_SHIFT 26
+# define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26)
+# define R300_TX_SIZE_PROJECTED (1<<30)
+# define R300_TX_SIZE_TXPITCH_EN (1<<31)
#define R300_TX_FORMAT_0 0x44C0
/* The interpretation of the format word by Wladimir van der Laan */
/* The X, Y, Z and W refer to the layout of the components.
@@ -761,11 +856,11 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_DXT1 0xF
# define R300_TX_FORMAT_DXT3 0x10
# define R300_TX_FORMAT_DXT5 0x11
-# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
-# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
-# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
-# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
- /* 0x16 - some 16 bit green format.. ?? */
+# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
+# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
+# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
+# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
+ /* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
@@ -793,23 +888,26 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_W 3
# define R300_TX_FORMAT_ZERO 4
# define R300_TX_FORMAT_ONE 5
-# define R300_TX_FORMAT_CUT_Z 6 /* 2.0*Z, everything above 1.0 is set to 0.0 */
-# define R300_TX_FORMAT_CUT_W 7 /* 2.0*W, everything above 1.0 is set to 0.0 */
+ /* 2.0*Z, everything above 1.0 is set to 0.0 */
+# define R300_TX_FORMAT_CUT_Z 6
+ /* 2.0*W, everything above 1.0 is set to 0.0 */
+# define R300_TX_FORMAT_CUT_W 7
# define R300_TX_FORMAT_B_SHIFT 18
# define R300_TX_FORMAT_G_SHIFT 15
# define R300_TX_FORMAT_R_SHIFT 12
# define R300_TX_FORMAT_A_SHIFT 9
/* Convenience macro to take care of layout and swizzling */
-# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) (\
- ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
- | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
- | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
- | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
- | (R300_TX_FORMAT_##FMT) \
- )
- /* These can be ORed with result of R300_EASY_TX_FORMAT() */
- /* We don't really know what they do. Take values from a constant color ? */
+# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \
+ ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
+ | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
+ | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
+ | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
+ | (R300_TX_FORMAT_##FMT) \
+ )
+ /* These can be ORed with result of R300_EASY_TX_FORMAT()
+ We don't really know what they do. Take values from a
+ constant color ? */
# define R300_TX_FORMAT_CONST_X (1<<5)
# define R300_TX_FORMAT_CONST_Y (2<<5)
# define R300_TX_FORMAT_CONST_Z (4<<5)
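/* Illustration only: one plausible use of the convenience macro above, an
 * identity swizzle on the A8R8G8B8 layout.  R300_TX_FORMAT_X/Y/Z (0..2) are
 * assumed from the component-selector list, which is partly outside this
 * hunk; the swizzle choice itself is not taken from the patch.
 */
static unsigned int r300_example_tx_format_argb8888(void)
{
	return R300_EASY_TX_FORMAT(X, Y, Z, W, A8R8G8B8);
}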
@@ -819,7 +917,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_TX_PITCH_0 0x4500 /* obvious missing in gap */
#define R300_TX_OFFSET_0 0x4540
-/* BEGIN: Guess from R200 */
+ /* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
@@ -828,53 +926,61 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TXO_MICRO_TILE (1 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
-/* END */
-#define R300_TX_CHROMA_KEY_0 0x4580 /* 32 bit chroma key */
-#define R300_TX_BORDER_COLOR_0 0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
-
-/* END */
-
-/* BEGIN: Fragment program instruction set
-// Fragment programs are written directly into register space.
-// There are separate instruction streams for texture instructions and ALU
-// instructions.
-// In order to synchronize these streams, the program is divided into up
-// to 4 nodes. Each node begins with a number of TEX operations, followed
-// by a number of ALU operations.
-// The first node can have zero TEX ops, all subsequent nodes must have at least
-// one TEX ops.
-// All nodes must have at least one ALU op.
-//
-// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
-// 1 node, a value of 3 means 4 nodes.
-// The total amount of instructions is defined in PFS_CNTL_2. The offsets are
-// offsets into the respective instruction streams, while *_END points to the
-// last instruction relative to this offset. */
+ /* END: Guess from R200 */
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0 0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0 0x45C0
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have
+ * at least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total amount of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
#define R300_PFS_CNTL_0 0x4600
# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
#define R300_PFS_CNTL_1 0x4604
/* There is an unshifted value here which has so far always been equal to the
-// index of the highest used temporary register. */
+ * index of the highest used temporary register.
+ */
#define R300_PFS_CNTL_2 0x4608
# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
# define R300_PFS_CNTL_ALU_END_SHIFT 6
-# define R300_PFS_CNTL_ALU_END_MASK (63 << 0)
+# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
-# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
+# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
# define R300_PFS_CNTL_TEX_END_SHIFT 18
-# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
+# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */
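/* Illustration only: packing PFS_CNTL_2 for a fragment program whose ALU and
 * TEX streams both start at offset 0, using the "*_END is the last
 * instruction" convention described above; the helper is invented.
 */
static unsigned int r300_example_pfs_cntl_2(unsigned int alu_count,
					    unsigned int tex_count)
{
	unsigned int v;

	v = (alu_count - 1) << R300_PFS_CNTL_ALU_END_SHIFT;	/* >= 1 ALU op */
	if (tex_count)
		v |= (tex_count - 1) << R300_PFS_CNTL_TEX_END_SHIFT;
	return v;
}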
/* gap */
+
/* Nodes are stored backwards. The last active node is always stored in
-// PFS_NODE_3.
-// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
-// first node is stored in NODE_2, the second node is stored in NODE_3.
-//
-// Offsets are relative to the master offset from PFS_CNTL_2.
-// LAST_NODE is set for the last node, and only for the last node. */
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
#define R300_PFS_NODE_0 0x4610
#define R300_PFS_NODE_1 0x4614
#define R300_PFS_NODE_2 0x4618
@@ -887,91 +993,98 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
# define R300_PFS_NODE_TEX_END_SHIFT 17
# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
-/*# define R300_PFS_NODE_LAST_NODE (1 << 22) */
# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)
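/* Illustration only: a last-node value for a single-node program that writes
 * color, following the "last node lives in PFS_NODE_3" rule above.  The ALU
 * offset/end fields of the node word are omitted here for brevity.
 */
static unsigned int r300_example_pfs_last_node(unsigned int tex_end)
{
	return (tex_end << R300_PFS_NODE_TEX_END_SHIFT)
	     | R300_PFS_NODE_OUTPUT_COLOR;
}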
/* TEX
-// As far as I can tell, texture instructions cannot write into output
-// registers directly. A subsequent ALU instruction is always necessary,
-// even if it's just MAD o0, r0, 1, 0 */
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
#define R300_PFS_TEXI_0 0x4620
-# define R300_FPITX_SRC_SHIFT 0
-# define R300_FPITX_SRC_MASK (31 << 0)
-# define R300_FPITX_SRC_CONST (1 << 5) /* GUESS */
-# define R300_FPITX_DST_SHIFT 6
-# define R300_FPITX_DST_MASK (31 << 6)
-# define R300_FPITX_IMAGE_SHIFT 11
-# define R300_FPITX_IMAGE_MASK (15 << 11) /* GUESS based on layout and native limits */
+# define R300_FPITX_SRC_SHIFT 0
+# define R300_FPITX_SRC_MASK (31 << 0)
+ /* GUESS */
+# define R300_FPITX_SRC_CONST (1 << 5)
+# define R300_FPITX_DST_SHIFT 6
+# define R300_FPITX_DST_MASK (31 << 6)
+# define R300_FPITX_IMAGE_SHIFT 11
+ /* GUESS based on layout and native limits */
+# define R300_FPITX_IMAGE_MASK (15 << 11)
/* Unsure if these are opcodes, or some kind of bitfield, but this is how
* they were set when I checked
*/
-# define R300_FPITX_OPCODE_SHIFT 15
-# define R300_FPITX_OP_TEX 1
-# define R300_FPITX_OP_KIL 2
-# define R300_FPITX_OP_TXP 3
-# define R300_FPITX_OP_TXB 4
+# define R300_FPITX_OPCODE_SHIFT 15
+# define R300_FPITX_OP_TEX 1
+# define R300_FPITX_OP_KIL 2
+# define R300_FPITX_OP_TXP 3
+# define R300_FPITX_OP_TXB 4
+# define R300_FPITX_OPCODE_MASK (7 << 15)
/* ALU
-// The ALU instructions register blocks are enumerated according to the order
-// in which fglrx. I assume there is space for 64 instructions, since
-// each block has space for a maximum of 64 DWORDs, and this matches reported
-// native limits.
-//
-// The basic functional block seems to be one MAD for each color and alpha,
-// and an adder that adds all components after the MUL.
-// - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
-// - DP4: Use OUTC_DP4, OUTA_DP4
-// - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
-// - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
-// - CMP: If ARG2 < 0, return ARG1, else return ARG0
-// - FLR: use FRC+MAD
-// - XPD: use MAD+MAD
-// - SGE, SLT: use MAD+CMP
-// - RSQ: use ABS modifier for argument
-// - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
-// into color register
-// - apparently, there's no quick DST operation
-// - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
-// - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
-// - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
-//
-// Operand selection
-// First stage selects three sources from the available registers and
-// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
-// fglrx sorts the three source fields: Registers before constants,
-// lower indices before higher indices; I do not know whether this is necessary.
-// fglrx fills unused sources with "read constant 0"
-// According to specs, you cannot select more than two different constants.
-//
-// Second stage selects the operands from the sources. This is defined in
-// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
-// zero and one.
-// Swizzling and negation happens in this stage, as well.
-//
-// Important: Color and alpha seem to be mostly separate, i.e. their sources
-// selection appears to be fully independent (the register storage is probably
-// physically split into a color and an alpha section).
-// However (because of the apparent physical split), there is some interaction
-// WRT swizzling. If, for example, you want to load an R component into an
-// Alpha operand, this R component is taken from a *color* source, not from
-// an alpha source. The corresponding register doesn't even have to appear in
-// the alpha sources list. (I hope this alll makes sense to you)
-//
-// Destination selection
-// The destination register index is in FPI1 (color) and FPI3 (alpha) together
-// with enable bits.
-// There are separate enable bits for writing into temporary registers
-// (DSTC_REG_* /DSTA_REG) and and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT).
-// You can write to both at once, or not write at all (the same index
-// must be used for both).
-//
-// Note: There is a special form for LRP
-// - Argument order is the same as in ARB_fragment_program.
-// - Operation is MAD
-// - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
-// - Set FPI0/FPI2_SPECIAL_LRP
-// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */
+ * The ALU instruction register blocks are enumerated according to the order
+ * in which fglrx emits them. I assume there is space for 64 instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ * - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ * - DP4: Use OUTC_DP4, OUTA_DP4
+ * - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ * - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ * - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ * - FLR: use FRC+MAD
+ * - XPD: use MAD+MAD
+ * - SGE, SLT: use MAD+CMP
+ * - RSQ: use ABS modifier for argument
+ * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ * (e.g. RCP) into color register
+ * - apparently, there's no quick DST operation
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0"
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their sources
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ * - Argument order is the same as in ARB_fragment_program.
+ * - Operation is MAD
+ * - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ * - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
#define R300_PFS_INSTR1_0 0x46C0
# define R300_FPI1_SRC0C_SHIFT 0
# define R300_FPI1_SRC0C_MASK (31 << 0)
@@ -982,6 +1095,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI1_SRC2C_SHIFT 12
# define R300_FPI1_SRC2C_MASK (31 << 12)
# define R300_FPI1_SRC2C_CONST (1 << 17)
+# define R300_FPI1_SRC_MASK 0x0003ffff
# define R300_FPI1_DSTC_SHIFT 18
# define R300_FPI1_DSTC_MASK (31 << 18)
# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
@@ -1003,6 +1117,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI3_SRC2A_SHIFT 12
# define R300_FPI3_SRC2A_MASK (31 << 12)
# define R300_FPI3_SRC2A_CONST (1 << 17)
+# define R300_FPI3_SRC_MASK 0x0003ffff
# define R300_FPI3_DSTA_SHIFT 18
# define R300_FPI3_DSTA_MASK (31 << 18)
# define R300_FPI3_DSTA_REG (1 << 23)
@@ -1028,7 +1143,8 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI0_ARGC_SRC1C_LRP 15
# define R300_FPI0_ARGC_ZERO 20
# define R300_FPI0_ARGC_ONE 21
-# define R300_FPI0_ARGC_HALF 22 /* GUESS */
+ /* GUESS */
+# define R300_FPI0_ARGC_HALF 22
# define R300_FPI0_ARGC_SRC0C_YZX 23
# define R300_FPI0_ARGC_SRC1C_YZX 24
# define R300_FPI0_ARGC_SRC2C_YZX 25
@@ -1057,6 +1173,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI0_OUTC_DP4 (2 << 23)
# define R300_FPI0_OUTC_MIN (4 << 23)
# define R300_FPI0_OUTC_MAX (5 << 23)
+# define R300_FPI0_OUTC_CMPH (7 << 23)
# define R300_FPI0_OUTC_CMP (8 << 23)
# define R300_FPI0_OUTC_FRC (9 << 23)
# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
@@ -1079,20 +1196,23 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI2_ARGA_SRC1A_LRP 15
# define R300_FPI2_ARGA_ZERO 16
# define R300_FPI2_ARGA_ONE 17
-# define R300_FPI2_ARGA_HALF 18 /* GUESS */
-
+ /* GUESS */
+# define R300_FPI2_ARGA_HALF 18
# define R300_FPI2_ARG0A_SHIFT 0
# define R300_FPI2_ARG0A_MASK (31 << 0)
# define R300_FPI2_ARG0A_NEG (1 << 5)
-# define R300_FPI2_ARG0A_ABS (1 << 6) /* GUESS */
+ /* GUESS */
+# define R300_FPI2_ARG0A_ABS (1 << 6)
# define R300_FPI2_ARG1A_SHIFT 7
# define R300_FPI2_ARG1A_MASK (31 << 7)
# define R300_FPI2_ARG1A_NEG (1 << 12)
-# define R300_FPI2_ARG1A_ABS (1 << 13) /* GUESS */
+ /* GUESS */
+# define R300_FPI2_ARG1A_ABS (1 << 13)
# define R300_FPI2_ARG2A_SHIFT 14
# define R300_FPI2_ARG2A_MASK (31 << 14)
# define R300_FPI2_ARG2A_NEG (1 << 19)
-# define R300_FPI2_ARG2A_ABS (1 << 20) /* GUESS */
+ /* GUESS */
+# define R300_FPI2_ARG2A_ABS (1 << 20)
# define R300_FPI2_SPECIAL_LRP (1 << 21)
# define R300_FPI2_OUTA_MAD (0 << 23)
# define R300_FPI2_OUTA_DP4 (1 << 23)
@@ -1106,9 +1226,19 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_FPI2_OUTA_RSQ (11 << 23)
# define R300_FPI2_OUTA_SAT (1 << 30)
# define R300_FPI2_UNKNOWN_31 (1 << 31)
-/* END */
+/* END: Fragment program instruction set */
+
+/* Fog state and color */
+#define R300_RE_FOG_STATE 0x4BC0
+# define R300_FOG_ENABLE (1 << 0)
+# define R300_FOG_MODE_LINEAR (0 << 1)
+# define R300_FOG_MODE_EXP (1 << 1)
+# define R300_FOG_MODE_EXP2 (2 << 1)
+# define R300_FOG_MODE_MASK (3 << 1)
+#define R300_FOG_COLOR_R 0x4BC8
+#define R300_FOG_COLOR_G 0x4BCC
+#define R300_FOG_COLOR_B 0x4BD0
-/* gap */
#define R300_PP_ALPHA_TEST 0x4BD4
# define R300_REF_ALPHA_MASK 0x000000ff
# define R300_ALPHA_TEST_FAIL (0 << 8)
@@ -1123,6 +1253,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_ALPHA_TEST_ENABLE (1 << 11)
/* gap */
+
/* Fragment program parameters in 7.16 floating point */
#define R300_PFS_PARAM_0_X 0x4C00
#define R300_PFS_PARAM_0_Y 0x4C04
@@ -1135,45 +1266,48 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_PFS_PARAM_31_W 0x4DFC
/* Notes:
-// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
-// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
-// function (both registers are always set up completely in any case)
-// - Most blend flags are simply copied from R200 and not tested yet */
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ * the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ * are set to the same function (both registers are always set up
+ * completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
#define R300_RB3D_CBLEND 0x4E04
#define R300_RB3D_ABLEND 0x4E08
- /* the following only appear in CBLEND */
+/* the following only appear in CBLEND */
# define R300_BLEND_ENABLE (1 << 0)
# define R300_BLEND_UNKNOWN (3 << 1)
# define R300_BLEND_NO_SEPARATE (1 << 3)
- /* the following are shared between CBLEND and ABLEND */
+/* the following are shared between CBLEND and ABLEND */
# define R300_FCN_MASK (3 << 12)
# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
-# define R300_SRC_BLEND_GL_ZERO (32 << 16)
-# define R300_SRC_BLEND_GL_ONE (33 << 16)
-# define R300_SRC_BLEND_GL_SRC_COLOR (34 << 16)
-# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
-# define R300_SRC_BLEND_GL_DST_COLOR (36 << 16)
-# define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
-# define R300_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
-# define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
-# define R300_SRC_BLEND_GL_DST_ALPHA (40 << 16)
-# define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
-# define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
-# define R300_SRC_BLEND_MASK (63 << 16)
-# define R300_DST_BLEND_GL_ZERO (32 << 24)
-# define R300_DST_BLEND_GL_ONE (33 << 24)
-# define R300_DST_BLEND_GL_SRC_COLOR (34 << 24)
-# define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
-# define R300_DST_BLEND_GL_DST_COLOR (36 << 24)
-# define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
-# define R300_DST_BLEND_GL_SRC_ALPHA (38 << 24)
-# define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
-# define R300_DST_BLEND_GL_DST_ALPHA (40 << 24)
-# define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
-# define R300_DST_BLEND_MASK (63 << 24)
+# define R300_COMB_FCN_MIN (4 << 12)
+# define R300_COMB_FCN_MAX (5 << 12)
+# define R300_COMB_FCN_RSUB_CLAMP (6 << 12)
+# define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12)
+# define R300_BLEND_GL_ZERO (32)
+# define R300_BLEND_GL_ONE (33)
+# define R300_BLEND_GL_SRC_COLOR (34)
+# define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35)
+# define R300_BLEND_GL_DST_COLOR (36)
+# define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37)
+# define R300_BLEND_GL_SRC_ALPHA (38)
+# define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39)
+# define R300_BLEND_GL_DST_ALPHA (40)
+# define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41)
+# define R300_BLEND_GL_SRC_ALPHA_SATURATE (42)
+# define R300_BLEND_GL_CONST_COLOR (43)
+# define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44)
+# define R300_BLEND_GL_CONST_ALPHA (45)
+# define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46)
+# define R300_BLEND_MASK (63)
+# define R300_SRC_BLEND_SHIFT (16)
+# define R300_DST_BLEND_SHIFT (24)
+#define R300_RB3D_BLEND_COLOR 0x4E10
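/* Illustration only: with the split encoding above (blend factor value plus
 * SRC/DST shift), classic GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blending
 * could plausibly be packed like this; not taken from the patch itself.
 */
static unsigned int r300_example_cblend_alpha(void)
{
	return R300_BLEND_ENABLE
	     | R300_COMB_FCN_ADD_CLAMP
	     | (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT)
	     | (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
}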
#define R300_RB3D_COLORMASK 0x4E0C
# define R300_COLORMASK0_B (1<<0)
# define R300_COLORMASK0_G (1<<1)
@@ -1181,41 +1315,49 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_COLORMASK0_A (1<<3)
/* gap */
+
#define R300_RB3D_COLOROFFSET0 0x4E28
-# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
-#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
-#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
-#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
+# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */
+
/* gap */
+
/* Bit 16: Larger tiles
-// Bit 17: 4x2 tiles
-// Bit 18: Extremely weird tile like, but some pixels duplicated? */
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile-like mode, but some pixels duplicated?
+ */
#define R300_RB3D_COLORPITCH0 0x4E38
-# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
-# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
-# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
-# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
-# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
-# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
+# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
+# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
+# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
+# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
+# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
+# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
# define R300_COLOR_FORMAT_RGB565 (2 << 22)
# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
-#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
-#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
-#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
+#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
/* gap */
+
/* Guess by Vladimir.
-// Set to 0A before 3D operations, set to 02 afterwards. */
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C
-# define R300_RB3D_DSTCACHE_02 0x00000002
-# define R300_RB3D_DSTCACHE_0A 0x0000000A
+# define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002
+# define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A
/* gap */
-/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
-/* Bit (1<<8) is the "test" bit. so plain write is 6 - vd */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so plain write is 6 - vd
+ */
#define R300_RB3D_ZSTENCIL_CNTL_0 0x4F00
-# define R300_RB3D_Z_DISABLED_1 0x00000010 /* GUESS */
-# define R300_RB3D_Z_DISABLED_2 0x00000014 /* GUESS */
+# define R300_RB3D_Z_DISABLED_1 0x00000010
+# define R300_RB3D_Z_DISABLED_2 0x00000014
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
# define R300_RB3D_Z_WRITE_ONLY 0x00000006
@@ -1226,7 +1368,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_RB3D_STENCIL_ENABLE 0x00000001
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
- /* functions */
+ /* functions */
# define R300_ZS_NEVER 0
# define R300_ZS_LESS 1
# define R300_ZS_LEQUAL 2
@@ -1236,7 +1378,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_ZS_NOTEQUAL 6
# define R300_ZS_ALWAYS 7
# define R300_ZS_MASK 7
- /* operations */
+ /* operations */
# define R300_ZS_KEEP 0
# define R300_ZS_ZERO 1
# define R300_ZS_REPLACE 2
@@ -1245,9 +1387,8 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_ZS_INVERT 5
# define R300_ZS_INCR_WRAP 6
# define R300_ZS_DECR_WRAP 7
-
- /* front and back refer to operations done for front
- and back faces, i.e. separate stencil function support */
+ /* front and back refer to operations done for front
+ and back faces, i.e. separate stencil function support */
# define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT 0
# define R300_RB3D_ZS1_FRONT_FUNC_SHIFT 3
# define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT 6
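A quick sketch of how these fields combine (illustrative only, not part of the patch; zs1 is a placeholder u32 that would be written to R300_RB3D_ZSTENCIL_CNTL_1):

	/* depth test LEQUAL, front stencil func ALWAYS, KEEP on stencil fail */
	zs1 = (R300_ZS_LEQUAL << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT) |
	      (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT) |
	      (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT);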
@@ -1269,45 +1410,64 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_RB3D_ZSTENCIL_FORMAT 0x4F10
# define R300_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
# define R300_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
+	/* 16 bit format or some additional bit? */
+# define R300_DEPTH_FORMAT_UNK32 (32 << 0)
+
+#define R300_RB3D_EARLY_Z 0x4F14
+# define R300_EARLY_Z_DISABLE (0 << 0)
+# define R300_EARLY_Z_ENABLE (1 << 0)
+
+/* gap */
+
+#define R300_RB3D_ZCACHE_CTLSTAT 0x4F18 /* GUESS */
+# define R300_RB3D_ZCACHE_UNKNOWN_01 0x1
+# define R300_RB3D_ZCACHE_UNKNOWN_03 0x3
/* gap */
+
#define R300_RB3D_DEPTHOFFSET 0x4F20
#define R300_RB3D_DEPTHPITCH 0x4F24
-# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
-# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
-# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
-# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
-# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
-# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
-
-/* BEGIN: Vertex program instruction set
-// Every instruction is four dwords long:
-// DWORD 0: output and opcode
-// DWORD 1: first argument
-// DWORD 2: second argument
-// DWORD 3: third argument
-//
-// Notes:
-// - ABS r, a is implemented as MAX r, a, -a
-// - MOV is implemented as ADD to zero
-// - XPD is implemented as MUL + MAD
-// - FLR is implemented as FRC + ADD
-// - apparently, fglrx tries to schedule instructions so that there is at least
-// one instruction between the write to a temporary and the first read
-// from said temporary; however, violations of this scheduling are allowed
-// - register indices seem to be unrelated with OpenGL aliasing to conventional state
-// - only one attribute and one parameter can be loaded at a time; however, the
-// same attribute/parameter can be used for more than one argument
-// - the second software argument for POW is the third hardware argument (no idea why)
-// - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
-//
-// There is some magic surrounding LIT:
-// The single argument is replicated across all three inputs, but swizzled:
-// First argument: xyzy
-// Second argument: xyzx
-// Third argument: xyzw
-// Whenever the result is used later in the fragment program, fglrx forces x and w
-// to be 1.0 in the input selection; I don't know whether this is strictly necessary */
+# define R300_DEPTHPITCH_MASK 0x00001FF8 /* GUESS */
+# define R300_DEPTH_TILE_ENABLE (1 << 16) /* GUESS */
+# define R300_DEPTH_MICROTILE_ENABLE (1 << 17) /* GUESS */
+# define R300_DEPTH_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
+# define R300_DEPTH_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
+# define R300_DEPTH_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
+
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ * DWORD 0: output and opcode
+ * DWORD 1: first argument
+ * DWORD 2: second argument
+ * DWORD 3: third argument
+ *
+ * Notes:
+ * - ABS r, a is implemented as MAX r, a, -a
+ * - MOV is implemented as ADD to zero
+ * - XPD is implemented as MUL + MAD
+ * - FLR is implemented as FRC + ADD
+ * - apparently, fglrx tries to schedule instructions so that there is at
+ * least one instruction between the write to a temporary and the first
+ * read from said temporary; however, violations of this scheduling are
+ * allowed
+ * - register indices seem to be unrelated with OpenGL aliasing to
+ * conventional state
+ * - only one attribute and one parameter can be loaded at a time; however,
+ * the same attribute/parameter can be used for more than one argument
+ * - the second software argument for POW is the third hardware argument
+ * (no idea why)
+ * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ * The single argument is replicated across all three inputs, but swizzled:
+ * First argument: xyzy
+ * Second argument: xyzx
+ * Third argument: xyzw
+ * Whenever the result is used later in the fragment program, fglrx forces
+ * x and w to be 1.0 in the input selection; I don't know whether this is
+ * strictly necessary
+ */
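A minimal sketch of how DWORD 0 (output and opcode) is assembled from the R300_VPI_OUT_* defines that follow (illustrative only, not part of the patch; dw0 and the destination index 2 are arbitrary placeholders):

	/* MUL into temporary register 2, writing only the x and y components */
	dw0 = R300_VPI_OUT_OP_MUL |
	      R300_VPI_OUT_REG_CLASS_TEMPORARY |
	      (2 << R300_VPI_OUT_REG_INDEX_SHIFT) |
	      R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y;

The three argument DWORDs are built analogously from the R300_VPI_IN_* defines further below.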
#define R300_VPI_OUT_OP_DOT (1 << 0)
#define R300_VPI_OUT_OP_MUL (2 << 0)
#define R300_VPI_OUT_OP_ADD (3 << 0)
@@ -1318,26 +1478,33 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_VPI_OUT_OP_MIN (8 << 0)
#define R300_VPI_OUT_OP_SGE (9 << 0)
#define R300_VPI_OUT_OP_SLT (10 << 0)
-#define R300_VPI_OUT_OP_UNK12 (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12 (12 << 0)
+#define R300_VPI_OUT_OP_ARL (13 << 0)
#define R300_VPI_OUT_OP_EXP (65 << 0)
#define R300_VPI_OUT_OP_LOG (66 << 0)
-#define R300_VPI_OUT_OP_UNK67 (67 << 0) /* Used in fog computations, scalar(scalar) */
+ /* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67 (67 << 0)
#define R300_VPI_OUT_OP_LIT (68 << 0)
#define R300_VPI_OUT_OP_POW (69 << 0)
#define R300_VPI_OUT_OP_RCP (70 << 0)
#define R300_VPI_OUT_OP_RSQ (72 << 0)
-#define R300_VPI_OUT_OP_UNK73 (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+ /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73 (73 << 0)
#define R300_VPI_OUT_OP_EX2 (75 << 0)
#define R300_VPI_OUT_OP_LG2 (76 << 0)
#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
-#define R300_VPI_OUT_OP_UNK129 (129 << 0) /* all temps, vector(scalar, vector, vector) */
+ /* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129 (129 << 0)
#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8)
#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
#define R300_VPI_OUT_REG_INDEX_SHIFT 13
-#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13) /* GUESS based on fglrx native limits */
+ /* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13)
#define R300_VPI_OUT_WRITE_X (1 << 20)
#define R300_VPI_OUT_WRITE_Y (1 << 21)
@@ -1348,14 +1515,16 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
-#define R300_VPI_IN_REG_CLASS_MASK (31 << 0) /* GUESS */
+#define R300_VPI_IN_REG_CLASS_MASK (31 << 0)
#define R300_VPI_IN_REG_INDEX_SHIFT 5
-#define R300_VPI_IN_REG_INDEX_MASK (255 << 5) /* GUESS based on fglrx native limits */
+ /* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK (255 << 5)
/* The R300 can select components from the input register arbitrarily.
-// Use the following constants, shifted by the component shift you
-// want to select */
+ * Use the following constants, shifted by the component shift you
+ * want to select
+ */
#define R300_VPI_IN_SELECT_X 0
#define R300_VPI_IN_SELECT_Y 1
#define R300_VPI_IN_SELECT_Z 2
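Illustrative only (not part of the patch): to read, say, the source's y component where the instruction expects its first component, the select code is shifted into that component's field of the argument dword; the field offset name used here is an assumed placeholder, not a define from this header:

	/* swizzle: feed component y into the x slot of this argument
	 * (component_x_shift is an assumed name for that field's bit offset) */
	arg |= R300_VPI_IN_SELECT_Y << component_x_shift;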
@@ -1373,11 +1542,11 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_VPI_IN_NEG_Y (1 << 26)
#define R300_VPI_IN_NEG_Z (1 << 27)
#define R300_VPI_IN_NEG_W (1 << 28)
-/* END */
+/* END: Vertex program instruction set */
-//BEGIN: Packet 3 commands
+/* BEGIN: Packet 3 commands */
-// A primitive emission dword.
+/* A primitive emission dword. */
#define R300_PRIM_TYPE_NONE (0 << 0)
#define R300_PRIM_TYPE_POINT (1 << 0)
#define R300_PRIM_TYPE_LINE (2 << 0)
@@ -1389,7 +1558,8 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
-#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0) // GUESS (based on r200)
+ /* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0)
#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
#define R300_PRIM_TYPE_QUADS (13 << 0)
#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
@@ -1399,37 +1569,58 @@ I am fairly certain that they are correct unless stated otherwise in comments.
#define R300_PRIM_WALK_LIST (2 << 4)
#define R300_PRIM_WALK_RING (3 << 4)
#define R300_PRIM_WALK_MASK (3 << 4)
-#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6) // GUESS (based on r200)
-#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6) // GUESS
+ /* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
#define R300_PRIM_NUM_VERTICES_SHIFT 16
+#define R300_PRIM_NUM_VERTICES_MASK 0xffff
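Illustrative only (not part of the patch): a primitive emission dword combines primitive type, walk mode and vertex count, e.g. for a point list of n vertices:

	prim_dword = R300_PRIM_TYPE_POINT | R300_PRIM_WALK_LIST |
		     ((n & R300_PRIM_NUM_VERTICES_MASK) << R300_PRIM_NUM_VERTICES_SHIFT);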
-// Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
-// Two parameter dwords:
-// 0. The first parameter appears to be always 0
-// 1. The second parameter is a standard primitive emission dword.
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to be always 0
+ * 1. The second parameter is a standard primitive emission dword.
+ */
#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
-// Specify the full set of vertex arrays as (address, stride).
-// The first parameter is the number of vertex arrays specified.
-// The rest of the command is a variable length list of blocks, where
-// each block is three dwords long and specifies two arrays.
-// The first dword of a block is split into two words, the lower significant
-// word refers to the first array, the more significant word to the second
-// array in the block.
-// The low byte of each word contains the size of an array entry in dwords,
-// the high byte contains the stride of the array.
-// The second dword of a block contains the pointer to the first array,
-// the third dword of a block contains the pointer to the second array.
-// Note that if the total number of arrays is odd, the third dword of
-// the last block is omitted.
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words, the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
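Illustrative only (not part of the patch): following the block layout described above, one block describing two arrays could be packed roughly as below (size, stride and address variables are placeholders; units are as stated in the comment):

	dw0 = (size0 | (stride0 << 8)) | ((size1 | (stride1 << 8)) << 16);
	dw1 = addr0;	/* pointer to the first array */
	dw2 = addr1;	/* pointer to the second array */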
#define R300_PACKET3_INDX_BUFFER 0x00003300
# define R300_EB_UNK1_SHIFT 24
# define R300_EB_UNK1 (0x80<<24)
# define R300_EB_UNK2 0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400
#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
-//END
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8 2
+#define R300_CP_COLOR_FORMAT_ARGB1555 3
+#define R300_CP_COLOR_FORMAT_RGB565 4
+#define R300_CP_COLOR_FORMAT_ARGB8888 6
+#define R300_CP_COLOR_FORMAT_RGB332 7
+#define R300_CP_COLOR_FORMAT_RGB8 9
+#define R300_CP_COLOR_FORMAT_ARGB4444 15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
-#endif /* _R300_REG_H */
+#endif /* _R300_REG_H */
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 68338389d836..af5790f8fd53 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -36,7 +36,7 @@
#define RADEON_FIFO_DEBUG 0
-static int radeon_do_cleanup_cp(drm_device_t * dev);
+static int radeon_do_cleanup_cp(struct drm_device * dev);
/* CP microcode (from ATI) */
static const u32 R200_cp_microcode[][2] = {
@@ -816,7 +816,7 @@ static const u32 R300_cp_microcode[][2] = {
{0000000000, 0000000000},
};
-static int RADEON_READ_PLL(drm_device_t * dev, int addr)
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1066,7 +1066,7 @@ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
/* Reset the engine. This will stop the CP if it is running.
*/
-static int radeon_do_engine_reset(drm_device_t * dev)
+static int radeon_do_engine_reset(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
@@ -1122,7 +1122,7 @@ static int radeon_do_engine_reset(drm_device_t * dev)
return 0;
}
-static void radeon_cp_init_ring_buffer(drm_device_t * dev,
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
drm_radeon_private_t * dev_priv)
{
u32 ring_start, cur_read_ptr;
@@ -1174,7 +1174,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
} else
#endif
{
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_sg_mem *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;
tmp_ofs = dev_priv->ring_rptr->offset -
@@ -1384,7 +1384,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
}
}
-static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1420,6 +1420,10 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
return DRM_ERR(EINVAL);
}
+ /* Enable vblank on CRTC1 for older X servers
+ */
+ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+
switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version = UCODE_R200;
@@ -1501,13 +1505,13 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
RADEON_ROUND_MODE_TRUNC |
RADEON_ROUND_PREC_8TH_PIX);
- DRM_GETSAREA();
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
dev_priv->gart_textures_offset = init->gart_textures_offset;
+ dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
@@ -1731,7 +1735,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
return 0;
}
-static int radeon_do_cleanup_cp(drm_device_t * dev)
+static int radeon_do_cleanup_cp(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1787,7 +1791,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
*
* Charl P. Botha <http://cpbotha.net>
*/
-static int radeon_do_resume_cp(drm_device_t * dev)
+static int radeon_do_resume_cp(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1914,7 +1918,7 @@ int radeon_cp_stop(DRM_IOCTL_ARGS)
return 0;
}
-void radeon_do_release(drm_device_t * dev)
+void radeon_do_release(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i, ret;
@@ -2042,12 +2046,12 @@ int radeon_fullscreen(DRM_IOCTL_ARGS)
* they can't get the lock.
*/
-drm_buf_t *radeon_freelist_get(drm_device_t * dev)
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i, t;
int start;
@@ -2082,12 +2086,12 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev)
}
#if 0
-drm_buf_t *radeon_freelist_get(drm_device_t * dev)
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i, t;
int start;
u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
@@ -2116,15 +2120,15 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev)
}
#endif
-void radeon_freelist_reset(drm_device_t * dev)
+void radeon_freelist_reset(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
dev_priv->last_buf = 0;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
@@ -2166,11 +2170,11 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
return DRM_ERR(EBUSY);
}
-static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev,
- drm_dma_t * d)
+static int radeon_cp_get_buffers(DRMFILE filp, struct drm_device * dev,
+ struct drm_dma * d)
{
int i;
- drm_buf_t *buf;
+ struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = radeon_freelist_get(dev);
@@ -2194,10 +2198,10 @@ static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev,
int radeon_cp_buffers(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
- drm_dma_t d;
+ struct drm_dma __user *argp = (void __user *)data;
+ struct drm_dma d;
LOCK_TEST_WITH_RETURN(dev, filp);
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 66c4b6fed04f..5a8e23f916fc 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -417,7 +417,7 @@ typedef struct {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@@ -426,7 +426,7 @@ typedef struct {
unsigned int last_dispatch;
unsigned int last_clear;
- drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
+ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
@@ -604,7 +604,7 @@ typedef struct drm_radeon_cmd_buffer {
int bufsz;
char __user *buf;
int nbox;
- drm_clip_rect_t __user *boxes;
+ struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
@@ -655,6 +655,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
+#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
typedef struct drm_radeon_getparam {
int param;
@@ -708,7 +709,7 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
-
+#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
*/
typedef struct drm_radeon_surface_alloc {
@@ -721,4 +722,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
+
#endif
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 2eb652ec6745..349ac3d3b848 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -60,7 +60,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL,
+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@@ -70,6 +70,7 @@ static struct drm_driver driver = {
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
.vblank_wait = radeon_driver_vblank_wait,
+ .vblank_wait2 = radeon_driver_vblank_wait2,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 54f49ef4bef0..3b3d9357201c 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -97,9 +97,10 @@
* new packet type)
* 1.26- Add support for variable size PCI(E) gart aperture
* 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 27
+#define DRIVER_MINOR 28
#define DRIVER_PATCHLEVEL 0
/*
@@ -154,7 +155,7 @@ enum radeon_chip_flags {
typedef struct drm_radeon_freelist {
unsigned int age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
struct drm_radeon_freelist *next;
struct drm_radeon_freelist *prev;
} drm_radeon_freelist_t;
@@ -277,13 +278,16 @@ typedef struct drm_radeon_private {
/* SW interrupt */
wait_queue_head_t swi_queue;
atomic_t swi_emitted;
+ int vblank_crtc;
+ uint32_t irq_enable_reg;
+ int irq_enabled;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
unsigned long pcigart_offset;
unsigned int pcigart_offset_set;
- drm_ati_pcigart_info gart_info;
+ struct drm_ati_pcigart_info gart_info;
u32 scratch_ages[5];
@@ -299,7 +303,7 @@ typedef struct drm_radeon_kcmd_buffer {
int bufsz;
char *buf;
int nbox;
- drm_clip_rect_t __user *boxes;
+ struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
extern int radeon_no_wb;
@@ -332,8 +336,8 @@ extern int radeon_engine_reset(DRM_IOCTL_ARGS);
extern int radeon_fullscreen(DRM_IOCTL_ARGS);
extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
-extern void radeon_freelist_reset(drm_device_t * dev);
-extern drm_buf_t *radeon_freelist_get(drm_device_t * dev);
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
@@ -353,29 +357,33 @@ extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap);
extern int radeon_irq_emit(DRM_IOCTL_ARGS);
extern int radeon_irq_wait(DRM_IOCTL_ARGS);
-extern void radeon_do_release(drm_device_t * dev);
-extern int radeon_driver_vblank_wait(drm_device_t * dev,
+extern void radeon_do_release(struct drm_device * dev);
+extern int radeon_driver_vblank_wait(struct drm_device * dev,
unsigned int *sequence);
+extern int radeon_driver_vblank_wait2(struct drm_device * dev,
+ unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
-extern void radeon_driver_irq_preinstall(drm_device_t * dev);
-extern void radeon_driver_irq_postinstall(drm_device_t * dev);
-extern void radeon_driver_irq_uninstall(drm_device_t * dev);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
extern int radeon_driver_firstopen(struct drm_device *dev);
-extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
-extern void radeon_driver_lastclose(drm_device_t * dev);
-extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
+extern void radeon_driver_preclose(struct drm_device * dev, DRMFILE filp);
+extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
-extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
- drm_file_t * filp_priv,
+extern int r300_do_cp_cmdbuf(struct drm_device * dev, DRMFILE filp,
+ struct drm_file * filp_priv,
drm_radeon_kcmd_buffer_t * cmdbuf);
/* Flags for stats.boxes
@@ -496,12 +504,15 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
+# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 3ff0baa2fbfa..ad8a0ac7182e 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -64,7 +64,7 @@ static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 stat;
@@ -73,18 +73,35 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
* outside the DRM
*/
stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
- RADEON_CRTC_VBLANK_STAT));
+ RADEON_CRTC_VBLANK_STAT |
+ RADEON_CRTC2_VBLANK_STAT));
if (!stat)
return IRQ_NONE;
+ stat &= dev_priv->irq_enable_reg;
+
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST) {
DRM_WAKEUP(&dev_priv->swi_queue);
}
/* VBLANK interrupt */
- if (stat & RADEON_CRTC_VBLANK_STAT) {
- atomic_inc(&dev->vbl_received);
+ if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
+ int vblank_crtc = dev_priv->vblank_crtc;
+
+ if ((vblank_crtc &
+ (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
+ (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+ if (stat & RADEON_CRTC_VBLANK_STAT)
+ atomic_inc(&dev->vbl_received);
+ if (stat & RADEON_CRTC2_VBLANK_STAT)
+ atomic_inc(&dev->vbl_received2);
+ } else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
+ (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
+ ((stat & RADEON_CRTC2_VBLANK_STAT) &&
+ (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
+ atomic_inc(&dev->vbl_received);
+
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
}
@@ -92,7 +109,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int radeon_emit_irq(drm_device_t * dev)
+static int radeon_emit_irq(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;
@@ -110,7 +127,7 @@ static int radeon_emit_irq(drm_device_t * dev)
return ret;
}
-static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -127,19 +144,30 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
return ret;
}
-int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int radeon_driver_vblank_do_wait(struct drm_device * dev, unsigned int *sequence,
+ int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
-
+ int ack = 0;
+ atomic_t *counter;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
- radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT);
+ if (crtc == DRM_RADEON_VBLANK_CRTC1) {
+ counter = &dev->vbl_received;
+ ack |= RADEON_CRTC_VBLANK_STAT;
+ } else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
+ counter = &dev->vbl_received2;
+ ack |= RADEON_CRTC2_VBLANK_STAT;
+ } else
+ return DRM_ERR(EINVAL);
+
+ radeon_acknowledge_irqs(dev_priv, ack);
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
@@ -148,7 +176,7 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
- (((cur_vblank = atomic_read(&dev->vbl_received))
+ (((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
@@ -156,6 +184,16 @@ int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
+int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+ return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
+}
+
+int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+ return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+}
+
/* Needs the lock as it touches the ring.
*/
int radeon_irq_emit(DRM_IOCTL_ARGS)
@@ -204,9 +242,24 @@ int radeon_irq_wait(DRM_IOCTL_ARGS)
return radeon_wait_irq(dev, irqwait.irq_seq);
}
+static void radeon_enable_interrupt(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+
+ dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
+ if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
+ dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
+
+ if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
+ dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
+
+ RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+ dev_priv->irq_enabled = 1;
+}
+
/* drm_dma.h hooks
*/
-void radeon_driver_irq_preinstall(drm_device_t * dev)
+void radeon_driver_irq_preinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -216,10 +269,11 @@ void radeon_driver_irq_preinstall(drm_device_t * dev)
/* Clear bits if they're already high */
radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
- RADEON_CRTC_VBLANK_STAT));
+ RADEON_CRTC_VBLANK_STAT |
+ RADEON_CRTC2_VBLANK_STAT));
}
-void radeon_driver_irq_postinstall(drm_device_t * dev)
+void radeon_driver_irq_postinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -227,18 +281,48 @@ void radeon_driver_irq_postinstall(drm_device_t * dev)
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
- /* Turn on SW and VBL ints */
- RADEON_WRITE(RADEON_GEN_INT_CNTL,
- RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
+ radeon_enable_interrupt(dev);
}
-void radeon_driver_irq_uninstall(drm_device_t * dev)
+void radeon_driver_irq_uninstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
if (!dev_priv)
return;
+ dev_priv->irq_enabled = 0;
+
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
+
+
+int radeon_vblank_crtc_get(struct drm_device *dev)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+ u32 flag;
+ u32 value;
+
+ flag = RADEON_READ(RADEON_GEN_INT_CNTL);
+ value = 0;
+
+ if (flag & RADEON_CRTC_VBLANK_MASK)
+ value |= DRM_RADEON_VBLANK_CRTC1;
+
+ if (flag & RADEON_CRTC2_VBLANK_MASK)
+ value |= DRM_RADEON_VBLANK_CRTC2;
+ return value;
+}
+
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+ if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+ DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
+ return DRM_ERR(EINVAL);
+ }
+ dev_priv->vblank_crtc = (unsigned int)value;
+ radeon_enable_interrupt(dev);
+ return 0;
+}
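For context, not part of the patch: a client would typically select which CRTC's vblanks it waits on through the new setparam path, roughly as in the sketch below; the ioctl wrapper DRM_IOCTL_RADEON_SETPARAM is assumed to come from radeon_drm.h rather than from this hunk:

	drm_radeon_setparam_t sp;

	sp.param = RADEON_SETPARAM_VBLANK_CRTC;
	sp.value = DRM_RADEON_VBLANK_CRTC2;	/* or CRTC1 | CRTC2 for both */
	ioctl(fd, DRM_IOCTL_RADEON_SETPARAM, &sp);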
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 98c5f1d3a8e7..3ddf86f2abf0 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -39,7 +39,7 @@
static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file * filp_priv,
u32 *offset)
{
u64 off = *offset;
@@ -90,7 +90,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file * filp_priv,
int id, u32 *data)
{
switch (id) {
@@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
dev_priv,
- drm_file_t *filp_priv,
+ struct drm_file *filp_priv,
drm_radeon_kcmd_buffer_t *
cmdbuf,
unsigned int *cmdsz)
@@ -421,7 +421,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
*/
static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
- drm_clip_rect_t * box)
+ struct drm_clip_rect * box)
{
RING_LOCALS;
@@ -439,7 +439,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
/* Emit 1.1 state
*/
static int radeon_emit_state(drm_radeon_private_t * dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file * filp_priv,
drm_radeon_context_regs_t * ctx,
drm_radeon_texture_regs_t * tex,
unsigned int dirty)
@@ -608,7 +608,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
/* Emit 1.2 state
*/
static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file * filp_priv,
drm_radeon_state_t * state)
{
RING_LOCALS;
@@ -844,7 +844,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
* CP command dispatch functions
*/
-static void radeon_cp_dispatch_clear(drm_device_t * dev,
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
drm_radeon_clear_t * clear,
drm_radeon_clear_rect_t * depth_boxes)
{
@@ -852,7 +852,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
unsigned int flags = clear->flags;
u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
int i;
@@ -1335,12 +1335,12 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
ADVANCE_RING();
}
-static void radeon_cp_dispatch_swap(drm_device_t * dev)
+static void radeon_cp_dispatch_swap(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
DRM_DEBUG("\n");
@@ -1412,10 +1412,10 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
ADVANCE_RING();
}
-static void radeon_cp_dispatch_flip(drm_device_t * dev)
+static void radeon_cp_dispatch_flip(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle;
+ struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
@@ -1491,8 +1491,8 @@ typedef struct {
unsigned int vc_format;
} drm_radeon_tcl_prim_t;
-static void radeon_cp_dispatch_vertex(drm_device_t * dev,
- drm_buf_t * buf,
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+ struct drm_buf * buf,
drm_radeon_tcl_prim_t * prim)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1537,7 +1537,7 @@ static void radeon_cp_dispatch_vertex(drm_device_t * dev,
} while (i < nbox);
}
-static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@@ -1554,8 +1554,8 @@ static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
buf->used = 0;
}
-static void radeon_cp_dispatch_indirect(drm_device_t * dev,
- drm_buf_t * buf, int start, int end)
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+ struct drm_buf * buf, int start, int end)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1588,8 +1588,8 @@ static void radeon_cp_dispatch_indirect(drm_device_t * dev,
}
}
-static void radeon_cp_dispatch_indices(drm_device_t * dev,
- drm_buf_t * elt_buf,
+static void radeon_cp_dispatch_indices(struct drm_device * dev,
+ struct drm_buf * elt_buf,
drm_radeon_tcl_prim_t * prim)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1647,13 +1647,13 @@ static void radeon_cp_dispatch_indices(drm_device_t * dev,
#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
static int radeon_cp_dispatch_texture(DRMFILE filp,
- drm_device_t * dev,
+ struct drm_device * dev,
drm_radeon_texture_t * tex,
drm_radeon_tex_image_t * image)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
- drm_buf_t *buf;
+ struct drm_file *filp_priv;
+ struct drm_buf *buf;
u32 format;
u32 *buffer;
const u8 __user *data;
@@ -1881,7 +1881,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
return 0;
}
-static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
@@ -2134,7 +2134,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS)
/* Not sure why this isn't set all the time:
*/
-static int radeon_do_init_pageflip(drm_device_t * dev)
+static int radeon_do_init_pageflip(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -2206,10 +2206,10 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
+ struct drm_file *filp_priv;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_radeon_vertex_t vertex;
drm_radeon_tcl_prim_t prim;
@@ -2289,10 +2289,10 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
+ struct drm_file *filp_priv;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_radeon_indices_t elts;
drm_radeon_tcl_prim_t prim;
int count;
@@ -2438,8 +2438,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_radeon_indirect_t indirect;
RING_LOCALS;
@@ -2507,10 +2507,10 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
+ struct drm_file *filp_priv;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_radeon_vertex2_t vertex;
int i;
unsigned char laststate;
@@ -2603,7 +2603,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
}
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file * filp_priv,
drm_radeon_cmd_header_t header,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
@@ -2728,8 +2728,8 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
return 0;
}
-static int radeon_emit_packet3(drm_device_t * dev,
- drm_file_t * filp_priv,
+static int radeon_emit_packet3(struct drm_device * dev,
+ struct drm_file * filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -2754,16 +2754,16 @@ static int radeon_emit_packet3(drm_device_t * dev,
return 0;
}
-static int radeon_emit_packet3_cliprect(drm_device_t *dev,
- drm_file_t *filp_priv,
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+ struct drm_file *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
int orig_nbox)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t box;
+ struct drm_clip_rect box;
unsigned int cmdsz;
int ret;
- drm_clip_rect_t __user *boxes = cmdbuf->boxes;
+ struct drm_clip_rect __user *boxes = cmdbuf->boxes;
int i = 0;
RING_LOCALS;
@@ -2816,7 +2816,7 @@ static int radeon_emit_packet3_cliprect(drm_device_t *dev,
return 0;
}
-static int radeon_emit_wait(drm_device_t * dev, int flags)
+static int radeon_emit_wait(struct drm_device * dev, int flags)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -2849,9 +2849,9 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = NULL;
+ struct drm_file *filp_priv;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf = NULL;
int idx;
drm_radeon_kcmd_buffer_t cmdbuf;
drm_radeon_cmd_header_t header;
@@ -3085,6 +3085,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
else
value = RADEON_CARD_PCI;
break;
+ case RADEON_PARAM_VBLANK_CRTC:
+ value = radeon_vblank_crtc_get(dev);
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", param.param);
return DRM_ERR(EINVAL);
@@ -3102,7 +3105,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
+ struct drm_file *filp_priv;
drm_radeon_setparam_t sp;
struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3141,6 +3144,9 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
break;
+ case RADEON_SETPARAM_VBLANK_CRTC:
+ return radeon_vblank_crtc_set(dev, sp.value);
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);
@@ -3156,7 +3162,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
-void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void radeon_driver_preclose(struct drm_device *dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3167,7 +3173,7 @@ void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
-void radeon_driver_lastclose(drm_device_t * dev)
+void radeon_driver_lastclose(struct drm_device *dev)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3180,7 +3186,7 @@ void radeon_driver_lastclose(drm_device_t * dev)
radeon_do_release(dev);
}
-int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
+int radeon_driver_open(struct drm_device *dev, struct drm_file *filp_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3202,7 +3208,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
return 0;
}
-void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *filp_priv)
{
struct drm_radeon_driver_file_fields *radeon_priv =
filp_priv->driver_priv;
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index b94fab556809..18c7235f6b73 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -32,7 +32,7 @@
#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
#define SAVAGE_FREELIST_DEBUG 0
-static int savage_do_cleanup_bci(drm_device_t *dev);
+static int savage_do_cleanup_bci(struct drm_device *dev);
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
@@ -203,11 +203,11 @@ uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
/*
* Freelist management
*/
-static int savage_freelist_init(drm_device_t * dev)
+static int savage_freelist_init(struct drm_device * dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_savage_buf_priv_t *entry;
int i;
DRM_DEBUG("count=%d\n", dma->buf_count);
@@ -236,7 +236,7 @@ static int savage_freelist_init(drm_device_t * dev)
return 0;
}
-static drm_buf_t *savage_freelist_get(drm_device_t * dev)
+static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
@@ -269,7 +269,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t * dev)
return NULL;
}
-void savage_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
@@ -535,7 +535,7 @@ static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
-int savage_driver_load(drm_device_t *dev, unsigned long chipset)
+int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
@@ -558,7 +558,7 @@ int savage_driver_load(drm_device_t *dev, unsigned long chipset)
* in drm_addmap. Therefore we add them manually before the maps are
* initialized, and tear them down on last close.
*/
-int savage_driver_firstopen(drm_device_t *dev)
+int savage_driver_firstopen(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
unsigned long mmio_base, fb_base, fb_size, aperture_base;
@@ -655,7 +655,7 @@ int savage_driver_firstopen(drm_device_t *dev)
/*
* Delete MTRRs and free device-private data.
*/
-void savage_driver_lastclose(drm_device_t *dev)
+void savage_driver_lastclose(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
@@ -667,7 +667,7 @@ void savage_driver_lastclose(drm_device_t *dev)
dev_priv->mtrr[i].size, DRM_MTRR_WC);
}
-int savage_driver_unload(drm_device_t *dev)
+int savage_driver_unload(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@@ -676,7 +676,7 @@ int savage_driver_unload(drm_device_t *dev)
return 0;
}
-static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
+static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@@ -711,7 +711,7 @@ static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
dev_priv->texture_offset = init->texture_offset;
dev_priv->texture_size = init->texture_size;
- DRM_GETSAREA();
+ dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
savage_do_cleanup_bci(dev);
@@ -898,7 +898,7 @@ static int savage_do_init_bci(drm_device_t * dev, drm_savage_init_t * init)
return 0;
}
-static int savage_do_cleanup_bci(drm_device_t * dev)
+static int savage_do_cleanup_bci(struct drm_device * dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@@ -1007,9 +1007,9 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
* DMA buffer management
*/
-static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
+static int savage_bci_get_buffers(DRMFILE filp, struct drm_device *dev, struct drm_dma *d)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
@@ -1034,13 +1034,13 @@ static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
int savage_bci_buffers(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
- drm_dma_t d;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_dma d;
int ret = 0;
LOCK_TEST_WITH_RETURN(dev, filp);
- DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *) data, sizeof(d));
+ DRM_COPY_FROM_USER_IOCTL(d, (struct drm_dma __user *) data, sizeof(d));
/* Please don't send us buffers.
*/
@@ -1064,14 +1064,14 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
ret = savage_bci_get_buffers(filp, dev, &d);
}
- DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *) data, d, sizeof(d));
+ DRM_COPY_TO_USER_IOCTL((struct drm_dma __user *) data, d, sizeof(d));
return ret;
}
-void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
+void savage_reclaim_buffers(struct drm_device *dev, DRMFILE filp)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
@@ -1085,7 +1085,7 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
/*i830_flush_queue(dev); */
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_savage_buf_priv_t *buf_priv = buf->dev_private;
if (buf->filp == filp && buf_priv &&
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
index e1148e8e7994..8a576ef01821 100644
--- a/drivers/char/drm/savage_drm.h
+++ b/drivers/char/drm/savage_drm.h
@@ -47,7 +47,7 @@
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
- drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
+ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS +
1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
@@ -113,7 +113,7 @@ typedef struct drm_savage_cmdbuf {
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
- drm_clip_rect_t __user *box_addr;
+ struct drm_clip_rect __user *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index 8f04b3d82292..5fd54de4280e 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -58,7 +58,7 @@ typedef struct drm_savage_buf_priv {
struct drm_savage_buf_priv *next;
struct drm_savage_buf_priv *prev;
drm_savage_age_t age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
} drm_savage_buf_priv_t;
typedef struct drm_savage_dma_page {
@@ -192,7 +192,7 @@ typedef struct drm_savage_private {
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
- const drm_clip_rect_t * pbox);
+ const struct drm_clip_rect * pbox);
void (*dma_flush) (struct drm_savage_private * dev_priv);
} drm_savage_private_t;
@@ -203,22 +203,22 @@ extern int savage_bci_buffers(DRM_IOCTL_ARGS);
/* BCI functions */
extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
unsigned int flags);
-extern void savage_freelist_put(drm_device_t * dev, drm_buf_t * buf);
+extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf);
extern void savage_dma_reset(drm_savage_private_t * dev_priv);
extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
unsigned int n);
-extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
-extern int savage_driver_firstopen(drm_device_t *dev);
-extern void savage_driver_lastclose(drm_device_t *dev);
-extern int savage_driver_unload(drm_device_t *dev);
-extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
+extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int savage_driver_firstopen(struct drm_device *dev);
+extern void savage_driver_lastclose(struct drm_device *dev);
+extern int savage_driver_unload(struct drm_device *dev);
+extern void savage_reclaim_buffers(struct drm_device * dev, DRMFILE filp);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
- const drm_clip_rect_t * pbox);
+ const struct drm_clip_rect * pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
- const drm_clip_rect_t * pbox);
+ const struct drm_clip_rect * pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index 1ca1e9cb5a33..77497841478a 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -27,7 +27,7 @@
#include "savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
- const drm_clip_rect_t * pbox)
+ const struct drm_clip_rect * pbox)
{
uint32_t scstart = dev_priv->state.s3d.new_scstart;
uint32_t scend = dev_priv->state.s3d.new_scend;
@@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
}
void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
- const drm_clip_rect_t * pbox)
+ const struct drm_clip_rect * pbox)
{
uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@@ -277,7 +277,7 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
- const drm_buf_t * dmabuf)
+ const struct drm_buf * dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->prim.prim;
@@ -536,7 +536,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
const uint16_t *idx,
- const drm_buf_t * dmabuf)
+ const struct drm_buf * dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->idx.prim;
@@ -792,7 +792,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
const drm_savage_cmd_header_t *data,
unsigned int nbox,
- const drm_clip_rect_t *boxes)
+ const struct drm_clip_rect *boxes)
{
unsigned int flags = cmd_header->clear0.flags;
unsigned int clear_cmd;
@@ -861,7 +861,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
}
static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
- unsigned int nbox, const drm_clip_rect_t *boxes)
+ unsigned int nbox, const struct drm_clip_rect *boxes)
{
unsigned int swap_cmd;
unsigned int i;
@@ -892,11 +892,11 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t *start,
const drm_savage_cmd_header_t *end,
- const drm_buf_t * dmabuf,
+ const struct drm_buf * dmabuf,
const unsigned int *vtxbuf,
unsigned int vb_size, unsigned int vb_stride,
unsigned int nbox,
- const drm_clip_rect_t *boxes)
+ const struct drm_clip_rect *boxes)
{
unsigned int i, j;
int ret;
@@ -957,13 +957,13 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *dmabuf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *dmabuf;
drm_savage_cmdbuf_t cmdbuf;
drm_savage_cmd_header_t *kcmd_addr = NULL;
drm_savage_cmd_header_t *first_draw_cmd;
unsigned int *kvb_addr = NULL;
- drm_clip_rect_t *kbox_addr = NULL;
+ struct drm_clip_rect *kbox_addr = NULL;
unsigned int i, j;
int ret = 0;
@@ -1019,7 +1019,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
cmdbuf.vb_addr = kvb_addr;
}
if (cmdbuf.nbox) {
- kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
+ kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
if (kbox_addr == NULL) {
ret = DRM_ERR(ENOMEM);
@@ -1027,7 +1027,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
}
if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
- cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
+ cmdbuf.nbox * sizeof(struct drm_clip_rect))) {
ret = DRM_ERR(EFAULT);
goto done;
}
@@ -1158,7 +1158,7 @@ done:
/* If we didn't need to allocate them, these'll be NULL */
drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
- drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
+ drm_free(kbox_addr, cmdbuf.nbox * sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
return ret;
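The savage_state.c hunks above are purely mechanical: the old DRM typedefs (drm_clip_rect_t, drm_buf_t, drm_device_dma_t) are spelled out as plain struct tags, including in the sizeof() expressions passed to drm_alloc()/drm_free(). A minimal sketch of the pattern with a hypothetical driver-local helper, not taken from the patch:

#include <linux/types.h>

struct drm_clip_rect {                  /* illustrative definition only */
	unsigned short x1, y1, x2, y2;
};

/* old prototype:  static void clamp_box(drm_clip_rect_t *box, ...);  */
static void clamp_box(struct drm_clip_rect *box,
		      unsigned short w, unsigned short h)
{
	if (box->x2 > w)
		box->x2 = w;
	if (box->y2 > h)
		box->y2 = h;
}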
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 690e0af8e7c2..1912f5857051 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -35,7 +35,7 @@ static struct pci_device_id pciidlist[] = {
sisdrv_PCI_IDS
};
-static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
+static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_sis_private_t *dev_priv;
int ret;
@@ -54,7 +54,7 @@ static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
return ret;
}
-static int sis_driver_unload(drm_device_t *dev)
+static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index 70d4ede75fe8..5630df874353 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -46,6 +46,7 @@ enum sis_family {
#include "drm_sman.h"
+
#define SIS_BASE (dev_priv->mmio)
#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);
@@ -53,7 +54,7 @@ enum sis_family {
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
- drm_sman_t sman;
+ struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
@@ -61,9 +62,9 @@ typedef struct drm_sis_private {
unsigned long agp_offset;
} drm_sis_private_t;
-extern int sis_idle(drm_device_t *dev);
-extern void sis_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
-extern void sis_lastclose(drm_device_t *dev);
+extern int sis_idle(struct drm_device *dev);
+extern void sis_reclaim_buffers_locked(struct drm_device *dev, struct file *filp);
+extern void sis_lastclose(struct drm_device *dev);
extern drm_ioctl_desc_t sis_ioctls[];
extern int sis_max_ioctl;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index d26f5dbb7853..441bbdbf1510 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -94,7 +94,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS)
{
- drm_sman_mm_t sman_mm;
+ struct drm_sman_mm sman_mm;
sman_mm.private = (void *)0xFFFFFFFF;
sman_mm.allocate = sis_sman_mm_allocate;
sman_mm.free = sis_sman_mm_free;
@@ -123,14 +123,14 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
return 0;
}
-static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file * priv,
unsigned long data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data;
drm_sis_mem_t mem;
int retval = 0;
- drm_memblock_item_t *item;
+ struct drm_memblock_item *item;
DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
@@ -229,12 +229,12 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
return sis_drm_alloc(dev, priv, data, AGP_TYPE);
}
-static drm_local_map_t *sis_reg_init(drm_device_t *dev)
+static drm_local_map_t *sis_reg_init(struct drm_device *dev)
{
- drm_map_list_t *entry;
+ struct drm_map_list *entry;
drm_local_map_t *map;
- list_for_each_entry(entry, &dev->maplist->head, head) {
+ list_for_each_entry(entry, &dev->maplist, head) {
map = entry->map;
if (!map)
continue;
@@ -245,7 +245,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev)
return NULL;
}
-int sis_idle(drm_device_t *dev)
+int sis_idle(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
uint32_t idle_reg;
@@ -314,10 +314,10 @@ void sis_lastclose(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_file_t *priv = filp->private_data;
+ struct drm_file *priv = filp->private_data;
mutex_lock(&dev->struct_mutex);
if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
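sis_reg_init() above also switches from walking &dev->maplist->head to iterating the list_head directly with list_for_each_entry(), the same conversion applied to via_verifier.c later in this series. A self-contained sketch of the idiom, with hypothetical types:

#include <linux/list.h>

struct map_entry {                      /* hypothetical stand-in type */
	struct list_head head;
	unsigned long offset;
	unsigned long size;
};

static struct map_entry *find_map(struct list_head *maplist,
				  unsigned long offset)
{
	struct map_entry *e;

	/* list_for_each_entry() performs the container_of() step that the
	 * older list_for_each() + cast idiom open-coded. */
	list_for_each_entry(e, maplist, head) {
		if (e->offset <= offset && offset < e->offset + e->size)
			return e;
	}
	return NULL;
}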
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 13a9c5ca4593..7ff2b623c2d4 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -151,7 +151,7 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
-int via_dma_cleanup(drm_device_t * dev)
+int via_dma_cleanup(struct drm_device * dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv =
@@ -169,7 +169,7 @@ int via_dma_cleanup(drm_device_t * dev)
return 0;
}
-static int via_initialize(drm_device_t * dev,
+static int via_initialize(struct drm_device * dev,
drm_via_private_t * dev_priv,
drm_via_dma_init_t * init)
{
@@ -262,7 +262,7 @@ static int via_dma_init(DRM_IOCTL_ARGS)
return retcode;
}
-static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv;
uint32_t *vb;
@@ -316,7 +316,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
return 0;
}
-int via_driver_dma_quiescent(drm_device_t * dev)
+int via_driver_dma_quiescent(struct drm_device * dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
@@ -356,7 +356,7 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS)
return 0;
}
-static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
+static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 2881a06b6f55..832de1d9ba7e 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -207,7 +207,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
*/
static void
-via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -273,10 +273,9 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
- if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
+ if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
return DRM_ERR(ENOMEM);
- memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
@@ -289,7 +288,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
}
static void
-via_abort_dmablit(drm_device_t *dev, int engine)
+via_abort_dmablit(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -297,7 +296,7 @@ via_abort_dmablit(drm_device_t *dev, int engine)
}
static void
-via_dmablit_engine_off(drm_device_t *dev, int engine)
+via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -314,7 +313,7 @@ via_dmablit_engine_off(drm_device_t *dev, int engine)
*/
void
-via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
@@ -433,7 +432,7 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
*/
static int
-via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -466,7 +465,7 @@ static void
via_dmablit_timer(unsigned long data)
{
drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
- drm_device_t *dev = blitq->dev;
+ struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -502,7 +501,7 @@ static void
via_dmablit_workqueue(struct work_struct *work)
{
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
- drm_device_t *dev = blitq->dev;
+ struct drm_device *dev = blitq->dev;
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
@@ -545,7 +544,7 @@ via_dmablit_workqueue(struct work_struct *work)
void
-via_init_dmablit(drm_device_t *dev)
+via_init_dmablit(struct drm_device *dev)
{
int i,j;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -582,7 +581,7 @@ via_init_dmablit(drm_device_t *dev)
static int
-via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
@@ -730,7 +729,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
static int
-via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
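via_alloc_desc_pages() above folds a kmalloc()/memset() pair into a single kcalloc() call, which zeroes the array and also rejects a num * size multiplication that would overflow. A reduced sketch with the DRM_ERR() wrapper dropped in favour of a plain NULL return:

#include <linux/slab.h>

static void **alloc_desc_pages(unsigned int num_desc_pages)
{
	/* One call allocates and zeroes the pointer array; the old code
	 * did kmalloc(sizeof(void *) * n) followed by memset(..., 0, ...). */
	return kcalloc(num_desc_pages, sizeof(void *), GFP_KERNEL);
}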
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
index f4036cd5d0e0..6f6a513d5147 100644
--- a/drivers/char/drm/via_dmablit.h
+++ b/drivers/char/drm/via_dmablit.h
@@ -59,7 +59,7 @@ typedef struct _drm_via_sg_info {
} drm_via_sg_info_t;
typedef struct _drm_via_blitq {
- drm_device_t *dev;
+ struct drm_device *dev;
uint32_t cur_blit_handle;
uint32_t done_blit_handle;
unsigned serviced;
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index e4ee97d7156f..8f53c76062e9 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -40,7 +40,7 @@
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define XVMCLOCKPTR(saPriv,lockNo) \
- ((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
+ ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
(VIA_MAX_CACHELINE_SIZE - 1)) & \
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
VIA_MAX_CACHELINE_SIZE*(lockNo)))
@@ -182,7 +182,7 @@ typedef struct _drm_via_tex_region {
typedef struct _drm_via_sarea {
unsigned int dirty;
unsigned int nbox;
- drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index b46ca8e6306d..576711564a11 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -87,7 +87,7 @@ typedef struct drm_via_private {
uint32_t irq_pending_mask;
int *irq_map;
unsigned int idle_fault;
- drm_sman_t sman;
+ struct drm_sman sman;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
@@ -123,31 +123,31 @@ extern int via_wait_irq(DRM_IOCTL_ARGS);
extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
extern int via_dma_blit( DRM_IOCTL_ARGS );
-extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
-extern int via_driver_unload(drm_device_t *dev);
+extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int via_driver_unload(struct drm_device *dev);
-extern int via_init_context(drm_device_t * dev, int context);
-extern int via_final_context(drm_device_t * dev, int context);
+extern int via_init_context(struct drm_device * dev, int context);
+extern int via_final_context(struct drm_device * dev, int context);
-extern int via_do_cleanup_map(drm_device_t * dev);
-extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
-extern void via_driver_irq_preinstall(drm_device_t * dev);
-extern void via_driver_irq_postinstall(drm_device_t * dev);
-extern void via_driver_irq_uninstall(drm_device_t * dev);
+extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device * dev);
-extern int via_dma_cleanup(drm_device_t * dev);
+extern int via_dma_cleanup(struct drm_device * dev);
extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(drm_device_t * dev);
+extern int via_driver_dma_quiescent(struct drm_device * dev);
extern void via_init_futex(drm_via_private_t * dev_priv);
extern void via_cleanup_futex(drm_via_private_t * dev_priv);
extern void via_release_futex(drm_via_private_t * dev_priv, int context);
-extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
-extern void via_lastclose(drm_device_t *dev);
+extern void via_reclaim_buffers_locked(struct drm_device *dev, struct file *filp);
+extern void via_lastclose(struct drm_device *dev);
-extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
-extern void via_init_dmablit(drm_device_t *dev);
+extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
+extern void via_init_dmablit(struct drm_device *dev);
#endif
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 1ac5941ad237..8dc99b5fbab6 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -98,7 +98,7 @@ static unsigned time_diff(struct timeval *now, struct timeval *then)
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
int handled = 0;
@@ -163,7 +163,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
}
}
-int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_vblank;
@@ -191,7 +191,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
}
static int
-via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
+via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -244,7 +244,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
* drm_dma.h hooks
*/
-void via_driver_irq_preinstall(drm_device_t * dev)
+void via_driver_irq_preinstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -293,7 +293,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
}
}
-void via_driver_irq_postinstall(drm_device_t * dev)
+void via_driver_irq_postinstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -312,7 +312,7 @@ void via_driver_irq_postinstall(drm_device_t * dev)
}
}
-void via_driver_irq_uninstall(drm_device_t * dev)
+void via_driver_irq_uninstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 4e3fc072aa3b..7fb9d2a2cce2 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -25,13 +25,13 @@
#include "via_drm.h"
#include "via_drv.h"
-static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
+static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- DRM_GETSAREA();
+ dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
@@ -68,7 +68,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
return 0;
}
-int via_do_cleanup_map(drm_device_t * dev)
+int via_do_cleanup_map(struct drm_device * dev)
{
via_dma_cleanup(dev);
@@ -95,7 +95,7 @@ int via_map_init(DRM_IOCTL_ARGS)
return -EINVAL;
}
-int via_driver_load(drm_device_t *dev, unsigned long chipset)
+int via_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_via_private_t *dev_priv;
int ret = 0;
@@ -115,7 +115,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset)
return ret;
}
-int via_driver_unload(drm_device_t *dev)
+int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 2fcf0577a7aa..85d56acd9d82 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -127,7 +127,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
drm_via_mem_t mem;
int retval = 0;
- drm_memblock_item_t *item;
+ struct drm_memblock_item *item;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned long tmpSize;
@@ -188,10 +188,10 @@ int via_mem_free(DRM_IOCTL_ARGS)
}
-void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp)
{
drm_via_private_t *dev_priv = dev->dev_private;
- drm_file_t *priv = filp->private_data;
+ struct drm_file *priv = filp->private_data;
mutex_lock(&dev->struct_mutex);
if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 2e7e08078287..832d48356e91 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -252,10 +252,9 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset,
unsigned long size,
- drm_device_t * dev)
+ struct drm_device * dev)
{
- struct list_head *list;
- drm_map_list_t *r_list;
+ struct drm_map_list *r_list;
drm_local_map_t *map = seq->map_cache;
if (map && map->offset <= offset
@@ -263,8 +262,7 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
return map;
}
- list_for_each(list, &dev->maplist->head) {
- r_list = (drm_map_list_t *) list;
+ list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
@@ -964,7 +962,7 @@ via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
- drm_device_t * dev, int agp)
+ struct drm_device * dev, int agp)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -1039,7 +1037,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
}
int
-via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
+via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
unsigned int size)
{
diff --git a/drivers/char/drm/via_verifier.h b/drivers/char/drm/via_verifier.h
index b77f59df0278..28b50296a7bd 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/char/drm/via_verifier.h
@@ -47,7 +47,7 @@ typedef struct {
drm_via_sequence_t unfinished;
int agp_texture;
int multitex;
- drm_device_t *dev;
+ struct drm_device *dev;
drm_local_map_t *map_cache;
uint32_t vertex_count;
int agp;
@@ -55,8 +55,8 @@ typedef struct {
} drm_via_state_t;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
- drm_device_t * dev, int agp);
-extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
+ struct drm_device * dev, int agp);
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size);
#endif
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index d1bfbaa2aa02..2e7ae42a5503 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -1121,8 +1121,6 @@ static void change_speed(struct esp_struct *info)
/*
* Set up parity check flag
*/
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (I_INPCK(info->tty))
info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
@@ -1920,11 +1918,6 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
struct esp_struct *info = (struct esp_struct *)tty->driver_data;
unsigned long flags;
- if ( (tty->termios->c_cflag == old_termios->c_cflag)
- && ( RELEVANT_IFLAG(tty->termios->c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
change_speed(info);
spin_lock_irqsave(&info->lock, flags);
@@ -2466,7 +2459,7 @@ static int __init espserial_init(void)
return 1;
}
- info = kmalloc(sizeof(struct esp_struct), GFP_KERNEL);
+ info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
if (!info)
{
@@ -2476,7 +2469,6 @@ static int __init espserial_init(void)
return 1;
}
- memset((void *)info, 0, sizeof(struct esp_struct));
spin_lock_init(&info->lock);
/* rx_trigger, tx_trigger are needed by autoconfig */
info->config.rx_trigger = rx_trigger;
@@ -2534,7 +2526,7 @@ static int __init espserial_init(void)
if (!dma)
info->stat_flags |= ESP_STAT_NEVER_DMA;
- info = kmalloc(sizeof(struct esp_struct), GFP_KERNEL);
+ info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
if (!info)
{
printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
@@ -2543,7 +2535,6 @@ static int __init espserial_init(void)
return 0;
}
- memset((void *)info, 0, sizeof(struct esp_struct));
/* rx_trigger, tx_trigger are needed by autoconfig */
info->config.rx_trigger = rx_trigger;
info->config.tx_trigger = tx_trigger;
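espserial_init() above gets the same allocation cleanup as the ipmi_msghandler.c and hvcs.c hunks later in this series: kzalloc() replaces kmalloc() plus an explicit memset(). A minimal sketch with a hypothetical structure:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_port {                      /* hypothetical reduced structure */
	spinlock_t lock;
	int rx_trigger;
	int tx_trigger;
};

static struct demo_port *demo_port_alloc(int rx, int tx)
{
	/* kzalloc() returns zeroed memory, so no separate memset() pass. */
	struct demo_port *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return NULL;            /* caller decides how to report this */
	spin_lock_init(&info->lock);
	info->rx_trigger = rx;
	info->tx_trigger = tx;
	return info;
}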
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c
index 8ea02755b1c9..8facf3e25c49 100644
--- a/drivers/char/generic_serial.c
+++ b/drivers/char/generic_serial.c
@@ -43,16 +43,6 @@ static int gs_debug;
#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__)
#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __FUNCTION__)
-#define NEW_WRITE_LOCKING 1
-#if NEW_WRITE_LOCKING
-#define DECL /* Nothing */
-#define LOCKIT mutex_lock(& port->port_write_mutex);
-#define RELEASEIT mutex_unlock(&port->port_write_mutex);
-#else
-#define DECL unsigned long flags;
-#define LOCKIT save_flags (flags);cli ()
-#define RELEASEIT restore_flags (flags)
-#endif
#define RS_EVENT_WRITE_WAKEUP 1
@@ -62,7 +52,6 @@ module_param(gs_debug, int, 0644);
void gs_put_char(struct tty_struct * tty, unsigned char ch)
{
struct gs_port *port;
- DECL
func_enter ();
@@ -75,11 +64,11 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch)
if (! (port->flags & ASYNC_INITIALIZED)) return;
/* Take a lock on the serial transmit buffer! */
- LOCKIT;
+ mutex_lock(& port->port_write_mutex);
if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
/* Sorry, buffer is full, drop character. Update statistics???? -- REW */
- RELEASEIT;
+ mutex_unlock(&port->port_write_mutex);
return;
}
@@ -87,13 +76,11 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch)
port->xmit_head &= SERIAL_XMIT_SIZE - 1;
port->xmit_cnt++; /* Characters in buffer */
- RELEASEIT;
+ mutex_unlock(&port->port_write_mutex);
func_exit ();
}
-#ifdef NEW_WRITE_LOCKING
-
/*
> Problems to take into account are:
> -1- Interrupts that empty part of the buffer.
@@ -166,90 +153,6 @@ int gs_write(struct tty_struct * tty,
func_exit ();
return total;
}
-#else
-/*
-> Problems to take into account are:
-> -1- Interrupts that empty part of the buffer.
-> -2- page faults on the access to userspace.
-> -3- Other processes that are also trying to do a "write".
-*/
-
-int gs_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
-{
- struct gs_port *port;
- int c, total = 0;
- int t;
- unsigned long flags;
-
- func_enter ();
-
- /* The standard serial driver returns 0 in this case.
- That sounds to me as "No error, I just didn't get to writing any
- bytes. Feel free to try again."
- The "official" way to write n bytes from buf is:
-
- for (nwritten = 0;nwritten < n;nwritten += rv) {
- rv = write (fd, buf+nwritten, n-nwritten);
- if (rv < 0) break; // Error: bail out. //
- }
-
- which will loop endlessly in this case. The manual page for write
- agrees with me. In practise almost everybody writes
- "write (fd, buf,n);" but some people might have had to deal with
- incomplete writes in the past and correctly implemented it by now...
- */
-
- if (!tty) return -EIO;
-
- port = tty->driver_data;
- if (!port || !port->xmit_buf)
- return -EIO;
-
- local_save_flags(flags);
- while (1) {
- cli();
- c = count;
-
- /* This is safe because we "OWN" the "head". Noone else can
- change the "head": we own the port_write_mutex. */
- /* Don't overrun the end of the buffer */
- t = SERIAL_XMIT_SIZE - port->xmit_head;
- if (t < c) c = t;
-
- /* This is safe because the xmit_cnt can only decrease. This
- would increase "t", so we might copy too little chars. */
- /* Don't copy past the "head" of the buffer */
- t = SERIAL_XMIT_SIZE - 1 - port->xmit_cnt;
- if (t < c) c = t;
-
- /* Can't copy more? break out! */
- if (c <= 0) {
- local_restore_flags(flags);
- break;
- }
- memcpy(port->xmit_buf + port->xmit_head, buf, c);
- port->xmit_head = ((port->xmit_head + c) &
- (SERIAL_XMIT_SIZE-1));
- port->xmit_cnt += c;
- local_restore_flags(flags);
- buf += c;
- count -= c;
- total += c;
- }
-
- if (port->xmit_cnt &&
- !tty->stopped &&
- !tty->hw_stopped &&
- !(port->flags & GS_TX_INTEN)) {
- port->flags |= GS_TX_INTEN;
- port->rd->enable_tx_interrupts (port);
- }
- func_exit ();
- return total;
-}
-
-#endif
@@ -737,23 +640,6 @@ void gs_set_termios (struct tty_struct * tty,
gs_dprintk (GS_DEBUG_TERMIOS, "termios structure (%p):\n", tiosp);
}
- /* This is an optimization that is only allowed for dumb cards */
- /* Smart cards require knowledge of iflags and oflags too: that
- might change hardware cooking mode.... */
- if (old_termios) {
- if( (tiosp->c_iflag == old_termios->c_iflag)
- && (tiosp->c_oflag == old_termios->c_oflag)
- && (tiosp->c_cflag == old_termios->c_cflag)
- && (tiosp->c_lflag == old_termios->c_lflag)
- && (tiosp->c_line == old_termios->c_line)
- && (memcmp(tiosp->c_cc, old_termios->c_cc, NCC) == 0)) {
- gs_dprintk(GS_DEBUG_TERMIOS, "gs_set_termios: optimized away\n");
- return /* 0 */;
- }
- } else
- gs_dprintk(GS_DEBUG_TERMIOS, "gs_set_termios: no old_termios: "
- "no optimization\n");
-
if(old_termios && (gs_debug & GS_DEBUG_TERMIOS)) {
if(tiosp->c_iflag != old_termios->c_iflag) printk("c_iflag changed\n");
if(tiosp->c_oflag != old_termios->c_oflag) printk("c_oflag changed\n");
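With the NEW_WRITE_LOCKING conditionals removed, generic_serial always takes port_write_mutex directly instead of hiding the choice between a mutex and cli()/restore_flags() behind the LOCKIT/RELEASEIT macros. The surviving gs_put_char() logic reduces to roughly the following sketch (simplified, hypothetical names):

#include <linux/mutex.h>
#include <linux/errno.h>

#define DEMO_XMIT_SIZE 4096             /* stand-in for SERIAL_XMIT_SIZE */

struct demo_gs_port {                   /* hypothetical reduced port */
	struct mutex port_write_mutex;
	unsigned char *xmit_buf;
	int xmit_head;
	int xmit_cnt;
};

static int demo_put_char(struct demo_gs_port *port, unsigned char ch)
{
	int ret = 0;

	mutex_lock(&port->port_write_mutex);
	if (port->xmit_cnt >= DEMO_XMIT_SIZE - 1) {
		ret = -ENOSPC;          /* buffer full, drop the character */
	} else {
		port->xmit_buf[port->xmit_head++] = ch;
		port->xmit_head &= DEMO_XMIT_SIZE - 1;
		port->xmit_cnt++;
	}
	mutex_unlock(&port->port_write_mutex);
	return ret;
}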
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 9e1fc02967ff..69f0a2993af0 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -173,7 +173,6 @@ static void gen_rtc_interrupt(unsigned long arg)
static ssize_t gen_rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t retval;
@@ -183,18 +182,10 @@ static ssize_t gen_rtc_read(struct file *file, char __user *buf,
if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data)
return -EAGAIN;
- add_wait_queue(&gen_rtc_wait, &wait);
- retval = -ERESTARTSYS;
-
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- data = xchg(&gen_rtc_irq_data, 0);
- if (data)
- break;
- if (signal_pending(current))
- goto out;
- schedule();
- }
+ retval = wait_event_interruptible(gen_rtc_wait,
+ (data = xchg(&gen_rtc_irq_data, 0)));
+ if (retval)
+ goto out;
/* first test allows optimizer to nuke this case for 32-bit machines */
if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
@@ -206,10 +197,7 @@ static ssize_t gen_rtc_read(struct file *file, char __user *buf,
retval = put_user(data, (unsigned long __user *)buf) ?:
sizeof(unsigned long);
}
- out:
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&gen_rtc_wait, &wait);
-
+out:
return retval;
}
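gen_rtc_read() above replaces an open-coded add_wait_queue()/set_current_state()/schedule() loop with wait_event_interruptible(), which re-evaluates its condition on every wakeup and returns -ERESTARTSYS if a signal arrives first. A self-contained sketch of the same shape, with hypothetical names:

#include <linux/wait.h>
#include <linux/sched.h>
#include <asm/system.h>                 /* xchg() on kernels of this era */

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static unsigned long demo_irq_data;     /* set from interrupt context */

static int demo_fetch(unsigned long *out)
{
	unsigned long data;
	int ret;

	/* The condition may run several times, so grabbing the datum with
	 * xchg() inside it keeps producer and consumer consistent. */
	ret = wait_event_interruptible(demo_wait,
				       (data = xchg(&demo_irq_data, 0)));
	if (ret)
		return ret;             /* -ERESTARTSYS: interrupted by signal */
	*out = data;
	return 0;
}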
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0be700f4e8fd..ba0e74ad74bb 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -29,6 +29,7 @@
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
+#include <linux/clocksource.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -51,8 +52,34 @@
#define HPET_RANGE_SIZE 1024 /* from HPET spec */
+#if BITS_PER_LONG == 64
+#define write_counter(V, MC) writeq(V, MC)
+#define read_counter(MC) readq(MC)
+#else
+#define write_counter(V, MC) writel(V, MC)
+#define read_counter(MC) readl(MC)
+#endif
+
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
+static void __iomem *hpet_mctr;
+
+static cycle_t read_hpet(void)
+{
+ return (cycle_t)read_counter((void __iomem *)hpet_mctr);
+}
+
+static struct clocksource clocksource_hpet = {
+ .name = "hpet",
+ .rating = 250,
+ .read = read_hpet,
+ .mask = 0xffffffffffffffff,
+ .mult = 0, /*to be calculated*/
+ .shift = 10,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+static struct clocksource *hpet_clocksource;
+
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
/* A lock for concurrent intermodule access to hpet and isr hpet activity. */
@@ -79,7 +106,7 @@ struct hpets {
struct hpets *hp_next;
struct hpet __iomem *hp_hpet;
unsigned long hp_hpet_phys;
- struct time_interpolator *hp_interpolator;
+ struct clocksource *hp_clocksource;
unsigned long long hp_tick_freq;
unsigned long hp_delta;
unsigned int hp_ntimer;
@@ -94,13 +121,6 @@ static struct hpets *hpets;
#define HPET_PERIODIC 0x0004
#define HPET_SHARED_IRQ 0x0008
-#if BITS_PER_LONG == 64
-#define write_counter(V, MC) writeq(V, MC)
-#define read_counter(MC) readq(MC)
-#else
-#define write_counter(V, MC) writel(V, MC)
-#define read_counter(MC) readl(MC)
-#endif
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
@@ -737,27 +757,6 @@ static ctl_table dev_root[] = {
static struct ctl_table_header *sysctl_header;
-static void hpet_register_interpolator(struct hpets *hpetp)
-{
-#ifdef CONFIG_TIME_INTERPOLATION
- struct time_interpolator *ti;
-
- ti = kzalloc(sizeof(*ti), GFP_KERNEL);
- if (!ti)
- return;
-
- ti->source = TIME_SOURCE_MMIO64;
- ti->shift = 10;
- ti->addr = &hpetp->hp_hpet->hpet_mc;
- ti->frequency = hpetp->hp_tick_freq;
- ti->drift = HPET_DRIFT;
- ti->mask = -1;
-
- hpetp->hp_interpolator = ti;
- register_time_interpolator(ti);
-#endif
-}
-
/*
* Adjustment for when arming the timer with
* initial conditions. That is, main counter
@@ -909,7 +908,16 @@ int hpet_alloc(struct hpet_data *hdp)
}
hpetp->hp_delta = hpet_calibrate(hpetp);
- hpet_register_interpolator(hpetp);
+
+ if (!hpet_clocksource) {
+ hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
+ CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
+ clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq,
+ clocksource_hpet.shift);
+ clocksource_register(&clocksource_hpet);
+ hpetp->hp_clocksource = &clocksource_hpet;
+ hpet_clocksource = &clocksource_hpet;
+ }
return 0;
}
@@ -995,7 +1003,7 @@ static int hpet_acpi_add(struct acpi_device *device)
static int hpet_acpi_remove(struct acpi_device *device, int type)
{
- /* XXX need to unregister interpolator, dealloc mem, etc */
+ /* XXX need to unregister clocksource, dealloc mem, etc */
return -EINVAL;
}
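hpet_alloc() now registers the main counter as a generic clocksource instead of a time interpolator. The sketch below shows the general registration pattern for an MMIO counter in the 2.6.22-era API used by the hunk above; the names are hypothetical and the ia64 fsys_mmio/CLKSRC_FSYS_MMIO_SET detail is omitted:

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *demo_mctr;         /* mapped free-running counter */

static cycle_t demo_read(void)          /* read() took no argument then */
{
	return (cycle_t)readl(demo_mctr);
}

static struct clocksource demo_clocksource = {
	.name   = "demo",
	.rating = 250,
	.read   = demo_read,
	.mask   = CLOCKSOURCE_MASK(32),
	.shift  = 10,
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int demo_clocksource_register(unsigned long counter_hz)
{
	/* mult converts counter ticks to (nanoseconds << shift). */
	demo_clocksource.mult = clocksource_hz2mult(counter_hz,
						    demo_clocksource.shift);
	return clocksource_register(&demo_clocksource);
}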
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 322bc5f7d86b..83c1151ec7a2 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -674,11 +674,12 @@ static const cpumask_t cpus_in_xmon = CPU_MASK_NONE;
* calling hvc_poll() who determines whether a console adapter support
* interrupts.
*/
-int khvcd(void *unused)
+static int khvcd(void *unused)
{
int poll_mask;
struct hvc_struct *hp;
+ set_freezable();
__set_current_state(TASK_RUNNING);
do {
poll_mask = 0;
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index b37f1d5a5be6..a08f8f981c11 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -472,7 +472,7 @@ static void hvc_handle_event(struct HvLpEvent *event)
}
}
-static int send_open(HvLpIndex remoteLp, void *sem)
+static int __init send_open(HvLpIndex remoteLp, void *sem)
{
return HvCallEvent_signalLpEventFast(remoteLp,
HvLpEvent_Type_VirtualIo,
@@ -484,7 +484,7 @@ static int send_open(HvLpIndex remoteLp, void *sem)
0, 0, 0, 0);
}
-static int hvc_vio_init(void)
+static int __init hvc_vio_init(void)
{
atomic_t wait_flag;
int rc;
@@ -552,14 +552,14 @@ static int hvc_vio_init(void)
}
module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
-static void hvc_vio_exit(void)
+static void __exit hvc_vio_exit(void)
{
vio_unregister_driver(&hvc_vio_driver);
}
module_exit(hvc_vio_exit);
/* the device tree order defines our numbering */
-static int hvc_find_vtys(void)
+static int __init hvc_find_vtys(void)
{
struct device_node *vty;
int num_found = 0;
diff --git a/drivers/char/hvc_lguest.c b/drivers/char/hvc_lguest.c
new file mode 100644
index 000000000000..e7b889e404a7
--- /dev/null
+++ b/drivers/char/hvc_lguest.c
@@ -0,0 +1,102 @@
+/* Simple console for lguest.
+ *
+ * Copyright (C) 2006 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/lguest_bus.h>
+#include "hvc_console.h"
+
+static char inbuf[256];
+static struct lguest_dma cons_input = { .used_len = 0,
+ .addr[0] = __pa(inbuf),
+ .len[0] = sizeof(inbuf),
+ .len[1] = 0 };
+
+static int put_chars(u32 vtermno, const char *buf, int count)
+{
+ struct lguest_dma dma;
+
+ /* FIXME: what if it's over a page boundary? */
+ dma.len[0] = count;
+ dma.len[1] = 0;
+ dma.addr[0] = __pa(buf);
+
+ lguest_send_dma(LGUEST_CONSOLE_DMA_KEY, &dma);
+ return count;
+}
+
+static int get_chars(u32 vtermno, char *buf, int count)
+{
+ static int cons_offset;
+
+ if (!cons_input.used_len)
+ return 0;
+
+ if (cons_input.used_len - cons_offset < count)
+ count = cons_input.used_len - cons_offset;
+
+ memcpy(buf, inbuf + cons_offset, count);
+ cons_offset += count;
+ if (cons_offset == cons_input.used_len) {
+ cons_offset = 0;
+ cons_input.used_len = 0;
+ }
+ return count;
+}
+
+static struct hv_ops lguest_cons = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+};
+
+static int __init cons_init(void)
+{
+ if (strcmp(paravirt_ops.name, "lguest") != 0)
+ return 0;
+
+ return hvc_instantiate(0, 0, &lguest_cons);
+}
+console_initcall(cons_init);
+
+static int lguestcons_probe(struct lguest_device *lgdev)
+{
+ int err;
+
+ lgdev->private = hvc_alloc(0, lgdev_irq(lgdev), &lguest_cons, 256);
+ if (IS_ERR(lgdev->private))
+ return PTR_ERR(lgdev->private);
+
+ err = lguest_bind_dma(LGUEST_CONSOLE_DMA_KEY, &cons_input, 1,
+ lgdev_irq(lgdev));
+ if (err)
+ printk("lguest console: failed to bind buffer.\n");
+ return err;
+}
+
+static struct lguest_driver lguestcons_drv = {
+ .name = "lguestcons",
+ .owner = THIS_MODULE,
+ .device_type = LGUEST_DEVICE_T_CONSOLE,
+ .probe = lguestcons_probe,
+};
+
+static int __init hvc_lguest_init(void)
+{
+ return register_lguest_driver(&lguestcons_drv);
+}
+module_init(hvc_lguest_init);
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index 4b97eaf18602..bb09413d5a21 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -115,7 +115,7 @@ static void __exit hvc_rtas_exit(void)
module_exit(hvc_rtas_exit);
/* This will happen prior to module init. There is no tty at this time? */
-static int hvc_rtas_console_init(void)
+static int __init hvc_rtas_console_init(void)
{
rtascons_put_char_token = rtas_token("put-term-char");
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
new file mode 100644
index 000000000000..dd68f8541c2d
--- /dev/null
+++ b/drivers/char/hvc_xen.c
@@ -0,0 +1,159 @@
+/*
+ * xen console driver interface to hvc_console.c
+ *
+ * (c) 2007 Gerd Hoffmann <kraxel@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/xen/hypervisor.h>
+#include <xen/page.h>
+#include <xen/events.h>
+#include <xen/interface/io/console.h>
+#include <xen/hvc-console.h>
+
+#include "hvc_console.h"
+
+#define HVC_COOKIE 0x58656e /* "Xen" in hex */
+
+static struct hvc_struct *hvc;
+static int xencons_irq;
+
+/* ------------------------------------------------------------------ */
+
+static inline struct xencons_interface *xencons_interface(void)
+{
+ return mfn_to_virt(xen_start_info->console.domU.mfn);
+}
+
+static inline void notify_daemon(void)
+{
+ /* Use evtchn: this is called early, before irq is set up. */
+ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
+}
+
+static int write_console(uint32_t vtermno, const char *data, int len)
+{
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+ int sent = 0;
+
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ mb(); /* update queue values before going on */
+ BUG_ON((prod - cons) > sizeof(intf->out));
+
+ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
+ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
+
+ wmb(); /* write ring before updating pointer */
+ intf->out_prod = prod;
+
+ notify_daemon();
+ return sent;
+}
+
+static int read_console(uint32_t vtermno, char *buf, int len)
+{
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+ int recv = 0;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ mb(); /* get pointers before reading ring */
+ BUG_ON((prod - cons) > sizeof(intf->in));
+
+ while (cons != prod && recv < len)
+ buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
+
+ mb(); /* read ring before consuming */
+ intf->in_cons = cons;
+
+ notify_daemon();
+ return recv;
+}
+
+static struct hv_ops hvc_ops = {
+ .get_chars = read_console,
+ .put_chars = write_console,
+};
+
+static int __init xen_init(void)
+{
+ struct hvc_struct *hp;
+
+ if (!is_running_on_xen())
+ return 0;
+
+ xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+ if (xencons_irq < 0)
+ xencons_irq = 0 /* NO_IRQ */;
+ hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+
+ hvc = hp;
+ return 0;
+}
+
+static void __exit xen_fini(void)
+{
+ if (hvc)
+ hvc_remove(hvc);
+}
+
+static int xen_cons_init(void)
+{
+ if (!is_running_on_xen())
+ return 0;
+
+ hvc_instantiate(HVC_COOKIE, 0, &hvc_ops);
+ return 0;
+}
+
+module_init(xen_init);
+module_exit(xen_fini);
+console_initcall(xen_cons_init);
+
+static void xenboot_write_console(struct console *console, const char *string,
+ unsigned len)
+{
+ unsigned int linelen, off = 0;
+ const char *pos;
+
+ while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
+ linelen = pos-string+off;
+ if (off + linelen > len)
+ break;
+ write_console(0, string+off, linelen);
+ write_console(0, "\r\n", 2);
+ off += linelen + 1;
+ }
+ if (off < len)
+ write_console(0, string+off, len-off);
+}
+
+struct console xenboot_console = {
+ .name = "xenboot",
+ .write = xenboot_write_console,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+};
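write_console() and read_console() above follow the usual Xen console ring convention: the producer and consumer indices run freely and are only masked into the buffer when indexing, so prod - cons stays a valid fill level across wrap-around as long as the ring size is a power of two. A single-threaded sketch of the arithmetic (the mb()/wmb() barriers required for the real shared ring are deliberately omitted here):

#define DEMO_RING_SIZE 1024u            /* must be a power of two */
#define DEMO_RING_MASK(idx) ((idx) & (DEMO_RING_SIZE - 1))

struct demo_ring {
	char buf[DEMO_RING_SIZE];
	unsigned int cons;              /* free-running, never masked back */
	unsigned int prod;
};

static int demo_ring_put(struct demo_ring *r, const char *data, int len)
{
	int sent = 0;

	/* r->prod - r->cons is the fill level even after the 32-bit
	 * indices wrap, because both only ever increase. */
	while (sent < len && (r->prod - r->cons) < DEMO_RING_SIZE)
		r->buf[DEMO_RING_MASK(r->prod++)] = data[sent++];
	return sent;
}

static int demo_ring_get(struct demo_ring *r, char *out, int len)
{
	int recv = 0;

	while (r->cons != r->prod && recv < len)
		out[recv++] = r->buf[DEMO_RING_MASK(r->cons++)];
	return recv;
}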
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 207f7343ba60..69d8866de783 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -210,9 +210,9 @@ static struct ktermios hvcs_tty_termios = {
static int hvcs_parm_num_devs = -1;
module_param(hvcs_parm_num_devs, int, 0);
-char hvcs_driver_name[] = "hvcs";
-char hvcs_device_node[] = "hvcs";
-char hvcs_driver_string[]
+static const char hvcs_driver_name[] = "hvcs";
+static const char hvcs_device_node[] = "hvcs";
+static const char hvcs_driver_string[]
= "IBM hvcs (Hypervisor Virtual Console Server) Driver";
/* Status of partner info rescan triggered via sysfs. */
@@ -784,12 +784,10 @@ static int __devinit hvcs_probe(
return -EFAULT;
}
- hvcsd = kmalloc(sizeof(*hvcsd), GFP_KERNEL);
+ hvcsd = kzalloc(sizeof(*hvcsd), GFP_KERNEL);
if (!hvcsd)
return -ENODEV;
- /* hvcsd->tty is zeroed out with the memset */
- memset(hvcsd, 0x00, sizeof(*hvcsd));
spin_lock_init(&hvcsd->lock);
/* Automatically incs the refcount the first time */
@@ -1094,7 +1092,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
* NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
* calling this function or you will get deadlock.
*/
-struct hvcs_struct *hvcs_get_by_index(int index)
+static struct hvcs_struct *hvcs_get_by_index(int index)
{
struct hvcs_struct *hvcsd = NULL;
unsigned long flags;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 7cda04b33534..2d7cd486e025 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -41,7 +41,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
- depends on HW_RANDOM && X86 && PCI
+ depends on HW_RANDOM && X86_32 && PCI
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 4ae9811d1a6c..753f46052b87 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -296,12 +296,10 @@ static int __init intel_init_hw_struct(struct intel_rng_hw *intel_rng_hw,
(BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
== BIOS_CNTL_LOCK_ENABLE_MASK) {
static __initdata /*const*/ char warning[] =
- KERN_WARNING PFX "Firmware space is locked read-only. "
- KERN_WARNING PFX "If you can't or\n don't want to "
- KERN_WARNING PFX "disable this in firmware setup, and "
- KERN_WARNING PFX "if\n you are certain that your "
- KERN_WARNING PFX "system has a functional\n RNG, try"
- KERN_WARNING PFX "using the 'no_fwh_detect' option.\n";
+ KERN_WARNING PFX "Firmware space is locked read-only. If you can't or\n"
+ KERN_WARNING PFX "don't want to disable this in firmware setup, and if\n"
+ KERN_WARNING PFX "you are certain that your system has a functional\n"
+ KERN_WARNING PFX "RNG, try using the 'no_fwh_detect' option.\n";
if (no_fwh_detect)
return -ENODEV;
diff --git a/drivers/char/ip2/i2ellis.c b/drivers/char/ip2/i2ellis.c
index dd761a1e4f08..61ef013b8445 100644
--- a/drivers/char/ip2/i2ellis.c
+++ b/drivers/char/ip2/i2ellis.c
@@ -43,8 +43,6 @@ static void iiEnableMailIrqIIEX(i2eBordStrPtr);
static void iiWriteMaskII(i2eBordStrPtr, unsigned char);
static void iiWriteMaskIIEX(i2eBordStrPtr, unsigned char);
-static void ii2DelayTimer(unsigned int);
-static void ii2DelayWakeup(unsigned long id);
static void ii2Nop(void);
//***************
@@ -55,8 +53,6 @@ static int ii2Safe; // Safe I/O address for delay routine
static int iiDelayed; // Set when the iiResetDelay function is
// called. Cleared when ANY board is reset.
-static struct timer_list * pDelayTimer; // Used by iiDelayTimer
-static wait_queue_head_t pDelayWait; // Used by iiDelayTimer
static rwlock_t Dl_spinlock;
//********
@@ -86,9 +82,6 @@ static rwlock_t Dl_spinlock;
static void
iiEllisInit(void)
{
- pDelayTimer = kmalloc ( sizeof (struct timer_list), GFP_KERNEL );
- init_timer(pDelayTimer);
- init_waitqueue_head(&pDelayWait);
LOCK_INIT(&Dl_spinlock);
}
@@ -106,7 +99,6 @@ iiEllisInit(void)
static void
iiEllisCleanup(void)
{
- kfree(pDelayTimer);
}
//******************************************************************************
@@ -560,19 +552,6 @@ iiInitialize(i2eBordStrPtr pB)
COMPLETE(pB, I2EE_GOOD);
}
-//=======================================================
-// Delay Routines
-//
-// iiDelayIO
-// iiNop
-//=======================================================
-
-static void
-ii2DelayWakeup(unsigned long id)
-{
- wake_up_interruptible ( &pDelayWait );
-}
-
//******************************************************************************
// Function: ii2DelayTimer(mseconds)
// Parameters: mseconds - number of milliseconds to delay
@@ -594,28 +573,7 @@ ii2DelayWakeup(unsigned long id)
static void
ii2DelayTimer(unsigned int mseconds)
{
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, current);
-
- init_timer ( pDelayTimer );
-
- add_wait_queue(&pDelayWait, &wait);
-
- set_current_state( TASK_INTERRUPTIBLE );
-
- pDelayTimer->expires = jiffies + ( mseconds + 9 ) / 10;
- pDelayTimer->function = ii2DelayWakeup;
- pDelayTimer->data = 0;
-
- add_timer ( pDelayTimer );
-
- schedule();
-
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pDelayWait, &wait);
-
- del_timer ( pDelayTimer );
+ msleep_interruptible(mseconds);
}
#if 0
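ii2DelayTimer() above collapses roughly twenty lines of timer and wait-queue boilerplate into one call: msleep_interruptible() sleeps for at least the requested number of milliseconds (rounded up to jiffies) and wakes early only if a signal is pending. A trivial sketch with a hypothetical name:

#include <linux/delay.h>

static void demo_delay_ms(unsigned int mseconds)
{
	/* Returns the remaining time if interrupted by a signal; the
	 * original helper ignored that, so this sketch does too. */
	msleep_interruptible(mseconds);
}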
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 83c7258d3580..6005b5225772 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -425,9 +425,7 @@ cleanup_module(void)
printk(KERN_ERR "IP2: failed to unregister tty driver (%d)\n", err);
}
put_tty_driver(ip2_tty_driver);
- if ( ( err = unregister_chrdev ( IP2_IPL_MAJOR, pcIpl ) ) ) {
- printk(KERN_ERR "IP2: failed to unregister IPL driver (%d)\n", err);
- }
+ unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
remove_proc_entry("ip2mem", &proc_root);
// free memory
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index b894f67fdf14..0baa8fab4ea7 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -2,11 +2,9 @@
# IPMI device configuration
#
-menu "IPMI"
- depends on HAS_IOMEM
-
-config IPMI_HANDLER
+menuconfig IPMI_HANDLER
tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
help
This enables the central IPMI message handler, required for IPMI
to work.
@@ -18,9 +16,10 @@ config IPMI_HANDLER
If unsure, say N.
+if IPMI_HANDLER
+
config IPMI_PANIC_EVENT
bool 'Generate a panic event to all BMCs on a panic'
- depends on IPMI_HANDLER
help
When a panic occurs, this will cause the IPMI message handler to
generate an IPMI event describing the panic to each interface
@@ -40,14 +39,12 @@ config IPMI_PANIC_STRING
config IPMI_DEVICE_INTERFACE
tristate 'Device interface for IPMI'
- depends on IPMI_HANDLER
help
This provides an IOCTL interface to the IPMI message handler so
userland processes may use IPMI. It supports poll() and select().
config IPMI_SI
tristate 'IPMI System Interface handler'
- depends on IPMI_HANDLER
help
Provides a driver for System Interfaces (KCS, SMIC, BT).
Currently, only KCS and SMIC are supported. If
@@ -55,15 +52,13 @@ config IPMI_SI
config IPMI_WATCHDOG
tristate 'IPMI Watchdog Timer'
- depends on IPMI_HANDLER
help
This enables the IPMI watchdog timer.
config IPMI_POWEROFF
tristate 'IPMI Poweroff'
- depends on IPMI_HANDLER
help
This enables a function to power off the system with IPMI if
the IPMI management controller is capable of this.
-endmenu
+endif # IPMI_HANDLER
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8e222f2b80cc..6a01dd9e43f8 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2171,52 +2171,42 @@ static int create_files(struct bmc_device *bmc)
int err;
bmc->device_id_attr.attr.name = "device_id";
- bmc->device_id_attr.attr.owner = THIS_MODULE;
bmc->device_id_attr.attr.mode = S_IRUGO;
bmc->device_id_attr.show = device_id_show;
bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
- bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
bmc->revision_attr.attr.name = "revision";
- bmc->revision_attr.attr.owner = THIS_MODULE;
bmc->revision_attr.attr.mode = S_IRUGO;
bmc->revision_attr.show = revision_show;
bmc->firmware_rev_attr.attr.name = "firmware_revision";
- bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
bmc->firmware_rev_attr.attr.mode = S_IRUGO;
bmc->firmware_rev_attr.show = firmware_rev_show;
bmc->version_attr.attr.name = "ipmi_version";
- bmc->version_attr.attr.owner = THIS_MODULE;
bmc->version_attr.attr.mode = S_IRUGO;
bmc->version_attr.show = ipmi_version_show;
bmc->add_dev_support_attr.attr.name = "additional_device_support";
- bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
bmc->add_dev_support_attr.attr.mode = S_IRUGO;
bmc->add_dev_support_attr.show = add_dev_support_show;
bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
- bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
bmc->manufacturer_id_attr.show = manufacturer_id_show;
bmc->product_id_attr.attr.name = "product_id";
- bmc->product_id_attr.attr.owner = THIS_MODULE;
bmc->product_id_attr.attr.mode = S_IRUGO;
bmc->product_id_attr.show = product_id_show;
bmc->guid_attr.attr.name = "guid";
- bmc->guid_attr.attr.owner = THIS_MODULE;
bmc->guid_attr.attr.mode = S_IRUGO;
bmc->guid_attr.show = guid_show;
bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
- bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
@@ -2649,10 +2639,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
return -ENODEV;
}
- intf = kmalloc(sizeof(*intf), GFP_KERNEL);
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf)
return -ENOMEM;
- memset(intf, 0, sizeof(*intf));
intf->ipmi_version_major = ipmi_version_major(device_id);
intf->ipmi_version_minor = ipmi_version_minor(device_id);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e02893b7b300..b86186de7f07 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -679,7 +679,7 @@ static int ipmi_poweroff_init (void)
{
int rv;
- printk ("Copyright (C) 2004 MontaVista Software -"
+ printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -"
" IPMI Powerdown via sys_reboot.\n");
if (poweroff_powercycle)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 78e1b962fe35..4edfdda0cf99 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2857,7 +2857,7 @@ static int try_smi_init(struct smi_info *new_smi)
mutex_unlock(&smi_infos_lock);
- printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
+ printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
return 0;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 761f77740d67..77a7a4a06620 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -171,9 +171,6 @@ static struct pci_driver isicom_driver = {
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
-static DECLARE_COMPLETION(isi_timerdone);
-static char re_schedule = 1;
-
static void isicom_tx(unsigned long _data);
static void isicom_start(struct tty_struct *tty);
@@ -187,7 +184,7 @@ static signed char linuxb_to_isib[] = {
struct isi_board {
unsigned long base;
- unsigned char irq;
+ int irq;
unsigned char port_count;
unsigned short status;
unsigned short port_status; /* each bit for each port */
@@ -227,7 +224,7 @@ static struct isi_port isi_ports[PORT_COUNT];
* it wants to talk.
*/
-static inline int WaitTillCardIsFree(u16 base)
+static inline int WaitTillCardIsFree(unsigned long base)
{
unsigned int count = 0;
unsigned int a = in_atomic(); /* do we run under spinlock? */
@@ -243,17 +240,18 @@ static inline int WaitTillCardIsFree(u16 base)
static int lock_card(struct isi_board *card)
{
- char retries;
unsigned long base = card->base;
+ unsigned int retries, a;
- for (retries = 0; retries < 100; retries++) {
+ for (retries = 0; retries < 10; retries++) {
spin_lock_irqsave(&card->card_lock, card->flags);
- if (inw(base + 0xe) & 0x1) {
- return 1;
- } else {
- spin_unlock_irqrestore(&card->card_lock, card->flags);
- udelay(1000); /* 1ms */
+ for (a = 0; a < 10; a++) {
+ if (inw(base + 0xe) & 0x1)
+ return 1;
+ udelay(10);
}
+ spin_unlock_irqrestore(&card->card_lock, card->flags);
+ msleep(10);
}
printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
card->base);
@@ -261,23 +259,6 @@ static int lock_card(struct isi_board *card)
return 0; /* Failed to acquire the card! */
}
-static int lock_card_at_interrupt(struct isi_board *card)
-{
- unsigned char retries;
- unsigned long base = card->base;
-
- for (retries = 0; retries < 200; retries++) {
- spin_lock_irqsave(&card->card_lock, card->flags);
-
- if (inw(base + 0xe) & 0x1)
- return 1;
- else
- spin_unlock_irqrestore(&card->card_lock, card->flags);
- }
- /* Failing in interrupt is an acceptable event */
- return 0; /* Failed to acquire the card! */
-}
-
static void unlock_card(struct isi_board *card)
{
spin_unlock_irqrestore(&card->card_lock, card->flags);
@@ -415,7 +396,9 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
static void isicom_tx(unsigned long _data)
{
- short count = (BOARD_COUNT-1), card, base;
+ unsigned long flags, base;
+ unsigned int retries;
+ short count = (BOARD_COUNT-1), card;
short txcount, wrd, residue, word_count, cnt;
struct isi_port *port;
struct tty_struct *tty;
@@ -435,32 +418,34 @@ static void isicom_tx(unsigned long _data)
count = isi_card[card].port_count;
port = isi_card[card].ports;
base = isi_card[card].base;
+
+ spin_lock_irqsave(&isi_card[card].card_lock, flags);
+ for (retries = 0; retries < 100; retries++) {
+ if (inw(base + 0xe) & 0x1)
+ break;
+ udelay(2);
+ }
+ if (retries >= 100)
+ goto unlock;
+
for (;count > 0;count--, port++) {
- if (!lock_card_at_interrupt(&isi_card[card]))
- continue;
/* port not active or tx disabled to force flow control */
if (!(port->flags & ASYNC_INITIALIZED) ||
!(port->status & ISI_TXOK))
- unlock_card(&isi_card[card]);
continue;
tty = port->tty;
-
- if (tty == NULL) {
- unlock_card(&isi_card[card]);
+ if (tty == NULL)
continue;
- }
txcount = min_t(short, TX_SIZE, port->xmit_cnt);
- if (txcount <= 0 || tty->stopped || tty->hw_stopped) {
- unlock_card(&isi_card[card]);
+ if (txcount <= 0 || tty->stopped || tty->hw_stopped)
continue;
- }
- if (!(inw(base + 0x02) & (1 << port->channel))) {
- unlock_card(&isi_card[card]);
+
+ if (!(inw(base + 0x02) & (1 << port->channel)))
continue;
- }
+
pr_dbg("txing %d bytes, port%d.\n", txcount,
port->channel + 1);
outw((port->channel << isi_card[card].shift_count) | txcount,
@@ -508,16 +493,12 @@ static void isicom_tx(unsigned long _data)
port->status &= ~ISI_TXOK;
if (port->xmit_cnt <= WAKEUP_CHARS)
tty_wakeup(tty);
- unlock_card(&isi_card[card]);
}
+unlock:
+ spin_unlock_irqrestore(&isi_card[card].card_lock, flags);
/* schedule another tx for hopefully in about 10ms */
sched_again:
- if (!re_schedule) {
- complete(&isi_timerdone);
- return;
- }
-
mod_timer(&tx, jiffies + msecs_to_jiffies(10));
}
@@ -1749,17 +1730,13 @@ static unsigned int card_count;
static int __devinit isicom_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned int ioaddr, signature, index;
+ unsigned int signature, index;
int retval = -EPERM;
- u8 pciirq;
struct isi_board *board = NULL;
if (card_count >= BOARD_COUNT)
goto err;
- ioaddr = pci_resource_start(pdev, 3);
- /* i.e at offset 0x1c in the PCI configuration register space. */
- pciirq = pdev->irq;
dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
/* allot the first empty slot in the array */
@@ -1770,8 +1747,8 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
}
board->index = index;
- board->base = ioaddr;
- board->irq = pciirq;
+ board->base = pci_resource_start(pdev, 3);
+ board->irq = pdev->irq;
card_count++;
pci_set_drvdata(pdev, board);
@@ -1901,9 +1878,7 @@ error:
static void __exit isicom_exit(void)
{
- re_schedule = 0;
-
- wait_for_completion_timeout(&isi_timerdone, HZ);
+ del_timer_sync(&tx);
pci_unregister_driver(&isicom_driver);
tty_unregister_driver(isicom_normal);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 7b279d1de4a2..3c66f402f9d7 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -1753,9 +1753,6 @@ static void stli_settermios(struct tty_struct *tty, struct ktermios *old)
return;
tiosp = tty->termios;
- if ((tiosp->c_cflag == old->c_cflag) &&
- (tiosp->c_iflag == old->c_iflag))
- return;
stli_mkasyport(portp, &aport, tiosp);
stli_cmdwait(brdp, portp, A_SETPORT, &aport, sizeof(asyport_t), 0);
@@ -2166,14 +2163,10 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
cdkhdr_t __iomem *hdrp;
cdkctrl_t __iomem *cp;
unsigned char __iomem *bits;
- unsigned long flags;
-
- spin_lock_irqsave(&brd_lock, flags);
if (test_bit(ST_CMDING, &portp->state)) {
printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n",
(int) cmd);
- spin_unlock_irqrestore(&brd_lock, flags);
return;
}
@@ -2194,7 +2187,6 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
writeb(readb(bits) | portp->portbit, bits);
set_bit(ST_CMDING, &portp->state);
EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
}
static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
@@ -3218,13 +3210,13 @@ static int stli_initecp(struct stlibrd *brdp)
goto err;
}
+ brdp->iosize = ECP_IOSIZE;
+
if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
retval = -EIO;
goto err;
}
- brdp->iosize = ECP_IOSIZE;
-
/*
* Based on the specific board type setup the common vars to access
* and enable shared memory. Set all board specific information now
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 90965b4def5c..2ce0af1bd588 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -24,6 +24,7 @@
* 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
*/
+#include <linux/consolemap.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -308,10 +309,9 @@ static void applkey(struct vc_data *vc, int key, char mode)
* Many other routines do put_queue, but I think either
* they produce ASCII, or they produce some user-assigned
* string, and in both cases we might assume that it is
- * in utf-8 already. UTF-8 is defined for words of up to 31 bits,
- * but we need only 16 bits here
+ * in utf-8 already.
*/
-static void to_utf8(struct vc_data *vc, ushort c)
+static void to_utf8(struct vc_data *vc, uint c)
{
if (c < 0x80)
/* 0******* */
@@ -320,11 +320,21 @@ static void to_utf8(struct vc_data *vc, ushort c)
/* 110***** 10****** */
put_queue(vc, 0xc0 | (c >> 6));
put_queue(vc, 0x80 | (c & 0x3f));
- } else {
+ } else if (c < 0x10000) {
+ if (c >= 0xD800 && c < 0xE000)
+ return;
+ if (c == 0xFFFF)
+ return;
/* 1110**** 10****** 10****** */
put_queue(vc, 0xe0 | (c >> 12));
put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
put_queue(vc, 0x80 | (c & 0x3f));
+ } else if (c < 0x110000) {
+ /* 11110*** 10****** 10****** 10****** */
+ put_queue(vc, 0xf0 | (c >> 18));
+ put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
+ put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
+ put_queue(vc, 0x80 | (c & 0x3f));
}
}
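The widened encoder above now accepts full Unicode code points rather than 16-bit values, drops UTF-16 surrogates and U+FFFF, and can emit 4-byte sequences. A stand-alone sketch of the same encoding, writing into a plain buffer instead of the tty queue (buffer-based interface is mine, the ranges match the hunk):

	#include <stddef.h>

	static size_t demo_utf8_encode(unsigned int c, unsigned char *buf)
	{
		if (c < 0x80) {
			buf[0] = c;				/* 0******* */
			return 1;
		} else if (c < 0x800) {
			buf[0] = 0xc0 | (c >> 6);		/* 110***** 10****** */
			buf[1] = 0x80 | (c & 0x3f);
			return 2;
		} else if (c < 0x10000) {
			if ((c >= 0xD800 && c < 0xE000) || c == 0xFFFF)
				return 0;			/* surrogates and U+FFFF dropped */
			buf[0] = 0xe0 | (c >> 12);		/* 1110**** 10****** 10****** */
			buf[1] = 0x80 | ((c >> 6) & 0x3f);
			buf[2] = 0x80 | (c & 0x3f);
			return 3;
		} else if (c < 0x110000) {
			buf[0] = 0xf0 | (c >> 18);		/* 11110*** 10****** x3 */
			buf[1] = 0x80 | ((c >> 12) & 0x3f);
			buf[2] = 0x80 | ((c >> 6) & 0x3f);
			buf[3] = 0x80 | (c & 0x3f);
			return 4;
		}
		return 0;					/* beyond Unicode range */
	}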
@@ -393,7 +403,7 @@ static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
return d;
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, d);
+ to_utf8(vc, conv_8bit_to_uni(d));
else if (d < 0x100)
put_queue(vc, d);
@@ -407,7 +417,7 @@ static void fn_enter(struct vc_data *vc)
{
if (diacr) {
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, diacr);
+ to_utf8(vc, conv_8bit_to_uni(diacr));
else if (diacr < 0x100)
put_queue(vc, diacr);
diacr = 0;
@@ -617,7 +627,7 @@ static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
return;
}
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, value);
+ to_utf8(vc, conv_8bit_to_uni(value));
else if (value < 0x100)
put_queue(vc, value);
}
@@ -775,7 +785,7 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
/* kludge */
if (up_flag && shift_state != old_state && npadch != -1) {
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, npadch & 0xffff);
+ to_utf8(vc, npadch);
else
put_queue(vc, npadch & 0xff);
npadch = -1;
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 57f9115a456c..7ee5d9444926 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -39,14 +39,14 @@
#else
#define DBG(fmt...)
#endif
-int mbcs_major;
+static int mbcs_major;
-LIST_HEAD(soft_list);
+static LIST_HEAD(soft_list);
/*
* file operations
*/
-const struct file_operations mbcs_ops = {
+static const struct file_operations mbcs_ops = {
.open = mbcs_open,
.llseek = mbcs_sram_llseek,
.read = mbcs_sram_read,
@@ -377,7 +377,7 @@ dmaread_exit:
return rv;
}
-int mbcs_open(struct inode *ip, struct file *fp)
+static int mbcs_open(struct inode *ip, struct file *fp)
{
struct mbcs_soft *soft;
int minor;
@@ -394,7 +394,7 @@ int mbcs_open(struct inode *ip, struct file *fp)
return -ENODEV;
}
-ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
+static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
{
struct cx_dev *cx_dev = fp->private_data;
struct mbcs_soft *soft = cx_dev->soft;
@@ -418,7 +418,7 @@ ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t *
return rv;
}
-ssize_t
+static ssize_t
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
{
struct cx_dev *cx_dev = fp->private_data;
@@ -443,7 +443,7 @@ mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * o
return rv;
}
-loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
+static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
{
loff_t newpos;
@@ -491,7 +491,7 @@ static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
}
-int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
{
struct cx_dev *cx_dev = fp->private_data;
struct mbcs_soft *soft = cx_dev->soft;
@@ -793,7 +793,7 @@ static int mbcs_remove(struct cx_dev *dev)
return 0;
}
-const struct cx_device_id __devinitdata mbcs_id_table[] = {
+static const struct cx_device_id __devinitdata mbcs_id_table[] = {
{
.part_num = MBCS_PART_NUM,
.mfg_num = MBCS_MFG_NUM,
@@ -807,7 +807,7 @@ const struct cx_device_id __devinitdata mbcs_id_table[] = {
MODULE_DEVICE_TABLE(cx, mbcs_id_table);
-struct cx_drv mbcs_driver = {
+static struct cx_drv mbcs_driver = {
.name = DEVICE_NAME,
.id_table = mbcs_id_table,
.probe = mbcs_probe,
@@ -816,12 +816,7 @@ struct cx_drv mbcs_driver = {
static void __exit mbcs_exit(void)
{
- int rv;
-
- rv = unregister_chrdev(mbcs_major, DEVICE_NAME);
- if (rv < 0)
- DBG(KERN_ALERT "Error in unregister_chrdev: %d\n", rv);
-
+ unregister_chrdev(mbcs_major, DEVICE_NAME);
cx_driver_unregister(&mbcs_driver);
}
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
index e7fd47e43257..c9905a3c3353 100644
--- a/drivers/char/mbcs.h
+++ b/drivers/char/mbcs.h
@@ -542,12 +542,12 @@ struct mbcs_soft {
struct semaphore algolock;
};
-extern int mbcs_open(struct inode *ip, struct file *fp);
-extern ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
+static int mbcs_open(struct inode *ip, struct file *fp);
+static ssize_t mbcs_sram_read(struct file *fp, char __user *buf, size_t len,
loff_t * off);
-extern ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
+static ssize_t mbcs_sram_write(struct file *fp, const char __user *buf, size_t len,
loff_t * off);
-extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
-extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
+static loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
+static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
#endif // __MBCS_H__
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 4e6fb9651a16..71c8cd7fa15f 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -67,25 +67,13 @@ extern int pmu_device_init(void);
#ifdef CONFIG_PROC_FS
static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct miscdevice *p;
- loff_t off = 0;
-
mutex_lock(&misc_mtx);
- list_for_each_entry(p, &misc_list, list) {
- if (*pos == off++)
- return p;
- }
- return NULL;
+ return seq_list_start(&misc_list, *pos);
}
static void *misc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct list_head *n = ((struct miscdevice *)v)->list.next;
-
- ++*pos;
-
- return (n != &misc_list) ? list_entry(n, struct miscdevice, list)
- : NULL;
+ return seq_list_next(v, &misc_list, pos);
}
static void misc_seq_stop(struct seq_file *seq, void *v)
@@ -95,7 +83,7 @@ static void misc_seq_stop(struct seq_file *seq, void *v)
static int misc_seq_show(struct seq_file *seq, void *v)
{
- const struct miscdevice *p = v;
+ const struct miscdevice *p = list_entry(v, struct miscdevice, list);
seq_printf(seq, "%3i %s\n", p->minor, p->name ? p->name : "");
return 0;
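The misc.c hunks replace an open-coded list cursor with the seq_list_* helpers: start/next return a struct list_head pointer and show() converts it with list_entry(). The generic shape of that conversion, with made-up demo types standing in for miscdevice, looks like this:

	#include <linux/seq_file.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	struct demo_item {
		struct list_head list;
		const char *name;
	};
	static LIST_HEAD(demo_list);
	static DEFINE_MUTEX(demo_mtx);

	static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
	{
		mutex_lock(&demo_mtx);
		return seq_list_start(&demo_list, *pos);
	}

	static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		return seq_list_next(v, &demo_list, pos);
	}

	static void demo_seq_stop(struct seq_file *seq, void *v)
	{
		mutex_unlock(&demo_mtx);
	}

	static int demo_seq_show(struct seq_file *seq, void *v)
	{
		struct demo_item *p = list_entry(v, struct demo_item, list);

		seq_printf(seq, "%s\n", p->name);
		return 0;
	}

These four functions would then be wired into a struct seq_operations exactly as before; only the iteration bookkeeping changes.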
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index e0d35c20c04f..ed76f0a127fd 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1405,7 +1405,6 @@ static int moxaCard;
static struct mon_str moxaLog;
static int moxaFuncTout = HZ / 2;
-static void moxadelay(int);
static void moxafunc(void __iomem *, int, ushort);
static void wait_finish(void __iomem *);
static void low_water_check(void __iomem *);
@@ -2404,10 +2403,10 @@ void MoxaPortSendBreak(int port, int ms100)
ofsAddr = moxa_ports[port].tableAddr;
if (ms100) {
moxafunc(ofsAddr, FC_SendBreak, Magic_code);
- moxadelay(ms100 * (HZ / 10));
+ msleep(ms100 * 10);
} else {
moxafunc(ofsAddr, FC_SendBreak, Magic_code);
- moxadelay(HZ / 4); /* 250 ms */
+ msleep(250);
}
moxafunc(ofsAddr, FC_StopBreak, Magic_code);
}
@@ -2476,18 +2475,6 @@ static int moxa_set_serial_info(struct moxa_port *info,
/*****************************************************************************
* Static local functions: *
*****************************************************************************/
-/*
- * moxadelay - delays a specified number ticks
- */
-static void moxadelay(int tick)
-{
- unsigned long st, et;
-
- st = jiffies;
- et = st + tick;
- while (time_before(jiffies, et));
-}
-
static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
{
@@ -2535,7 +2522,7 @@ static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
return -EFAULT;
baseAddr = moxa_boards[cardno].basemem;
writeb(HW_reset, baseAddr + Control_reg); /* reset */
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
for (i = 0; i < 4096; i++)
writeb(0, baseAddr + i); /* clear fix page */
for (i = 0; i < len; i++)
@@ -2713,7 +2700,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + C218_key) == keycode)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
if (readw(baseAddr + C218_key) != keycode) {
return (-1);
@@ -2725,7 +2712,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + C218_key) == keycode)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
retry++;
} while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
@@ -2736,7 +2723,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code) {
return (-1);
@@ -2746,7 +2733,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
for (i = 0; i < 100; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1); /* delay 10 ms */
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code) {
return (-1);
@@ -2788,7 +2775,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 10; i++) {
if (readw(baseAddr + C320_key) == C320_KeyCode)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + C320_key) != C320_KeyCode)
return (-1);
@@ -2799,7 +2786,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 10; i++) {
if (readw(baseAddr + C320_key) == C320_KeyCode)
break;
- moxadelay(1);
+ msleep(10);
}
retry++;
} while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
@@ -2809,7 +2796,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 600; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code)
return (-100);
@@ -2828,7 +2815,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 500; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code)
return (-102);
@@ -2842,7 +2829,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
for (i = 0; i < 600; i++) {
if (readw(baseAddr + Magic_no) == Magic_code)
break;
- moxadelay(1);
+ msleep(10);
}
if (readw(baseAddr + Magic_no) != Magic_code)
return (-102);
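The moxa.c hunks above all follow one pattern: the removed moxadelay() spun on jiffies with the CPU fully busy, and every call site is replaced by an msleep() of the equivalent duration (one tick is treated as 10 ms here). A side-by-side sketch of the two styles, for comparison only:

	#include <linux/delay.h>
	#include <linux/jiffies.h>

	static void old_style_delay(int ticks)
	{
		unsigned long end = jiffies + ticks;

		while (time_before(jiffies, end))
			;				/* burns the CPU for the whole wait */
	}

	static void new_style_delay(int ticks)
	{
		msleep(jiffies_to_msecs(ticks));	/* sleeps and yields the CPU */
	}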
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 7ac30612068b..c716ef0dd370 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -265,7 +265,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
vdata->refcnt = ATOMIC_INIT(1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 5953a45d7e96..2aee3fef0416 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -90,8 +90,6 @@
#define UART_MCR_AFE 0x20
#define UART_LSR_SPECIAL 0x1E
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK|\
- IXON|IXOFF))
#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED)
@@ -1729,16 +1727,12 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
struct mxser_struct *info = tty->driver_data;
unsigned long flags;
- if ((tty->termios->c_cflag != old_termios->c_cflag) ||
- (RELEVANT_IFLAG(tty->termios->c_iflag) != RELEVANT_IFLAG(old_termios->c_iflag))) {
+ mxser_change_speed(info, old_termios);
- mxser_change_speed(info, old_termios);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- mxser_start(tty);
- }
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ mxser_start(tty);
}
/* Handle sw stopped */
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
index 6cde448cd5b2..6a563932ba19 100644
--- a/drivers/char/mxser_new.c
+++ b/drivers/char/mxser_new.c
@@ -72,8 +72,6 @@
#define UART_MCR_AFE 0x20
#define UART_LSR_SPECIAL 0x1E
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK|\
- IXON|IXOFF))
#define C168_ASIC_ID 1
#define C104_ASIC_ID 2
@@ -1560,7 +1558,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
return -EFAULT;
return 0;
case MOXA_ASPP_MON_EXT: {
- int status, p, shiftbit;
+ int p, shiftbit;
unsigned long opmode;
unsigned cflag, iflag;
@@ -1990,18 +1988,14 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
struct mxser_port *info = tty->driver_data;
unsigned long flags;
- if ((tty->termios->c_cflag != old_termios->c_cflag) ||
- (RELEVANT_IFLAG(tty->termios->c_iflag) != RELEVANT_IFLAG(old_termios->c_iflag))) {
-
- spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(info, old_termios);
- spin_unlock_irqrestore(&info->slock, flags);
+ spin_lock_irqsave(&info->slock, flags);
+ mxser_change_speed(info, old_termios);
+ spin_unlock_irqrestore(&info->slock, flags);
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- mxser_start(tty);
- }
+ if ((old_termios->c_cflag & CRTSCTS) &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ mxser_start(tty);
}
/* Handle sw stopped */
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 337a87f86a3b..e8332f305d72 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -400,7 +400,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
/* Send the next block of data to device */
tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
actual = tty->driver->write(tty, tbuf->buf, tbuf->count);
-
+
+ /* rollback was possible and has been done */
+ if (actual == -ERESTARTSYS) {
+ n_hdlc->tbuf = tbuf;
+ break;
+ }
/* if transmit error, throw frame away by */
/* pretending it was accepted by driver */
if (actual < 0)
@@ -780,13 +785,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_wait(filp, &tty->write_wait, wait);
/* set bits for operations that won't block */
- if(n_hdlc->rx_buf_list.head)
+ if (n_hdlc->rx_buf_list.head)
mask |= POLLIN | POLLRDNORM; /* readable */
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= POLLHUP;
- if(tty_hung_up_p(filp))
+ if (tty_hung_up_p(filp))
mask |= POLLHUP;
- if(n_hdlc->tx_free_buf_list.head)
+ if (!tty_is_writelocked(tty) &&
+ n_hdlc->tx_free_buf_list.head)
mask |= POLLOUT | POLLWRNORM; /* writable */
}
return mask;
@@ -861,7 +867,7 @@ static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
spin_lock_irqsave(&list->spinlock,flags);
buf->link=NULL;
- if(list->tail)
+ if (list->tail)
list->tail->link = buf;
else
list->head = buf;
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 14557a4822c0..6b918b80f73e 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1071,8 +1071,6 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
struct r3964_client_info *pClient;
struct r3964_message *pMsg;
struct r3964_client_message theMsg;
- DECLARE_WAITQUEUE(wait, current);
-
int count;
TRACE_L("read()");
@@ -1086,16 +1084,8 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
return -EAGAIN;
}
/* block until there is a message: */
- add_wait_queue(&pInfo->read_wait, &wait);
-repeat:
- __set_current_state(TASK_INTERRUPTIBLE);
- pMsg = remove_msg(pInfo, pClient);
- if (!pMsg && !signal_pending(current)) {
- schedule();
- goto repeat;
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&pInfo->read_wait, &wait);
+ wait_event_interruptible(pInfo->read_wait,
+ (pMsg = remove_msg(pInfo, pClient)));
}
/* If we still haven't got a message, we must have been signalled */
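The r3964_read() change collapses a hand-rolled add_wait_queue()/schedule() loop into a single wait_event_interruptible() call, which re-tests the condition after every wakeup and returns early if a signal arrives. A minimal sketch of that API with demo names (the driver embeds the message dequeue directly in the condition expression):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
	static int demo_ready;

	static int demo_wait(void)
	{
		/* Sleeps until demo_ready is nonzero or a signal is pending;
		 * returns 0 on success, -ERESTARTSYS if interrupted. */
		return wait_event_interruptible(demo_wq, demo_ready != 0);
	}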
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 154f42203b05..038056911934 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -45,6 +45,8 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/bitops.h>
+#include <linux/audit.h>
+#include <linux/file.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -78,6 +80,13 @@ static inline void free_buf(unsigned char *buf)
free_page((unsigned long) buf);
}
+static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
+ unsigned char __user *ptr)
+{
+ tty_audit_add_data(tty, &x, 1);
+ return put_user(x, ptr);
+}
+
/**
* n_tty_set__room - receive space
* @tty: terminal
@@ -1153,6 +1162,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
if (n) {
retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
n -= retval;
+ tty_audit_add_data(tty, &tty->read_buf[tty->read_tail], n);
spin_lock_irqsave(&tty->read_lock, flags);
tty->read_tail = (tty->read_tail + n) & (N_TTY_BUF_SIZE-1);
tty->read_cnt -= n;
@@ -1279,7 +1289,7 @@ do_it_again:
break;
cs = tty->link->ctrl_status;
tty->link->ctrl_status = 0;
- if (put_user(cs, b++)) {
+ if (tty_put_user(tty, cs, b++)) {
retval = -EFAULT;
b--;
break;
@@ -1321,7 +1331,7 @@ do_it_again:
/* Deal with packet mode. */
if (tty->packet && b == buf) {
- if (put_user(TIOCPKT_DATA, b++)) {
+ if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
retval = -EFAULT;
b--;
break;
@@ -1352,15 +1362,17 @@ do_it_again:
spin_unlock_irqrestore(&tty->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
- if (put_user(c, b++)) {
+ if (tty_put_user(tty, c, b++)) {
retval = -EFAULT;
b--;
break;
}
nr--;
}
- if (eol)
+ if (eol) {
+ tty_audit_push(tty);
break;
+ }
}
if (retval)
break;
@@ -1538,7 +1550,8 @@ static unsigned int normal_poll(struct tty_struct * tty, struct file * file, pol
else
tty->minimum_to_wake = 1;
}
- if (tty->driver->chars_in_buffer(tty) < WAKEUP_CHARS &&
+ if (!tty_is_writelocked(tty) &&
+ tty->driver->chars_in_buffer(tty) < WAKEUP_CHARS &&
tty->driver->write_room(tty) > 0)
mask |= POLLOUT | POLLWRNORM;
return mask;
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 204deaa0de80..98dec380af49 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -42,19 +42,12 @@
#define PC 1
#define ATARI 2
-#define COBALT 3
/* select machine configuration */
#if defined(CONFIG_ATARI)
# define MACH ATARI
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and others?? */
-#define MACH PC
-# if defined(CONFIG_COBALT)
-# include <linux/cobalt-nvram.h>
-# define MACH COBALT
-# else
-# define MACH PC
-# endif
+# define MACH PC
#else
# error Cannot build nvram driver for this machine configuration.
#endif
@@ -76,18 +69,6 @@
#endif
-#if MACH == COBALT
-
-#define CHECK_DRIVER_INIT() 1
-
-#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE)
-
-#define mach_check_checksum cobalt_check_checksum
-#define mach_set_checksum cobalt_set_checksum
-#define mach_proc_infos cobalt_proc_infos
-
-#endif
-
#if MACH == ATARI
/* Special parameters for RTC in Atari machines */
@@ -604,177 +585,6 @@ pc_proc_infos(unsigned char *nvram, char *buffer, int *len,
#endif /* MACH == PC */
-#if MACH == COBALT
-
-/* the cobalt CMOS has a wider range of its checksum */
-static int cobalt_check_checksum(void)
-{
- int i;
- unsigned short sum = 0;
- unsigned short expect;
-
- for (i = COBT_CMOS_CKS_START; i <= COBT_CMOS_CKS_END; ++i) {
- if ((i == COBT_CMOS_CHECKSUM) || (i == (COBT_CMOS_CHECKSUM+1)))
- continue;
-
- sum += __nvram_read_byte(i);
- }
- expect = __nvram_read_byte(COBT_CMOS_CHECKSUM) << 8 |
- __nvram_read_byte(COBT_CMOS_CHECKSUM+1);
- return ((sum & 0xffff) == expect);
-}
-
-static void cobalt_set_checksum(void)
-{
- int i;
- unsigned short sum = 0;
-
- for (i = COBT_CMOS_CKS_START; i <= COBT_CMOS_CKS_END; ++i) {
- if ((i == COBT_CMOS_CHECKSUM) || (i == (COBT_CMOS_CHECKSUM+1)))
- continue;
-
- sum += __nvram_read_byte(i);
- }
-
- __nvram_write_byte(sum >> 8, COBT_CMOS_CHECKSUM);
- __nvram_write_byte(sum & 0xff, COBT_CMOS_CHECKSUM+1);
-}
-
-#ifdef CONFIG_PROC_FS
-
-static int cobalt_proc_infos(unsigned char *nvram, char *buffer, int *len,
- off_t *begin, off_t offset, int size)
-{
- int i;
- unsigned int checksum;
- unsigned int flags;
- char sernum[14];
- char *key = "cNoEbTaWlOtR!";
- unsigned char bto_csum;
-
- spin_lock_irq(&rtc_lock);
- checksum = __nvram_check_checksum();
- spin_unlock_irq(&rtc_lock);
-
- PRINT_PROC("Checksum status: %svalid\n", checksum ? "" : "not ");
-
- flags = nvram[COBT_CMOS_FLAG_BYTE_0] << 8
- | nvram[COBT_CMOS_FLAG_BYTE_1];
-
- PRINT_PROC("Console: %s\n",
- flags & COBT_CMOS_CONSOLE_FLAG ? "on": "off");
-
- PRINT_PROC("Firmware Debug Messages: %s\n",
- flags & COBT_CMOS_DEBUG_FLAG ? "on": "off");
-
- PRINT_PROC("Auto Prompt: %s\n",
- flags & COBT_CMOS_AUTO_PROMPT_FLAG ? "on": "off");
-
- PRINT_PROC("Shutdown Status: %s\n",
- flags & COBT_CMOS_CLEAN_BOOT_FLAG ? "clean": "dirty");
-
- PRINT_PROC("Hardware Probe: %s\n",
- flags & COBT_CMOS_HW_NOPROBE_FLAG ? "partial": "full");
-
- PRINT_PROC("System Fault: %sdetected\n",
- flags & COBT_CMOS_SYSFAULT_FLAG ? "": "not ");
-
- PRINT_PROC("Panic on OOPS: %s\n",
- flags & COBT_CMOS_OOPSPANIC_FLAG ? "yes": "no");
-
- PRINT_PROC("Delayed Cache Initialization: %s\n",
- flags & COBT_CMOS_DELAY_CACHE_FLAG ? "yes": "no");
-
- PRINT_PROC("Show Logo at Boot: %s\n",
- flags & COBT_CMOS_NOLOGO_FLAG ? "no": "yes");
-
- PRINT_PROC("Boot Method: ");
- switch (nvram[COBT_CMOS_BOOT_METHOD]) {
- case COBT_CMOS_BOOT_METHOD_DISK:
- PRINT_PROC("disk\n");
- break;
-
- case COBT_CMOS_BOOT_METHOD_ROM:
- PRINT_PROC("rom\n");
- break;
-
- case COBT_CMOS_BOOT_METHOD_NET:
- PRINT_PROC("net\n");
- break;
-
- default:
- PRINT_PROC("unknown\n");
- break;
- }
-
- PRINT_PROC("Primary Boot Device: %d:%d\n",
- nvram[COBT_CMOS_BOOT_DEV0_MAJ],
- nvram[COBT_CMOS_BOOT_DEV0_MIN] );
- PRINT_PROC("Secondary Boot Device: %d:%d\n",
- nvram[COBT_CMOS_BOOT_DEV1_MAJ],
- nvram[COBT_CMOS_BOOT_DEV1_MIN] );
- PRINT_PROC("Tertiary Boot Device: %d:%d\n",
- nvram[COBT_CMOS_BOOT_DEV2_MAJ],
- nvram[COBT_CMOS_BOOT_DEV2_MIN] );
-
- PRINT_PROC("Uptime: %d\n",
- nvram[COBT_CMOS_UPTIME_0] << 24 |
- nvram[COBT_CMOS_UPTIME_1] << 16 |
- nvram[COBT_CMOS_UPTIME_2] << 8 |
- nvram[COBT_CMOS_UPTIME_3]);
-
- PRINT_PROC("Boot Count: %d\n",
- nvram[COBT_CMOS_BOOTCOUNT_0] << 24 |
- nvram[COBT_CMOS_BOOTCOUNT_1] << 16 |
- nvram[COBT_CMOS_BOOTCOUNT_2] << 8 |
- nvram[COBT_CMOS_BOOTCOUNT_3]);
-
- /* 13 bytes of serial num */
- for (i=0 ; i<13 ; i++) {
- sernum[i] = nvram[COBT_CMOS_SYS_SERNUM_0 + i];
- }
- sernum[13] = '\0';
-
- checksum = 0;
- for (i=0 ; i<13 ; i++) {
- checksum += sernum[i] ^ key[i];
- }
- checksum = ((checksum & 0x7f) ^ (0xd6)) & 0xff;
-
- PRINT_PROC("Serial Number: %s", sernum);
- if (checksum != nvram[COBT_CMOS_SYS_SERNUM_CSUM]) {
- PRINT_PROC(" (invalid checksum)");
- }
- PRINT_PROC("\n");
-
- PRINT_PROC("Rom Revison: %d.%d.%d\n", nvram[COBT_CMOS_ROM_REV_MAJ],
- nvram[COBT_CMOS_ROM_REV_MIN], nvram[COBT_CMOS_ROM_REV_REV]);
-
- PRINT_PROC("BTO Server: %d.%d.%d.%d", nvram[COBT_CMOS_BTO_IP_0],
- nvram[COBT_CMOS_BTO_IP_1], nvram[COBT_CMOS_BTO_IP_2],
- nvram[COBT_CMOS_BTO_IP_3]);
- bto_csum = nvram[COBT_CMOS_BTO_IP_0] + nvram[COBT_CMOS_BTO_IP_1]
- + nvram[COBT_CMOS_BTO_IP_2] + nvram[COBT_CMOS_BTO_IP_3];
- if (bto_csum != nvram[COBT_CMOS_BTO_IP_CSUM]) {
- PRINT_PROC(" (invalid checksum)");
- }
- PRINT_PROC("\n");
-
- if (flags & COBT_CMOS_VERSION_FLAG
- && nvram[COBT_CMOS_VERSION] >= COBT_CMOS_VER_BTOCODE) {
- PRINT_PROC("BTO Code: 0x%x\n",
- nvram[COBT_CMOS_BTO_CODE_0] << 24 |
- nvram[COBT_CMOS_BTO_CODE_1] << 16 |
- nvram[COBT_CMOS_BTO_CODE_2] << 8 |
- nvram[COBT_CMOS_BTO_CODE_3]);
- }
-
- return 1;
-}
-#endif /* CONFIG_PROC_FS */
-
-#endif /* MACH == COBALT */
-
#if MACH == ATARI
static int
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 13808f6083a0..2b889317461e 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -540,13 +540,12 @@ static int mgslpc_probe(struct pcmcia_device *link)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("mgslpc_attach\n");
- info = kmalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+ info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
if (!info) {
printk("Error can't allocate device instance data\n");
return -ENOMEM;
}
- memset(info, 0, sizeof(MGSLPC_INFO));
info->magic = MGSLPC_MAGIC;
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
new file mode 100644
index 000000000000..79b6f461be75
--- /dev/null
+++ b/drivers/char/ps3flash.c
@@ -0,0 +1,440 @@
+/*
+ * PS3 FLASH ROM Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+#define DEVICE_NAME "ps3flash"
+
+#define FLASH_BLOCK_SIZE (256*1024)
+
+
+struct ps3flash_private {
+ struct mutex mutex; /* Bounce buffer mutex */
+};
+
+static struct ps3_storage_device *ps3flash_dev;
+
+static ssize_t ps3flash_read_write_sectors(struct ps3_storage_device *dev,
+ u64 lpar, u64 start_sector,
+ u64 sectors, int write)
+{
+ u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors,
+ write);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+ __LINE__, write ? "write" : "read", res);
+ return -EIO;
+ }
+ return sectors;
+}
+
+static ssize_t ps3flash_read_sectors(struct ps3_storage_device *dev,
+ u64 start_sector, u64 sectors,
+ unsigned int sector_offset)
+{
+ u64 max_sectors, lpar;
+
+ max_sectors = dev->bounce_size / dev->blk_size;
+ if (sectors > max_sectors) {
+ dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %lu\n",
+ __func__, __LINE__, max_sectors);
+ sectors = max_sectors;
+ }
+
+ lpar = dev->bounce_lpar + sector_offset * dev->blk_size;
+ return ps3flash_read_write_sectors(dev, lpar, start_sector, sectors,
+ 0);
+}
+
+static ssize_t ps3flash_write_chunk(struct ps3_storage_device *dev,
+ u64 start_sector)
+{
+ u64 sectors = dev->bounce_size / dev->blk_size;
+ return ps3flash_read_write_sectors(dev, dev->bounce_lpar, start_sector,
+ sectors, 1);
+}
+
+static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ loff_t res;
+
+ mutex_lock(&file->f_mapping->host->i_mutex);
+ switch (origin) {
+ case 1:
+ offset += file->f_pos;
+ break;
+ case 2:
+ offset += dev->regions[dev->region_idx].size*dev->blk_size;
+ break;
+ }
+ if (offset < 0) {
+ res = -EINVAL;
+ goto out;
+ }
+
+ file->f_pos = offset;
+ res = file->f_pos;
+
+out:
+ mutex_unlock(&file->f_mapping->host->i_mutex);
+ return res;
+}
+
+static ssize_t ps3flash_read(struct file *file, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ struct ps3flash_private *priv = dev->sbd.core.driver_data;
+ u64 size, start_sector, end_sector, offset;
+ ssize_t sectors_read;
+ size_t remaining, n;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: Reading %zu bytes at position %lld to user 0x%p\n",
+ __func__, __LINE__, count, *pos, buf);
+
+ size = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (*pos >= size || !count)
+ return 0;
+
+ if (*pos + count > size) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u Truncating count from %zu to %llu\n", __func__,
+ __LINE__, count, size - *pos);
+ count = size - *pos;
+ }
+
+ start_sector = *pos / dev->blk_size;
+ offset = *pos % dev->blk_size;
+ end_sector = DIV_ROUND_UP(*pos + count, dev->blk_size);
+
+ remaining = count;
+ do {
+ mutex_lock(&priv->mutex);
+
+ sectors_read = ps3flash_read_sectors(dev, start_sector,
+ end_sector-start_sector,
+ 0);
+ if (sectors_read < 0) {
+ mutex_unlock(&priv->mutex);
+ goto fail;
+ }
+
+ n = min(remaining, sectors_read*dev->blk_size-offset);
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from 0x%p to user 0x%p\n",
+ __func__, __LINE__, n, dev->bounce_buf+offset, buf);
+ if (copy_to_user(buf, dev->bounce_buf+offset, n)) {
+ mutex_unlock(&priv->mutex);
+ sectors_read = -EFAULT;
+ goto fail;
+ }
+
+ mutex_unlock(&priv->mutex);
+
+ *pos += n;
+ buf += n;
+ remaining -= n;
+ start_sector += sectors_read;
+ offset = 0;
+ } while (remaining > 0);
+
+ return count;
+
+fail:
+ return sectors_read;
+}
+
+static ssize_t ps3flash_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ps3_storage_device *dev = ps3flash_dev;
+ struct ps3flash_private *priv = dev->sbd.core.driver_data;
+ u64 size, chunk_sectors, start_write_sector, end_write_sector,
+ end_read_sector, start_read_sector, head, tail, offset;
+ ssize_t res;
+ size_t remaining, n;
+ unsigned int sec_off;
+
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: Writing %zu bytes at position %lld from user 0x%p\n",
+ __func__, __LINE__, count, *pos, buf);
+
+ size = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (*pos >= size || !count)
+ return 0;
+
+ if (*pos + count > size) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u Truncating count from %zu to %llu\n", __func__,
+ __LINE__, count, size - *pos);
+ count = size - *pos;
+ }
+
+ chunk_sectors = dev->bounce_size / dev->blk_size;
+
+ start_write_sector = *pos / dev->bounce_size * chunk_sectors;
+ offset = *pos % dev->bounce_size;
+ end_write_sector = DIV_ROUND_UP(*pos + count, dev->bounce_size) *
+ chunk_sectors;
+
+ end_read_sector = DIV_ROUND_UP(*pos, dev->blk_size);
+ start_read_sector = (*pos + count) / dev->blk_size;
+
+ /*
+ * As we have to write in 256 KiB chunks, while we can read in blk_size
+ * (usually 512 bytes) chunks, we perform the following steps:
+ * 1. Read from start_write_sector to end_read_sector ("head")
+ * 2. Read from start_read_sector to end_write_sector ("tail")
+ * 3. Copy data to buffer
+ * 4. Write from start_write_sector to end_write_sector
+ * All of this is complicated by using only one 256 KiB bounce buffer.
+ */
+
+ head = end_read_sector - start_write_sector;
+ tail = end_write_sector - start_read_sector;
+
+ remaining = count;
+ do {
+ mutex_lock(&priv->mutex);
+
+ if (end_read_sector >= start_read_sector) {
+ /* Merge head and tail */
+ dev_dbg(&dev->sbd.core,
+ "Merged head and tail: %lu sectors at %lu\n",
+ chunk_sectors, start_write_sector);
+ res = ps3flash_read_sectors(dev, start_write_sector,
+ chunk_sectors, 0);
+ if (res < 0)
+ goto fail;
+ } else {
+ if (head) {
+ /* Read head */
+ dev_dbg(&dev->sbd.core,
+ "head: %lu sectors at %lu\n", head,
+ start_write_sector);
+ res = ps3flash_read_sectors(dev,
+ start_write_sector,
+ head, 0);
+ if (res < 0)
+ goto fail;
+ }
+ if (start_read_sector <
+ start_write_sector+chunk_sectors) {
+ /* Read tail */
+ dev_dbg(&dev->sbd.core,
+ "tail: %lu sectors at %lu\n", tail,
+ start_read_sector);
+ sec_off = start_read_sector-start_write_sector;
+ res = ps3flash_read_sectors(dev,
+ start_read_sector,
+ tail, sec_off);
+ if (res < 0)
+ goto fail;
+ }
+ }
+
+ n = min(remaining, dev->bounce_size-offset);
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: copy %lu bytes from user 0x%p to 0x%p\n",
+ __func__, __LINE__, n, buf, dev->bounce_buf+offset);
+ if (copy_from_user(dev->bounce_buf+offset, buf, n)) {
+ res = -EFAULT;
+ goto fail;
+ }
+
+ res = ps3flash_write_chunk(dev, start_write_sector);
+ if (res < 0)
+ goto fail;
+
+ mutex_unlock(&priv->mutex);
+
+ *pos += n;
+ buf += n;
+ remaining -= n;
+ start_write_sector += chunk_sectors;
+ head = 0;
+ offset = 0;
+ } while (remaining > 0);
+
+ return count;
+
+fail:
+ mutex_unlock(&priv->mutex);
+ return res;
+}
+
+
+static irqreturn_t ps3flash_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ int res;
+ u64 tag, status;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %lx, expected %lx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+ __func__, __LINE__, res, status);
+ } else {
+ dev->lv1_status = status;
+ complete(&dev->done);
+ }
+ return IRQ_HANDLED;
+}
+
+
+static const struct file_operations ps3flash_fops = {
+ .owner = THIS_MODULE,
+ .llseek = ps3flash_llseek,
+ .read = ps3flash_read,
+ .write = ps3flash_write,
+};
+
+static struct miscdevice ps3flash_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DEVICE_NAME,
+ .fops = &ps3flash_fops,
+};
+
+static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct ps3flash_private *priv;
+ int error;
+ unsigned long tmp;
+
+ tmp = dev->regions[dev->region_idx].start*dev->blk_size;
+ if (tmp % FLASH_BLOCK_SIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u region start %lu is not aligned\n", __func__,
+ __LINE__, tmp);
+ return -EINVAL;
+ }
+ tmp = dev->regions[dev->region_idx].size*dev->blk_size;
+ if (tmp % FLASH_BLOCK_SIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u region size %lu is not aligned\n", __func__,
+ __LINE__, tmp);
+ return -EINVAL;
+ }
+
+ /* use static buffer, kmalloc cannot allocate 256 KiB */
+ if (!ps3flash_bounce_buffer.address)
+ return -ENODEV;
+
+ if (ps3flash_dev) {
+ dev_err(&dev->sbd.core,
+ "Only one FLASH device is supported\n");
+ return -EBUSY;
+ }
+
+ ps3flash_dev = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dev->sbd.core.driver_data = priv;
+ mutex_init(&priv->mutex);
+
+ dev->bounce_size = ps3flash_bounce_buffer.size;
+ dev->bounce_buf = ps3flash_bounce_buffer.address;
+
+ error = ps3stor_setup(dev, ps3flash_interrupt);
+ if (error)
+ goto fail_free_priv;
+
+ ps3flash_misc.parent = &dev->sbd.core;
+ error = misc_register(&ps3flash_misc);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n",
+ __func__, __LINE__, error);
+ goto fail_teardown;
+ }
+
+ dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n",
+ __func__, __LINE__, ps3flash_misc.minor);
+ return 0;
+
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_priv:
+ kfree(priv);
+ dev->sbd.core.driver_data = NULL;
+fail:
+ ps3flash_dev = NULL;
+ return error;
+}
+
+static int ps3flash_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+
+ misc_deregister(&ps3flash_misc);
+ ps3stor_teardown(dev);
+ kfree(dev->sbd.core.driver_data);
+ dev->sbd.core.driver_data = NULL;
+ ps3flash_dev = NULL;
+ return 0;
+}
+
+
+static struct ps3_system_bus_driver ps3flash = {
+ .match_id = PS3_MATCH_ID_STOR_FLASH,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3flash_probe,
+ .remove = ps3flash_remove,
+ .shutdown = ps3flash_remove,
+};
+
+
+static int __init ps3flash_init(void)
+{
+ return ps3_system_bus_driver_register(&ps3flash);
+}
+
+static void __exit ps3flash_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3flash);
+}
+
+module_init(ps3flash_init);
+module_exit(ps3flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
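The comment block inside ps3flash_write() above describes a read-modify-write scheme: the FLASH can only be written in 256 KiB chunks, so the unmodified "head" and "tail" of each chunk are read back before the user data is spliced in and the whole chunk is written out. A user-space-style sketch of the idea, with hypothetical read_chunk()/write_chunk() helpers; note the real driver is more careful (it reads only the needed head and tail sectors and can merge them), whereas this sketch rereads the full chunk for simplicity:

	#include <string.h>

	#define CHUNK_SIZE (256 * 1024)

	void read_chunk(unsigned long long chunk, char *buf);		/* hypothetical */
	void write_chunk(unsigned long long chunk, const char *buf);	/* hypothetical */

	static void rmw_write(unsigned long long pos, const char *src, size_t count,
			      char *bounce)
	{
		while (count) {
			unsigned long long chunk = pos / CHUNK_SIZE;
			size_t off = pos % CHUNK_SIZE;
			size_t n = CHUNK_SIZE - off;

			if (n > count)
				n = count;

			read_chunk(chunk, bounce);	/* fetch existing head + tail */
			memcpy(bounce + off, src, n);	/* splice in the new data */
			write_chunk(chunk, bounce);	/* write the whole chunk back */

			pos += n;
			src += n;
			count -= n;
		}
	}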
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7f5271272f91..397c714cf2ba 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -693,9 +693,14 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
- int bytes = max_t(int, random_read_wakeup_thresh / 8,
- min_t(int, nbytes, sizeof(tmp)));
+ /* If we're limited, always leave two wakeup worth's BITS */
int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+ int bytes = nbytes;
+
+ /* pull at least as many as BYTES as wakeup BITS */
+ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* but never more than the buffer size */
+ bytes = min_t(int, bytes, sizeof(tmp));
DEBUG_ENT("going to reseed %s with %d bits "
"(%d of %d requested)\n",
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 294e9cb0c449..0ce96670f979 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -803,9 +803,7 @@ static void *ckmalloc(int size)
{
void *p;
- p = kmalloc(size, GFP_KERNEL);
- if (p)
- memset(p, 0, size);
+ p = kzalloc(size, GFP_KERNEL);
return p;
}
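This hunk, and several nearby ones (riocmd, riotable, rocket, synclink_cs, synclink, synclink_gt, synclinkmp), applies the same cleanup: a kmalloc() followed by memset(..., 0, size) becomes a single kzalloc(), which returns already-zeroed memory. A one-line sketch of the pattern:

	#include <linux/slab.h>

	static void *demo_alloc(size_t size)
	{
		return kzalloc(size, GFP_KERNEL);	/* was: kmalloc() + memset(0) */
	}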
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 8cc60b693460..7321d002c34f 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -556,9 +556,7 @@ struct CmdBlk *RIOGetCmdBlk(void)
{
struct CmdBlk *CmdBlkP;
- CmdBlkP = kmalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
- if (CmdBlkP)
- memset(CmdBlkP, 0, sizeof(struct CmdBlk));
+ CmdBlkP = kzalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
return CmdBlkP;
}
diff --git a/drivers/char/rio/riotable.c b/drivers/char/rio/riotable.c
index 7e988357326e..991119c9f473 100644
--- a/drivers/char/rio/riotable.c
+++ b/drivers/char/rio/riotable.c
@@ -863,8 +863,7 @@ int RIOReMapPorts(struct rio_info *p, struct Host *HostP, struct Map *HostMapP)
if (PortP->TxRingBuffer)
memset(PortP->TxRingBuffer, 0, p->RIOBufferSize);
else if (p->RIOBufferSize) {
- PortP->TxRingBuffer = kmalloc(p->RIOBufferSize, GFP_KERNEL);
- memset(PortP->TxRingBuffer, 0, p->RIOBufferSize);
+ PortP->TxRingBuffer = kzalloc(p->RIOBufferSize, GFP_KERNEL);
}
PortP->TxBufferOut = 0;
PortP->TxBufferIn = 0;
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3494e3fc44bf..b37e626f4faa 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -213,14 +213,6 @@ static inline void rc_release_io_range(struct riscom_board * const bp)
release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
}
-/* Must be called with enabled interrupts */
-static inline void rc_long_delay(unsigned long delay)
-{
- unsigned long i;
-
- for (i = jiffies + delay; time_after(i,jiffies); ) ;
-}
-
/* Reset and setup CD180 chip */
static void __init rc_init_CD180(struct riscom_board const * bp)
{
@@ -231,7 +223,7 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
rc_wait_CCR(bp); /* Wait for CCR ready */
rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
sti();
- rc_long_delay(HZ/20); /* Delay 0.05 sec */
+ msleep(50); /* Delay 0.05 sec */
cli();
rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
rc_out(bp, CD180_GICR, 0); /* Clear all bits */
@@ -280,7 +272,7 @@ static int __init rc_probe(struct riscom_board *bp)
rc_wait_CCR(bp);
rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */
rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */
- rc_long_delay(HZ/20);
+ msleep(50);
irqs = probe_irq_off(irqs);
val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */
val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index a3fd7e7ba5a9..56cbba7b6ec0 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -635,12 +635,11 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
ctlp = sCtlNumToCtlPtr(board);
/* Get a r_port struct for the port, fill it in and save it globally, indexed by line number */
- info = kmalloc(sizeof (struct r_port), GFP_KERNEL);
+ info = kzalloc(sizeof (struct r_port), GFP_KERNEL);
if (!info) {
printk(KERN_INFO "Couldn't allocate info struct for line #%d\n", line);
return;
}
- memset(info, 0, sizeof (struct r_port));
info->magic = RPORT_MAGIC;
info->line = line;
@@ -1702,7 +1701,8 @@ static int rp_write(struct tty_struct *tty,
if (count <= 0 || rocket_paranoia_check(info, "rp_write"))
return 0;
- mutex_lock_interruptible(&info->write_mtx);
+ if (mutex_lock_interruptible(&info->write_mtx))
+ return -ERESTARTSYS;
#ifdef ROCKET_DEBUG_WRITE
printk(KERN_INFO "rp_write %d chars...", count);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 20380a2c4dee..ec6b65ec69ea 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -82,16 +82,13 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-#if defined(__i386__)
+#ifdef CONFIG_X86
#include <asm/hpet.h>
#endif
-#ifdef __sparc__
+#ifdef CONFIG_SPARC32
#include <linux/pci.h>
#include <asm/ebus.h>
-#ifdef __sparc_v9__
-#include <asm/isa.h>
-#endif
static unsigned long rtc_port;
static int rtc_irq = PCI_IRQ_NONE;
@@ -930,13 +927,9 @@ static int __init rtc_init(void)
unsigned int year, ctrl;
char *guess = NULL;
#endif
-#ifdef __sparc__
+#ifdef CONFIG_SPARC32
struct linux_ebus *ebus;
struct linux_ebus_device *edev;
-#ifdef __sparc_v9__
- struct sparc_isa_bridge *isa_br;
- struct sparc_isa_device *isa_dev;
-#endif
#else
void *r;
#ifdef RTC_IRQ
@@ -944,7 +937,7 @@ static int __init rtc_init(void)
#endif
#endif
-#ifdef __sparc__
+#ifdef CONFIG_SPARC32
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
if(strcmp(edev->prom_node->name, "rtc") == 0) {
@@ -954,17 +947,6 @@ static int __init rtc_init(void)
}
}
}
-#ifdef __sparc_v9__
- for_each_isa(isa_br) {
- for_each_isadev(isa_dev, isa_br) {
- if (strcmp(isa_dev->prom_node->name, "rtc") == 0) {
- rtc_port = isa_dev->resource.start;
- rtc_irq = isa_dev->irq;
- goto found;
- }
- }
- }
-#endif
rtc_has_irq = 0;
printk(KERN_ERR "rtc_init: no PC rtc found\n");
return -EIO;
@@ -1020,7 +1002,7 @@ no_irq:
#endif
-#endif /* __sparc__ vs. others */
+#endif /* CONFIG_SPARC32 vs. others */
if (misc_register(&rtc_dev)) {
#ifdef RTC_IRQ
@@ -1105,7 +1087,7 @@ static void __exit rtc_exit (void)
remove_proc_entry ("driver/rtc", NULL);
misc_deregister(&rtc_dev);
-#ifdef __sparc__
+#ifdef CONFIG_SPARC32
if (rtc_has_irq)
free_irq (rtc_irq, &rtc_port);
#else
@@ -1117,7 +1099,7 @@ static void __exit rtc_exit (void)
if (rtc_has_irq)
free_irq (RTC_IRQ, NULL);
#endif
-#endif /* __sparc__ */
+#endif /* CONFIG_SPARC32 */
}
module_init(rtc_init);
@@ -1159,7 +1141,8 @@ static void rtc_dropped_irq(unsigned long data)
spin_unlock_irq(&rtc_lock);
- printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq);
+ if (printk_ratelimit())
+ printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq);
/* Now we have new data */
wake_up_interruptible(&rtc_wait);
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index a69f094d1ed3..d63f5ccc29e6 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -20,6 +20,7 @@
#include <asm/uaccess.h>
+#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/consolemap.h>
#include <linux/selection.h>
@@ -34,6 +35,7 @@ extern void poke_blanked_console(void);
/* Variables for selection control. */
/* Use a dynamic buffer, instead of static (Dec 1994) */
struct vc_data *sel_cons; /* must not be deallocated */
+static int use_unicode;
static volatile int sel_start = -1; /* cleared by clear_selection */
static int sel_end;
static int sel_buffer_lth;
@@ -54,10 +56,11 @@ static inline void highlight_pointer(const int where)
complement_pos(sel_cons, where);
}
-static unsigned char
+static u16
sel_pos(int n)
{
- return inverse_translate(sel_cons, screen_glyph(sel_cons, n));
+ return inverse_translate(sel_cons, screen_glyph(sel_cons, n),
+ use_unicode);
}
/* remove the current selection highlight, if any,
@@ -86,8 +89,8 @@ static u32 inwordLut[8]={
0xFF7FFFFF /* latin-1 accented letters, not division sign */
};
-static inline int inword(const unsigned char c) {
- return ( inwordLut[c>>5] >> (c & 0x1F) ) & 1;
+static inline int inword(const u16 c) {
+ return c > 0xff || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1);
}
/* set inwordLut contents. Invoked by ioctl(). */
@@ -108,13 +111,36 @@ static inline unsigned short limit(const unsigned short v, const unsigned short
return (v > u) ? u : v;
}
+/* stores the char in UTF8 and returns the number of bytes used (1-3) */
+static int store_utf8(u16 c, char *p)
+{
+ if (c < 0x80) {
+ /* 0******* */
+ p[0] = c;
+ return 1;
+ } else if (c < 0x800) {
+ /* 110***** 10****** */
+ p[0] = 0xc0 | (c >> 6);
+ p[1] = 0x80 | (c & 0x3f);
+ return 2;
+ } else {
+ /* 1110**** 10****** 10****** */
+ p[0] = 0xe0 | (c >> 12);
+ p[1] = 0x80 | ((c >> 6) & 0x3f);
+ p[2] = 0x80 | (c & 0x3f);
+ return 3;
+ }
+}
+
/* set the current selection. Invoked by ioctl() or by kernel code. */
int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int sel_mode, new_sel_start, new_sel_end, spc;
char *bp, *obp;
- int i, ps, pe;
+ int i, ps, pe, multiplier;
+ u16 c;
+ struct kbd_struct *kbd = kbd_table + fg_console;
poke_blanked_console();
@@ -158,6 +184,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
clear_selection();
sel_cons = vc_cons[fg_console].d;
}
+ use_unicode = kbd && kbd->kbdmode == VC_UNICODE;
switch (sel_mode)
{
@@ -240,7 +267,8 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
sel_end = new_sel_end;
/* Allocate a new buffer before freeing the old one ... */
- bp = kmalloc((sel_end-sel_start)/2+1, GFP_KERNEL);
+ multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */
+ bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL);
if (!bp) {
printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
@@ -251,8 +279,12 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
obp = bp;
for (i = sel_start; i <= sel_end; i += 2) {
- *bp = sel_pos(i);
- if (!isspace(*bp++))
+ c = sel_pos(i);
+ if (use_unicode)
+ bp += store_utf8(c, bp);
+ else
+ *bp++ = c;
+ if (!isspace(c))
obp = bp;
if (! ((i + 2) % vc->vc_size_row)) {
/* strip trailing blanks from line and add newline,
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index c585b4738f86..f1497cecffd8 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -2573,16 +2573,10 @@ static struct tty_driver *serial167_console_device(struct console *c,
return cy_serial_driver;
}
-static int __init serial167_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
static struct console sercons = {
.name = "ttyS",
.write = serial167_console_write,
.device = serial167_console_device,
- .setup = serial167_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 3ef593a9015f..73037a4d3c50 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -885,53 +885,6 @@ found:
return IRQ_HANDLED;
}
-/* External camera command (exported to the motion eye v4l driver) */
-int sonypi_camera_command(int command, u8 value)
-{
- if (!camera)
- return -EIO;
-
- mutex_lock(&sonypi_device.lock);
-
- switch (command) {
- case SONYPI_COMMAND_SETCAMERA:
- if (value)
- sonypi_camera_on();
- else
- sonypi_camera_off();
- break;
- case SONYPI_COMMAND_SETCAMERABRIGHTNESS:
- sonypi_set(SONYPI_CAMERA_BRIGHTNESS, value);
- break;
- case SONYPI_COMMAND_SETCAMERACONTRAST:
- sonypi_set(SONYPI_CAMERA_CONTRAST, value);
- break;
- case SONYPI_COMMAND_SETCAMERAHUE:
- sonypi_set(SONYPI_CAMERA_HUE, value);
- break;
- case SONYPI_COMMAND_SETCAMERACOLOR:
- sonypi_set(SONYPI_CAMERA_COLOR, value);
- break;
- case SONYPI_COMMAND_SETCAMERASHARPNESS:
- sonypi_set(SONYPI_CAMERA_SHARPNESS, value);
- break;
- case SONYPI_COMMAND_SETCAMERAPICTURE:
- sonypi_set(SONYPI_CAMERA_PICTURE, value);
- break;
- case SONYPI_COMMAND_SETCAMERAAGC:
- sonypi_set(SONYPI_CAMERA_AGC, value);
- break;
- default:
- printk(KERN_ERR "sonypi: sonypi_camera_command invalid: %d\n",
- command);
- break;
- }
- mutex_unlock(&sonypi_device.lock);
- return 0;
-}
-
-EXPORT_SYMBOL(sonypi_camera_command);
-
static int sonypi_misc_fasync(int fd, struct file *filp, int on)
{
int retval;
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index baf7234b6e66..455855631aef 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -345,18 +345,6 @@ static inline void sx_release_io_range(struct specialix_board * bp)
}
-/* Must be called with enabled interrupts */
-/* Ugly. Very ugly. Don't use this for anything else than initialization
- code */
-static inline void sx_long_delay(unsigned long delay)
-{
- unsigned long i;
-
- for (i = jiffies + delay; time_after(i, jiffies); ) ;
-}
-
-
-
/* Set the IRQ using the RTS lines that run to the PAL on the board.... */
static int sx_set_irq ( struct specialix_board *bp)
{
@@ -397,7 +385,7 @@ static int sx_init_CD186x(struct specialix_board * bp)
spin_lock_irqsave(&bp->lock, flags);
sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */
spin_unlock_irqrestore(&bp->lock, flags);
- sx_long_delay(HZ/20); /* Delay 0.05 sec */
+ msleep(50); /* Delay 0.05 sec */
spin_lock_irqsave(&bp->lock, flags);
sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */
sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */
@@ -533,7 +521,7 @@ static int sx_probe(struct specialix_board *bp)
sx_wait_CCR(bp);
sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */
sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */
- sx_long_delay(HZ/20);
+ msleep(50);
irqs = probe_irq_off(irqs);
dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR));
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 8c73ccb8830f..4a80b2f864e0 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1788,7 +1788,6 @@ static void stl_offintr(struct work_struct *work)
if (tty == NULL)
return;
- lock_kernel();
if (test_bit(ASYI_TXLOW, &portp->istate))
tty_wakeup(tty);
@@ -1802,7 +1801,6 @@ static void stl_offintr(struct work_struct *work)
if (portp->flags & ASYNC_CHECK_CD)
tty_hangup(tty); /* FIXME: module removal race here - AKPM */
}
- unlock_kernel();
}
/*****************************************************************************/
@@ -2357,9 +2355,6 @@ static int __devinit stl_pciprobe(struct pci_dev *pdev,
if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
goto err;
- dev_info(&pdev->dev, "please, report this to LKML: %x/%x/%x\n",
- pdev->vendor, pdev->device, pdev->class);
-
retval = pci_enable_device(pdev);
if (retval)
goto err;
@@ -4800,7 +4795,6 @@ static void __exit stallion_module_exit(void)
{
struct stlbrd *brdp;
unsigned int i, j;
- int retval;
pr_debug("cleanup_module()\n");
@@ -4823,9 +4817,7 @@ static void __exit stallion_module_exit(void)
for (i = 0; i < 4; i++)
class_device_destroy(stallion_class, MKDEV(STL_SIOMEMMAJOR, i));
- if ((retval = unregister_chrdev(STL_SIOMEMMAJOR, "staliomem")))
- printk("STALLION: failed to un-register serial memory device, "
- "errno=%d\n", -retval);
+ unregister_chrdev(STL_SIOMEMMAJOR, "staliomem");
class_destroy(stallion_class);
pci_unregister_driver(&stl_pcidriver);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index f02a0795983f..fdc256b380b8 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -160,8 +160,6 @@ typedef struct _DMABUFFERENTRY
#define IO_PIN_SHUTDOWN_LIMIT 100
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
struct _input_signal_events {
int ri_up;
int ri_down;
@@ -3064,12 +3062,6 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio
printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
tty->driver->name );
- /* just return if nothing has changed */
- if ((tty->termios->c_cflag == old_termios->c_cflag)
- && (RELEVANT_IFLAG(tty->termios->c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
mgsl_change_params(info);
/* Handle transition to B0 status */
@@ -4332,13 +4324,12 @@ static struct mgsl_struct* mgsl_allocate_device(void)
{
struct mgsl_struct *info;
- info = kmalloc(sizeof(struct mgsl_struct),
+ info = kzalloc(sizeof(struct mgsl_struct),
GFP_KERNEL);
if (!info) {
printk("Error can't allocate device instance data\n");
} else {
- memset(info, 0, sizeof(struct mgsl_struct));
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, mgsl_bh_handler);
info->max_frame_size = 4096;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 02b49bc00028..372a37e25620 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -144,8 +144,6 @@ MODULE_PARM_DESC(dosyncppp, "Enable synchronous net device, 0=disable 1=enable")
/*
* tty support and callbacks
*/
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
static struct tty_driver *serial_driver;
static int open(struct tty_struct *tty, struct file * filp);
@@ -823,12 +821,6 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
DBGINFO(("%s set_termios\n", tty->driver->name));
- /* just return if nothing has changed */
- if ((tty->termios->c_cflag == old_termios->c_cflag)
- && (RELEVANT_IFLAG(tty->termios->c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
change_params(info);
/* Handle transition to B0 status */
@@ -3422,13 +3414,12 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
{
struct slgt_info *info;
- info = kmalloc(sizeof(struct slgt_info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
if (!info) {
DBGERR(("%s device alloc failed adapter=%d port=%d\n",
driver_name, adapter_num, port_num));
} else {
- memset(info, 0, sizeof(struct slgt_info));
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index ef93d055bdd7..c63013b2fc36 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -135,8 +135,6 @@ typedef struct _SCADESC_EX
#define IO_PIN_SHUTDOWN_LIMIT 100
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
struct _input_signal_events {
int ri_up;
int ri_down;
@@ -927,12 +925,6 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
printk("%s(%d):%s set_termios()\n", __FILE__,__LINE__,
tty->driver->name );
- /* just return if nothing has changed */
- if ((tty->termios->c_cflag == old_termios->c_cflag)
- && (RELEVANT_IFLAG(tty->termios->c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
change_params(info);
/* Handle transition to B0 status */
@@ -3794,14 +3786,13 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
{
SLMP_INFO *info;
- info = kmalloc(sizeof(SLMP_INFO),
+ info = kzalloc(sizeof(SLMP_INFO),
GFP_KERNEL);
if (!info) {
printk("%s(%d) Error can't allocate device instance data for adapter %d, port %d\n",
__FILE__,__LINE__, adapter_num, port_num);
} else {
- memset(info, 0, sizeof(SLMP_INFO));
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index dc4e1ff7f56f..8f3f7620f95a 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -2,11 +2,9 @@
# TPM device configuration
#
-menu "TPM devices"
- depends on HAS_IOMEM
-
-config TCG_TPM
+menuconfig TCG_TPM
tristate "TPM Hardware Support"
+ depends on HAS_IOMEM
depends on EXPERIMENTAL
---help---
If you have a TPM security chip in your system, which
@@ -21,9 +19,11 @@ config TCG_TPM
Note: For more TPM drivers enable CONFIG_PNP, CONFIG_ACPI
and CONFIG_PNPACPI.
+if TCG_TPM
+
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
- depends on TCG_TPM && PNPACPI
+ depends on PNPACPI
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -32,7 +32,7 @@ config TCG_TIS
config TCG_NSC
tristate "National Semiconductor TPM Interface"
- depends on TCG_TPM && PNPACPI
+ depends on PNPACPI
---help---
If you have a TPM security chip from National Semiconductor
say Yes and it will be accessible from within Linux. To
@@ -41,7 +41,6 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
- depends on TCG_TPM
---help---
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
@@ -49,7 +48,7 @@ config TCG_ATMEL
config TCG_INFINEON
tristate "Infineon Technologies TPM Interface"
- depends on TCG_TPM && PNPACPI
+ depends on PNPACPI
---help---
If you have a TPM security chip from Infineon Technologies
(either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
@@ -59,5 +58,4 @@ config TCG_INFINEON
Further information on this driver and the supported hardware
can be found at http://www.prosec.rub.de/tpm
-endmenu
-
+endif # TCG_TPM
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index 4eba32b23b29..8677fc6a545e 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -427,7 +427,7 @@ static int tpm_ascii_bios_measurements_open(struct inode *inode,
return -ENOMEM;
if ((err = read_log(log)))
- return err;
+ goto out_free;
/* now register seq file */
err = seq_open(file, &tpm_ascii_b_measurments_seqops);
@@ -435,10 +435,15 @@ static int tpm_ascii_bios_measurements_open(struct inode *inode,
seq = file->private_data;
seq->private = log;
} else {
- kfree(log->bios_event_log);
- kfree(log);
+ goto out_free;
}
+
+out:
return err;
+out_free:
+ kfree(log->bios_event_log);
+ kfree(log);
+ goto out;
}
const struct file_operations tpm_ascii_bios_measurements_ops = {
@@ -460,7 +465,7 @@ static int tpm_binary_bios_measurements_open(struct inode *inode,
return -ENOMEM;
if ((err = read_log(log)))
- return err;
+ goto out_free;
/* now register seq file */
err = seq_open(file, &tpm_binary_b_measurments_seqops);
@@ -468,10 +473,15 @@ static int tpm_binary_bios_measurements_open(struct inode *inode,
seq = file->private_data;
seq->private = log;
} else {
- kfree(log->bios_event_log);
- kfree(log);
+ goto out_free;
}
+
+out:
return err;
+out_free:
+ kfree(log->bios_event_log);
+ kfree(log);
+ goto out;
}
const struct file_operations tpm_binary_bios_measurements_ops = {
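Both tpm_bios.c hunks above replace duplicated kfree() pairs on the error paths with a single out_free label, so every failure exits through one cleanup point. A minimal sketch of that idiom with hypothetical names (read_data() stands in for read_log()):

#include <linux/slab.h>

struct event_log { void *bios_event_log; };

/* hypothetical stand-in for read_log(); pretend the BIOS log is missing */
static int read_data(struct event_log *log)
{
	return -EIO;
}

static int open_measurements(struct event_log **out)
{
	struct event_log *log;
	int err;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;

	err = read_data(log);
	if (err)
		goto out_free;		/* one place undoes the allocations */

	*out = log;
	return 0;

out_free:
	kfree(log->bios_event_log);	/* kfree(NULL) is a safe no-op */
	kfree(log);
	return err;
}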
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
new file mode 100644
index 000000000000..d222012c1b0c
--- /dev/null
+++ b/drivers/char/tty_audit.c
@@ -0,0 +1,345 @@
+/*
+ * Creating audit events from TTY input.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All rights reserved. This copyrighted
+ * material is made available to anyone wishing to use, modify, copy, or
+ * redistribute it subject to the terms and conditions of the GNU General
+ * Public License v.2.
+ *
+ * Authors: Miloslav Trmac <mitr@redhat.com>
+ */
+
+#include <linux/audit.h>
+#include <linux/file.h>
+#include <linux/tty.h>
+
+struct tty_audit_buf {
+ atomic_t count;
+ struct mutex mutex; /* Protects all data below */
+ int major, minor; /* The TTY which the data is from */
+ unsigned icanon:1;
+ size_t valid;
+ unsigned char *data; /* Allocated size N_TTY_BUF_SIZE */
+};
+
+static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
+ int icanon)
+{
+ struct tty_audit_buf *buf;
+
+ buf = kmalloc(sizeof (*buf), GFP_KERNEL);
+ if (!buf)
+ goto err;
+ if (PAGE_SIZE != N_TTY_BUF_SIZE)
+ buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+ else
+ buf->data = (unsigned char *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ goto err_buf;
+ atomic_set(&buf->count, 1);
+ mutex_init(&buf->mutex);
+ buf->major = major;
+ buf->minor = minor;
+ buf->icanon = icanon;
+ buf->valid = 0;
+ return buf;
+
+err_buf:
+ kfree(buf);
+err:
+ return NULL;
+}
+
+static void tty_audit_buf_free(struct tty_audit_buf *buf)
+{
+ WARN_ON(buf->valid != 0);
+ if (PAGE_SIZE != N_TTY_BUF_SIZE)
+ kfree(buf->data);
+ else
+ free_page((unsigned long)buf->data);
+ kfree(buf);
+}
+
+static void tty_audit_buf_put(struct tty_audit_buf *buf)
+{
+ if (atomic_dec_and_test(&buf->count))
+ tty_audit_buf_free(buf);
+}
+
+/**
+ * tty_audit_buf_push - Push buffered data out
+ *
+ * Generate an audit message from the contents of @buf, which is owned by
+ * @tsk with @loginuid. @buf->mutex must be locked.
+ */
+static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
+ struct tty_audit_buf *buf)
+{
+ struct audit_buffer *ab;
+
+ if (buf->valid == 0)
+ return;
+ if (audit_enabled == 0)
+ return;
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
+ if (ab) {
+ char name[sizeof(tsk->comm)];
+
+ audit_log_format(ab, "tty pid=%u uid=%u auid=%u major=%d "
+ "minor=%d comm=", tsk->pid, tsk->uid,
+ loginuid, buf->major, buf->minor);
+ get_task_comm(name, tsk);
+ audit_log_untrustedstring(ab, name);
+ audit_log_format(ab, " data=");
+ audit_log_n_untrustedstring(ab, buf->valid, buf->data);
+ audit_log_end(ab);
+ }
+ buf->valid = 0;
+}
+
+/**
+ * tty_audit_buf_push_current - Push buffered data out
+ *
+ * Generate an audit message from the contents of @buf, which is owned by
+ * the current task. @buf->mutex must be locked.
+ */
+static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
+{
+ tty_audit_buf_push(current, audit_get_loginuid(current->audit_context),
+ buf);
+}
+
+/**
+ * tty_audit_exit - Handle a task exit
+ *
+ * Make sure all buffered data is written out and deallocate the buffer.
+ * Only needs to be called if current->signal->tty_audit_buf != %NULL.
+ */
+void tty_audit_exit(void)
+{
+ struct tty_audit_buf *buf;
+
+ spin_lock_irq(&current->sighand->siglock);
+ buf = current->signal->tty_audit_buf;
+ current->signal->tty_audit_buf = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+ if (!buf)
+ return;
+
+ mutex_lock(&buf->mutex);
+ tty_audit_buf_push_current(buf);
+ mutex_unlock(&buf->mutex);
+
+ tty_audit_buf_put(buf);
+}
+
+/**
+ * tty_audit_fork - Copy TTY audit state for a new task
+ *
+ * Set up TTY audit state in @sig from current. @sig needs no locking.
+ */
+void tty_audit_fork(struct signal_struct *sig)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ sig->audit_tty = current->signal->audit_tty;
+ spin_unlock_irq(&current->sighand->siglock);
+ sig->tty_audit_buf = NULL;
+}
+
+/**
+ * tty_audit_push_task - Flush task's pending audit data
+ */
+void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid)
+{
+ struct tty_audit_buf *buf;
+
+ spin_lock_irq(&tsk->sighand->siglock);
+ buf = tsk->signal->tty_audit_buf;
+ if (buf)
+ atomic_inc(&buf->count);
+ spin_unlock_irq(&tsk->sighand->siglock);
+ if (!buf)
+ return;
+
+ mutex_lock(&buf->mutex);
+ tty_audit_buf_push(tsk, loginuid, buf);
+ mutex_unlock(&buf->mutex);
+
+ tty_audit_buf_put(buf);
+}
+
+/**
+ * tty_audit_buf_get - Get an audit buffer.
+ *
+ * Get an audit buffer for @tty, allocate it if necessary. Return %NULL
+ * if TTY auditing is disabled or out of memory. Otherwise, return a new
+ * reference to the buffer.
+ */
+static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty)
+{
+ struct tty_audit_buf *buf, *buf2;
+
+ buf = NULL;
+ buf2 = NULL;
+ spin_lock_irq(&current->sighand->siglock);
+ if (likely(!current->signal->audit_tty))
+ goto out;
+ buf = current->signal->tty_audit_buf;
+ if (buf) {
+ atomic_inc(&buf->count);
+ goto out;
+ }
+ spin_unlock_irq(&current->sighand->siglock);
+
+ buf2 = tty_audit_buf_alloc(tty->driver->major,
+ tty->driver->minor_start + tty->index,
+ tty->icanon);
+ if (buf2 == NULL) {
+ audit_log_lost("out of memory in TTY auditing");
+ return NULL;
+ }
+
+ spin_lock_irq(&current->sighand->siglock);
+ if (!current->signal->audit_tty)
+ goto out;
+ buf = current->signal->tty_audit_buf;
+ if (!buf) {
+ current->signal->tty_audit_buf = buf2;
+ buf = buf2;
+ buf2 = NULL;
+ }
+ atomic_inc(&buf->count);
+ /* Fall through */
+ out:
+ spin_unlock_irq(&current->sighand->siglock);
+ if (buf2)
+ tty_audit_buf_free(buf2);
+ return buf;
+}
+
+/**
+ * tty_audit_add_data - Add data for TTY auditing.
+ *
+ * Audit @data of @size from @tty, if necessary.
+ */
+void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+ size_t size)
+{
+ struct tty_audit_buf *buf;
+ int major, minor;
+
+ if (unlikely(size == 0))
+ return;
+
+ buf = tty_audit_buf_get(tty);
+ if (!buf)
+ return;
+
+ mutex_lock(&buf->mutex);
+ major = tty->driver->major;
+ minor = tty->driver->minor_start + tty->index;
+ if (buf->major != major || buf->minor != minor
+ || buf->icanon != tty->icanon) {
+ tty_audit_buf_push_current(buf);
+ buf->major = major;
+ buf->minor = minor;
+ buf->icanon = tty->icanon;
+ }
+ do {
+ size_t run;
+
+ run = N_TTY_BUF_SIZE - buf->valid;
+ if (run > size)
+ run = size;
+ memcpy(buf->data + buf->valid, data, run);
+ buf->valid += run;
+ data += run;
+ size -= run;
+ if (buf->valid == N_TTY_BUF_SIZE)
+ tty_audit_buf_push_current(buf);
+ } while (size != 0);
+ mutex_unlock(&buf->mutex);
+ tty_audit_buf_put(buf);
+}
+
+/**
+ * tty_audit_push - Push buffered data out
+ *
+ * Make sure no audit data is pending for @tty on the current process.
+ */
+void tty_audit_push(struct tty_struct *tty)
+{
+ struct tty_audit_buf *buf;
+
+ spin_lock_irq(&current->sighand->siglock);
+ if (likely(!current->signal->audit_tty)) {
+ spin_unlock_irq(&current->sighand->siglock);
+ return;
+ }
+ buf = current->signal->tty_audit_buf;
+ if (buf)
+ atomic_inc(&buf->count);
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (buf) {
+ int major, minor;
+
+ major = tty->driver->major;
+ minor = tty->driver->minor_start + tty->index;
+ mutex_lock(&buf->mutex);
+ if (buf->major == major && buf->minor == minor)
+ tty_audit_buf_push_current(buf);
+ mutex_unlock(&buf->mutex);
+ tty_audit_buf_put(buf);
+ }
+}
+
+/**
+ * tty_audit_opening - A TTY is being opened.
+ *
+ * As a special hack, tasks that close all their TTYs and open new ones
+ * are assumed to be system daemons (e.g. getty) and auditing is
+ * automatically disabled for them.
+ */
+void tty_audit_opening(void)
+{
+ int disable;
+
+ disable = 1;
+ spin_lock_irq(&current->sighand->siglock);
+ if (current->signal->audit_tty == 0)
+ disable = 0;
+ spin_unlock_irq(&current->sighand->siglock);
+ if (!disable)
+ return;
+
+ task_lock(current);
+ if (current->files) {
+ struct fdtable *fdt;
+ unsigned i;
+
+ /*
+ * We don't take a ref to the file, so we must hold ->file_lock
+ * instead.
+ */
+ spin_lock(&current->files->file_lock);
+ fdt = files_fdtable(current->files);
+ for (i = 0; i < fdt->max_fds; i++) {
+ struct file *filp;
+
+ filp = fcheck_files(current->files, i);
+ if (filp && is_tty(filp)) {
+ disable = 0;
+ break;
+ }
+ }
+ spin_unlock(&current->files->file_lock);
+ }
+ task_unlock(current);
+ if (!disable)
+ return;
+
+ spin_lock_irq(&current->sighand->siglock);
+ current->signal->audit_tty = 0;
+ spin_unlock_irq(&current->sighand->siglock);
+}
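tty_audit_buf_get() above allocates its buffer outside of siglock, since tty_audit_buf_alloc() can sleep, and then re-checks signal->tty_audit_buf under the lock, discarding the fresh buffer if another thread installed one first. A generic sketch of that allocate-then-recheck idiom, with illustrative names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj { int payload; };

/* illustrative: install a new object into *slot unless someone beat us to it */
static struct obj *get_or_create(struct obj **slot, spinlock_t *lock)
{
	struct obj *o, *fresh;

	spin_lock(lock);
	o = *slot;
	spin_unlock(lock);
	if (o)
		return o;

	fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);	/* may sleep */
	if (!fresh)
		return NULL;

	spin_lock(lock);
	o = *slot;
	if (!o) {
		*slot = fresh;		/* we won the race */
		o = fresh;
		fresh = NULL;
	}
	spin_unlock(lock);

	kfree(fresh);			/* lost the race: drop our copy */
	return o;
}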
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index a96f26a63fa2..de37ebc3a4cf 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1503,6 +1503,15 @@ int tty_hung_up_p(struct file * filp)
EXPORT_SYMBOL(tty_hung_up_p);
+/**
+ * is_tty - check whether a file is a TTY
+ */
+int is_tty(struct file *filp)
+{
+ return filp->f_op->read == tty_read
+ || filp->f_op->read == hung_up_tty_read;
+}
+
static void session_clear_tty(struct pid *session)
{
struct task_struct *p;
@@ -1726,6 +1735,23 @@ static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
return i;
}
+void tty_write_unlock(struct tty_struct *tty)
+{
+ mutex_unlock(&tty->atomic_write_lock);
+ wake_up_interruptible(&tty->write_wait);
+}
+
+int tty_write_lock(struct tty_struct *tty, int ndelay)
+{
+ if (!mutex_trylock(&tty->atomic_write_lock)) {
+ if (ndelay)
+ return -EAGAIN;
+ if (mutex_lock_interruptible(&tty->atomic_write_lock))
+ return -ERESTARTSYS;
+ }
+ return 0;
+}
+
/*
* Split writes up in sane blocksizes to avoid
* denial-of-service type attacks
@@ -1737,13 +1763,12 @@ static inline ssize_t do_tty_write(
const char __user *buf,
size_t count)
{
- ssize_t ret = 0, written = 0;
+ ssize_t ret, written = 0;
unsigned int chunk;
- /* FIXME: O_NDELAY ... */
- if (mutex_lock_interruptible(&tty->atomic_write_lock)) {
- return -ERESTARTSYS;
- }
+ ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
+ if (ret < 0)
+ return ret;
/*
* We chunk up writes into a temporary buffer. This
@@ -1776,8 +1801,8 @@ static inline ssize_t do_tty_write(
buf = kmalloc(chunk, GFP_KERNEL);
if (!buf) {
- mutex_unlock(&tty->atomic_write_lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
kfree(tty->write_buf);
tty->write_cnt = chunk;
@@ -1812,7 +1837,8 @@ static inline ssize_t do_tty_write(
inode->i_mtime = current_fs_time(inode->i_sb);
ret = written;
}
- mutex_unlock(&tty->atomic_write_lock);
+out:
+ tty_write_unlock(tty);
return ret;
}
@@ -2016,11 +2042,9 @@ static int init_dev(struct tty_driver *driver, int idx,
}
if (!*ltp_loc) {
- ltp = (struct ktermios *) kmalloc(sizeof(struct ktermios),
- GFP_KERNEL);
+ ltp = kzalloc(sizeof(struct ktermios), GFP_KERNEL);
if (!ltp)
goto free_mem_out;
- memset(ltp, 0, sizeof(struct ktermios));
}
if (driver->type == TTY_DRIVER_TYPE_PTY) {
@@ -2049,11 +2073,9 @@ static int init_dev(struct tty_driver *driver, int idx,
}
if (!*o_ltp_loc) {
- o_ltp = (struct ktermios *)
- kmalloc(sizeof(struct ktermios), GFP_KERNEL);
+ o_ltp = kzalloc(sizeof(struct ktermios), GFP_KERNEL);
if (!o_ltp)
goto free_mem_out;
- memset(o_ltp, 0, sizeof(struct ktermios));
}
/*
@@ -2660,6 +2682,7 @@ got_driver:
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
mutex_unlock(&tty_mutex);
+ tty_audit_opening();
return 0;
}
@@ -2722,8 +2745,10 @@ static int ptmx_open(struct inode * inode, struct file * filp)
check_tty_count(tty, "tty_open");
retval = ptm_driver->open(tty, filp);
- if (!retval)
+ if (!retval) {
+ tty_audit_opening();
return 0;
+ }
out1:
release_dev(filp);
return retval;
@@ -3163,14 +3188,13 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
static int send_break(struct tty_struct *tty, unsigned int duration)
{
- if (mutex_lock_interruptible(&tty->atomic_write_lock))
+ if (tty_write_lock(tty, 0) < 0)
return -EINTR;
tty->driver->break_ctl(tty, -1);
- if (!signal_pending(current)) {
+ if (!signal_pending(current))
msleep_interruptible(duration);
- }
tty->driver->break_ctl(tty, 0);
- mutex_unlock(&tty->atomic_write_lock);
+ tty_write_unlock(tty);
if (signal_pending(current))
return -EINTR;
return 0;
@@ -3739,9 +3763,8 @@ struct tty_driver *alloc_tty_driver(int lines)
{
struct tty_driver *driver;
- driver = kmalloc(sizeof(struct tty_driver), GFP_KERNEL);
+ driver = kzalloc(sizeof(struct tty_driver), GFP_KERNEL);
if (driver) {
- memset(driver, 0, sizeof(struct tty_driver));
driver->magic = TTY_DRIVER_MAGIC;
driver->num = lines;
/* later we'll move allocation of tables here */
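The new tty_write_lock()/tty_write_unlock() helpers above give do_tty_write() and send_break() a single locking convention: try the mutex, fail fast with -EAGAIN for O_NDELAY writers, otherwise sleep interruptibly, and wake other writers on unlock. A sketch of the intended calling pattern (the function itself is illustrative, not part of the patch):

static ssize_t example_tty_write(struct tty_struct *tty, struct file *file,
				 const unsigned char *buf, size_t count)
{
	ssize_t ret;

	ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
	if (ret < 0)
		return ret;		/* -EAGAIN or -ERESTARTSYS */

	/* ... chunk buf up and hand it to tty->driver->write() ... */
	ret = count;

	tty_write_unlock(tty);		/* also wakes any sleeping writers */
	return ret;
}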
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index fd471cb3338f..3423e9ee6481 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -52,8 +52,6 @@
void tty_wait_until_sent(struct tty_struct * tty, long timeout)
{
- DECLARE_WAITQUEUE(wait, current);
-
#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
char buf[64];
@@ -61,26 +59,13 @@ void tty_wait_until_sent(struct tty_struct * tty, long timeout)
#endif
if (!tty->driver->chars_in_buffer)
return;
- add_wait_queue(&tty->write_wait, &wait);
if (!timeout)
timeout = MAX_SCHEDULE_TIMEOUT;
- do {
-#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "waiting %s...(%d)\n", tty_name(tty, buf),
- tty->driver->chars_in_buffer(tty));
-#endif
- set_current_state(TASK_INTERRUPTIBLE);
- if (signal_pending(current))
- goto stop_waiting;
- if (!tty->driver->chars_in_buffer(tty))
- break;
- timeout = schedule_timeout(timeout);
- } while (timeout);
+ if (wait_event_interruptible_timeout(tty->write_wait,
+ !tty->driver->chars_in_buffer(tty), timeout))
+ return;
if (tty->driver->wait_until_sent)
tty->driver->wait_until_sent(tty, timeout);
-stop_waiting:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&tty->write_wait, &wait);
}
EXPORT_SYMBOL(tty_wait_until_sent);
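The rewritten tty_wait_until_sent() above relies on the standard return convention of wait_event_interruptible_timeout(): a positive value (the remaining jiffies) when the condition became true, 0 when the timeout expired with the condition still false, and -ERESTARTSYS when a signal arrived. A small sketch of that convention with illustrative names:

/* illustrative: wait for *flag to become non-zero, bounded by timeout */
static int wait_for_flag(wait_queue_head_t *wq, int *flag, long timeout)
{
	long rc = wait_event_interruptible_timeout(*wq, *flag, timeout);

	if (rc > 0)
		return 0;		/* condition true; rc = jiffies left */
	if (rc == 0)
		return -ETIMEDOUT;	/* timed out, condition still false */
	return rc;			/* -ERESTARTSYS: interrupted by a signal */
}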
@@ -276,13 +261,12 @@ void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed
termios->c_cflag |= (baud_bits[i] << IBSHIFT);
ifound = i;
}
- }
- while(++i < n_baud_table);
+ } while (++i < n_baud_table);
if (ofound == -1)
termios->c_cflag |= BOTHER;
/* Set exact input bits only if the input and output differ or the
user already did */
- if (ifound == -1 && (ibaud != obaud || ibinput))
+ if (ifound == -1 && (ibaud != obaud || ibinput))
termios->c_cflag |= (BOTHER << IBSHIFT);
}
@@ -575,7 +559,7 @@ static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb)
return -EFAULT;
mutex_lock(&tty->termios_mutex);
- termios = *tty->termios;
+ termios = *tty->termios;
termios.c_cc[VERASE] = tmp.sg_erase;
termios.c_cc[VKILL] = tmp.sg_kill;
set_sgflags(&termios, tmp.sg_flags);
@@ -667,7 +651,7 @@ static int send_prio_char(struct tty_struct *tty, char ch)
return 0;
}
- if (mutex_lock_interruptible(&tty->atomic_write_lock))
+ if (tty_write_lock(tty, 0) < 0)
return -ERESTARTSYS;
if (was_stopped)
@@ -675,7 +659,7 @@ static int send_prio_char(struct tty_struct *tty, char ch)
tty->driver->write(tty, &ch, 1);
if (was_stopped)
stop_tty(tty);
- mutex_unlock(&tty->atomic_write_lock);
+ tty_write_unlock(tty);
return 0;
}
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 13faf8d17482..e12275df6ea2 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -873,12 +873,12 @@ free_op:
}
const struct file_operations viotap_fops = {
- owner: THIS_MODULE,
- read: viotap_read,
- write: viotap_write,
- ioctl: viotap_ioctl,
- open: viotap_open,
- release: viotap_release,
+ .owner = THIS_MODULE,
+ .read = viotap_read,
+ .write = viotap_write,
+ .ioctl = viotap_ioctl,
+ .open = viotap_open,
+ .release = viotap_release,
};
/* Handle interrupt events for tape */
@@ -1098,15 +1098,10 @@ static int chg_state(int index, unsigned char new_state, struct file *file)
/* Cleanup */
static void __exit viotap_exit(void)
{
- int ret;
-
remove_proc_entry("iSeries/viotape", NULL);
vio_unregister_driver(&viotape_driver);
class_destroy(tape_class);
- ret = unregister_chrdev(VIOTAPE_MAJOR, "viotape");
- if (ret < 0)
- printk(VIOTAPE_KERN_WARN "Error unregistering device: %d\n",
- ret);
+ unregister_chrdev(VIOTAPE_MAJOR, "viotape");
if (viotape_unitinfo)
dma_free_coherent(iSeries_vio_dev,
sizeof(viotape_unitinfo[0]) * VIOTAPE_MAX_TAPE,
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c
index bef6d886d4fb..e122a0e87bb0 100644
--- a/drivers/char/vme_scc.c
+++ b/drivers/char/vme_scc.c
@@ -1013,18 +1013,10 @@ static struct tty_driver *scc_console_device(struct console *c, int *index)
return scc_driver;
}
-
-static int __init scc_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
-
static struct console sercons = {
.name = "ttyS",
.write = scc_console_write,
.device = scc_console_device,
- .setup = scc_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index 0cea8d4907df..e5ed09192be8 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -19,18 +19,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/irq.h>
@@ -44,18 +43,6 @@ static int major; /* default is dynamic major device number */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");
-#define GIU_TYPE1_START 0x0b000100UL
-#define GIU_TYPE1_SIZE 0x20UL
-
-#define GIU_TYPE2_START 0x0f000140UL
-#define GIU_TYPE2_SIZE 0x20UL
-
-#define GIU_TYPE3_START 0x0f000140UL
-#define GIU_TYPE3_SIZE 0x28UL
-
-#define GIU_PULLUPDOWN_START 0x0b0002e0UL
-#define GIU_PULLUPDOWN_SIZE 0x04UL
-
#define GIUIOSELL 0x00
#define GIUIOSELH 0x02
#define GIUPIODL 0x04
@@ -89,8 +76,6 @@ MODULE_PARM_DESC(major, "Major device number");
#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
static spinlock_t giu_lock;
-static struct resource *giu_resource1;
-static struct resource *giu_resource2;
static unsigned long giu_flags;
static unsigned int giu_nr_pins;
@@ -234,7 +219,7 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
giu_set(GIUINTHTSELL, mask);
else
giu_clear(GIUINTHTSELL, mask);
- if (current_cpu_data.cputype == CPU_VR4133) {
+ if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
switch (trigger) {
case IRQ_TRIGGER_EDGE_FALLING:
giu_set(GIUFEDGEINHL, mask);
@@ -269,7 +254,7 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
giu_set(GIUINTHTSELH, mask);
else
giu_clear(GIUINTHTSELH, mask);
- if (current_cpu_data.cputype == CPU_VR4133) {
+ if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
switch (trigger) {
case IRQ_TRIGGER_EDGE_FALLING:
giu_set(GIUFEDGEINHH, mask);
@@ -298,7 +283,6 @@ void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger, irq_signal_
giu_write(GIUINTSTATH, mask);
}
}
-
EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
@@ -321,7 +305,6 @@ void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
giu_write(GIUINTSTATH, mask);
}
}
-
EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
gpio_data_t vr41xx_gpio_get_pin(unsigned int pin)
@@ -350,7 +333,6 @@ gpio_data_t vr41xx_gpio_get_pin(unsigned int pin)
return GPIO_DATA_LOW;
}
-
EXPORT_SYMBOL_GPL(vr41xx_gpio_get_pin);
int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data)
@@ -388,7 +370,6 @@ int vr41xx_gpio_set_pin(unsigned int pin, gpio_data_t data)
return 0;
}
-
EXPORT_SYMBOL_GPL(vr41xx_gpio_set_pin);
int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir)
@@ -438,7 +419,6 @@ int vr41xx_gpio_set_direction(unsigned int pin, gpio_direction_t dir)
return 0;
}
-
EXPORT_SYMBOL_GPL(vr41xx_gpio_set_direction);
int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
@@ -477,7 +457,6 @@ int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
return 0;
}
-
EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
static ssize_t gpio_read(struct file *file, char __user *buf, size_t len,
@@ -596,61 +575,40 @@ static const struct file_operations gpio_fops = {
static int __devinit giu_probe(struct platform_device *dev)
{
- unsigned long start, size, flags = 0;
- unsigned int nr_pins = 0, trigger, i, pin;
- struct resource *res1, *res2 = NULL;
- void *base;
+ struct resource *res;
+ unsigned int trigger, i, pin;
struct irq_chip *chip;
- int retval;
-
- switch (current_cpu_data.cputype) {
- case CPU_VR4111:
- case CPU_VR4121:
- start = GIU_TYPE1_START;
- size = GIU_TYPE1_SIZE;
- flags = GPIO_HAS_PULLUPDOWN_IO;
- nr_pins = 50;
+ int irq, retval;
+
+ switch (dev->id) {
+ case GPIO_50PINS_PULLUPDOWN:
+ giu_flags = GPIO_HAS_PULLUPDOWN_IO;
+ giu_nr_pins = 50;
break;
- case CPU_VR4122:
- case CPU_VR4131:
- start = GIU_TYPE2_START;
- size = GIU_TYPE2_SIZE;
- nr_pins = 36;
+ case GPIO_36PINS:
+ giu_nr_pins = 36;
break;
- case CPU_VR4133:
- start = GIU_TYPE3_START;
- size = GIU_TYPE3_SIZE;
- flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
- nr_pins = 48;
+ case GPIO_48PINS_EDGE_SELECT:
+ giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
+ giu_nr_pins = 48;
break;
default:
+ printk(KERN_ERR "GIU: unknown ID %d\n", dev->id);
return -ENODEV;
}
- res1 = request_mem_region(start, size, "GIU");
- if (res1 == NULL)
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
return -EBUSY;
- base = ioremap(start, size);
- if (base == NULL) {
- release_resource(res1);
+ giu_base = ioremap(res->start, res->end - res->start + 1);
+ if (!giu_base)
return -ENOMEM;
- }
-
- if (flags & GPIO_HAS_PULLUPDOWN_IO) {
- res2 = request_mem_region(GIU_PULLUPDOWN_START, GIU_PULLUPDOWN_SIZE, "GIU");
- if (res2 == NULL) {
- iounmap(base);
- release_resource(res1);
- return -EBUSY;
- }
- }
retval = register_chrdev(major, "GIU", &gpio_fops);
if (retval < 0) {
- iounmap(base);
- release_resource(res1);
- release_resource(res2);
+ iounmap(giu_base);
+ giu_base = NULL;
return retval;
}
@@ -660,11 +618,6 @@ static int __devinit giu_probe(struct platform_device *dev)
}
spin_lock_init(&giu_lock);
- giu_base = base;
- giu_resource1 = res1;
- giu_resource2 = res2;
- giu_flags = flags;
- giu_nr_pins = nr_pins;
giu_write(GIUINTENL, 0);
giu_write(GIUINTENH, 0);
@@ -685,22 +638,23 @@ static int __devinit giu_probe(struct platform_device *dev)
}
- return cascade_irq(GIUINT_IRQ, giu_get_irq);
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0 || irq >= NR_IRQS)
+ return -EBUSY;
+
+ return cascade_irq(irq, giu_get_irq);
}
static int __devexit giu_remove(struct platform_device *dev)
{
- iounmap(giu_base);
-
- release_resource(giu_resource1);
- if (giu_flags & GPIO_HAS_PULLUPDOWN_IO)
- release_resource(giu_resource2);
+ if (giu_base) {
+ iounmap(giu_base);
+ giu_base = NULL;
+ }
return 0;
}
-static struct platform_device *giu_platform_device;
-
static struct platform_driver giu_device_driver = {
.probe = giu_probe,
.remove = __devexit_p(giu_remove),
@@ -712,30 +666,12 @@ static struct platform_driver giu_device_driver = {
static int __init vr41xx_giu_init(void)
{
- int retval;
-
- giu_platform_device = platform_device_alloc("GIU", -1);
- if (!giu_platform_device)
- return -ENOMEM;
-
- retval = platform_device_add(giu_platform_device);
- if (retval < 0) {
- platform_device_put(giu_platform_device);
- return retval;
- }
-
- retval = platform_driver_register(&giu_device_driver);
- if (retval < 0)
- platform_device_unregister(giu_platform_device);
-
- return retval;
+ return platform_driver_register(&giu_device_driver);
}
static void __exit vr41xx_giu_exit(void)
{
platform_driver_unregister(&giu_device_driver);
-
- platform_device_unregister(giu_platform_device);
}
module_init(vr41xx_giu_init);
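With this rework the GIU driver no longer switches on the CPU type or hard-codes its register windows: vr41xx_giu_init() only registers the platform driver, so the VR41xx board code is expected to supply a platform device carrying the memory window, the cascade IRQ, and one of the GPIO_* ids tested in giu_probe(). A hedged sketch of what such a device might look like, reusing the TYPE1 values from the #defines removed above purely for illustration:

static struct resource giu_resources[] = {
	{
		.start	= 0x0b000100UL,			/* old GIU_TYPE1_START */
		.end	= 0x0b000100UL + 0x20UL - 1,	/* old GIU_TYPE1_SIZE */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= GIUINT_IRQ,			/* cascade interrupt */
		.end	= GIUINT_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device giu_device = {
	.name		= "GIU",
	.id		= GPIO_50PINS_PULLUPDOWN,
	.resource	= giu_resources,
	.num_resources	= ARRAY_SIZE(giu_resources),
};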
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6650ae1c088f..edb7002a3216 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -729,10 +729,9 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
/* although the numbers above are not valid since long ago, the
point is still up-to-date and the comment still has its value
even if only as a historical artifact. --mj, July 1998 */
- vc = kmalloc(sizeof(struct vc_data), GFP_KERNEL);
+ vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
if (!vc)
return -ENOMEM;
- memset(vc, 0, sizeof(*vc));
vc_cons[currcons].d = vc;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
@@ -1991,8 +1990,7 @@ static int is_double_width(uint32_t ucs)
{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
};
- return bisearch(ucs, double_width,
- sizeof(double_width) / sizeof(*double_width) - 1);
+ return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
}
/* acquires console_sem */
@@ -2989,8 +2987,24 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
return retval;
}
-static int unbind_con_driver(const struct consw *csw, int first, int last,
- int deflt)
+/**
+ * unbind_con_driver - unbind a console driver
+ * @csw: pointer to console driver to unregister
+ * @first: first in range of consoles that @csw should be unbound from
+ * @last: last in range of consoles that @csw should be unbound from
+ * @deflt: should next bound console driver be default after @csw is unbound?
+ *
+ * To unbind a driver from all possible consoles, pass 0 as @first and
+ * %MAX_NR_CONSOLES as @last.
+ *
+ * @deflt controls whether the console that ends up replacing @csw should be
+ * the default console.
+ *
+ * RETURNS:
+ * -ENODEV if @csw isn't a registered console driver or can't be unregistered
+ * or 0 on success.
+ */
+int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
struct module *owner = csw->owner;
const struct consw *defcsw = NULL;
@@ -3075,6 +3089,7 @@ err:
return retval;
}
+EXPORT_SYMBOL(unbind_con_driver);
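Now that unbind_con_driver() is exported, a console driver such as a framebuffer console can drop its binding at unload time. Following the kernel-doc above, a hypothetical caller (my_consw is not a real symbol) might look like:

static void my_console_unload(void)
{
	/* unbind from every console; let the replacement become the default */
	if (unbind_con_driver(&my_consw, 0, MAX_NR_CONSOLES, 1) < 0)
		printk(KERN_WARNING "my_consw was not bound\n");
}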
static int vt_bind(struct con_driver *con)
{
@@ -3491,9 +3506,6 @@ void do_blank_screen(int entering_gfx)
}
return;
}
- if (blank_state != blank_normal_wait)
- return;
- blank_state = blank_off;
/* entering graphics mode? */
if (entering_gfx) {
@@ -3501,10 +3513,15 @@ void do_blank_screen(int entering_gfx)
save_screen(vc);
vc->vc_sw->con_blank(vc, -1, 1);
console_blanked = fg_console + 1;
+ blank_state = blank_off;
set_origin(vc);
return;
}
+ if (blank_state != blank_normal_wait)
+ return;
+ blank_state = blank_off;
+
/* don't blank graphics */
if (vc->vc_mode != KD_TEXT) {
console_blanked = fg_console + 1;
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 53f5538c0c05..16fb23125e96 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -187,6 +187,31 @@ config PNX4008_WATCHDOG
Say N if you are unsure.
+config IOP_WATCHDOG
+ tristate "IOP Watchdog"
+ depends on WATCHDOG && PLAT_IOP
+ select WATCHDOG_NOWAYOUT if (ARCH_IOP32X || ARCH_IOP33X)
+ help
+ Say Y here to include support for the watchdog timer
+ in the Intel IOP3XX & IOP13XX I/O Processors. This driver can
+ be built as a module by choosing M. The module will
+ be called iop_wdt.
+
+ Note: The IOP13XX watchdog does an Internal Bus Reset which will
+ affect both cores and the peripherals of the IOP. The ATU-X
+ and/or ATUe configuration registers will remain intact, but if
+ operating as a Root Complex and/or Central Resource, the PCI-X
+ and/or PCIe busses will also be reset. THIS IS A VERY BIG HAMMER.
+
+# AVR32 Architecture
+
+config AT32AP700X_WDT
+ tristate "AT32AP700x watchdog"
+ depends on WATCHDOG && CPU_AT32AP7000
+ help
+ Watchdog timer embedded into AT32AP700x devices. This will reboot
+ your system when the timeout is reached.
+
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -593,7 +618,7 @@ config ZVM_WATCHDOG
config SH_WDT
tristate "SuperH Watchdog"
- depends on SUPERH
+ depends on SUPERH && (CPU_SH3 || CPU_SH4)
help
This driver adds watchdog support for the integrated watchdog in the
SuperH processors. If you have one of these processors and wish
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index d90f649038c2..bdb9d5e3bb41 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -35,6 +35,10 @@ obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
+obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
+
+# AVR32 Architecture
+obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
diff --git a/drivers/char/watchdog/at32ap700x_wdt.c b/drivers/char/watchdog/at32ap700x_wdt.c
new file mode 100644
index 000000000000..54a516169d07
--- /dev/null
+++ b/drivers/char/watchdog/at32ap700x_wdt.c
@@ -0,0 +1,386 @@
+/*
+ * Watchdog driver for Atmel AT32AP700X devices
+ *
+ * Copyright (C) 2005-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#define TIMEOUT_MIN 1
+#define TIMEOUT_MAX 2
+#define TIMEOUT_DEFAULT TIMEOUT_MAX
+
+/* module parameters */
+static int timeout = TIMEOUT_DEFAULT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Timeout value. Limited to be 1 or 2 seconds. (default="
+ __MODULE_STRING(TIMEOUT_DEFAULT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/* Watchdog registers and write/read macro */
+#define WDT_CTRL 0x00
+#define WDT_CTRL_EN 0
+#define WDT_CTRL_PSEL 8
+#define WDT_CTRL_KEY 24
+
+#define WDT_CLR 0x04
+
+#define WDT_BIT(name) (1 << WDT_##name)
+#define WDT_BF(name, value) ((value) << WDT_##name)
+
+#define wdt_readl(dev, reg) \
+ __raw_readl((dev)->regs + WDT_##reg)
+#define wdt_writel(dev, reg, value) \
+ __raw_writel((value), (dev)->regs + WDT_##reg)
+
+struct wdt_at32ap700x {
+ void __iomem *regs;
+ spinlock_t io_lock;
+ int timeout;
+ unsigned long users;
+ struct miscdevice miscdev;
+};
+
+static struct wdt_at32ap700x *wdt;
+static char expect_release;
+
+/*
+ * Disable the watchdog.
+ */
+static inline void at32_wdt_stop(void)
+{
+ unsigned long psel;
+
+ spin_lock(&wdt->io_lock);
+ psel = wdt_readl(wdt, CTRL) & WDT_BF(CTRL_PSEL, 0x0f);
+ wdt_writel(wdt, CTRL, psel | WDT_BF(CTRL_KEY, 0x55));
+ wdt_writel(wdt, CTRL, psel | WDT_BF(CTRL_KEY, 0xaa));
+ spin_unlock(&wdt->io_lock);
+}
+
+/*
+ * Enable and reset the watchdog.
+ */
+static inline void at32_wdt_start(void)
+{
+ /* 0xf is 2^16 divider = 2 sec, 0xe is 2^15 divider = 1 sec */
+ unsigned long psel = (wdt->timeout > 1) ? 0xf : 0xe;
+
+ spin_lock(&wdt->io_lock);
+ wdt_writel(wdt, CTRL, WDT_BIT(CTRL_EN)
+ | WDT_BF(CTRL_PSEL, psel)
+ | WDT_BF(CTRL_KEY, 0x55));
+ wdt_writel(wdt, CTRL, WDT_BIT(CTRL_EN)
+ | WDT_BF(CTRL_PSEL, psel)
+ | WDT_BF(CTRL_KEY, 0xaa));
+ spin_unlock(&wdt->io_lock);
+}
+
+/*
+ * Pat the watchdog timer.
+ */
+static inline void at32_wdt_pat(void)
+{
+ spin_lock(&wdt->io_lock);
+ wdt_writel(wdt, CLR, 0x42);
+ spin_unlock(&wdt->io_lock);
+}
+
+/*
+ * Watchdog device is opened, and watchdog starts running.
+ */
+static int at32_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(1, &wdt->users))
+ return -EBUSY;
+
+ at32_wdt_start();
+ return nonseekable_open(inode, file);
+}
+
+/*
+ * Close the watchdog device.
+ */
+static int at32_wdt_close(struct inode *inode, struct file *file)
+{
+ if (expect_release == 42) {
+ at32_wdt_stop();
+ } else {
+ dev_dbg(wdt->miscdev.parent,
+ "Unexpected close, not stopping watchdog!\n");
+ at32_wdt_pat();
+ }
+ clear_bit(1, &wdt->users);
+ expect_release = 0;
+ return 0;
+}
+
+/*
+ * Change the watchdog time interval.
+ */
+static int at32_wdt_settimeout(int time)
+{
+ /*
+ * All counting occurs at 1 / SLOW_CLOCK (32 kHz) and max prescaler is
+ * 2 ^ 16 allowing up to 2 seconds timeout.
+ */
+ if ((time < TIMEOUT_MIN) || (time > TIMEOUT_MAX))
+ return -EINVAL;
+
+ /*
+ * Set new watchdog time. It will be used when at32_wdt_start() is
+ * called.
+ */
+ wdt->timeout = time;
+ return 0;
+}
+
+static struct watchdog_info at32_wdt_info = {
+ .identity = "at32ap700x watchdog",
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+/*
+ * Handle commands from user-space.
+ */
+static int at32_wdt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int time;
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+
+ switch (cmd) {
+ case WDIOC_KEEPALIVE:
+ at32_wdt_pat();
+ ret = 0;
+ break;
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(argp, &at32_wdt_info,
+ sizeof(at32_wdt_info)) ? -EFAULT : 0;
+ break;
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(time, p);
+ if (ret)
+ break;
+ ret = at32_wdt_settimeout(time);
+ if (ret)
+ break;
+ /* Enable new time value */
+ at32_wdt_start();
+ /* fall through */
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(wdt->timeout, p);
+ break;
+ case WDIOC_GETSTATUS: /* fall through */
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, p);
+ break;
+ case WDIOC_SETOPTIONS:
+ ret = get_user(time, p);
+ if (ret)
+ break;
+ if (time & WDIOS_DISABLECARD)
+ at32_wdt_stop();
+ if (time & WDIOS_ENABLECARD)
+ at32_wdt_start();
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t at32_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /*
+ * note: just in case someone wrote the magic
+ * character five months ago...
+ */
+ expect_release = 0;
+
+ /*
+ * scan to see whether or not we got the magic
+ * character
+ */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data+i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = 42;
+ }
+ }
+ /* someone wrote to us, we should pat the watchdog */
+ at32_wdt_pat();
+ }
+ return len;
+}
+
+static const struct file_operations at32_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .ioctl = at32_wdt_ioctl,
+ .open = at32_wdt_open,
+ .release = at32_wdt_close,
+ .write = at32_wdt_write,
+};
+
+static int __init at32_wdt_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ int ret;
+
+ if (wdt) {
+ dev_dbg(&pdev->dev, "only 1 wdt instance supported.\n");
+ return -EBUSY;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_dbg(&pdev->dev, "missing mmio resource\n");
+ return -ENXIO;
+ }
+
+ wdt = kzalloc(sizeof(struct wdt_at32ap700x), GFP_KERNEL);
+ if (!wdt) {
+ dev_dbg(&pdev->dev, "no memory for wdt structure\n");
+ return -ENOMEM;
+ }
+
+ wdt->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ if (!wdt->regs) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "could not map I/O memory\n");
+ goto err_free;
+ }
+ spin_lock_init(&wdt->io_lock);
+ wdt->users = 0;
+ wdt->miscdev.minor = WATCHDOG_MINOR;
+ wdt->miscdev.name = "watchdog";
+ wdt->miscdev.fops = &at32_wdt_fops;
+
+ if (at32_wdt_settimeout(timeout)) {
+ at32_wdt_settimeout(TIMEOUT_DEFAULT);
+ dev_dbg(&pdev->dev,
+ "default timeout invalid, set to %d sec.\n",
+ TIMEOUT_DEFAULT);
+ }
+
+ ret = misc_register(&wdt->miscdev);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to register wdt miscdev\n");
+ goto err_iounmap;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+ wdt->miscdev.parent = &pdev->dev;
+ dev_info(&pdev->dev,
+ "AT32AP700X WDT at 0x%p, timeout %d sec (nowayout=%d)\n",
+ wdt->regs, wdt->timeout, nowayout);
+
+ return 0;
+
+err_iounmap:
+ iounmap(wdt->regs);
+err_free:
+ kfree(wdt);
+ wdt = NULL;
+ return ret;
+}
+
+static int __exit at32_wdt_remove(struct platform_device *pdev)
+{
+ if (wdt && platform_get_drvdata(pdev) == wdt) {
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ at32_wdt_stop();
+
+ misc_deregister(&wdt->miscdev);
+ iounmap(wdt->regs);
+ kfree(wdt);
+ wdt = NULL;
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+static void at32_wdt_shutdown(struct platform_device *pdev)
+{
+ at32_wdt_stop();
+}
+
+#ifdef CONFIG_PM
+static int at32_wdt_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ at32_wdt_stop();
+ return 0;
+}
+
+static int at32_wdt_resume(struct platform_device *pdev)
+{
+ if (wdt->users)
+ at32_wdt_start();
+ return 0;
+}
+#else
+#define at32_wdt_suspend NULL
+#define at32_wdt_resume NULL
+#endif
+
+static struct platform_driver at32_wdt_driver = {
+ .remove = __exit_p(at32_wdt_remove),
+ .suspend = at32_wdt_suspend,
+ .resume = at32_wdt_resume,
+ .driver = {
+ .name = "at32_wdt",
+ .owner = THIS_MODULE,
+ },
+ .shutdown = at32_wdt_shutdown,
+};
+
+static int __init at32_wdt_init(void)
+{
+ return platform_driver_probe(&at32_wdt_driver, at32_wdt_probe);
+}
+module_init(at32_wdt_init);
+
+static void __exit at32_wdt_exit(void)
+{
+ platform_driver_unregister(&at32_wdt_driver);
+}
+module_exit(at32_wdt_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
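A worked check of the 1 to 2 second window enforced by at32_wdt_settimeout() above: the counter runs off the slow clock (32 kHz, taken here as 32768 Hz), so the 2^16 prescaler selected by psel 0xf in at32_wdt_start() gives 65536 / 32768 = 2 s and the 2^15 prescaler (psel 0xe) gives 1 s. As a small standalone calculation, not part of the driver:

#include <stdio.h>

int main(void)
{
	const double slow_clock_hz = 32768.0;	/* assumed slow clock rate */

	/* the prescaler selects a power-of-two divider: 2^15 or 2^16 */
	printf("psel 0xe: %.0f s\n", (1 << 15) / slow_clock_hz);	/* 1 s */
	printf("psel 0xf: %.0f s\n", (1 << 16) / slow_clock_hz);	/* 2 s */
	return 0;
}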
diff --git a/drivers/char/watchdog/ep93xx_wdt.c b/drivers/char/watchdog/ep93xx_wdt.c
index 01cf123b1616..0e4787a0bb87 100644
--- a/drivers/char/watchdog/ep93xx_wdt.c
+++ b/drivers/char/watchdog/ep93xx_wdt.c
@@ -107,10 +107,6 @@ static ssize_t
ep93xx_wdt_write(struct file *file, const char __user *data, size_t len,
loff_t *ppos)
{
- /* Can't seek (pwrite) on this device */
- if (*ppos != file->f_pos)
- return -ESPIPE;
-
if (len) {
if (!nowayout) {
size_t i;
diff --git a/drivers/char/watchdog/iop_wdt.c b/drivers/char/watchdog/iop_wdt.c
new file mode 100644
index 000000000000..bbbd91af754d
--- /dev/null
+++ b/drivers/char/watchdog/iop_wdt.c
@@ -0,0 +1,262 @@
+/*
+ * drivers/char/watchdog/iop_wdt.c
+ *
+ * WDT driver for Intel I/O Processors
+ * Copyright (C) 2005, Intel Corporation.
+ *
+ * Based on ixp4xx driver, Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Curt E Bruns <curt.e.bruns@intel.com>
+ * Peter Milne <peter.milne@d-tacq.com>
+ * Dan Williams <dan.j.williams@intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/uaccess.h>
+#include <asm/hardware.h>
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned long wdt_status;
+static unsigned long boot_status;
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+#define WDT_ENABLED 2
+
+static unsigned long iop_watchdog_timeout(void)
+{
+ return (0xffffffffUL / get_iop_tick_rate());
+}
+
+/**
+ * wdt_supports_disable - determine whether we are accessing an iop13xx
+ * or an iop3xx watchdog by whether it has a disable command
+ */
+static int wdt_supports_disable(void)
+{
+ int can_disable;
+
+ if (IOP_WDTCR_EN_ARM != IOP_WDTCR_DIS_ARM)
+ can_disable = 1;
+ else
+ can_disable = 0;
+
+ return can_disable;
+}
+
+static void wdt_enable(void)
+{
+ /* Arm and enable the timer to start counting down from 0xFFFF.FFFF
+ * Takes approx. 10.7s to timeout
+ */
+ write_wdtcr(IOP_WDTCR_EN_ARM);
+ write_wdtcr(IOP_WDTCR_EN);
+}
+
+/* returns 0 if the timer was successfully disabled */
+static int wdt_disable(void)
+{
+ /* Stop Counting */
+ if (wdt_supports_disable()) {
+ write_wdtcr(IOP_WDTCR_DIS_ARM);
+ write_wdtcr(IOP_WDTCR_DIS);
+ clear_bit(WDT_ENABLED, &wdt_status);
+ printk(KERN_INFO "WATCHDOG: Disabled\n");
+ return 0;
+ } else
+ return 1;
+}
+
+static int iop_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+ return -EBUSY;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ wdt_enable();
+
+ set_bit(WDT_ENABLED, &wdt_status);
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t
+iop_wdt_write(struct file *file, const char *data, size_t len,
+ loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ }
+ }
+ wdt_enable();
+ }
+
+ return len;
+}
+
+static struct watchdog_info ident = {
+ .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "iop watchdog",
+};
+
+static int
+iop_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int options;
+ int ret = -ENOTTY;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user
+ ((struct watchdog_info *)arg, &ident, sizeof ident))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(boot_status, (int *)arg);
+ break;
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(iop_watchdog_timeout(), (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ wdt_enable();
+ ret = 0;
+ break;
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(options, (int *)arg))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD) {
+ if (!nowayout) {
+ if (wdt_disable() == 0) {
+ set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ ret = 0;
+ } else
+ ret = -ENXIO;
+ } else
+ ret = 0;
+ }
+
+ if (options & WDIOS_ENABLECARD) {
+ wdt_enable();
+ ret = 0;
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static int iop_wdt_release(struct inode *inode, struct file *file)
+{
+ int state = 1;
+ if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
+ if (test_bit(WDT_ENABLED, &wdt_status))
+ state = wdt_disable();
+
+ /* if the timer is not disabled, reload and notify that we are still
+ * going down
+ */
+ if (state != 0) {
+ wdt_enable();
+ printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
+ "reset in %lu seconds\n", iop_watchdog_timeout());
+ }
+
+ clear_bit(WDT_IN_USE, &wdt_status);
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ return 0;
+}
+
+static const struct file_operations iop_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = iop_wdt_write,
+ .ioctl = iop_wdt_ioctl,
+ .open = iop_wdt_open,
+ .release = iop_wdt_release,
+};
+
+static struct miscdevice iop_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &iop_wdt_fops,
+};
+
+static int __init iop_wdt_init(void)
+{
+ int ret;
+
+ ret = misc_register(&iop_wdt_miscdev);
+ if (ret == 0)
+ printk("iop watchdog timer: timeout %lu sec\n",
+ iop_watchdog_timeout());
+
+ /* check if the reset was caused by the watchdog timer */
+ boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0;
+
+ /* Configure Watchdog Timeout to cause an Internal Bus (IB) Reset
+ * NOTE: An IB Reset will Reset both cores in the IOP342
+ */
+ write_wdtsr(IOP13XX_WDTCR_IB_RESET);
+
+ return ret;
+}
+
+static void __exit iop_wdt_exit(void)
+{
+ misc_deregister(&iop_wdt_miscdev);
+}
+
+module_init(iop_wdt_init);
+module_exit(iop_wdt_exit);
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+
+MODULE_AUTHOR("Curt E Bruns <curt.e.bruns@intel.com>");
+MODULE_DESCRIPTION("iop watchdog timer driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
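iop_watchdog_timeout() above is simply the full 32-bit count divided by the IOP tick rate, and the comment in wdt_enable() quotes roughly 10.7 s. Working backwards, that figure corresponds to a tick rate of about 400 MHz; the rate here is an assumption derived from that comment, not a value read from the driver. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long max_count = 0xffffffffULL;	/* counter start value */
	unsigned long long tick_hz = 400000000ULL;	/* assumed IOP tick rate */

	printf("integer seconds: %llu\n", max_count / tick_hz);		/* 10 */
	printf("exact seconds:   %.1f\n", (double)max_count / tick_hz);	/* 10.7 */
	return 0;
}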
diff --git a/drivers/char/watchdog/mixcomwd.c b/drivers/char/watchdog/mixcomwd.c
index f35e2848aa3e..db2ccb864412 100644
--- a/drivers/char/watchdog/mixcomwd.c
+++ b/drivers/char/watchdog/mixcomwd.c
@@ -29,11 +29,18 @@
* - support for one more type board
*
* Version 0.5 (2001/12/14) Matt Domsch <Matt_Domsch@dell.com>
- * - added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
+ * - added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
+ *
+ * Version 0.6 (2002/04/12): Rob Radez <rob@osinvestor.com>
+ * - make mixcomwd_opened unsigned,
+ * removed lock_kernel/unlock_kernel from mixcomwd_release,
+ * modified ioctl a bit to conform to API
*
*/
-#define VERSION "0.5"
+#define VERSION "0.6"
+#define WATCHDOG_NAME "mixcomwd"
+#define PFX WATCHDOG_NAME ": "
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -49,12 +56,46 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-static int mixcomwd_ioports[] = { 0x180, 0x280, 0x380, 0x000 };
-
-#define MIXCOM_WATCHDOG_OFFSET 0xc10
+/*
+ * We have two types of cards that can be probed:
+ * 1) The Mixcom cards: these cards can be found at addresses
+ * 0x180, 0x280, 0x380 with an additional offset of 0xc10.
+ * (Or 0xd90, 0xe90, 0xf90).
+ * 2) The FlashCOM cards: these cards can be set up at
+ * 0x300 -> 0x378, in 0x8 jumps with an offset of 0x04.
+ * (Or 0x304 -> 0x37c in 0x8 jumps).
+ * Each card has its own ID.
+ */
#define MIXCOM_ID 0x11
-#define FLASHCOM_WATCHDOG_OFFSET 0x4
#define FLASHCOM_ID 0x18
+static struct {
+ int ioport;
+ int id;
+} mixcomwd_io_info[] __devinitdata = {
+ /* The Mixcom cards */
+ {0x0d90, MIXCOM_ID},
+ {0x0e90, MIXCOM_ID},
+ {0x0f90, MIXCOM_ID},
+ /* The FlashCOM cards */
+ {0x0304, FLASHCOM_ID},
+ {0x030c, FLASHCOM_ID},
+ {0x0314, FLASHCOM_ID},
+ {0x031c, FLASHCOM_ID},
+ {0x0324, FLASHCOM_ID},
+ {0x032c, FLASHCOM_ID},
+ {0x0334, FLASHCOM_ID},
+ {0x033c, FLASHCOM_ID},
+ {0x0344, FLASHCOM_ID},
+ {0x034c, FLASHCOM_ID},
+ {0x0354, FLASHCOM_ID},
+ {0x035c, FLASHCOM_ID},
+ {0x0364, FLASHCOM_ID},
+ {0x036c, FLASHCOM_ID},
+ {0x0374, FLASHCOM_ID},
+ {0x037c, FLASHCOM_ID},
+ /* The end of the list */
+ {0x0000, 0},
+};
static void mixcomwd_timerfun(unsigned long d);
@@ -113,13 +154,13 @@ static int mixcomwd_release(struct inode *inode, struct file *file)
{
if (expect_close == 42) {
if(mixcomwd_timer_alive) {
- printk(KERN_ERR "mixcomwd: release called while internal timer alive");
+ printk(KERN_ERR PFX "release called while internal timer alive");
return -EBUSY;
}
mixcomwd_timer_alive=1;
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
} else {
- printk(KERN_CRIT "mixcomwd: WDT device closed unexpectedly. WDT will not stop!\n");
+ printk(KERN_CRIT PFX "WDT device closed unexpectedly. WDT will not stop!\n");
}
clear_bit(0,&mixcomwd_opened);
@@ -188,8 +229,7 @@ static int mixcomwd_ioctl(struct inode *inode, struct file *file,
return 0;
}
-static const struct file_operations mixcomwd_fops=
-{
+static const struct file_operations mixcomwd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = mixcomwd_write,
@@ -198,46 +238,30 @@ static const struct file_operations mixcomwd_fops=
.release = mixcomwd_release,
};
-static struct miscdevice mixcomwd_miscdev=
-{
+static struct miscdevice mixcomwd_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &mixcomwd_fops,
};
-static int __init mixcomwd_checkcard(int port)
+static int __init checkcard(int port, int card_id)
{
int id;
- port += MIXCOM_WATCHDOG_OFFSET;
- if (!request_region(port, 1, "MixCOM watchdog")) {
- return 0;
- }
-
- id=inb_p(port) & 0x3f;
- if(id!=MIXCOM_ID) {
- release_region(port, 1);
- return 0;
- }
- return port;
-}
-
-static int __init flashcom_checkcard(int port)
-{
- int id;
-
- port += FLASHCOM_WATCHDOG_OFFSET;
if (!request_region(port, 1, "MixCOM watchdog")) {
return 0;
}
id=inb_p(port);
- if(id!=FLASHCOM_ID) {
+ if (card_id==MIXCOM_ID)
+ id &= 0x3f;
+
+ if (id!=card_id) {
release_region(port, 1);
return 0;
}
- return port;
- }
+ return 1;
+}
static int __init mixcomwd_init(void)
{
@@ -245,50 +269,50 @@ static int __init mixcomwd_init(void)
int ret;
int found=0;
- for (i = 0; !found && mixcomwd_ioports[i] != 0; i++) {
- watchdog_port = mixcomwd_checkcard(mixcomwd_ioports[i]);
- if (watchdog_port) {
- found = 1;
- }
- }
-
- /* The FlashCOM card can be set up at 0x300 -> 0x378, in 0x8 jumps */
- for (i = 0x300; !found && i < 0x380; i+=0x8) {
- watchdog_port = flashcom_checkcard(i);
- if (watchdog_port) {
+ for (i = 0; !found && mixcomwd_io_info[i].ioport != 0; i++) {
+ if (checkcard(mixcomwd_io_info[i].ioport,
+ mixcomwd_io_info[i].id)) {
found = 1;
+ watchdog_port = mixcomwd_io_info[i].ioport;
}
}
if (!found) {
- printk("mixcomwd: No card detected, or port not available.\n");
+ printk(KERN_ERR PFX "No card detected, or port not available.\n");
return -ENODEV;
}
ret = misc_register(&mixcomwd_miscdev);
if (ret)
{
- release_region(watchdog_port, 1);
- return ret;
+ printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ goto error_misc_register_watchdog;
}
- printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",VERSION,watchdog_port);
+ printk(KERN_INFO "MixCOM watchdog driver v%s, watchdog port at 0x%3x\n",
+ VERSION, watchdog_port);
return 0;
+
+error_misc_register_watchdog:
+ release_region(watchdog_port, 1);
+ watchdog_port = 0x0000;
+ return ret;
}
static void __exit mixcomwd_exit(void)
{
if (!nowayout) {
if(mixcomwd_timer_alive) {
- printk(KERN_WARNING "mixcomwd: I quit now, hardware will"
+ printk(KERN_WARNING PFX "I quit now, hardware will"
" probably reboot!\n");
del_timer_sync(&mixcomwd_timer);
mixcomwd_timer_alive=0;
}
}
- release_region(watchdog_port,1);
misc_deregister(&mixcomwd_miscdev);
+ release_region(watchdog_port,1);
}
module_init(mixcomwd_init);
@@ -296,5 +320,6 @@ module_exit(mixcomwd_exit);
MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>");
MODULE_DESCRIPTION("MixCom Watchdog driver");
+MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index e88947f8fe53..0d2b27735419 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -328,12 +328,11 @@ static int __devinit mpcore_wdt_probe(struct platform_device *dev)
goto err_out;
}
- wdt = kmalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
+ wdt = kzalloc(sizeof(struct mpcore_wdt), GFP_KERNEL);
if (!wdt) {
ret = -ENOMEM;
goto err_out;
}
- memset(wdt, 0, sizeof(struct mpcore_wdt));
wdt->dev = &dev->dev;
wdt->irq = platform_get_irq(dev, 0);
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 1e7a6719d5ba..0f3fd6c9c354 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -626,12 +626,11 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
/* allocate memory for our device and initialize it */
- usb_pcwd = kmalloc (sizeof(struct usb_pcwd_private), GFP_KERNEL);
+ usb_pcwd = kzalloc (sizeof(struct usb_pcwd_private), GFP_KERNEL);
if (usb_pcwd == NULL) {
printk(KERN_ERR PFX "Out of memory\n");
goto error;
}
- memset (usb_pcwd, 0x00, sizeof (*usb_pcwd));
usb_pcwd_device = usb_pcwd;
diff --git a/drivers/char/watchdog/pnx4008_wdt.c b/drivers/char/watchdog/pnx4008_wdt.c
index 5991add702b0..22f8873dd092 100644
--- a/drivers/char/watchdog/pnx4008_wdt.c
+++ b/drivers/char/watchdog/pnx4008_wdt.c
@@ -148,10 +148,6 @@ static ssize_t
pnx4008_wdt_write(struct file *file, const char *data, size_t len,
loff_t * ppos)
{
- /* Can't seek (pwrite) on this device */
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len) {
if (!nowayout) {
size_t i;
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 20fa29ca7404..50430bced2f2 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -92,6 +92,7 @@ typedef enum close_state {
static DECLARE_MUTEX(open_lock);
+static struct device *wdt_dev; /* platform device attached to */
static struct resource *wdt_mem;
static struct resource *wdt_irq;
static struct clk *wdt_clock;
@@ -180,7 +181,7 @@ static int s3c2410wdt_set_heartbeat(int timeout)
}
if ((count / divisor) >= 0x10000) {
- printk(KERN_ERR PFX "timeout %d too big\n", timeout);
+ dev_err(wdt_dev, "timeout %d too big\n", timeout);
return -EINVAL;
}
}
@@ -233,7 +234,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
if (allow_close == CLOSE_STATE_ALLOW) {
s3c2410wdt_stop();
} else {
- printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+ dev_err(wdt_dev, "Unexpected close, not stopping watchdog\n");
s3c2410wdt_keepalive();
}
@@ -338,7 +339,7 @@ static struct miscdevice s3c2410wdt_miscdev = {
static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
{
- printk(KERN_INFO PFX "Watchdog timer expired!\n");
+ dev_info(wdt_dev, "watchdog timer expired (irq)\n");
s3c2410wdt_keepalive();
return IRQ_HANDLED;
@@ -348,31 +349,36 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct resource *res;
+ struct device *dev;
+ unsigned int wtcon;
int started = 0;
int ret;
int size;
DBG("%s: probe=%p\n", __FUNCTION__, pdev);
+ dev = &pdev->dev;
+ wdt_dev = &pdev->dev;
+
/* get the memory region for the watchdog timer */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
- printk(KERN_INFO PFX "failed to get memory region resouce\n");
+ dev_err(dev, "no memory resource specified\n");
return -ENOENT;
}
size = (res->end-res->start)+1;
wdt_mem = request_mem_region(res->start, size, pdev->name);
if (wdt_mem == NULL) {
- printk(KERN_INFO PFX "failed to get memory region\n");
+ dev_err(dev, "failed to get memory region\n");
ret = -ENOENT;
goto err_req;
}
wdt_base = ioremap(res->start, size);
if (wdt_base == 0) {
- printk(KERN_INFO PFX "failed to ioremap() region\n");
+ dev_err(dev, "failed to ioremap() region\n");
ret = -EINVAL;
goto err_req;
}
@@ -381,20 +387,20 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (wdt_irq == NULL) {
- printk(KERN_INFO PFX "failed to get irq resource\n");
+ dev_err(dev, "no irq resource specified\n");
ret = -ENOENT;
goto err_map;
}
ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
if (ret != 0) {
- printk(KERN_INFO PFX "failed to install irq (%d)\n", ret);
+ dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_map;
}
wdt_clock = clk_get(&pdev->dev, "watchdog");
if (IS_ERR(wdt_clock)) {
- printk(KERN_INFO PFX "failed to find watchdog clock source\n");
+ dev_err(dev, "failed to find watchdog clock source\n");
ret = PTR_ERR(wdt_clock);
goto err_irq;
}
@@ -408,22 +414,22 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
started = s3c2410wdt_set_heartbeat(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
if (started == 0) {
- printk(KERN_INFO PFX "tmr_margin value out of range, default %d used\n",
+ dev_info(dev,"tmr_margin value out of range, default %d used\n",
CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
} else {
- printk(KERN_INFO PFX "default timer value is out of range, cannot start\n");
+ dev_info(dev, "default timer value is out of range, cannot start\n");
}
}
ret = misc_register(&s3c2410wdt_miscdev);
if (ret) {
- printk (KERN_ERR PFX "cannot register miscdev on minor=%d (%d)\n",
+ dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
WATCHDOG_MINOR, ret);
goto err_clk;
}
if (tmr_atboot && started == 0) {
- printk(KERN_INFO PFX "Starting Watchdog Timer\n");
+ dev_info(dev, "starting watchdog timer\n");
s3c2410wdt_start();
} else if (!tmr_atboot) {
/* if we're not enabling the watchdog, then ensure it is
@@ -433,6 +439,15 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
s3c2410wdt_stop();
}
+ /* print out a statement of readiness */
+
+ wtcon = readl(wdt_base + S3C2410_WTCON);
+
+ dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
+ (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in",
+ (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
+ (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");
+
return 0;
err_clk:
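The s3c2410_wdt hunks above switch from printk(PFX ...) to dev_err()/dev_info(), which prefix every message with the driver and device name, and the probe routine caches &pdev->dev so code outside probe can log against the same device. A minimal sketch of that pattern for a platform driver (all names hypothetical):

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct device *mywdt_dev;	/* cached for paths that have no pdev */

static int mywdt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	mywdt_dev = dev;

	/* printed as "<drivername> <devicename>: no memory resource specified" */
	if (!platform_get_resource(pdev, IORESOURCE_MEM, 0)) {
		dev_err(dev, "no memory resource specified\n");
		return -ENOENT;
	}

	dev_info(dev, "probe complete\n");
	return 0;
}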
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 5cfcff532545..7b46faf22318 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -71,7 +71,7 @@ static struct clocksource clocksource_acpi_pm = {
.rating = 200,
.read = acpi_pm_read,
.mask = (cycle_t)ACPI_PM_MASK,
- .mult = 0, /*to be caluclated*/
+ .mult = 0, /*to be calculated*/
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -105,14 +105,11 @@ static inline void acpi_pm_need_workaround(void)
*/
static void __devinit acpi_pm_check_blacklist(struct pci_dev *dev)
{
- u8 rev;
-
if (acpi_pm_good)
return;
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
/* the bug has been fixed in PIIX4M */
- if (rev < 3) {
+ if (dev->revision < 3) {
printk(KERN_WARNING "* Found PM-Timer Bug on the chipset."
" Due to workarounds for a bug,\n"
"* this clock source is slow. Consider trying"
diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig
index e0bdc0db9640..100bfd422066 100644
--- a/drivers/connector/Kconfig
+++ b/drivers/connector/Kconfig
@@ -1,6 +1,5 @@
-menu "Connector - unified userspace <-> kernelspace linker"
-config CONNECTOR
+menuconfig CONNECTOR
tristate "Connector - unified userspace <-> kernelspace linker"
depends on NET
---help---
@@ -10,6 +9,8 @@ config CONNECTOR
Connector support can also be built as a module. If so, the module
will be called cn.ko.
+if CONNECTOR
+
config PROC_EVENTS
boolean "Report process events to userspace"
depends on CONNECTOR=y
@@ -18,4 +19,4 @@ config PROC_EVENTS
Provide a connector that reports process events to userspace. Send
events such as fork, exec, id change (uid, gid, suid, etc), and exit.
-endmenu
+endif # CONNECTOR
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index eb37fba9b7ef..2f6a73c01b71 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -39,6 +39,10 @@
*/
static struct cpufreq_driver *cpufreq_driver;
static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
+#ifdef CONFIG_HOTPLUG_CPU
+/* This one keeps track of the previously set governor of a removed CPU */
+static struct cpufreq_governor *cpufreq_cpu_governor[NR_CPUS];
+#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
/*
@@ -770,9 +774,17 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
}
policy->user_policy.min = policy->cpuinfo.min_freq;
policy->user_policy.max = policy->cpuinfo.max_freq;
- policy->user_policy.governor = policy->governor;
#ifdef CONFIG_SMP
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpufreq_cpu_governor[cpu]) {
+ policy->governor = cpufreq_cpu_governor[cpu];
+ dprintk("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
+ }
+#endif
+
for_each_cpu_mask(j, policy->cpus) {
if (cpu == j)
continue;
@@ -826,13 +838,21 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
- sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+ ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+ if (ret)
+ goto err_out_driver_exit;
drv_attr++;
}
- if (cpufreq_driver->get)
- sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
- if (cpufreq_driver->target)
- sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+ if (cpufreq_driver->get){
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+ if (ret)
+ goto err_out_driver_exit;
+ }
+ if (cpufreq_driver->target){
+ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+ if (ret)
+ goto err_out_driver_exit;
+ }
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu_mask(j, policy->cpus) {
@@ -865,6 +885,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
/* set default policy */
ret = __cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
unlock_policy_rwsem_write(cpu);
@@ -961,6 +982,11 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
}
#ifdef CONFIG_SMP
+
+#ifdef CONFIG_HOTPLUG_CPU
+ cpufreq_cpu_governor[cpu] = data->governor;
+#endif
+
/* if we have other CPUs still registered, we need to unlink them,
* or else wait_for_completion below will lock up. Clean the
* cpufreq_cpu_data[] while holding the lock, and remove the sysfs
@@ -981,6 +1007,9 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
if (j == cpu)
continue;
dprintk("removing link for cpu %u\n", j);
+#ifdef CONFIG_HOTPLUG_CPU
+ cpufreq_cpu_governor[j] = data->governor;
+#endif
cpu_sys_dev = get_cpu_sysdev(j);
sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
cpufreq_cpu_put(data);
@@ -1679,7 +1708,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
- struct cpufreq_policy *policy;
sys_dev = get_cpu_sysdev(cpu);
if (sys_dev) {
@@ -1693,11 +1721,6 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- policy = cpufreq_cpu_data[cpu];
- if (policy) {
- __cpufreq_driver_target(policy, policy->min,
- CPUFREQ_RELATION_H);
- }
__cpufreq_remove_dev(sys_dev);
break;
case CPU_DOWN_FAILED:
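The cpufreq_add_dev() hunks above start checking the return value of every sysfs_create_file() call and branch to the existing cleanup path on failure instead of silently ignoring it. A minimal sketch of that pattern (the helper and label names are hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int add_policy_attrs(struct kobject *kobj, const struct attribute **attrs)
{
	int ret;

	for (; *attrs; attrs++) {
		ret = sysfs_create_file(kobj, *attrs);
		if (ret)
			goto err_out;		/* stop on the first failure */
	}
	return 0;

err_out:
	/* the caller is expected to drop the kobject, which removes any
	 * attribute files that were created before the failure */
	return ret;
}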
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8532bb79e5fc..e794527e4925 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -96,15 +96,25 @@ static struct dbs_tuners {
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
- cputime64_t retval;
+ cputime64_t idle_time;
+ cputime64_t cur_jiffies;
+ cputime64_t busy_time;
- retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
- kstat_cpu(cpu).cpustat.iowait);
+ cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+ busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+ kstat_cpu(cpu).cpustat.system);
- if (dbs_tuners_ins.ignore_nice)
- retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- return retval;
+ if (!dbs_tuners_ins.ignore_nice) {
+ busy_time = cputime64_add(busy_time,
+ kstat_cpu(cpu).cpustat.nice);
+ }
+
+ idle_time = cputime64_sub(cur_jiffies, busy_time);
+ return idle_time;
}
/*
@@ -325,7 +335,7 @@ static struct attribute_group dbs_attr_group = {
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int idle_ticks, total_ticks;
- unsigned int load;
+ unsigned int load = 0;
cputime64_t cur_jiffies;
struct cpufreq_policy *policy;
@@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
this_dbs_info->prev_cpu_wall);
- this_dbs_info->prev_cpu_wall = cur_jiffies;
+ this_dbs_info->prev_cpu_wall = get_jiffies_64();
+
if (!total_ticks)
return;
/*
@@ -370,7 +381,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
if (tmp_idle_ticks < idle_ticks)
idle_ticks = tmp_idle_ticks;
}
- load = (100 * (total_ticks - idle_ticks)) / total_ticks;
+ if (likely(total_ticks > idle_ticks))
+ load = (100 * (total_ticks - idle_ticks)) / total_ticks;
/* Check for frequency increase */
if (load > dbs_tuners_ins.up_threshold) {
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d2f0cbd8b8f3..917b9bab9ccb 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -25,8 +25,7 @@ static spinlock_t cpufreq_stats_lock;
#define CPUFREQ_STATDEVICE_ATTR(_name,_mode,_show) \
static struct freq_attr _attr_##_name = {\
- .attr = {.name = __stringify(_name), .owner = THIS_MODULE, \
- .mode = _mode, }, \
+ .attr = {.name = __stringify(_name), .mode = _mode, }, \
.show = _show,\
};
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 860345c7799a..51bedab6c808 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -37,6 +37,7 @@ static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
static unsigned int cpu_is_managed[NR_CPUS];
static DEFINE_MUTEX (userspace_mutex);
+static int cpus_using_userspace_governor;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
@@ -47,7 +48,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
{
struct cpufreq_freqs *freq = data;
- dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new);
+ if (!cpu_is_managed[freq->cpu])
+ return 0;
+
+ dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+ freq->cpu, freq->new);
cpu_cur_freq[freq->cpu] = freq->new;
return 0;
@@ -120,7 +125,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
static struct freq_attr freq_attr_scaling_setspeed =
{
- .attr = { .name = "scaling_setspeed", .mode = 0644, .owner = THIS_MODULE },
+ .attr = { .name = "scaling_setspeed", .mode = 0644 },
.show = show_speed,
.store = store_speed,
};
@@ -142,6 +147,13 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
if (rc)
goto start_out;
+ if (cpus_using_userspace_governor == 0) {
+ cpufreq_register_notifier(
+ &userspace_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ cpus_using_userspace_governor++;
+
cpu_is_managed[cpu] = 1;
cpu_min_freq[cpu] = policy->min;
cpu_max_freq[cpu] = policy->max;
@@ -153,6 +165,13 @@ start_out:
break;
case CPUFREQ_GOV_STOP:
mutex_lock(&userspace_mutex);
+ cpus_using_userspace_governor--;
+ if (cpus_using_userspace_governor == 0) {
+ cpufreq_unregister_notifier(
+ &userspace_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
cpu_is_managed[cpu] = 0;
cpu_min_freq[cpu] = 0;
cpu_max_freq[cpu] = 0;
@@ -198,7 +217,6 @@ EXPORT_SYMBOL(cpufreq_gov_userspace);
static int __init cpufreq_gov_userspace_init(void)
{
- cpufreq_register_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
return cpufreq_register_governor(&cpufreq_gov_userspace);
}
@@ -206,7 +224,6 @@ static int __init cpufreq_gov_userspace_init(void)
static void __exit cpufreq_gov_userspace_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_userspace);
- cpufreq_unregister_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
}
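The userspace-governor hunks above move the transition-notifier registration out of module init/exit and gate it on a counter, so the notifier is registered only while at least one CPU is actually running this governor. A minimal sketch of refcount-gated registration (names are hypothetical and the counter is assumed to be protected by the caller's mutex):

#include <linux/notifier.h>

static int users;				/* protected by an external mutex */
static struct notifier_block my_nb;		/* .notifier_call filled in elsewhere */
static BLOCKING_NOTIFIER_HEAD(my_chain);	/* hypothetical chain */

static void my_governor_start(void)
{
	if (users++ == 0)			/* first user registers */
		blocking_notifier_chain_register(&my_chain, &my_nb);
}

static void my_governor_stop(void)
{
	if (--users == 0)			/* last user unregisters */
		blocking_notifier_chain_unregister(&my_chain, &my_nb);
}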
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index e7490925fdcf..5409f3afb3f8 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -199,7 +199,6 @@ static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
.attr = { .name = "scaling_available_frequencies",
.mode = 0444,
- .owner=THIS_MODULE
},
.show = show_available_freqs,
};
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index bb90cbd7ca51..84ebfcc1ffb4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -1,4 +1,9 @@
-menu "Hardware crypto devices"
+
+menuconfig CRYPTO_HW
+ bool "Hardware crypto devices"
+ default y
+
+if CRYPTO_HW
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
@@ -78,4 +83,4 @@ config ZCRYPT_MONOLITHIC
that contains all parts of the crypto device driver (ap bus,
request router and all the card drivers).
-endmenu
+endif # CRYPTO_HW
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 72be6c63edfc..8f670dae53bb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -3,13 +3,13 @@
#
menu "DMA Engine support"
- depends on !S390
+ depends on HAS_DMA
config DMA_ENGINE
bool "Support for DMA engines"
---help---
- DMA engines offload copy operations from the CPU to dedicated
- hardware, allowing the copies to happen asynchronously.
+ DMA engines offload bulk memory operations from the CPU to dedicated
+ hardware, allowing the operations to happen asynchronously.
comment "DMA Clients"
@@ -32,4 +32,12 @@ config INTEL_IOATDMA
---help---
Enable support for the Intel(R) I/OAT DMA engine.
+config INTEL_IOP_ADMA
+ tristate "Intel IOP ADMA support"
+ depends on DMA_ENGINE && (ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX)
+ select ASYNC_CORE
+ default m
+ ---help---
+ Enable support for the Intel(R) IOP Series RAID engines.
+
endmenu
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index bdcfdbdb1aec..b3839b687ae0 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
+obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 322ee2984e3d..82489923af09 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -37,11 +37,11 @@
* Each device has a channels list, which runs unlocked but is never modified
* once the device is registered, it's just setup by the driver.
*
- * Each client has a channels list, it's only modified under the client->lock
- * and in an RCU callback, so it's safe to read under rcu_read_lock().
+ * Each client is responsible for keeping track of the channels it uses. See
+ * the definition of dma_event_callback in dmaengine.h.
*
* Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_put is done for each class_device registered. When the
+ * registered. A kref_get is done for each class_device registered. When the
* class_device is released, the coresponding kref_put is done in the release
* method. Every time one of the device's channels is allocated to a client,
* a kref_get occurs. When the channel is freed, the coresponding kref_put
@@ -51,14 +51,17 @@
* references to finish.
*
* Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t. A single reference is set when on an
- * ADDED event, and removed with a REMOVE event. Net DMA client takes an
- * extra reference per outstanding transaction. The relase function does a
- * kref_put on the device. -ChrisL
+ * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
+ * signals that it wants to use a channel, and dma_chan_put is called when
+ * a channel is removed or a client using it is unregistered. A client can
+ * take extra references per outstanding transaction, as is the case with
+ * the NET DMA client. The release function does a kref_put on the device.
+ * -ChrisL, DanW
*/
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
@@ -66,6 +69,7 @@
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
+#include <linux/jiffies.h>
static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
@@ -100,8 +104,19 @@ static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
static ssize_t show_in_use(struct class_device *cd, char *buf)
{
struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
+ int in_use = 0;
+
+ if (unlikely(chan->slow_ref) &&
+ atomic_read(&chan->refcount.refcount) > 1)
+ in_use = 1;
+ else {
+ if (local_read(&(per_cpu_ptr(chan->local,
+ get_cpu())->refcount)) > 0)
+ in_use = 1;
+ put_cpu();
+ }
- return sprintf(buf, "%d\n", (chan->client ? 1 : 0));
+ return sprintf(buf, "%d\n", in_use);
}
static struct class_device_attribute dma_class_attrs[] = {
@@ -127,43 +142,72 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
+#define dma_chan_satisfies_mask(chan, mask) \
+ __dma_chan_satisfies_mask((chan), &(mask))
+static int
+__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+{
+ dma_cap_mask_t has;
+
+ bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+ DMA_TX_TYPE_END);
+ return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
+}
+
/**
- * dma_client_chan_alloc - try to allocate a channel to a client
+ * dma_client_chan_alloc - try to allocate channels to a client
* @client: &dma_client
*
* Called with dma_list_mutex held.
*/
-static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
+static void dma_client_chan_alloc(struct dma_client *client)
{
struct dma_device *device;
struct dma_chan *chan;
- unsigned long flags;
int desc; /* allocated descriptor count */
+ enum dma_state_client ack;
- /* Find a channel, any DMA engine will do */
- list_for_each_entry(device, &dma_device_list, global_node) {
+ /* Find a channel */
+ list_for_each_entry(device, &dma_device_list, global_node)
list_for_each_entry(chan, &device->channels, device_node) {
- if (chan->client)
+ if (!dma_chan_satisfies_mask(chan, client->cap_mask))
continue;
desc = chan->device->device_alloc_chan_resources(chan);
if (desc >= 0) {
- kref_get(&device->refcount);
- kref_init(&chan->refcount);
- chan->slow_ref = 0;
- INIT_RCU_HEAD(&chan->rcu);
- chan->client = client;
- spin_lock_irqsave(&client->lock, flags);
- list_add_tail_rcu(&chan->client_node,
- &client->channels);
- spin_unlock_irqrestore(&client->lock, flags);
- return chan;
+ ack = client->event_callback(client,
+ chan,
+ DMA_RESOURCE_AVAILABLE);
+
+ /* we are done once this client rejects
+ * an available resource
+ */
+ if (ack == DMA_ACK) {
+ dma_chan_get(chan);
+ kref_get(&device->refcount);
+ } else if (ack == DMA_NAK)
+ return;
}
}
- }
+}
+
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+ enum dma_status status;
+ unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
+
+ dma_async_issue_pending(chan);
+ do {
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+ printk(KERN_ERR "dma_sync_wait_timeout!\n");
+ return DMA_ERROR;
+ }
+ } while (status == DMA_IN_PROGRESS);
- return NULL;
+ return status;
}
+EXPORT_SYMBOL(dma_sync_wait);
/**
* dma_chan_cleanup - release a DMA channel's resources
@@ -173,7 +217,6 @@ void dma_chan_cleanup(struct kref *kref)
{
struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
chan->device->device_free_chan_resources(chan);
- chan->client = NULL;
kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);
@@ -189,7 +232,7 @@ static void dma_chan_free_rcu(struct rcu_head *rcu)
kref_put(&chan->refcount, dma_chan_cleanup);
}
-static void dma_client_chan_free(struct dma_chan *chan)
+static void dma_chan_release(struct dma_chan *chan)
{
atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
chan->slow_ref = 1;
@@ -197,70 +240,57 @@ static void dma_client_chan_free(struct dma_chan *chan)
}
/**
- * dma_chans_rebalance - reallocate channels to clients
- *
- * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients.
+ * dma_clients_notify_available - broadcast available channels to the clients
*/
-static void dma_chans_rebalance(void)
+static void dma_clients_notify_available(void)
{
struct dma_client *client;
- struct dma_chan *chan;
- unsigned long flags;
mutex_lock(&dma_list_mutex);
- list_for_each_entry(client, &dma_client_list, global_node) {
- while (client->chans_desired > client->chan_count) {
- chan = dma_client_chan_alloc(client);
- if (!chan)
- break;
- client->chan_count++;
- client->event_callback(client,
- chan,
- DMA_RESOURCE_ADDED);
- }
- while (client->chans_desired < client->chan_count) {
- spin_lock_irqsave(&client->lock, flags);
- chan = list_entry(client->channels.next,
- struct dma_chan,
- client_node);
- list_del_rcu(&chan->client_node);
- spin_unlock_irqrestore(&client->lock, flags);
- client->chan_count--;
- client->event_callback(client,
- chan,
- DMA_RESOURCE_REMOVED);
- dma_client_chan_free(chan);
- }
- }
+ list_for_each_entry(client, &dma_client_list, global_node)
+ dma_client_chan_alloc(client);
mutex_unlock(&dma_list_mutex);
}
/**
- * dma_async_client_register - allocate and register a &dma_client
- * @event_callback: callback for notification of channel addition/removal
+ * dma_clients_notify_removed - tell the clients that a channel is going away
+ * @chan: channel on its way out
*/
-struct dma_client *dma_async_client_register(dma_event_callback event_callback)
+static void dma_clients_notify_removed(struct dma_chan *chan)
{
struct dma_client *client;
+ enum dma_state_client ack;
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return NULL;
+ mutex_lock(&dma_list_mutex);
- INIT_LIST_HEAD(&client->channels);
- spin_lock_init(&client->lock);
- client->chans_desired = 0;
- client->chan_count = 0;
- client->event_callback = event_callback;
+ list_for_each_entry(client, &dma_client_list, global_node) {
+ ack = client->event_callback(client, chan,
+ DMA_RESOURCE_REMOVED);
+
+ /* client was holding resources for this channel so
+ * free it
+ */
+ if (ack == DMA_ACK) {
+ dma_chan_put(chan);
+ kref_put(&chan->device->refcount,
+ dma_async_device_cleanup);
+ }
+ }
+ mutex_unlock(&dma_list_mutex);
+}
+
+/**
+ * dma_async_client_register - register a &dma_client
+ * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ */
+void dma_async_client_register(struct dma_client *client)
+{
mutex_lock(&dma_list_mutex);
list_add_tail(&client->global_node, &dma_client_list);
mutex_unlock(&dma_list_mutex);
-
- return client;
}
EXPORT_SYMBOL(dma_async_client_register);
@@ -272,40 +302,42 @@ EXPORT_SYMBOL(dma_async_client_register);
*/
void dma_async_client_unregister(struct dma_client *client)
{
+ struct dma_device *device;
struct dma_chan *chan;
+ enum dma_state_client ack;
if (!client)
return;
- rcu_read_lock();
- list_for_each_entry_rcu(chan, &client->channels, client_node)
- dma_client_chan_free(chan);
- rcu_read_unlock();
-
mutex_lock(&dma_list_mutex);
+ /* free all channels the client is holding */
+ list_for_each_entry(device, &dma_device_list, global_node)
+ list_for_each_entry(chan, &device->channels, device_node) {
+ ack = client->event_callback(client, chan,
+ DMA_RESOURCE_REMOVED);
+
+ if (ack == DMA_ACK) {
+ dma_chan_put(chan);
+ kref_put(&chan->device->refcount,
+ dma_async_device_cleanup);
+ }
+ }
+
list_del(&client->global_node);
mutex_unlock(&dma_list_mutex);
-
- kfree(client);
- dma_chans_rebalance();
}
EXPORT_SYMBOL(dma_async_client_unregister);
/**
- * dma_async_client_chan_request - request DMA channels
- * @client: &dma_client
- * @number: count of DMA channels requested
- *
- * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
- * The resulting allocations/frees are indicated to the client via the
- * event callback.
+ * dma_async_client_chan_request - send all available channels to the
+ * client that satisfy the capability mask
+ * @client: requester
*/
-void dma_async_client_chan_request(struct dma_client *client,
- unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client)
{
- client->chans_desired = number;
- dma_chans_rebalance();
+ mutex_lock(&dma_list_mutex);
+ dma_client_chan_alloc(client);
+ mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);
@@ -316,12 +348,31 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
int dma_async_device_register(struct dma_device *device)
{
static int id;
- int chancnt = 0;
+ int chancnt = 0, rc;
struct dma_chan* chan;
if (!device)
return -ENODEV;
+ /* validate device routines */
+ BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
+ !device->device_prep_dma_memcpy);
+ BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
+ !device->device_prep_dma_xor);
+ BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
+ !device->device_prep_dma_zero_sum);
+ BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
+ !device->device_prep_dma_memset);
+ BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
+ !device->device_prep_dma_interrupt);
+
+ BUG_ON(!device->device_alloc_chan_resources);
+ BUG_ON(!device->device_free_chan_resources);
+ BUG_ON(!device->device_dependency_added);
+ BUG_ON(!device->device_is_tx_complete);
+ BUG_ON(!device->device_issue_pending);
+ BUG_ON(!device->dev);
+
init_completion(&device->done);
kref_init(&device->refcount);
device->dev_id = id++;
@@ -338,17 +389,38 @@ int dma_async_device_register(struct dma_device *device)
snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
device->dev_id, chan->chan_id);
+ rc = class_device_register(&chan->class_dev);
+ if (rc) {
+ chancnt--;
+ free_percpu(chan->local);
+ chan->local = NULL;
+ goto err_out;
+ }
+
kref_get(&device->refcount);
- class_device_register(&chan->class_dev);
+ kref_init(&chan->refcount);
+ chan->slow_ref = 0;
+ INIT_RCU_HEAD(&chan->rcu);
}
mutex_lock(&dma_list_mutex);
list_add_tail(&device->global_node, &dma_device_list);
mutex_unlock(&dma_list_mutex);
- dma_chans_rebalance();
+ dma_clients_notify_available();
return 0;
+
+err_out:
+ list_for_each_entry(chan, &device->channels, device_node) {
+ if (chan->local == NULL)
+ continue;
+ kref_put(&device->refcount, dma_async_device_cleanup);
+ class_device_unregister(&chan->class_dev);
+ chancnt--;
+ free_percpu(chan->local);
+ }
+ return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
@@ -371,32 +443,165 @@ static void dma_async_device_cleanup(struct kref *kref)
void dma_async_device_unregister(struct dma_device *device)
{
struct dma_chan *chan;
- unsigned long flags;
mutex_lock(&dma_list_mutex);
list_del(&device->global_node);
mutex_unlock(&dma_list_mutex);
list_for_each_entry(chan, &device->channels, device_node) {
- if (chan->client) {
- spin_lock_irqsave(&chan->client->lock, flags);
- list_del(&chan->client_node);
- chan->client->chan_count--;
- spin_unlock_irqrestore(&chan->client->lock, flags);
- chan->client->event_callback(chan->client,
- chan,
- DMA_RESOURCE_REMOVED);
- dma_client_chan_free(chan);
- }
+ dma_clients_notify_removed(chan);
class_device_unregister(&chan->class_dev);
+ dma_chan_release(chan);
}
- dma_chans_rebalance();
kref_put(&device->refcount, dma_async_device_cleanup);
wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+ void *src, size_t len)
+{
+ struct dma_device *dev = chan->device;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ int cpu;
+
+ tx = dev->device_prep_dma_memcpy(chan, len, 0);
+ if (!tx)
+ return -ENOMEM;
+
+ tx->ack = 1;
+ tx->callback = NULL;
+ addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+ tx->tx_set_src(addr, tx, 0);
+ addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+ tx->tx_set_dest(addr, tx, 0);
+ cookie = tx->tx_submit(tx);
+
+ cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
+
+/**
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
+ * @chan: DMA channel to offload copy to
+ * @page: destination page
+ * @offset: offset in page to copy to
+ * @kdata: source address (virtual)
+ * @len: length
+ *
+ * Both @page/@offset and @kdata must be mappable to a bus address according
+ * to the DMA mapping API rules for streaming mappings.
+ * Both @page/@offset and @kdata must stay memory resident (kernel memory or
+ * locked user space pages)
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
+ unsigned int offset, void *kdata, size_t len)
+{
+ struct dma_device *dev = chan->device;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ int cpu;
+
+ tx = dev->device_prep_dma_memcpy(chan, len, 0);
+ if (!tx)
+ return -ENOMEM;
+
+ tx->ack = 1;
+ tx->callback = NULL;
+ addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+ tx->tx_set_src(addr, tx, 0);
+ addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+ tx->tx_set_dest(addr, tx, 0);
+ cookie = tx->tx_submit(tx);
+
+ cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
+
+/**
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
+ * @chan: DMA channel to offload copy to
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
+ * @len: length
+ *
+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
+ unsigned int dest_off, struct page *src_pg, unsigned int src_off,
+ size_t len)
+{
+ struct dma_device *dev = chan->device;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t addr;
+ dma_cookie_t cookie;
+ int cpu;
+
+ tx = dev->device_prep_dma_memcpy(chan, len, 0);
+ if (!tx)
+ return -ENOMEM;
+
+ tx->ack = 1;
+ tx->callback = NULL;
+ addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+ tx->tx_set_src(addr, tx, 0);
+ addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
+ tx->tx_set_dest(addr, tx, 0);
+ cookie = tx->tx_submit(tx);
+
+ cpu = get_cpu();
+ per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
+ per_cpu_ptr(chan->local, cpu)->memcpy_count++;
+ put_cpu();
+
+ return cookie;
+}
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+ struct dma_chan *chan)
+{
+ tx->chan = chan;
+ spin_lock_init(&tx->lock);
+ INIT_LIST_HEAD(&tx->depend_node);
+ INIT_LIST_HEAD(&tx->depend_list);
+}
+EXPORT_SYMBOL(dma_async_tx_descriptor_init);
+
static int __init dma_bus_init(void)
{
mutex_init(&dma_list_mutex);
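dma_sync_wait() above issues the pending descriptors and then spins on the transaction status, giving up after a fixed deadline. A minimal sketch of the same poll-with-deadline pattern built on jiffies (the callback type and status values are hypothetical):

#include <linux/jiffies.h>

enum my_status { MY_IN_PROGRESS, MY_DONE, MY_ERROR };

static enum my_status wait_done(enum my_status (*poll)(void *), void *arg)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(5000);
	enum my_status s;

	do {
		s = poll(arg);
		if (time_after_eq(jiffies, deadline))
			return MY_ERROR;	/* give up after roughly 5 seconds */
	} while (s == MY_IN_PROGRESS);

	return s;
}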
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 850014139556..5fbe56b5cea0 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -32,16 +32,17 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
-#include "ioatdma_io.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
+#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
/* internal functions */
static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);
static int enumerate_dma_channels(struct ioat_device *device)
@@ -51,8 +52,8 @@ static int enumerate_dma_channels(struct ioat_device *device)
int i;
struct ioat_dma_chan *ioat_chan;
- device->common.chancnt = ioatdma_read8(device, IOAT_CHANCNT_OFFSET);
- xfercap_scale = ioatdma_read8(device, IOAT_XFERCAP_OFFSET);
+ device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+ xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
for (i = 0; i < device->common.chancnt; i++) {
@@ -71,13 +72,79 @@ static int enumerate_dma_channels(struct ioat_device *device)
INIT_LIST_HEAD(&ioat_chan->used_desc);
/* This should be made common somewhere in dmaengine.c */
ioat_chan->common.device = &device->common;
- ioat_chan->common.client = NULL;
list_add_tail(&ioat_chan->common.device_node,
&device->common.channels);
}
return device->common.chancnt;
}
+static void
+ioat_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+{
+ struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+
+ pci_unmap_addr_set(desc, src, addr);
+
+ list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
+ iter->hw->src_addr = addr;
+ addr += ioat_chan->xfercap;
+ }
+
+}
+
+static void
+ioat_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx, int index)
+{
+ struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+
+ pci_unmap_addr_set(desc, dst, addr);
+
+ list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
+ iter->hw->dst_addr = addr;
+ addr += ioat_chan->xfercap;
+ }
+}
+
+static dma_cookie_t
+ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+ struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
+ int append = 0;
+ dma_cookie_t cookie;
+ struct ioat_desc_sw *group_start;
+
+ group_start = list_entry(desc->async_tx.tx_list.next,
+ struct ioat_desc_sw, node);
+ spin_lock_bh(&ioat_chan->desc_lock);
+ /* cookie incr and addition to used_list must be atomic */
+ cookie = ioat_chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ ioat_chan->common.cookie = desc->async_tx.cookie = cookie;
+
+ /* write address into NextDescriptor field of last desc in chain */
+ to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
+ group_start->async_tx.phys;
+ list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);
+
+ ioat_chan->pending += desc->tx_cnt;
+ if (ioat_chan->pending >= 4) {
+ append = 1;
+ ioat_chan->pending = 0;
+ }
+ spin_unlock_bh(&ioat_chan->desc_lock);
+
+ if (append)
+ writeb(IOAT_CHANCMD_APPEND,
+ ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+
+ return cookie;
+}
+
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
struct ioat_dma_chan *ioat_chan,
gfp_t flags)
@@ -99,8 +166,13 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
}
memset(desc, 0, sizeof(*desc));
+ dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
+ desc_sw->async_tx.tx_set_src = ioat_set_src;
+ desc_sw->async_tx.tx_set_dest = ioat_set_dest;
+ desc_sw->async_tx.tx_submit = ioat_tx_submit;
+ INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
desc_sw->hw = desc;
- desc_sw->phys = phys;
+ desc_sw->async_tx.phys = phys;
return desc_sw;
}
@@ -123,7 +195,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
* In-use bit automatically set by reading chanctrl
* If 0, we got it, if 1, someone else did
*/
- chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
+ chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
if (chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE)
return -EBUSY;
@@ -132,12 +204,12 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
IOAT_CHANCTRL_ERR_INT_EN |
IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
IOAT_CHANCTRL_ERR_COMPLETION_EN;
- ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
+ writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
- chanerr = ioatdma_chan_read32(ioat_chan, IOAT_CHANERR_OFFSET);
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
if (chanerr) {
printk("IOAT: CHANERR = %x, clearing\n", chanerr);
- ioatdma_chan_write32(ioat_chan, IOAT_CHANERR_OFFSET, chanerr);
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}
/* Allocate descriptors */
@@ -161,10 +233,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
&ioat_chan->completion_addr);
memset(ioat_chan->completion_virt, 0,
sizeof(*ioat_chan->completion_virt));
- ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_LOW,
- ((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF);
- ioatdma_chan_write32(ioat_chan, IOAT_CHANCMP_OFFSET_HIGH,
- ((u64) ioat_chan->completion_addr) >> 32);
+ writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+ writel(((u64) ioat_chan->completion_addr) >> 32,
+ ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
ioat_start_null_desc(ioat_chan);
return i;
@@ -182,18 +254,20 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_dma_memcpy_cleanup(ioat_chan);
- ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET);
+ writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
spin_lock_bh(&ioat_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
in_use_descs++;
list_del(&desc->node);
- pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
+ pci_pool_free(ioat_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
kfree(desc);
}
list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
list_del(&desc->node);
- pci_pool_free(ioat_device->dma_pool, desc->hw, desc->phys);
+ pci_pool_free(ioat_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
kfree(desc);
}
spin_unlock_bh(&ioat_chan->desc_lock);
@@ -210,50 +284,30 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
/* Tell hw the chan is free */
- chanctrl = ioatdma_chan_read16(ioat_chan, IOAT_CHANCTRL_OFFSET);
+ chanctrl = readw(ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
chanctrl &= ~IOAT_CHANCTRL_CHANNEL_IN_USE;
- ioatdma_chan_write16(ioat_chan, IOAT_CHANCTRL_OFFSET, chanctrl);
+ writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
-/**
- * do_ioat_dma_memcpy - actual function that initiates a IOAT DMA transaction
- * @ioat_chan: IOAT DMA channel handle
- * @dest: DMA destination address
- * @src: DMA source address
- * @len: transaction length in bytes
- */
-
-static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,
- dma_addr_t dest,
- dma_addr_t src,
- size_t len)
+static struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
- struct ioat_desc_sw *first;
- struct ioat_desc_sw *prev;
- struct ioat_desc_sw *new;
- dma_cookie_t cookie;
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+ struct ioat_desc_sw *first, *prev, *new;
LIST_HEAD(new_chain);
u32 copy;
size_t orig_len;
- dma_addr_t orig_src, orig_dst;
- unsigned int desc_count = 0;
- unsigned int append = 0;
-
- if (!ioat_chan || !dest || !src)
- return -EFAULT;
+ int desc_count = 0;
if (!len)
- return ioat_chan->common.cookie;
+ return NULL;
orig_len = len;
- orig_src = src;
- orig_dst = dest;
first = NULL;
prev = NULL;
spin_lock_bh(&ioat_chan->desc_lock);
-
while (len) {
if (!list_empty(&ioat_chan->free_desc)) {
new = to_ioat_desc(ioat_chan->free_desc.next);
@@ -270,141 +324,36 @@ static dma_cookie_t do_ioat_dma_memcpy(struct ioat_dma_chan *ioat_chan,
new->hw->size = copy;
new->hw->ctl = 0;
- new->hw->src_addr = src;
- new->hw->dst_addr = dest;
- new->cookie = 0;
+ new->async_tx.cookie = 0;
+ new->async_tx.ack = 1;
/* chain together the physical address list for the HW */
if (!first)
first = new;
else
- prev->hw->next = (u64) new->phys;
+ prev->hw->next = (u64) new->async_tx.phys;
prev = new;
-
len -= copy;
- dest += copy;
- src += copy;
-
list_add_tail(&new->node, &new_chain);
desc_count++;
}
- new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- new->hw->next = 0;
- /* cookie incr and addition to used_list must be atomic */
+ list_splice(&new_chain, &new->async_tx.tx_list);
- cookie = ioat_chan->common.cookie;
- cookie++;
- if (cookie < 0)
- cookie = 1;
- ioat_chan->common.cookie = new->cookie = cookie;
+ new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+ new->hw->next = 0;
+ new->tx_cnt = desc_count;
+ new->async_tx.ack = 0; /* client is in control of this ack */
+ new->async_tx.cookie = -EBUSY;
- pci_unmap_addr_set(new, src, orig_src);
- pci_unmap_addr_set(new, dst, orig_dst);
pci_unmap_len_set(new, src_len, orig_len);
pci_unmap_len_set(new, dst_len, orig_len);
-
- /* write address into NextDescriptor field of last desc in chain */
- to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->phys;
- list_splice_init(&new_chain, ioat_chan->used_desc.prev);
-
- ioat_chan->pending += desc_count;
- if (ioat_chan->pending >= 20) {
- append = 1;
- ioat_chan->pending = 0;
- }
-
spin_unlock_bh(&ioat_chan->desc_lock);
- if (append)
- ioatdma_chan_write8(ioat_chan,
- IOAT_CHANCMD_OFFSET,
- IOAT_CHANCMD_APPEND);
- return cookie;
-}
-
-/**
- * ioat_dma_memcpy_buf_to_buf - wrapper that takes src & dest bufs
- * @chan: IOAT DMA channel handle
- * @dest: DMA destination address
- * @src: DMA source address
- * @len: transaction length in bytes
- */
-
-static dma_cookie_t ioat_dma_memcpy_buf_to_buf(struct dma_chan *chan,
- void *dest,
- void *src,
- size_t len)
-{
- dma_addr_t dest_addr;
- dma_addr_t src_addr;
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
- dest_addr = pci_map_single(ioat_chan->device->pdev,
- dest, len, PCI_DMA_FROMDEVICE);
- src_addr = pci_map_single(ioat_chan->device->pdev,
- src, len, PCI_DMA_TODEVICE);
-
- return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
+ return new ? &new->async_tx : NULL;
}
-/**
- * ioat_dma_memcpy_buf_to_pg - wrapper, copying from a buf to a page
- * @chan: IOAT DMA channel handle
- * @page: pointer to the page to copy to
- * @offset: offset into that page
- * @src: DMA source address
- * @len: transaction length in bytes
- */
-
-static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
- struct page *page,
- unsigned int offset,
- void *src,
- size_t len)
-{
- dma_addr_t dest_addr;
- dma_addr_t src_addr;
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
- dest_addr = pci_map_page(ioat_chan->device->pdev,
- page, offset, len, PCI_DMA_FROMDEVICE);
- src_addr = pci_map_single(ioat_chan->device->pdev,
- src, len, PCI_DMA_TODEVICE);
-
- return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
-}
-
-/**
- * ioat_dma_memcpy_pg_to_pg - wrapper, copying between two pages
- * @chan: IOAT DMA channel handle
- * @dest_pg: pointer to the page to copy to
- * @dest_off: offset into that page
- * @src_pg: pointer to the page to copy from
- * @src_off: offset into that page
- * @len: transaction length in bytes. This is guaranteed not to make a copy
- * across a page boundary.
- */
-
-static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
- struct page *dest_pg,
- unsigned int dest_off,
- struct page *src_pg,
- unsigned int src_off,
- size_t len)
-{
- dma_addr_t dest_addr;
- dma_addr_t src_addr;
- struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
- dest_addr = pci_map_page(ioat_chan->device->pdev,
- dest_pg, dest_off, len, PCI_DMA_FROMDEVICE);
- src_addr = pci_map_page(ioat_chan->device->pdev,
- src_pg, src_off, len, PCI_DMA_TODEVICE);
-
- return do_ioat_dma_memcpy(ioat_chan, dest_addr, src_addr, len);
-}
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
@@ -417,9 +366,8 @@ static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
if (ioat_chan->pending != 0) {
ioat_chan->pending = 0;
- ioatdma_chan_write8(ioat_chan,
- IOAT_CHANCMD_OFFSET,
- IOAT_CHANCMD_APPEND);
+ writeb(IOAT_CHANCMD_APPEND,
+ ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
}
@@ -449,7 +397,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
printk("IOAT: Channel halted, chanerr = %x\n",
- ioatdma_chan_read32(chan, IOAT_CHANERR_OFFSET));
+ readl(chan->reg_base + IOAT_CHANERR_OFFSET));
/* TODO do something to salvage the situation */
}
@@ -467,8 +415,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
* exceeding xfercap, perhaps. If so, only the last one will
* have a cookie, and require unmapping.
*/
- if (desc->cookie) {
- cookie = desc->cookie;
+ if (desc->async_tx.cookie) {
+ cookie = desc->async_tx.cookie;
/* yes we are unmapping both _page and _single alloc'd
regions with unmap_page. Is this *really* that bad?
@@ -483,14 +431,19 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
PCI_DMA_TODEVICE);
}
- if (desc->phys != phys_complete) {
- /* a completed entry, but not the last, so cleanup */
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->free_desc);
+ if (desc->async_tx.phys != phys_complete) {
+ /* a completed entry, but not the last, so cleanup
+ * if the client is done with the descriptor
+ */
+ if (desc->async_tx.ack) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->free_desc);
+ } else
+ desc->async_tx.cookie = 0;
} else {
/* last used desc. Do not remove, so we can append from
it, but don't look at it next time, either */
- desc->cookie = 0;
+ desc->async_tx.cookie = 0;
/* TODO check status bits? */
break;
@@ -506,6 +459,17 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
spin_unlock(&chan->cleanup_lock);
}
+static void ioat_dma_dependency_added(struct dma_chan *chan)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+ spin_lock_bh(&ioat_chan->desc_lock);
+ if (ioat_chan->pending == 0) {
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ ioat_dma_memcpy_cleanup(ioat_chan);
+ } else
+ spin_unlock_bh(&ioat_chan->desc_lock);
+}
+
/**
* ioat_dma_is_complete - poll the status of a IOAT DMA transaction
* @chan: IOAT DMA channel handle
@@ -553,6 +517,8 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_UNISYS,
+ PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
{ 0, }
};
@@ -560,6 +526,7 @@ static struct pci_driver ioat_pci_driver = {
.name = "ioatdma",
.id_table = ioat_pci_tbl,
.probe = ioat_probe,
+ .shutdown = ioat_shutdown,
.remove = __devexit_p(ioat_remove),
};
@@ -569,21 +536,21 @@ static irqreturn_t ioat_do_interrupt(int irq, void *data)
unsigned long attnstatus;
u8 intrctrl;
- intrctrl = ioatdma_read8(instance, IOAT_INTRCTRL_OFFSET);
+ intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
return IRQ_NONE;
if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
- ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
return IRQ_NONE;
}
- attnstatus = ioatdma_read32(instance, IOAT_ATTNSTATUS_OFFSET);
+ attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
printk(KERN_ERR "ioatdma error: interrupt! status %lx\n", attnstatus);
- ioatdma_write8(instance, IOAT_INTRCTRL_OFFSET, intrctrl);
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
return IRQ_HANDLED;
}
@@ -607,19 +574,17 @@ static void ioat_start_null_desc(struct ioat_dma_chan *ioat_chan)
desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
desc->hw->next = 0;
+ desc->async_tx.ack = 1;
list_add_tail(&desc->node, &ioat_chan->used_desc);
spin_unlock_bh(&ioat_chan->desc_lock);
-#if (BITS_PER_LONG == 64)
- ioatdma_chan_write64(ioat_chan, IOAT_CHAINADDR_OFFSET, desc->phys);
-#else
- ioatdma_chan_write32(ioat_chan,
- IOAT_CHAINADDR_OFFSET_LOW,
- (u32) desc->phys);
- ioatdma_chan_write32(ioat_chan, IOAT_CHAINADDR_OFFSET_HIGH, 0);
-#endif
- ioatdma_chan_write8(ioat_chan, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_START);
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
+
+ writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
/*
@@ -633,6 +598,8 @@ static int ioat_self_test(struct ioat_device *device)
u8 *src;
u8 *dest;
struct dma_chan *dma_chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t addr;
dma_cookie_t cookie;
int err = 0;
@@ -658,7 +625,15 @@ static int ioat_self_test(struct ioat_device *device)
goto out;
}
- cookie = ioat_dma_memcpy_buf_to_buf(dma_chan, dest, src, IOAT_TEST_SIZE);
+ tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+ async_tx_ack(tx);
+ addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+ DMA_TO_DEVICE);
+ ioat_set_src(addr, tx, 0);
+ addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+ DMA_FROM_DEVICE);
+ ioat_set_dest(addr, tx, 0);
+ cookie = ioat_tx_submit(tx);
ioat_dma_memcpy_issue_pending(dma_chan);
msleep(1);
@@ -748,19 +723,20 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
device->reg_base = reg_base;
- ioatdma_write8(device, IOAT_INTRCTRL_OFFSET, IOAT_INTRCTRL_MASTER_INT_EN);
+ writeb(IOAT_INTRCTRL_MASTER_INT_EN, device->reg_base + IOAT_INTRCTRL_OFFSET);
pci_set_master(pdev);
INIT_LIST_HEAD(&device->common.channels);
enumerate_dma_channels(device);
+ dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
device->common.device_free_chan_resources = ioat_dma_free_chan_resources;
- device->common.device_memcpy_buf_to_buf = ioat_dma_memcpy_buf_to_buf;
- device->common.device_memcpy_buf_to_pg = ioat_dma_memcpy_buf_to_pg;
- device->common.device_memcpy_pg_to_pg = ioat_dma_memcpy_pg_to_pg;
- device->common.device_memcpy_complete = ioat_dma_is_complete;
- device->common.device_memcpy_issue_pending = ioat_dma_memcpy_issue_pending;
+ device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
+ device->common.device_is_tx_complete = ioat_dma_is_complete;
+ device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
+ device->common.device_dependency_added = ioat_dma_dependency_added;
+ device->common.dev = &pdev->dev;
printk(KERN_INFO "Intel(R) I/OAT DMA Engine found, %d channels\n",
device->common.chancnt);
@@ -787,9 +763,20 @@ err_request_regions:
err_set_dma_mask:
pci_disable_device(pdev);
err_enable_device:
+
+ printk(KERN_ERR "Intel(R) I/OAT DMA Engine initialization failed\n");
+
return err;
}
+static void ioat_shutdown(struct pci_dev *pdev)
+{
+ struct ioat_device *device;
+ device = pci_get_drvdata(pdev);
+
+ dma_async_device_unregister(&device->common);
+}
+
static void __devexit ioat_remove(struct pci_dev *pdev)
{
struct ioat_device *device;
@@ -818,7 +805,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
}
/* MODULE API */
-MODULE_VERSION("1.7");
+MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
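ioat_tx_submit() above bumps the channel cookie under the descriptor lock and wraps it back to 1 if the increment goes negative, so a valid cookie is always a positive value. A minimal sketch of that assignment in isolation (dma_cookie_t is a signed int in this era; the helper name is hypothetical):

typedef int dma_cookie_t;			/* signed, as in dmaengine.h */

static dma_cookie_t assign_cookie(dma_cookie_t *chan_cookie)
{
	dma_cookie_t c = *chan_cookie + 1;

	if (c < 0)				/* wrapped past INT_MAX */
		c = 1;				/* zero and negatives are reserved */

	*chan_cookie = c;
	return c;
}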
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index 62b26a9be4c9..d3726478031a 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -30,9 +30,6 @@
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
-extern struct list_head dma_device_list;
-extern struct list_head dma_client_list;
-
/**
* struct ioat_device - internal representation of a IOAT device
* @pdev: PCI-Express device
@@ -105,21 +102,20 @@ struct ioat_dma_chan {
/**
* struct ioat_desc_sw - wrapper around hardware descriptor
* @hw: hardware DMA descriptor
- * @node:
- * @cookie:
- * @phys:
+ * @node: this descriptor will either be on the free list,
+ * or attached to a transaction list (async_tx.tx_list)
+ * @tx_cnt: number of descriptors required to complete the transaction
+ * @async_tx: the generic software descriptor for all engines
*/
-
struct ioat_desc_sw {
struct ioat_dma_descriptor *hw;
struct list_head node;
- dma_cookie_t cookie;
- dma_addr_t phys;
+ int tx_cnt;
DECLARE_PCI_UNMAP_ADDR(src)
DECLARE_PCI_UNMAP_LEN(src_len)
DECLARE_PCI_UNMAP_ADDR(dst)
DECLARE_PCI_UNMAP_LEN(dst_len)
+ struct dma_async_tx_descriptor async_tx;
};
#endif /* IOATDMA_H */
-
diff --git a/drivers/dma/ioatdma_io.h b/drivers/dma/ioatdma_io.h
deleted file mode 100644
index c0b4bf66c920..000000000000
--- a/drivers/dma/ioatdma_io.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_IO_H
-#define IOATDMA_IO_H
-
-#include <asm/io.h>
-
-/*
- * device and per-channel MMIO register read and write functions
- * this is a lot of anoying inline functions, but it's typesafe
- */
-
-static inline u8 ioatdma_read8(struct ioat_device *device,
- unsigned int offset)
-{
- return readb(device->reg_base + offset);
-}
-
-static inline u16 ioatdma_read16(struct ioat_device *device,
- unsigned int offset)
-{
- return readw(device->reg_base + offset);
-}
-
-static inline u32 ioatdma_read32(struct ioat_device *device,
- unsigned int offset)
-{
- return readl(device->reg_base + offset);
-}
-
-static inline void ioatdma_write8(struct ioat_device *device,
- unsigned int offset, u8 value)
-{
- writeb(value, device->reg_base + offset);
-}
-
-static inline void ioatdma_write16(struct ioat_device *device,
- unsigned int offset, u16 value)
-{
- writew(value, device->reg_base + offset);
-}
-
-static inline void ioatdma_write32(struct ioat_device *device,
- unsigned int offset, u32 value)
-{
- writel(value, device->reg_base + offset);
-}
-
-static inline u8 ioatdma_chan_read8(struct ioat_dma_chan *chan,
- unsigned int offset)
-{
- return readb(chan->reg_base + offset);
-}
-
-static inline u16 ioatdma_chan_read16(struct ioat_dma_chan *chan,
- unsigned int offset)
-{
- return readw(chan->reg_base + offset);
-}
-
-static inline u32 ioatdma_chan_read32(struct ioat_dma_chan *chan,
- unsigned int offset)
-{
- return readl(chan->reg_base + offset);
-}
-
-static inline void ioatdma_chan_write8(struct ioat_dma_chan *chan,
- unsigned int offset, u8 value)
-{
- writeb(value, chan->reg_base + offset);
-}
-
-static inline void ioatdma_chan_write16(struct ioat_dma_chan *chan,
- unsigned int offset, u16 value)
-{
- writew(value, chan->reg_base + offset);
-}
-
-static inline void ioatdma_chan_write32(struct ioat_dma_chan *chan,
- unsigned int offset, u32 value)
-{
- writel(value, chan->reg_base + offset);
-}
-
-#if (BITS_PER_LONG == 64)
-static inline u64 ioatdma_chan_read64(struct ioat_dma_chan *chan,
- unsigned int offset)
-{
- return readq(chan->reg_base + offset);
-}
-
-static inline void ioatdma_chan_write64(struct ioat_dma_chan *chan,
- unsigned int offset, u64 value)
-{
- writeq(value, chan->reg_base + offset);
-}
-#endif
-
-#endif /* IOATDMA_IO_H */
-
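
(A minimal sketch, not part of the patch: with the type-safe wrappers above removed, register
access is expected to go through the generic MMIO accessors on reg_base directly;
CHAN_CTRL_OFFSET below is a placeholder, not a real register define.)

        /* read-modify-write a per-channel register straight off reg_base */
        u32 ctrl = readl(ioat_chan->reg_base + CHAN_CTRL_OFFSET);
        writel(ctrl | 0x1, ioat_chan->reg_base + CHAN_CTRL_OFFSET);
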
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
new file mode 100644
index 000000000000..5a1d426744d6
--- /dev/null
+++ b/drivers/dma/iop-adma.c
@@ -0,0 +1,1467 @@
+/*
+ * offload engine driver for the Intel Xscale series of i/o processors
+ * Copyright © 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <linux/ioport.h>
+
+#include <asm/arch/adma.h>
+
+#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
+#define to_iop_adma_device(dev) \
+ container_of(dev, struct iop_adma_device, common)
+#define tx_to_iop_adma_slot(tx) \
+ container_of(tx, struct iop_adma_desc_slot, async_tx)
+
+/**
+ * iop_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &iop_chan->lock while calling this function
+ */
+static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
+{
+ int stride = slot->slots_per_op;
+
+ while (stride--) {
+ slot->slots_per_op = 0;
+ slot = list_entry(slot->slot_node.next,
+ struct iop_adma_desc_slot,
+ slot_node);
+ }
+}
+
+static dma_cookie_t
+iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
+{
+ BUG_ON(desc->async_tx.cookie < 0);
+ spin_lock_bh(&desc->async_tx.lock);
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ */
+ if (desc->group_head && desc->unmap_len) {
+ struct iop_adma_desc_slot *unmap = desc->group_head;
+ struct device *dev =
+ &iop_chan->device->pdev->dev;
+ u32 len = unmap->unmap_len;
+ u32 src_cnt = unmap->unmap_src_cnt;
+ dma_addr_t addr = iop_desc_get_dest_addr(unmap,
+ iop_chan);
+
+ dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ while (src_cnt--) {
+ addr = iop_desc_get_src_addr(unmap,
+ iop_chan,
+ src_cnt);
+ dma_unmap_page(dev, addr, len,
+ DMA_TO_DEVICE);
+ }
+ desc->group_head = NULL;
+ }
+ }
+
+ /* run dependent operations */
+ async_tx_run_dependencies(&desc->async_tx);
+ spin_unlock_bh(&desc->async_tx.lock);
+
+ return cookie;
+}
+
+static int
+iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
+ struct iop_adma_chan *iop_chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!desc->async_tx.ack)
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (desc->chain_node.next == &iop_chan->chain)
+ return 1;
+
+ dev_dbg(iop_chan->device->common.dev,
+ "\tfree slot: %d slots_per_op: %d\n",
+ desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ iop_adma_free_slots(desc);
+
+ return 0;
+}
+
+static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
+{
+ struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
+ dma_cookie_t cookie = 0;
+ u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
+ int busy = iop_chan_is_busy(iop_chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
+ chain_node) {
+ pr_debug("\tcookie: %d slot: %d busy: %d "
+ "this_desc: %#x next_desc: %#x ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy,
+ iter->async_tx.phys, iop_desc_get_next_desc(iter),
+ iter->async_tx.ack);
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+ * hardware channel, subsequent descriptors are either in
+ * process or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->async_tx.phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || iop_desc_get_next_desc(iter))
+ break;
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ pr_debug("\tgroup++\n");
+ if (!grp_start)
+ grp_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ struct iop_adma_desc_slot *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+ pr_debug("\tgroup end\n");
+
+ /* collect the total results */
+ if (grp_start->xor_check_result) {
+ u32 zero_sum_result = 0;
+ slot_cnt = grp_start->slot_cnt;
+ grp_iter = grp_start;
+
+ list_for_each_entry_from(grp_iter,
+ &iop_chan->chain, chain_node) {
+ zero_sum_result |=
+ iop_desc_get_zero_result(grp_iter);
+ pr_debug("\titer%d result: %d\n",
+ grp_iter->idx, zero_sum_result);
+ slot_cnt -= slots_per_op;
+ if (slot_cnt == 0)
+ break;
+ }
+ pr_debug("\tgrp_start->xor_check_result: %p\n",
+ grp_start->xor_check_result);
+ *grp_start->xor_check_result = zero_sum_result;
+ }
+
+ /* clean up the group */
+ slot_cnt = grp_start->slot_cnt;
+ grp_iter = grp_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &iop_chan->chain, chain_node) {
+ cookie = iop_adma_run_tx_complete_actions(
+ grp_iter, iop_chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = iop_adma_clean_slot(grp_iter,
+ iop_chan);
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ grp_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ /* write back zero sum results (single descriptor case) */
+ if (iter->xor_check_result && iter->async_tx.cookie)
+ *iter->xor_check_result =
+ iop_desc_get_zero_result(iter);
+
+ cookie = iop_adma_run_tx_complete_actions(
+ iter, iop_chan, cookie);
+
+ if (iop_adma_clean_slot(iter, iop_chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ iop_chan_idle(busy, iop_chan);
+
+ if (cookie > 0) {
+ iop_chan->completed_cookie = cookie;
+ pr_debug("\tcompleted cookie %d\n", cookie);
+ }
+}
+
+static void
+iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
+{
+ spin_lock_bh(&iop_chan->lock);
+ __iop_adma_slot_cleanup(iop_chan);
+ spin_unlock_bh(&iop_chan->lock);
+}
+
+static void iop_adma_tasklet(unsigned long data)
+{
+ struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
+ __iop_adma_slot_cleanup(chan);
+}
+
+static struct iop_adma_desc_slot *
+iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
+ int slots_per_op)
+{
+ struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
+ struct list_head chain = LIST_HEAD_INIT(chain);
+ int slots_found, retry = 0;
+
+ /* start the search from the last allocated descriptor;
+ * if a contiguous allocation cannot be found, restart the search
+ * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = iop_chan->last_used;
+ else
+ iter = list_entry(&iop_chan->all_slots,
+ struct iop_adma_desc_slot,
+ slot_node);
+
+ list_for_each_entry_safe_continue(
+ iter, _iter, &iop_chan->all_slots, slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ /* give up after finding the first busy slot
+ * on the second pass through the list
+ */
+ if (retry)
+ break;
+
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++) {
+ if (iop_desc_is_aligned(iter, slots_per_op))
+ alloc_start = iter;
+ else {
+ slots_found = 0;
+ continue;
+ }
+ }
+
+ if (slots_found == num_slots) {
+ struct iop_adma_desc_slot *alloc_tail = NULL;
+ struct iop_adma_desc_slot *last_used = NULL;
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+ dev_dbg(iop_chan->device->common.dev,
+ "allocated slot: %d "
+ "(desc %p phys: %#x) slots_per_op %d\n",
+ iter->idx, iter->hw_desc,
+ iter->async_tx.phys, slots_per_op);
+
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op)
+ iter->async_tx.ack = 1;
+ else
+ iter->async_tx.ack = 0;
+
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->slot_cnt = num_slots;
+ iter->xor_check_result = NULL;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ struct iop_adma_desc_slot,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->async_tx.tx_list);
+ iop_chan->last_used = last_used;
+ iop_desc_clear_next_desc(alloc_start);
+ iop_desc_clear_next_desc(alloc_tail);
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&iop_chan->irq_tasklet);
+
+ return NULL;
+}
+
+static dma_cookie_t
+iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
+ struct iop_adma_desc_slot *desc)
+{
+ dma_cookie_t cookie = iop_chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ iop_chan->common.cookie = desc->async_tx.cookie = cookie;
+ return cookie;
+}
+
+static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
+{
+ dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
+ iop_chan->pending);
+
+ if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
+ iop_chan->pending = 0;
+ iop_chan_append(iop_chan);
+ }
+}
+
+static dma_cookie_t
+iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
+ struct iop_adma_desc_slot *grp_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+
+ grp_start = sw_desc->group_head;
+ slot_cnt = grp_start->slot_cnt;
+ slots_per_op = grp_start->slots_per_op;
+
+ spin_lock_bh(&iop_chan->lock);
+ cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
+
+ old_chain_tail = list_entry(iop_chan->chain.prev,
+ struct iop_adma_desc_slot, chain_node);
+ list_splice_init(&sw_desc->async_tx.tx_list,
+ &old_chain_tail->chain_node);
+
+ /* fix up the hardware chain */
+ iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+
+ /* 1/ don't add pre-chained descriptors
+ * 2/ dummy read to flush next_desc write
+ */
+ BUG_ON(iop_desc_get_next_desc(sw_desc));
+
+ /* increment the pending count by the number of slots
+ * memcpy operations have a 1:1 (slot:operation) relation
+ * other operations are heavier and will pop the threshold
+ * more often.
+ */
+ iop_chan->pending += slot_cnt;
+ iop_adma_check_threshold(iop_chan);
+ spin_unlock_bh(&iop_chan->lock);
+
+ dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
+ __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+
+ return cookie;
+}
+
+static void
+iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
+ int index)
+{
+ struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
+
+ /* to do: support transfer lengths > IOP_ADMA_MAX_BYTE_COUNT */
+ iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
+}
+
+static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
+static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
+
+/* returns the number of allocated descriptors */
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ char *hw_desc;
+ int idx;
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *slot = NULL;
+ int init = iop_chan->slots_allocated ? 0 : 1;
+ struct iop_adma_platform_data *plat_data =
+ iop_chan->device->pdev->dev.platform_data;
+ int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
+
+ /* Allocate descriptor slots */
+ do {
+ idx = iop_chan->slots_allocated;
+ if (idx == num_descs_in_pool)
+ break;
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ printk(KERN_INFO "IOP ADMA Channel only initialized"
+ " %d descriptor slots", idx);
+ break;
+ }
+ hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
+
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = iop_adma_tx_submit;
+ slot->async_tx.tx_set_dest = iop_adma_set_dest;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->async_tx.tx_list);
+ hw_desc = (char *) iop_chan->device->dma_desc_pool;
+ slot->async_tx.phys =
+ (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
+ slot->idx = idx;
+
+ spin_lock_bh(&iop_chan->lock);
+ iop_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &iop_chan->all_slots);
+ spin_unlock_bh(&iop_chan->lock);
+ } while (iop_chan->slots_allocated < num_descs_in_pool);
+
+ if (idx && !iop_chan->last_used)
+ iop_chan->last_used = list_entry(iop_chan->all_slots.next,
+ struct iop_adma_desc_slot,
+ slot_node);
+
+ dev_dbg(iop_chan->device->common.dev,
+ "allocated %d descriptor slots last_used: %p\n",
+ iop_chan->slots_allocated, iop_chan->last_used);
+
+ /* initialize the channel and the chain with a null operation */
+ if (init) {
+ if (dma_has_cap(DMA_MEMCPY,
+ iop_chan->device->common.cap_mask))
+ iop_chan_start_null_memcpy(iop_chan);
+ else if (dma_has_cap(DMA_XOR,
+ iop_chan->device->common.cap_mask))
+ iop_chan_start_null_xor(iop_chan);
+ else
+ BUG();
+ }
+
+ return (idx > 0) ? idx : -ENOMEM;
+}
+
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_interrupt(struct dma_chan *chan)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+ iop_desc_init_interrupt(grp_start, iop_chan);
+ grp_start->unmap_len = 0;
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void
+iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
+ int index)
+{
+ struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
+ struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
+
+ iop_desc_set_memcpy_src_addr(grp_start, addr);
+}
+
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ int slot_cnt, slots_per_op;
+
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+
+ dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+ __FUNCTION__, len);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+ iop_desc_init_memcpy(grp_start, int_en);
+ iop_desc_set_byte_count(grp_start, iop_chan, len);
+ sw_desc->unmap_src_cnt = 1;
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
+ int int_en)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ int slot_cnt, slots_per_op;
+
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
+
+ dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+ __FUNCTION__, len);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+ iop_desc_init_memset(grp_start, int_en);
+ iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_block_fill_val(grp_start, value);
+ sw_desc->unmap_src_cnt = 1;
+ sw_desc->unmap_len = len;
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void
+iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
+ int index)
+{
+ struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
+ struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
+
+ iop_desc_set_xor_src_addr(grp_start, index, addr);
+}
+
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
+ int int_en)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ int slot_cnt, slots_per_op;
+
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
+
+ dev_dbg(iop_chan->device->common.dev,
+ "%s src_cnt: %d len: %u int_en: %d\n",
+ __FUNCTION__, src_cnt, len, int_en);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+ iop_desc_init_xor(grp_start, src_cnt, int_en);
+ iop_desc_set_byte_count(grp_start, iop_chan, len);
+ sw_desc->unmap_src_cnt = src_cnt;
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void
+iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
+ struct dma_async_tx_descriptor *tx,
+ int index)
+{
+ struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
+ struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
+
+ iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
+}
+
+static struct dma_async_tx_descriptor *
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
+ size_t len, u32 *result, int int_en)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ int slot_cnt, slots_per_op;
+
+ if (unlikely(!len))
+ return NULL;
+
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ __FUNCTION__, src_cnt, len);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+ iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+ iop_desc_set_zero_sum_byte_count(grp_start, len);
+ grp_start->xor_check_result = result;
+ pr_debug("\t%s: grp_start->xor_check_result: %p\n",
+ __FUNCTION__, grp_start->xor_check_result);
+ sw_desc->unmap_src_cnt = src_cnt;
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+ }
+ spin_unlock_bh(&iop_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void iop_adma_dependency_added(struct dma_chan *chan)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ tasklet_schedule(&iop_chan->irq_tasklet);
+}
+
+static void iop_adma_free_chan_resources(struct dma_chan *chan)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ struct iop_adma_desc_slot *iter, *_iter;
+ int in_use_descs = 0;
+
+ iop_adma_slot_cleanup(iop_chan);
+
+ spin_lock_bh(&iop_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(
+ iter, _iter, &iop_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ iop_chan->slots_allocated--;
+ }
+ iop_chan->last_used = NULL;
+
+ dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
+ __FUNCTION__, iop_chan->slots_allocated);
+ spin_unlock_bh(&iop_chan->lock);
+
+ /* one is ok since we left it there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * iop_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ */
+static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+ last_complete = iop_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ iop_adma_slot_cleanup(iop_chan);
+
+ last_used = chan->cookie;
+ last_complete = iop_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static irqreturn_t iop_adma_eot_handler(int irq, void *data)
+{
+ struct iop_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+
+ iop_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
+{
+ struct iop_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+
+ iop_adma_device_clear_eoc_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t iop_adma_err_handler(int irq, void *data)
+{
+ struct iop_adma_chan *chan = data;
+ unsigned long status = iop_chan_get_status(chan);
+
+ dev_printk(KERN_ERR, chan->device->common.dev,
+ "error ( %s%s%s%s%s%s%s)\n",
+ iop_is_err_int_parity(status, chan) ? "int_parity " : "",
+ iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
+ iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
+ iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
+ iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
+ iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
+ iop_is_err_split_tx(status, chan) ? "split_tx " : "");
+
+ iop_adma_device_clear_err_status(chan);
+
+ BUG();
+
+ return IRQ_HANDLED;
+}
+
+static void iop_adma_issue_pending(struct dma_chan *chan)
+{
+ struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
+
+ if (iop_chan->pending) {
+ iop_chan->pending = 0;
+ iop_chan_append(iop_chan);
+ }
+}
+
+/*
+ * Perform a transaction to verify the HW works.
+ */
+#define IOP_ADMA_TEST_SIZE 2000
+
+static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
+{
+ int i;
+ void *src, *dest;
+ dma_addr_t src_dma, dest_dma;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ struct dma_async_tx_descriptor *tx;
+ int err = 0;
+ struct iop_adma_chan *iop_chan;
+
+ dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+
+ src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+ dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
+ ((u8 *) src)[i] = (u8)i;
+
+ memset(dest, 0, IOP_ADMA_TEST_SIZE);
+
+ /* Start copy, using first DMA channel */
+ dma_chan = container_of(device->common.channels.next,
+ struct dma_chan,
+ device_node);
+ if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
+ dest_dma = dma_map_single(dma_chan->device->dev, dest,
+ IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
+ iop_adma_set_dest(dest_dma, tx, 0);
+ src_dma = dma_map_single(dma_chan->device->dev, src,
+ IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
+ iop_adma_memcpy_set_src(src_dma, tx, 0);
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(1);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test copy timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ iop_chan = to_iop_adma_chan(dma_chan);
+ dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
+ IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
+ if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test copy failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ iop_adma_free_chan_resources(dma_chan);
+out:
+ kfree(src);
+ kfree(dest);
+ return err;
+}
+
+#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
+static int __devinit
+iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
+{
+ int i, src_idx;
+ struct page *dest;
+ struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
+ struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+ dma_addr_t dma_addr, dest_dma;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
+ u8 cmp_byte = 0;
+ u32 cmp_word;
+ u32 zero_sum_result;
+ int err = 0;
+ struct iop_adma_chan *iop_chan;
+
+ dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+
+ for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
+ xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+ if (!xor_srcs[src_idx]) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+ }
+
+ dest = alloc_page(GFP_KERNEL);
+ if (!dest) {
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffers */
+ for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
+ u8 *ptr = page_address(xor_srcs[src_idx]);
+ for (i = 0; i < PAGE_SIZE; i++)
+ ptr[i] = (1 << src_idx);
+ }
+
+ for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
+ cmp_byte ^= (u8) (1 << src_idx);
+
+ cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+ (cmp_byte << 8) | cmp_byte;
+
+ memset(page_address(dest), 0, PAGE_SIZE);
+
+ dma_chan = container_of(device->common.channels.next,
+ struct dma_chan,
+ device_node);
+ if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* test xor */
+ tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
+ PAGE_SIZE, 1);
+ dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ iop_adma_set_dest(dest_dma, tx, 0);
+
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
+ dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ iop_adma_xor_set_src(dma_addr, tx, i);
+ }
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+ DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ iop_chan = to_iop_adma_chan(dma_chan);
+ dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i] != cmp_word) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test xor failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+ dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
+ PAGE_SIZE, DMA_TO_DEVICE);
+
+ /* skip zero sum if the capability is not present */
+ if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
+ goto free_resources;
+
+ /* zero sum the sources with the destination page */
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+ zero_sum_srcs[i] = xor_srcs[i];
+ zero_sum_srcs[i] = dest;
+
+ zero_sum_result = 1;
+
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
+ PAGE_SIZE, &zero_sum_result, 1);
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
+ dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
+ }
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test zero sum timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (zero_sum_result != 0) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test zero sum failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ /* test memset */
+ tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
+ dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ iop_adma_set_dest(dma_addr, tx, 0);
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test memset timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
+ u32 *ptr = page_address(dest);
+ if (ptr[i]) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test memset failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+ }
+
+ /* test for non-zero parity sum */
+ zero_sum_result = 0;
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
+ PAGE_SIZE, &zero_sum_result, 1);
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
+ dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
+ }
+
+ cookie = iop_adma_tx_submit(tx);
+ iop_adma_issue_pending(dma_chan);
+ async_tx_ack(tx);
+ msleep(8);
+
+ if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test non-zero sum timed out, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ if (zero_sum_result != 1) {
+ dev_printk(KERN_ERR, dma_chan->device->dev,
+ "Self-test non-zero sum failed compare, disabling\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+free_resources:
+ iop_adma_free_chan_resources(dma_chan);
+out:
+ src_idx = IOP_ADMA_NUM_SRC_TEST;
+ while (src_idx--)
+ __free_page(xor_srcs[src_idx]);
+ __free_page(dest);
+ return err;
+}
+
+static int __devexit iop_adma_remove(struct platform_device *dev)
+{
+ struct iop_adma_device *device = platform_get_drvdata(dev);
+ struct dma_chan *chan, *_chan;
+ struct iop_adma_chan *iop_chan;
+ int i;
+ struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
+
+ dma_async_device_unregister(&device->common);
+
+ for (i = 0; i < 3; i++) {
+ unsigned int irq;
+ irq = platform_get_irq(dev, i);
+ free_irq(irq, device);
+ }
+
+ dma_free_coherent(&dev->dev, plat_data->pool_size,
+ device->dma_desc_pool_virt, device->dma_desc_pool);
+
+ do {
+ struct resource *res;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, res->end - res->start);
+ } while (0);
+
+ list_for_each_entry_safe(chan, _chan, &device->common.channels,
+ device_node) {
+ iop_chan = to_iop_adma_chan(chan);
+ list_del(&chan->device_node);
+ kfree(iop_chan);
+ }
+ kfree(device);
+
+ return 0;
+}
+
+static int __devinit iop_adma_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0, i;
+ struct iop_adma_device *adev;
+ struct iop_adma_chan *iop_chan;
+ struct dma_device *dma_dev;
+ struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ res->end - res->start, pdev->name))
+ return -EBUSY;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+ dma_dev = &adev->common;
+
+ /* allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly flush the writes
+ */
+ if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto err_free_adev;
+ }
+
+ dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
+ __FUNCTION__, adev->dma_desc_pool_virt,
+ (void *) adev->dma_desc_pool);
+
+ adev->id = plat_data->hw_id;
+
+ /* discover transaction capabilities from the platform data */
+ dma_dev->cap_mask = plat_data->cap_mask;
+
+ adev->pdev = pdev;
+ platform_set_drvdata(pdev, adev);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* set base routines */
+ dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
+ dma_dev->device_is_tx_complete = iop_adma_is_complete;
+ dma_dev->device_issue_pending = iop_adma_issue_pending;
+ dma_dev->device_dependency_added = iop_adma_dependency_added;
+ dma_dev->dev = &pdev->dev;
+
+ /* set prep routines based on capability */
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
+ if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ dma_dev->max_xor = iop_adma_get_max_xor();
+ dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
+ }
+ if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_zero_sum =
+ iop_adma_prep_dma_zero_sum;
+ if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_interrupt =
+ iop_adma_prep_dma_interrupt;
+
+ iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
+ if (!iop_chan) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
+ iop_chan->device = adev;
+
+ iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
+ res->end - res->start);
+ if (!iop_chan->mmr_base) {
+ ret = -ENOMEM;
+ goto err_free_iop_chan;
+ }
+ tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
+ iop_chan);
+
+ /* clear errors before enabling interrupts */
+ iop_adma_device_clear_err_status(iop_chan);
+
+ for (i = 0; i < 3; i++) {
+ irq_handler_t handler[] = { iop_adma_eot_handler,
+ iop_adma_eoc_handler,
+ iop_adma_err_handler };
+ int irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto err_free_iop_chan;
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq,
+ handler[i], 0, pdev->name, iop_chan);
+ if (ret)
+ goto err_free_iop_chan;
+ }
+ }
+
+ spin_lock_init(&iop_chan->lock);
+ init_timer(&iop_chan->cleanup_watchdog);
+ iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
+ iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
+ INIT_LIST_HEAD(&iop_chan->chain);
+ INIT_LIST_HEAD(&iop_chan->all_slots);
+ INIT_RCU_HEAD(&iop_chan->common.rcu);
+ iop_chan->common.device = dma_dev;
+ list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
+
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
+ ret = iop_adma_memcpy_self_test(adev);
+ dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
+ if (ret)
+ goto err_free_iop_chan;
+ }
+
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
+ ret = iop_adma_xor_zero_sum_self_test(adev);
+ dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
+ if (ret)
+ goto err_free_iop_chan;
+ }
+
+ dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
+ "( %s%s%s%s%s%s%s%s%s%s)\n",
+ dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
+ dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
+ dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
+ dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
+ dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
+ dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+
+ dma_async_device_register(dma_dev);
+ goto out;
+
+ err_free_iop_chan:
+ kfree(iop_chan);
+ err_free_dma:
+ dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
+ adev->dma_desc_pool_virt, adev->dma_desc_pool);
+ err_free_adev:
+ kfree(adev);
+ out:
+ return ret;
+}
+
+static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
+{
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+
+ list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
+ sw_desc->async_tx.ack = 1;
+ iop_desc_init_memcpy(grp_start, 0);
+ iop_desc_set_byte_count(grp_start, iop_chan, 0);
+ iop_desc_set_dest_addr(grp_start, iop_chan, 0);
+ iop_desc_set_memcpy_src_addr(grp_start, 0);
+
+ cookie = iop_chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ iop_chan->completed_cookie = cookie - 1;
+ iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(iop_chan_is_busy(iop_chan));
+
+ /* clear any prior error-status bits */
+ iop_adma_device_clear_err_status(iop_chan);
+
+ /* disable operation */
+ iop_chan_disable(iop_chan);
+
+ /* set the descriptor address */
+ iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
+
+ /* 1/ don't add pre-chained descriptors
+ * 2/ dummy read to flush next_desc write
+ */
+ BUG_ON(iop_desc_get_next_desc(sw_desc));
+
+ /* run the descriptor */
+ iop_chan_enable(iop_chan);
+ } else
+ dev_printk(KERN_ERR, iop_chan->device->common.dev,
+ "failed to allocate null descriptor\n");
+ spin_unlock_bh(&iop_chan->lock);
+}
+
+static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
+{
+ struct iop_adma_desc_slot *sw_desc, *grp_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+
+ spin_lock_bh(&iop_chan->lock);
+ slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
+ sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ grp_start = sw_desc->group_head;
+ list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
+ sw_desc->async_tx.ack = 1;
+ iop_desc_init_null_xor(grp_start, 2, 0);
+ iop_desc_set_byte_count(grp_start, iop_chan, 0);
+ iop_desc_set_dest_addr(grp_start, iop_chan, 0);
+ iop_desc_set_xor_src_addr(grp_start, 0, 0);
+ iop_desc_set_xor_src_addr(grp_start, 1, 0);
+
+ cookie = iop_chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ iop_chan->completed_cookie = cookie - 1;
+ iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(iop_chan_is_busy(iop_chan));
+
+ /* clear any prior error-status bits */
+ iop_adma_device_clear_err_status(iop_chan);
+
+ /* disable operation */
+ iop_chan_disable(iop_chan);
+
+ /* set the descriptor address */
+ iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
+
+ /* 1/ don't add pre-chained descriptors
+ * 2/ dummy read to flush next_desc write
+ */
+ BUG_ON(iop_desc_get_next_desc(sw_desc));
+
+ /* run the descriptor */
+ iop_chan_enable(iop_chan);
+ } else
+ dev_printk(KERN_ERR, iop_chan->device->common.dev,
+ "failed to allocate null descriptor\n");
+ spin_unlock_bh(&iop_chan->lock);
+}
+
+static struct platform_driver iop_adma_driver = {
+ .probe = iop_adma_probe,
+ .remove = iop_adma_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "iop-adma",
+ },
+};
+
+static int __init iop_adma_init (void)
+{
+ /* it's currently unsafe to unload this module */
+ /* if forced, worst case is that rmmod hangs */
+ __unsafe(THIS_MODULE);
+
+ return platform_driver_register(&iop_adma_driver);
+}
+
+static void __exit iop_adma_exit (void)
+{
+ platform_driver_unregister(&iop_adma_driver);
+ return;
+}
+
+module_init(iop_adma_init);
+module_exit(iop_adma_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("IOP ADMA Engine Driver");
+MODULE_LICENSE("GPL");
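
(A minimal sketch, not part of the patch, of how a client reaches an engine like this through
the async_tx layer rather than calling the driver's prep routines directly; the helper and
callback below are illustrative only.)

        #include <linux/async_tx.h>

        static void copy_done(void *ctx)
        {
                pr_debug("offloaded copy complete\n");
        }

        static struct dma_async_tx_descriptor *
        offload_page_copy(struct page *dest, struct page *src)
        {
                /* async_memcpy() picks a capable DMA channel (e.g. iop-adma)
                 * or falls back to a synchronous copy if none is registered */
                return async_memcpy(dest, src, 0, 0, PAGE_SIZE,
                                    ASYNC_TX_ACK, NULL, copy_done, NULL);
        }
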
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 807c402df049..1724c41d2414 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -3,20 +3,18 @@
# Copyright (c) 2003 Linux Networx
# Licensed and distributed under the GPL
#
-# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $
-#
-menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)'
+menuconfig EDAC
+ bool "EDAC - error detection and reporting (EXPERIMENTAL)"
depends on HAS_IOMEM
-
-config EDAC
- tristate "EDAC core system error reporting (EXPERIMENTAL)"
- depends on X86 && EXPERIMENTAL
+ depends on EXPERIMENTAL
+ depends on X86 || MIPS || PPC
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
- supporting chipset: memory errors, cache errors, PCI errors,
- thermal throttling, etc.. If unsure, select 'Y'.
+ supporting chipset or other subsystems:
+ memory errors, cache errors, PCI errors, thermal throttling, etc.
+ If unsure, select 'Y'.
If this code is reporting problems on your system, please
see the EDAC project web pages for more information at:
@@ -30,13 +28,12 @@ config EDAC
There is also a mailing list for the EDAC project, which can
be found via the sourceforge page.
+if EDAC
comment "Reporting subsystems"
- depends on EDAC
config EDAC_DEBUG
bool "Debugging"
- depends on EDAC
help
This turns on debugging information for the entire EDAC
sub-system. You can insert module with "debug_level=x", current
@@ -45,7 +42,6 @@ config EDAC_DEBUG
config EDAC_MM_EDAC
tristate "Main Memory EDAC (Error Detection And Correction) reporting"
- depends on EDAC
default y
help
Some systems are able to detect and correct errors in main
@@ -77,6 +73,14 @@ config EDAC_E752X
Support for error detection and correction on the Intel
E7520, E7525, E7320 server chipsets.
+config EDAC_I82443BXGX
+ tristate "Intel 82443BX/GX (440BX/GX)"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ depends on BROKEN
+ help
+ Support for error detection and correction on the Intel
+ 82443BX/GX memory controllers (440BX/GX chipsets).
+
config EDAC_I82875P
tristate "Intel 82875p (D82875P, E7210)"
depends on EDAC_MM_EDAC && PCI && X86_32
@@ -84,6 +88,20 @@ config EDAC_I82875P
Support for error detection and correction on the Intel
D82875P and E7210 server chipsets.
+config EDAC_I82975X
+ tristate "Intel 82975x (D82975x)"
+ depends on EDAC_MM_EDAC && PCI && X86
+ help
+ Support for error detection and correction on the Intel
+ DP82975x server chipsets.
+
+config EDAC_I3000
+ tristate "Intel 3000/3010"
+ depends on EDAC_MM_EDAC && PCI && X86_32
+ help
+ Support for error detection and correction on the Intel
+ 3000 and 3010 server chipsets.
+
config EDAC_I82860
tristate "Intel 82860"
depends on EDAC_MM_EDAC && PCI && X86_32
@@ -98,17 +116,20 @@ config EDAC_R82600
Support for error detection and correction on the Radisys
82600 embedded chipset.
-choice
- prompt "Error detecting method"
- depends on EDAC
- default EDAC_POLL
+config EDAC_I5000
+ tristate "Intel Greencreek/Blackford chipset"
+ depends on EDAC_MM_EDAC && X86 && PCI
+ help
+ Support for error detection and correction on the Intel
+ Greencreek/Blackford chipsets.
-config EDAC_POLL
- bool "Poll for errors"
- depends on EDAC
+config EDAC_PASEMI
+ tristate "PA Semi PWRficient"
+ depends on EDAC_MM_EDAC && PCI
+ depends on PPC
help
- Poll the chipset periodically to detect errors.
+ Support for error detection and correction on PA Semi
+ PWRficient.
-endchoice
-endmenu
+endif # EDAC
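
(A minimal, illustrative .config fragment showing the effect of the menuconfig conversion:
EDAC itself is now a bool gate, with the core and individual drivers selected beneath it.)

        CONFIG_EDAC=y
        CONFIG_EDAC_MM_EDAC=m
        CONFIG_EDAC_E752X=m
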
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 93137fdab4b3..02c09f0ff157 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -5,14 +5,27 @@
# This file may be distributed under the terms of the
# GNU General Public License.
#
-# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $
-obj-$(CONFIG_EDAC_MM_EDAC) += edac_mc.o
+obj-$(CONFIG_EDAC) := edac_stub.o
+obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
+
+edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
+edac_core-objs += edac_module.o edac_device_sysfs.o
+
+ifdef CONFIG_PCI
+edac_core-objs += edac_pci.o edac_pci_sysfs.o
+endif
+
obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
+obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
+obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o
+obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o
+obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
+obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f79f6b587bfa..f22075410591 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,9 +17,9 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
-#define AMD76X_REVISION " Ver: 2.0.1 " __DATE__
+#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR "amd76x_edac"
#define amd76x_printk(level, fmt, arg...) \
@@ -86,13 +86,13 @@ struct amd76x_dev_info {
static const struct amd76x_dev_info amd76x_devs[] = {
[AMD761] = {
- .ctl_name = "AMD761"
- },
+ .ctl_name = "AMD761"},
[AMD762] = {
- .ctl_name = "AMD762"
- },
+ .ctl_name = "AMD762"},
};
+static struct edac_pci_ctl_info *amd76x_pci;
+
/**
* amd76x_get_error_info - fetch error information
* @mci: Memory controller
@@ -102,21 +102,21 @@ static const struct amd76x_dev_info amd76x_devs[] = {
* on the chip so that further errors will be reported
*/
static void amd76x_get_error_info(struct mem_ctl_info *mci,
- struct amd76x_error_info *info)
+ struct amd76x_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->dev);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
- &info->ecc_mode_status);
+ &info->ecc_mode_status);
if (info->ecc_mode_status & BIT(8))
pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
- (u32) BIT(8), (u32) BIT(8));
+ (u32) BIT(8), (u32) BIT(8));
if (info->ecc_mode_status & BIT(9))
pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
- (u32) BIT(9), (u32) BIT(9));
+ (u32) BIT(9), (u32) BIT(9));
}
/**
@@ -130,7 +130,8 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci,
* then attempt to handle and clean up after the error
*/
static int amd76x_process_error_info(struct mem_ctl_info *mci,
- struct amd76x_error_info *info, int handle_errors)
+ struct amd76x_error_info *info,
+ int handle_errors)
{
int error_found;
u32 row;
@@ -138,7 +139,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
error_found = 0;
/*
- * Check for an uncorrectable error
+ * Check for an uncorrectable error
*/
if (info->ecc_mode_status & BIT(8)) {
error_found = 1;
@@ -146,12 +147,12 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
+ row, mci->ctl_name);
}
}
/*
- * Check for a correctable error
+ * Check for a correctable error
*/
if (info->ecc_mode_status & BIT(9)) {
error_found = 1;
@@ -159,7 +160,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
+ 0, row, 0, mci->ctl_name);
}
}
@@ -182,7 +183,7 @@ static void amd76x_check(struct mem_ctl_info *mci)
}
static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- enum edac_type edac_mode)
+ enum edac_type edac_mode)
{
struct csrow_info *csrow;
u32 mba, mba_base, mba_mask, dms;
@@ -193,8 +194,7 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
- AMD76X_MEM_BASE_ADDR + (index * 4),
- &mba);
+ AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
if (!(mba & BIT(0)))
continue;
@@ -238,7 +238,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
- mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
+ mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
if (mci == NULL) {
return -ENOMEM;
@@ -249,24 +249,36 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
- (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = AMD76X_REVISION;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = amd76x_check;
mci->ctl_page_to_phys = NULL;
amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
- amd76x_get_error_info(mci, &discard); /* clear counters */
+ amd76x_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
- if (edac_mc_add_mc(mci,0)) {
+ if (edac_mc_add_mc(mci)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
+ /* allocating generic PCI control info */
+ amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!amd76x_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
@@ -278,7 +290,7 @@ fail:
/* returns count (>= 0), or negative on error */
static int __devinit amd76x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
@@ -300,6 +312,9 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
debugf0("%s()\n", __func__);
+ if (amd76x_pci)
+ edac_pci_release_generic_ctl(amd76x_pci);
+
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
@@ -308,16 +323,14 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
{
- PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD762
- },
+ PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD762},
{
- PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- AMD761
- },
+ PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ AMD761},
{
- 0,
- } /* 0 terminated list. */
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 8bcc887692ab..3bba224cb55d 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -22,13 +22,16 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include "edac_mc.h"
+#include <linux/edac.h>
+#include "edac_core.h"
-#define E752X_REVISION " Ver: 2.0.1 " __DATE__
+#define E752X_REVISION " Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR "e752x_edac"
static int force_function_unhide;
+static struct edac_pci_ctl_info *e752x_pci;
+
#define e752x_printk(level, fmt, arg...) \
edac_printk(level, "e752x", fmt, ##arg)
@@ -203,25 +206,22 @@ static const struct e752x_dev_info e752x_devs[] = {
[E7520] = {
.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
- .ctl_name = "E7520"
- },
+ .ctl_name = "E7520"},
[E7525] = {
.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
- .ctl_name = "E7525"
- },
+ .ctl_name = "E7525"},
[E7320] = {
.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
- .ctl_name = "E7320"
- },
+ .ctl_name = "E7320"},
};
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
+ unsigned long page)
{
u32 remap;
- struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
debugf3("%s()\n", __func__);
@@ -241,13 +241,13 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
}
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome)
+ u32 sec1_add, u16 sec1_syndrome)
{
u32 page;
int row;
int channel;
int i;
- struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
debugf3("%s()\n", __func__);
@@ -261,7 +261,8 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
e752x_printk(KERN_WARNING,
"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
- pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]);
+ pvt->map[4], pvt->map[5], pvt->map[6],
+ pvt->map[7]);
/* test for channel remapping */
for (i = 0; i < 8; i++) {
@@ -275,24 +276,22 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
row = i;
else
e752x_mc_printk(mci, KERN_WARNING,
- "row %d not found in remap table\n", row);
+ "row %d not found in remap table\n",
+ row);
} else
row = edac_mc_find_csrow_by_page(mci, page);
/* 0 = channel A, 1 = channel B */
channel = !(error_one & 1);
- if (!pvt->map_type)
- row = 7 - row;
-
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
sec1_syndrome, row, channel, "e752x CE");
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome, int *error_found,
- int handle_error)
+ u32 sec1_add, u16 sec1_syndrome, int *error_found,
+ int handle_error)
{
*error_found = 1;
@@ -301,11 +300,11 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
}
static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
- u32 ded_add, u32 scrb_add)
+ u32 ded_add, u32 scrb_add)
{
u32 error_2b, block_page;
int row;
- struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
debugf3("%s()\n", __func__);
@@ -316,14 +315,14 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
block_page = error_2b >> (PAGE_SHIFT - 4);
row = pvt->mc_symmetric ?
- /* chip select are bits 14 & 13 */
+ /* chip select are bits 14 & 13 */
((block_page >> 1) & 3) :
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
+ offset_in_page(error_2b << 4),
+ row, "e752x UE from Read");
}
if (error_one & 0x0404) {
error_2b = scrb_add;
@@ -332,19 +331,20 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
block_page = error_2b >> (PAGE_SHIFT - 4);
row = pvt->mc_symmetric ?
- /* chip select are bits 14 & 13 */
+ /* chip select are bits 14 & 13 */
((block_page >> 1) & 3) :
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Scruber");
+ offset_in_page(error_2b << 4),
+				  row, "e752x UE from Scrubber");
}
}
static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
- u32 ded_add, u32 scrb_add, int *error_found, int handle_error)
+ u32 ded_add, u32 scrb_add, int *error_found,
+ int handle_error)
{
*error_found = 1;
@@ -353,7 +353,7 @@ static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
}
static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
*error_found = 1;
@@ -365,24 +365,24 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
- u32 retry_add)
+ u32 retry_add)
{
u32 error_1b, page;
int row;
- struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
+ struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
error_1b = retry_add;
- page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
- row = pvt->mc_symmetric ?
- ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
+ page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
+ row = pvt->mc_symmetric ? ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
edac_mc_find_csrow_by_page(mci, page);
e752x_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, row %d : Memory read retry\n",
- (long unsigned int) page, row);
+ "CE page 0x%lx, row %d : Memory read retry\n",
+ (long unsigned int)page, row);
}
static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
- u32 retry_add, int *error_found, int handle_error)
+ u32 retry_add, int *error_found,
+ int handle_error)
{
*error_found = 1;
@@ -391,7 +391,7 @@ static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
}
static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
*error_found = 1;
@@ -420,7 +420,7 @@ static void do_global_error(int fatal, u32 errors)
}
static inline void global_error(int fatal, u32 errors, int *error_found,
- int handle_error)
+ int handle_error)
{
*error_found = 1;
@@ -447,7 +447,7 @@ static void do_hub_error(int fatal, u8 errors)
}
static inline void hub_error(int fatal, u8 errors, int *error_found,
- int handle_error)
+ int handle_error)
{
*error_found = 1;
@@ -505,7 +505,7 @@ static void do_sysbus_error(int fatal, u32 errors)
}
static inline void sysbus_error(int fatal, u32 errors, int *error_found,
- int handle_error)
+ int handle_error)
{
*error_found = 1;
@@ -514,7 +514,7 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found,
}
static void e752x_check_hub_interface(struct e752x_error_info *info,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
u8 stat8;
@@ -522,33 +522,32 @@ static void e752x_check_hub_interface(struct e752x_error_info *info,
stat8 = info->hi_ferr;
- if(stat8 & 0x7f) { /* Error, so process */
+ if (stat8 & 0x7f) { /* Error, so process */
stat8 &= 0x7f;
- if(stat8 & 0x2b)
+ if (stat8 & 0x2b)
hub_error(1, stat8 & 0x2b, error_found, handle_error);
- if(stat8 & 0x54)
+ if (stat8 & 0x54)
hub_error(0, stat8 & 0x54, error_found, handle_error);
}
-
//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
stat8 = info->hi_nerr;
- if(stat8 & 0x7f) { /* Error, so process */
+ if (stat8 & 0x7f) { /* Error, so process */
stat8 &= 0x7f;
if (stat8 & 0x2b)
hub_error(1, stat8 & 0x2b, error_found, handle_error);
- if(stat8 & 0x54)
+ if (stat8 & 0x54)
hub_error(0, stat8 & 0x54, error_found, handle_error);
}
}
static void e752x_check_sysbus(struct e752x_error_info *info,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
u32 stat32, error32;
@@ -556,47 +555,47 @@ static void e752x_check_sysbus(struct e752x_error_info *info,
stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
if (stat32 == 0)
- return; /* no errors */
+ return; /* no errors */
error32 = (stat32 >> 16) & 0x3ff;
stat32 = stat32 & 0x3ff;
- if(stat32 & 0x087)
+ if (stat32 & 0x087)
sysbus_error(1, stat32 & 0x087, error_found, handle_error);
- if(stat32 & 0x378)
+ if (stat32 & 0x378)
sysbus_error(0, stat32 & 0x378, error_found, handle_error);
- if(error32 & 0x087)
+ if (error32 & 0x087)
sysbus_error(1, error32 & 0x087, error_found, handle_error);
- if(error32 & 0x378)
+ if (error32 & 0x378)
sysbus_error(0, error32 & 0x378, error_found, handle_error);
}
-static void e752x_check_membuf (struct e752x_error_info *info,
- int *error_found, int handle_error)
+static void e752x_check_membuf(struct e752x_error_info *info,
+ int *error_found, int handle_error)
{
u8 stat8;
stat8 = info->buf_ferr;
- if (stat8 & 0x0f) { /* Error, so process */
+ if (stat8 & 0x0f) { /* Error, so process */
stat8 &= 0x0f;
membuf_error(stat8, error_found, handle_error);
}
stat8 = info->buf_nerr;
- if (stat8 & 0x0f) { /* Error, so process */
+ if (stat8 & 0x0f) { /* Error, so process */
stat8 &= 0x0f;
membuf_error(stat8, error_found, handle_error);
}
}
-static void e752x_check_dram (struct mem_ctl_info *mci,
- struct e752x_error_info *info, int *error_found,
- int handle_error)
+static void e752x_check_dram(struct mem_ctl_info *mci,
+ struct e752x_error_info *info, int *error_found,
+ int handle_error)
{
u16 error_one, error_next;
@@ -604,55 +603,52 @@ static void e752x_check_dram (struct mem_ctl_info *mci,
error_next = info->dram_nerr;
/* decode and report errors */
- if(error_one & 0x0101) /* check first error correctable */
+ if (error_one & 0x0101) /* check first error correctable */
process_ce(mci, error_one, info->dram_sec1_add,
- info->dram_sec1_syndrome, error_found,
- handle_error);
+ info->dram_sec1_syndrome, error_found, handle_error);
- if(error_next & 0x0101) /* check next error correctable */
+ if (error_next & 0x0101) /* check next error correctable */
process_ce(mci, error_next, info->dram_sec2_add,
- info->dram_sec2_syndrome, error_found,
- handle_error);
+ info->dram_sec2_syndrome, error_found, handle_error);
- if(error_one & 0x4040)
+ if (error_one & 0x4040)
process_ue_no_info_wr(mci, error_found, handle_error);
- if(error_next & 0x4040)
+ if (error_next & 0x4040)
process_ue_no_info_wr(mci, error_found, handle_error);
- if(error_one & 0x2020)
+ if (error_one & 0x2020)
process_ded_retry(mci, error_one, info->dram_retr_add,
- error_found, handle_error);
+ error_found, handle_error);
- if(error_next & 0x2020)
+ if (error_next & 0x2020)
process_ded_retry(mci, error_next, info->dram_retr_add,
- error_found, handle_error);
+ error_found, handle_error);
- if(error_one & 0x0808)
- process_threshold_ce(mci, error_one, error_found,
- handle_error);
+ if (error_one & 0x0808)
+ process_threshold_ce(mci, error_one, error_found, handle_error);
- if(error_next & 0x0808)
+ if (error_next & 0x0808)
process_threshold_ce(mci, error_next, error_found,
- handle_error);
+ handle_error);
- if(error_one & 0x0606)
+ if (error_one & 0x0606)
process_ue(mci, error_one, info->dram_ded_add,
- info->dram_scrb_add, error_found, handle_error);
+ info->dram_scrb_add, error_found, handle_error);
- if(error_next & 0x0606)
+ if (error_next & 0x0606)
process_ue(mci, error_next, info->dram_ded_add,
- info->dram_scrb_add, error_found, handle_error);
+ info->dram_scrb_add, error_found, handle_error);
}
-static void e752x_get_error_info (struct mem_ctl_info *mci,
- struct e752x_error_info *info)
+static void e752x_get_error_info(struct mem_ctl_info *mci,
+ struct e752x_error_info *info)
{
struct pci_dev *dev;
struct e752x_pvt *pvt;
memset(info, 0, sizeof(*info));
- pvt = (struct e752x_pvt *) mci->pvt_info;
+ pvt = (struct e752x_pvt *)mci->pvt_info;
dev = pvt->dev_d0f1;
pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
@@ -661,8 +657,7 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
pci_read_config_word(dev, E752X_SYSBUS_FERR,
&info->sysbus_ferr);
pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
- pci_read_config_word(dev, E752X_DRAM_FERR,
- &info->dram_ferr);
+ pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
&info->dram_sec1_add);
pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
@@ -688,7 +683,7 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
if (info->dram_ferr)
pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
- info->dram_ferr, info->dram_ferr);
+ info->dram_ferr, info->dram_ferr);
pci_write_config_dword(dev, E752X_FERR_GLOBAL,
info->ferr_global);
@@ -701,8 +696,7 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
pci_read_config_word(dev, E752X_SYSBUS_NERR,
&info->sysbus_nerr);
pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
- pci_read_config_word(dev, E752X_DRAM_NERR,
- &info->dram_nerr);
+ pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
&info->dram_sec2_add);
pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
@@ -722,15 +716,16 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
if (info->dram_nerr)
pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
- info->dram_nerr, info->dram_nerr);
+ info->dram_nerr, info->dram_nerr);
pci_write_config_dword(dev, E752X_NERR_GLOBAL,
info->nerr_global);
}
}
-static int e752x_process_error_info (struct mem_ctl_info *mci,
- struct e752x_error_info *info, int handle_errors)
+static int e752x_process_error_info(struct mem_ctl_info *mci,
+ struct e752x_error_info *info,
+ int handle_errors)
{
u32 error32, stat32;
int error_found;
@@ -776,26 +771,38 @@ static inline int dual_channel_active(u16 ddrcsr)
return (((ddrcsr >> 12) & 3) == 3);
}
+/* Remap csrow index numbers if map_type is "reverse"
+ */
+static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
+{
+ struct e752x_pvt *pvt = mci->pvt_info;
+
+ if (!pvt->map_type)
+ return (7 - index);
+
+ return (index);
+}
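A minimal usage sketch: the helper is meant to sit between the DRB index loop and the csrow array, so both the normal and the reversed board wiring go through one place (the actual call site appears in the e752x_init_csrows hunk below).

	/* pick the csrow slot through the remap helper instead of
	 * indexing mci->csrows[] directly
	 */
	csrow = &mci->csrows[remap_csrow_index(mci, index)];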
+
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- u16 ddrcsr)
+ u16 ddrcsr)
{
struct csrow_info *csrow;
unsigned long last_cumul_size;
int index, mem_dev, drc_chan;
- int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
- int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
+ int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
+ int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u8 value;
u32 dra, drc, cumul_size;
dra = 0;
- for (index=0; index < 4; index++) {
+ for (index = 0; index < 4; index++) {
u8 dra_reg;
- pci_read_config_byte(pdev, E752X_DRA+index, &dra_reg);
+ pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
dra |= dra_reg << (index * 8);
}
pci_read_config_dword(pdev, E752X_DRC, &drc);
drc_chan = dual_channel_active(ddrcsr);
- drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
+ drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
/* The dram row boundary (DRB) reg values are boundary address for
@@ -806,7 +813,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
/* mem_dev 0=x8, 1=x4 */
mem_dev = (dra >> (index * 4 + 2)) & 0x3;
- csrow = &mci->csrows[index];
+ csrow = &mci->csrows[remap_csrow_index(mci, index)];
mem_dev = (mem_dev == 2);
pci_read_config_byte(pdev, E752X_DRB + index, &value);
@@ -843,10 +850,10 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
}
static void e752x_init_mem_map_table(struct pci_dev *pdev,
- struct e752x_pvt *pvt)
+ struct e752x_pvt *pvt)
{
int index;
- u8 value, last, row, stat8;
+ u8 value, last, row;
last = 0;
row = 0;
@@ -858,7 +865,7 @@ static void e752x_init_mem_map_table(struct pci_dev *pdev,
/* no dimm in the slot, so flag it as empty */
pvt->map[index] = 0xff;
pvt->map[index + 1] = 0xff;
- } else { /* there is a dimm in the slot */
+ } else { /* there is a dimm in the slot */
pvt->map[index] = row;
row++;
last = value;
@@ -866,31 +873,25 @@ static void e752x_init_mem_map_table(struct pci_dev *pdev,
* sided
*/
pci_read_config_byte(pdev, E752X_DRB + index + 1,
- &value);
- pvt->map[index + 1] = (value == last) ?
- 0xff : /* the dimm is single sided,
- so flag as empty */
- row; /* this is a double sided dimm
- to save the next row # */
+ &value);
+
+ /* the dimm is single sided, so flag as empty */
+ /* this is a double sided dimm to save the next row #*/
+ pvt->map[index + 1] = (value == last) ? 0xff : row;
row++;
last = value;
}
}
-
- /* set the map type. 1 = normal, 0 = reversed */
- pci_read_config_byte(pdev, E752X_DRM, &stat8);
- pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
}
/* Return 0 on success or 1 on failure. */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
- struct e752x_pvt *pvt)
+ struct e752x_pvt *pvt)
{
struct pci_dev *dev;
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev,
- pvt->bridge_ck);
+ pvt->dev_info->err_dev, pvt->bridge_ck);
if (pvt->bridge_ck == NULL)
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
@@ -898,13 +899,13 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
return 1;
}
dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
- NULL);
+ NULL);
if (dev == NULL)
goto fail;
@@ -942,12 +943,22 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
u16 ddrcsr;
- int drc_chan; /* Number of channels 0=1chan,1=2chan */
+ int drc_chan; /* Number of channels 0=1chan,1=2chan */
struct e752x_error_info discard;
debugf0("%s(): mci\n", __func__);
debugf0("Starting Probe1\n");
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ break;
+ }
+
/* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
* exclusive access, we take care not to violate that assumption and
@@ -966,7 +977,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
- mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
+ mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
if (mci == NULL) {
return -ENOMEM;
@@ -975,14 +986,14 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
- EDAC_FLAG_S4ECD4ED;
+ EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E752X_REVISION;
mci->dev = &pdev->dev;
debugf3("%s(): init pvt\n", __func__);
- pvt = (struct e752x_pvt *) mci->pvt_info;
+ pvt = (struct e752x_pvt *)mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
@@ -993,16 +1004,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = e752x_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
- e752x_init_csrows(mci, pdev, ddrcsr);
- e752x_init_mem_map_table(pdev, pvt);
-
- /* set the map type. 1 = normal, 0 = reversed */
+ /* set the map type. 1 = normal, 0 = reversed
+ * Must be set before e752x_init_csrows in case csrow mapping
+ * is reversed.
+ */
pci_read_config_byte(pdev, E752X_DRM, &stat8);
pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
+ e752x_init_csrows(mci, pdev, ddrcsr);
+ e752x_init_mem_map_table(pdev, pvt);
+
mci->edac_cap |= EDAC_FLAG_NONE;
debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
@@ -1014,19 +1029,29 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
e752x_printk(KERN_INFO,
- "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
- pvt->remapbase, pvt->remaplimit);
+ "tolm = %x, remapbase = %x, remaplimit = %x\n",
+ pvt->tolm, pvt->remapbase, pvt->remaplimit);
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
- if (edac_mc_add_mc(mci,0)) {
+ if (edac_mc_add_mc(mci)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
e752x_init_error_reporting_regs(pvt);
- e752x_get_error_info(mci, &discard); /* clear other MCH errors */
+ e752x_get_error_info(mci, &discard); /* clear other MCH errors */
+
+ /* allocating generic PCI control info */
+ e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!e752x_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n", __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
@@ -1043,12 +1068,12 @@ fail:
/* returns count (>= 0), or negative on error */
static int __devinit e752x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
/* wake up and enable device */
- if(pci_enable_device(pdev) < 0)
+ if (pci_enable_device(pdev) < 0)
return -EIO;
return e752x_probe1(pdev, ent->driver_data);
@@ -1061,10 +1086,13 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
debugf0("%s()\n", __func__);
+ if (e752x_pci)
+ edac_pci_release_generic_ctl(e752x_pci);
+
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
- pvt = (struct e752x_pvt *) mci->pvt_info;
+ pvt = (struct e752x_pvt *)mci->pvt_info;
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
pci_dev_put(pvt->bridge_ck);
@@ -1073,20 +1101,17 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
{
- PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7520
- },
+ PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7520},
{
- PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7525
- },
+ PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7525},
{
- PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7320
- },
+ PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7320},
{
- 0,
- } /* 0 terminated list. */
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
@@ -1122,5 +1147,6 @@ MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
-" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
-
+ " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 310d91b41c96..96ecc4926641 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -27,9 +27,10 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include "edac_mc.h"
+#include <linux/edac.h>
+#include "edac_core.h"
-#define E7XXX_REVISION " Ver: 2.0.1 " __DATE__
+#define E7XXX_REVISION " Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR "e7xxx_edac"
#define e7xxx_printk(level, fmt, arg...) \
@@ -143,23 +144,21 @@ struct e7xxx_error_info {
u32 dram_uelog_add;
};
+static struct edac_pci_ctl_info *e7xxx_pci;
+
static const struct e7xxx_dev_info e7xxx_devs[] = {
[E7500] = {
.err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
- .ctl_name = "E7500"
- },
+ .ctl_name = "E7500"},
[E7501] = {
.err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
- .ctl_name = "E7501"
- },
+ .ctl_name = "E7501"},
[E7505] = {
.err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
- .ctl_name = "E7505"
- },
+ .ctl_name = "E7505"},
[E7205] = {
.err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
- .ctl_name = "E7205"
- },
+ .ctl_name = "E7205"},
};
/* FIXME - is this valid for both SECDED and S4ECD4ED? */
@@ -180,15 +179,15 @@ static inline int e7xxx_find_channel(u16 syndrome)
}
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
+ unsigned long page)
{
u32 remap;
- struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
debugf3("%s()\n", __func__);
if ((page < pvt->tolm) ||
- ((page >= 0x100000) && (page < pvt->remapbase)))
+ ((page >= 0x100000) && (page < pvt->remapbase)))
return page;
remap = (page - pvt->tolm) + pvt->remapbase;
@@ -200,8 +199,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
return pvt->tolm - 1;
}
-static void process_ce(struct mem_ctl_info *mci,
- struct e7xxx_error_info *info)
+static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
{
u32 error_1b, page;
u16 syndrome;
@@ -212,7 +210,7 @@ static void process_ce(struct mem_ctl_info *mci,
/* read the error address */
error_1b = info->dram_celog_add;
/* FIXME - should use PAGE_SHIFT */
- page = error_1b >> 6; /* convert the address to 4k page */
+ page = error_1b >> 6; /* convert the address to 4k page */
/* read the syndrome */
syndrome = info->dram_celog_syndrome;
/* FIXME - check for -1 */
@@ -228,8 +226,7 @@ static void process_ce_no_info(struct mem_ctl_info *mci)
edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
}
-static void process_ue(struct mem_ctl_info *mci,
- struct e7xxx_error_info *info)
+static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
{
u32 error_2b, block_page;
int row;
@@ -238,7 +235,7 @@ static void process_ue(struct mem_ctl_info *mci,
/* read the error address */
error_2b = info->dram_uelog_add;
/* FIXME - should use PAGE_SHIFT */
- block_page = error_2b >> 6; /* convert to 4k address */
+ block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
}
@@ -249,16 +246,14 @@ static void process_ue_no_info(struct mem_ctl_info *mci)
edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
}
-static void e7xxx_get_error_info (struct mem_ctl_info *mci,
- struct e7xxx_error_info *info)
+static void e7xxx_get_error_info(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info)
{
struct e7xxx_pvt *pvt;
- pvt = (struct e7xxx_pvt *) mci->pvt_info;
- pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
- &info->dram_ferr);
- pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
- &info->dram_nerr);
+ pvt = (struct e7xxx_pvt *)mci->pvt_info;
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
+ pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);
if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
@@ -279,8 +274,9 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci,
pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
}
-static int e7xxx_process_error_info (struct mem_ctl_info *mci,
- struct e7xxx_error_info *info, int handle_errors)
+static int e7xxx_process_error_info(struct mem_ctl_info *mci,
+ struct e7xxx_error_info *info,
+ int handle_errors)
{
int error_found;
@@ -341,7 +337,6 @@ static inline int dual_channel_active(u32 drc, int dev_idx)
return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
}
-
/* Return DRB granularity (0=32mb, 1=64mb). */
static inline int drb_granularity(u32 drc, int dev_idx)
{
@@ -349,9 +344,8 @@ static inline int drb_granularity(u32 drc, int dev_idx)
return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
}
-
static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- int dev_idx, u32 drc)
+ int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
int index;
@@ -419,10 +413,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
struct e7xxx_error_info discard;
debugf0("%s(): mci\n", __func__);
+
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ break;
+ }
+
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
- mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
+ mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
if (mci == NULL)
return -ENOMEM;
@@ -430,17 +435,16 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
- EDAC_FLAG_S4ECD4ED;
+ EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E7XXX_REVISION;
mci->dev = &pdev->dev;
debugf3("%s(): init pvt\n", __func__);
- pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pvt = (struct e7xxx_pvt *)mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
- pvt->dev_info->err_dev,
- pvt->bridge_ck);
+ pvt->dev_info->err_dev, pvt->bridge_ck);
if (!pvt->bridge_ck) {
e7xxx_printk(KERN_ERR, "error reporting device not found:"
@@ -451,6 +455,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("%s(): more mci init\n", __func__);
mci->ctl_name = pvt->dev_info->ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
e7xxx_init_csrows(mci, pdev, dev_idx, drc);
@@ -473,11 +478,22 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
- if (edac_mc_add_mc(mci,0)) {
+ if (edac_mc_add_mc(mci)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail1;
}
+ /* allocating generic PCI control info */
+ e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!e7xxx_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
@@ -493,7 +509,7 @@ fail0:
/* returns count (>= 0), or negative on error */
static int __devinit e7xxx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
@@ -509,34 +525,33 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
debugf0("%s()\n", __func__);
+ if (e7xxx_pci)
+ edac_pci_release_generic_ctl(e7xxx_pci);
+
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
- pvt = (struct e7xxx_pvt *) mci->pvt_info;
+ pvt = (struct e7xxx_pvt *)mci->pvt_info;
pci_dev_put(pvt->bridge_ck);
edac_mc_free(mci);
}
static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
{
- PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7205
- },
+ PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7205},
{
- PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7500
- },
+ PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7500},
{
- PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7501
- },
+ PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7501},
{
- PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- E7505
- },
+ PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ E7505},
{
- 0,
- } /* 0 terminated list. */
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
@@ -563,5 +578,7 @@ module_exit(e7xxx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
- "Based on.work by Dan Hollis et al");
+	"Based on work by Dan Hollis et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_core.h
index 713444cc4105..4e6bad15c4ba 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_core.h
@@ -1,6 +1,7 @@
/*
- * MC kernel module
- * (C) 2003 Linux Networx (http://lnxi.com)
+ * Defines, structures, APIs for edac_core module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
@@ -11,12 +12,13 @@
* NMI handling support added by
* Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
*
- * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $
+ * Refactored for multi-source files:
+ * Doug Thompson <norsk5@xmission.com>
*
*/
-#ifndef _EDAC_MC_H_
-#define _EDAC_MC_H_
+#ifndef _EDAC_CORE_H_
+#define _EDAC_CORE_H_
#include <linux/kernel.h>
#include <linux/types.h>
@@ -30,9 +32,14 @@
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/platform_device.h>
+#include <linux/sysdev.h>
+#include <linux/workqueue.h>
+#include <linux/version.h>
#define EDAC_MC_LABEL_LEN 31
-#define MC_PROC_NAME_MAX_LEN 7
+#define EDAC_DEVICE_NAME_LEN 31
+#define EDAC_ATTRIB_VALUE_LEN 15
+#define MC_PROC_NAME_MAX_LEN 7
#if PAGE_SHIFT < 20
#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) )
@@ -49,6 +56,14 @@
#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+/* edac_device printk */
+#define edac_device_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+/* edac_pci printk */
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+ printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
/* prefixes for edac_printk() and edac_mc_printk() */
#define EDAC_MC "MC"
#define EDAC_PCI "PCI"
@@ -60,7 +75,7 @@ extern int edac_debug_level;
#define edac_debug_printk(level, fmt, arg...) \
do { \
if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
+ edac_printk(KERN_EMERG, EDAC_DEBUG, fmt, ##arg); \
} while(0)
#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
@@ -69,7 +84,7 @@ extern int edac_debug_level;
#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
-#else /* !CONFIG_EDAC_DEBUG */
+#else /* !CONFIG_EDAC_DEBUG */
#define debugf0( ... )
#define debugf1( ... )
@@ -77,18 +92,14 @@ extern int edac_debug_level;
#define debugf3( ... )
#define debugf4( ... )
-#endif /* !CONFIG_EDAC_DEBUG */
+#endif /* !CONFIG_EDAC_DEBUG */
#define BIT(x) (1 << (x))
#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
PCI_DEVICE_ID_ ## vend ## _ ## dev
-#if defined(CONFIG_X86) && defined(CONFIG_PCI)
-#define dev_name(dev) pci_name(to_pci_dev(dev))
-#else
-#define dev_name(dev) to_platform_device(dev)->name
-#endif
+#define dev_name(dev) (dev)->dev_name
/* memory devices */
enum dev_type {
@@ -124,8 +135,9 @@ enum mem_type {
MEM_DDR, /* Double data rate SDRAM */
MEM_RDDR, /* Registered Double data rate SDRAM */
MEM_RMBS, /* Rambus DRAM */
- MEM_DDR2, /* DDR2 RAM */
- MEM_FB_DDR2, /* fully buffered DDR2 */
+ MEM_DDR2, /* DDR2 RAM */
+ MEM_FB_DDR2, /* fully buffered DDR2 */
+ MEM_RDDR2, /* Registered DDR2 RAM */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -141,6 +153,7 @@ enum mem_type {
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
@@ -181,16 +194,23 @@ enum scrub_type {
};
#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
-#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC_CORR)
-#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC_CORR)
+#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
#define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_SCRUB_TUNABLE)
#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
-#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC_CORR)
-#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR)
+#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+/* EDAC internal operation states */
+#define OP_ALLOC 0x100
+#define OP_RUNNING_POLL 0x201
+#define OP_RUNNING_INTERRUPT 0x202
+#define OP_RUNNING_POLL_INTR 0x203
+#define OP_OFFLINE 0x300
+
/*
* There are several things to be aware of that aren't at all obvious:
*
@@ -276,7 +296,7 @@ enum scrub_type {
struct channel_info {
int chan_idx; /* channel index */
u32 ce_count; /* Correctable Errors for this CHANNEL */
- char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
struct csrow_info *csrow; /* the parent */
};
@@ -297,15 +317,29 @@ struct csrow_info {
struct mem_ctl_info *mci; /* the parent */
struct kobject kobj; /* sysfs kobject for this csrow */
- struct completion kobj_complete;
- /* FIXME the number of CHANNELs might need to become dynamic */
+ /* channel information for this csrow */
u32 nr_channels;
struct channel_info *channels;
};
+/* mcidev_sysfs_attribute structure
+ * used for driver sysfs attributes and in mem_ctl_info
+ * sysfs top level entries
+ */
+struct mcidev_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mem_ctl_info *,char *);
+ ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
+};
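A hedged sketch of a driver-side attribute built on this hook; the attribute name and show routine are illustrative, but the NULL-terminated array is the form expected by the mc_driver_sysfs_attributes member declared further down in mem_ctl_info.

static ssize_t my_mci_index_show(struct mem_ctl_info *mci, char *data)
{
	/* hypothetical read-only attribute: report the MC index */
	return sprintf(data, "%d\n", mci->mc_idx);
}

static struct mcidev_sysfs_attribute my_mci_attributes[] = {
	{
		.attr = {.name = "mc_index", .mode = 0444},
		.show = my_mci_index_show,
	},
	{}	/* NULL terminated */
};

	/* in the driver's probe, before edac_mc_add_mc(): */
	mci->mc_driver_sysfs_attributes = my_mci_attributes;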
+
+/* MEMORY controller information structure
+ */
struct mem_ctl_info {
- struct list_head link; /* for global list of mem_ctl_info structs */
+ struct list_head link; /* for global list of mem_ctl_info structs */
+
+ struct module *owner; /* Module owner of this control struct */
+
unsigned long mtype_cap; /* memory types supported by mc */
unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
unsigned long edac_cap; /* configuration capabilities - this is
@@ -322,14 +356,15 @@ struct mem_ctl_info {
/* Translates sdram memory scrub rate given in bytes/sec to the
internal representation and configures whatever else needs
to be configured.
- */
- int (*set_sdram_scrub_rate) (struct mem_ctl_info *mci, u32 *bw);
+ */
+ int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
/* Get the current sdram memory scrub rate from the internal
representation and converts it to the closest matching
   bandwidth in bytes/sec.
- */
- int (*get_sdram_scrub_rate) (struct mem_ctl_info *mci, u32 *bw);
+ */
+ int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+
/* pointer to edac checking routine */
void (*edac_check) (struct mem_ctl_info * mci);
@@ -340,7 +375,7 @@ struct mem_ctl_info {
*/
/* FIXME - why not send the phys page to begin with? */
unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
- unsigned long page);
+ unsigned long page);
int mc_idx;
int nr_csrows;
struct csrow_info *csrows;
@@ -353,6 +388,7 @@ struct mem_ctl_info {
const char *mod_name;
const char *mod_ver;
const char *ctl_name;
+ const char *dev_name;
char proc_name[MC_PROC_NAME_MAX_LEN + 1];
void *pvt_info;
u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
@@ -369,14 +405,327 @@ struct mem_ctl_info {
/* edac sysfs device control */
struct kobject edac_mci_kobj;
- struct completion kobj_complete;
+
+ /* Additional top controller level attributes, but specified
+ * by the low level driver.
+ *
+ * Set by the low level driver to provide attributes at the
+ * controller level, same level as 'ue_count' and 'ce_count' above.
+ * An array of structures, NULL terminated
+ *
+ * If attributes are desired, then set to array of attributes
+ * If no attributes are desired, leave NULL
+ */
+ struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+
+ /* work struct for this MC */
+ struct delayed_work work;
+
+ /* the internal state of this controller instance */
+ int op_state;
+};
+
+/*
+ * The following are the structures to provide for a generic
+ * or abstract 'edac_device'. This set of structures and the
+ * code that implements the APIs for the same, provide for
+ * registering EDAC type devices which are NOT standard memory.
+ *
+ * CPU caches (L1 and L2)
+ * DMA engines
+ *	Core CPU switches
+ * Fabric switch units
+ * PCIe interface controllers
+ * other EDAC/ECC type devices that can be monitored for
+ * errors, etc.
+ *
+ * It allows for a 2-level hierarchy. For example:
+ *
+ * cache could be composed of L1, L2 and L3 levels of cache.
+ * Each CPU core would have its own L1 cache, while sharing
+ * L2 and maybe L3 caches.
+ *
+ * View them arranged, via the sysfs presentation:
+ * /sys/devices/system/edac/..
+ *
+ * mc/ <existing memory device directory>
+ * cpu/cpu0/.. <L1 and L2 block directory>
+ * /L1-cache/ce_count
+ * /ue_count
+ * /L2-cache/ce_count
+ * /ue_count
+ * cpu/cpu1/.. <L1 and L2 block directory>
+ * /L1-cache/ce_count
+ * /ue_count
+ * /L2-cache/ce_count
+ * /ue_count
+ * ...
+ *
+ * the L1 and L2 directories would be "edac_device_block's"
+ */
+
+struct edac_device_counter {
+ u32 ue_count;
+ u32 ce_count;
+};
+
+/* forward reference */
+struct edac_device_ctl_info;
+struct edac_device_block;
+
+/* edac_dev_sysfs_attribute structure
+ * used for driver sysfs attributes in mem_ctl_info
+ * for extra controls and attributes:
+ * like high level error Injection controls
+ */
+struct edac_dev_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct edac_device_ctl_info *, char *);
+ ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
+};
+
+/* edac_dev_sysfs_block_attribute structure
+ *
+ * used in leaf 'block' nodes for adding controls/attributes
+ *
+ * each block in each instance of the containing control structure
+ * can have an array of the following. The show and store functions
+ * will be filled in with the show/store function in the
+ * low level driver.
+ *
+ * The 'value' field will be the actual value field used for
+ * counting
+ */
+struct edac_dev_sysfs_block_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *,
+ const char *, size_t);
+ struct edac_device_block *block;
+
+ unsigned int value;
+};
+
+/* device block control structure */
+struct edac_device_block {
+ struct edac_device_instance *instance; /* Up Pointer */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+ struct edac_device_counter counters; /* basic UE and CE counters */
+
+ int nr_attribs; /* how many attributes */
+
+ /* this block's attributes, could be NULL */
+ struct edac_dev_sysfs_block_attribute *block_attributes;
+
+ /* edac sysfs device control */
+ struct kobject kobj;
+};
+
+/* device instance control structure */
+struct edac_device_instance {
+ struct edac_device_ctl_info *ctl; /* Up pointer */
+ char name[EDAC_DEVICE_NAME_LEN + 4];
+
+ struct edac_device_counter counters; /* instance counters */
+
+ u32 nr_blocks; /* how many blocks */
+ struct edac_device_block *blocks; /* block array */
+
+ /* edac sysfs device control */
+ struct kobject kobj;
+};
+
+
+/*
+ * Abstract edac_device control info structure
+ *
+ */
+struct edac_device_ctl_info {
+ /* for global list of edac_device_ctl_info structs */
+ struct list_head link;
+
+ struct module *owner; /* Module owner of this control struct */
+
+ int dev_idx;
+
+ /* Per instance controls for this edac_device */
+ int log_ue; /* boolean for logging UEs */
+ int log_ce; /* boolean for logging CEs */
+ int panic_on_ue; /* boolean for panic'ing on an UE */
+	unsigned poll_msec;	/* polling interval in milliseconds */
+ unsigned long delay; /* number of jiffies for poll_msec */
+
+ /* Additional top controller level attributes, but specified
+ * by the low level driver.
+ *
+ * Set by the low level driver to provide attributes at the
+ * controller level, same level as 'ue_count' and 'ce_count' above.
+ * An array of structures, NULL terminated
+ *
+ * If attributes are desired, then set to array of attributes
+ * If no attributes are desired, leave NULL
+ */
+ struct edac_dev_sysfs_attribute *sysfs_attributes;
+
+ /* pointer to main 'edac' class in sysfs */
+ struct sysdev_class *edac_class;
+
+ /* the internal state of this controller instance */
+ int op_state;
+ /* work struct for this instance */
+ struct delayed_work work;
+
+ /* pointer to edac polling checking routine:
+ * If NOT NULL: points to polling check routine
+ * If NULL: Then assumes INTERRUPT operation, where
+ * MC driver will receive events
+ */
+ void (*edac_check) (struct edac_device_ctl_info * edac_dev);
+
+ struct device *dev; /* pointer to device structure */
+
+ const char *mod_name; /* module name */
+ const char *ctl_name; /* edac controller name */
+ const char *dev_name; /* pci/platform/etc... name */
+
+ void *pvt_info; /* pointer to 'private driver' info */
+
+ unsigned long start_time; /* edac_device load start time (jiffies) */
+
+ /* these are for safe removal of mc devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion removal_complete;
+
+ /* sysfs top name under 'edac' directory
+ * and instance name:
+ * cpu/cpu0/...
+ * cpu/cpu1/...
+ * cpu/cpu2/...
+ * ...
+ */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+ /* Number of instances supported on this control structure
+ * and the array of those instances
+ */
+ u32 nr_instances;
+ struct edac_device_instance *instances;
+
+	/* Event counters for this whole EDAC Device */
+ struct edac_device_counter counters;
+
+ /* edac sysfs device control for the 'name'
+ * device this structure controls
+ */
+ struct kobject kobj;
};
+/* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+ container_of(w, struct mem_ctl_info, work)
+
+#define to_edac_device_ctl_work(w) \
+ container_of(w,struct edac_device_ctl_info,work)
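A sketch of how these helpers are meant to be used from a periodic worker; the function name is illustrative, and edac_check is the polling hook defined in the control structures above.

static void my_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work =
		container_of(work_req, struct delayed_work, work);
	struct edac_device_ctl_info *edac_dev =
		to_edac_device_ctl_work(d_work);

	/* run the low level driver's polling check, if one was set */
	if (edac_dev->edac_check)
		edac_dev->edac_check(edac_dev);
}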
+
+/*
+ * The alloc() and free() functions for the 'edac_device' control info
+ * structure. A MC driver will allocate one of these for each edac_device
+ * it is going to control/register with the EDAC CORE.
+ */
+extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+ unsigned sizeof_private,
+ char *edac_device_name, unsigned nr_instances,
+ char *edac_block_name, unsigned nr_blocks,
+ unsigned offset_value,
+ struct edac_dev_sysfs_block_attribute *block_attributes,
+ unsigned nr_attribs,
+ int device_index);
+
+/* The offset value can be:
+ * -1 indicating no offset value
+ * 0 for zero-based block numbers
+ * 1 for 1-based block number
+ * other for other-based block number
+ */
+#define BLOCK_OFFSET_VALUE_OFF ((unsigned) -1)
+
+extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
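An allocation sketch matching the two-level layout described in the comment block above: two CPU instances with two blocks each, 1-based block numbering so the blocks come out as "L1" and "L2". No private data and no per-block attributes are requested; all names are illustrative.

	struct edac_device_ctl_info *edac_dev;

	edac_dev = edac_device_alloc_ctl_info(0,	/* no private data */
				"cpu", 2,	/* instances: cpu0, cpu1 */
				"L", 2,		/* two blocks per instance */
				1,		/* 1-based block numbering */
				NULL, 0,	/* no block attributes */
				0);		/* edac device index */
	if (edac_dev == NULL)
		return -ENOMEM;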
+
#ifdef CONFIG_PCI
+struct edac_pci_counter {
+ atomic_t pe_count;
+ atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+ /* for global list of edac_pci_ctl_info structs */
+ struct list_head link;
+
+ int pci_idx;
+
+ struct sysdev_class *edac_class; /* pointer to class */
+
+ /* the internal state of this controller instance */
+ int op_state;
+ /* work struct for this instance */
+ struct delayed_work work;
+
+ /* pointer to edac polling checking routine:
+ * If NOT NULL: points to polling check routine
+ * If NULL: Then assumes INTERRUPT operation, where
+ * MC driver will receive events
+ */
+ void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+ struct device *dev; /* pointer to device structure */
+
+ const char *mod_name; /* module name */
+ const char *ctl_name; /* edac controller name */
+ const char *dev_name; /* pci/platform/etc... name */
+
+ void *pvt_info; /* pointer to 'private driver' info */
+
+ unsigned long start_time; /* edac_pci load start time (jiffies) */
+
+ /* these are for safe removal of devices from global list while
+ * NMI handlers may be traversing list
+ */
+ struct rcu_head rcu;
+ struct completion complete;
+
+ /* sysfs top name under 'edac' directory
+ * and instance name:
+ * cpu/cpu0/...
+ * cpu/cpu1/...
+ * cpu/cpu2/...
+ * ...
+ */
+ char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Event counters for this whole EDAC Device */
+ struct edac_pci_counter counters;
+
+ /* edac sysfs device control for the 'name'
+ * device this structure controls
+ */
+ struct kobject kobj;
+ struct completion kobj_complete;
+};
+
+#define to_edac_pci_ctl_work(w) \
+ container_of(w, struct edac_pci_ctl_info,work)
+
/* write all or some bits in a byte-register*/
static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
- u8 mask)
+ u8 mask)
{
if (mask != 0xff) {
u8 buf;
@@ -392,7 +741,7 @@ static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
/* write all or some bits in a word-register*/
static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
- u16 value, u16 mask)
+ u16 value, u16 mask)
{
if (mask != 0xffff) {
u16 buf;
@@ -408,7 +757,7 @@ static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
/* write all or some bits in a dword-register*/
static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
- u32 value, u32 mask)
+ u32 value, u32 mask)
{
	if (mask != 0xffffffff) {
u32 buf;
@@ -422,20 +771,16 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
pci_write_config_dword(pdev, offset, value);
}
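Usage note, mirroring the e752x changes earlier in this patch: because the mask limits the read-modify-write to the selected bits, passing a latched error value as both value and mask writes back exactly the bits that were set, which is how the sticky first-error registers are cleared.

	if (info->dram_ferr)
		pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
				 info->dram_ferr, info->dram_ferr);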
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI */
-#ifdef CONFIG_EDAC_DEBUG
-void edac_mc_dump_channel(struct channel_info *chan);
-void edac_mc_dump_mci(struct mem_ctl_info *mci);
-void edac_mc_dump_csrow(struct csrow_info *csrow);
-#endif /* CONFIG_EDAC_DEBUG */
-
-extern int edac_mc_add_mc(struct mem_ctl_info *mci,int mc_idx);
-extern struct mem_ctl_info * edac_mc_del_mc(struct device *dev);
+extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
+ unsigned nr_chans, int edac_index);
+extern int edac_mc_add_mc(struct mem_ctl_info *mci);
+extern void edac_mc_free(struct mem_ctl_info *mci);
+extern struct mem_ctl_info *edac_mc_find(int idx);
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
- unsigned long page);
-extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
- u32 size);
+ unsigned long page);
/*
* The no info errors are used when error overflows are reported.
@@ -448,34 +793,59 @@ extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
* statement clutter and extra function arguments.
*/
extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number, unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg);
+ unsigned long page_frame_number,
+ unsigned long offset_in_page,
+ unsigned long syndrome, int row, int channel,
+ const char *msg);
extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
+ const char *msg);
extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number, unsigned long offset_in_page,
- int row, const char *msg);
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row,
+ const char *msg);
extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channel0,
- unsigned int channel1,
- char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channel,
- char *msg);
+ const char *msg);
+extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
+ unsigned int channel0, unsigned int channel1,
+ char *msg);
+extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
+ unsigned int channel, char *msg);
/*
- * This kmalloc's and initializes all the structures.
- * Can't be used if all structures don't have the same lifetime.
+ * edac_device APIs
*/
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans);
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
+extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg);
+extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg);
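A sketch of a polling check routine for the edac_device API; the hardware poll is hypothetical, only the edac_device_handle_ce() call is part of this interface. Instance and block numbers index into the arrays set up by edac_device_alloc_ctl_info().

static void my_l2_edac_check(struct edac_device_ctl_info *edac_dev)
{
	/* hypothetical poll of a correctable-error flag in hardware */
	if (my_read_l2_ce_flag())
		edac_device_handle_ce(edac_dev, 0 /* instance */,
					1 /* block */, "L2 cache CE");
}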
-/* Free an mc previously allocated by edac_mc_alloc() */
-extern void edac_mc_free(struct mem_ctl_info *mci);
+/*
+ * edac_pci APIs
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+ const char *edac_pci_name);
+
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+ unsigned long value);
+
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+ struct device *dev,
+ const char *mod_name);
+
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
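The generic create/release pair is the pattern the amd76x, e752x and e7xxx probe/remove paths above switch to; in sketch form, with my_pci standing in for the driver's static pointer:

	/* probe: publish a PCI error control under EDAC sysfs */
	my_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!my_pci)
		printk(KERN_WARNING "PCI error report via EDAC not setup\n");

	/* remove: tear the control back down */
	if (my_pci)
		edac_pci_release_generic_ctl(my_pci);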
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
-#endif /* _EDAC_MC_H_ */
+#endif /* _EDAC_CORE_H_ */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
new file mode 100644
index 000000000000..f3690a697cf9
--- /dev/null
+++ b/drivers/edac/edac_device.c
@@ -0,0 +1,746 @@
+
+/*
+ * edac_device.c
+ * (C) 2007 www.douglaskthompson.com
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ * edac_device API implementation
+ * 19 Jan 2007
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* lock for the list: 'edac_device_list', manipulation of this list
+ * is protected by the 'device_ctls_mutex' lock
+ */
+static DEFINE_MUTEX(device_ctls_mutex);
+static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list);
+
+#ifdef CONFIG_EDAC_DEBUG
+static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+{
+ debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
+ debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
+ debugf3("\tdev = %p\n", edac_dev->dev);
+ debugf3("\tmod_name:ctl_name = %s:%s\n",
+ edac_dev->mod_name, edac_dev->ctl_name);
+ debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
+}
+#endif /* CONFIG_EDAC_DEBUG */
+
+
+/*
+ * edac_device_alloc_ctl_info()
+ * Allocate a new edac device control info structure
+ *
+ * The control structure is allocated in one complete chunk
+ * from the OS. It is in turn sub-allocated to the
+ * various objects that compose the structure.
+ *
+ * The structure has a 'nr_instances' array within itself.
+ * Each instance represents a major component;
+ * for example, L1 cache and L2 cache are 2 instance components.
+ *
+ * Within each instance is an array of 'nr_blocks' blocks.
+ */
+struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+ unsigned sz_private,
+ char *edac_device_name, unsigned nr_instances,
+ char *edac_block_name, unsigned nr_blocks,
+ unsigned offset_value, /* zero, 1, or other based offset */
+ struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
+ int device_index)
+{
+ struct edac_device_ctl_info *dev_ctl;
+ struct edac_device_instance *dev_inst, *inst;
+ struct edac_device_block *dev_blk, *blk_p, *blk;
+ struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
+ unsigned total_size;
+ unsigned count;
+ unsigned instance, block, attr;
+ void *pvt;
+ int err;
+
+ debugf4("%s() instances=%d blocks=%d\n",
+ __func__, nr_instances, nr_blocks);
+
+ /* Calculate the size of memory we need to allocate AND
+ * determine the offsets of the various item arrays
+ * (instance,block,attrib) from the start of an allocated structure.
+ * We want the alignment of each item (instance,block,attrib)
+ * to be at least as stringent as what the compiler would
+ * provide if we could simply hardcode everything into a single struct.
+ */
+ dev_ctl = (struct edac_device_ctl_info *)NULL;
+
+ /* Calc the 'end' offset past end of ONE ctl_info structure
+ * which will become the start of the 'instance' array
+ */
+ dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+
+ /* Calc the 'end' offset past the instance array within the ctl_info
+ * which will become the start of the block array
+ */
+ dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+
+ /* Calc the 'end' offset past the dev_blk array
+ * which will become the start of the attrib array, if any.
+ */
+ count = nr_instances * nr_blocks;
+ dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
+
+ /* Check for case of when an attribute array is specified */
+ if (nr_attrib > 0) {
+ /* calc how many nr_attrib we need */
+ count *= nr_attrib;
+
+ /* Calc the 'end' offset past the attributes array */
+ pvt = edac_align_ptr(&dev_attrib[count], sz_private);
+ } else {
+		/* no attribute array specified */
+ pvt = edac_align_ptr(dev_attrib, sz_private);
+ }
+
+ /* 'pvt' now points to where the private data area is.
+ * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
+ * is baselined at ZERO
+ */
+ total_size = ((unsigned long)pvt) + sz_private;
+
+ /* Allocate the amount of memory for the set of control structures */
+ dev_ctl = kzalloc(total_size, GFP_KERNEL);
+ if (dev_ctl == NULL)
+ return NULL;
+
+ /* Adjust pointers so they point within the actual memory we
+ * just allocated rather than an imaginary chunk of memory
+ * located at address 0.
+ * 'dev_ctl' points to REAL memory, while the others are
+ * ZERO based and thus need to be adjusted to point within
+ * the allocated memory.
+ */
+ dev_inst = (struct edac_device_instance *)
+ (((char *)dev_ctl) + ((unsigned long)dev_inst));
+ dev_blk = (struct edac_device_block *)
+ (((char *)dev_ctl) + ((unsigned long)dev_blk));
+ dev_attrib = (struct edac_dev_sysfs_block_attribute *)
+ (((char *)dev_ctl) + ((unsigned long)dev_attrib));
+ pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
+
+ /* Begin storing the information into the control info structure */
+ dev_ctl->dev_idx = device_index;
+ dev_ctl->nr_instances = nr_instances;
+ dev_ctl->instances = dev_inst;
+ dev_ctl->pvt_info = pvt;
+
+ /* Name of this edac device */
+ snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);
+
+ debugf4("%s() edac_dev=%p next after end=%p\n",
+ __func__, dev_ctl, pvt + sz_private);
+
+ /* Initialize every Instance */
+ for (instance = 0; instance < nr_instances; instance++) {
+ inst = &dev_inst[instance];
+ inst->ctl = dev_ctl;
+ inst->nr_blocks = nr_blocks;
+ blk_p = &dev_blk[instance * nr_blocks];
+ inst->blocks = blk_p;
+
+ /* name of this instance */
+ snprintf(inst->name, sizeof(inst->name),
+ "%s%u", edac_device_name, instance);
+
+ /* Initialize every block in each instance */
+ for (block = 0; block < nr_blocks; block++) {
+ blk = &blk_p[block];
+ blk->instance = inst;
+ snprintf(blk->name, sizeof(blk->name),
+ "%s%d", edac_block_name, block+offset_value);
+
+ debugf4("%s() instance=%d inst_p=%p block=#%d "
+ "block_p=%p name='%s'\n",
+ __func__, instance, inst, block,
+ blk, blk->name);
+
+ /* if there are NO attributes OR no attribute pointer
+ * then continue on to next block iteration
+ */
+ if ((nr_attrib == 0) || (attrib_spec == NULL))
+ continue;
+
+ /* setup the attribute array for this block */
+ blk->nr_attribs = nr_attrib;
+ attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
+ blk->block_attributes = attrib_p;
+
+ debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
+ __func__, blk->block_attributes);
+
+ /* Initialize every user specified attribute in this
+ * block with the data the caller passed in
+ * Each block gets its own copy of pointers,
+ * and its unique 'value'
+ */
+ for (attr = 0; attr < nr_attrib; attr++) {
+ attrib = &attrib_p[attr];
+
+ /* populate the unique per attrib
+ * with the code pointers and info
+ */
+ attrib->attr = attrib_spec[attr].attr;
+ attrib->show = attrib_spec[attr].show;
+ attrib->store = attrib_spec[attr].store;
+
+ attrib->block = blk; /* up link */
+
+ debugf4("%s() alloc-attrib=%p attrib_name='%s' "
+ "attrib-spec=%p spec-name=%s\n",
+ __func__, attrib, attrib->attr.name,
+ &attrib_spec[attr],
+ attrib_spec[attr].attr.name
+ );
+ }
+ }
+ }
+
+ /* Mark this instance as merely ALLOCATED */
+ dev_ctl->op_state = OP_ALLOC;
+
+ /*
+ * Initialize the 'root' kobj for the edac_device controller
+ */
+ err = edac_device_register_sysfs_main_kobj(dev_ctl);
+ if (err) {
+ kfree(dev_ctl);
+ return NULL;
+ }
+
+ /* at this point, the root kobj is valid, and in order to
+ * 'free' the object, then the function:
+ * edac_device_unregister_sysfs_main_kobj() must be called
+ * which will perform kobj unregistration and the actual free
+ * will occur during the kobject callback operation
+ */
+
+ return dev_ctl;
+}
+EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
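+
+/* A minimal usage sketch (illustrative, not part of this patch): a
+ * hypothetical low-level driver exporting 4 cache instances, each
+ * with an "L1" and an "L2" block and no extra per-block attributes,
+ * might allocate its control structure like this; 'struct mydrv_pvt'
+ * and the literal values are assumptions for the example only:
+ *
+ * struct edac_device_ctl_info *edac_dev;
+ *
+ * edac_dev = edac_device_alloc_ctl_info(sizeof(struct mydrv_pvt),
+ * "cache", 4, "L", 2, 1,
+ * NULL, 0, 0);
+ * if (edac_dev == NULL)
+ * return -ENOMEM;
+ */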
+
+/*
+ * edac_device_free_ctl_info()
+ * frees the memory allocated by the edac_device_alloc_ctl_info()
+ * function
+ */
+void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
+{
+ edac_device_unregister_sysfs_main_kobj(ctl_info);
+}
+EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
+
+/*
+ * find_edac_device_by_dev
+ * scans the edac_device list for a specific 'struct device *'
+ *
+ * lock to be held prior to call: device_ctls_mutex
+ *
+ * Return:
+ * pointer to control structure managing 'dev'
+ * NULL if not found on list
+ */
+static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct list_head *item;
+
+ debugf0("%s()\n", __func__);
+
+ list_for_each(item, &edac_device_list) {
+ edac_dev = list_entry(item, struct edac_device_ctl_info, link);
+
+ if (edac_dev->dev == dev)
+ return edac_dev;
+ }
+
+ return NULL;
+}
+
+/*
+ * add_edac_dev_to_global_list
+ * Before calling this function, caller must
+ * assign a unique value to edac_dev->dev_idx.
+ *
+ * lock to be held prior to call: device_ctls_mutex
+ *
+ * Return:
+ * 0 on success
+ * 1 on failure.
+ */
+static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
+{
+ struct list_head *item, *insert_before;
+ struct edac_device_ctl_info *rover;
+
+ insert_before = &edac_device_list;
+
+ /* Determine if already on the list */
+ rover = find_edac_device_by_dev(edac_dev->dev);
+ if (unlikely(rover != NULL))
+ goto fail0;
+
+ /* Insert in ascending order by 'dev_idx', so find position */
+ list_for_each(item, &edac_device_list) {
+ rover = list_entry(item, struct edac_device_ctl_info, link);
+
+ if (rover->dev_idx >= edac_dev->dev_idx) {
+ if (unlikely(rover->dev_idx == edac_dev->dev_idx))
+ goto fail1;
+
+ insert_before = item;
+ break;
+ }
+ }
+
+ list_add_tail_rcu(&edac_dev->link, insert_before);
+ return 0;
+
+fail0:
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "%s (%s) %s %s already assigned %d\n",
+ rover->dev->bus_id, dev_name(rover),
+ rover->mod_name, rover->ctl_name, rover->dev_idx);
+ return 1;
+
+fail1:
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "bug in low-level driver: attempt to assign\n"
+ " duplicate dev_idx %d in %s()\n", rover->dev_idx,
+ __func__);
+ return 1;
+}
+
+/*
+ * complete_edac_device_list_del
+ *
+ * callback function when reference count is zero
+ */
+static void complete_edac_device_list_del(struct rcu_head *head)
+{
+ struct edac_device_ctl_info *edac_dev;
+
+ edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
+ INIT_LIST_HEAD(&edac_dev->link);
+ complete(&edac_dev->removal_complete);
+}
+
+/*
+ * del_edac_device_from_global_list
+ *
+ * remove the RCU, setup for a callback call,
+ * then wait for the callback to occur
+ */
+static void del_edac_device_from_global_list(struct edac_device_ctl_info
+ *edac_device)
+{
+ list_del_rcu(&edac_device->link);
+
+ init_completion(&edac_device->removal_complete);
+ call_rcu(&edac_device->rcu, complete_edac_device_list_del);
+ wait_for_completion(&edac_device->removal_complete);
+}
+
+/**
+ * edac_device_find
+ * Search for an edac_device_ctl_info structure whose index is 'idx'.
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ *
+ * Caller must hold device_ctls_mutex.
+ */
+struct edac_device_ctl_info *edac_device_find(int idx)
+{
+ struct list_head *item;
+ struct edac_device_ctl_info *edac_dev;
+
+ /* Iterate over list, looking for exact match of ID */
+ list_for_each(item, &edac_device_list) {
+ edac_dev = list_entry(item, struct edac_device_ctl_info, link);
+
+ if (edac_dev->dev_idx >= idx) {
+ if (edac_dev->dev_idx == idx)
+ return edac_dev;
+
+ /* not on list, so terminate early */
+ break;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(edac_device_find);
+
+/*
+ * edac_device_workq_function
+ * performs the operation scheduled by a workq request
+ *
+ * this workq is embedded within an edac_device_ctl_info
+ * structure, that needs to be polled for possible error events.
+ *
+ * This operation is to acquire the list mutex lock
+ * (thus preventing insertion or deletion)
+ * and then call the device's poll function IFF this device is
+ * running polled and there is a poll function defined.
+ */
+static void edac_device_workq_function(struct work_struct *work_req)
+{
+ struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);
+
+ mutex_lock(&device_ctls_mutex);
+
+ /* Only poll controllers that are running polled and have a check */
+ if ((edac_dev->op_state == OP_RUNNING_POLL) &&
+ (edac_dev->edac_check != NULL)) {
+ edac_dev->edac_check(edac_dev);
+ }
+
+ mutex_unlock(&device_ctls_mutex);
+
+ /* Reschedule the workq for the next time period to start again.
+ * If the number of msec is for 1 sec, then adjust to the next
+ * whole one second so that timers are not firing all over the
+ * period between integral seconds
+ */
+ if (edac_dev->poll_msec == 1000)
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ round_jiffies(edac_dev->delay));
+ else
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_setup
+ * initialize a workq item for this edac_device instance
+ * passing in the new delay period in msec
+ */
+void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ unsigned msec)
+{
+ debugf0("%s()\n", __func__);
+
+ /* take the arg 'msec' and store it in the control structure
+ * to be used in the time period calculation,
+ * then calc the number of jiffies that period represents
+ */
+ edac_dev->poll_msec = msec;
+ edac_dev->delay = msecs_to_jiffies(msec);
+
+ INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
+
+ /* optimize here for the 1 second case, which will be the normal
+ * value, to fire ON the 1 second time event. This helps reduce
+ * all sorts of timers firing on a sub-second basis, while they
+ * all happily fire together exactly on the 1 second boundary
+ */
+ if (edac_dev->poll_msec == 1000)
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ round_jiffies(edac_dev->delay));
+ else
+ queue_delayed_work(edac_workqueue, &edac_dev->work,
+ edac_dev->delay);
+}
+
+/*
+ * edac_device_workq_teardown
+ * stop the workq processing on this edac_dev
+ */
+void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+{
+ int status;
+
+ status = cancel_delayed_work(&edac_dev->work);
+ if (status == 0) {
+ /* workq instance might be running, wait for it */
+ flush_workqueue(edac_workqueue);
+ }
+}
+
+/*
+ * edac_device_reset_delay_period
+ *
+ * need to stop any outstanding workq queued up at this time
+ * because we will be resetting the sleep time.
+ * Then restart the workq on the new delay
+ */
+void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+ unsigned long value)
+{
+ /* cancel the current workq request, without the mutex lock */
+ edac_device_workq_teardown(edac_dev);
+
+ /* acquire the mutex before doing the workq setup */
+ mutex_lock(&device_ctls_mutex);
+
+ /* restart the workq request, with new delay value */
+ edac_device_workq_setup(edac_dev, value);
+
+ mutex_unlock(&device_ctls_mutex);
+}
+
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ * edac_device global list and create sysfs entries associated with
+ * edac_device structure.
+ * @edac_dev: pointer to the edac_device structure to be added to the list
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+{
+ debugf0("%s()\n", __func__);
+
+#ifdef CONFIG_EDAC_DEBUG
+ if (edac_debug_level >= 3)
+ edac_device_dump_device(edac_dev);
+#endif
+ mutex_lock(&device_ctls_mutex);
+
+ if (add_edac_dev_to_global_list(edac_dev))
+ goto fail0;
+
+ /* set load time so that error rate can be tracked */
+ edac_dev->start_time = jiffies;
+
+ /* create this instance's sysfs entries */
+ if (edac_device_create_sysfs(edac_dev)) {
+ edac_device_printk(edac_dev, KERN_WARNING,
+ "failed to create sysfs device\n");
+ goto fail1;
+ }
+
+ /* If there IS a check routine, then we are running POLLED */
+ if (edac_dev->edac_check != NULL) {
+ /* This instance is NOW RUNNING */
+ edac_dev->op_state = OP_RUNNING_POLL;
+
+ /*
+ * enable workq processing on this instance,
+ * default = 1000 msec
+ */
+ edac_device_workq_setup(edac_dev, 1000);
+ } else {
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ /* Report action taken */
+ edac_device_printk(edac_dev, KERN_INFO,
+ "Giving out device to module '%s' controller "
+ "'%s': DEV '%s' (%s)\n",
+ edac_dev->mod_name,
+ edac_dev->ctl_name,
+ dev_name(edac_dev),
+ edac_op_state_to_string(edac_dev->op_state));
+
+ mutex_unlock(&device_ctls_mutex);
+ return 0;
+
+fail1:
+ /* Some error, so remove the entry from the list */
+ del_edac_device_from_global_list(edac_dev);
+
+fail0:
+ mutex_unlock(&device_ctls_mutex);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(edac_device_add_device);
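+
+/* A registration sketch (illustrative, not part of this patch): after
+ * allocating the control structure as above, a hypothetical driver's
+ * probe routine would typically fill in its identification fields and
+ * optional polling callback before registering. 'pdev', 'mydrv_check'
+ * and the name strings below are assumptions for the example only:
+ *
+ * edac_dev->dev = &pdev->dev;
+ * edac_dev->mod_name = "mydrv_edac";
+ * edac_dev->ctl_name = "mydrv_cache";
+ * edac_dev->edac_check = mydrv_check;
+ *
+ * (leave edac_check NULL for a purely interrupt driven device)
+ *
+ * if (edac_device_add_device(edac_dev)) {
+ * edac_device_free_ctl_info(edac_dev);
+ * return -ENODEV;
+ * }
+ */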
+
+/**
+ * edac_device_del_device:
+ * Remove sysfs entries for specified edac_device structure and
+ * then remove edac_device structure from global list
+ *
+ * @dev:
+ * Pointer to 'struct device' representing edac_device
+ * structure to remove.
+ *
+ * Return:
+ * Pointer to removed edac_device structure,
+ * OR NULL if device not found.
+ */
+struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
+{
+ struct edac_device_ctl_info *edac_dev;
+
+ debugf0("%s()\n", __func__);
+
+ mutex_lock(&device_ctls_mutex);
+
+ /* Find the structure on the list, if not there, then leave */
+ edac_dev = find_edac_device_by_dev(dev);
+ if (edac_dev == NULL) {
+ mutex_unlock(&device_ctls_mutex);
+ return NULL;
+ }
+
+ /* mark this instance as OFFLINE */
+ edac_dev->op_state = OP_OFFLINE;
+
+ /* clear workq processing on this instance */
+ edac_device_workq_teardown(edac_dev);
+
+ /* deregister from global list */
+ del_edac_device_from_global_list(edac_dev);
+
+ mutex_unlock(&device_ctls_mutex);
+
+ /* Tear down the sysfs entries for this instance */
+ edac_device_remove_sysfs(edac_dev);
+
+ edac_printk(KERN_INFO, EDAC_MC,
+ "Removed device %d for %s %s: DEV %s\n",
+ edac_dev->dev_idx,
+ edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev));
+
+ return edac_dev;
+}
+EXPORT_SYMBOL_GPL(edac_device_del_device);
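+
+/* A matching removal sketch (illustrative, not part of this patch):
+ * a hypothetical driver's remove routine undoes the registration and
+ * then drops the control structure; 'pdev' is the same hypothetical
+ * platform device used at add time:
+ *
+ * edac_dev = edac_device_del_device(&pdev->dev);
+ * if (edac_dev)
+ * edac_device_free_ctl_info(edac_dev);
+ */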
+
+static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
+{
+ return edac_dev->log_ce;
+}
+
+static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
+{
+ return edac_dev->log_ue;
+}
+
+static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
+ *edac_dev)
+{
+ return edac_dev->panic_on_ue;
+}
+
+/*
+ * edac_device_handle_ce
+ * perform a common output and handling of an 'edac_dev' CE event
+ */
+void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg)
+{
+ struct edac_device_instance *instance;
+ struct edac_device_block *block = NULL;
+
+ if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: 'instance' out of range "
+ "(%d >= %d)\n", inst_nr,
+ edac_dev->nr_instances);
+ return;
+ }
+
+ instance = edac_dev->instances + inst_nr;
+
+ if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: instance %d 'block' "
+ "out of range (%d >= %d)\n",
+ inst_nr, block_nr,
+ instance->nr_blocks);
+ return;
+ }
+
+ if (instance->nr_blocks > 0) {
+ block = instance->blocks + block_nr;
+ block->counters.ce_count++;
+ }
+
+ /* Propagate the count up the 'totals' tree */
+ instance->counters.ce_count++;
+ edac_dev->counters.ce_count++;
+
+ if (edac_device_get_log_ce(edac_dev))
+ edac_device_printk(edac_dev, KERN_WARNING,
+ "CE: %s instance: %s block: %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ce);
+
+/*
+ * edac_device_handle_ue
+ * perform a common output and handling of an 'edac_dev' UE event
+ */
+void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+ int inst_nr, int block_nr, const char *msg)
+{
+ struct edac_device_instance *instance;
+ struct edac_device_block *block = NULL;
+
+ if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: 'instance' out of range "
+ "(%d >= %d)\n", inst_nr,
+ edac_dev->nr_instances);
+ return;
+ }
+
+ instance = edac_dev->instances + inst_nr;
+
+ if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
+ edac_device_printk(edac_dev, KERN_ERR,
+ "INTERNAL ERROR: instance %d 'block' "
+ "out of range (%d >= %d)\n",
+ inst_nr, block_nr,
+ instance->nr_blocks);
+ return;
+ }
+
+ if (instance->nr_blocks > 0) {
+ block = instance->blocks + block_nr;
+ block->counters.ue_count++;
+ }
+
+ /* Propagate the count up the 'totals' tree */
+ instance->counters.ue_count++;
+ edac_dev->counters.ue_count++;
+
+ if (edac_device_get_log_ue(edac_dev))
+ edac_device_printk(edac_dev, KERN_EMERG,
+ "UE: %s instance: %s block: %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
+
+ if (edac_device_get_panic_on_ue(edac_dev))
+ panic("EDAC %s: UE instance: %s block %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
+}
+EXPORT_SYMBOL_GPL(edac_device_handle_ue);
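+
+/* A sketch (not part of this patch) of a polling callback that feeds
+ * these handlers; 'mydrv_check', 'mydrv_read_error_status' and the
+ * MYDRV_* status bits are assumptions for the example only:
+ *
+ * static void mydrv_check(struct edac_device_ctl_info *edac_dev)
+ * {
+ * u32 status = mydrv_read_error_status();
+ *
+ * if (status & MYDRV_CE)
+ * edac_device_handle_ce(edac_dev, 0, 0, "single-bit error");
+ * if (status & MYDRV_UE)
+ * edac_device_handle_ue(edac_dev, 0, 0, "double-bit error");
+ * }
+ */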
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
new file mode 100644
index 000000000000..70b837f23c43
--- /dev/null
+++ b/drivers/edac/edac_device_sysfs.c
@@ -0,0 +1,896 @@
+/*
+ * file for managing the edac_device class of devices for EDAC
+ *
+ * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_DEVICE_SYMLINK "device"
+
+#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr)
+
+/*
+ * Set of edac_device_ctl_info attribute store/show functions
+ */
+
+/* 'log_ue' */
+static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->log_ue);
+}
+
+static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ /* if parameter is zero, turn off flag, if non-zero turn on flag */
+ ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+ return count;
+}
+
+/* 'log_ce' */
+static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->log_ce);
+}
+
+static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ /* if parameter is zero, turn off flag, if non-zero turn on flag */
+ ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
+
+ return count;
+}
+
+/* 'panic_on_ue' */
+static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->panic_on_ue);
+}
+
+static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ /* if parameter is zero, turn off flag, if non-zero turn on flag */
+ ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0);
+
+ return count;
+}
+
+/* 'poll_msec' show and store functions*/
+static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
+ *ctl_info, char *data)
+{
+ return sprintf(data, "%u\n", ctl_info->poll_msec);
+}
+
+static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
+ *ctl_info, const char *data,
+ size_t count)
+{
+ unsigned long value;
+
+ /* get the value; the delay period between scans should be
+ * at least one millisecond.
+ * Then cancel the last outstanding delayed work request
+ * and set a new one.
+ */
+ value = simple_strtoul(data, NULL, 0);
+ edac_device_reset_delay_period(ctl_info, value);
+
+ return count;
+}
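+
+/* Usage note (illustrative, not part of this patch): from user space
+ * the polling period is normally adjusted by writing milliseconds to
+ * this attribute, which funnels into edac_device_reset_delay_period():
+ *
+ * echo 2000 > /sys/devices/system/edac/<ctl-name>/poll_msec
+ */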
+
+/* edac_device_ctl_info specific attribute structure */
+struct ctl_info_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edac_device_ctl_info *, char *);
+ ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t);
+};
+
+#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj)
+#define to_ctl_info_attr(a) container_of(a, struct ctl_info_attribute, attr)
+
+/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+ struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+ if (ctl_info_attr->show)
+ return ctl_info_attr->show(edac_dev, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
+static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
+ struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
+
+ if (ctl_info_attr->store)
+ return ctl_info_attr->store(edac_dev, buffer, count);
+ return -EIO;
+}
+
+/* edac_dev file operations for an 'ctl_info' */
+static struct sysfs_ops device_ctl_info_ops = {
+ .show = edac_dev_ctl_info_show,
+ .store = edac_dev_ctl_info_store
+};
+
+#define CTL_INFO_ATTR(_name,_mode,_show,_store) \
+static struct ctl_info_attribute attr_ctl_info_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* Declare the various ctl_info attributes here and their respective ops */
+CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
+ edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
+CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
+ edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
+ edac_device_ctl_panic_on_ue_show,
+ edac_device_ctl_panic_on_ue_store);
+CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
+ edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
+
+/* Base Attributes of the EDAC_DEVICE ECC object */
+static struct ctl_info_attribute *device_ctrl_attr[] = {
+ &attr_ctl_info_panic_on_ue,
+ &attr_ctl_info_log_ue,
+ &attr_ctl_info_log_ce,
+ &attr_ctl_info_poll_msec,
+ NULL,
+};
+
+/*
+ * edac_device_ctrl_master_release
+ *
+ * called when the reference count for the 'main' kobj
+ * for a edac_device control struct reaches zero
+ *
+ * Reference count model:
+ * One 'main' kobject for each control structure allocated.
+ * That main kobj is initially set to one AND
+ * the reference count for the EDAC 'core' module is
+ * bumped by one, thus adding a 'keep in memory' dependency.
+ *
+ * Each new internal kobj (in instances and blocks) then
+ * bumps the 'main' kobject.
+ *
+ * When they are released their release functions decrement
+ * the 'main' kobj.
+ *
+ * When the main kobj reaches zero (0) then THIS function
+ * is called which then decrements the EDAC 'core' module.
+ * When the module reference count reaches zero then the
+ * module no longer has a dependency on keeping the release
+ * function code in memory, and the module can be unloaded.
+ *
+ * This will support several control objects as well, each
+ * with its own 'main' kobj.
+ */
+static void edac_device_ctrl_master_release(struct kobject *kobj)
+{
+ struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
+
+ debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);
+
+ /* decrement the EDAC CORE module ref count */
+ module_put(edac_dev->owner);
+
+ /* free the control struct containing the 'main' kobj
+ * passed in to this routine
+ */
+ kfree(edac_dev);
+}
+
+/* ktype for the main (master) kobject */
+static struct kobj_type ktype_device_ctrl = {
+ .release = edac_device_ctrl_master_release,
+ .sysfs_ops = &device_ctl_info_ops,
+ .default_attrs = (struct attribute **)device_ctrl_attr,
+};
+
+/*
+ * edac_device_register_sysfs_main_kobj
+ *
+ * perform the high level setup for the new edac_device instance
+ *
+ * Return: 0 SUCCESS
+ * !0 FAILURE
+ */
+int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+{
+ struct sysdev_class *edac_class;
+ int err;
+
+ debugf1("%s()\n", __func__);
+
+ /* get the /sys/devices/system/edac reference */
+ edac_class = edac_get_edac_class();
+ if (edac_class == NULL) {
+ debugf1("%s() no edac_class error\n", __func__);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Point to the 'edac_class' this instance 'reports' to */
+ edac_dev->edac_class = edac_class;
+
+ /* Init the device's kobject */
+ memset(&edac_dev->kobj, 0, sizeof(struct kobject));
+ edac_dev->kobj.ktype = &ktype_device_ctrl;
+
+ /* set this new device under the edac_class kobject */
+ edac_dev->kobj.parent = &edac_class->kset.kobj;
+
+ /* generate sysfs "..../edac/<name>" */
+ debugf4("%s() set name of kobject to: %s\n", __func__, edac_dev->name);
+ err = kobject_set_name(&edac_dev->kobj, "%s", edac_dev->name);
+ if (err)
+ goto err_out;
+
+ /* Record which module 'owns' this control structure
+ * and bump the ref count of the module
+ */
+ edac_dev->owner = THIS_MODULE;
+
+ if (!try_module_get(edac_dev->owner)) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* register */
+ err = kobject_register(&edac_dev->kobj);
+ if (err) {
+ debugf1("%s()Failed to register '.../edac/%s'\n",
+ __func__, edac_dev->name);
+ goto err_kobj_reg;
+ }
+
+ /* At this point, to 'free' the control struct,
+ * edac_device_unregister_sysfs_main_kobj() must be used
+ */
+
+ debugf4("%s() Registered '.../edac/%s' kobject\n",
+ __func__, edac_dev->name);
+
+ return 0;
+
+ /* Error exit stack */
+err_kobj_reg:
+ module_put(edac_dev->owner);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_unregister_sysfs_main_kobj:
+ * unregister the '..../edac/<name>' kobject
+ */
+void edac_device_unregister_sysfs_main_kobj(
+ struct edac_device_ctl_info *edac_dev)
+{
+ debugf0("%s()\n", __func__);
+ debugf4("%s() name of kobject is: %s\n",
+ __func__, kobject_name(&edac_dev->kobj));
+
+ /*
+ * Unregister the edac device's kobject and
+ * allow for reference count to reach 0 at which point
+ * the callback will be called to:
+ * a) module_put() this module
+ * b) 'kfree' the memory
+ */
+ kobject_unregister(&edac_dev->kobj);
+}
+
+/* edac_dev -> instance information */
+
+/*
+ * Set of low-level instance attribute show functions
+ */
+static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
+ char *data)
+{
+ return sprintf(data, "%u\n", instance->counters.ue_count);
+}
+
+static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
+ char *data)
+{
+ return sprintf(data, "%u\n", instance->counters.ce_count);
+}
+
+#define to_instance(k) container_of(k, struct edac_device_instance, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_device_ctrl_instance_release(struct kobject *kobj)
+{
+ struct edac_device_instance *instance;
+
+ debugf1("%s()\n", __func__);
+
+ /* map from this kobj to the main control struct
+ * and then dec the main kobj count
+ */
+ instance = to_instance(kobj);
+ kobject_put(&instance->ctl->kobj);
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edac_device_instance *, char *);
+ ssize_t(*store) (struct edac_device_instance *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_device_instance *instance = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->show)
+ return instance_attr->show(instance, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'instance' structure */
+static ssize_t edac_dev_instance_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_device_instance *instance = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->store)
+ return instance_attr->store(instance, buffer, count);
+ return -EIO;
+}
+
+/* edac_dev file operations for an 'instance' */
+static struct sysfs_ops device_instance_ops = {
+ .show = edac_dev_instance_show,
+ .store = edac_dev_instance_store
+};
+
+#define INSTANCE_ATTR(_name,_mode,_show,_store) \
+static struct instance_attribute attr_instance_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/*
+ * Define attributes visible for the edac_device instance object
+ * Each contains a pointer to a show and an optional set
+ * function pointer that does the low level output/input
+ */
+INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL);
+INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL);
+
+/* list of edac_dev 'instance' attributes */
+static struct instance_attribute *device_instance_attr[] = {
+ &attr_instance_ce_count,
+ &attr_instance_ue_count,
+ NULL,
+};
+
+/* The 'ktype' for each edac_dev 'instance' */
+static struct kobj_type ktype_instance_ctrl = {
+ .release = edac_device_ctrl_instance_release,
+ .sysfs_ops = &device_instance_ops,
+ .default_attrs = (struct attribute **)device_instance_attr,
+};
+
+/* edac_dev -> instance -> block information */
+
+#define to_block(k) container_of(k, struct edac_device_block, kobj)
+#define to_block_attr(a) \
+ container_of(a, struct edac_dev_sysfs_block_attribute, attr)
+
+/*
+ * Set of low-level block attribute show functions
+ */
+static ssize_t block_ue_count_show(struct kobject *kobj,
+ struct attribute *attr, char *data)
+{
+ struct edac_device_block *block = to_block(kobj);
+
+ return sprintf(data, "%u\n", block->counters.ue_count);
+}
+
+static ssize_t block_ce_count_show(struct kobject *kobj,
+ struct attribute *attr, char *data)
+{
+ struct edac_device_block *block = to_block(kobj);
+
+ return sprintf(data, "%u\n", block->counters.ce_count);
+}
+
+/* DEVICE block kobject release() function */
+static void edac_device_ctrl_block_release(struct kobject *kobj)
+{
+ struct edac_device_block *block;
+
+ debugf1("%s()\n", __func__);
+
+ /* get the container of the kobj */
+ block = to_block(kobj);
+
+ /* map from 'block kobj' to 'block->instance->controller->main_kobj'
+ * now 'release' the block kobject
+ */
+ kobject_put(&block->instance->ctl->kobj);
+}
+
+/* Function to 'show' fields from the edac_dev 'block' structure */
+static ssize_t edac_dev_block_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_dev_sysfs_block_attribute *block_attr =
+ to_block_attr(attr);
+
+ if (block_attr->show)
+ return block_attr->show(kobj, attr, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_dev 'block' structure */
+static ssize_t edac_dev_block_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_dev_sysfs_block_attribute *block_attr;
+
+ block_attr = to_block_attr(attr);
+
+ if (block_attr->store)
+ return block_attr->store(kobj, attr, buffer, count);
+ return -EIO;
+}
+
+/* edac_dev file operations for a 'block' */
+static struct sysfs_ops device_block_ops = {
+ .show = edac_dev_block_show,
+ .store = edac_dev_block_store
+};
+
+#define BLOCK_ATTR(_name,_mode,_show,_store) \
+static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
+BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL);
+
+/* list of edac_dev 'block' attributes */
+static struct edac_dev_sysfs_block_attribute *device_block_attr[] = {
+ &attr_block_ce_count,
+ &attr_block_ue_count,
+ NULL,
+};
+
+/* The 'ktype' for each edac_dev 'block' */
+static struct kobj_type ktype_block_ctrl = {
+ .release = edac_device_ctrl_block_release,
+ .sysfs_ops = &device_block_ops,
+ .default_attrs = (struct attribute **)device_block_attr,
+};
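+
+/* Hypothetical sketch (not part of this patch) of how a low-level
+ * driver could describe extra per-block attributes and pass them as
+ * the 'attrib_spec' array to edac_device_alloc_ctl_info(); the names
+ * 'mydrv_block_foo_show' and "foo" are illustrative only:
+ *
+ * static struct edac_dev_sysfs_block_attribute mydrv_block_attrs[] = {
+ * {
+ * .attr = { .name = "foo", .mode = S_IRUGO },
+ * .show = mydrv_block_foo_show,
+ * },
+ * };
+ *
+ * With nr_attrib = ARRAY_SIZE(mydrv_block_attrs), each block gets its
+ * own copy of these entries, which edac_device_create_block() then
+ * exports next to the default ce_count/ue_count files above.
+ */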
+
+/* block ctor/dtor code */
+
+/*
+ * edac_device_create_block
+ */
+static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
+ struct edac_device_instance *instance,
+ struct edac_device_block *block)
+{
+ int i;
+ int err;
+ struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+ struct kobject *main_kobj;
+
+ debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n",
+ __func__, instance->name, instance, block->name, block);
+ debugf4("%s() block kobj=%p block kobj->parent=%p\n",
+ __func__, &block->kobj, &block->kobj.parent);
+
+ /* init this block's kobject */
+ memset(&block->kobj, 0, sizeof(struct kobject));
+ block->kobj.parent = &instance->kobj;
+ block->kobj.ktype = &ktype_block_ctrl;
+
+ err = kobject_set_name(&block->kobj, "%s", block->name);
+ if (err)
+ return err;
+
+ /* bump the main kobject's reference count for this controller;
+ * this block is dependent on the main
+ */
+ main_kobj = kobject_get(&edac_dev->kobj);
+ if (!main_kobj) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Add this block's kobject */
+ err = kobject_register(&block->kobj);
+ if (err) {
+ debugf1("%s() Failed to register instance '%s'\n",
+ __func__, block->name);
+ kobject_put(main_kobj);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* If there are driver level block attributes, then add them
+ * to the block kobject
+ */
+ sysfs_attrib = block->block_attributes;
+ if (sysfs_attrib && block->nr_attribs) {
+ for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+ debugf4("%s() creating block attrib='%s' "
+ "attrib->%p to kobj=%p\n",
+ __func__,
+ sysfs_attrib->attr.name,
+ sysfs_attrib, &block->kobj);
+
+ /* Create each block_attribute file */
+ err = sysfs_create_file(&block->kobj,
+ &sysfs_attrib->attr);
+ if (err)
+ goto err_on_attrib;
+ }
+ }
+
+ return 0;
+
+ /* Error unwind stack */
+err_on_attrib:
+ kobject_unregister(&block->kobj);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_delete_block(edac_dev,block);
+ */
+static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
+ struct edac_device_block *block)
+{
+ struct edac_dev_sysfs_block_attribute *sysfs_attrib;
+ int i;
+
+ /* if this block has 'attributes' then we need to iterate over the list
+ * and 'remove' the attributes on this block
+ */
+ sysfs_attrib = block->block_attributes;
+ if (sysfs_attrib && block->nr_attribs) {
+ for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
+
+ /* remove each block_attrib file */
+ sysfs_remove_file(&block->kobj,
+ &sysfs_attrib->attr);
+ }
+ }
+
+ /* unregister this block's kobject, SEE:
+ * edac_device_ctrl_block_release() callback operation
+ */
+ kobject_unregister(&block->kobj);
+}
+
+/* instance ctor/dtor code */
+
+/*
+ * edac_device_create_instance
+ * create just one instance of an edac_device 'instance'
+ */
+static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
+ int idx)
+{
+ int i, j;
+ int err;
+ struct edac_device_instance *instance;
+ struct kobject *main_kobj;
+
+ instance = &edac_dev->instances[idx];
+
+ /* Init the instance's kobject */
+ memset(&instance->kobj, 0, sizeof(struct kobject));
+
+ /* set this new device under the edac_device main kobject */
+ instance->kobj.parent = &edac_dev->kobj;
+ instance->kobj.ktype = &ktype_instance_ctrl;
+ instance->ctl = edac_dev;
+
+ err = kobject_set_name(&instance->kobj, "%s", instance->name);
+ if (err)
+ goto err_out;
+
+ /* bump the main kobject's reference count for this controller;
+ * this instance is dependent on the main
+ */
+ main_kobj = kobject_get(&edac_dev->kobj);
+ if (!main_kobj) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Formally register this instance's kobject */
+ err = kobject_register(&instance->kobj);
+ if (err != 0) {
+ debugf2("%s() Failed to register instance '%s'\n",
+ __func__, instance->name);
+ kobject_put(main_kobj);
+ goto err_out;
+ }
+
+ debugf4("%s() now register '%d' blocks for instance %d\n",
+ __func__, instance->nr_blocks, idx);
+
+ /* register all blocks of this instance */
+ for (i = 0; i < instance->nr_blocks; i++) {
+ err = edac_device_create_block(edac_dev, instance,
+ &instance->blocks[i]);
+ if (err) {
+ /* If any fail, remove all previous ones */
+ for (j = 0; j < i; j++)
+ edac_device_delete_block(edac_dev,
+ &instance->blocks[j]);
+ goto err_release_instance_kobj;
+ }
+ }
+
+ debugf4("%s() Registered instance %d '%s' kobject\n",
+ __func__, idx, instance->name);
+
+ return 0;
+
+ /* error unwind stack */
+err_release_instance_kobj:
+ kobject_unregister(&instance->kobj);
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_delete_instance
+ * remove an edac_device instance
+ */
+static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev,
+ int idx)
+{
+ struct edac_device_instance *instance;
+ int i;
+
+ instance = &edac_dev->instances[idx];
+
+ /* unregister all blocks in this instance */
+ for (i = 0; i < instance->nr_blocks; i++)
+ edac_device_delete_block(edac_dev, &instance->blocks[i]);
+
+ /* unregister this instance's kobject, SEE:
+ * edac_device_ctrl_instance_release() for callback operation
+ */
+ kobject_unregister(&instance->kobj);
+}
+
+/*
+ * edac_device_create_instances
+ * create the first level of 'instances' for this device
+ * (i.e. 'cache' might have 'cache0', 'cache1', 'cache2', etc.)
+ */
+static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
+{
+ int i, j;
+ int err;
+
+ debugf0("%s()\n", __func__);
+
+ /* iterate over creation of the instances */
+ for (i = 0; i < edac_dev->nr_instances; i++) {
+ err = edac_device_create_instance(edac_dev, i);
+ if (err) {
+ /* unwind previous instances on error */
+ for (j = 0; j < i; j++)
+ edac_device_delete_instance(edac_dev, j);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * edac_device_delete_instances(edac_dev);
+ * unregister all the kobjects of the instances
+ */
+static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev)
+{
+ int i;
+
+ /* iterate over the instances, deleting each one */
+ for (i = 0; i < edac_dev->nr_instances; i++)
+ edac_device_delete_instance(edac_dev, i);
+}
+
+/* edac_dev sysfs ctor/dtor code */
+
+/*
+ * edac_device_add_main_sysfs_attributes
+ * add some attributes to this instance's main kobject
+ */
+static int edac_device_add_main_sysfs_attributes(
+ struct edac_device_ctl_info *edac_dev)
+{
+ struct edac_dev_sysfs_attribute *sysfs_attrib;
+ int err = 0;
+
+ sysfs_attrib = edac_dev->sysfs_attributes;
+ if (sysfs_attrib) {
+ /* iterate over the array and create an attribute for each
+ * entry in the list
+ */
+ while (sysfs_attrib->attr.name != NULL) {
+ err = sysfs_create_file(&edac_dev->kobj,
+ &sysfs_attrib->attr);
+ if (err)
+ goto err_out;
+
+ sysfs_attrib++;
+ }
+ }
+
+err_out:
+ return err;
+}
+
+/*
+ * edac_device_remove_main_sysfs_attributes
+ * remove any attributes to this instance's main kobject
+ */
+static void edac_device_remove_main_sysfs_attributes(
+ struct edac_device_ctl_info *edac_dev)
+{
+ struct edac_dev_sysfs_attribute *sysfs_attrib;
+
+ /* if there are main attributes defined, remove them. First,
+ * point to the start of the array and iterate over it,
+ * removing each attribute listed from this device's main kobject
+ */
+ sysfs_attrib = edac_dev->sysfs_attributes;
+ if (sysfs_attrib) {
+ while (sysfs_attrib->attr.name != NULL) {
+ sysfs_remove_file(&edac_dev->kobj,
+ &sysfs_attrib->attr);
+ sysfs_attrib++;
+ }
+ }
+}
+
+/*
+ * edac_device_create_sysfs() Constructor
+ *
+ * accept a created edac_device control structure
+ * and 'export' it to sysfs. The 'main' kobj should already have been
+ * created. 'instance' and 'block' kobjects should be registered
+ * along with any 'block' attributes from the low-level driver. In addition,
+ * the main attributes (if any) are connected to the main kobject of
+ * the control structure.
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+ int err;
+ struct kobject *edac_kobj = &edac_dev->kobj;
+
+ debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);
+
+ /* go create any main attributes the caller wants */
+ err = edac_device_add_main_sysfs_attributes(edac_dev);
+ if (err) {
+ debugf0("%s() failed to add sysfs attribs\n", __func__);
+ goto err_out;
+ }
+
+ /* create a symlink from the edac device
+ * to the platform 'device' being used for this
+ */
+ err = sysfs_create_link(edac_kobj,
+ &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
+ if (err) {
+ debugf0("%s() sysfs_create_link() returned err= %d\n",
+ __func__, err);
+ goto err_remove_main_attribs;
+ }
+
+ /* Create the first level instance directories
+ * In turn, the nested blocks beneath the instances will
+ * be registered as well
+ */
+ err = edac_device_create_instances(edac_dev);
+ if (err) {
+ debugf0("%s() edac_device_create_instances() "
+ "returned err= %d\n", __func__, err);
+ goto err_remove_link;
+ }
+
+ debugf4("%s() create-instances done, idx=%d\n",
+ __func__, edac_dev->dev_idx);
+
+ return 0;
+
+ /* Error unwind stack */
+err_remove_link:
+ /* remove the sym link */
+ sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+err_remove_main_attribs:
+ edac_device_remove_main_sysfs_attributes(edac_dev);
+
+err_out:
+ return err;
+}
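+
+/* For orientation (a sketch of the result, not part of this patch):
+ * for a control structure named "cache" with instances "cache0" and
+ * "cache1", each containing blocks "L1" and "L2", the constructor
+ * above typically builds a tree like this under
+ * /sys/devices/system/edac/:
+ *
+ * cache/
+ * device (symlink to the underlying 'struct device')
+ * log_ce log_ue panic_on_ue poll_msec
+ * cache0/
+ * ce_count ue_count
+ * L1/ L2/ (each with ce_count and ue_count)
+ * cache1/
+ * ...
+ */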
+
+/*
+ * edac_device_remove_sysfs() destructor
+ *
+ * given an edac_device struct, tear down the kobject resources
+ */
+void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
+{
+ debugf0("%s()\n", __func__);
+
+ /* remove any main attributes for this device */
+ edac_device_remove_main_sysfs_attributes(edac_dev);
+
+ /* remove the device sym link */
+ sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK);
+
+ /* walk the instance/block kobject tree, deconstructing it */
+ edac_device_delete_instances(edac_dev);
+}
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7b622300d0e5..4471be362599 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -27,1200 +27,20 @@
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
+#include <linux/edac.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
-#include "edac_mc.h"
-
-#define EDAC_MC_VERSION "Ver: 2.0.1 " __DATE__
-
-
-#ifdef CONFIG_EDAC_DEBUG
-/* Values of 0 to 4 will generate output */
-int edac_debug_level = 1;
-EXPORT_SYMBOL_GPL(edac_debug_level);
-#endif
-
-/* EDAC Controls, setable by module parameter, and sysfs */
-static int log_ue = 1;
-static int log_ce = 1;
-static int panic_on_ue;
-static int poll_msec = 1000;
+#include "edac_core.h"
+#include "edac_module.h"
/* lock to memory controller's control array */
-static DECLARE_MUTEX(mem_ctls_mutex);
+static DEFINE_MUTEX(mem_ctls_mutex);
static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
-static struct task_struct *edac_thread;
-
-#ifdef CONFIG_PCI
-static int check_pci_parity = 0; /* default YES check PCI parity */
-static int panic_on_pci_parity; /* default no panic on PCI Parity */
-static atomic_t pci_parity_count = ATOMIC_INIT(0);
-
-static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */
-static struct completion edac_pci_kobj_complete;
-#endif /* CONFIG_PCI */
-
-/* START sysfs data and methods */
-
-
-static const char *mem_types[] = {
- [MEM_EMPTY] = "Empty",
- [MEM_RESERVED] = "Reserved",
- [MEM_UNKNOWN] = "Unknown",
- [MEM_FPM] = "FPM",
- [MEM_EDO] = "EDO",
- [MEM_BEDO] = "BEDO",
- [MEM_SDR] = "Unbuffered-SDR",
- [MEM_RDR] = "Registered-SDR",
- [MEM_DDR] = "Unbuffered-DDR",
- [MEM_RDDR] = "Registered-DDR",
- [MEM_RMBS] = "RMBS"
-};
-
-static const char *dev_types[] = {
- [DEV_UNKNOWN] = "Unknown",
- [DEV_X1] = "x1",
- [DEV_X2] = "x2",
- [DEV_X4] = "x4",
- [DEV_X8] = "x8",
- [DEV_X16] = "x16",
- [DEV_X32] = "x32",
- [DEV_X64] = "x64"
-};
-
-static const char *edac_caps[] = {
- [EDAC_UNKNOWN] = "Unknown",
- [EDAC_NONE] = "None",
- [EDAC_RESERVED] = "Reserved",
- [EDAC_PARITY] = "PARITY",
- [EDAC_EC] = "EC",
- [EDAC_SECDED] = "SECDED",
- [EDAC_S2ECD2ED] = "S2ECD2ED",
- [EDAC_S4ECD4ED] = "S4ECD4ED",
- [EDAC_S8ECD8ED] = "S8ECD8ED",
- [EDAC_S16ECD16ED] = "S16ECD16ED"
-};
-
-/* sysfs object: /sys/devices/system/edac */
-static struct sysdev_class edac_class = {
- set_kset_name("edac"),
-};
-
-/* sysfs object:
- * /sys/devices/system/edac/mc
- */
-static struct kobject edac_memctrl_kobj;
-
-/* We use these to wait for the reference counts on edac_memctrl_kobj and
- * edac_pci_kobj to reach 0.
- */
-static struct completion edac_memctrl_kobj_complete;
-
-/*
- * /sys/devices/system/edac/mc;
- * data structures and methods
- */
-static ssize_t memctrl_int_show(void *ptr, char *buffer)
-{
- int *value = (int*) ptr;
- return sprintf(buffer, "%u\n", *value);
-}
-
-static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
-{
- int *value = (int*) ptr;
-
- if (isdigit(*buffer))
- *value = simple_strtoul(buffer, NULL, 0);
-
- return count;
-}
-
-struct memctrl_dev_attribute {
- struct attribute attr;
- void *value;
- ssize_t (*show)(void *,char *);
- ssize_t (*store)(void *, const char *, size_t);
-};
-
-/* Set of show/store abstract level functions for memory control object */
-static ssize_t memctrl_dev_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct memctrl_dev_attribute *memctrl_dev;
- memctrl_dev = (struct memctrl_dev_attribute*)attr;
-
- if (memctrl_dev->show)
- return memctrl_dev->show(memctrl_dev->value, buffer);
-
- return -EIO;
-}
-
-static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct memctrl_dev_attribute *memctrl_dev;
- memctrl_dev = (struct memctrl_dev_attribute*)attr;
-
- if (memctrl_dev->store)
- return memctrl_dev->store(memctrl_dev->value, buffer, count);
-
- return -EIO;
-}
-
-static struct sysfs_ops memctrlfs_ops = {
- .show = memctrl_dev_show,
- .store = memctrl_dev_store
-};
-
-#define MEMCTRL_ATTR(_name,_mode,_show,_store) \
-struct memctrl_dev_attribute attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .value = &_name, \
- .show = _show, \
- .store = _store, \
-};
-
-#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \
-struct memctrl_dev_attribute attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .value = _data, \
- .show = _show, \
- .store = _store, \
-};
-
-/* csrow<id> control files */
-MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
-
-/* Base Attributes of the memory ECC object */
-static struct memctrl_dev_attribute *memctrl_attr[] = {
- &attr_panic_on_ue,
- &attr_log_ue,
- &attr_log_ce,
- &attr_poll_msec,
- NULL,
-};
-
-/* Main MC kobject release() function */
-static void edac_memctrl_master_release(struct kobject *kobj)
-{
- debugf1("%s()\n", __func__);
- complete(&edac_memctrl_kobj_complete);
-}
-
-static struct kobj_type ktype_memctrl = {
- .release = edac_memctrl_master_release,
- .sysfs_ops = &memctrlfs_ops,
- .default_attrs = (struct attribute **) memctrl_attr,
-};
-
-/* Initialize the main sysfs entries for edac:
- * /sys/devices/system/edac
- *
- * and children
- *
- * Return: 0 SUCCESS
- * !0 FAILURE
- */
-static int edac_sysfs_memctrl_setup(void)
-{
- int err = 0;
-
- debugf1("%s()\n", __func__);
-
- /* create the /sys/devices/system/edac directory */
- err = sysdev_class_register(&edac_class);
-
- if (err) {
- debugf1("%s() error=%d\n", __func__, err);
- return err;
- }
-
- /* Init the MC's kobject */
- memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
- edac_memctrl_kobj.parent = &edac_class.kset.kobj;
- edac_memctrl_kobj.ktype = &ktype_memctrl;
-
- /* generate sysfs "..../edac/mc" */
- err = kobject_set_name(&edac_memctrl_kobj,"mc");
-
- if (err)
- goto fail;
-
- /* FIXME: maybe new sysdev_create_subdir() */
- err = kobject_register(&edac_memctrl_kobj);
-
- if (err) {
- debugf1("Failed to register '.../edac/mc'\n");
- goto fail;
- }
-
- debugf1("Registered '.../edac/mc' kobject\n");
-
- return 0;
-
-fail:
- sysdev_class_unregister(&edac_class);
- return err;
-}
-
-/*
- * MC teardown:
- * the '..../edac/mc' kobject followed by '..../edac' itself
- */
-static void edac_sysfs_memctrl_teardown(void)
-{
- debugf0("MC: " __FILE__ ": %s()\n", __func__);
-
- /* Unregister the MC's kobject and wait for reference count to reach
- * 0.
- */
- init_completion(&edac_memctrl_kobj_complete);
- kobject_unregister(&edac_memctrl_kobj);
- wait_for_completion(&edac_memctrl_kobj_complete);
-
- /* Unregister the 'edac' object */
- sysdev_class_unregister(&edac_class);
-}
-
-#ifdef CONFIG_PCI
-static ssize_t edac_pci_int_show(void *ptr, char *buffer)
-{
- int *value = ptr;
- return sprintf(buffer,"%d\n",*value);
-}
-
-static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
-{
- int *value = ptr;
-
- if (isdigit(*buffer))
- *value = simple_strtoul(buffer,NULL,0);
-
- return count;
-}
-
-struct edac_pci_dev_attribute {
- struct attribute attr;
- void *value;
- ssize_t (*show)(void *,char *);
- ssize_t (*store)(void *, const char *,size_t);
-};
-
-/* Set of show/store abstract level functions for PCI Parity object */
-static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct edac_pci_dev_attribute *edac_pci_dev;
- edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
-
- if (edac_pci_dev->show)
- return edac_pci_dev->show(edac_pci_dev->value, buffer);
- return -EIO;
-}
-
-static ssize_t edac_pci_dev_store(struct kobject *kobj,
- struct attribute *attr, const char *buffer, size_t count)
-{
- struct edac_pci_dev_attribute *edac_pci_dev;
- edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
-
- if (edac_pci_dev->show)
- return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
- return -EIO;
-}
-
-static struct sysfs_ops edac_pci_sysfs_ops = {
- .show = edac_pci_dev_show,
- .store = edac_pci_dev_store
-};
-
-#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
-struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .value = &_name, \
- .show = _show, \
- .store = _store, \
-};
-
-#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
-struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .value = _data, \
- .show = _show, \
- .store = _store, \
-};
-
-/* PCI Parity control files */
-EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
-EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
-EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
-
-/* Base Attributes of the memory ECC object */
-static struct edac_pci_dev_attribute *edac_pci_attr[] = {
- &edac_pci_attr_check_pci_parity,
- &edac_pci_attr_panic_on_pci_parity,
- &edac_pci_attr_pci_parity_count,
- NULL,
-};
-
-/* No memory to release */
-static void edac_pci_release(struct kobject *kobj)
-{
- debugf1("%s()\n", __func__);
- complete(&edac_pci_kobj_complete);
-}
-
-static struct kobj_type ktype_edac_pci = {
- .release = edac_pci_release,
- .sysfs_ops = &edac_pci_sysfs_ops,
- .default_attrs = (struct attribute **) edac_pci_attr,
-};
-
-/**
- * edac_sysfs_pci_setup()
- *
- */
-static int edac_sysfs_pci_setup(void)
-{
- int err;
-
- debugf1("%s()\n", __func__);
-
- memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
- edac_pci_kobj.parent = &edac_class.kset.kobj;
- edac_pci_kobj.ktype = &ktype_edac_pci;
- err = kobject_set_name(&edac_pci_kobj, "pci");
-
- if (!err) {
- /* Instanstiate the csrow object */
- /* FIXME: maybe new sysdev_create_subdir() */
- err = kobject_register(&edac_pci_kobj);
-
- if (err)
- debugf1("Failed to register '.../edac/pci'\n");
- else
- debugf1("Registered '.../edac/pci' kobject\n");
- }
-
- return err;
-}
-
-static void edac_sysfs_pci_teardown(void)
-{
- debugf0("%s()\n", __func__);
- init_completion(&edac_pci_kobj_complete);
- kobject_unregister(&edac_pci_kobj);
- wait_for_completion(&edac_pci_kobj_complete);
-}
-
-
-static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
-{
- int where;
- u16 status;
-
- where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
- pci_read_config_word(dev, where, &status);
-
- /* If we get back 0xFFFF then we must suspect that the card has been
- * pulled but the Linux PCI layer has not yet finished cleaning up.
- * We don't want to report on such devices
- */
-
- if (status == 0xFFFF) {
- u32 sanity;
-
- pci_read_config_dword(dev, 0, &sanity);
-
- if (sanity == 0xFFFFFFFF)
- return 0;
- }
-
- status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_PARITY;
-
- if (status)
- /* reset only the bits we are interested in */
- pci_write_config_word(dev, where, status);
-
- return status;
-}
-
-typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
-
-/* Clear any PCI parity errors logged by this device. */
-static void edac_pci_dev_parity_clear(struct pci_dev *dev)
-{
- u8 header_type;
-
- get_pci_parity_status(dev, 0);
-
- /* read the device TYPE, looking for bridges */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
- get_pci_parity_status(dev, 1);
-}
-
-/*
- * PCI Parity polling
- *
- */
-static void edac_pci_dev_parity_test(struct pci_dev *dev)
-{
- u16 status;
- u8 header_type;
-
- /* read the STATUS register on this device
- */
- status = get_pci_parity_status(dev, 0);
-
- debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
-
- /* check the status reg for errors */
- if (status) {
- if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Signaled System Error on %s\n",
- pci_name(dev));
-
- if (status & (PCI_STATUS_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Master Data Parity Error on %s\n",
- pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
-
- if (status & (PCI_STATUS_DETECTED_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI,
- "Detected Parity Error on %s\n",
- pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
- }
-
- /* read the device TYPE, looking for bridges */
- pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-
- debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- /* On bridges, need to examine secondary status register */
- status = get_pci_parity_status(dev, 1);
-
- debugf2("PCI SEC_STATUS= 0x%04x %s\n",
- status, dev->dev.bus_id );
-
- /* check the secondary status reg for errors */
- if (status) {
- if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Signaled System Error on %s\n",
- pci_name(dev));
-
- if (status & (PCI_STATUS_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Master Data Parity Error on "
- "%s\n", pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
-
- if (status & (PCI_STATUS_DETECTED_PARITY)) {
- edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Detected Parity Error on %s\n",
- pci_name(dev));
-
- atomic_inc(&pci_parity_count);
- }
- }
- }
-}
-
-/*
- * pci_dev parity list iterator
- * Scan the PCI device list for one iteration, looking for SERRORs
- * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
- */
-static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
-{
- struct pci_dev *dev = NULL;
-
- /* request for kernel access to the next PCI device, if any,
- * and while we are looking at it have its reference count
- * bumped until we are done with it
- */
- while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- fn(dev);
- }
-}
-
-static void do_pci_parity_check(void)
-{
- unsigned long flags;
- int before_count;
-
- debugf3("%s()\n", __func__);
-
- if (!check_pci_parity)
- return;
-
- before_count = atomic_read(&pci_parity_count);
-
- /* scan all PCI devices looking for a Parity Error on devices and
- * bridges
- */
- local_irq_save(flags);
- edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
- local_irq_restore(flags);
-
- /* Only if operator has selected panic on PCI Error */
- if (panic_on_pci_parity) {
- /* If the count is different 'after' from 'before' */
- if (before_count != atomic_read(&pci_parity_count))
- panic("EDAC: PCI Parity Error");
- }
-}
-
-static inline void clear_pci_parity_errors(void)
-{
- /* Clear any PCI bus parity errors that devices initially have logged
- * in their registers.
- */
- edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
-}
-
-#else /* CONFIG_PCI */
-
-/* pre-process these away */
-#define do_pci_parity_check()
-#define clear_pci_parity_errors()
-#define edac_sysfs_pci_teardown()
-#define edac_sysfs_pci_setup() (0)
-
-#endif /* CONFIG_PCI */
-
-/* EDAC sysfs CSROW data structures and methods
- */
-
-/* Set of more default csrow<id> attribute show/store functions */
-static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, int private)
-{
- return sprintf(data,"%u\n", csrow->ue_count);
-}
-
-static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, int private)
-{
- return sprintf(data,"%u\n", csrow->ce_count);
-}
-
-static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, int private)
-{
- return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
-}
-
-static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, int private)
-{
- return sprintf(data,"%s\n", mem_types[csrow->mtype]);
-}
-
-static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, int private)
-{
- return sprintf(data,"%s\n", dev_types[csrow->dtype]);
-}
-
-static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, int private)
-{
- return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
-}
-
-/* show/store functions for DIMM Label attributes */
-static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
- char *data, int channel)
-{
- return snprintf(data, EDAC_MC_LABEL_LEN,"%s",
- csrow->channels[channel].label);
-}
-
-static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
- const char *data,
- size_t count,
- int channel)
-{
- ssize_t max_size = 0;
-
- max_size = min((ssize_t)count,(ssize_t)EDAC_MC_LABEL_LEN-1);
- strncpy(csrow->channels[channel].label, data, max_size);
- csrow->channels[channel].label[max_size] = '\0';
-
- return max_size;
-}
-
-/* show function for dynamic chX_ce_count attribute */
-static ssize_t channel_ce_count_show(struct csrow_info *csrow,
- char *data,
- int channel)
-{
- return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
-}
-
-/* csrow specific attribute structure */
-struct csrowdev_attribute {
- struct attribute attr;
- ssize_t (*show)(struct csrow_info *,char *,int);
- ssize_t (*store)(struct csrow_info *, const char *,size_t,int);
- int private;
-};
-
-#define to_csrow(k) container_of(k, struct csrow_info, kobj)
-#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
-
-/* Set of show/store higher level functions for default csrow attributes */
-static ssize_t csrowdev_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct csrow_info *csrow = to_csrow(kobj);
- struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
-
- if (csrowdev_attr->show)
- return csrowdev_attr->show(csrow,
- buffer,
- csrowdev_attr->private);
- return -EIO;
-}
-
-static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct csrow_info *csrow = to_csrow(kobj);
- struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
-
- if (csrowdev_attr->store)
- return csrowdev_attr->store(csrow,
- buffer,
- count,
- csrowdev_attr->private);
- return -EIO;
-}
-
-static struct sysfs_ops csrowfs_ops = {
- .show = csrowdev_show,
- .store = csrowdev_store
-};
-
-#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
-struct csrowdev_attribute attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
- .private = _private, \
-};
-
-/* default cwrow<id>/attribute files */
-CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL,0);
-CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL,0);
-CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL,0);
-CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL,0);
-CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL,0);
-CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL,0);
-
-/* default attributes of the CSROW<id> object */
-static struct csrowdev_attribute *default_csrow_attr[] = {
- &attr_dev_type,
- &attr_mem_type,
- &attr_edac_mode,
- &attr_size_mb,
- &attr_ue_count,
- &attr_ce_count,
- NULL,
-};
-
-
-/* possible dynamic channel DIMM Label attribute files */
-CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
- channel_dimm_label_show,
- channel_dimm_label_store,
- 0 );
-CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
- channel_dimm_label_show,
- channel_dimm_label_store,
- 1 );
-CSROWDEV_ATTR(ch2_dimm_label,S_IRUGO|S_IWUSR,
- channel_dimm_label_show,
- channel_dimm_label_store,
- 2 );
-CSROWDEV_ATTR(ch3_dimm_label,S_IRUGO|S_IWUSR,
- channel_dimm_label_show,
- channel_dimm_label_store,
- 3 );
-CSROWDEV_ATTR(ch4_dimm_label,S_IRUGO|S_IWUSR,
- channel_dimm_label_show,
- channel_dimm_label_store,
- 4 );
-CSROWDEV_ATTR(ch5_dimm_label,S_IRUGO|S_IWUSR,
- channel_dimm_label_show,
- channel_dimm_label_store,
- 5 );
-
-/* Total possible dynamic DIMM Label attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
- &attr_ch0_dimm_label,
- &attr_ch1_dimm_label,
- &attr_ch2_dimm_label,
- &attr_ch3_dimm_label,
- &attr_ch4_dimm_label,
- &attr_ch5_dimm_label
-};
-
-/* possible dynamic channel ce_count attribute files */
-CSROWDEV_ATTR(ch0_ce_count,S_IRUGO|S_IWUSR,
- channel_ce_count_show,
- NULL,
- 0 );
-CSROWDEV_ATTR(ch1_ce_count,S_IRUGO|S_IWUSR,
- channel_ce_count_show,
- NULL,
- 1 );
-CSROWDEV_ATTR(ch2_ce_count,S_IRUGO|S_IWUSR,
- channel_ce_count_show,
- NULL,
- 2 );
-CSROWDEV_ATTR(ch3_ce_count,S_IRUGO|S_IWUSR,
- channel_ce_count_show,
- NULL,
- 3 );
-CSROWDEV_ATTR(ch4_ce_count,S_IRUGO|S_IWUSR,
- channel_ce_count_show,
- NULL,
- 4 );
-CSROWDEV_ATTR(ch5_ce_count,S_IRUGO|S_IWUSR,
- channel_ce_count_show,
- NULL,
- 5 );
-
-/* Total possible dynamic ce_count attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
- &attr_ch0_ce_count,
- &attr_ch1_ce_count,
- &attr_ch2_ce_count,
- &attr_ch3_ce_count,
- &attr_ch4_ce_count,
- &attr_ch5_ce_count
-};
-
-
-#define EDAC_NR_CHANNELS 6
-
-/* Create dynamic CHANNEL files, indexed by 'chan', under specifed CSROW */
-static int edac_create_channel_files(struct kobject *kobj, int chan)
-{
- int err=-ENODEV;
-
- if (chan >= EDAC_NR_CHANNELS)
- return err;
-
- /* create the DIMM label attribute file */
- err = sysfs_create_file(kobj,
- (struct attribute *) dynamic_csrow_dimm_attr[chan]);
-
- if (!err) {
- /* create the CE Count attribute file */
- err = sysfs_create_file(kobj,
- (struct attribute *) dynamic_csrow_ce_count_attr[chan]);
- } else {
- debugf1("%s() dimm labels and ce_count files created", __func__);
- }
-
- return err;
-}
-
-/* No memory to release for this kobj */
-static void edac_csrow_instance_release(struct kobject *kobj)
-{
- struct csrow_info *cs;
-
- cs = container_of(kobj, struct csrow_info, kobj);
- complete(&cs->kobj_complete);
-}
-
-/* the kobj_type instance for a CSROW */
-static struct kobj_type ktype_csrow = {
- .release = edac_csrow_instance_release,
- .sysfs_ops = &csrowfs_ops,
- .default_attrs = (struct attribute **) default_csrow_attr,
-};
-
-/* Create a CSROW object under specifed edac_mc_device */
-static int edac_create_csrow_object(
- struct kobject *edac_mci_kobj,
- struct csrow_info *csrow,
- int index)
-{
- int err = 0;
- int chan;
-
- memset(&csrow->kobj, 0, sizeof(csrow->kobj));
-
- /* generate ..../edac/mc/mc<id>/csrow<index> */
-
- csrow->kobj.parent = edac_mci_kobj;
- csrow->kobj.ktype = &ktype_csrow;
-
- /* name this instance of csrow<id> */
- err = kobject_set_name(&csrow->kobj,"csrow%d",index);
- if (err)
- goto error_exit;
-
- /* Instanstiate the csrow object */
- err = kobject_register(&csrow->kobj);
- if (!err) {
- /* Create the dyanmic attribute files on this csrow,
- * namely, the DIMM labels and the channel ce_count
- */
- for (chan = 0; chan < csrow->nr_channels; chan++) {
- err = edac_create_channel_files(&csrow->kobj,chan);
- if (err)
- break;
- }
- }
-
-error_exit:
- return err;
-}
-
-/* default sysfs methods and data structures for the main MCI kobject */
-
-static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- int row, chan;
-
- mci->ue_noinfo_count = 0;
- mci->ce_noinfo_count = 0;
- mci->ue_count = 0;
- mci->ce_count = 0;
-
- for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *ri = &mci->csrows[row];
-
- ri->ue_count = 0;
- ri->ce_count = 0;
-
- for (chan = 0; chan < ri->nr_channels; chan++)
- ri->channels[chan].ce_count = 0;
- }
-
- mci->start_time = jiffies;
- return count;
-}
-
-/* memory scrubbing */
-static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
-{
- u32 bandwidth = -1;
-
- if (mci->set_sdram_scrub_rate) {
-
- memctrl_int_store(&bandwidth, data, count);
-
- if (!(*mci->set_sdram_scrub_rate)(mci, &bandwidth)) {
- edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate set successfully, applied: %d\n",
- bandwidth);
- } else {
- /* FIXME: error codes maybe? */
- edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate set FAILED, could not apply: %d\n",
- bandwidth);
- }
- } else {
- /* FIXME: produce "not implemented" ERROR for user-side. */
- edac_printk(KERN_WARNING, EDAC_MC,
- "Memory scrubbing 'set'control is not implemented!\n");
- }
- return count;
-}
-
-static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
-{
- u32 bandwidth = -1;
-
- if (mci->get_sdram_scrub_rate) {
- if (!(*mci->get_sdram_scrub_rate)(mci, &bandwidth)) {
- edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate successfully, fetched: %d\n",
- bandwidth);
- } else {
- /* FIXME: error codes maybe? */
- edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate fetch FAILED, got: %d\n",
- bandwidth);
- }
- } else {
- /* FIXME: produce "not implemented" ERROR for user-side. */
- edac_printk(KERN_WARNING, EDAC_MC,
- "Memory scrubbing 'get' control is not implemented!\n");
- }
- return sprintf(data, "%d\n", bandwidth);
-}
-
-/* default attribute files for the MCI object */
-static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data,"%d\n", mci->ue_count);
-}
-
-static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data,"%d\n", mci->ce_count);
-}
-
-static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data,"%d\n", mci->ce_noinfo_count);
-}
-
-static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data,"%d\n", mci->ue_noinfo_count);
-}
-
-static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
-}
-
-static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
-{
- return sprintf(data,"%s\n", mci->ctl_name);
-}
-
-static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
-{
- int total_pages, csrow_idx;
-
- for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
- struct csrow_info *csrow = &mci->csrows[csrow_idx];
-
- if (!csrow->nr_pages)
- continue;
-
- total_pages += csrow->nr_pages;
- }
-
- return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
-}
-
-struct mcidev_attribute {
- struct attribute attr;
- ssize_t (*show)(struct mem_ctl_info *,char *);
- ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
-};
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
-#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
-
-/* MCI show/store functions for top most object */
-static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
- struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
-
- if (mcidev_attr->show)
- return mcidev_attr->show(mem_ctl_info, buffer);
-
- return -EIO;
-}
-
-static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
- struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
-
- if (mcidev_attr->store)
- return mcidev_attr->store(mem_ctl_info, buffer, count);
-
- return -EIO;
-}
-
-static struct sysfs_ops mci_ops = {
- .show = mcidev_show,
- .store = mcidev_store
-};
-
-#define MCIDEV_ATTR(_name,_mode,_show,_store) \
-struct mcidev_attribute mci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-/* default Control file */
-MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
-
-/* default Attribute files */
-MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
-MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
-MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
-MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
-MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
-MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
-MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
-
-/* memory scrubber attribute file */
-MCIDEV_ATTR(sdram_scrub_rate,S_IRUGO|S_IWUSR,mci_sdram_scrub_rate_show,mci_sdram_scrub_rate_store);
-
-static struct mcidev_attribute *mci_attr[] = {
- &mci_attr_reset_counters,
- &mci_attr_mc_name,
- &mci_attr_size_mb,
- &mci_attr_seconds_since_reset,
- &mci_attr_ue_noinfo_count,
- &mci_attr_ce_noinfo_count,
- &mci_attr_ue_count,
- &mci_attr_ce_count,
- &mci_attr_sdram_scrub_rate,
- NULL
-};
-
-/*
- * Release of a MC controlling instance
- */
-static void edac_mci_instance_release(struct kobject *kobj)
-{
- struct mem_ctl_info *mci;
-
- mci = to_mci(kobj);
- debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
- complete(&mci->kobj_complete);
-}
-
-static struct kobj_type ktype_mci = {
- .release = edac_mci_instance_release,
- .sysfs_ops = &mci_ops,
- .default_attrs = (struct attribute **) mci_attr,
-};
-
-
-#define EDAC_DEVICE_SYMLINK "device"
-
-/*
- * Create a new Memory Controller kobject instance,
- * mc<id> under the 'mc' directory
- *
- * Return:
- * 0 Success
- * !0 Failure
- */
-static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
-{
- int i;
- int err;
- struct csrow_info *csrow;
- struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
-
- debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
- memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
-
- /* set the name of the mc<id> object */
- err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
- if (err)
- return err;
-
- /* link to our parent the '..../edac/mc' object */
- edac_mci_kobj->parent = &edac_memctrl_kobj;
- edac_mci_kobj->ktype = &ktype_mci;
-
- /* register the mc<id> kobject */
- err = kobject_register(edac_mci_kobj);
- if (err)
- return err;
-
- /* create a symlink for the device */
- err = sysfs_create_link(edac_mci_kobj, &mci->dev->kobj,
- EDAC_DEVICE_SYMLINK);
- if (err)
- goto fail0;
-
- /* Make directories for each CSROW object
- * under the mc<id> kobject
- */
- for (i = 0; i < mci->nr_csrows; i++) {
- csrow = &mci->csrows[i];
-
- /* Only expose populated CSROWs */
- if (csrow->nr_pages > 0) {
- err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
- if (err)
- goto fail1;
- }
- }
-
- return 0;
-
- /* CSROW error: backout what has already been registered, */
-fail1:
- for ( i--; i >= 0; i--) {
- if (csrow->nr_pages > 0) {
- init_completion(&csrow->kobj_complete);
- kobject_unregister(&mci->csrows[i].kobj);
- wait_for_completion(&csrow->kobj_complete);
- }
- }
-
-fail0:
- init_completion(&mci->kobj_complete);
- kobject_unregister(edac_mci_kobj);
- wait_for_completion(&mci->kobj_complete);
- return err;
-}
-
-/*
- * remove a Memory Controller instance
- */
-static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
-{
- int i;
-
- debugf0("%s()\n", __func__);
-
- /* remove all csrow kobjects */
- for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
- init_completion(&mci->csrows[i].kobj_complete);
- kobject_unregister(&mci->csrows[i].kobj);
- wait_for_completion(&mci->csrows[i].kobj_complete);
- }
- }
-
- sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
- init_completion(&mci->kobj_complete);
- kobject_unregister(&mci->edac_mci_kobj);
- wait_for_completion(&mci->kobj_complete);
-}
-
-/* END OF sysfs data and methods */
-
#ifdef CONFIG_EDAC_DEBUG
-void edac_mc_dump_channel(struct channel_info *chan)
+static void edac_mc_dump_channel(struct channel_info *chan)
{
debugf4("\tchannel = %p\n", chan);
debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
@@ -1228,25 +48,21 @@ void edac_mc_dump_channel(struct channel_info *chan)
debugf4("\tchannel->label = '%s'\n", chan->label);
debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}
-EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
-void edac_mc_dump_csrow(struct csrow_info *csrow)
+static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
debugf4("\tcsrow = %p\n", csrow);
debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
- debugf4("\tcsrow->first_page = 0x%lx\n",
- csrow->first_page);
+ debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
- debugf4("\tcsrow->nr_channels = %d\n",
- csrow->nr_channels);
+ debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
debugf4("\tcsrow->channels = %p\n", csrow->channels);
debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}
-EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
-void edac_mc_dump_mci(struct mem_ctl_info *mci)
+static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
debugf3("\tmci = %p\n", mci);
debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
@@ -1256,13 +72,11 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
mci->nr_csrows, mci->csrows);
debugf3("\tdev = %p\n", mci->dev);
- debugf3("\tmod_name:ctl_name = %s:%s\n",
- mci->mod_name, mci->ctl_name);
+ debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}
-EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
-#endif /* CONFIG_EDAC_DEBUG */
+#endif /* CONFIG_EDAC_DEBUG */
/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
* Adjust 'ptr' so that its alignment is at least as stringent as what the
@@ -1271,7 +85,7 @@ EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
* If 'size' is a constant, the compiler will optimize this whole function
* down to either a no-op or the addition of a constant to the value of 'ptr'.
*/
-static inline char * align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void *ptr, unsigned size)
{
unsigned align, r;
@@ -1288,14 +102,14 @@ static inline char * align_ptr(void *ptr, unsigned size)
else if (size > sizeof(char))
align = sizeof(short);
else
- return (char *) ptr;
+ return (char *)ptr;
r = size % align;
if (r == 0)
- return (char *) ptr;
+ return (char *)ptr;
- return (char *) (((unsigned long) ptr) + align - r);
+ return (void *)(((unsigned long)ptr) + align - r);
}
/**
@@ -1315,7 +129,7 @@ static inline char * align_ptr(void *ptr, unsigned size)
* struct mem_ctl_info pointer
*/
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans)
+ unsigned nr_chans, int edac_index)
{
struct mem_ctl_info *mci;
struct csrow_info *csi, *csrow;
@@ -1323,30 +137,32 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
void *pvt;
unsigned size;
int row, chn;
+ int err;
/* Figure out the offsets of the various items from the start of an mc
* structure. We want the alignment of each item to be at least as
* stringent as what the compiler would provide if we could simply
* hardcode everything into a single struct.
*/
- mci = (struct mem_ctl_info *) 0;
- csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
- chi = (struct channel_info *)
- align_ptr(&csi[nr_csrows], sizeof(*chi));
- pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
- size = ((unsigned long) pvt) + sz_pvt;
-
- if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
+ mci = (struct mem_ctl_info *)0;
+ csi = edac_align_ptr(&mci[1], sizeof(*csi));
+ chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
+ pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ size = ((unsigned long)pvt) + sz_pvt;
+
+ mci = kzalloc(size, GFP_KERNEL);
+ if (mci == NULL)
return NULL;
/* Adjust pointers so they point within the memory we just allocated
* rather than an imaginary chunk of memory located at address 0.
*/
- csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
- chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
- pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
+ csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
+ chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
+ pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
- memset(mci, 0, size); /* clear all fields */
+ /* setup index and various internal pointers */
+ mci->mc_idx = edac_index;
mci->csrows = csi;
mci->pvt_info = pvt;
mci->nr_csrows = nr_csrows;
@@ -1366,17 +182,35 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
}
}
+ mci->op_state = OP_ALLOC;
+
+ /*
+ * Initialize the 'root' kobj for the edac_mc controller
+ */
+ err = edac_mc_register_sysfs_main_kobj(mci);
+ if (err) {
+ kfree(mci);
+ return NULL;
+ }
+
+ /* at this point, the root kobj is valid, and in order to
+ * 'free' the object, then the function:
+ * edac_mc_unregister_sysfs_main_kobj() must be called
+ * which will perform kobj unregistration and the actual free
+ * will occur during the kobject callback operation
+ */
return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
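The allocation above packs the mem_ctl_info, the csrow array, the channel array and the driver-private data into one block: each sub-array's offset is first computed against a NULL base, the total is allocated with a single kzalloc(), and the offsets are then rebased onto the real address. A minimal user-space sketch of the same technique, with hypothetical names and the alignment fix-up omitted for brevity (this is an illustration of the trick, not the driver's API):

#include <stdlib.h>

struct row { int ce_count; };
struct ctl { struct row *rows; void *pvt; unsigned nr_rows; };

/* one calloc() holding a ctl, its rows[] and sz_pvt bytes of private data */
static struct ctl *ctl_alloc(unsigned nr_rows, unsigned sz_pvt)
{
	struct ctl *c = (struct ctl *)0;        /* imaginary base at 0   */
	struct row *rows = (struct row *)&c[1]; /* offset of rows[]      */
	void *pvt = &rows[nr_rows];             /* offset of private data */
	size_t size = (size_t)pvt + sz_pvt;     /* total size in bytes   */

	c = calloc(1, size);
	if (c == NULL)
		return NULL;

	/* rebase the offsets onto the memory actually allocated */
	c->rows = (struct row *)((char *)c + (size_t)rows);
	c->pvt = sz_pvt ? (char *)c + (size_t)pvt : NULL;
	c->nr_rows = nr_rows;
	return c;
}

Because everything lives in the one block, releasing the base pointer releases it all, which is why the release path later in this patch can simply kfree(mci).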
/**
- * edac_mc_free: Free a previously allocated 'mci' structure
+ * edac_mc_free
+ * 'Free' a previously allocated 'mci' structure
* @mci: pointer to a struct mem_ctl_info structure
*/
void edac_mc_free(struct mem_ctl_info *mci)
{
- kfree(mci);
+ edac_mc_unregister_sysfs_main_kobj(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
@@ -1397,18 +231,136 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
return NULL;
}
+/*
+ * handler for EDAC to check if NMI type handler has asserted interrupt
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+ int old_state;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ return 1;
+
+ old_state = edac_err_assert;
+ edac_err_assert = 0;
+
+ return old_state;
+}
+
+/*
+ * edac_mc_workq_function
+ * performs the operation scheduled by a workq request
+ */
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+ struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+
+ mutex_lock(&mem_ctls_mutex);
+
+ /* if this control struct has moved to offline state, we are done */
+ if (mci->op_state == OP_OFFLINE) {
+ mutex_unlock(&mem_ctls_mutex);
+ return;
+ }
+
+ /* Only poll controllers that are running polled and have a check */
+ if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+ mci->edac_check(mci);
+
+ /*
+ * FIXME: temp place holder for PCI checks,
+ * goes away when we break out PCI
+ */
+ edac_pci_do_parity_check();
+
+ mutex_unlock(&mem_ctls_mutex);
+
+ /* Reschedule */
+ queue_delayed_work(edac_workqueue, &mci->work,
+ msecs_to_jiffies(edac_mc_get_poll_msec()));
+}
+
+/*
+ * edac_mc_workq_setup
+ * initialize a workq item for this mci
+ * passing in the new delay period in msec
+ *
+ * locking model:
+ *
+ * called with the mem_ctls_mutex held
+ */
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+ debugf0("%s()\n", __func__);
+
+ /* if this instance is not in the POLL state, then simply return */
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ * stop the workq processing on this mci
+ *
+ * locking model:
+ *
+ * called WITHOUT lock held
+ */
+static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+ int status;
+
+ /* if not running POLL, leave now */
+ if (mci->op_state == OP_RUNNING_POLL) {
+ status = cancel_delayed_work(&mci->work);
+ if (status == 0) {
+ debugf0("%s() not canceled, flush the queue\n",
+ __func__);
+
+ /* workq instance might be running, wait for it */
+ flush_workqueue(edac_workqueue);
+ }
+ }
+}
+
+/*
+ * edac_reset_delay_period
+ */
+static void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+{
+ /* cancel the current workq request */
+ edac_mc_workq_teardown(mci);
+
+ /* lock the list of devices for the new setup */
+ mutex_lock(&mem_ctls_mutex);
+
+ /* restart the workq request, with new delay value */
+ edac_mc_workq_setup(mci, value);
+
+ mutex_unlock(&mem_ctls_mutex);
+}
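The setup/teardown pair above is the stock self-rearming delayed-work pattern: the work function re-queues itself each poll period, and teardown cancels the pending item, falling back to a queue flush when the item was already executing. A stripped-down sketch of that pattern with hypothetical names (poll_wq stands in for a workqueue such as the edac_workqueue used above, assumed to be created elsewhere):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *poll_wq;	/* assumed created elsewhere */

struct poller {
	struct delayed_work work;
	unsigned period_msec;
};

static void poll_fn(struct work_struct *w)
{
	struct delayed_work *dw = (struct delayed_work *)w;
	struct poller *p = container_of(dw, struct poller, work);

	/* ... perform one round of checks here ... */

	/* re-arm for the next period */
	queue_delayed_work(poll_wq, &p->work,
			msecs_to_jiffies(p->period_msec));
}

static void poller_start(struct poller *p, unsigned msec)
{
	p->period_msec = msec;
	INIT_DELAYED_WORK(&p->work, poll_fn);
	queue_delayed_work(poll_wq, &p->work, msecs_to_jiffies(msec));
}

static void poller_stop(struct poller *p)
{
	if (cancel_delayed_work(&p->work) == 0)
		flush_workqueue(poll_wq);	/* it was running; wait for it */
}

The real code adds two things on top of this skeleton: the work function serializes against mem_ctls_mutex, and it checks op_state so that an mci already marked OP_OFFLINE is never polled again.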
+
/* Return 0 on success, 1 on failure.
* Before calling this function, caller must
* assign a unique value to mci->mc_idx.
+ *
+ * locking model:
+ *
+ * called with the mem_ctls_mutex lock held
*/
-static int add_mc_to_global_list (struct mem_ctl_info *mci)
+static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
struct list_head *item, *insert_before;
struct mem_ctl_info *p;
insert_before = &mc_devices;
- if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL))
+ p = find_mci_by_dev(mci->dev);
+ if (unlikely(p != NULL))
goto fail0;
list_for_each(item, &mc_devices) {
@@ -1424,18 +376,19 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
}
list_add_tail_rcu(&mci->link, insert_before);
+ atomic_inc(&edac_handlers);
return 0;
fail0:
edac_printk(KERN_WARNING, EDAC_MC,
- "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
- dev_name(p->dev), p->mod_name, p->ctl_name, p->mc_idx);
+ "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
+ dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
return 1;
fail1:
edac_printk(KERN_WARNING, EDAC_MC,
- "bug in low-level driver: attempt to assign\n"
- " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
+ "bug in low-level driver: attempt to assign\n"
+ " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
return 1;
}
@@ -1450,6 +403,7 @@ static void complete_mc_list_del(struct rcu_head *head)
static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
+ atomic_dec(&edac_handlers);
list_del_rcu(&mci->link);
init_completion(&mci->complete);
call_rcu(&mci->rcu, complete_mc_list_del);
@@ -1457,6 +411,34 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
}
/**
+ * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ *
+ * Caller must hold mem_ctls_mutex.
+ */
+struct mem_ctl_info *edac_mc_find(int idx)
+{
+ struct list_head *item;
+ struct mem_ctl_info *mci;
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+ if (mci->mc_idx >= idx) {
+ if (mci->mc_idx == idx)
+ return mci;
+
+ break;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(edac_mc_find);
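The walk relies on the global list being kept sorted by mc_idx, so it can stop at the first entry whose index reaches the one requested. A hypothetical caller inside the EDAC core (the lock is file-local, so only core code can satisfy the stated locking rule) would wrap the lookup like this:

	struct mem_ctl_info *mci;

	mutex_lock(&mem_ctls_mutex);
	mci = edac_mc_find(0);		/* look up controller index 0 */
	if (mci != NULL)
		debugf0("found mc%d '%s'\n", mci->mc_idx, mci->ctl_name);
	mutex_unlock(&mem_ctls_mutex);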
+
+/**
* edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
* create sysfs entries associated with mci structure
* @mci: pointer to the mci structure to be added to the list
@@ -1468,10 +450,10 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
*/
/* FIXME - should a warning be printed if no error detection? correction? */
-int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
+int edac_mc_add_mc(struct mem_ctl_info *mci)
{
debugf0("%s()\n", __func__);
- mci->mc_idx = mc_idx;
+
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_mc_dump_mci(mci);
@@ -1484,12 +466,12 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
edac_mc_dump_csrow(&mci->csrows[i]);
for (j = 0; j < mci->csrows[i].nr_channels; j++)
- edac_mc_dump_channel(
- &mci->csrows[i].channels[j]);
+ edac_mc_dump_channel(&mci->csrows[i].
+ channels[j]);
}
}
#endif
- down(&mem_ctls_mutex);
+ mutex_lock(&mem_ctls_mutex);
if (add_mc_to_global_list(mci))
goto fail0;
@@ -1503,18 +485,28 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
goto fail1;
}
+ /* If there IS a check routine, then we are running POLLED */
+ if (mci->edac_check != NULL) {
+ /* This instance is NOW RUNNING */
+ mci->op_state = OP_RUNNING_POLL;
+
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ } else {
+ mci->op_state = OP_RUNNING_INTERRUPT;
+ }
+
/* Report action taken */
- edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
- mci->mod_name, mci->ctl_name, dev_name(mci->dev));
+ edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
+ " DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci));
- up(&mem_ctls_mutex);
+ mutex_unlock(&mem_ctls_mutex);
return 0;
fail1:
del_mc_from_global_list(mci);
fail0:
- up(&mem_ctls_mutex);
+ mutex_unlock(&mem_ctls_mutex);
return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
@@ -1526,29 +518,41 @@ EXPORT_SYMBOL_GPL(edac_mc_add_mc);
*
* Return pointer to removed mci structure, or NULL if device not found.
*/
-struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
+struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
struct mem_ctl_info *mci;
- debugf0("MC: %s()\n", __func__);
- down(&mem_ctls_mutex);
+ debugf0("%s()\n", __func__);
+
+ mutex_lock(&mem_ctls_mutex);
- if ((mci = find_mci_by_dev(dev)) == NULL) {
- up(&mem_ctls_mutex);
+ /* find the requested mci struct in the global list */
+ mci = find_mci_by_dev(dev);
+ if (mci == NULL) {
+ mutex_unlock(&mem_ctls_mutex);
return NULL;
}
- edac_remove_sysfs_mci_device(mci);
+ /* marking MCI offline */
+ mci->op_state = OP_OFFLINE;
+
del_mc_from_global_list(mci);
- up(&mem_ctls_mutex);
+ mutex_unlock(&mem_ctls_mutex);
+
+ /* flush workq processes and remove sysfs */
+ edac_mc_workq_teardown(mci);
+ edac_remove_sysfs_mci_device(mci);
+
edac_printk(KERN_INFO, EDAC_MC,
"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
- mci->mod_name, mci->ctl_name, dev_name(mci->dev));
+ mci->mod_name, mci->ctl_name, dev_name(mci));
+
return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
-void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
+static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
+ u32 size)
{
struct page *pg;
void *virt_addr;
@@ -1557,7 +561,7 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
debugf3("%s()\n", __func__);
/* ECC error page was not in our memory. Ignore it. */
- if(!pfn_valid(page))
+ if (!pfn_valid(page))
return;
/* Find the actual page structure then map it and fix */
@@ -1577,7 +581,6 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
if (PageHighMem(pg))
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
@@ -1611,7 +614,7 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
if (row == -1)
edac_mc_printk(mci, KERN_ERR,
"could not look up page error address %lx\n",
- (unsigned long) page);
+ (unsigned long)page);
return row;
}
@@ -1620,8 +623,9 @@ EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
/* FIXME - setable log (warning/emerg) levels */
/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number, unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel, const char *msg)
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, unsigned long syndrome,
+ int row, int channel, const char *msg)
{
unsigned long remapped_page;
@@ -1647,7 +651,7 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci,
return;
}
- if (log_ce)
+ if (edac_mc_get_log_ce())
/* FIXME - put in DIMM location */
edac_mc_printk(mci, KERN_WARNING,
"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
@@ -1671,18 +675,18 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci,
* page - which can then be scrubbed.
*/
remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ mci->csrows[row].grain);
}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
{
- if (log_ce)
+ if (edac_mc_get_log_ce())
edac_mc_printk(mci, KERN_WARNING,
"CE - no information available: %s\n", msg);
@@ -1692,8 +696,8 @@ void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number, unsigned long offset_in_page,
- int row, const char *msg)
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row, const char *msg)
{
int len = EDAC_MC_LABEL_LEN * 4;
char labels[len + 1];
@@ -1714,26 +718,26 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci,
}
chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
+ mci->csrows[row].channels[0].label);
len -= chars;
pos += chars;
for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
+ chan++) {
chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
+ mci->csrows[row].channels[chan].label);
len -= chars;
pos += chars;
}
- if (log_ue)
+ if (edac_mc_get_log_ue())
edac_mc_printk(mci, KERN_EMERG,
"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
"labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row, labels,
- msg);
+ offset_in_page, mci->csrows[row].grain, row,
+ labels, msg);
- if (panic_on_ue)
+ if (edac_mc_get_panic_on_ue())
panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
"row %d, labels \"%s\": %s\n", mci->mc_idx,
page_frame_number, offset_in_page,
@@ -1746,10 +750,10 @@ EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
{
- if (panic_on_ue)
+ if (edac_mc_get_panic_on_ue())
panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
- if (log_ue)
+ if (edac_mc_get_log_ue())
edac_mc_printk(mci, KERN_WARNING,
"UE - no information available: %s\n", msg);
mci->ue_noinfo_count++;
@@ -1757,16 +761,14 @@ void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-
/*************************************************************
* On Fully Buffered DIMM modules, this helper function is
* called to process UE events
*/
void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb,
- char *msg)
+ unsigned int csrow,
+ unsigned int channela,
+ unsigned int channelb, char *msg)
{
int len = EDAC_MC_LABEL_LEN * 4;
char labels[len + 1];
@@ -1808,20 +810,21 @@ void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
/* Generate the DIMM labels from the specified channels */
chars = snprintf(pos, len + 1, "%s",
mci->csrows[csrow].channels[channela].label);
- len -= chars; pos += chars;
+ len -= chars;
+ pos += chars;
chars = snprintf(pos, len + 1, "-%s",
mci->csrows[csrow].channels[channelb].label);
- if (log_ue)
+ if (edac_mc_get_log_ue())
edac_mc_printk(mci, KERN_EMERG,
"UE row %d, channel-a= %d channel-b= %d "
"labels \"%s\": %s\n", csrow, channela, channelb,
labels, msg);
- if (panic_on_ue)
+ if (edac_mc_get_panic_on_ue())
panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
+ "labels \"%s\": %s\n", csrow, channela,
+ channelb, labels, msg);
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
@@ -1830,9 +833,7 @@ EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
* called to process CE events
*/
void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channel,
- char *msg)
+ unsigned int csrow, unsigned int channel, char *msg)
{
/* Ensure boundary values */
@@ -1853,13 +854,12 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
return;
}
- if (log_ce)
+ if (edac_mc_get_log_ce())
/* FIXME - put in DIMM location */
edac_mc_printk(mci, KERN_WARNING,
"CE row %d, channel %d, label \"%s\": %s\n",
csrow, channel,
- mci->csrows[csrow].channels[channel].label,
- msg);
+ mci->csrows[csrow].channels[channel].label, msg);
mci->ce_count++;
mci->csrows[csrow].ce_count++;
@@ -1867,17 +867,16 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
-
/*
* Iterate over all MC instances and check for ECC, et al, errors
*/
-static inline void check_mc_devices(void)
+void edac_check_mc_devices(void)
{
struct list_head *item;
struct mem_ctl_info *mci;
debugf3("%s()\n", __func__);
- down(&mem_ctls_mutex);
+ mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
@@ -1886,119 +885,5 @@ static inline void check_mc_devices(void)
mci->edac_check(mci);
}
- up(&mem_ctls_mutex);
-}
-
-/*
- * Check MC status every poll_msec.
- * Check PCI status every poll_msec as well.
- *
- * This where the work gets done for edac.
- *
- * SMP safe, doesn't use NMI, and auto-rate-limits.
- */
-static void do_edac_check(void)
-{
- debugf3("%s()\n", __func__);
- check_mc_devices();
- do_pci_parity_check();
-}
-
-static int edac_kernel_thread(void *arg)
-{
- while (!kthread_should_stop()) {
- do_edac_check();
-
- /* goto sleep for the interval */
- schedule_timeout_interruptible((HZ * poll_msec) / 1000);
- try_to_freeze();
- }
-
- return 0;
+ mutex_unlock(&mem_ctls_mutex);
}
-
-/*
- * edac_mc_init
- * module initialization entry point
- */
-static int __init edac_mc_init(void)
-{
- edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
-
- /*
- * Harvest and clear any boot/initialization PCI parity errors
- *
- * FIXME: This only clears errors logged by devices present at time of
- * module initialization. We should also do an initial clear
- * of each newly hotplugged device.
- */
- clear_pci_parity_errors();
-
- /* Create the MC sysfs entries */
- if (edac_sysfs_memctrl_setup()) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error initializing sysfs code\n");
- return -ENODEV;
- }
-
- /* Create the PCI parity sysfs entries */
- if (edac_sysfs_pci_setup()) {
- edac_sysfs_memctrl_teardown();
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC PCI: Error initializing sysfs code\n");
- return -ENODEV;
- }
-
- /* create our kernel thread */
- edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
-
- if (IS_ERR(edac_thread)) {
- /* remove the sysfs entries */
- edac_sysfs_memctrl_teardown();
- edac_sysfs_pci_teardown();
- return PTR_ERR(edac_thread);
- }
-
- return 0;
-}
-
-/*
- * edac_mc_exit()
- * module exit/termination functioni
- */
-static void __exit edac_mc_exit(void)
-{
- debugf0("%s()\n", __func__);
- kthread_stop(edac_thread);
-
- /* tear down the sysfs device */
- edac_sysfs_memctrl_teardown();
- edac_sysfs_pci_teardown();
-}
-
-module_init(edac_mc_init);
-module_exit(edac_mc_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
- "Based on work by Dan Hollis et al");
-MODULE_DESCRIPTION("Core library routines for MC reporting");
-
-module_param(panic_on_ue, int, 0644);
-MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
-#ifdef CONFIG_PCI
-module_param(check_pci_parity, int, 0644);
-MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
-module_param(panic_on_pci_parity, int, 0644);
-MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
-#endif
-module_param(log_ue, int, 0644);
-MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
-module_param(log_ce, int, 0644);
-MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
-module_param(poll_msec, int, 0644);
-MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
-#ifdef CONFIG_EDAC_DEBUG
-module_param(edac_debug_level, int, 0644);
-MODULE_PARM_DESC(edac_debug_level, "Debug level");
-#endif
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
new file mode 100644
index 000000000000..cd090b0677a7
--- /dev/null
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -0,0 +1,1024 @@
+/*
+ * edac_mc kernel module
+ * (C) 2005-2007 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/bug.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+
+/* MC EDAC Controls, settable by module parameter and sysfs */
+static int edac_mc_log_ue = 1;
+static int edac_mc_log_ce = 1;
+static int edac_mc_panic_on_ue;
+static int edac_mc_poll_msec = 1000;
+
+/* Getter functions for above */
+int edac_mc_get_log_ue(void)
+{
+ return edac_mc_log_ue;
+}
+
+int edac_mc_get_log_ce(void)
+{
+ return edac_mc_log_ce;
+}
+
+int edac_mc_get_panic_on_ue(void)
+{
+ return edac_mc_panic_on_ue;
+}
+
+/* this is temporary */
+int edac_mc_get_poll_msec(void)
+{
+ return edac_mc_poll_msec;
+}
+
+/* Parameter declarations for above */
+module_param(edac_mc_panic_on_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
+module_param(edac_mc_log_ue, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ue,
+ "Log uncorrectable error to console: 0=off 1=on");
+module_param(edac_mc_log_ce, int, 0644);
+MODULE_PARM_DESC(edac_mc_log_ce,
+ "Log correctable error to console: 0=off 1=on");
+module_param(edac_mc_poll_msec, int, 0644);
+MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
+
+/*
+ * various constants for Memory Controllers
+ */
+static const char *mem_types[] = {
+ [MEM_EMPTY] = "Empty",
+ [MEM_RESERVED] = "Reserved",
+ [MEM_UNKNOWN] = "Unknown",
+ [MEM_FPM] = "FPM",
+ [MEM_EDO] = "EDO",
+ [MEM_BEDO] = "BEDO",
+ [MEM_SDR] = "Unbuffered-SDR",
+ [MEM_RDR] = "Registered-SDR",
+ [MEM_DDR] = "Unbuffered-DDR",
+ [MEM_RDDR] = "Registered-DDR",
+ [MEM_RMBS] = "RMBS",
+ [MEM_DDR2] = "Unbuffered-DDR2",
+ [MEM_FB_DDR2] = "FullyBuffered-DDR2",
+ [MEM_RDDR2] = "Registered-DDR2"
+};
+
+static const char *dev_types[] = {
+ [DEV_UNKNOWN] = "Unknown",
+ [DEV_X1] = "x1",
+ [DEV_X2] = "x2",
+ [DEV_X4] = "x4",
+ [DEV_X8] = "x8",
+ [DEV_X16] = "x16",
+ [DEV_X32] = "x32",
+ [DEV_X64] = "x64"
+};
+
+static const char *edac_caps[] = {
+ [EDAC_UNKNOWN] = "Unknown",
+ [EDAC_NONE] = "None",
+ [EDAC_RESERVED] = "Reserved",
+ [EDAC_PARITY] = "PARITY",
+ [EDAC_EC] = "EC",
+ [EDAC_SECDED] = "SECDED",
+ [EDAC_S2ECD2ED] = "S2ECD2ED",
+ [EDAC_S4ECD4ED] = "S4ECD4ED",
+ [EDAC_S8ECD8ED] = "S8ECD8ED",
+ [EDAC_S16ECD16ED] = "S16ECD16ED"
+};
+
+
+
+/*
+ * /sys/devices/system/edac/mc:
+ * data structures and methods
+ */
+static ssize_t memctrl_int_show(void *ptr, char *buffer)
+{
+ int *value = (int *)ptr;
+ return sprintf(buffer, "%u\n", *value);
+}
+
+static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = (int *)ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ return count;
+}
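Because memctrl_int_store() only checks that the first character is a digit and then calls simple_strtoul() with base 0, a write of "1000" and a write of "0x3e8" both store 1000, while a write that does not start with a digit leaves the value untouched (the call still reports the full count as consumed).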
+
+
+/* EDAC sysfs CSROW data structures and methods
+ */
+
+/* Set of more default csrow<id> attribute show/store functions */
+static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%u\n", csrow->ue_count);
+}
+
+static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%u\n", csrow->ce_count);
+}
+
+static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+}
+
+static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+}
+
+static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+}
+
+static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
+ int private)
+{
+ return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+}
+
+/* show/store functions for DIMM Label attributes */
+static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
+ char *data, int channel)
+{
+ return snprintf(data, EDAC_MC_LABEL_LEN, "%s",
+ csrow->channels[channel].label);
+}
+
+static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
+ const char *data,
+ size_t count, int channel)
+{
+ ssize_t max_size = 0;
+
+ max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
+ strncpy(csrow->channels[channel].label, data, max_size);
+ csrow->channels[channel].label[max_size] = '\0';
+
+ return max_size;
+}
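Note that the label store path truncates silently: at most EDAC_MC_LABEL_LEN - 1 bytes of the written label are kept, a terminating NUL is appended, and the returned byte count is the truncated length rather than the full count that was written.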
+
+/* show function for dynamic chX_ce_count attribute */
+static ssize_t channel_ce_count_show(struct csrow_info *csrow,
+ char *data, int channel)
+{
+ return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
+}
+
+/* csrow specific attribute structure */
+struct csrowdev_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct csrow_info *, char *, int);
+ ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
+ int private;
+};
+
+#define to_csrow(k) container_of(k, struct csrow_info, kobj)
+#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+
+/* Set of show/store higher level functions for default csrow attributes */
+static ssize_t csrowdev_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->show)
+ return csrowdev_attr->show(csrow,
+ buffer, csrowdev_attr->private);
+ return -EIO;
+}
+
+static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct csrow_info *csrow = to_csrow(kobj);
+ struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+
+ if (csrowdev_attr->store)
+ return csrowdev_attr->store(csrow,
+ buffer,
+ count, csrowdev_attr->private);
+ return -EIO;
+}
+
+static struct sysfs_ops csrowfs_ops = {
+ .show = csrowdev_show,
+ .store = csrowdev_store
+};
+
+#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
+static struct csrowdev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+ .private = _private, \
+};
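For reference, the first instance below, CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0), expands to roughly:

static struct csrowdev_attribute attr_size_mb = {
	.attr = { .name = "size_mb", .mode = S_IRUGO },
	.show = csrow_size_show,
	.store = NULL,
	.private = 0,
};

csrowdev_show() and csrowdev_store() above then recover this structure from the generic struct attribute via to_csrowdev_attr() and dispatch on its .show/.store members, passing .private as the channel index where needed.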
+
+/* default csrow<id> attribute files */
+CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
+CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
+CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
+CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
+CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
+CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
+
+/* default attributes of the CSROW<id> object */
+static struct csrowdev_attribute *default_csrow_attr[] = {
+ &attr_dev_type,
+ &attr_mem_type,
+ &attr_edac_mode,
+ &attr_size_mb,
+ &attr_ue_count,
+ &attr_ce_count,
+ NULL,
+};
+
+/* possible dynamic channel DIMM Label attribute files */
+CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 0);
+CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 1);
+CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 2);
+CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 3);
+CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 4);
+CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 5);
+
+/* Total possible dynamic DIMM Label attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
+ &attr_ch0_dimm_label,
+ &attr_ch1_dimm_label,
+ &attr_ch2_dimm_label,
+ &attr_ch3_dimm_label,
+ &attr_ch4_dimm_label,
+ &attr_ch5_dimm_label
+};
+
+/* possible dynamic channel ce_count attribute files */
+CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
+CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
+CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
+CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
+CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
+CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
+
+/* Total possible dynamic ce_count attribute file table */
+static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
+ &attr_ch0_ce_count,
+ &attr_ch1_ce_count,
+ &attr_ch2_ce_count,
+ &attr_ch3_ce_count,
+ &attr_ch4_ce_count,
+ &attr_ch5_ce_count
+};
+
+#define EDAC_NR_CHANNELS 6
+
+/* Create dynamic CHANNEL files, indexed by 'chan', under specified CSROW */
+static int edac_create_channel_files(struct kobject *kobj, int chan)
+{
+ int err = -ENODEV;
+
+ if (chan >= EDAC_NR_CHANNELS)
+ return err;
+
+ /* create the DIMM label attribute file */
+ err = sysfs_create_file(kobj,
+ (struct attribute *)
+ dynamic_csrow_dimm_attr[chan]);
+
+ if (!err) {
+ /* create the CE Count attribute file */
+ err = sysfs_create_file(kobj,
+ (struct attribute *)
+ dynamic_csrow_ce_count_attr[chan]);
+ } else {
+ debugf1("%s() creation of dimm label file failed\n",
+ __func__);
+ }
+
+ return err;
+}
+
+/* No memory to release for this kobj */
+static void edac_csrow_instance_release(struct kobject *kobj)
+{
+ struct mem_ctl_info *mci;
+ struct csrow_info *cs;
+
+ debugf1("%s()\n", __func__);
+
+ cs = container_of(kobj, struct csrow_info, kobj);
+ mci = cs->mci;
+
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* the kobj_type instance for a CSROW */
+static struct kobj_type ktype_csrow = {
+ .release = edac_csrow_instance_release,
+ .sysfs_ops = &csrowfs_ops,
+ .default_attrs = (struct attribute **)default_csrow_attr,
+};
+
+/* Create a CSROW object under specified edac_mc_device */
+static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ struct csrow_info *csrow, int index)
+{
+ struct kobject *kobj_mci = &mci->edac_mci_kobj;
+ struct kobject *kobj;
+ int chan;
+ int err;
+
+ /* generate ..../edac/mc/mc<id>/csrow<index> */
+ memset(&csrow->kobj, 0, sizeof(csrow->kobj));
+ csrow->mci = mci; /* include container up link */
+ csrow->kobj.parent = kobj_mci;
+ csrow->kobj.ktype = &ktype_csrow;
+
+ /* name this instance of csrow<id> */
+ err = kobject_set_name(&csrow->kobj, "csrow%d", index);
+ if (err)
+ goto err_out;
+
+ /* bump the mci instance's kobject's ref count */
+ kobj = kobject_get(&mci->edac_mci_kobj);
+ if (!kobj) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ /* Instantiate the csrow object */
+ err = kobject_register(&csrow->kobj);
+ if (err)
+ goto err_release_top_kobj;
+
+ /* At this point, releasing a csrow kobj requires a call to
+ * kobject_unregister(), whose teardown path performs the
+ * actual release
+ */
+
+ /* Create the dynamic attribute files on this csrow,
+ * namely, the DIMM labels and the channel ce_count
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ err = edac_create_channel_files(&csrow->kobj, chan);
+ if (err) {
+ /* special case the unregister here */
+ kobject_unregister(&csrow->kobj);
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+ /* error unwind stack */
+err_release_top_kobj:
+ kobject_put(&mci->edac_mci_kobj);
+
+err_out:
+ return err;
+}
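The creation path above pins its parent: the csrow takes a reference on the mci kobject before registering itself, and edac_csrow_instance_release() drops that reference, so the mci object cannot go away while any csrow kobject is still live. The generic shape of that pattern, as a sketch with hypothetical names (not the driver's types):

#include <linux/kobject.h>

struct child {
	struct kobject kobj;
	struct kobject *parent;
};

static void child_release(struct kobject *kobj)
{
	struct child *c = container_of(kobj, struct child, kobj);

	kobject_put(c->parent);		/* drop the reference taken below */
}

static struct kobj_type child_ktype = {
	.release = child_release,
};

static int child_create(struct kobject *parent, struct child *c, int idx)
{
	int err;

	c->kobj.parent = parent;
	c->kobj.ktype = &child_ktype;

	err = kobject_set_name(&c->kobj, "child%d", idx);
	if (err)
		return err;

	c->parent = kobject_get(parent);	/* pin the parent */
	if (!c->parent)
		return -ENODEV;

	err = kobject_register(&c->kobj);
	if (err)
		kobject_put(parent);		/* undo the pin on failure */
	return err;
}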
+
+/* default sysfs methods and data structures for the main MCI kobject */
+
+static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ int row, chan;
+
+ mci->ue_noinfo_count = 0;
+ mci->ce_noinfo_count = 0;
+ mci->ue_count = 0;
+ mci->ce_count = 0;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ struct csrow_info *ri = &mci->csrows[row];
+
+ ri->ue_count = 0;
+ ri->ce_count = 0;
+
+ for (chan = 0; chan < ri->nr_channels; chan++)
+ ri->channels[chan].ce_count = 0;
+ }
+
+ mci->start_time = jiffies;
+ return count;
+}
+
+/* memory scrubbing */
+static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ u32 bandwidth = -1;
+
+ if (mci->set_sdram_scrub_rate) {
+
+ memctrl_int_store(&bandwidth, data, count);
+
+ if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate set successfully, applied: %d\n",
+ bandwidth);
+ } else {
+ /* FIXME: error codes maybe? */
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate set FAILED, could not apply: %d\n",
+ bandwidth);
+ }
+ } else {
+ /* FIXME: produce "not implemented" ERROR for user-side. */
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "Memory scrubbing 'set'control is not implemented!\n");
+ }
+ return count;
+}
+
+static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
+{
+ u32 bandwidth = -1;
+
+ if (mci->get_sdram_scrub_rate) {
+ if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate successfully, fetched: %d\n",
+ bandwidth);
+ } else {
+ /* FIXME: error codes maybe? */
+ edac_printk(KERN_DEBUG, EDAC_MC,
+ "Scrub rate fetch FAILED, got: %d\n",
+ bandwidth);
+ }
+ } else {
+ /* FIXME: produce "not implemented" ERROR for user-side. */
+ edac_printk(KERN_WARNING, EDAC_MC,
+ "Memory scrubbing 'get' control is not implemented\n");
+ }
+ return sprintf(data, "%d\n", bandwidth);
+}
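Both store and show above simply dispatch to optional per-driver callbacks on the mci; judging from the call sites, each callback takes the mci plus a pointer to the bandwidth value and returns 0 on success. A hypothetical driver-side skeleton (names and internals invented):

static int mydrv_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	/* program the hardware for roughly *bw, then write back the rate
	 * actually applied through *bw; returning 0 means success */
	return 0;
}

static int mydrv_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	*bw = 0;	/* read the currently programmed rate here */
	return 0;
}

	/* hooked up during probe, after allocating the mci: */
	mci->set_sdram_scrub_rate = mydrv_set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = mydrv_get_sdram_scrub_rate;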
+
+/* default attribute files for the MCI object */
+static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ue_count);
+}
+
+static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ce_count);
+}
+
+static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ce_noinfo_count);
+}
+
+static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%d\n", mci->ue_noinfo_count);
+}
+
+static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
+}
+
+static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+{
+ return sprintf(data, "%s\n", mci->ctl_name);
+}
+
+static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+{
+ int total_pages, csrow_idx;
+
+ for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
+ csrow_idx++) {
+ struct csrow_info *csrow = &mci->csrows[csrow_idx];
+
+ if (!csrow->nr_pages)
+ continue;
+
+ total_pages += csrow->nr_pages;
+ }
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
+#define to_mcidev_attr(a) container_of(a, struct mcidev_sysfs_attribute, attr)
+
+/* MCI show/store functions for top most object */
+static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+
+ return -EIO;
+}
+
+static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+ return -EIO;
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops mci_ops = {
+ .show = mcidev_show,
+ .store = mcidev_store
+};
+
+#define MCIDEV_ATTR(_name,_mode,_show,_store) \
+static struct mcidev_sysfs_attribute mci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* default Control file */
+MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
+
+/* default Attribute files */
+MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
+MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
+MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
+MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
+MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
+MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
+MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+
+/* memory scrubber attribute file */
+MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
+ mci_sdram_scrub_rate_store);
+
+static struct mcidev_sysfs_attribute *mci_attr[] = {
+ &mci_attr_reset_counters,
+ &mci_attr_mc_name,
+ &mci_attr_size_mb,
+ &mci_attr_seconds_since_reset,
+ &mci_attr_ue_noinfo_count,
+ &mci_attr_ce_noinfo_count,
+ &mci_attr_ue_count,
+ &mci_attr_ce_count,
+ &mci_attr_sdram_scrub_rate,
+ NULL
+};
+
+
+/*
+ * Release of a MC controlling instance
+ *
+ * each MC control instance has the following resources upon entry:
+ * a) a ref count on the top memctl kobj
+ * b) a ref count on this module
+ *
+ * this function must decrement those ref counts and then
+ * issue a free on the instance's memory
+ */
+static void edac_mci_control_release(struct kobject *kobj)
+{
+ struct mem_ctl_info *mci;
+
+ mci = to_mci(kobj);
+
+ debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
+
+ /* decrement the module ref count */
+ module_put(mci->owner);
+
+ /* free the mci instance memory here */
+ kfree(mci);
+}
+
+static struct kobj_type ktype_mci = {
+ .release = edac_mci_control_release,
+ .sysfs_ops = &mci_ops,
+ .default_attrs = (struct attribute **)mci_attr,
+};
+
+/* show/store, tables, etc for the MC kset */
+
+
+struct memctrl_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t(*show) (void *, char *);
+ ssize_t(*store) (void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for memory control object */
+static ssize_t memctrl_dev_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct memctrl_dev_attribute *memctrl_dev;
+ memctrl_dev = (struct memctrl_dev_attribute *)attr;
+
+ if (memctrl_dev->show)
+ return memctrl_dev->show(memctrl_dev->value, buffer);
+
+ return -EIO;
+}
+
+static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct memctrl_dev_attribute *memctrl_dev;
+ memctrl_dev = (struct memctrl_dev_attribute *)attr;
+
+ if (memctrl_dev->store)
+ return memctrl_dev->store(memctrl_dev->value, buffer, count);
+
+ return -EIO;
+}
+
+static struct sysfs_ops memctrlfs_ops = {
+ .show = memctrl_dev_show,
+ .store = memctrl_dev_store
+};
+
+#define MEMCTRL_ATTR(_name, _mode, _show, _store) \
+static struct memctrl_dev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define MEMCTRL_STRING_ATTR(_name, _data, _mode, _show, _store) \
+static struct memctrl_dev_attribute attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* module-wide EDAC MC control files (appear in the 'mc' directory) */
+MEMCTRL_ATTR(edac_mc_panic_on_ue,
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+MEMCTRL_ATTR(edac_mc_log_ue,
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+MEMCTRL_ATTR(edac_mc_log_ce,
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+MEMCTRL_ATTR(edac_mc_poll_msec,
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+
+/* Base Attributes of the memory ECC object */
+static struct memctrl_dev_attribute *memctrl_attr[] = {
+ &attr_edac_mc_panic_on_ue,
+ &attr_edac_mc_log_ue,
+ &attr_edac_mc_log_ce,
+ &attr_edac_mc_poll_msec,
+ NULL,
+};
+
+
+/* the ktype for the mc_kset internal kobj */
+static struct kobj_type ktype_mc_set_attribs = {
+ .sysfs_ops = &memctrlfs_ops,
+ .default_attrs = (struct attribute **)memctrl_attr,
+};
+
+/* EDAC memory controller sysfs kset:
+ * /sys/devices/system/edac/mc
+ */
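+/*
+ * Note: the kset's embedded kobject uses ktype_mc_set_attribs so that the
+ * 'mc' directory itself carries the module-wide control files declared
+ * above, while the kset ktype (ktype_mci) is the type picked up by each
+ * child mc<id> kobject registered into this kset.
+ */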
+static struct kset mc_kset = {
+ .kobj = {.name = "mc", .ktype = &ktype_mc_set_attribs },
+ .ktype = &ktype_mci,
+};
+
+
+/*
+ * edac_mc_register_sysfs_main_kobj
+ *
+ *	sets up and registers the main kobject for each mci
+ */
+int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+ struct kobject *kobj_mci;
+ int err;
+
+ debugf1("%s()\n", __func__);
+
+ kobj_mci = &mci->edac_mci_kobj;
+
+ /* Init the mci's kobject */
+ memset(kobj_mci, 0, sizeof(*kobj_mci));
+
+	/* this instance becomes part of the mc_kset */
+ kobj_mci->kset = &mc_kset;
+
+ /* set the name of the mc<id> object */
+ err = kobject_set_name(kobj_mci, "mc%d", mci->mc_idx);
+ if (err)
+ goto fail_out;
+
+ /* Record which module 'owns' this control structure
+ * and bump the ref count of the module
+ */
+ mci->owner = THIS_MODULE;
+
+ /* bump ref count on this module */
+ if (!try_module_get(mci->owner)) {
+ err = -ENODEV;
+ goto fail_out;
+ }
+
+ /* register the mc<id> kobject to the mc_kset */
+ err = kobject_register(kobj_mci);
+ if (err) {
+		debugf1("%s() Failed to register '.../edac/mc%d'\n",
+ __func__, mci->mc_idx);
+ goto kobj_reg_fail;
+ }
+
+ /* At this point, to 'free' the control struct,
+ * edac_mc_unregister_sysfs_main_kobj() must be used
+ */
+
+ debugf1("%s() Registered '.../edac/mc%d' kobject\n",
+ __func__, mci->mc_idx);
+
+ return 0;
+
+ /* Error exit stack */
+
+kobj_reg_fail:
+ module_put(mci->owner);
+
+fail_out:
+ return err;
+}
+
+/*
+ * edac_mc_unregister_sysfs_main_kobj
+ *
+ * tears down the main mci kobject from the mc_kset
+ */
+void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
+{
+ /* delete the kobj from the mc_kset */
+ kobject_unregister(&mci->edac_mci_kobj);
+}
+
+#define EDAC_DEVICE_SYMLINK "device"
+
+/*
+ * edac_create_mci_instance_attributes
+ * create MC driver specific attributes at the topmost level
+ * directory of this mci instance.
+ */
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+ int err;
+ struct mcidev_sysfs_attribute *sysfs_attrib;
+
+ /* point to the start of the array and iterate over it
+ * adding each attribute listed to this mci instance's kobject
+ */
+ sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+ while (sysfs_attrib && sysfs_attrib->attr.name) {
+ err = sysfs_create_file(&mci->edac_mci_kobj,
+ (struct attribute*) sysfs_attrib);
+ if (err) {
+ return err;
+ }
+
+ sysfs_attrib++;
+ }
+
+ return 0;
+}
+
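+/*
+ * Illustrative sketch (not part of this change): a low-level MC driver
+ * can hook extra per-instance controls in by pointing
+ * mci->mc_driver_sysfs_attributes at a NULL-terminated array before the
+ * mci is registered, so that edac_create_sysfs_mci_device() below picks
+ * them up, e.g.:
+ *
+ *   static ssize_t foo_inject_store(struct mem_ctl_info *mci,
+ *                                   const char *buf, size_t count);
+ *
+ *   static struct mcidev_sysfs_attribute foo_sysfs_attrs[] = {
+ *       { .attr = { .name = "inject", .mode = S_IWUSR },
+ *         .store = foo_inject_store },
+ *       { }     <-- NULL .attr.name terminates the loops above/below
+ *   };
+ *
+ *   mci->mc_driver_sysfs_attributes = foo_sysfs_attrs;
+ *
+ * The foo_* names are hypothetical.
+ */
+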
+/*
+ * edac_remove_mci_instance_attributes
+ * remove MC driver specific attributes at the topmost level
+ * directory of this mci instance.
+ */
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+{
+ struct mcidev_sysfs_attribute *sysfs_attrib;
+
+	/* point to the start of the array and iterate over it
+	 * removing each attribute listed from this mci instance's kobject
+ */
+ sysfs_attrib = mci->mc_driver_sysfs_attributes;
+
+ /* loop if there are attributes and until we hit a NULL entry */
+ while (sysfs_attrib && sysfs_attrib->attr.name) {
+ sysfs_remove_file(&mci->edac_mci_kobj,
+ (struct attribute *) sysfs_attrib);
+ sysfs_attrib++;
+ }
+}
+
+
+/*
+ * Create a new Memory Controller kobject instance,
+ * mc<id> under the 'mc' directory
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+ int err;
+ struct csrow_info *csrow;
+ struct kobject *kobj_mci = &mci->edac_mci_kobj;
+
+ debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+
+ /* create a symlink for the device */
+ err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
+ EDAC_DEVICE_SYMLINK);
+ if (err) {
+ debugf1("%s() failure to create symlink\n", __func__);
+ goto fail0;
+ }
+
+ /* If the low level driver desires some attributes,
+ * then create them now for the driver.
+ */
+ if (mci->mc_driver_sysfs_attributes) {
+ err = edac_create_mci_instance_attributes(mci);
+ if (err) {
+ debugf1("%s() failure to create mci attributes\n",
+ __func__);
+ goto fail0;
+ }
+ }
+
+ /* Make directories for each CSROW object under the mc<id> kobject
+ */
+ for (i = 0; i < mci->nr_csrows; i++) {
+ csrow = &mci->csrows[i];
+
+ /* Only expose populated CSROWs */
+ if (csrow->nr_pages > 0) {
+ err = edac_create_csrow_object(mci, csrow, i);
+ if (err) {
+ debugf1("%s() failure: create csrow %d obj\n",
+ __func__, i);
+ goto fail1;
+ }
+ }
+ }
+
+ return 0;
+
+	/* CSROW error: back out what has already been registered */
+fail1:
+	for (i--; i >= 0; i--) {
+		if (mci->csrows[i].nr_pages > 0)
+			kobject_unregister(&mci->csrows[i].kobj);
+	}
+
+ /* remove the mci instance's attributes, if any */
+ edac_remove_mci_instance_attributes(mci);
+
+ /* remove the symlink */
+ sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
+
+fail0:
+ return err;
+}
+
+/*
+ * remove a Memory Controller instance
+ */
+void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+{
+ int i;
+
+ debugf0("%s()\n", __func__);
+
+ /* remove all csrow kobjects */
+ for (i = 0; i < mci->nr_csrows; i++) {
+ if (mci->csrows[i].nr_pages > 0) {
+ debugf0("%s() unreg csrow-%d\n", __func__, i);
+ kobject_unregister(&mci->csrows[i].kobj);
+ }
+ }
+
+ debugf0("%s() remove_link\n", __func__);
+
+ /* remove the symlink */
+ sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
+
+ debugf0("%s() remove_mci_instance\n", __func__);
+
+	/* remove this mci instance's attributes */
+ edac_remove_mci_instance_attributes(mci);
+
+ debugf0("%s() unregister this mci kobj\n", __func__);
+
+ /* unregister this instance's kobject */
+ kobject_unregister(&mci->edac_mci_kobj);
+}
+
+
+/*
+ * edac_sysfs_setup_mc_kset(void)
+ *
+ * Initialize the mc_kset for the 'mc' entry
+ * This requires creating the top 'mc' directory with a kset
+ * and its controls/attributes.
+ *
+ *	Each 'mci' instance will then be grouped as a child of this 'mc' kset.
+ *
+ * Return: 0 SUCCESS
+ * !0 FAILURE error code
+ */
+int edac_sysfs_setup_mc_kset(void)
+{
+ int err = 0;
+ struct sysdev_class *edac_class;
+
+ debugf1("%s()\n", __func__);
+
+ /* get the /sys/devices/system/edac class reference */
+ edac_class = edac_get_edac_class();
+ if (edac_class == NULL) {
+		debugf1("%s() no edac_class\n", __func__);
+		err = -ENODEV;
+		goto fail_out;
+ }
+
+ /* Init the MC's kobject */
+ mc_kset.kobj.parent = &edac_class->kset.kobj;
+
+ /* register the mc_kset */
+ err = kset_register(&mc_kset);
+ if (err) {
+ debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
+ goto fail_out;
+ }
+
+ debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
+
+ return 0;
+
+
+ /* error unwind stack */
+fail_out:
+ return err;
+}
+
+/*
+ * edac_sysfs_teardown_mc_kset
+ *
+ * deconstruct the mc_kset for memory controllers
+ */
+void edac_sysfs_teardown_mc_kset(void)
+{
+ kset_unregister(&mc_kset);
+}
+
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
new file mode 100644
index 000000000000..e0c4a4086055
--- /dev/null
+++ b/drivers/edac/edac_module.c
@@ -0,0 +1,222 @@
+/*
+ * edac_module.c
+ *
+ * (C) 2007 www.softwarebitmaker.com
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Author: Doug Thompson <dougthompson@xmission.com>
+ *
+ */
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define EDAC_VERSION "Ver: 2.1.0 " __DATE__
+
+#ifdef CONFIG_EDAC_DEBUG
+/* Values of 0 to 4 will generate output */
+int edac_debug_level = 2;
+EXPORT_SYMBOL_GPL(edac_debug_level);
+#endif
+
+/* scope is intentionally limited to the EDAC core module */
+struct workqueue_struct *edac_workqueue;
+
+/*
+ * sysfs object: /sys/devices/system/edac
+ *	needs to be exported to other files in this module
+ */
+static struct sysdev_class edac_class = {
+ set_kset_name("edac"),
+};
+static int edac_class_valid;
+
+/*
+ * edac_op_state_to_string()
+ */
+char *edac_op_state_to_string(int opstate)
+{
+ if (opstate == OP_RUNNING_POLL)
+ return "POLLED";
+ else if (opstate == OP_RUNNING_INTERRUPT)
+ return "INTERRUPT";
+ else if (opstate == OP_RUNNING_POLL_INTR)
+ return "POLL-INTR";
+ else if (opstate == OP_ALLOC)
+ return "ALLOC";
+ else if (opstate == OP_OFFLINE)
+ return "OFFLINE";
+
+ return "UNKNOWN";
+}
+
+/*
+ * edac_get_edac_class()
+ *
+ * return pointer to the edac class of 'edac'
+ */
+struct sysdev_class *edac_get_edac_class(void)
+{
+ struct sysdev_class *classptr = NULL;
+
+ if (edac_class_valid)
+ classptr = &edac_class;
+
+ return classptr;
+}
+
+/*
+ * edac_register_sysfs_edac_name()
+ *
+ * register the 'edac' into /sys/devices/system
+ *
+ * return:
+ * 0 success
+ * !0 error
+ */
+static int edac_register_sysfs_edac_name(void)
+{
+ int err;
+
+ /* create the /sys/devices/system/edac directory */
+ err = sysdev_class_register(&edac_class);
+
+ if (err) {
+ debugf1("%s() error=%d\n", __func__, err);
+ return err;
+ }
+
+ edac_class_valid = 1;
+ return 0;
+}
+
+/*
+ * edac_unregister_sysfs_edac_name()
+ *
+ * unregister the 'edac' class from /sys/devices/system
+ */
+static void edac_unregister_sysfs_edac_name(void)
+{
+ /* only if currently registered, then unregister it */
+ if (edac_class_valid)
+ sysdev_class_unregister(&edac_class);
+
+ edac_class_valid = 0;
+}
+
+/*
+ * edac_workqueue_setup
+ * initialize the edac work queue for polling operations
+ */
+static int edac_workqueue_setup(void)
+{
+ edac_workqueue = create_singlethread_workqueue("edac-poller");
+ if (edac_workqueue == NULL)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+/*
+ * edac_workqueue_teardown
+ * teardown the edac workqueue
+ */
+static void edac_workqueue_teardown(void)
+{
+ if (edac_workqueue) {
+ flush_workqueue(edac_workqueue);
+ destroy_workqueue(edac_workqueue);
+ edac_workqueue = NULL;
+ }
+}
+
+/*
+ * edac_init
+ * module initialization entry point
+ */
+static int __init edac_init(void)
+{
+ int err = 0;
+
+ edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n");
+
+ /*
+ * Harvest and clear any boot/initialization PCI parity errors
+ *
+ * FIXME: This only clears errors logged by devices present at time of
+ * module initialization. We should also do an initial clear
+ * of each newly hotplugged device.
+ */
+ edac_pci_clear_parity_errors();
+
+ /*
+ * perform the registration of the /sys/devices/system/edac class object
+ */
+ if (edac_register_sysfs_edac_name()) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Error initializing 'edac' kobject\n");
+ err = -ENODEV;
+ goto error;
+ }
+
+ /*
+ * now set up the mc_kset under the edac class object
+ */
+ err = edac_sysfs_setup_mc_kset();
+ if (err)
+ goto sysfs_setup_fail;
+
+ /* Setup/Initialize the workq for this core */
+ err = edac_workqueue_setup();
+ if (err) {
+ edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
+ goto workq_fail;
+ }
+
+ return 0;
+
+ /* Error teardown stack */
+workq_fail:
+ edac_sysfs_teardown_mc_kset();
+
+sysfs_setup_fail:
+ edac_unregister_sysfs_edac_name();
+
+error:
+ return err;
+}
+
+/*
+ * edac_exit()
+ * module exit/termination function
+ */
+static void __exit edac_exit(void)
+{
+ debugf0("%s()\n", __func__);
+
+ /* tear down the various subsystems */
+ edac_workqueue_teardown();
+ edac_sysfs_teardown_mc_kset();
+ edac_unregister_sysfs_edac_name();
+}
+
+/*
+ * Inform the kernel of our entry and exit points
+ */
+module_init(edac_init);
+module_exit(edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al");
+MODULE_DESCRIPTION("Core library routines for EDAC reporting");
+
+/* refer to *_sysfs.c files for parameters that are exported via sysfs */
+
+#ifdef CONFIG_EDAC_DEBUG
+module_param(edac_debug_level, int, 0644);
+MODULE_PARM_DESC(edac_debug_level, "Debug level");
+#endif
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
new file mode 100644
index 000000000000..a2134dfc3cc6
--- /dev/null
+++ b/drivers/edac/edac_module.h
@@ -0,0 +1,77 @@
+
+/*
+ * edac_module.h
+ *
+ * For defining functions/data for within the EDAC_CORE module only
+ *
+ * Written by Doug Thompson <norsk5@xmission.com>
+ */
+
+#ifndef __EDAC_MODULE_H__
+#define __EDAC_MODULE_H__
+
+#include <linux/sysdev.h>
+
+#include "edac_core.h"
+
+/*
+ * INTERNAL EDAC MODULE:
+ * EDAC memory controller sysfs create/remove functions
+ * and setup/teardown functions
+ *
+ * edac_mc objects
+ */
+extern int edac_sysfs_setup_mc_kset(void);
+extern void edac_sysfs_teardown_mc_kset(void);
+extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
+extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
+extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
+extern void edac_check_mc_devices(void);
+extern int edac_get_log_ue(void);
+extern int edac_get_log_ce(void);
+extern int edac_get_panic_on_ue(void);
+extern int edac_mc_get_log_ue(void);
+extern int edac_mc_get_log_ce(void);
+extern int edac_mc_get_panic_on_ue(void);
+extern int edac_get_poll_msec(void);
+extern int edac_mc_get_poll_msec(void);
+
+extern int edac_device_register_sysfs_main_kobj(
+ struct edac_device_ctl_info *edac_dev);
+extern void edac_device_unregister_sysfs_main_kobj(
+ struct edac_device_ctl_info *edac_dev);
+extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev);
+extern struct sysdev_class *edac_get_edac_class(void);
+
+/* edac core workqueue: single CPU mode */
+extern struct workqueue_struct *edac_workqueue;
+extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+ unsigned msec);
+extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
+extern void edac_device_reset_delay_period(struct edac_device_ctl_info
+ *edac_dev, unsigned long value);
+extern void *edac_align_ptr(void *ptr, unsigned size);
+
+/*
+ * EDAC PCI functions
+ */
+#ifdef CONFIG_PCI
+extern void edac_pci_do_parity_check(void);
+extern void edac_pci_clear_parity_errors(void);
+extern int edac_sysfs_pci_setup(void);
+extern void edac_sysfs_pci_teardown(void);
+extern int edac_pci_get_check_errors(void);
+extern int edac_pci_get_poll_msec(void);
+#else /* CONFIG_PCI */
+/* pre-process these away */
+#define edac_pci_do_parity_check()
+#define edac_pci_clear_parity_errors()
+#define edac_sysfs_pci_setup() (0)
+#define edac_sysfs_pci_teardown()
+#define edac_pci_get_check_errors()
+#define edac_pci_get_poll_msec()
+#endif /* CONFIG_PCI */
+
+#endif /* __EDAC_MODULE_H__ */
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
new file mode 100644
index 000000000000..d9cd5e048cee
--- /dev/null
+++ b/drivers/edac/edac_pci.c
@@ -0,0 +1,433 @@
+/*
+ * EDAC PCI component
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/highmem.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+static DEFINE_MUTEX(edac_pci_ctls_mutex);
+static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list);
+
+static inline void edac_lock_pci_list(void)
+{
+ mutex_lock(&edac_pci_ctls_mutex);
+}
+
+static inline void edac_unlock_pci_list(void)
+{
+ mutex_unlock(&edac_pci_ctls_mutex);
+}
+
+/*
+ * The alloc() and free() functions for the 'edac_pci' control info
+ * structure. The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
+ */
+struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+ const char *edac_pci_name)
+{
+ struct edac_pci_ctl_info *pci;
+ void *pvt;
+ unsigned int size;
+
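+	/*
+	 * Figure the layout of a single allocation that holds both the
+	 * control structure and the driver's private data: with a NULL
+	 * base pointer, &pci[1] is the byte offset just past the control
+	 * structure, which edac_align_ptr() aligns for the private area;
+	 * 'size' is then that offset plus the private data size.
+	 */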
+ pci = (struct edac_pci_ctl_info *)0;
+ pvt = edac_align_ptr(&pci[1], sz_pvt);
+ size = ((unsigned long)pvt) + sz_pvt;
+
+ if ((pci = kzalloc(size, GFP_KERNEL)) == NULL)
+ return NULL;
+
+ pvt = sz_pvt ? ((char *)pci) + ((unsigned long)pvt) : NULL;
+
+ pci->pvt_info = pvt;
+
+ pci->op_state = OP_ALLOC;
+
+ snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name);
+
+ return pci;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
+
+/*
+ * edac_pci_free_ctl_info()
+ * frees the memory allocated by edac_pci_alloc_ctl_info() function
+ */
+void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
+{
+ kfree(pci);
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info);
+
+/*
+ * find_edac_pci_by_dev()
+ * scans the edac_pci list for a specific 'struct device *'
+ */
+static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct list_head *item;
+
+ debugf3("%s()\n", __func__);
+
+ list_for_each(item, &edac_pci_list) {
+ pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+ if (pci->dev == dev)
+ return pci;
+ }
+
+ return NULL;
+}
+
+/*
+ * add_edac_pci_to_global_list
+ * Before calling this function, caller must assign a unique value to
+ *	pci->pci_idx.
+ * Return:
+ * 0 on success
+ * 1 on failure
+ */
+static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
+{
+ struct list_head *item, *insert_before;
+ struct edac_pci_ctl_info *rover;
+
+ insert_before = &edac_pci_list;
+
+ /* Determine if already on the list */
+ if (unlikely((rover = find_edac_pci_by_dev(pci->dev)) != NULL))
+ goto fail0;
+
+ /* Insert in ascending order by 'pci_idx', so find position */
+ list_for_each(item, &edac_pci_list) {
+ rover = list_entry(item, struct edac_pci_ctl_info, link);
+
+ if (rover->pci_idx >= pci->pci_idx) {
+ if (unlikely(rover->pci_idx == pci->pci_idx))
+ goto fail1;
+
+ insert_before = item;
+ break;
+ }
+ }
+
+ list_add_tail_rcu(&pci->link, insert_before);
+ return 0;
+
+fail0:
+ edac_printk(KERN_WARNING, EDAC_PCI,
+ "%s (%s) %s %s already assigned %d\n",
+ rover->dev->bus_id, dev_name(rover),
+ rover->mod_name, rover->ctl_name, rover->pci_idx);
+ return 1;
+
+fail1:
+ edac_printk(KERN_WARNING, EDAC_PCI,
+ "but in low-level driver: attempt to assign\n"
+ "\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
+ __func__);
+ return 1;
+}
+
+/*
+ * complete_edac_pci_list_del
+ */
+static void complete_edac_pci_list_del(struct rcu_head *head)
+{
+ struct edac_pci_ctl_info *pci;
+
+ pci = container_of(head, struct edac_pci_ctl_info, rcu);
+ INIT_LIST_HEAD(&pci->link);
+ complete(&pci->complete);
+}
+
+/*
+ * del_edac_pci_from_global_list
+ */
+static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
+{
+ list_del_rcu(&pci->link);
+ init_completion(&pci->complete);
+ call_rcu(&pci->rcu, complete_edac_pci_list_del);
+ wait_for_completion(&pci->complete);
+}
+
+/*
+ * edac_pci_find()
+ * Search for an edac_pci_ctl_info structure whose index is 'idx'
+ *
+ * If found, return a pointer to the structure
+ * Else return NULL.
+ *
+ * Caller must hold pci_ctls_mutex.
+ */
+struct edac_pci_ctl_info *edac_pci_find(int idx)
+{
+ struct list_head *item;
+ struct edac_pci_ctl_info *pci;
+
+	/* Iterate over the list, looking for an exact match of the ID */
+ list_for_each(item, &edac_pci_list) {
+ pci = list_entry(item, struct edac_pci_ctl_info, link);
+
+ if (pci->pci_idx >= idx) {
+ if (pci->pci_idx == idx)
+ return pci;
+
+ /* not on list, so terminate early */
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_find);
+
+/*
+ * edac_pci_workq_function()
+ * performs the operation scheduled by a workq request
+ */
+static void edac_pci_workq_function(struct work_struct *work_req)
+{
+ struct delayed_work *d_work = (struct delayed_work *)work_req;
+ struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work);
+
+ edac_lock_pci_list();
+
+ if ((pci->op_state == OP_RUNNING_POLL) &&
+ (pci->edac_check != NULL) && (edac_pci_get_check_errors()))
+ pci->edac_check(pci);
+
+ edac_unlock_pci_list();
+
+ /* Reschedule */
+ queue_delayed_work(edac_workqueue, &pci->work,
+ msecs_to_jiffies(edac_pci_get_poll_msec()));
+}
+
+/*
+ * edac_pci_workq_setup()
+ * initialize a workq item for this edac_pci instance
+ * passing in the new delay period in msec
+ */
+static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+ unsigned int msec)
+{
+ debugf0("%s()\n", __func__);
+
+ INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+ queue_delayed_work(edac_workqueue, &pci->work,
+ msecs_to_jiffies(edac_pci_get_poll_msec()));
+}
+
+/*
+ * edac_pci_workq_teardown()
+ * stop the workq processing on this edac_pci instance
+ */
+static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+{
+ int status;
+
+ status = cancel_delayed_work(&pci->work);
+ if (status == 0)
+ flush_workqueue(edac_workqueue);
+}
+
+/*
+ * edac_pci_reset_delay_period
+ */
+void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
+ unsigned long value)
+{
+ edac_lock_pci_list();
+
+ edac_pci_workq_teardown(pci);
+
+ edac_pci_workq_setup(pci, value);
+
+ edac_unlock_pci_list();
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
+
+/*
+ * edac_pci_add_device: Insert the 'edac_pci' structure into the
+ * edac_pci global list and create sysfs entries associated with the
+ * edac_pci structure.
+ * @pci: pointer to the edac_pci structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ * 'edac_pci' structure.
+ *
+ * Return:
+ * 0 Success
+ * !0 Failure
+ */
+int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
+{
+ debugf0("%s()\n", __func__);
+
+ pci->pci_idx = edac_idx;
+
+ edac_lock_pci_list();
+
+ if (add_edac_pci_to_global_list(pci))
+ goto fail0;
+
+ pci->start_time = jiffies;
+
+ if (edac_pci_create_sysfs(pci)) {
+ edac_pci_printk(pci, KERN_WARNING,
+ "failed to create sysfs pci\n");
+ goto fail1;
+ }
+
+ if (pci->edac_check != NULL) {
+ pci->op_state = OP_RUNNING_POLL;
+
+ edac_pci_workq_setup(pci, 1000);
+ } else {
+ pci->op_state = OP_RUNNING_INTERRUPT;
+ }
+
+ edac_pci_printk(pci, KERN_INFO,
+ "Giving out device to module '%s' controller '%s':"
+ " DEV '%s' (%s)\n",
+ pci->mod_name,
+ pci->ctl_name,
+ dev_name(pci), edac_op_state_to_string(pci->op_state));
+
+ edac_unlock_pci_list();
+ return 0;
+
+fail1:
+ del_edac_pci_from_global_list(pci);
+fail0:
+ edac_unlock_pci_list();
+ return 1;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_add_device);
+
+/*
+ * edac_pci_del_device()
+ * Remove sysfs entries for specified edac_pci structure and
+ * then remove edac_pci structure from global list
+ *
+ * @dev:
+ * Pointer to 'struct device' representing edac_pci structure
+ * to remove
+ *
+ * Return:
+ * Pointer to removed edac_pci structure,
+ * or NULL if device not found
+ */
+struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
+{
+ struct edac_pci_ctl_info *pci;
+
+ debugf0("%s()\n", __func__);
+
+ edac_lock_pci_list();
+
+ if ((pci = find_edac_pci_by_dev(dev)) == NULL) {
+ edac_unlock_pci_list();
+ return NULL;
+ }
+
+ pci->op_state = OP_OFFLINE;
+
+ edac_pci_workq_teardown(pci);
+
+ edac_pci_remove_sysfs(pci);
+
+ del_edac_pci_from_global_list(pci);
+
+ edac_unlock_pci_list();
+
+ edac_printk(KERN_INFO, EDAC_PCI,
+ "Removed device %d for %s %s: DEV %s\n",
+ pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci));
+
+ return pci;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_del_device);
+
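+/*
+ * Illustrative call sequence (hypothetical driver, not part of this
+ * change): a chip driver that wants its own polled PCI error check
+ * would typically do, in its probe routine:
+ *
+ *   pci = edac_pci_alloc_ctl_info(sizeof(struct foo_pvt), "foo_pci");
+ *   if (!pci)
+ *       return -ENOMEM;
+ *   pci->dev = &pdev->dev;
+ *   pci->dev_name = pci_name(pdev);
+ *   pci->mod_name = "foo_module";
+ *   pci->ctl_name = "foo_ctl";
+ *   pci->edac_check = foo_pci_check;     (polled via the workqueue)
+ *   if (edac_pci_add_device(pci, foo_idx)) {
+ *       edac_pci_free_ctl_info(pci);
+ *       return -ENODEV;
+ *   }
+ *
+ * and on removal:
+ *
+ *   if (edac_pci_del_device(&pdev->dev))
+ *       edac_pci_free_ctl_info(pci);
+ *
+ * All foo_* names are hypothetical; drivers that only need the default
+ * parity scan can use edac_pci_create_generic_ctl() below instead.
+ */
+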
+void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
+{
+ edac_pci_do_parity_check();
+}
+
+static int edac_pci_idx;
+#define EDAC_PCI_GENCTL_NAME "EDAC PCI controller"
+
+struct edac_pci_gen_data {
+ int edac_idx;
+};
+
+struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
+ const char *mod_name)
+{
+ struct edac_pci_ctl_info *pci;
+ struct edac_pci_gen_data *pdata;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME);
+ if (!pci)
+ return NULL;
+
+ pdata = pci->pvt_info;
+ pci->dev = dev;
+ dev_set_drvdata(pci->dev, pci);
+ pci->dev_name = pci_name(to_pci_dev(dev));
+
+ pci->mod_name = mod_name;
+ pci->ctl_name = EDAC_PCI_GENCTL_NAME;
+ pci->edac_check = edac_pci_generic_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ edac_pci_free_ctl_info(pci);
+ return NULL;
+ }
+
+ return pci;
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
+
+void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
+{
+ edac_pci_del_device(pci->dev);
+ edac_pci_free_ctl_info(pci);
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
new file mode 100644
index 000000000000..fac94cae2c3d
--- /dev/null
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -0,0 +1,620 @@
+/*
+ * (C) 2005, 2006 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written Doug Thompson <norsk5@xmission.com>
+ *
+ */
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/ctype.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#ifdef CONFIG_PCI
+
+#define EDAC_PCI_SYMLINK "device"
+
+static int check_pci_errors;	/* default 0: do not check PCI parity */
+static int edac_pci_panic_on_pe; /* default no panic on PCI Parity */
+static int edac_pci_log_pe = 1; /* log PCI parity errors */
+static int edac_pci_log_npe = 1;	/* log PCI non-parity errors */
+static atomic_t pci_parity_count = ATOMIC_INIT(0);
+static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
+static int edac_pci_poll_msec = 1000;
+
+static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */
+static struct completion edac_pci_kobj_complete;
+static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
+
+int edac_pci_get_check_errors(void)
+{
+ return check_pci_errors;
+}
+
+int edac_pci_get_log_pe(void)
+{
+ return edac_pci_log_pe;
+}
+
+int edac_pci_get_log_npe(void)
+{
+ return edac_pci_log_npe;
+}
+
+int edac_pci_get_panic_on_pe(void)
+{
+ return edac_pci_panic_on_pe;
+}
+
+int edac_pci_get_poll_msec(void)
+{
+ return edac_pci_poll_msec;
+}
+
+/**************************** EDAC PCI sysfs instance *******************/
+static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data)
+{
+ return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count));
+}
+
+static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
+ char *data)
+{
+ return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
+}
+
+#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_instance_attr(a) container_of(a, struct instance_attribute, attr)
+
+/* DEVICE instance kobject release() function */
+static void edac_pci_instance_release(struct kobject *kobj)
+{
+ struct edac_pci_ctl_info *pci;
+
+ debugf1("%s()\n", __func__);
+
+ pci = to_instance(kobj);
+ complete(&pci->kobj_complete);
+}
+
+/* instance specific attribute structure */
+struct instance_attribute {
+ struct attribute attr;
+ ssize_t(*show) (struct edac_pci_ctl_info *, char *);
+ ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t);
+};
+
+/* Function to 'show' fields from the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct edac_pci_ctl_info *pci = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->show)
+ return instance_attr->show(pci, buffer);
+ return -EIO;
+}
+
+/* Function to 'store' fields into the edac_pci 'instance' structure */
+static ssize_t edac_pci_instance_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct edac_pci_ctl_info *pci = to_instance(kobj);
+ struct instance_attribute *instance_attr = to_instance_attr(attr);
+
+ if (instance_attr->store)
+ return instance_attr->store(pci, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops pci_instance_ops = {
+ .show = edac_pci_instance_show,
+ .store = edac_pci_instance_store
+};
+
+#define INSTANCE_ATTR(_name, _mode, _show, _store) \
+static struct instance_attribute attr_instance_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL);
+INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL);
+
+/* pci instance attributes */
+static struct instance_attribute *pci_instance_attr[] = {
+ &attr_instance_pe_count,
+ &attr_instance_npe_count,
+ NULL
+};
+
+/* the ktype for pci instance */
+static struct kobj_type ktype_pci_instance = {
+ .release = edac_pci_instance_release,
+ .sysfs_ops = &pci_instance_ops,
+ .default_attrs = (struct attribute **)pci_instance_attr,
+};
+
+static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+{
+ int err;
+
+ pci->kobj.parent = &edac_pci_kobj;
+ pci->kobj.ktype = &ktype_pci_instance;
+
+ err = kobject_set_name(&pci->kobj, "pci%d", idx);
+ if (err)
+ return err;
+
+ err = kobject_register(&pci->kobj);
+ if (err != 0) {
+ debugf2("%s() failed to register instance pci%d\n",
+ __func__, idx);
+ return err;
+ }
+
+ debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
+
+ return 0;
+}
+
+static void
+edac_pci_delete_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
+{
+ init_completion(&pci->kobj_complete);
+ kobject_unregister(&pci->kobj);
+ wait_for_completion(&pci->kobj_complete);
+}
+
+/***************************** EDAC PCI sysfs root **********************/
+#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj)
+#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr)
+
+static ssize_t edac_pci_int_show(void *ptr, char *buffer)
+{
+ int *value = ptr;
+ return sprintf(buffer, "%d\n", *value);
+}
+
+static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
+{
+ int *value = ptr;
+
+ if (isdigit(*buffer))
+ *value = simple_strtoul(buffer, NULL, 0);
+
+ return count;
+}
+
+struct edac_pci_dev_attribute {
+ struct attribute attr;
+ void *value;
+ ssize_t(*show) (void *, char *);
+ ssize_t(*store) (void *, const char *, size_t);
+};
+
+/* Set of show/store abstract level functions for PCI Parity object */
+static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+ if (edac_pci_dev->show)
+ return edac_pci_dev->show(edac_pci_dev->value, buffer);
+ return -EIO;
+}
+
+static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer,
+ size_t count)
+{
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+	if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+}
+
+static struct sysfs_ops edac_pci_sysfs_ops = {
+ .show = edac_pci_dev_show,
+ .store = edac_pci_dev_store
+};
+
+#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = &_name, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
+static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .value = _data, \
+ .show = _show, \
+ .store = _store, \
+};
+
+/* PCI Parity control files */
+EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
+ edac_pci_int_store);
+EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
+EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
+
+/* Base Attributes of the memory ECC object */
+static struct edac_pci_dev_attribute *edac_pci_attr[] = {
+ &edac_pci_attr_check_pci_errors,
+ &edac_pci_attr_edac_pci_log_pe,
+ &edac_pci_attr_edac_pci_log_npe,
+ &edac_pci_attr_edac_pci_panic_on_pe,
+ &edac_pci_attr_pci_parity_count,
+ &edac_pci_attr_pci_nonparity_count,
+ NULL,
+};
+
+/* No memory to release */
+static void edac_pci_release(struct kobject *kobj)
+{
+ struct edac_pci_ctl_info *pci;
+
+ pci = to_edacpci(kobj);
+
+ debugf1("%s()\n", __func__);
+ complete(&pci->kobj_complete);
+}
+
+static struct kobj_type ktype_edac_pci = {
+ .release = edac_pci_release,
+ .sysfs_ops = &edac_pci_sysfs_ops,
+ .default_attrs = (struct attribute **)edac_pci_attr,
+};
+
+/*
+ * edac_pci_register_main_kobj()
+ *
+ * setup the sysfs for EDAC PCI attributes
+ * assumes edac_class has already been initialized
+ */
+int edac_pci_register_main_kobj(void)
+{
+ int err;
+ struct sysdev_class *edac_class;
+
+ debugf1("%s()\n", __func__);
+
+ edac_class = edac_get_edac_class();
+ if (edac_class == NULL) {
+ debugf1("%s() no edac_class\n", __func__);
+ return -ENODEV;
+ }
+
+ edac_pci_kobj.ktype = &ktype_edac_pci;
+
+ edac_pci_kobj.parent = &edac_class->kset.kobj;
+
+ err = kobject_set_name(&edac_pci_kobj, "pci");
+ if (err)
+ return err;
+
+	/* Instantiate the pci object */
+ /* FIXME: maybe new sysdev_create_subdir() */
+ err = kobject_register(&edac_pci_kobj);
+
+ if (err) {
+ debugf1("Failed to register '.../edac/pci'\n");
+ return err;
+ }
+
+ debugf1("Registered '.../edac/pci' kobject\n");
+
+ return 0;
+}
+
+/*
+ * edac_pci_unregister_main_kobj()
+ *
+ * perform the sysfs teardown for the PCI attributes
+ */
+void edac_pci_unregister_main_kobj(void)
+{
+ debugf0("%s()\n", __func__);
+ init_completion(&edac_pci_kobj_complete);
+ kobject_unregister(&edac_pci_kobj);
+ wait_for_completion(&edac_pci_kobj_complete);
+}
+
+int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
+{
+ int err;
+ struct kobject *edac_kobj = &pci->kobj;
+
+ if (atomic_inc_return(&edac_pci_sysfs_refcount) == 1) {
+ err = edac_pci_register_main_kobj();
+ if (err) {
+ atomic_dec(&edac_pci_sysfs_refcount);
+ return err;
+ }
+ }
+
+ err = edac_pci_create_instance_kobj(pci, pci->pci_idx);
+	if (err) {
+		/* drop the main 'pci' kobject again if we were its only user */
+		if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0)
+			edac_pci_unregister_main_kobj();
+		return err;
+	}
+
+ debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
+
+ err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
+ if (err) {
+ debugf0("%s() sysfs_create_link() returned err= %d\n",
+ __func__, err);
+ return err;
+ }
+
+ return 0;
+}
+
+void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
+{
+ debugf0("%s()\n", __func__);
+
+ edac_pci_delete_instance_kobj(pci, pci->pci_idx);
+
+ sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
+
+ if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0)
+ edac_pci_unregister_main_kobj();
+}
+
+/************************ PCI error handling *************************/
+static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
+{
+ int where;
+ u16 status;
+
+ where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
+ pci_read_config_word(dev, where, &status);
+
+ /* If we get back 0xFFFF then we must suspect that the card has been
+ * pulled but the Linux PCI layer has not yet finished cleaning up.
+ * We don't want to report on such devices
+ */
+
+ if (status == 0xFFFF) {
+ u32 sanity;
+
+ pci_read_config_dword(dev, 0, &sanity);
+
+ if (sanity == 0xFFFFFFFF)
+ return 0;
+ }
+
+ status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_PARITY;
+
+ if (status)
+ /* reset only the bits we are interested in */
+ pci_write_config_word(dev, where, status);
+
+ return status;
+}
+
+typedef void (*pci_parity_check_fn_t) (struct pci_dev * dev);
+
+/* Clear any PCI parity errors logged by this device. */
+static void edac_pci_dev_parity_clear(struct pci_dev *dev)
+{
+ u8 header_type;
+
+ get_pci_parity_status(dev, 0);
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
+ get_pci_parity_status(dev, 1);
+}
+
+/*
+ * PCI Parity polling
+ *
+ */
+static void edac_pci_dev_parity_test(struct pci_dev *dev)
+{
+ u16 status;
+ u8 header_type;
+
+ /* read the STATUS register on this device
+ */
+ status = get_pci_parity_status(dev, 0);
+
+ debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+
+ /* check the status reg for errors */
+ if (status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+ atomic_inc(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Master Data Parity Error on %s\n",
+ pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI,
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+
+ /* read the device TYPE, looking for bridges */
+ pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+
+ debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* On bridges, need to examine secondary status register */
+ status = get_pci_parity_status(dev, 1);
+
+ debugf2("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
+
+ /* check the secondary status reg for errors */
+ if (status) {
+ if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Signaled System Error on %s\n",
+ pci_name(dev));
+ atomic_inc(&pci_nonparity_count);
+ }
+
+ if (status & (PCI_STATUS_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+
+ if (status & (PCI_STATUS_DETECTED_PARITY)) {
+ edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
+
+ atomic_inc(&pci_parity_count);
+ }
+ }
+ }
+}
+
+/*
+ * pci_dev parity list iterator
+ *	Scan the PCI device list for one iteration, looking for SERRs,
+ *	Master Parity errors, or Parity errors on primary or secondary devices
+ */
+static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
+{
+ struct pci_dev *dev = NULL;
+
+ /* request for kernel access to the next PCI device, if any,
+ * and while we are looking at it have its reference count
+ * bumped until we are done with it
+ */
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ fn(dev);
+ }
+}
+
+/*
+ * edac_pci_do_parity_check
+ *
+ * performs the actual PCI parity check operation
+ */
+void edac_pci_do_parity_check(void)
+{
+ unsigned long flags;
+ int before_count;
+
+ debugf3("%s()\n", __func__);
+
+ if (!check_pci_errors)
+ return;
+
+ before_count = atomic_read(&pci_parity_count);
+
+ /* scan all PCI devices looking for a Parity Error on devices and
+ * bridges
+ */
+ local_irq_save(flags);
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
+ local_irq_restore(flags);
+
+ /* Only if operator has selected panic on PCI Error */
+ if (edac_pci_get_panic_on_pe()) {
+ /* If the count is different 'after' from 'before' */
+ if (before_count != atomic_read(&pci_parity_count))
+ panic("EDAC: PCI Parity Error");
+ }
+}
+
+void edac_pci_clear_parity_errors(void)
+{
+ /* Clear any PCI bus parity errors that devices initially have logged
+ * in their registers.
+ */
+ edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
+}
+
+void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+
+	/* bump this instance's PE counter (the global parity counters are
+	 * updated by edac_pci_do_parity_check() below)
+	 */
+ atomic_inc(&pci->counters.pe_count);
+
+ if (edac_pci_get_log_pe())
+ edac_pci_printk(pci, KERN_WARNING,
+ "Parity Error ctl: %s %d: %s\n",
+ pci->ctl_name, pci->pci_idx, msg);
+
+ /*
+ * poke all PCI devices and see which one is the troublemaker
+ * panic() is called if set
+ */
+ edac_pci_do_parity_check();
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_handle_pe);
+
+void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg)
+{
+
+	/* bump this instance's NPE counter (the global counters are
+	 * updated by edac_pci_do_parity_check() below)
+	 */
+ atomic_inc(&pci->counters.npe_count);
+
+ if (edac_pci_get_log_npe())
+ edac_pci_printk(pci, KERN_WARNING,
+ "Non-Parity Error ctl: %s %d: %s\n",
+ pci->ctl_name, pci->pci_idx, msg);
+
+ /*
+ * poke all PCI devices and see which one is the troublemaker
+ * panic() is called if set
+ */
+ edac_pci_do_parity_check();
+}
+
+EXPORT_SYMBOL_GPL(edac_pci_handle_npe);
+
+/*
+ * Define the PCI parameter to the module
+ */
+module_param(check_pci_errors, int, 0644);
+MODULE_PARM_DESC(check_pci_errors,
+ "Check for PCI bus parity errors: 0=off 1=on");
+module_param(edac_pci_panic_on_pe, int, 0644);
+MODULE_PARM_DESC(edac_pci_panic_on_pe,
+ "Panic on PCI Bus Parity error: 0=off 1=on");
+
+#endif /* CONFIG_PCI */
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
new file mode 100644
index 000000000000..20b428aa155e
--- /dev/null
+++ b/drivers/edac/edac_stub.c
@@ -0,0 +1,46 @@
+/*
+ * common EDAC components that must be in kernel
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/edac.h>
+#include <asm/atomic.h>
+#include <asm/edac.h>
+
+int edac_op_state = EDAC_OPSTATE_INVAL;
+EXPORT_SYMBOL_GPL(edac_op_state);
+
+atomic_t edac_handlers = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(edac_handlers);
+
+int edac_err_assert = 0;
+EXPORT_SYMBOL_GPL(edac_err_assert);
+
+/*
+ * called to determine if there is an EDAC driver interested in
+ * knowing an event (such as NMI) occurred
+ */
+int edac_handler_set(void)
+{
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ return 0;
+
+ return atomic_read(&edac_handlers);
+}
+EXPORT_SYMBOL_GPL(edac_handler_set);
+
+/*
+ * handler for NMI type of interrupts to assert error
+ */
+void edac_atomic_assert_error(void)
+{
+ edac_err_assert++;
+}
+EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
new file mode 100644
index 000000000000..0ecfdc432f87
--- /dev/null
+++ b/drivers/edac/i3000_edac.c
@@ -0,0 +1,506 @@
+/*
+ * Intel 3000/3010 Memory Controller kernel module
+ * Copyright (C) 2007 Akamai Technologies, Inc.
+ * Shamelessly copied from:
+ * Intel D82875P Memory Controller kernel module
+ * (C) 2003 Linux Networx (http://lnxi.com)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_core.h"
+
+#define I3000_REVISION "1.1"
+
+#define EDAC_MOD_STR "i3000_edac"
+
+#define I3000_RANKS 8
+#define I3000_RANKS_PER_CHANNEL 4
+#define I3000_CHANNELS 2
+
+/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */
+
+#define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */
+#define I3000_MCHBAR_MASK 0xffffc000
+#define I3000_MMR_WINDOW_SIZE 16384
+
+#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
+ *
+ * 7:1 reserved
+ * 0 bit 32 of address
+ */
+#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
+ *
+ * 31:7 address
+ * 6:1 reserved
+ * 0 Error channel 0/1
+ */
+#define I3000_DEAP_GRAIN (1 << 7)
+#define I3000_DEAP_PFN(edeap, deap) ((((edeap) & 1) << (32 - PAGE_SHIFT)) | \
+ ((deap) >> PAGE_SHIFT))
+#define I3000_DEAP_OFFSET(deap) ((deap) & ~(I3000_DEAP_GRAIN-1) & ~PAGE_MASK)
+#define I3000_DEAP_CHANNEL(deap) ((deap) & 1)
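+
+/*
+ * Worked example (assuming PAGE_SHIFT == 12): for EDEAP = 0x01 and
+ * DEAP = 0x12345681,
+ *   I3000_DEAP_PFN()     = (1 << 20) | (0x12345681 >> 12) = 0x112345
+ *   I3000_DEAP_OFFSET()  = 0x12345681 & ~0x7f & 0xfff     = 0x680
+ *   I3000_DEAP_CHANNEL() = 0x12345681 & 1                 = 1 (channel 1)
+ */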
+
+#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:12 reserved
+ * 11 MCH Thermal Sensor Event for SMI/SCI/SERR
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 Received Refresh Timeout Flag (RRTOF)
+ * 7:2 reserved
+ * 1 Multiple-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
+#define I3000_ERRSTS_UE 0x0002
+#define I3000_ERRSTS_CE 0x0001
+
+#define I3000_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:12 reserved
+ * 11 SERR on MCH Thermal Sensor Event (TSESERR)
+ * 10 reserved
+ * 9 SERR on LOCK to non-DRAM Memory (LCKERR)
+ * 8 SERR on DRAM Refresh Timeout (DRTOERR)
+ * 7:2 reserved
+ * 1 SERR Multiple-Bit DRAM ECC Error (DMERR)
+ * 0 SERR on Single-Bit ECC Error (DSERR)
+ */
+
+/* Intel MMIO register space - device 0 function 0 - MMR space */
+
+#define I3000_DRB_SHIFT 25 /* 32MiB grain */
+
+#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 1 DRAM Rank Boundary Address
+ */
+
+#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
+ *
+ * 7 reserved
+ * 6:4 DRAM odd Rank Attribute
+ * 3 reserved
+ * 2:0 DRAM even Rank Attribute
+ *
+ * Each attribute defines the page
+ * size of the corresponding rank:
+ * 000: unpopulated
+ * 001: reserved
+ * 010: 4 KB
+ * 011: 8 KB
+ * 100: 16 KB
+ * Others: reserved
+ */
+#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
+#define ODD_RANK_ATTRIB(dra) (((dra) & 0x70) >> 4)
+#define EVEN_RANK_ATTRIB(dra) ((dra) & 0x07)
+
+#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
+ *
+ * 31:30 reserved
+ * 29 Initialization Complete (IC)
+ * 28:11 reserved
+ * 10:8 Refresh Mode Select (RMS)
+ * 7 reserved
+ * 6:4 Mode Select (SMS)
+ * 3:2 reserved
+ * 1:0 DRAM Type (DT)
+ */
+
+#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
+ *
+ * 31 Enhanced Addressing Enable (ENHADE)
+ * 30:0 reserved
+ */
+
+enum i3000p_chips {
+ I3000 = 0,
+};
+
+struct i3000_dev_info {
+ const char *ctl_name;
+};
+
+struct i3000_error_info {
+ u16 errsts;
+ u8 derrsyn;
+ u8 edeap;
+ u32 deap;
+ u16 errsts2;
+};
+
+static const struct i3000_dev_info i3000_devs[] = {
+ [I3000] = {
+ .ctl_name = "i3000"},
+};
+
+static struct pci_dev *mci_pdev;
+static int i3000_registered = 1;
+static struct edac_pci_ctl_info *i3000_pci;
+
+static void i3000_get_error_info(struct mem_ctl_info *mci,
+ struct i3000_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+	 * registers at once, and they can change underneath us, e.g. a CE
+	 * can be overwritten by a UE between the reads.
+ */
+ pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
+ if (!(info->errsts & I3000_ERRSTS_BITS))
+ return;
+ pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+ pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+ pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+ pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);
+
+ /*
+ * If the error is the same for both reads then the first set
+ * of reads is valid. If there is a change then there is a CE
+ * with no info and the second set of reads is valid and
+ * should be UE info.
+ */
+ if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+ pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
+ pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
+ pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
+ }
+
+ /* Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+ I3000_ERRSTS_BITS);
+}
+
+static int i3000_process_error_info(struct mem_ctl_info *mci,
+ struct i3000_error_info *info,
+ int handle_errors)
+{
+ int row, multi_chan;
+ int pfn, offset, channel;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts & I3000_ERRSTS_BITS))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ pfn = I3000_DEAP_PFN(info->edeap, info->deap);
+ offset = I3000_DEAP_OFFSET(info->deap);
+ channel = I3000_DEAP_CHANNEL(info->deap);
+
+ row = edac_mc_find_csrow_by_page(mci, pfn);
+
+ if (info->errsts & I3000_ERRSTS_UE)
+ edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+ else
+ edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
+ multi_chan ? channel : 0, "i3000 CE");
+
+ return 1;
+}
+
+static void i3000_check(struct mem_ctl_info *mci)
+{
+ struct i3000_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i3000_get_error_info(mci, &info);
+ i3000_process_error_info(mci, &info, 1);
+}
+
+static int i3000_is_interleaved(const unsigned char *c0dra,
+ const unsigned char *c1dra,
+ const unsigned char *c0drb,
+ const unsigned char *c1drb)
+{
+ int i;
+
+ /* If the channels aren't populated identically then
+ * we're not interleaved.
+ */
+ for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
+ if (ODD_RANK_ATTRIB(c0dra[i]) != ODD_RANK_ATTRIB(c1dra[i]) ||
+ EVEN_RANK_ATTRIB(c0dra[i]) !=
+ EVEN_RANK_ATTRIB(c1dra[i]))
+ return 0;
+
+ /* If the rank boundaries for the two channels are different
+ * then we're not interleaved.
+ */
+ for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
+ if (c0drb[i] != c1drb[i])
+ return 0;
+
+ return 1;
+}
+
+static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc;
+ int i;
+ struct mem_ctl_info *mci = NULL;
+ unsigned long last_cumul_size;
+ int interleaved, nr_channels;
+ unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
+ unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
+ unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
+ unsigned long mchbar;
+ void *window;
+
+ debugf0("MC: %s()\n", __func__);
+
+	pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *)&mchbar);
+ mchbar &= I3000_MCHBAR_MASK;
+ window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
+ if (!window) {
+ printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
+ mchbar);
+ return -ENODEV;
+ }
+
+ c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
+ c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
+ c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
+ c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */
+
+ for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
+ c0drb[i] = readb(window + I3000_C0DRB + i);
+ c1drb[i] = readb(window + I3000_C1DRB + i);
+ }
+
+ iounmap(window);
+
+ /* Figure out how many channels we have.
+ *
+ * If we have what the datasheet calls "asymmetric channels"
+ * (essentially the same as what was called "virtual single
+ * channel mode" in the i82875) then it's a single channel as
+ * far as EDAC is concerned.
+ */
+ interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
+ nr_channels = interleaved ? 2 : 1;
+ mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+ if (!mci)
+ return -ENOMEM;
+
+ debugf3("MC: %s(): init mci\n", __func__);
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR2;
+
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I3000_REVISION;
+ mci->ctl_name = i3000_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i3000_check;
+ mci->ctl_page_to_phys = NULL;
+
+ /*
+ * The dram rank boundary (DRB) reg values are boundary addresses
+ * for each DRAM rank with a granularity of 32MB. DRB regs are
+ * cumulative; the last one will contain the total memory
+ * contained in all ranks.
+ *
+ * If we're in interleaved mode then we're only walking through
+ * the ranks of controller 0, so we double all the values we see.
+ */
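+	/*
+	 * For example (assuming PAGE_SHIFT == 12): a DRB value of 0x10
+	 * marks a cumulative boundary of 16 * 32MiB = 512MiB, i.e.
+	 * cumul_size = 0x10 << (25 - 12) = 0x20000 pages.
+	 */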
+ for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
+ u8 value;
+ u32 cumul_size;
+ struct csrow_info *csrow = &mci->csrows[i];
+
+ value = drb[i];
+ cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
+ if (interleaved)
+ cumul_size <<= 1;
+ debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
+ __func__, i, cumul_size);
+ if (cumul_size == last_cumul_size) {
+ csrow->mtype = MEM_EMPTY;
+ continue;
+ }
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = I3000_DEAP_GRAIN;
+ csrow->mtype = MEM_DDR2;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = EDAC_UNKNOWN;
+ }
+
+ /* Clear any error bits.
+ * (Yes, we really clear bits by writing 1 to them.)
+ */
+ pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
+ I3000_ERRSTS_BITS);
+
+ rc = -ENODEV;
+ if (edac_mc_add_mc(mci)) {
+ debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i3000_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* get this far and it's successful */
+ debugf3("MC: %s(): success\n", __func__);
+ return 0;
+
+ fail:
+ if (mci)
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i3000_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("MC: %s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i3000_probe1(pdev, ent->driver_data);
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i3000_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0("%s()\n", __func__);
+
+ if (i3000_pci)
+ edac_pci_release_generic_ctl(i3000_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I3000},
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);
+
+static struct pci_driver i3000_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i3000_init_one,
+ .remove = __devexit_p(i3000_remove_one),
+ .id_table = i3000_pci_tbl,
+};
+
+static int __init i3000_init(void)
+{
+ int pci_rc;
+
+ debugf3("MC: %s()\n", __func__);
+ pci_rc = pci_register_driver(&i3000_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (mci_pdev == NULL) {
+ i3000_registered = 0;
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_3000_HB, NULL);
+ if (!mci_pdev) {
+ debugf0("i3000 pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
+ if (pci_rc < 0) {
+ debugf0("i3000 init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i3000_driver);
+
+fail0:
+ if (mci_pdev)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i3000_exit(void)
+{
+ debugf3("MC: %s()\n", __func__);
+
+ pci_unregister_driver(&i3000_driver);
+ if (!i3000_registered) {
+ i3000_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i3000_init);
+module_exit(i3000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
+MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
new file mode 100644
index 000000000000..96f7e63e3996
--- /dev/null
+++ b/drivers/edac/i5000_edac.c
@@ -0,0 +1,1505 @@
+/*
+ * Intel 5000(P/V/X) class Memory Controllers kernel module
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Douglas Thompson Linux Networx (http://lnxi.com)
+ * norsk5@xmission.com
+ *
+ * This module is based on the following document:
+ *
+ * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
+ * http://developer.intel.com/design/chipsets/datashts/313070.htm
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/edac.h>
+#include <asm/mmzone.h>
+
+#include "edac_core.h"
+
+/*
+ * Alter this version for the I5000 module when modifications are made
+ */
+#define I5000_REVISION " Ver: 2.0.12 " __DATE__
+#define EDAC_MOD_STR "i5000_edac"
+
+#define i5000_printk(level, fmt, arg...) \
+ edac_printk(level, "i5000", fmt, ##arg)
+
+#define i5000_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_FBD_0
+#define PCI_DEVICE_ID_INTEL_FBD_0 0x25F5
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_FBD_1
+#define PCI_DEVICE_ID_INTEL_FBD_1 0x25F6
+#endif
+
+/* Device 16,
+ * Function 0: System Address
+ * Function 1: Memory Branch Map, Control, Errors Register
+ * Function 2: FSB Error Registers
+ *
+ * All 3 functions of Device 16 (0,1,2) share the SAME DID
+ */
+#define PCI_DEVICE_ID_INTEL_I5000_DEV16 0x25F0
+
+/* OFFSETS for Function 0 */
+
+/* OFFSETS for Function 1 */
+#define AMBASE 0x48
+#define MAXCH 0x56
+#define MAXDIMMPERCH 0x57
+#define TOLM 0x6C
+#define REDMEMB 0x7C
+#define RED_ECC_LOCATOR(x) ((x) & 0x3FFFF)
+#define REC_ECC_LOCATOR_EVEN(x) ((x) & 0x001FF)
+#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3FE00)
+#define MIR0 0x80
+#define MIR1 0x84
+#define MIR2 0x88
+#define AMIR0 0x8C
+#define AMIR1 0x90
+#define AMIR2 0x94
+
+#define FERR_FAT_FBD 0x98
+#define NERR_FAT_FBD 0x9C
+#define EXTRACT_FBDCHAN_INDX(x) (((x)>>28) & 0x3)
+#define FERR_FAT_FBDCHAN 0x30000000
+#define FERR_FAT_M3ERR 0x00000004
+#define FERR_FAT_M2ERR 0x00000002
+#define FERR_FAT_M1ERR 0x00000001
+#define FERR_FAT_MASK (FERR_FAT_M1ERR | \
+ FERR_FAT_M2ERR | \
+ FERR_FAT_M3ERR)
+
+#define FERR_NF_FBD 0xA0
+
+/* Thermal and SPD or FBD errors */
+#define FERR_NF_M28ERR 0x01000000
+#define FERR_NF_M27ERR 0x00800000
+#define FERR_NF_M26ERR 0x00400000
+#define FERR_NF_M25ERR 0x00200000
+#define FERR_NF_M24ERR 0x00100000
+#define FERR_NF_M23ERR 0x00080000
+#define FERR_NF_M22ERR 0x00040000
+#define FERR_NF_M21ERR 0x00020000
+
+/* Correctable errors */
+#define FERR_NF_M20ERR 0x00010000
+#define FERR_NF_M19ERR 0x00008000
+#define FERR_NF_M18ERR 0x00004000
+#define FERR_NF_M17ERR 0x00002000
+
+/* Non-Retry or redundant Retry errors */
+#define FERR_NF_M16ERR 0x00001000
+#define FERR_NF_M15ERR 0x00000800
+#define FERR_NF_M14ERR 0x00000400
+#define FERR_NF_M13ERR 0x00000200
+
+/* Uncorrectable errors */
+#define FERR_NF_M12ERR 0x00000100
+#define FERR_NF_M11ERR 0x00000080
+#define FERR_NF_M10ERR 0x00000040
+#define FERR_NF_M9ERR 0x00000020
+#define FERR_NF_M8ERR 0x00000010
+#define FERR_NF_M7ERR 0x00000008
+#define FERR_NF_M6ERR 0x00000004
+#define FERR_NF_M5ERR 0x00000002
+#define FERR_NF_M4ERR 0x00000001
+
+#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \
+ FERR_NF_M11ERR | \
+ FERR_NF_M10ERR | \
+ FERR_NF_M8ERR | \
+ FERR_NF_M7ERR | \
+ FERR_NF_M6ERR | \
+ FERR_NF_M5ERR | \
+ FERR_NF_M4ERR)
+#define FERR_NF_CORRECTABLE (FERR_NF_M20ERR | \
+ FERR_NF_M19ERR | \
+ FERR_NF_M18ERR | \
+ FERR_NF_M17ERR)
+#define FERR_NF_DIMM_SPARE (FERR_NF_M27ERR | \
+ FERR_NF_M28ERR)
+#define FERR_NF_THERMAL (FERR_NF_M26ERR | \
+ FERR_NF_M25ERR | \
+ FERR_NF_M24ERR | \
+ FERR_NF_M23ERR)
+#define FERR_NF_SPD_PROTOCOL (FERR_NF_M22ERR)
+#define FERR_NF_NORTH_CRC (FERR_NF_M21ERR)
+#define FERR_NF_NON_RETRY (FERR_NF_M13ERR | \
+ FERR_NF_M14ERR | \
+ FERR_NF_M15ERR)
+
+#define NERR_NF_FBD 0xA4
+#define FERR_NF_MASK (FERR_NF_UNCORRECTABLE | \
+ FERR_NF_CORRECTABLE | \
+ FERR_NF_DIMM_SPARE | \
+ FERR_NF_THERMAL | \
+ FERR_NF_SPD_PROTOCOL | \
+ FERR_NF_NORTH_CRC | \
+ FERR_NF_NON_RETRY)
+
+#define EMASK_FBD 0xA8
+#define EMASK_FBD_M28ERR 0x08000000
+#define EMASK_FBD_M27ERR 0x04000000
+#define EMASK_FBD_M26ERR 0x02000000
+#define EMASK_FBD_M25ERR 0x01000000
+#define EMASK_FBD_M24ERR 0x00800000
+#define EMASK_FBD_M23ERR 0x00400000
+#define EMASK_FBD_M22ERR 0x00200000
+#define EMASK_FBD_M21ERR 0x00100000
+#define EMASK_FBD_M20ERR 0x00080000
+#define EMASK_FBD_M19ERR 0x00040000
+#define EMASK_FBD_M18ERR 0x00020000
+#define EMASK_FBD_M17ERR 0x00010000
+
+#define EMASK_FBD_M15ERR 0x00004000
+#define EMASK_FBD_M14ERR 0x00002000
+#define EMASK_FBD_M13ERR 0x00001000
+#define EMASK_FBD_M12ERR 0x00000800
+#define EMASK_FBD_M11ERR 0x00000400
+#define EMASK_FBD_M10ERR 0x00000200
+#define EMASK_FBD_M9ERR 0x00000100
+#define EMASK_FBD_M8ERR 0x00000080
+#define EMASK_FBD_M7ERR 0x00000040
+#define EMASK_FBD_M6ERR 0x00000020
+#define EMASK_FBD_M5ERR 0x00000010
+#define EMASK_FBD_M4ERR 0x00000008
+#define EMASK_FBD_M3ERR 0x00000004
+#define EMASK_FBD_M2ERR 0x00000002
+#define EMASK_FBD_M1ERR 0x00000001
+
+#define ENABLE_EMASK_FBD_FATAL_ERRORS (EMASK_FBD_M1ERR | \
+ EMASK_FBD_M2ERR | \
+ EMASK_FBD_M3ERR)
+
+#define ENABLE_EMASK_FBD_UNCORRECTABLE (EMASK_FBD_M4ERR | \
+ EMASK_FBD_M5ERR | \
+ EMASK_FBD_M6ERR | \
+ EMASK_FBD_M7ERR | \
+ EMASK_FBD_M8ERR | \
+ EMASK_FBD_M9ERR | \
+ EMASK_FBD_M10ERR | \
+ EMASK_FBD_M11ERR | \
+ EMASK_FBD_M12ERR)
+#define ENABLE_EMASK_FBD_CORRECTABLE (EMASK_FBD_M17ERR | \
+ EMASK_FBD_M18ERR | \
+ EMASK_FBD_M19ERR | \
+ EMASK_FBD_M20ERR)
+#define ENABLE_EMASK_FBD_DIMM_SPARE (EMASK_FBD_M27ERR | \
+ EMASK_FBD_M28ERR)
+#define ENABLE_EMASK_FBD_THERMALS (EMASK_FBD_M26ERR | \
+ EMASK_FBD_M25ERR | \
+ EMASK_FBD_M24ERR | \
+ EMASK_FBD_M23ERR)
+#define ENABLE_EMASK_FBD_SPD_PROTOCOL (EMASK_FBD_M22ERR)
+#define ENABLE_EMASK_FBD_NORTH_CRC (EMASK_FBD_M21ERR)
+#define ENABLE_EMASK_FBD_NON_RETRY (EMASK_FBD_M15ERR | \
+ EMASK_FBD_M14ERR | \
+ EMASK_FBD_M13ERR)
+
+#define ENABLE_EMASK_ALL (ENABLE_EMASK_FBD_NON_RETRY | \
+ ENABLE_EMASK_FBD_NORTH_CRC | \
+ ENABLE_EMASK_FBD_SPD_PROTOCOL | \
+ ENABLE_EMASK_FBD_THERMALS | \
+ ENABLE_EMASK_FBD_DIMM_SPARE | \
+ ENABLE_EMASK_FBD_FATAL_ERRORS | \
+ ENABLE_EMASK_FBD_CORRECTABLE | \
+ ENABLE_EMASK_FBD_UNCORRECTABLE)
+
+#define ERR0_FBD 0xAC
+#define ERR1_FBD 0xB0
+#define ERR2_FBD 0xB4
+#define MCERR_FBD 0xB8
+#define NRECMEMA 0xBE
+#define NREC_BANK(x) (((x)>>12) & 0x7)
+#define NREC_RDWR(x) (((x)>>11) & 1)
+#define NREC_RANK(x) (((x)>>8) & 0x7)
+#define NRECMEMB 0xC0
+#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF)
+#define NREC_RAS(x) ((x) & 0x7FFF)
+#define NRECFGLOG 0xC4
+#define NREEECFBDA 0xC8
+#define NREEECFBDB 0xCC
+#define NREEECFBDC 0xD0
+#define NREEECFBDD 0xD4
+#define NREEECFBDE 0xD8
+#define REDMEMA 0xDC
+#define RECMEMA 0xE2
+#define REC_BANK(x) (((x)>>12) & 0x7)
+#define REC_RDWR(x) (((x)>>11) & 1)
+#define REC_RANK(x) (((x)>>8) & 0x7)
+#define RECMEMB 0xE4
+#define REC_CAS(x) (((x)>>16) & 0xFFFFFF)
+#define REC_RAS(x) ((x) & 0x7FFF)
+#define RECFGLOG 0xE8
+#define RECFBDA 0xEC
+#define RECFBDB 0xF0
+#define RECFBDC 0xF4
+#define RECFBDD 0xF8
+#define RECFBDE 0xFC
+
+/* OFFSETS for Function 2 */
+
+/*
+ * Device 21,
+ * Function 0: Memory Map Branch 0
+ *
+ * Device 22,
+ * Function 0: Memory Map Branch 1
+ */
+#define PCI_DEVICE_ID_I5000_BRANCH_0 0x25F5
+#define PCI_DEVICE_ID_I5000_BRANCH_1 0x25F6
+
+#define AMB_PRESENT_0 0x64
+#define AMB_PRESENT_1 0x66
+#define MTR0 0x80
+#define MTR1 0x84
+#define MTR2 0x88
+#define MTR3 0x8C
+
+#define NUM_MTRS 4
+#define CHANNELS_PER_BRANCH (2)
+
+/* Defines to extract the various fields from the
+ * MTRx - Memory Technology Registers
+ */
+#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
+#define MTR_DRAM_WIDTH(mtr) ((((mtr) >> 6) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS(mtr) ((((mtr) >> 5) & 0x1) ? 8 : 4)
+#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
+#define MTR_DIMM_RANK(mtr) (((mtr) >> 4) & 0x1)
+#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1)
+#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3)
+#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13)
+#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
+#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
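+
+/* Illustrative decode under the field definitions above (example value
+ * only, not taken from any datasheet): an MTR of 0x155 would read back
+ * as DIMM present (bit 8 set), x8 DRAM width, 4 banks, dual rank,
+ * 14 row address bits and 11 column address bits.
+ */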
+
+#ifdef CONFIG_EDAC_DEBUG
+static char *numrow_toString[] = {
+ "8,192 - 13 rows",
+ "16,384 - 14 rows",
+ "32,768 - 15 rows",
+ "reserved"
+};
+
+static char *numcol_toString[] = {
+ "1,024 - 10 columns",
+ "2,048 - 11 columns",
+ "4,096 - 12 columns",
+ "reserved"
+};
+#endif
+
+/* Enumeration of supported devices */
+enum i5000_chips {
+ I5000P = 0,
+ I5000V = 1, /* future */
+ I5000X = 2 /* future */
+};
+
+/* Device name and register DID (Device ID) */
+struct i5000_dev_info {
+ const char *ctl_name; /* name for this device */
+ u16 fsb_mapping_errors; /* DID for the branchmap,control */
+};
+
+/* Table of devices attributes supported by this driver */
+static const struct i5000_dev_info i5000_devs[] = {
+ [I5000P] = {
+ .ctl_name = "I5000",
+ .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
+ },
+};
+
+struct i5000_dimm_info {
+ int megabytes; /* size, 0 means not present */
+ int dual_rank;
+};
+
+#define MAX_CHANNELS 6 /* max possible channels */
+#define MAX_CSROWS (8*2) /* max possible csrows per channel */
+
+/* driver private data structure */
+struct i5000_pvt {
+ struct pci_dev *system_address; /* 16.0 */
+ struct pci_dev *branchmap_werrors; /* 16.1 */
+ struct pci_dev *fsb_error_regs; /* 16.2 */
+ struct pci_dev *branch_0; /* 21.0 */
+ struct pci_dev *branch_1; /* 22.0 */
+
+ u16 tolm; /* top of low memory */
+ u64 ambase; /* AMB BAR */
+
+ u16 mir0, mir1, mir2;
+
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
+ u16 b0_ambpresent0; /* Branch 0, Channel 0 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
+
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
+ u16 b1_ambpresent0; /* Branch 1, Channel 0 */
+ u16 b1_ambpresent1; /* Branch 1, Channel 1 */
+
+ /* DIMM information matrix, allocating architecture maximums */
+ struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+
+ /* Actual values for this controller */
+ int maxch; /* Max channels */
+ int maxdimmperch; /* Max DIMMs per channel */
+};
+
+/* I5000 MCH error information retrieved from Hardware */
+struct i5000_error_info {
+
+ /* These registers are always read from the MC */
+ u32 ferr_fat_fbd; /* First Errors Fatal */
+ u32 nerr_fat_fbd; /* Next Errors Fatal */
+ u32 ferr_nf_fbd; /* First Errors Non-Fatal */
+ u32 nerr_nf_fbd; /* Next Errors Non-Fatal */
+
+ /* These registers are input ONLY if there was a Recoverable Error */
+ u32 redmemb; /* Recoverable Mem Data Error log B */
+ u16 recmema; /* Recoverable Mem Error log A */
+ u32 recmemb; /* Recoverable Mem Error log B */
+
+ /* These registers are input ONLY if there was a
+ * Non-Recoverable Error */
+ u16 nrecmema; /* Non-Recoverable Mem log A */
+ u16 nrecmemb; /* Non-Recoverable Mem log B */
+
+};
+
+static struct edac_pci_ctl_info *i5000_pci;
+
+/*
+ * i5000_get_error_info Retrieve the hardware error information from
+ * the hardware and cache it in the 'info'
+ * structure
+ */
+static void i5000_get_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info)
+{
+ struct i5000_pvt *pvt;
+ u32 value;
+
+ pvt = mci->pvt_info;
+
+ /* read in the 1st FATAL error register */
+ pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);
+
+ /* Mask only the bits that the doc says are valid
+ */
+ value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);
+
+ /* If there is an error, then read in the */
+ /* NEXT FATAL error register and the Memory Error Log Register A */
+ if (value & FERR_FAT_MASK) {
+ info->ferr_fat_fbd = value;
+
+ /* harvest the various error data we need */
+ pci_read_config_dword(pvt->branchmap_werrors,
+ NERR_FAT_FBD, &info->nerr_fat_fbd);
+ pci_read_config_word(pvt->branchmap_werrors,
+ NRECMEMA, &info->nrecmema);
+ pci_read_config_word(pvt->branchmap_werrors,
+ NRECMEMB, &info->nrecmemb);
+
+ /* Clear the error bits, by writing them back */
+ pci_write_config_dword(pvt->branchmap_werrors,
+ FERR_FAT_FBD, value);
+ } else {
+ info->ferr_fat_fbd = 0;
+ info->nerr_fat_fbd = 0;
+ info->nrecmema = 0;
+ info->nrecmemb = 0;
+ }
+
+ /* read in the 1st NON-FATAL error register */
+ pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);
+
+ /* If there is an error, then read in the remaining NON-FATAL
+ * error registers as well */
+ if (value & FERR_NF_MASK) {
+ info->ferr_nf_fbd = value;
+
+ /* harvest the various error data we need */
+ pci_read_config_dword(pvt->branchmap_werrors,
+ NERR_NF_FBD, &info->nerr_nf_fbd);
+ pci_read_config_word(pvt->branchmap_werrors,
+ RECMEMA, &info->recmema);
+ pci_read_config_dword(pvt->branchmap_werrors,
+ RECMEMB, &info->recmemb);
+ pci_read_config_dword(pvt->branchmap_werrors,
+ REDMEMB, &info->redmemb);
+
+ /* Clear the error bits, by writing them back */
+ pci_write_config_dword(pvt->branchmap_werrors,
+ FERR_NF_FBD, value);
+ } else {
+ info->ferr_nf_fbd = 0;
+ info->nerr_nf_fbd = 0;
+ info->recmema = 0;
+ info->recmemb = 0;
+ info->redmemb = 0;
+ }
+}
+
+/*
+ * i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+ * struct i5000_error_info *info,
+ * int handle_errors);
+ *
+ * handle the Intel FATAL errors, if any
+ */
+static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info,
+ int handle_errors)
+{
+ char msg[EDAC_MC_LABEL_LEN + 1 + 90];
+ u32 allErrors;
+ int branch;
+ int channel;
+ int bank;
+ int rank;
+ int rdwr;
+ int ras, cas;
+
+ /* mask off the Error bits that are possible */
+ allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK);
+ if (!allErrors)
+ return; /* if no error, return now */
+
+ /* ONLY ONE of the possible error bits will be set, as per the docs */
+ i5000_mc_printk(mci, KERN_ERR,
+ "FATAL ERRORS Found!!! 1st FATAL Err Reg= 0x%x\n",
+ allErrors);
+
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
+ channel = branch;
+
+ /* Use the NON-Recoverable macros to extract data */
+ bank = NREC_BANK(info->nrecmema);
+ rank = NREC_RANK(info->nrecmema);
+ rdwr = NREC_RDWR(info->nrecmema);
+ ras = NREC_RAS(info->nrecmemb);
+ cas = NREC_CAS(info->nrecmemb);
+
+ debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
+
+ /* Only 1 bit will be on */
+ if (allErrors & FERR_FAT_M1ERR) {
+ i5000_mc_printk(mci, KERN_ERR,
+ "Alert on non-redundant retry or fast "
+ "reset timeout\n");
+
+ } else if (allErrors & FERR_FAT_M2ERR) {
+ i5000_mc_printk(mci, KERN_ERR,
+ "Northbound CRC error on non-redundant "
+ "retry\n");
+
+ } else if (allErrors & FERR_FAT_M3ERR) {
+ i5000_mc_printk(mci, KERN_ERR,
+ ">Tmid Thermal event with intelligent "
+ "throttling disabled\n");
+ }
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
+ "FATAL Err=0x%x)",
+ branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+ allErrors);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+}
+
+/*
+ * i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ * struct i5000_error_info *info,
+ * int handle_errors);
+ *
+ * handle the Intel NON-FATAL errors, if any
+ */
+static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info,
+ int handle_errors)
+{
+ char msg[EDAC_MC_LABEL_LEN + 1 + 90];
+ u32 allErrors;
+ u32 ue_errors;
+ u32 ce_errors;
+ u32 misc_errors;
+ int branch;
+ int channel;
+ int bank;
+ int rank;
+ int rdwr;
+ int ras, cas;
+
+ /* mask off the Error bits that are possible */
+ allErrors = (info->ferr_nf_fbd & FERR_NF_MASK);
+ if (!allErrors)
+ return; /* if no error, return now */
+
+ /* ONLY ONE of the possible error bits will be set, as per the docs */
+ i5000_mc_printk(mci, KERN_WARNING,
+ "NON-FATAL ERRORS Found!!! 1st NON-FATAL Err "
+ "Reg= 0x%x\n", allErrors);
+
+ ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
+ if (ue_errors) {
+ debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
+
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+ channel = branch;
+ bank = NREC_BANK(info->nrecmema);
+ rank = NREC_RANK(info->nrecmema);
+ rdwr = NREC_RDWR(info->nrecmema);
+ ras = NREC_RAS(info->nrecmemb);
+ cas = NREC_CAS(info->nrecmemb);
+
+ debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "CAS=%d, UE Err=0x%x)",
+ branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
+ ue_errors);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ }
+
+ /* Check correctable errors */
+ ce_errors = allErrors & FERR_NF_CORRECTABLE;
+ if (ce_errors) {
+ debugf0("\tCorrected bits= 0x%x\n", ce_errors);
+
+ branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
+
+ channel = 0;
+ if (REC_ECC_LOCATOR_ODD(info->redmemb))
+ channel = 1;
+
+ /* Convert channel to be based from zero, instead of
+ * from branch base of 0 */
+ channel += branch;
+
+ bank = REC_BANK(info->recmema);
+ rank = REC_RANK(info->recmema);
+ rdwr = REC_RDWR(info->recmema);
+ ras = REC_RAS(info->recmemb);
+ cas = REC_CAS(info->recmemb);
+
+ debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
+
+ /* Form out message */
+ snprintf(msg, sizeof(msg),
+ "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "CAS=%d, CE Err=0x%x)", branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas, ce_errors);
+
+ /* Call the helper to output message */
+ edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ }
+
+ /* See if any of the thermal errors have fired */
+ misc_errors = allErrors & FERR_NF_THERMAL;
+ if (misc_errors) {
+ i5000_printk(KERN_WARNING, "\tTHERMAL Error, bits= 0x%x\n",
+ misc_errors);
+ }
+
+ /* See if any of the non-retry errors have fired */
+ misc_errors = allErrors & FERR_NF_NON_RETRY;
+ if (misc_errors) {
+ i5000_printk(KERN_WARNING, "\tNON-Retry Errors, bits= 0x%x\n",
+ misc_errors);
+ }
+
+ /* See if any of the northbound CRC errors have fired */
+ misc_errors = allErrors & FERR_NF_NORTH_CRC;
+ if (misc_errors) {
+ i5000_printk(KERN_WARNING,
+ "\tNORTHBOUND CRC Error, bits= 0x%x\n",
+ misc_errors);
+ }
+
+ /* See if any of the SPD protocol errors have fired */
+ misc_errors = allErrors & FERR_NF_SPD_PROTOCOL;
+ if (misc_errors) {
+ i5000_printk(KERN_WARNING,
+ "\tSPD Protocol Error, bits= 0x%x\n",
+ misc_errors);
+ }
+
+ /* See if any of the DIMM-spare errors have fired */
+ misc_errors = allErrors & FERR_NF_DIMM_SPARE;
+ if (misc_errors) {
+ i5000_printk(KERN_WARNING, "\tDIMM-Spare Error, bits= 0x%x\n",
+ misc_errors);
+ }
+}
+
+/*
+ * i5000_process_error_info Process the error info that is
+ * in the 'info' structure, previously retrieved from hardware
+ */
+static void i5000_process_error_info(struct mem_ctl_info *mci,
+ struct i5000_error_info *info,
+ int handle_errors)
+{
+ /* First handle any fatal errors that occurred */
+ i5000_process_fatal_error_info(mci, info, handle_errors);
+
+ /* now handle any non-fatal errors that occurred */
+ i5000_process_nonfatal_error_info(mci, info, handle_errors);
+}
+
+/*
+ * i5000_clear_error Retrieve any error from the hardware
+ * but do NOT process that error.
+ * Used for 'clearing' out of previous errors
+ * Called by the Core module.
+ */
+static void i5000_clear_error(struct mem_ctl_info *mci)
+{
+ struct i5000_error_info info;
+
+ i5000_get_error_info(mci, &info);
+}
+
+/*
+ * i5000_check_error Retrieve and process errors reported by the
+ * hardware. Called by the Core module.
+ */
+static void i5000_check_error(struct mem_ctl_info *mci)
+{
+ struct i5000_error_info info;
+ debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i5000_get_error_info(mci, &info);
+ i5000_process_error_info(mci, &info, 1);
+}
+
+/*
+ * i5000_get_devices Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver
+ *
+ * Need to 'get' device 16 func 1 and func 2
+ */
+static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
+{
+ //const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx];
+ struct i5000_pvt *pvt;
+ struct pci_dev *pdev;
+
+ pvt = mci->pvt_info;
+
+ /* Attempt to 'get' the MCH register we want */
+ pdev = NULL;
+ while (1) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+ /* End of list, leave */
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "'system address,Process Bus' "
+ "device not found:"
+ "vendor 0x%x device 0x%x FUNC 1 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+ return 1;
+ }
+
+ /* Scan for device 16 func 1 */
+ if (PCI_FUNC(pdev->devfn) == 1)
+ break;
+ }
+
+ pvt->branchmap_werrors = pdev;
+
+ /* Attempt to 'get' the MCH register we want */
+ pdev = NULL;
+ while (1) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "MC: 'branchmap,control,errors' "
+ "device not found:"
+ "vendor 0x%x device 0x%x Func 2 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16);
+
+ pci_dev_put(pvt->branchmap_werrors);
+ return 1;
+ }
+
+ /* Scan for device 16 func 2 */
+ if (PCI_FUNC(pdev->devfn) == 2)
+ break;
+ }
+
+ pvt->fsb_error_regs = pdev;
+
+ debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->system_address),
+ pvt->system_address->vendor, pvt->system_address->device);
+ debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->branchmap_werrors),
+ pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
+ debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->fsb_error_regs),
+ pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+
+ pdev = NULL;
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
+
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "MC: 'BRANCH 0' device not found:"
+ "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
+
+ pci_dev_put(pvt->branchmap_werrors);
+ pci_dev_put(pvt->fsb_error_regs);
+ return 1;
+ }
+
+ pvt->branch_0 = pdev;
+
+ /* If this device reports two or more channels then
+ * fetch Branch 1's information
+ */
+ if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+ pdev = NULL;
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
+
+ if (pdev == NULL) {
+ i5000_printk(KERN_ERR,
+ "MC: 'BRANCH 1' device not found:"
+ "vendor 0x%x device 0x%x Func 0 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_1);
+
+ pci_dev_put(pvt->branchmap_werrors);
+ pci_dev_put(pvt->fsb_error_regs);
+ pci_dev_put(pvt->branch_0);
+ return 1;
+ }
+
+ pvt->branch_1 = pdev;
+ }
+
+ return 0;
+}
+
+/*
+ * i5000_put_devices 'put' all the devices that we have
+ * reserved via 'get'
+ */
+static void i5000_put_devices(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+
+ pvt = mci->pvt_info;
+
+ pci_dev_put(pvt->branchmap_werrors); /* FUNC 1 */
+ pci_dev_put(pvt->fsb_error_regs); /* FUNC 2 */
+ pci_dev_put(pvt->branch_0); /* DEV 21 */
+
+ /* Only if there are two or more channels do we release the second branch */
+ if (pvt->maxch >= CHANNELS_PER_BRANCH)
+ pci_dev_put(pvt->branch_1); /* DEV 22 */
+}
+
+/*
+ * determine_amb_present_reg
+ *
+ * The AMB-present information is spread across several registers;
+ * determining which one to read requires knowing which channel is
+ * in question
+ *
+ * 2 branches, each with 2 channels
+ * b0_ambpresent0 for channel '0'
+ * b0_ambpresent1 for channel '1'
+ * b1_ambpresent0 for channel '2'
+ * b1_ambpresent1 for channel '3'
+ */
+static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
+{
+ int amb_present;
+
+ if (channel < CHANNELS_PER_BRANCH) {
+ if (channel & 0x1)
+ amb_present = pvt->b0_ambpresent1;
+ else
+ amb_present = pvt->b0_ambpresent0;
+ } else {
+ if (channel & 0x1)
+ amb_present = pvt->b1_ambpresent1;
+ else
+ amb_present = pvt->b1_ambpresent0;
+ }
+
+ return amb_present;
+}
+
+/*
+ * determine_mtr(pvt, csrow, channel)
+ *
+ * return the proper MTR register as determined by the csrow and channel desired
+ */
+static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+{
+ int mtr;
+
+ if (channel < CHANNELS_PER_BRANCH)
+ mtr = pvt->b0_mtr[csrow >> 1];
+ else
+ mtr = pvt->b1_mtr[csrow >> 1];
+
+ return mtr;
+}
+
+/*
+ * decode_mtr decode and dump, via debug output, the interesting
+ * fields of a single MTR register
+ */
+static void decode_mtr(int slot_row, u16 mtr)
+{
+ int ans;
+
+ ans = MTR_DIMMS_PRESENT(mtr);
+
+ debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
+ ans ? "Present" : "NOT Present");
+ if (!ans)
+ return;
+
+ debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+ debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
+ debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
+ debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+}
+
+static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+ struct i5000_dimm_info *dinfo)
+{
+ int mtr;
+ int amb_present_reg;
+ int addrBits;
+
+ mtr = determine_mtr(pvt, csrow, channel);
+ if (MTR_DIMMS_PRESENT(mtr)) {
+ amb_present_reg = determine_amb_present_reg(pvt, channel);
+
+ /* Determine if there is a DIMM present in this DIMM slot */
+ if (amb_present_reg & (1 << (csrow >> 1))) {
+ dinfo->dual_rank = MTR_DIMM_RANK(mtr);
+
+ if (!((dinfo->dual_rank == 0) &&
+ ((csrow & 0x1) == 0x1))) {
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+ /* Add the number of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+ addrBits += 6; /* add 64 bits per DIMM */
+ addrBits -= 20; /* divide by 2^20 (bytes to megabytes) */
+ addrBits -= 3; /* 8 bits per byte */
+
+ dinfo->megabytes = 1 << addrBits;
+ }
+ }
+ }
+}
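+
+/* Illustrative sizing (example only): for the sample MTR value 0x155
+ * mentioned above (4 banks -> 2 bits, 14 row bits, 11 column bits),
+ * the arithmetic in handle_channel() gives 2 + 14 + 11 + 6 - 20 - 3 =
+ * 10 address bits, i.e. a 1 << 10 = 1024 MB DIMM.
+ */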
+
+/*
+ * calculate_dimm_size
+ *
+ * also will output a DIMM matrix map, if debug is enabled, for viewing
+ * how the DIMMs are populated
+ */
+static void calculate_dimm_size(struct i5000_pvt *pvt)
+{
+ struct i5000_dimm_info *dinfo;
+ int csrow, max_csrows;
+ char *p, *mem_buffer;
+ int space, n;
+ int channel;
+
+ /* ================= Generate some debug output ================= */
+ space = PAGE_SIZE;
+ mem_buffer = p = kmalloc(space, GFP_KERNEL);
+ if (p == NULL) {
+ i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
+ __FILE__, __func__);
+ return;
+ }
+
+ n = snprintf(p, space, "\n");
+ p += n;
+ space -= n;
+
+ /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ * and calculate the information for each DIMM
+ * Start with the highest csrow first, to display it first
+ * and work toward the 0th csrow
+ */
+ max_csrows = pvt->maxdimmperch * 2;
+ for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+
+ /* on an odd csrow, first output a 'boundary' marker,
+ * then reset the message buffer */
+ if (csrow & 0x1) {
+ n = snprintf(p, space, "---------------------------"
+ "--------------------------------");
+ p += n;
+ space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+ }
+ n = snprintf(p, space, "csrow %2d ", csrow);
+ p += n;
+ space -= n;
+
+ for (channel = 0; channel < pvt->maxch; channel++) {
+ dinfo = &pvt->dimm_info[csrow][channel];
+ handle_channel(pvt, csrow, channel, dinfo);
+ n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
+ p += n;
+ space -= n;
+ }
+ n = snprintf(p, space, "\n");
+ p += n;
+ space -= n;
+ }
+
+ /* Output the last bottom 'boundary' marker */
+ n = snprintf(p, space, "---------------------------"
+ "--------------------------------\n");
+ p += n;
+ space -= n;
+
+ /* now output the 'channel' labels */
+ n = snprintf(p, space, " ");
+ p += n;
+ space -= n;
+ for (channel = 0; channel < pvt->maxch; channel++) {
+ n = snprintf(p, space, "channel %d | ", channel);
+ p += n;
+ space -= n;
+ }
+ n = snprintf(p, space, "\n");
+ p += n;
+ space -= n;
+
+ /* output the last message and free buffer */
+ debugf2("%s\n", mem_buffer);
+ kfree(mem_buffer);
+}
+
+/*
+ * i5000_get_mc_regs read in the necessary registers and
+ * cache locally
+ *
+ * Fills in the private data members
+ */
+static void i5000_get_mc_regs(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+ u32 actual_tolm;
+ u16 limit;
+ int slot_row;
+ int maxch;
+ int maxdimmperch;
+ int way0, way1;
+
+ pvt = mci->pvt_info;
+
+ pci_read_config_dword(pvt->system_address, AMBASE,
+ (u32 *) & pvt->ambase);
+ pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
+ ((u32 *) & pvt->ambase) + 1);
+
+ maxdimmperch = pvt->maxdimmperch;
+ maxch = pvt->maxch;
+
+ debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
+ (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+
+ /* Get the Branch Map regs */
+ pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
+ pvt->tolm >>= 12;
+ debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
+ pvt->tolm);
+
+ actual_tolm = pvt->tolm << 28;
+ debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
+
+ pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
+ pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
+ pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2);
+
+ /* Get the MIR[0-2] regs */
+ limit = (pvt->mir0 >> 4) & 0x0FFF;
+ way0 = pvt->mir0 & 0x1;
+ way1 = pvt->mir0 & 0x2;
+ debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ limit = (pvt->mir1 >> 4) & 0x0FFF;
+ way0 = pvt->mir1 & 0x1;
+ way1 = pvt->mir1 & 0x2;
+ debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ limit = (pvt->mir2 >> 4) & 0x0FFF;
+ way0 = pvt->mir2 & 0x1;
+ way1 = pvt->mir2 & 0x2;
+ debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+
+ /* Get the MTR[0-3] regs */
+ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+ int where = MTR0 + (slot_row * sizeof(u32));
+
+ pci_read_config_word(pvt->branch_0, where,
+ &pvt->b0_mtr[slot_row]);
+
+ debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
+ pvt->b0_mtr[slot_row]);
+
+ if (pvt->maxch >= CHANNELS_PER_BRANCH) {
+ pci_read_config_word(pvt->branch_1, where,
+ &pvt->b1_mtr[slot_row]);
+ debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
+ where, pvt->b0_mtr[slot_row]);
+ } else {
+ pvt->b1_mtr[slot_row] = 0;
+ }
+ }
+
+ /* Read and dump branch 0's MTRs */
+ debugf2("\nMemory Technology Registers:\n");
+ debugf2(" Branch 0:\n");
+ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+ decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
+ }
+ pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
+ &pvt->b0_ambpresent0);
+ debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+ pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
+ &pvt->b0_ambpresent1);
+ debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+
+ /* Only if we have 2 branches (4 channels) */
+ if (pvt->maxch < CHANNELS_PER_BRANCH) {
+ pvt->b1_ambpresent0 = 0;
+ pvt->b1_ambpresent1 = 0;
+ } else {
+ /* Read and dump branch 1's MTRs */
+ debugf2(" Branch 1:\n");
+ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
+ decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
+ }
+ pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
+ &pvt->b1_ambpresent0);
+ debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
+ pvt->b1_ambpresent0);
+ pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
+ &pvt->b1_ambpresent1);
+ debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
+ pvt->b1_ambpresent1);
+ }
+
+ /* Go and determine the size of each DIMM and place in an
+ * orderly matrix */
+ calculate_dimm_size(pvt);
+}
+
+/*
+ * i5000_init_csrows Initialize the 'csrows' table within
+ * the mci control structure with the
+ * addressing of memory.
+ *
+ * return:
+ * 0 success
+ * 1 no actual memory found on this MC
+ */
+static int i5000_init_csrows(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+ struct csrow_info *p_csrow;
+ int empty, channel_count;
+ int max_csrows;
+ int mtr;
+ int csrow_megs;
+ int channel;
+ int csrow;
+
+ pvt = mci->pvt_info;
+
+ channel_count = pvt->maxch;
+ max_csrows = pvt->maxdimmperch * 2;
+
+ empty = 1; /* Assume NO memory */
+
+ for (csrow = 0; csrow < max_csrows; csrow++) {
+ p_csrow = &mci->csrows[csrow];
+
+ p_csrow->csrow_idx = csrow;
+
+ /* use branch 0 for the basis */
+ mtr = pvt->b0_mtr[csrow >> 1];
+
+ /* if no DIMMS on this row, continue */
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
+
+ /* FAKE OUT VALUES, FIXME */
+ p_csrow->first_page = 0 + csrow * 20;
+ p_csrow->last_page = 9 + csrow * 20;
+ p_csrow->page_mask = 0xFFF;
+
+ p_csrow->grain = 8;
+
+ csrow_megs = 0;
+ for (channel = 0; channel < pvt->maxch; channel++) {
+ csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
+ }
+
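+ /* megabytes -> 4KB pages: 1MB is 256 pages, so e.g. a
+ * 2048MB csrow becomes 2048 << 8 = 524288 pages
+ * (assuming the usual 4KB PAGE_SIZE) */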
+ p_csrow->nr_pages = csrow_megs << 8;
+
+ /* Assume DDR2 for now */
+ p_csrow->mtype = MEM_FB_DDR2;
+
+ /* ask what device type on this row */
+ if (MTR_DRAM_WIDTH(mtr) == 8)
+ p_csrow->dtype = DEV_X8;
+ else
+ p_csrow->dtype = DEV_X4;
+
+ p_csrow->edac_mode = EDAC_S8ECD8ED;
+
+ empty = 0;
+ }
+
+ return empty;
+}
+
+/*
+ * i5000_enable_error_reporting
+ * Turn on the memory reporting features of the hardware
+ */
+static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
+{
+ struct i5000_pvt *pvt;
+ u32 fbd_error_mask;
+
+ pvt = mci->pvt_info;
+
+ /* Read the FBD Error Mask Register */
+ pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+ &fbd_error_mask);
+
+ /* Enable with a '0' */
+ fbd_error_mask &= ~(ENABLE_EMASK_ALL);
+
+ pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
+ fbd_error_mask);
+}
+
+/*
+ * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ *
+ * ask the device how many channels are present and how many CSROWS
+ * as well
+ */
+static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
+ int *num_dimms_per_channel,
+ int *num_channels)
+{
+ u8 value;
+
+ /* Need to retrieve just how many channels and dimms per channel are
+ * supported on this memory controller
+ */
+ pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
+ *num_dimms_per_channel = (int)value * 2;
+
+ pci_read_config_byte(pdev, MAXCH, &value);
+ *num_channels = (int)value;
+}
+
+/*
+ * i5000_probe1 Probe for ONE instance of device to see if it is
+ * present.
+ * return:
+ * 0 for FOUND a device
+ * < 0 for error code
+ */
+static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ struct mem_ctl_info *mci;
+ struct i5000_pvt *pvt;
+ int num_channels;
+ int num_dimms_per_channel;
+ int num_csrows;
+
+ debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
+ __func__,
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ /* We only are looking for func 0 of the set */
+ if (PCI_FUNC(pdev->devfn) != 0)
+ return -ENODEV;
+
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ break;
+ }
+
+ /* Ask the devices for the number of CSROWS and CHANNELS so
+ * that we can calculate the memory resources, etc
+ *
+ * The chipset reports what it can handle, which is greater than
+ * or equal to what the motherboard manufacturer actually implements.
+ *
+ * As we don't have a motherboard identification routine to determine
+ * the actual number of slots/DIMMs per channel, we use the counts
+ * reported by the chipset. We may therefore see more DIMMs per
+ * channel than are actually on the motherboard, but this lets the
+ * driver support up to the chipset maximum without any fancy
+ * motherboard detection.
+ */
+ i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
+ &num_channels);
+ num_csrows = num_dimms_per_channel * 2;
+
+ debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
+ __func__, num_channels, num_dimms_per_channel, num_csrows);
+
+ /* allocate a new MC control structure */
+ mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ mci->dev = &pdev->dev; /* record ptr to the generic device */
+
+ pvt = mci->pvt_info;
+ pvt->system_address = pdev; /* Record this device in our private */
+ pvt->maxch = num_channels;
+ pvt->maxdimmperch = num_dimms_per_channel;
+
+ /* 'get' the pci devices we want to reserve for our use */
+ if (i5000_get_devices(mci, dev_idx))
+ goto fail0;
+
+ /* Time to get serious */
+ i5000_get_mc_regs(mci); /* retrieve the hardware registers */
+
+ mci->mc_idx = 0;
+ mci->mtype_cap = MEM_FLAG_FB_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "i5000_edac.c";
+ mci->mod_ver = I5000_REVISION;
+ mci->ctl_name = i5000_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
+ mci->ctl_page_to_phys = NULL;
+
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i5000_check_error;
+
+ /* initialize the MC control structure 'csrows' table
+ * with the mapping and control information */
+ if (i5000_init_csrows(mci)) {
+ debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
+ " because i5000_init_csrows() returned nonzero "
+ "value\n");
+ mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+ } else {
+ debugf1("MC: Enable error reporting now\n");
+ i5000_enable_error_reporting(mci);
+ }
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (edac_mc_add_mc(mci)) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ /* FIXME: perhaps some code should go here that disables error
+ * reporting if we just enabled it
+ */
+ goto fail1;
+ }
+
+ i5000_clear_error(mci);
+
+ /* allocating generic PCI control info */
+ i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i5000_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ return 0;
+
+ /* Error exit unwinding stack */
+fail1:
+
+ i5000_put_devices(mci);
+
+fail0:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+/*
+ * i5000_init_one constructor for one instance of device
+ *
+ * returns:
+ * negative on error
+ * count (>= 0)
+ */
+static int __devinit i5000_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rc;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* wake up device */
+ rc = pci_enable_device(pdev);
+ if (rc == -EIO)
+ return rc;
+
+ /* now probe and enable the device */
+ return i5000_probe1(pdev, id->driver_data);
+}
+
+/*
+ * i5000_remove_one destructor for one instance of device
+ *
+ */
+static void __devexit i5000_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i5000_pci)
+ edac_pci_release_generic_ctl(i5000_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ /* retrieve references to resources, and free those resources */
+ i5000_put_devices(mci);
+
+ edac_mc_free(mci);
+}
+
+/*
+ * pci_device_id table for which devices we are looking for
+ *
+ * The "E500P" device is the first device supported.
+ */
+static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
+ .driver_data = I5000P},
+
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
+
+/*
+ * i5000_driver pci_driver structure for this module
+ *
+ */
+static struct pci_driver i5000_driver = {
+ .name = __stringify(KBUILD_BASENAME),
+ .probe = i5000_init_one,
+ .remove = __devexit_p(i5000_remove_one),
+ .id_table = i5000_pci_tbl,
+};
+
+/*
+ * i5000_init Module entry function
+ * Try to initialize this module for its devices
+ */
+static int __init i5000_init(void)
+{
+ int pci_rc;
+
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+ pci_rc = pci_register_driver(&i5000_driver);
+
+ return (pci_rc < 0) ? pci_rc : 0;
+}
+
+/*
+ * i5000_exit() Module exit function
+ * Unregister the driver
+ */
+static void __exit i5000_exit(void)
+{
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i5000_driver);
+}
+
+module_init(i5000_init);
+module_exit(i5000_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR
+ ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
+MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
+ I5000_REVISION);
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
new file mode 100644
index 000000000000..83bfe37c4bbb
--- /dev/null
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -0,0 +1,402 @@
+/*
+ * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel
+ * module (C) 2006 Tim Small
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License.
+ *
+ * Written by Tim Small <tim@buttersideup.com>, based on work by Linux
+ * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and
+ * others.
+ *
+ * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
+ *
+ * Written with reference to 82443BX Host Bridge Datasheet:
+ * http://www.intel.com/design/chipsets/440/documentation.htm
+ * references to this document given in [].
+ *
+ * This module doesn't support the 440LX, but it may be possible to
+ * make it do so (the 440LX's register definitions are different, but
+ * not completely so - I haven't studied them in enough detail to know
+ * how easy this would be).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/slab.h>
+
+#include "edac_core.h"
+
+#define I82443_REVISION "0.1"
+
+#define EDAC_MOD_STR "i82443bxgx_edac"
+
+/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
+ * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory
+ * rows" "The 82443BX supports multiple-bit error detection and
+ * single-bit error correction when ECC mode is enabled and
+ * single/multi-bit error detection when correction is disabled.
+ * During writes to the DRAM, the 82443BX generates ECC for the data
+ * on a QWord basis. Partial QWord writes require a read-modify-write
+ * cycle when ECC is enabled."
+*/
+
+/* "Additionally, the 82443BX ensures that the data is corrected in
+ * main memory so that accumulation of errors is prevented. Another
+ * error within the same QWord would result in a double-bit error
+ * which is unrecoverable. This is known as hardware scrubbing since
+ * it requires no software intervention to correct the data in memory."
+ */
+
+/* [Also see page 100 (section 4.3), "DRAM Interface"]
+ * [Also see page 112 (section 4.6.1.4), ECC]
+ */
+
+#define I82443BXGX_NR_CSROWS 8
+#define I82443BXGX_NR_CHANS 1
+#define I82443BXGX_NR_DIMMS 4
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI
+ * config space offset */
+#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if
+ * row is non-ECC */
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */
+
+#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */
+#define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */
+#define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */
+#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */
+
+#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6
+
+/* 82443 PCI Device 0 */
+#define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI
+ * config space offset, Error Address
+ * Pointer Register */
+#define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */
+#define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */
+#define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */
+
+#define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI
+ * config space offset. */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */
+#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */
+
+#define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI
+ * config space offset. */
+#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */
+#define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */
+#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */
+#define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */
+
+#define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI
+ * config space offset. */
+#define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */
+#define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */
+#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */
+#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */
+
+#define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI
+ * config space offset. */
+
+/* FIXME - don't poll when ECC disabled? */
+
+struct i82443bxgx_edacmc_error_info {
+ u32 eap;
+};
+
+static struct edac_pci_ctl_info *i82443bxgx_pci;
+
+static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
+ struct i82443bxgx_edacmc_error_info
+ *info)
+{
+ struct pci_dev *pdev;
+ pdev = to_pci_dev(mci->dev);
+ pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
+ if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
+ /* Clear error to allow next error to be reported [p.61] */
+ pci_write_bits32(pdev, I82443BXGX_EAP,
+ I82443BXGX_EAP_OFFSET_SBE,
+ I82443BXGX_EAP_OFFSET_SBE);
+
+ if (info->eap & I82443BXGX_EAP_OFFSET_MBE)
+ /* Clear error to allow next error to be reported [p.61] */
+ pci_write_bits32(pdev, I82443BXGX_EAP,
+ I82443BXGX_EAP_OFFSET_MBE,
+ I82443BXGX_EAP_OFFSET_MBE);
+}
+
+static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
+ struct
+ i82443bxgx_edacmc_error_info
+ *info, int handle_errors)
+{
+ int error_found = 0;
+ u32 eapaddr, page, pageoffset;
+
+ /* bits 30:12 hold the 4kb block in which the error occurred
+ * [p.61] */
+ eapaddr = (info->eap & 0xfffff000);
+ page = eapaddr >> PAGE_SHIFT;
+ pageoffset = eapaddr - (page << PAGE_SHIFT);
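+ /* Worked example (illustrative): an EAP of 0x12345abc masks to
+ * eapaddr 0x12345000, i.e. page 0x12345 and pageoffset 0; with
+ * the 0xfffff000 mask and 4KB pages the offset is always 0 here.
+ */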
+
+ if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
+ error_found = 1;
+ if (handle_errors)
+ edac_mc_handle_ce(mci, page, pageoffset,
+ /* 440BX/GX don't make syndrome information
+ * available */
+ 0, edac_mc_find_csrow_by_page(mci, page), 0,
+ mci->ctl_name);
+ }
+
+ if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
+ error_found = 1;
+ if (handle_errors)
+ edac_mc_handle_ue(mci, page, pageoffset,
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
+ }
+
+ return error_found;
+}
+
+static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
+{
+ struct i82443bxgx_edacmc_error_info info;
+
+ debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__);
+ i82443bxgx_edacmc_get_error_info(mci, &info);
+ i82443bxgx_edacmc_process_error_info(mci, &info, 1);
+}
+
+static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev,
+ enum edac_type edac_mode,
+ enum mem_type mtype)
+{
+ struct csrow_info *csrow;
+ int index;
+ u8 drbar, dramc;
+ u32 row_base, row_high_limit, row_high_limit_last;
+
+ pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+ row_high_limit_last = 0;
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+ pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n",
+ mci->mc_idx, __func__, index, drbar);
+ row_high_limit = ((u32) drbar << 23);
+ /* find the DRAM Chip Select Base address and mask */
+ debugf1("MC%d: " __FILE__ ": %s() Row=%d, "
+ "Boundry Address=%#0x, Last = %#0x \n",
+ mci->mc_idx, __func__, index, row_high_limit,
+ row_high_limit_last);
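+ /* Illustrative: the DRB granularity is 8MB, so a DRB value of
+ * 0x10 marks a cumulative boundary of 0x10 << 23 = 0x08000000
+ * (128MB). */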
+
+ /* 440GX goes to 2GB, represented with a DRB of 0. */
+ if (row_high_limit_last && !row_high_limit)
+ row_high_limit = 1UL << 31;
+
+ /* This row is empty [p.49] */
+ if (row_high_limit == row_high_limit_last)
+ continue;
+ row_base = row_high_limit_last;
+ csrow->first_page = row_base >> PAGE_SHIFT;
+ csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
+ csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ /* EAP reports in 4kilobyte granularity [61] */
+ csrow->grain = 1 << 12;
+ csrow->mtype = mtype;
+ /* I don't think 440BX can tell you device type? FIXME? */
+ csrow->dtype = DEV_UNKNOWN;
+ /* Mode is global to all rows on 440BX */
+ csrow->edac_mode = edac_mode;
+ row_high_limit_last = row_high_limit;
+ }
+}
+
+static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ struct mem_ctl_info *mci;
+ u8 dramc;
+ u32 nbxcfg, ecc_mode;
+ enum mem_type mtype;
+ enum edac_type edac_mode;
+
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Something is really hosed if PCI config space reads from
+ * the MC aren't working.
+ */
+ if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
+ return -EIO;
+
+ mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
+ switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) {
+ case I82443BXGX_DRAMC_DRAM_IS_EDO:
+ mtype = MEM_EDO;
+ break;
+ case I82443BXGX_DRAMC_DRAM_IS_SDRAM:
+ mtype = MEM_SDR;
+ break;
+ case I82443BXGX_DRAMC_DRAM_IS_RSDRAM:
+ mtype = MEM_RDR;
+ break;
+ default:
+ debugf0("Unknown/reserved DRAM type value "
+ "in DRAMC register!\n");
+ mtype = MEM_UNKNOWN;
+ }
+
+ if ((mtype == MEM_SDR) || (mtype == MEM_RDR))
+ mci->edac_cap = mci->edac_ctl_cap;
+ else
+ mci->edac_cap = EDAC_FLAG_NONE;
+
+ mci->scrub_cap = SCRUB_FLAG_HW_SRC;
+ pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
+ ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
+ (BIT(0) | BIT(1)));
+
+ mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
+ ? SCRUB_HW_SRC : SCRUB_NONE;
+
+ switch (ecc_mode) {
+ case I82443BXGX_NBXCFG_INTEGRITY_NONE:
+ edac_mode = EDAC_NONE;
+ break;
+ case I82443BXGX_NBXCFG_INTEGRITY_EC:
+ edac_mode = EDAC_EC;
+ break;
+ case I82443BXGX_NBXCFG_INTEGRITY_ECC:
+ case I82443BXGX_NBXCFG_INTEGRITY_SCRUB:
+ edac_mode = EDAC_SECDED;
+ break;
+ default:
+ debugf0("%s(): Unknown/reserved ECC state "
+ "in NBXCFG register!\n", __func__);
+ edac_mode = EDAC_UNKNOWN;
+ break;
+ }
+
+ i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype);
+
+ /* Many BIOSes don't clear error flags on boot, so do this
+ * here, or we get "phantom" errors occurring at module-load
+ * time. */
+ pci_write_bits32(pdev, I82443BXGX_EAP,
+ (I82443BXGX_EAP_OFFSET_SBE |
+ I82443BXGX_EAP_OFFSET_MBE),
+ (I82443BXGX_EAP_OFFSET_SBE |
+ I82443BXGX_EAP_OFFSET_MBE));
+
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I82443_REVISION;
+ mci->ctl_name = "I82443BXGX";
+ mci->dev_name = pci_name(pdev);
+ mci->edac_check = i82443bxgx_edacmc_check;
+ mci->ctl_page_to_phys = NULL;
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i82443bxgx_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1);
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ debugf0("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* don't need to call pci_device_enable() */
+ return i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
+}
+
+static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i82443bxgx_pci)
+ edac_pci_release_generic_ctl(i82443bxgx_pci);
+
+ if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ return;
+
+ edac_mc_free(mci);
+}
+
+EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
+
+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)},
+ {0,} /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl);
+
+static struct pci_driver i82443bxgx_edacmc_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i82443bxgx_edacmc_init_one,
+ .remove = __devexit_p(i82443bxgx_edacmc_remove_one),
+ .id_table = i82443bxgx_pci_tbl,
+};
+
+static int __init i82443bxgx_edacmc_init(void)
+{
+ return pci_register_driver(&i82443bxgx_edacmc_driver);
+}
+
+static void __exit i82443bxgx_edacmc_exit(void)
+{
+ pci_unregister_driver(&i82443bxgx_edacmc_driver);
+}
+
+module_init(i82443bxgx_edacmc_init);
+module_exit(i82443bxgx_edacmc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
+MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index e4bb298e613f..f5ecd2c4d813 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,9 +14,9 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
-#define I82860_REVISION " Ver: 2.0.1 " __DATE__
+#define I82860_REVISION " Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR "i82860_edac"
#define i82860_printk(level, fmt, arg...) \
@@ -54,16 +54,16 @@ struct i82860_error_info {
static const struct i82860_dev_info i82860_devs[] = {
[I82860] = {
- .ctl_name = "i82860"
- },
+ .ctl_name = "i82860"},
};
-static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
+static struct pci_dev *mci_pdev; /* init dev: in case that AGP code
* has already registered driver
*/
+static struct edac_pci_ctl_info *i82860_pci;
static void i82860_get_error_info(struct mem_ctl_info *mci,
- struct i82860_error_info *info)
+ struct i82860_error_info *info)
{
struct pci_dev *pdev;
@@ -91,13 +91,13 @@ static void i82860_get_error_info(struct mem_ctl_info *mci,
if ((info->errsts ^ info->errsts2) & 0x0003) {
pci_read_config_dword(pdev, I82860_EAP, &info->eap);
- pci_read_config_word(pdev, I82860_DERRCTL_STS,
- &info->derrsyn);
+ pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
}
}
static int i82860_process_error_info(struct mem_ctl_info *mci,
- struct i82860_error_info *info, int handle_errors)
+ struct i82860_error_info *info,
+ int handle_errors)
{
int row;
@@ -136,7 +136,7 @@ static void i82860_check(struct mem_ctl_info *mci)
static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
{
unsigned long last_cumul_size;
- u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
+ u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
@@ -155,7 +155,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
csrow = &mci->csrows[index];
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
- (I82860_GBA_SHIFT - PAGE_SHIFT);
+ (I82860_GBA_SHIFT - PAGE_SHIFT);
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
@@ -186,7 +186,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
the channel and the GRA registers map to physical devices so we are
going to make 1 channel for group.
*/
- mci = edac_mc_alloc(0, 16, 1);
+ mci = edac_mc_alloc(0, 16, 1, 0);
if (!mci)
return -ENOMEM;
@@ -200,19 +200,31 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82860_REVISION;
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = i82860_check;
mci->ctl_page_to_phys = NULL;
i82860_init_csrows(mci, pdev);
- i82860_get_error_info(mci, &discard); /* clear counters */
+ i82860_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
- if (edac_mc_add_mc(mci,0)) {
+ if (edac_mc_add_mc(mci)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
+ /* allocating generic PCI control info */
+ i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i82860_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
@@ -225,7 +237,7 @@ fail:
/* returns count (>= 0), or negative on error */
static int __devinit i82860_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
@@ -249,6 +261,9 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
debugf0("%s()\n", __func__);
+ if (i82860_pci)
+ edac_pci_release_generic_ctl(i82860_pci);
+
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
@@ -257,12 +272,11 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
{
- PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82860
- },
+ PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82860},
{
- 0,
- } /* 0 terminated list. */
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
@@ -329,5 +343,5 @@ module_exit(i82860_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
- "Ben Woodard <woodard@redhat.com>");
+ "Ben Woodard <woodard@redhat.com>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
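
The i82860 hunks above follow the same conversion seen throughout this patch: edac_mc.h is replaced by edac_core.h, edac_mc_alloc() gains the controller instance number as a fourth argument, edac_mc_add_mc() no longer takes an index, and the driver fills in mci->dev_name itself. A schematic of the updated probe sequence, with placeholder sizes and error handling trimmed (edac_core.h and linux/pci.h assumed included):

/* Schematic probe flow after the API change (placeholder sizes, error
 * handling trimmed). edac_mc_alloc() now takes the controller instance
 * number as a fourth argument, and edac_mc_add_mc() derives the index
 * from the mci instead of taking it as a parameter. */
static int example_probe1(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	mci = edac_mc_alloc(0 /* pvt size */, 16 /* csrows */,
			    1 /* channels */, 0 /* instance */);
	if (!mci)
		return -ENOMEM;

	mci->dev = &pdev->dev;
	mci->dev_name = pci_name(pdev);	/* now set explicitly by the driver */

	if (edac_mc_add_mc(mci)) {	/* no explicit index argument any more */
		edac_mc_free(mci);
		return -ENODEV;
	}
	return 0;
}
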
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2800b3e614a9..031abadc439a 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,9 +18,9 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
-#define I82875P_REVISION " Ver: 2.0.1 " __DATE__
+#define I82875P_REVISION " Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR "i82875p_edac"
#define i82875p_printk(level, fmt, arg...) \
@@ -174,18 +174,19 @@ struct i82875p_error_info {
static const struct i82875p_dev_info i82875p_devs[] = {
[I82875P] = {
- .ctl_name = "i82875p"
- },
+ .ctl_name = "i82875p"},
};
-static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has
+static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has
* already registered driver
*/
static int i82875p_registered = 1;
+static struct edac_pci_ctl_info *i82875p_pci;
+
static void i82875p_get_error_info(struct mem_ctl_info *mci,
- struct i82875p_error_info *info)
+ struct i82875p_error_info *info)
{
struct pci_dev *pdev;
@@ -197,38 +198,39 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci,
* overwritten by UE.
*/
pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
+
+ if (!(info->errsts & 0x0081))
+ return;
+
pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
pci_read_config_byte(pdev, I82875P_DES, &info->des);
pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
- pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
-
/*
* If the error is the same then we can for both reads then
* the first set of reads is valid. If there is a change then
* there is a CE no info and the second set of reads is valid
* and should be UE info.
*/
- if (!(info->errsts2 & 0x0081))
- return;
-
if ((info->errsts ^ info->errsts2) & 0x0081) {
pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
pci_read_config_byte(pdev, I82875P_DES, &info->des);
- pci_read_config_byte(pdev, I82875P_DERRSYN,
- &info->derrsyn);
+ pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
}
+
+ pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
}
static int i82875p_process_error_info(struct mem_ctl_info *mci,
- struct i82875p_error_info *info, int handle_errors)
+ struct i82875p_error_info *info,
+ int handle_errors)
{
int row, multi_chan;
multi_chan = mci->csrows[0].nr_channels - 1;
- if (!(info->errsts2 & 0x0081))
+ if (!(info->errsts & 0x0081))
return 0;
if (!handle_errors)
@@ -263,10 +265,12 @@ static void i82875p_check(struct mem_ctl_info *mci)
/* Return 0 on success or 1 on failure. */
static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
- struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window)
+ struct pci_dev **ovrfl_pdev,
+ void __iomem **ovrfl_window)
{
struct pci_dev *dev;
void __iomem *window;
+ int err;
*ovrfl_pdev = NULL;
*ovrfl_window = NULL;
@@ -284,14 +288,19 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
if (dev == NULL)
return 1;
- pci_bus_add_device(dev);
+ err = pci_bus_add_device(dev);
+ if (err) {
+ i82875p_printk(KERN_ERR,
+ "%s(): pci_bus_add_device() Failed\n",
+ __func__);
+ }
}
*ovrfl_pdev = dev;
if (pci_enable_device(dev)) {
i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
- "device\n", __func__);
+ "device\n", __func__);
return 1;
}
@@ -307,7 +316,7 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
if (window == NULL) {
i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
- __func__);
+ __func__);
goto fail1;
}
@@ -325,21 +334,20 @@ fail0:
return 1;
}
-
/* Return 1 if dual channel mode is active. Else return 0. */
static inline int dual_channel_active(u32 drc)
{
return (drc >> 21) & 0x1;
}
-
static void i82875p_init_csrows(struct mem_ctl_info *mci,
- struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc)
+ struct pci_dev *pdev,
+ void __iomem *ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
- u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
+ u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
u32 cumul_size;
int index;
@@ -392,7 +400,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans);
+ nr_chans, 0);
if (!mci) {
rc = -ENOMEM;
@@ -407,23 +415,35 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82875P_REVISION;
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
+ mci->dev_name = pci_name(pdev);
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
debugf3("%s(): init pvt\n", __func__);
- pvt = (struct i82875p_pvt *) mci->pvt_info;
+ pvt = (struct i82875p_pvt *)mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
- i82875p_get_error_info(mci, &discard); /* clear counters */
+ i82875p_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
- if (edac_mc_add_mc(mci,0)) {
+ if (edac_mc_add_mc(mci)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail1;
}
+ /* allocating generic PCI control info */
+ i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!i82875p_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
@@ -442,7 +462,7 @@ fail0:
/* returns count (>= 0), or negative on error */
static int __devinit i82875p_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
@@ -467,10 +487,13 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
debugf0("%s()\n", __func__);
+ if (i82875p_pci)
+ edac_pci_release_generic_ctl(i82875p_pci);
+
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
- pvt = (struct i82875p_pvt *) mci->pvt_info;
+ pvt = (struct i82875p_pvt *)mci->pvt_info;
if (pvt->ovrfl_window)
iounmap(pvt->ovrfl_window);
@@ -488,12 +511,11 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
{
- PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- I82875P
- },
+ PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82875P},
{
- 0,
- } /* 0 terminated list. */
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
@@ -517,7 +539,7 @@ static int __init i82875p_init(void)
if (mci_pdev == NULL) {
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82875_0, NULL);
+ PCI_DEVICE_ID_INTEL_82875_0, NULL);
if (!mci_pdev) {
debugf0("875p pci_get_device fail\n");
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
new file mode 100644
index 000000000000..0ee888456932
--- /dev/null
+++ b/drivers/edac/i82975x_edac.c
@@ -0,0 +1,666 @@
+/*
+ * Intel 82975X Memory Controller kernel module
+ * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com)
+ * (C) 2007 jetzbroadband (http://jetzbroadband.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Arvind R.
+ * Copied from i82875p_edac.c source:
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+
+#include "edac_core.h"
+
+#define I82975X_REVISION " Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR "i82975x_edac"
+
+#define i82975x_printk(level, fmt, arg...) \
+ edac_printk(level, "i82975x", fmt, ##arg)
+
+#define i82975x_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg)
+
+#ifndef PCI_DEVICE_ID_INTEL_82975_0
+#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
+#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
+
+#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
+
+/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
+#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
+ *
+ * 31:7 128 byte cache-line address
+ * 6:1 reserved
+ * 0 0: CH0; 1: CH1
+ */
+
+#define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b)
+ * 0h: Processor Memory Reads
+ * 1h:7h reserved
+ * More - See Page 65 of Intel DocSheet.
+ */
+
+#define I82975X_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:12 reserved
+ * 11 Thermal Sensor Event
+ * 10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Refresh Timeout
+ * 7:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+/* Error Reporting is supported by 3 mechanisms:
+ 1. DMI SERR generation ( ERRCMD )
+ 2. SMI DMI generation ( SMICMD )
+ 3. SCI DMI generation ( SCICMD )
+NOTE: Only ONE of the three must be enabled
+*/
+#define I82975X_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:12 reserved
+ * 11 Thermal Sensor Event
+ * 10 reserved
+ * 9 non-DRAM lock error (ndlock)
+ * 8 Refresh Timeout
+ * 7:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+#define I82975X_SMICMD 0xcc /* Error Command (16b)
+ *
+ * 15:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+#define I82975X_SCICMD 0xce /* Error Command (16b)
+ *
+ * 15:2 reserved
+ * 1 ECC UE (multibit DRAM error)
+ * 0 ECC CE (singlebit DRAM error)
+ */
+
+#define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b)
+ *
+ * 7:1 reserved
+ * 0 Bit32 of the Dram Error Address
+ */
+
+#define I82975X_MCHBAR 0x44 /*
+ *
+ * 31:14 Base Addr of 16K memory-mapped
+ * configuration space
+ * 13:1 reserved
+ * 0 mem-mapped config space enable
+ */
+
+/* NOTE: Following addresses have to be indexed using MCHBAR offset (44h, 32b) */
+/* Intel 82975x memory mapped register space */
+
+#define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */
+
+#define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8)
+ *
+ * 7 set to 1 in highest DRB of
+ * channel if 4GB in ch.
+ * 6:2 upper boundary of rank in
+ * 32MB grains
+ * 1:0 set to 0
+ */
+#define I82975X_DRB_CH0R0 0x100
+#define I82975X_DRB_CH0R1 0x101
+#define I82975X_DRB_CH0R2 0x102
+#define I82975X_DRB_CH0R3 0x103
+#define I82975X_DRB_CH1R0 0x180
+#define I82975X_DRB_CH1R1 0x181
+#define I82975X_DRB_CH1R2 0x182
+#define I82975X_DRB_CH1R3 0x183
+
+
+#define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8)
+ * defines the PAGE SIZE to be used
+ * for the rank
+ * 7 reserved
+ * 6:4 row attr of odd rank, i.e. 1
+ * 3 reserved
+ * 2:0 row attr of even rank, i.e. 0
+ *
+ * 000 = unpopulated
+ * 001 = reserved
+ * 010 = 4KiB
+ * 011 = 8KiB
+ * 100 = 16KiB
+ * others = reserved
+ */
+#define I82975X_DRA_CH0R01 0x108
+#define I82975X_DRA_CH0R23 0x109
+#define I82975X_DRA_CH1R01 0x188
+#define I82975X_DRA_CH1R23 0x189
+
+
+#define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b)
+ *
+ * 15:8 reserved
+ * 7:6 Rank 3 architecture
+ * 5:4 Rank 2 architecture
+ * 3:2 Rank 1 architecture
+ * 1:0 Rank 0 architecture
+ *
+ * 00 => x16 devices; i.e 4 banks
+ * 01 => x8 devices; i.e 8 banks
+ */
+#define I82975X_C0BNKARC 0x10e
+#define I82975X_C1BNKARC 0x18e
+
+
+
+#define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b)
+ *
+ * 31:30 reserved
+ * 29 init complete
+ * 28:11 reserved, according to Intel
+ * 22:21 number of channels
+ * 00=1 01=2 in 82875
+ * seems to be ECC mode
+ * bits in 82975 in Asus
+ * P5W
+ * 19:18 Data Integ Mode
+ * 00=none 01=ECC in 82875
+ * 10:8 refresh mode
+ * 7 reserved
+ * 6:4 mode select
+ * 3:2 reserved
+ * 1:0 DRAM type 10=Second Revision
+ * DDR2 SDRAM
+ * 00, 01, 11 reserved
+ */
+#define I82975X_DRC_CH0M0 0x120
+#define I82975X_DRC_CH1M0 0x1A0
+
+
+#define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b)
+ * 31 0=Standard Address Map
+ * 1=Enhanced Address Map
+ * 30:0 reserved
+ */
+
+#define I82975X_DRC_CH0M1 0x124
+#define I82975X_DRC_CH1M1 0x1A4
+
+enum i82975x_chips {
+ I82975X = 0,
+};
+
+struct i82975x_pvt {
+ void __iomem *mch_window;
+};
+
+struct i82975x_dev_info {
+ const char *ctl_name;
+};
+
+struct i82975x_error_info {
+ u16 errsts;
+ u32 eap;
+ u8 des;
+ u8 derrsyn;
+ u16 errsts2;
+ u8 chan; /* the channel is bit 0 of EAP */
+ u8 xeap; /* extended eap bit */
+};
+
+static const struct i82975x_dev_info i82975x_devs[] = {
+ [I82975X] = {
+ .ctl_name = "i82975x"
+ },
+};
+
+static struct pci_dev *mci_pdev; /* init dev: in case the AGP code has
+ * already registered a driver
+ */
+
+static int i82975x_registered = 1;
+
+static void i82975x_get_error_info(struct mem_ctl_info *mci,
+ struct i82975x_error_info *info)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(mci->dev);
+
+ /*
+ * This is a mess because there is no atomic way to read all the
+ * registers at once, and the registers can change between reads,
+ * with a CE being overwritten by a UE.
+ */
+ pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts);
+ pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+ pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+ pci_read_config_byte(pdev, I82975X_DES, &info->des);
+ pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn);
+ pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2);
+
+ pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003);
+
+ /*
+ * If the error status is the same for both reads, then the first
+ * set of reads is valid. If it changed, then a CE occurred with no
+ * info and the second set of reads is valid and should be the
+ * UE info.
+ */
+ if (!(info->errsts2 & 0x0003))
+ return;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ pci_read_config_dword(pdev, I82975X_EAP, &info->eap);
+ pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap);
+ pci_read_config_byte(pdev, I82975X_DES, &info->des);
+ pci_read_config_byte(pdev, I82975X_DERRSYN,
+ &info->derrsyn);
+ }
+}
+
+static int i82975x_process_error_info(struct mem_ctl_info *mci,
+ struct i82975x_error_info *info, int handle_errors)
+{
+ int row, multi_chan, chan;
+
+ multi_chan = mci->csrows[0].nr_channels - 1;
+
+ if (!(info->errsts2 & 0x0003))
+ return 0;
+
+ if (!handle_errors)
+ return 1;
+
+ if ((info->errsts ^ info->errsts2) & 0x0003) {
+ edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ info->errsts = info->errsts2;
+ }
+
+ chan = info->eap & 1;
+ info->eap >>= 1;
+ if (info->xeap)
+ info->eap |= 0x80000000;
+ info->eap >>= PAGE_SHIFT;
+ row = edac_mc_find_csrow_by_page(mci, info->eap);
+
+ if (info->errsts & 0x0002)
+ edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE");
+ else
+ edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
+ multi_chan ? chan : 0,
+ "i82975x CE");
+
+ return 1;
+}
+
+static void i82975x_check(struct mem_ctl_info *mci)
+{
+ struct i82975x_error_info info;
+
+ debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ i82975x_get_error_info(mci, &info);
+ i82975x_process_error_info(mci, &info, 1);
+}
+
+/* Return 1 if dual channel mode is active. Else return 0. */
+static int dual_channel_active(void __iomem *mch_window)
+{
+ /*
+ * We treat an interleaved-symmetric configuration as dual-channel,
+ * with EAP bit 0 giving the channel of the error location.
+ *
+ * All other configurations are treated as single channel - EAP
+ * bit 0 still resolves correctly in the symmetric area of mixed
+ * (symmetric/asymmetric) configurations.
+ */
+ u8 drb[4][2];
+ int row;
+ int dualch;
+
+ for (dualch = 1, row = 0; dualch && (row < 4); row++) {
+ drb[row][0] = readb(mch_window + I82975X_DRB + row);
+ drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80);
+ dualch = dualch && (drb[row][0] == drb[row][1]);
+ }
+ return dualch;
+}
+
+static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
+{
+ /*
+ * ASUS P5W DH either does not program this register or programs
+ * it wrong!
+ * ECC is possible on i82975x ONLY with DEV_X8, which should mean 'val'
+ * for each rank should be 01b - the LSB of the word should be 0x55;
+ * but it reads 0!
+ */
+ return DEV_X8;
+}
+
+static void i82975x_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev, void __iomem *mch_window)
+{
+ struct csrow_info *csrow;
+ unsigned long last_cumul_size;
+ u8 value;
+ u32 cumul_size;
+ int index;
+
+ last_cumul_size = 0;
+
+ /*
+ * 82875 comment:
+ * The DRAM row boundary (DRB) reg values are the boundary addresses
+ * for each DRAM row, with a granularity of 32 or 64MB (single/dual
+ * channel operation). DRB regs are cumulative; therefore DRB7 will
+ * contain the total memory contained in all eight rows.
+ *
+ * FIXME:
+ * EDAC currently works for Dual-channel Interleaved configuration.
+ * Other configurations, which the chip supports, need fixing/testing.
+ *
+ */
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
+ value = readb(mch_window + I82975X_DRB + index +
+ ((index >= 4) ? 0x80 : 0));
+ cumul_size = value;
+ cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT);
+ debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
+ cumul_size);
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+
+ csrow->first_page = last_cumul_size;
+ csrow->last_page = cumul_size - 1;
+ csrow->nr_pages = cumul_size - last_cumul_size;
+ last_cumul_size = cumul_size;
+ csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */
+ csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */
+ csrow->dtype = i82975x_dram_type(mch_window, index);
+ csrow->edac_mode = EDAC_SECDED; /* only supported */
+ }
+}
+
+/* #define i82975x_DEBUG_IOMEM */
+
+#ifdef i82975x_DEBUG_IOMEM
+static void i82975x_print_dram_timings(void __iomem *mch_window)
+{
+ /*
+ * The register meanings are from Intel specs;
+ * (shows 13-5-5-5 for 800-DDR2)
+ * Asus P5W Bios reports 15-5-4-4
+ * What's your religion?
+ */
+ static const int caslats[4] = { 5, 4, 3, 6 };
+ u32 dtreg[2];
+
+ dtreg[0] = readl(mch_window + 0x114);
+ dtreg[1] = readl(mch_window + 0x194);
+ i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n"
+ " RAS Active Min = %d %d\n"
+ " CAS latency = %d %d\n"
+ " RAS to CAS = %d %d\n"
+ " RAS precharge = %d %d\n",
+ (dtreg[0] >> 19) & 0x0f,
+ (dtreg[1] >> 19) & 0x0f,
+ caslats[(dtreg[0] >> 8) & 0x03],
+ caslats[(dtreg[1] >> 8) & 0x03],
+ ((dtreg[0] >> 4) & 0x07) + 2,
+ ((dtreg[1] >> 4) & 0x07) + 2,
+ (dtreg[0] & 0x07) + 2,
+ (dtreg[1] & 0x07) + 2
+ );
+
+}
+#endif
+
+static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
+{
+ int rc = -ENODEV;
+ struct mem_ctl_info *mci;
+ struct i82975x_pvt *pvt;
+ void __iomem *mch_window;
+ u32 mchbar;
+ u32 drc[2];
+ struct i82975x_error_info discard;
+ int chans;
+#ifdef i82975x_DEBUG_IOMEM
+ u8 c0drb[4];
+ u8 c1drb[4];
+#endif
+
+ debugf0("%s()\n", __func__);
+
+ pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
+ if (!(mchbar & 1)) {
+ debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
+ goto fail0;
+ }
+ mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
+ mch_window = ioremap_nocache(mchbar, 0x1000);
+
+#ifdef i82975x_DEBUG_IOMEM
+ i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n",
+ mchbar, mch_window);
+
+ c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0);
+ c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1);
+ c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2);
+ c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3);
+ c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0);
+ c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1);
+ c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2);
+ c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3);
+ i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]);
+ i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]);
+ i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]);
+ i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]);
+ i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]);
+ i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]);
+ i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]);
+ i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]);
+#endif
+
+ drc[0] = readl(mch_window + I82975X_DRC_CH0M0);
+ drc[1] = readl(mch_window + I82975X_DRC_CH1M0);
+#ifdef i82975x_DEBUG_IOMEM
+ i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0],
+ ((drc[0] >> 21) & 3) == 1 ?
+ "ECC enabled" : "ECC disabled");
+ i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1],
+ ((drc[1] >> 21) & 3) == 1 ?
+ "ECC enabled" : "ECC disabled");
+
+ i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n",
+ readw(mch_window + I82975X_C0BNKARC));
+ i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n",
+ readw(mch_window + I82975X_C1BNKARC));
+ i82975x_print_dram_timings(mch_window);
+ goto fail1;
+#endif
+ if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) {
+ i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n");
+ goto fail1;
+ }
+
+ chans = dual_channel_active(mch_window) + 1;
+
+ /* assuming only one controller, index thus is 0 */
+ mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
+ chans, 0);
+ if (!mci) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = I82975X_REVISION;
+ mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
+ mci->edac_check = i82975x_check;
+ mci->ctl_page_to_phys = NULL;
+ debugf3("%s(): init pvt\n", __func__);
+ pvt = (struct i82975x_pvt *) mci->pvt_info;
+ pvt->mch_window = mch_window;
+ i82975x_init_csrows(mci, pdev, mch_window);
+ i82975x_get_error_info(mci, &discard); /* clear counters */
+
+ /* finalize this instance of memory controller with edac core */
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto fail2;
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+ return 0;
+
+fail2:
+ edac_mc_free(mci);
+
+fail1:
+ iounmap(mch_window);
+fail0:
+ return rc;
+}
+
+/* returns count (>= 0), or negative on error */
+static int __devinit i82975x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rc;
+
+ debugf0("%s()\n", __func__);
+
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
+
+ rc = i82975x_probe1(pdev, ent->driver_data);
+
+ if (mci_pdev == NULL)
+ mci_pdev = pci_dev_get(pdev);
+
+ return rc;
+}
+
+static void __devexit i82975x_remove_one(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i82975x_pvt *pvt;
+
+ debugf0("%s()\n", __func__);
+
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (mci == NULL)
+ return;
+
+ pvt = mci->pvt_info;
+ if (pvt->mch_window)
+ iounmap(pvt->mch_window);
+
+ edac_mc_free(mci);
+}
+
+static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
+ {
+ PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ I82975X
+ },
+ {
+ 0,
+ } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl);
+
+static struct pci_driver i82975x_driver = {
+ .name = EDAC_MOD_STR,
+ .probe = i82975x_init_one,
+ .remove = __devexit_p(i82975x_remove_one),
+ .id_table = i82975x_pci_tbl,
+};
+
+static int __init i82975x_init(void)
+{
+ int pci_rc;
+
+ debugf3("%s()\n", __func__);
+
+ pci_rc = pci_register_driver(&i82975x_driver);
+ if (pci_rc < 0)
+ goto fail0;
+
+ if (mci_pdev == NULL) {
+ mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82975_0, NULL);
+
+ if (!mci_pdev) {
+ debugf0("i82975x pci_get_device fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+
+ pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
+
+ if (pci_rc < 0) {
+ debugf0("i82975x init fail\n");
+ pci_rc = -ENODEV;
+ goto fail1;
+ }
+ }
+
+ return 0;
+
+fail1:
+ pci_unregister_driver(&i82975x_driver);
+
+fail0:
+ if (mci_pdev != NULL)
+ pci_dev_put(mci_pdev);
+
+ return pci_rc;
+}
+
+static void __exit i82975x_exit(void)
+{
+ debugf3("%s()\n", __func__);
+
+ pci_unregister_driver(&i82975x_driver);
+
+ if (!i82975x_registered) {
+ i82975x_remove_one(mci_pdev);
+ pci_dev_put(mci_pdev);
+ }
+}
+
+module_init(i82975x_init);
+module_exit(i82975x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
+MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
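
In the new i82975x driver above, i82975x_init_csrows() turns each cumulative 8-bit DRB value (32MiB granularity, I82975X_DRB_SHIFT = 25) into page counts by shifting by DRB_SHIFT - PAGE_SHIFT. A stand-alone arithmetic check of that conversion, assuming 4KiB pages and made-up DRB readings:

#include <stdio.h>

#define DRB_SHIFT	25	/* fixed 32MiB grain, as in the driver */
#define PAGE_SHIFT	12	/* assuming 4KiB pages */

int main(void)
{
	/* Hypothetical cumulative DRB readings for four ranks on one channel;
	 * each unit is 32MiB and the values are cumulative boundaries. */
	unsigned int drb[4] = { 0x08, 0x10, 0x10, 0x20 };
	unsigned long last = 0;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long cumul = (unsigned long)drb[i] <<
				      (DRB_SHIFT - PAGE_SHIFT);

		if (cumul == last) {
			printf("rank %d: not populated\n", i);
			continue;
		}
		printf("rank %d: first_page=%lu nr_pages=%lu (%lu MiB)\n",
		       i, last, cumul - last,
		       (cumul - last) >> (20 - PAGE_SHIFT));
		last = cumul;
	}
	return 0;
}
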
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
new file mode 100644
index 000000000000..e66cdd42a392
--- /dev/null
+++ b/drivers/edac/pasemi_edac.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip memory controllers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include "edac_core.h"
+
+#define MODULE_NAME "pasemi_edac"
+
+#define MCCFG_MCEN 0x300
+#define MCCFG_MCEN_MMC_EN 0x00000001
+#define MCCFG_ERRCOR 0x388
+#define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100
+#define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010
+#define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001
+#define MCCFG_SCRUB 0x384
+#define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001
+#define MCDEBUG_ERRCTL1 0x728
+#define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000
+#define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000
+#define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000
+#define MCDEBUG_ERRSTA 0x730
+#define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004
+#define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002
+#define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001
+#define MCDEBUG_ERRCNT1 0x734
+#define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080
+#define MCDEBUG_ERRLOG1A 0x738
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000
+#define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000
+#define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000
+#define MCDEBUG_ERRLOG1A_MERR_BA_S 20
+#define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000
+#define MCDEBUG_ERRLOG1A_MERR_CS_S 16
+#define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff
+#define MCDRAM_RANKCFG 0x114
+#define MCDRAM_RANKCFG_EN 0x00000001
+#define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0
+#define MCDRAM_RANKCFG_TYPE_SIZE_S 6
+
+#define PASEMI_EDAC_NR_CSROWS 8
+#define PASEMI_EDAC_NR_CHANS 1
+#define PASEMI_EDAC_ERROR_GRAIN 64
+
+static int last_page_in_mmc;
+static int system_mmc_id;
+
+
+static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
+{
+ struct pci_dev *pdev = to_pci_dev(mci->dev);
+ u32 tmp;
+
+ pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
+ &tmp);
+
+ tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS
+ | MCDEBUG_ERRSTA_SBE_STATUS);
+
+ if (tmp) {
+ if (tmp & MCDEBUG_ERRSTA_SBE_STATUS)
+ pci_write_config_dword(pdev, MCDEBUG_ERRCNT1,
+ MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO);
+ pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp);
+ }
+
+ return tmp;
+}
+
+static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
+{
+ struct pci_dev *pdev = to_pci_dev(mci->dev);
+ u32 errlog1a;
+ u32 cs;
+
+ if (!errsta)
+ return;
+
+ pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a);
+
+ cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >>
+ MCDEBUG_ERRLOG1A_MERR_CS_S;
+
+ /* uncorrectable/multi-bit errors */
+ if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
+ MCDEBUG_ERRSTA_RFL_STATUS)) {
+ edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
+ cs, mci->ctl_name);
+ }
+
+ /* correctable/single-bit errors */
+ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
+ edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
+ 0, cs, 0, mci->ctl_name);
+ }
+}
+
+static void pasemi_edac_check(struct mem_ctl_info *mci)
+{
+ u32 errsta;
+
+ errsta = pasemi_edac_get_error_info(mci);
+ if (errsta)
+ pasemi_edac_process_error_info(mci, errsta);
+}
+
+static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev,
+ enum edac_type edac_mode)
+{
+ struct csrow_info *csrow;
+ u32 rankcfg;
+ int index;
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ csrow = &mci->csrows[index];
+
+ pci_read_config_dword(pdev,
+ MCDRAM_RANKCFG + (index * 12),
+ &rankcfg);
+
+ if (!(rankcfg & MCDRAM_RANKCFG_EN))
+ continue;
+
+ switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
+ MCDRAM_RANKCFG_TYPE_SIZE_S) {
+ case 0:
+ csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+ break;
+ case 1:
+ csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+ break;
+ case 2:
+ case 3:
+ csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+ break;
+ case 4:
+ csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+ break;
+ case 5:
+ csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+ break;
+ default:
+ edac_mc_printk(mci, KERN_ERR,
+ "Unrecognized Rank Config. rankcfg=%u\n",
+ rankcfg);
+ return -EINVAL;
+ }
+
+ csrow->first_page = last_page_in_mmc;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ last_page_in_mmc += csrow->nr_pages;
+ csrow->page_mask = 0;
+ csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
+ csrow->mtype = MEM_DDR;
+ csrow->dtype = DEV_UNKNOWN;
+ csrow->edac_mode = edac_mode;
+ }
+ return 0;
+}
+
+static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mem_ctl_info *mci = NULL;
+ u32 errctl1, errcor, scrub, mcen;
+
+ pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
+ if (!(mcen & MCCFG_MCEN_MMC_EN))
+ return -ENODEV;
+
+ /*
+ * We should think about enabling other error detection later on
+ */
+
+ pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1);
+ errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN |
+ MCDEBUG_ERRCTL1_MBE_LOG_EN |
+ MCDEBUG_ERRCTL1_RFL_LOG_EN;
+ pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
+
+ mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
+ system_mmc_id++);
+
+ if (mci == NULL)
+ return -ENOMEM;
+
+ pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor);
+ errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN |
+ MCCFG_ERRCOR_ECC_GEN_EN |
+ MCCFG_ERRCOR_ECC_CRR_EN;
+
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
+ ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ?
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) :
+ EDAC_FLAG_NONE;
+ mci->mod_name = MODULE_NAME;
+ mci->dev_name = pci_name(pdev);
+ mci->ctl_name = "pasemi,1682m-mc";
+ mci->edac_check = pasemi_edac_check;
+ mci->ctl_page_to_phys = NULL;
+ pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub);
+ mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC;
+ mci->scrub_mode =
+ ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) |
+ ((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0);
+
+ if (pasemi_edac_init_csrows(mci, pdev,
+ (mci->edac_cap & EDAC_FLAG_SECDED) ?
+ EDAC_SECDED :
+ ((mci->edac_cap & EDAC_FLAG_EC) ?
+ EDAC_EC : EDAC_NONE)))
+ goto fail;
+
+ /*
+ * Clear status
+ */
+ pasemi_edac_get_error_info(mci);
+
+ if (edac_mc_add_mc(mci))
+ goto fail;
+
+ /* get this far and it's successful */
+ return 0;
+
+fail:
+ edac_mc_free(mci);
+ return -ENODEV;
+}
+
+static void __devexit pasemi_edac_remove(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (!mci)
+ return;
+
+ edac_mc_free(mci);
+}
+
+
+static const struct pci_device_id pasemi_edac_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) },
+ {0, } /* 0 terminated list. */
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl);
+
+static struct pci_driver pasemi_edac_driver = {
+ .name = MODULE_NAME,
+ .probe = pasemi_edac_probe,
+ .remove = __devexit_p(pasemi_edac_remove),
+ .id_table = pasemi_edac_pci_tbl,
+};
+
+static int __init pasemi_edac_init(void)
+{
+ return pci_register_driver(&pasemi_edac_driver);
+}
+
+static void __exit pasemi_edac_exit(void)
+{
+ pci_unregister_driver(&pasemi_edac_driver);
+}
+
+module_init(pasemi_edac_init);
+module_exit(pasemi_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("MC support for PA Semi PA6T-1682M memory controller");
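
pasemi_edac_init_csrows() above decodes the RANKCFG TYPE_SIZE field into a rank size and expresses it in pages as 'MiB << (20 - PAGE_SHIFT)', stacking the csrows contiguously via last_page_in_mmc. A small stand-alone check of that arithmetic, assuming 4KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assuming 4KiB pages */

/* Size in MiB for each MCDRAM_RANKCFG TYPE_SIZE code, as decoded by the
 * switch in pasemi_edac_init_csrows() (codes 2 and 3 both map to 512MiB). */
static const unsigned int rank_mib[] = { 128, 256, 512, 512, 1024, 2048 };

int main(void)
{
	unsigned long first_page = 0;
	unsigned int code;

	for (code = 0; code < 6; code++) {
		/* "mib << (20 - PAGE_SHIFT)" is the MiB count expressed in pages */
		unsigned long nr_pages = (unsigned long)rank_mib[code] <<
					 (20 - PAGE_SHIFT);

		printf("code %u: %4u MiB -> %7lu pages, first_page %lu\n",
		       code, rank_mib[code], nr_pages, first_page);
		first_page += nr_pages;	/* ranks are stacked contiguously */
	}
	return 0;
}
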
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index a49cf0a39398..e25f712f2dc3 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -11,7 +11,7 @@
*
* Written with reference to 82600 High Integration Dual PCI System
* Controller Data Book:
- * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
+ * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf
* references to this document given in []
*/
@@ -20,9 +20,9 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
-#include "edac_mc.h"
+#include "edac_core.h"
-#define R82600_REVISION " Ver: 2.0.1 " __DATE__
+#define R82600_REVISION " Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR "r82600_edac"
#define r82600_printk(level, fmt, arg...) \
@@ -131,10 +131,12 @@ struct r82600_error_info {
u32 eapr;
};
-static unsigned int disable_hardware_scrub = 0;
+static unsigned int disable_hardware_scrub;
-static void r82600_get_error_info (struct mem_ctl_info *mci,
- struct r82600_error_info *info)
+static struct edac_pci_ctl_info *r82600_pci;
+
+static void r82600_get_error_info(struct mem_ctl_info *mci,
+ struct r82600_error_info *info)
{
struct pci_dev *pdev;
@@ -144,18 +146,19 @@ static void r82600_get_error_info (struct mem_ctl_info *mci,
if (info->eapr & BIT(0))
/* Clear error to allow next error to be reported [p.62] */
pci_write_bits32(pdev, R82600_EAP,
- ((u32) BIT(0) & (u32) BIT(1)),
- ((u32) BIT(0) & (u32) BIT(1)));
+ ((u32) BIT(0) & (u32) BIT(1)),
+ ((u32) BIT(0) & (u32) BIT(1)));
if (info->eapr & BIT(1))
/* Clear error to allow next error to be reported [p.62] */
pci_write_bits32(pdev, R82600_EAP,
- ((u32) BIT(0) & (u32) BIT(1)),
- ((u32) BIT(0) & (u32) BIT(1)));
+ ((u32) BIT(0) & (u32) BIT(1)),
+ ((u32) BIT(0) & (u32) BIT(1)));
}
-static int r82600_process_error_info (struct mem_ctl_info *mci,
- struct r82600_error_info *info, int handle_errors)
+static int r82600_process_error_info(struct mem_ctl_info *mci,
+ struct r82600_error_info *info,
+ int handle_errors)
{
int error_found;
u32 eapaddr, page;
@@ -172,25 +175,24 @@ static int r82600_process_error_info (struct mem_ctl_info *mci,
* granularity (upper 19 bits only) */
page = eapaddr >> PAGE_SHIFT;
- if (info->eapr & BIT(0)) { /* CE? */
+ if (info->eapr & BIT(0)) { /* CE? */
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, 0, /* not avail */
+ edac_mc_handle_ce(mci, page, 0, /* not avail */
syndrome,
edac_mc_find_csrow_by_page(mci, page),
- 0, /* channel */
- mci->ctl_name);
+ 0, mci->ctl_name);
}
- if (info->eapr & BIT(1)) { /* UE? */
+ if (info->eapr & BIT(1)) { /* UE? */
error_found = 1;
if (handle_errors)
/* 82600 doesn't give enough info */
edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
}
return error_found;
@@ -211,11 +213,11 @@ static inline int ecc_enabled(u8 dramcr)
}
static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- u8 dramcr)
+ u8 dramcr)
{
struct csrow_info *csrow;
int index;
- u8 drbar; /* SDRAM Row Boundry Address Register */
+ u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
u32 reg_sdram, ecc_on, row_base;
@@ -276,7 +278,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
sdram_refresh_rate);
debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
- mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
+ mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
if (mci == NULL)
return -ENOMEM;
@@ -305,15 +307,16 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = R82600_REVISION;
mci->ctl_name = "R82600";
+ mci->dev_name = pci_name(pdev);
mci->edac_check = r82600_check;
mci->ctl_page_to_phys = NULL;
r82600_init_csrows(mci, pdev, dramcr);
- r82600_get_error_info(mci, &discard); /* clear counters */
+ r82600_get_error_info(mci, &discard); /* clear counters */
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
*/
- if (edac_mc_add_mc(mci,0)) {
+ if (edac_mc_add_mc(mci)) {
debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
@@ -326,6 +329,17 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
}
+ /* allocating generic PCI control info */
+ r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
+ if (!r82600_pci) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
debugf3("%s(): success\n", __func__);
return 0;
@@ -336,7 +350,7 @@ fail:
/* returns count (>= 0), or negative on error */
static int __devinit r82600_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
@@ -350,6 +364,9 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
debugf0("%s()\n", __func__);
+ if (r82600_pci)
+ edac_pci_release_generic_ctl(r82600_pci);
+
if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
return;
@@ -358,11 +375,11 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
{
- PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
- },
+ PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
+ },
{
- 0,
- } /* 0 terminated list. */
+ 0,
+ } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
@@ -389,7 +406,7 @@ module_exit(r82600_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
- "on behalf of EADS Astrium");
+ "on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
module_param(disable_hardware_scrub, bool, 0644);
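
Each converted driver in this patch pairs edac_pci_create_generic_ctl() in its probe with edac_pci_release_generic_ctl() in its remove, and keeps probing with only a warning if the control cannot be created. A schematic of that pairing (kernel context, edac_core.h assumed included; names are placeholders):

/* One static pointer per driver, as in the hunks above. */
static struct edac_pci_ctl_info *example_pci;

static void example_setup_pci(struct pci_dev *pdev)
{
	/* Probe keeps going with a warning if this fails. */
	example_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!example_pci)
		printk(KERN_WARNING
		       "%s(): PCI error report via EDAC not setup\n", __func__);
}

static void example_teardown_pci(void)
{
	/* Remove releases the control before deleting the memory controller. */
	if (example_pci)
		edac_pci_release_generic_ctl(example_pci);
}
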
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 41476abc0693..db703758db98 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -224,6 +224,7 @@ ohci_update_phy_reg(struct fw_card *card, int addr,
u32 val, old;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
+ flush_writes(ohci);
msleep(2);
val = reg_read(ohci, OHCI1394_PhyControl);
if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
@@ -586,7 +587,7 @@ static void context_stop(struct context *ctx)
break;
fw_notify("context_stop: still active (0x%08x)\n", reg);
- msleep(1);
+ mdelay(1);
}
}
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 7c53be0387fb..3e4a369d0057 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -840,7 +840,6 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
container_of(base_orb, struct sbp2_command_orb, base);
struct fw_unit *unit = orb->unit;
struct fw_device *device = fw_device(unit->device.parent);
- struct scatterlist *sg;
int result;
if (status != NULL) {
@@ -876,11 +875,10 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
dma_unmap_single(device->card->device, orb->base.request_bus,
sizeof(orb->request), DMA_TO_DEVICE);
- if (orb->cmd->use_sg > 0) {
- sg = (struct scatterlist *)orb->cmd->request_buffer;
- dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+ if (scsi_sg_count(orb->cmd) > 0)
+ dma_unmap_sg(device->card->device, scsi_sglist(orb->cmd),
+ scsi_sg_count(orb->cmd),
orb->cmd->sc_data_direction);
- }
if (orb->page_table_bus != 0)
dma_unmap_single(device->card->device, orb->page_table_bus,
@@ -901,8 +899,8 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
int sg_len, l, i, j, count;
dma_addr_t sg_addr;
- sg = (struct scatterlist *)orb->cmd->request_buffer;
- count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
+ sg = scsi_sglist(orb->cmd);
+ count = dma_map_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
orb->cmd->sc_data_direction);
if (count == 0)
goto fail;
@@ -971,7 +969,7 @@ static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
return 0;
fail_page_table:
- dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
+ dma_unmap_sg(device->card->device, sg, scsi_sg_count(orb->cmd),
orb->cmd->sc_data_direction);
fail:
return -ENOMEM;
@@ -1031,7 +1029,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
orb->request.misc |=
COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
- if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
+ if (scsi_sg_count(cmd) && sbp2_command_orb_map_scatterlist(orb) < 0)
goto fail_mapping;
fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
@@ -1162,7 +1160,7 @@ static struct device_attribute *sbp2_scsi_sysfs_attrs[] = {
static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
- .proc_name = (char *)sbp2_driver_name,
+ .proc_name = sbp2_driver_name,
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
.slave_configure = sbp2_scsi_slave_configure,
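
The fw-sbp2 hunks above switch from cmd->request_buffer/cmd->use_sg to the scsi_sglist()/scsi_sg_count() accessors when mapping the command's data buffer for DMA. A schematic of that mapping pattern (the function name and bare 'struct device *' parameter are placeholders; scsi_sglist() and scsi_sg_count() come from <scsi/scsi_cmnd.h>):

static int example_map_cmd(struct device *dev, struct scsi_cmnd *cmd)
{
	int count;

	if (!scsi_sg_count(cmd))
		return 0;	/* no data phase to map */

	count = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
			   cmd->sc_data_direction);
	if (count == 0)
		return -ENOMEM;

	/* ... build the ORB page table from the 'count' mapped entries ... */

	return count;
}
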
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 80d0121463d0..3ce8e2fbe15f 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -605,8 +605,10 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
* check is sufficient to ensure we don't send response to
* broadcast packets or posted writes.
*/
- if (request->ack != ACK_PENDING)
+ if (request->ack != ACK_PENDING) {
+ kfree(request);
return;
+ }
if (rcode == RCODE_COMPLETE)
fw_fill_response(&request->response, request->request_header,
@@ -628,11 +630,6 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
unsigned long flags;
int tcode, destination, source;
- if (p->payload_length > 2048) {
- /* FIXME: send error response. */
- return;
- }
-
if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
return;
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 5abed193f4a6..5ceaccd10564 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -123,6 +123,10 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
size_t length,
void *callback_data);
+/*
+ * Important note: The callback must guarantee that either fw_send_response()
+ * or kfree() is called on the @request.
+ */
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
int tcode, int destination, int source,
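
The new comment in fw-transaction.h states the ownership rule enforced by the fw-transaction.c hunk above: an address handler must end every request with exactly one of fw_send_response() or kfree(). A minimal sketch of that rule; the helper and its reduced argument list are hypothetical, the real callback takes the full fw_address_callback_t parameter set:

static void example_finish_request(struct fw_card *card,
				   struct fw_request *request, int rcode,
				   bool want_response)
{
	if (want_response)
		fw_send_response(card, request, rcode);	/* consumes request */
	else
		kfree(request);		/* e.g. broadcast/posted write, no response */
}
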
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 88f462122a30..05f02a326f1c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -84,4 +84,13 @@ config DCDBAS
Say Y or M here to enable the driver for use by Dell systems
management software such as Dell OpenManage.
+config DMIID
+ bool "Export DMI identification via sysfs to userspace"
+ depends on DMI
+ default y
+ help
+ Say Y here if you want to query SMBIOS/DMI system identification
+ information from userspace through /sys/class/dmi/id/ or if you want
+ DMI-based module auto-loading.
+
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 98e395f4bb29..8d4ebc805a50 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_PCDP) += pcdp.o
obj-$(CONFIG_DELL_RBU) += dell_rbu.o
obj-$(CONFIG_DCDBAS) += dcdbas.o
+obj-$(CONFIG_DMIID) += dmi-id.o
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 1865b56fb141..18cdcb3ae1ca 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -149,8 +149,9 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
return count;
}
-static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
- size_t count)
+static ssize_t smi_data_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
size_t max_read;
ssize_t ret;
@@ -170,8 +171,9 @@ out:
return ret;
}
-static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
- size_t count)
+static ssize_t smi_data_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
ssize_t ret;
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
index 58a85182b3e8..dcdba0f1b32c 100644
--- a/drivers/firmware/dcdbas.h
+++ b/drivers/firmware/dcdbas.h
@@ -67,8 +67,7 @@
#define DCDBAS_BIN_ATTR_RW(_name) \
struct bin_attribute bin_attr_##_name = { \
.attr = { .name = __stringify(_name), \
- .mode = 0600, \
- .owner = THIS_MODULE }, \
+ .mode = 0600 }, \
.read = _name##_read, \
.write = _name##_write, \
}
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index fc702e40bd43..477a3d0e3caf 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -543,8 +543,9 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
return ret_count;
}
-static ssize_t read_rbu_data(struct kobject *kobj, char *buffer,
- loff_t pos, size_t count)
+static ssize_t read_rbu_data(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
ssize_t ret_count = 0;
@@ -591,8 +592,9 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
spin_unlock(&rbu_data.lock);
}
-static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer,
- loff_t pos, size_t count)
+static ssize_t read_rbu_image_type(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
int size = 0;
if (!pos)
@@ -600,8 +602,9 @@ static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer,
return size;
}
-static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer,
- loff_t pos, size_t count)
+static ssize_t write_rbu_image_type(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
int rc = count;
int req_firm_rc = 0;
@@ -660,8 +663,9 @@ static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer,
return rc;
}
-static ssize_t read_rbu_packet_size(struct kobject *kobj, char *buffer,
- loff_t pos, size_t count)
+static ssize_t read_rbu_packet_size(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
int size = 0;
if (!pos) {
@@ -672,8 +676,9 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj, char *buffer,
return size;
}
-static ssize_t write_rbu_packet_size(struct kobject *kobj, char *buffer,
- loff_t pos, size_t count)
+static ssize_t write_rbu_packet_size(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
{
unsigned long temp;
spin_lock(&rbu_data.lock);
@@ -687,18 +692,18 @@ static ssize_t write_rbu_packet_size(struct kobject *kobj, char *buffer,
}
static struct bin_attribute rbu_data_attr = {
- .attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444},
+ .attr = {.name = "data", .mode = 0444},
.read = read_rbu_data,
};
static struct bin_attribute rbu_image_type_attr = {
- .attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644},
+ .attr = {.name = "image_type", .mode = 0644},
.read = read_rbu_image_type,
.write = write_rbu_image_type,
};
static struct bin_attribute rbu_packet_size_attr = {
- .attr = {.name = "packet_size",.owner = THIS_MODULE,.mode = 0644},
+ .attr = {.name = "packet_size", .mode = 0644},
.read = read_rbu_packet_size,
.write = write_rbu_packet_size,
};
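
The dcdbas and dell_rbu hunks above track the same sysfs change: binary-attribute read/write callbacks now also receive the struct bin_attribute pointer, and the .owner field is dropped from struct attribute. A schematic declaration using the new prototype (names are placeholders, kernel headers assumed included):

static ssize_t example_read(struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t pos, size_t count)
{
	return 0;	/* no data in this sketch */
}

static struct bin_attribute example_attr = {
	.attr = { .name = "example", .mode = 0444 },
	.read = example_read,
};
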
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
new file mode 100644
index 000000000000..59c3b5aa89f4
--- /dev/null
+++ b/drivers/firmware/dmi-id.c
@@ -0,0 +1,222 @@
+/*
+ * Export SMBIOS/DMI info via sysfs to userspace
+ *
+ * Copyright 2007, Lennart Poettering
+ *
+ * Licensed under GPLv2
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/autoconf.h>
+
+#define DEFINE_DMI_ATTR(_name, _mode, _show) \
+static struct device_attribute sys_dmi_##_name##_attr = \
+ __ATTR(_name, _mode, _show, NULL);
+
+#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field) \
+static ssize_t sys_dmi_##_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *page) \
+{ \
+ ssize_t len; \
+ len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(_field)); \
+ page[len-1] = '\n'; \
+ return len; \
+} \
+DEFINE_DMI_ATTR(_name, _mode, sys_dmi_##_name##_show);
+
+DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
+DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
+DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(board_serial, 0400, DMI_BOARD_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(board_asset_tag, 0444, DMI_BOARD_ASSET_TAG);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_vendor, 0444, DMI_CHASSIS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_type, 0444, DMI_CHASSIS_TYPE);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_version, 0444, DMI_CHASSIS_VERSION);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_serial, 0400, DMI_CHASSIS_SERIAL);
+DEFINE_DMI_ATTR_WITH_SHOW(chassis_asset_tag, 0444, DMI_CHASSIS_ASSET_TAG);
+
+static void ascii_filter(char *d, const char *s)
+{
+ /* Filter out characters we don't want to see in the modalias string */
+ for (; *s; s++)
+ if (*s > ' ' && *s < 127 && *s != ':')
+ *(d++) = *s;
+
+ *d = 0;
+}
+
+static ssize_t get_modalias(char *buffer, size_t buffer_size)
+{
+ static const struct mafield {
+ const char *prefix;
+ int field;
+ } fields[] = {
+ { "bvn", DMI_BIOS_VENDOR },
+ { "bvr", DMI_BIOS_VERSION },
+ { "bd", DMI_BIOS_DATE },
+ { "svn", DMI_SYS_VENDOR },
+ { "pn", DMI_PRODUCT_NAME },
+ { "pvr", DMI_PRODUCT_VERSION },
+ { "rvn", DMI_BOARD_VENDOR },
+ { "rn", DMI_BOARD_NAME },
+ { "rvr", DMI_BOARD_VERSION },
+ { "cvn", DMI_CHASSIS_VENDOR },
+ { "ct", DMI_CHASSIS_TYPE },
+ { "cvr", DMI_CHASSIS_VERSION },
+ { NULL, DMI_NONE }
+ };
+
+ ssize_t l, left;
+ char *p;
+ const struct mafield *f;
+
+ strcpy(buffer, "dmi");
+ p = buffer + 3; left = buffer_size - 4;
+
+ for (f = fields; f->prefix && left > 0; f++) {
+ const char *c;
+ char *t;
+
+ c = dmi_get_system_info(f->field);
+ if (!c)
+ continue;
+
+ t = kmalloc(strlen(c) + 1, GFP_KERNEL);
+ if (!t)
+ break;
+ ascii_filter(t, c);
+ l = scnprintf(p, left, ":%s%s", f->prefix, t);
+ kfree(t);
+
+ p += l;
+ left -= l;
+ }
+
+ p[0] = ':';
+ p[1] = 0;
+
+ return p - buffer + 1;
+}
+
+static ssize_t sys_dmi_modalias_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ ssize_t r;
+ r = get_modalias(page, PAGE_SIZE-1);
+ page[r] = '\n';
+ page[r+1] = 0;
+ return r+1;
+}
+
+DEFINE_DMI_ATTR(modalias, 0444, sys_dmi_modalias_show);
+
+static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2];
+
+static struct attribute_group sys_dmi_attribute_group = {
+ .attrs = sys_dmi_attributes,
+};
+
+static struct attribute_group* sys_dmi_attribute_groups[] = {
+ &sys_dmi_attribute_group,
+ NULL
+};
+
+static int dmi_dev_uevent(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
+{
+ strcpy(buffer, "MODALIAS=");
+ get_modalias(buffer+9, buffer_size-9);
+ envp[0] = buffer;
+ envp[1] = NULL;
+
+ return 0;
+}
+
+static struct class dmi_class = {
+ .name = "dmi",
+ .dev_release = (void(*)(struct device *)) kfree,
+ .dev_uevent = dmi_dev_uevent,
+};
+
+static struct device *dmi_dev;
+
+/* Initialization */
+
+#define ADD_DMI_ATTR(_name, _field) \
+ if (dmi_get_system_info(_field)) \
+ sys_dmi_attributes[i++] = & sys_dmi_##_name##_attr.attr;
+
+extern int dmi_available;
+
+static int __init dmi_id_init(void)
+{
+ int ret, i;
+
+ if (!dmi_available)
+ return -ENODEV;
+
+ /* Not necessarily all DMI fields are available on all
+ * systems, hence let's build an attribute table of just
+ * what's available */
+ i = 0;
+ ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
+ ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
+ ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
+ ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
+ ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
+ ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
+ ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
+ ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
+ ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
+ ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
+ ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
+ ADD_DMI_ATTR(board_serial, DMI_BOARD_SERIAL);
+ ADD_DMI_ATTR(board_asset_tag, DMI_BOARD_ASSET_TAG);
+ ADD_DMI_ATTR(chassis_vendor, DMI_CHASSIS_VENDOR);
+ ADD_DMI_ATTR(chassis_type, DMI_CHASSIS_TYPE);
+ ADD_DMI_ATTR(chassis_version, DMI_CHASSIS_VERSION);
+ ADD_DMI_ATTR(chassis_serial, DMI_CHASSIS_SERIAL);
+ ADD_DMI_ATTR(chassis_asset_tag, DMI_CHASSIS_ASSET_TAG);
+ sys_dmi_attributes[i++] = &sys_dmi_modalias_attr.attr;
+
+ ret = class_register(&dmi_class);
+ if (ret)
+ return ret;
+
+ dmi_dev = kzalloc(sizeof(*dmi_dev), GFP_KERNEL);
+ if (!dmi_dev) {
+ ret = -ENOMEM;
+ goto fail_class_unregister;
+ }
+
+ dmi_dev->class = &dmi_class;
+ strcpy(dmi_dev->bus_id, "id");
+ dmi_dev->groups = sys_dmi_attribute_groups;
+
+ ret = device_register(dmi_dev);
+ if (ret)
+ goto fail_class_unregister;
+
+ return 0;
+
+fail_class_unregister:
+
+ class_unregister(&dmi_class);
+
+ return ret;
+}
+
+arch_initcall(dmi_id_init);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 37deee6c0c1c..f7318b3b51f2 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -84,6 +84,7 @@ static int __init dmi_checksum(u8 *buf)
static char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
+int dmi_available;
/*
* Save a DMI string
@@ -102,6 +103,51 @@ static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
dmi_ident[slot] = p;
}
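+/* Save the system UUID (type 1, offset 8) as a printable string; a UUID
+ that is all 0x00 or all 0xFF means "not filled in" and is skipped. */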
+static void __init dmi_save_uuid(struct dmi_header *dm, int slot, int index)
+{
+ u8 *d = (u8*) dm + index;
+ char *s;
+ int is_ff = 1, is_00 = 1, i;
+
+ if (dmi_ident[slot])
+ return;
+
+ for (i = 0; i < 16 && (is_ff || is_00); i++) {
+ if (d[i] != 0x00) is_00 = 0;
+ if (d[i] != 0xFF) is_ff = 0;
+ }
+
+ if (is_ff || is_00)
+ return;
+
+ s = dmi_alloc(16*2+4+1);
+ if (!s)
+ return;
+
+ sprintf(s,
+ "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
+ d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
+ d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
+
+ dmi_ident[slot] = s;
+}
+
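+/* Save a single byte field (here the chassis type) as a decimal string;
+ the top bit is masked off as it is not part of the type value. */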
+static void __init dmi_save_type(struct dmi_header *dm, int slot, int index)
+{
+ u8 *d = (u8*) dm + index;
+ char *s;
+
+ if (dmi_ident[slot])
+ return;
+
+ s = dmi_alloc(4);
+ if (!s)
+ return;
+
+ sprintf(s, "%u", *d & 0x7F);
+ dmi_ident[slot] = s;
+}
+
static void __init dmi_save_devices(struct dmi_header *dm)
{
int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
@@ -192,11 +238,21 @@ static void __init dmi_decode(struct dmi_header *dm)
dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
+ dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
break;
case 2: /* Base Board Information */
dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
dmi_save_ident(dm, DMI_BOARD_NAME, 5);
dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
+ dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
+ dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
+ break;
+ case 3: /* Chassis Information */
+ dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
+ dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
+ dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
+ dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
+ dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
break;
case 10: /* Onboard Devices Information */
dmi_save_devices(dm);
@@ -243,18 +299,20 @@ void __init dmi_scan_machine(void)
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto out;
- /* This is called as a core_initcall() because it isn't
- * needed during early boot. This also means we can
- * iounmap the space when we're done with it.
- */
+ /* This is called as a core_initcall() because it isn't
+ * needed during early boot. This also means we can
+ * iounmap the space when we're done with it.
+ */
p = dmi_ioremap(efi.smbios, 32);
if (p == NULL)
goto out;
rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
dmi_iounmap(p, 32);
- if (!rc)
+ if (!rc) {
+ dmi_available = 1;
return;
+ }
}
else {
/*
@@ -268,8 +326,10 @@ void __init dmi_scan_machine(void)
for (q = p; q < p + 0x10000; q += 16) {
rc = dmi_present(q);
- if (!rc)
+ if (!rc) {
+ dmi_available = 1;
return;
+ }
}
}
out: printk(KERN_INFO "DMI not present or invalid.\n");
@@ -404,3 +464,4 @@ int dmi_get_year(int field)
return year;
}
+
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index d8806e4f1829..0fb730ee1da8 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -74,7 +74,7 @@ static struct edd_device *edd_devices[EDD_MBR_SIG_MAX];
#define EDD_DEVICE_ATTR(_name,_mode,_show,_test) \
struct edd_attribute edd_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.test = _test, \
};
@@ -669,7 +669,7 @@ edd_get_pci_dev(struct edd_device *edev)
struct edd_info *info = edd_dev_get_info(edev);
if (edd_dev_is_type(edev, "PCI")) {
- return pci_find_slot(info->params.interface_path.pci.bus,
+ return pci_get_bus_and_slot(info->params.interface_path.pci.bus,
PCI_DEVFN(info->params.interface_path.pci.slot,
info->params.interface_path.pci.
function));
@@ -682,9 +682,12 @@ edd_create_symlink_to_pcidev(struct edd_device *edev)
{
struct pci_dev *pci_dev = edd_get_pci_dev(edev);
+ int ret;
if (!pci_dev)
return 1;
- return sysfs_create_link(&edev->kobj,&pci_dev->dev.kobj,"pci_dev");
+ ret = sysfs_create_link(&edev->kobj,&pci_dev->dev.kobj,"pci_dev");
+ pci_dev_put(pci_dev);
+ return ret;
}
static inline void
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 1324984a4c35..bfd2d67df689 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -131,21 +131,21 @@ struct efivar_attribute {
#define EFI_ATTR(_name, _mode, _show, _store) \
struct subsys_attribute efi_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
#define EFIVAR_ATTR(_name, _mode, _show, _store) \
struct efivar_attribute efivar_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
#define VAR_SUBSYS_ATTR(_name, _mode, _show, _store) \
struct subsys_attribute var_subsys_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 2b4b76e8bd72..58e9f8e457f8 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -15,6 +15,7 @@
#include <linux/console.h>
#include <linux/efi.h>
#include <linux/serial.h>
+#include <linux/serial_8250.h>
#include <asm/vga.h>
#include "pcdp.h"
@@ -27,7 +28,7 @@ setup_serial_console(struct pcdp_uart *uart)
char parity;
mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
- p += sprintf(p, "console=uart,%s,0x%lx",
+ p += sprintf(p, "uart8250,%s,0x%lx",
mmio ? "mmio" : "io", uart->addr.address);
if (uart->baud) {
p += sprintf(p, ",%lu", uart->baud);
@@ -41,7 +42,8 @@ setup_serial_console(struct pcdp_uart *uart)
}
}
- return early_serial_console_init(options);
+ add_preferred_console("uart", 8250, &options[9]);
+ return setup_early_serial8250_console(options);
#else
return -ENODEV;
#endif
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 3afa4a5035b7..b2baeaeba9be 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1009,20 +1009,22 @@ static int hid_resume(struct usb_interface *intf)
}
/* Treat USB reset pretty much the same as suspend/resume */
-static void hid_pre_reset(struct usb_interface *intf)
+static int hid_pre_reset(struct usb_interface *intf)
{
/* FIXME: What if the interface is already suspended? */
hid_suspend(intf, PMSG_ON);
+ return 0;
}
-static void hid_post_reset(struct usb_interface *intf)
+/* Same routine used for post_reset and reset_resume */
+static int hid_post_reset(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev (intf);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
/* FIXME: Any more reinitialization needed? */
- hid_resume(intf);
+ return hid_resume(intf);
}
static struct usb_device_id hid_usb_ids [] = {
@@ -1039,6 +1041,7 @@ static struct usb_driver hid_driver = {
.disconnect = hid_disconnect,
.suspend = hid_suspend,
.resume = hid_resume,
+ .reset_resume = hid_post_reset,
.pre_reset = hid_pre_reset,
.post_reset = hid_post_reset,
.id_table = hid_usb_ids,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 13eea47dceb3..dbdca6f10e46 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -29,17 +29,34 @@ config HWMON_VID
default n
config SENSORS_ABITUGURU
- tristate "Abit uGuru"
+ tristate "Abit uGuru (rev 1 & 2)"
depends on EXPERIMENTAL
help
- If you say yes here you get support for the Abit uGuru chips
- sensor part. The voltage and frequency control parts of the Abit
- uGuru are not supported. The Abit uGuru chip can be found on Abit
- uGuru featuring motherboards (most modern Abit motherboards).
+ If you say yes here you get support for the sensor part of the first
+ and second revision of the Abit uGuru chip. The voltage and frequency
+ control parts of the Abit uGuru are not supported. The Abit uGuru
+ chip can be found on Abit uGuru-featuring motherboards (most modern
+ Abit motherboards from before the end of 2005). For more info and a list
+ of which motherboards have which revision see
+ Documentation/hwmon/abituguru
This driver can also be built as a module. If so, the module
will be called abituguru.
+config SENSORS_ABITUGURU3
+ tristate "Abit uGuru (rev 3)"
+ depends on HWMON && EXPERIMENTAL
+ help
+ If you say yes here you get support for the sensor part of the
+ third revision of the Abit uGuru chip. Only reading the sensors
+ and their settings is supported. The third revision of the Abit
+ uGuru chip can be found on recent Abit motherboards (since the end
+ of 2005). For more info and a list of which motherboards have which
+ revision see Documentation/hwmon/abituguru3
+
+ This driver can also be built as a module. If so, the module
+ will be called abituguru3.
+
config SENSORS_AD7418
tristate "Analog Devices AD7416, AD7417 and AD7418"
depends on I2C && EXPERIMENTAL
@@ -250,12 +267,10 @@ config SENSORS_CORETEMP
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
- depends on I2C
- select I2C_ISA
select HWMON_VID
help
If you say yes here you get support for ITE IT8705F, IT8712F,
- IT8716F and IT8718F sensor chips, and the SiS960 clone.
+ IT8716F, IT8718F and IT8726F sensor chips, and the SiS960 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -365,8 +380,8 @@ config SENSORS_LM90
depends on I2C
help
If you say yes here you get support for National Semiconductor LM90,
- LM86, LM89 and LM99, Analog Devices ADM1032 and Maxim MAX6657 and
- MAX6658 sensor chips.
+ LM86, LM89 and LM99, Analog Devices ADM1032 and Maxim MAX6657,
+ MAX6658, MAX6659, MAX6680 and MAX6681 sensor chips.
The Analog Devices ADT7461 sensor chip is also supported, but only
if found in ADM1032 compatibility mode.
@@ -384,6 +399,17 @@ config SENSORS_LM92
This driver can also be built as a module. If so, the module
will be called lm92.
+config SENSORS_LM93
+ tristate "National Semiconductor LM93 and compatibles"
+ depends on HWMON && I2C
+ select HWMON_VID
+ help
+ If you say yes here you get support for National Semiconductor LM93
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called lm93.
+
config SENSORS_MAX1619
tristate "Maxim MAX1619 sensor chip"
depends on I2C
@@ -405,8 +431,6 @@ config SENSORS_MAX6650
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
- depends on I2C && EXPERIMENTAL
- select I2C_ISA
select HWMON_VID
help
If you say yes here you get access to the hardware monitoring
@@ -433,8 +457,7 @@ config SENSORS_PC87427
config SENSORS_SIS5595
tristate "Silicon Integrated Systems Corp. SiS5595"
- depends on I2C && PCI && EXPERIMENTAL
- select I2C_ISA
+ depends on PCI
help
If you say yes here you get support for the integrated sensors in
SiS5595 South Bridges.
@@ -442,6 +465,18 @@ config SENSORS_SIS5595
This driver can also be built as a module. If so, the module
will be called sis5595.
+config SENSORS_DME1737
+ tristate "SMSC DME1737 and compatibles"
+ depends on I2C && EXPERIMENTAL
+ select HWMON_VID
+ help
+ If you say yes here you get support for the hardware monitoring
+ and fan control features of the SMSC DME1737 (and compatibles
+ like the Asus A8000) Super-I/O chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called dme1737.
+
config SENSORS_SMSC47M1
tristate "SMSC LPC47M10x and compatibles"
help
@@ -487,8 +522,7 @@ config SENSORS_SMSC47B397
config SENSORS_VIA686A
tristate "VIA686A"
- depends on I2C && PCI
- select I2C_ISA
+ depends on PCI
help
If you say yes here you get support for the integrated sensors in
Via 686A/B South Bridges.
@@ -509,9 +543,8 @@ config SENSORS_VT1211
config SENSORS_VT8231
tristate "VIA VT8231"
- depends on I2C && PCI && EXPERIMENTAL
+ depends on PCI
select HWMON_VID
- select I2C_ISA
help
If you say yes here then you get support for the integrated sensors
in the VIA VT8231 device.
@@ -584,17 +617,16 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF"
- depends on I2C && EXPERIMENTAL
- select I2C_ISA
+ tristate "Winbond W83627EHF/DHG"
+ select HWMON_VID
help
- If you say yes here you get preliminary support for the hardware
+ If you say yes here you get support for the hardware
monitoring functionality of the Winbond W83627EHF Super-I/O chip.
- Only fan and temperature inputs are supported at the moment, while
- the chip does much more than that.
This driver also supports the W83627EHG, which is the lead-free
- version of the W83627EHF.
+ version of the W83627EHF, and the W83627DHG, which is a similar
+ chip suited for specific Intel processors that use PECI such as
+ the Core 2 Duo.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index cfaf338919dd..59f81fae40a0 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_W83781D) += w83781d.o
obj-$(CONFIG_SENSORS_W83791D) += w83791d.o
obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
+obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
@@ -25,6 +26,7 @@ obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_AMS) += ams/
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
+obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_FSCHER) += fscher.o
@@ -45,6 +47,7 @@ obj-$(CONFIG_SENSORS_LM85) += lm85.o
obj-$(CONFIG_SENSORS_LM87) += lm87.o
obj-$(CONFIG_SENSORS_LM90) += lm90.o
obj-$(CONFIG_SENSORS_LM92) += lm92.o
+obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index bede4d990ea6..d575ee958de5 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -16,9 +16,9 @@
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
- This driver supports the sensor part of the custom Abit uGuru chip found
- on Abit uGuru motherboards. Note: because of lack of specs the CPU / RAM /
- etc voltage & frequency control is not supported!
+ This driver supports the sensor part of the first and second revision of
+ the custom Abit uGuru chip found on Abit uGuru motherboards. Note: because
+ of lack of specs the CPU/RAM voltage & frequency control is not supported!
*/
#include <linux/module.h>
#include <linux/sched.h>
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/dmi.h>
#include <asm/io.h>
/* Banks */
@@ -418,7 +419,7 @@ static int __devinit
abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
u8 sensor_addr)
{
- u8 val, buf[3];
+ u8 val, test_flag, buf[3];
int i, ret = -ENODEV; /* error is the most common used retval :| */
/* If overriden by the user return the user selected type */
@@ -436,7 +437,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
return -ENODEV;
/* Test val is sane / usable for sensor type detection. */
- if ((val < 10u) || (val > 240u)) {
+ if ((val < 10u) || (val > 250u)) {
printk(KERN_WARNING ABIT_UGURU_NAME
": bank1-sensor: %d reading (%d) too close to limits, "
"unable to determine sensor type, skipping sensor\n",
@@ -449,10 +450,20 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
ABIT_UGURU_DEBUG(2, "testing bank1 sensor %d\n", (int)sensor_addr);
 /* Volt sensor test, enable volt low alarm, set min value ridiculously
- high. If its a volt sensor this should always give us an alarm. */
- buf[0] = ABIT_UGURU_VOLT_LOW_ALARM_ENABLE;
- buf[1] = 245;
- buf[2] = 250;
+ high, or vice versa if the reading is very high. If it's a volt
+ sensor this should always give us an alarm. */
+ if (val <= 240u) {
+ buf[0] = ABIT_UGURU_VOLT_LOW_ALARM_ENABLE;
+ buf[1] = 245;
+ buf[2] = 250;
+ test_flag = ABIT_UGURU_VOLT_LOW_ALARM_FLAG;
+ } else {
+ buf[0] = ABIT_UGURU_VOLT_HIGH_ALARM_ENABLE;
+ buf[1] = 5;
+ buf[2] = 10;
+ test_flag = ABIT_UGURU_VOLT_HIGH_ALARM_FLAG;
+ }
+
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
buf, 3) != 3)
goto abituguru_detect_bank1_sensor_type_exit;
@@ -469,13 +480,13 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
sensor_addr, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
goto abituguru_detect_bank1_sensor_type_exit;
- if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
+ if (buf[0] & test_flag) {
ABIT_UGURU_DEBUG(2, " found volt sensor\n");
ret = ABIT_UGURU_IN_SENSOR;
goto abituguru_detect_bank1_sensor_type_exit;
} else
ABIT_UGURU_DEBUG(2, " alarm raised during volt "
- "sensor test, but volt low flag not set\n");
+ "sensor test, but volt range flag not set\n");
} else
ABIT_UGURU_DEBUG(2, " alarm not raised during volt sensor "
"test\n");
@@ -1287,6 +1298,7 @@ abituguru_probe_error:
for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
device_remove_file(&pdev->dev,
&abituguru_sysfs_attr[i].dev_attr);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return res;
}
@@ -1296,13 +1308,13 @@ static int __devexit abituguru_remove(struct platform_device *pdev)
int i;
struct abituguru_data *data = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
hwmon_device_unregister(data->class_dev);
for (i = 0; data->sysfs_attr[i].dev_attr.attr.name; i++)
device_remove_file(&pdev->dev, &data->sysfs_attr[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
device_remove_file(&pdev->dev,
&abituguru_sysfs_attr[i].dev_attr);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
@@ -1436,6 +1448,15 @@ static int __init abituguru_init(void)
int address, err;
struct resource res = { .flags = IORESOURCE_IO };
+#ifdef CONFIG_DMI
+ char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+
+ /* safety check, refuse to load on non Abit motherboards */
+ if (!force && (!board_vendor ||
+ strcmp(board_vendor, "http://www.abit.com.tw/")))
+ return -ENODEV;
+#endif
+
address = abituguru_detect();
if (address < 0)
return address;
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
new file mode 100644
index 000000000000..a003d104ca45
--- /dev/null
+++ b/drivers/hwmon/abituguru3.c
@@ -0,0 +1,1140 @@
+/*
+ abituguru3.c Copyright (c) 2006 Hans de Goede <j.w.r.degoede@hhs.nl>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+/*
+ This driver supports the sensor part of revision 3 of the custom Abit uGuru
+ chip found on newer Abit uGuru motherboards. Note: because of lack of specs
+ only reading the sensors and their settings is supported.
+*/
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <asm/io.h>
+
+/* uGuru3 bank addresses */
+#define ABIT_UGURU3_SETTINGS_BANK 0x01
+#define ABIT_UGURU3_SENSORS_BANK 0x08
+#define ABIT_UGURU3_MISC_BANK 0x09
+#define ABIT_UGURU3_ALARMS_START 0x1E
+#define ABIT_UGURU3_SETTINGS_START 0x24
+#define ABIT_UGURU3_VALUES_START 0x80
+#define ABIT_UGURU3_BOARD_ID 0x0A
+/* uGuru3 sensor bank flags */ /* Alarm if: */
+#define ABIT_UGURU3_TEMP_HIGH_ALARM_ENABLE 0x01 /* temp over warn */
+#define ABIT_UGURU3_VOLT_HIGH_ALARM_ENABLE 0x02 /* volt over max */
+#define ABIT_UGURU3_VOLT_LOW_ALARM_ENABLE 0x04 /* volt under min */
+#define ABIT_UGURU3_TEMP_HIGH_ALARM_FLAG 0x10 /* temp is over warn */
+#define ABIT_UGURU3_VOLT_HIGH_ALARM_FLAG 0x20 /* volt is over max */
+#define ABIT_UGURU3_VOLT_LOW_ALARM_FLAG 0x40 /* volt is under min */
+#define ABIT_UGURU3_FAN_LOW_ALARM_ENABLE 0x01 /* fan under min */
+#define ABIT_UGURU3_BEEP_ENABLE 0x08 /* beep if alarm */
+#define ABIT_UGURU3_SHUTDOWN_ENABLE 0x80 /* shutdown if alarm */
+/* sensor types */
+#define ABIT_UGURU3_IN_SENSOR 0
+#define ABIT_UGURU3_TEMP_SENSOR 1
+#define ABIT_UGURU3_FAN_SENSOR 2
+
+/* Timeouts / Retries: if these turn out to need a lot of fiddling we could
+ convert them to module params. Determined by trial and error. I assume this
+ is CPU-speed independent, since the ISA bus and not the CPU should be the
+ bottleneck. */
+#define ABIT_UGURU3_WAIT_TIMEOUT 250
+/* Normally the 0xAC at the end of synchronize() is reported after the
+ first read, but sometimes not and we need to poll */
+#define ABIT_UGURU3_SYNCHRONIZE_TIMEOUT 5
+/* utility macros */
+#define ABIT_UGURU3_NAME "abituguru3"
+#define ABIT_UGURU3_DEBUG(format, arg...) \
+ if (verbose) \
+ printk(KERN_DEBUG ABIT_UGURU3_NAME ": " format , ## arg)
+
+/* Macros to help calculate the sysfs_names array length */
+#define ABIT_UGURU3_MAX_NO_SENSORS 26
+/* sum of strlen +1 of: in??_input\0, in??_{min,max}\0, in??_{min,max}_alarm\0,
+ in??_{min,max}_alarm_enable\0, in??_beep\0, in??_shutdown\0, in??_label\0 */
+#define ABIT_UGURU3_IN_NAMES_LENGTH (11 + 2 * 9 + 2 * 15 + 2 * 22 + 10 + 14 + 11)
+/* sum of strlen +1 of: temp??_input\0, temp??_max\0, temp??_crit\0,
+ temp??_alarm\0, temp??_alarm_enable\0, temp??_beep\0, temp??_shutdown\0,
+ temp??_label\0 */
+#define ABIT_UGURU3_TEMP_NAMES_LENGTH (13 + 11 + 12 + 13 + 20 + 12 + 16 + 13)
+/* sum of strlen +1 of: fan??_input\0, fan??_min\0, fan??_alarm\0,
+ fan??_alarm_enable\0, fan??_beep\0, fan??_shutdown\0, fan??_label\0 */
+#define ABIT_UGURU3_FAN_NAMES_LENGTH (12 + 10 + 12 + 19 + 11 + 15 + 12)
+/* Worst case scenario 16 in sensors (longest names_length) and the rest
+ temp sensors (second longest names_length). */
+#define ABIT_UGURU3_SYSFS_NAMES_LENGTH (16 * ABIT_UGURU3_IN_NAMES_LENGTH + \
+ (ABIT_UGURU3_MAX_NO_SENSORS - 16) * ABIT_UGURU3_TEMP_NAMES_LENGTH)
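+/* Worked example of the sums above (sanity check only, not used by the
+ code): ABIT_UGURU3_IN_NAMES_LENGTH = 11 + 2*9 + 2*15 + 2*22 + 10 + 14 +
+ 11 = 138 bytes per in sensor and ABIT_UGURU3_TEMP_NAMES_LENGTH = 110
+ bytes per temp sensor, so the worst case buffer is 16 * 138 +
+ (26 - 16) * 110 = 3308 bytes. */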
+
+/* All the macros below are named identically to the openguru2 program
+ reverse engineered by Louis Kruger, hence the names might not be 100%
+ logical. I could come up with better names, but I prefer keeping the names
+ identical so that this driver can be compared with his work more easily. */
+/* Two I/O ports are used by the uGuru3 */
+#define ABIT_UGURU3_BASE 0x00E0
+#define ABIT_UGURU3_CMD 0x00
+#define ABIT_UGURU3_DATA 0x04
+#define ABIT_UGURU3_REGION_LENGTH 5
+/* The wait_xxx functions return this on success and the last contents
+ of the DATA register (0-255) on failure. */
+#define ABIT_UGURU3_SUCCESS -1
+/* uGuru status flags */
+#define ABIT_UGURU3_STATUS_READY_FOR_READ 0x01
+#define ABIT_UGURU3_STATUS_BUSY 0x02
+
+
+/* Structures */
+struct abituguru3_sensor_info {
+ const char* name;
+ int port;
+ int type;
+ int multiplier;
+ int divisor;
+ int offset;
+};
+
+struct abituguru3_motherboard_info {
+ u16 id;
+ const char *name;
+ /* + 1 -> end of sensors indicated by a sensor with name == NULL */
+ struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1];
+};
+
+/* For the Abit uGuru, we need to keep some data in memory.
+ The structure is dynamically allocated at the same time a new
+ abituguru3 device is allocated. */
+struct abituguru3_data {
+ struct class_device *class_dev; /* hwmon registered device */
+ struct mutex update_lock; /* protect access to data and uGuru */
+ unsigned short addr; /* uguru base address */
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* For convenience the sysfs attr and their names are generated
+ automatically. We have max 10 entries per sensor (for in sensors) */
+ struct sensor_device_attribute_2 sysfs_attr[ABIT_UGURU3_MAX_NO_SENSORS
+ * 10];
+
+ /* Buffer to store the dynamically generated sysfs names */
+ char sysfs_names[ABIT_UGURU3_SYSFS_NAMES_LENGTH];
+
+ /* Pointer to the sensors info for the detected motherboard */
+ const struct abituguru3_sensor_info *sensors;
+
+ /* The abituguru3 supports up to 48 sensors and thus has register
+ sets for 48 sensors; for convenience / simplicity of the
+ code we always read and store all registers for all 48 sensors */
+
+ /* Alarms for all 48 sensors (1 bit per sensor) */
+ u8 alarms[48/8];
+
+ /* Value of all 48 sensors */
+ u8 value[48];
+
+ /* Settings of all 48 sensors; note that in and temp sensors (the first
+ 32 sensors) have 3 bytes of settings, while fans only have 2 bytes;
+ for convenience we use 3 bytes for all sensors */
+ u8 settings[48][3];
+};
+
+
+/* Constants */
+static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
+ { 0x000C, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS FAN", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 35, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x000D, "Abit AW8", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM1", 26, 1, 1, 1, 0 },
+ { "PWM2", 27, 1, 1, 1, 0 },
+ { "PWM3", 28, 1, 1, 1, 0 },
+ { "PWM4", 29, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 35, 2, 60, 1, 0 },
+ { "AUX2 Fan", 36, 2, 60, 1, 0 },
+ { "AUX3 Fan", 37, 2, 60, 1, 0 },
+ { "AUX4 Fan", 38, 2, 60, 1, 0 },
+ { "AUX5 Fan", 39, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x000E, "AL-8", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x000F, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0010, "Abit NI8 SLI GR", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "NB 1.4V", 4, 0, 10, 1, 0 },
+ { "SB 1.5V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "SYS", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 35, 2, 60, 1, 0 },
+ { "OTES1 Fan", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0011, "Abit AT8 32X", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 20, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VDDA 2.5V", 6, 0, 20, 1, 0 },
+ { "NB 1.8V", 4, 0, 10, 1, 0 },
+ { "NB 1.8V Dual", 5, 0, 10, 1, 0 },
+ { "HTV 1.2", 3, 0, 10, 1, 0 },
+ { "PCIE 1.2V", 12, 0, 10, 1, 0 },
+ { "NB 1.2V", 13, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "NB", 25, 1, 1, 1, 0 },
+ { "System", 26, 1, 1, 1, 0 },
+ { "PWM", 27, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 35, 2, 60, 1, 0 },
+ { "AUX2 Fan", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0012, "Abit AN8 32X", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 20, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "HyperTransport", 3, 0, 10, 1, 0 },
+ { "CPU VDDA 2.5V", 5, 0, 20, 1, 0 },
+ { "NB", 4, 0, 10, 1, 0 },
+ { "SB", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "SYS", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0013, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM1", 26, 1, 1, 1, 0 },
+ { "PWM2", 27, 1, 1, 1, 0 },
+ { "PWM3", 28, 1, 1, 1, 0 },
+ { "PWM4", 29, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 35, 2, 60, 1, 0 },
+ { "AUX2 Fan", 36, 2, 60, 1, 0 },
+ { "AUX3 Fan", 37, 2, 60, 1, 0 },
+ { "AUX4 Fan", 38, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0014, "Abit AB9 Pro", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 10, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0015, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR", 1, 0, 20, 1, 0 },
+ { "DDR VTT", 2, 0, 10, 1, 0 },
+ { "HyperTransport", 3, 0, 10, 1, 0 },
+ { "CPU VDDA 2.5V", 5, 0, 20, 1, 0 },
+ { "NB", 4, 0, 10, 1, 0 },
+ { "SB", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "SYS", 25, 1, 1, 1, 0 },
+ { "PWM", 26, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 33, 2, 60, 1, 0 },
+ { "AUX2 Fan", 35, 2, 60, 1, 0 },
+ { "AUX3 Fan", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0016, "AW9D-MAX", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR2", 1, 0, 20, 1, 0 },
+ { "DDR2 VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH & PCIE 1.5V", 4, 0, 10, 1, 0 },
+ { "MCH 2.5V", 5, 0, 20, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM1", 26, 1, 1, 1, 0 },
+ { "PWM2", 27, 1, 1, 1, 0 },
+ { "PWM3", 28, 1, 1, 1, 0 },
+ { "PWM4", 29, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "NB Fan", 33, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 35, 2, 60, 1, 0 },
+ { "AUX2 Fan", 36, 2, 60, 1, 0 },
+ { "AUX3 Fan", 37, 2, 60, 1, 0 },
+ { "OTES1 Fan", 38, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0017, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR2", 1, 0, 20, 1, 0 },
+ { "DDR2 VTT", 2, 0, 10, 1, 0 },
+ { "HyperTransport", 3, 0, 10, 1, 0 },
+ { "CPU VDDA 2.5V", 6, 0, 20, 1, 0 },
+ { "NB 1.8V", 4, 0, 10, 1, 0 },
+ { "NB 1.2V ", 13, 0, 10, 1, 0 },
+ { "SB 1.2V", 5, 0, 10, 1, 0 },
+ { "PCIE 1.2V", 12, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "ATX +3.3V", 10, 0, 20, 1, 0 },
+ { "ATX 5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 26, 1, 1, 1, 0 },
+ { "PWM", 27, 1, 1, 1, 0 },
+ { "CPU FAN", 32, 2, 60, 1, 0 },
+ { "SYS FAN", 34, 2, 60, 1, 0 },
+ { "AUX1 FAN", 35, 2, 60, 1, 0 },
+ { "AUX2 FAN", 36, 2, 60, 1, 0 },
+ { "AUX3 FAN", 37, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0018, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR2", 1, 0, 20, 1, 0 },
+ { "DDR2 VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT", 3, 0, 10, 1, 0 },
+ { "MCH 1.25V", 4, 0, 10, 1, 0 },
+ { "ICHIO 1.5V", 5, 0, 10, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM Phase1", 26, 1, 1, 1, 0 },
+ { "PWM Phase2", 27, 1, 1, 1, 0 },
+ { "PWM Phase3", 28, 1, 1, 1, 0 },
+ { "PWM Phase4", 29, 1, 1, 1, 0 },
+ { "PWM Phase5", 30, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 33, 2, 60, 1, 0 },
+ { "AUX2 Fan", 35, 2, 60, 1, 0 },
+ { "AUX3 Fan", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0019, "unknown", {
+ { "CPU Core", 7, 0, 10, 1, 0 },
+ { "DDR2", 13, 0, 20, 1, 0 },
+ { "DDR2 VTT", 14, 0, 10, 1, 0 },
+ { "CPU VTT", 3, 0, 20, 1, 0 },
+ { "NB 1.2V ", 4, 0, 10, 1, 0 },
+ { "SB 1.5V", 6, 0, 10, 1, 0 },
+ { "HyperTransport", 5, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 12, 0, 60, 1, 0 },
+ { "ATX +12V (4-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "ATX +3.3V", 10, 0, 20, 1, 0 },
+ { "ATX 5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM Phase1", 26, 1, 1, 1, 0 },
+ { "PWM Phase2", 27, 1, 1, 1, 0 },
+ { "PWM Phase3", 28, 1, 1, 1, 0 },
+ { "PWM Phase4", 29, 1, 1, 1, 0 },
+ { "PWM Phase5", 30, 1, 1, 1, 0 },
+ { "CPU FAN", 32, 2, 60, 1, 0 },
+ { "SYS FAN", 34, 2, 60, 1, 0 },
+ { "AUX1 FAN", 33, 2, 60, 1, 0 },
+ { "AUX2 FAN", 35, 2, 60, 1, 0 },
+ { "AUX3 FAN", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x001A, "unknown", {
+ { "CPU Core", 0, 0, 10, 1, 0 },
+ { "DDR2", 1, 0, 20, 1, 0 },
+ { "DDR2 VTT", 2, 0, 10, 1, 0 },
+ { "CPU VTT 1.2V", 3, 0, 10, 1, 0 },
+ { "MCH 1.25V", 4, 0, 10, 1, 0 },
+ { "ICHIO 1.5V", 5, 0, 10, 1, 0 },
+ { "ICH 1.05V", 6, 0, 10, 1, 0 },
+ { "ATX +12V (24-Pin)", 7, 0, 60, 1, 0 },
+ { "ATX +12V (8-pin)", 8, 0, 60, 1, 0 },
+ { "ATX +5V", 9, 0, 30, 1, 0 },
+ { "+3.3V", 10, 0, 20, 1, 0 },
+ { "5VSB", 11, 0, 30, 1, 0 },
+ { "CPU", 24, 1, 1, 1, 0 },
+ { "System ", 25, 1, 1, 1, 0 },
+ { "PWM ", 26, 1, 1, 1, 0 },
+ { "PWM Phase2", 27, 1, 1, 1, 0 },
+ { "PWM Phase3", 28, 1, 1, 1, 0 },
+ { "PWM Phase4", 29, 1, 1, 1, 0 },
+ { "PWM Phase5", 30, 1, 1, 1, 0 },
+ { "CPU Fan", 32, 2, 60, 1, 0 },
+ { "SYS Fan", 34, 2, 60, 1, 0 },
+ { "AUX1 Fan", 33, 2, 60, 1, 0 },
+ { "AUX2 Fan", 35, 2, 60, 1, 0 },
+ { "AUX3 Fan", 36, 2, 60, 1, 0 },
+ { NULL, 0, 0, 0, 0, 0 } }
+ },
+ { 0x0000, NULL, { { NULL, 0, 0, 0, 0, 0 } } }
+};
+
+
+/* Insmod parameters */
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Set to one to force detection.");
+/* Default verbose is 1, since this driver is still in the testing phase */
+static int verbose = 1;
+module_param(verbose, bool, 0644);
+MODULE_PARM_DESC(verbose, "Enable/disable verbose error reporting");
+
+
+/* wait while the uguru is busy (usually after a write) */
+static int abituguru3_wait_while_busy(struct abituguru3_data *data)
+{
+ u8 x;
+ int timeout = ABIT_UGURU3_WAIT_TIMEOUT;
+
+ while ((x = inb_p(data->addr + ABIT_UGURU3_DATA)) &
+ ABIT_UGURU3_STATUS_BUSY) {
+ timeout--;
+ if (timeout == 0)
+ return x;
+ /* sleep a bit before our last try, to give the uGuru3 one
+ last chance to respond. */
+ if (timeout == 1)
+ msleep(1);
+ }
+ return ABIT_UGURU3_SUCCESS;
+}
+
+/* wait till uguru is ready to be read */
+static int abituguru3_wait_for_read(struct abituguru3_data *data)
+{
+ u8 x;
+ int timeout = ABIT_UGURU3_WAIT_TIMEOUT;
+
+ while (!((x = inb_p(data->addr + ABIT_UGURU3_DATA)) &
+ ABIT_UGURU3_STATUS_READY_FOR_READ)) {
+ timeout--;
+ if (timeout == 0)
+ return x;
+ /* sleep a bit before our last try, to give the uGuru3 one
+ last chance to respond. */
+ if (timeout == 1)
+ msleep(1);
+ }
+ return ABIT_UGURU3_SUCCESS;
+}
+
+/* This synchronizes us with the uGuru3's protocol state machine; this
+ must be done before each command. */
+static int abituguru3_synchronize(struct abituguru3_data *data)
+{
+ int x, timeout = ABIT_UGURU3_SYNCHRONIZE_TIMEOUT;
+
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("synchronize timeout during initial busy "
+ "wait, status: 0x%02x\n", x);
+ return -EIO;
+ }
+
+ outb(0x20, data->addr + ABIT_UGURU3_DATA);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("synchronize timeout after sending 0x20, "
+ "status: 0x%02x\n", x);
+ return -EIO;
+ }
+
+ outb(0x10, data->addr + ABIT_UGURU3_CMD);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("synchronize timeout after sending 0x10, "
+ "status: 0x%02x\n", x);
+ return -EIO;
+ }
+
+ outb(0x00, data->addr + ABIT_UGURU3_CMD);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("synchronize timeout after sending 0x00, "
+ "status: 0x%02x\n", x);
+ return -EIO;
+ }
+
+ if ((x = abituguru3_wait_for_read(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("synchronize timeout waiting for read, "
+ "status: 0x%02x\n", x);
+ return -EIO;
+ }
+
+ while ((x = inb(data->addr + ABIT_UGURU3_CMD)) != 0xAC) {
+ timeout--;
+ if (timeout == 0) {
+ ABIT_UGURU3_DEBUG("synchronize timeout cmd does not "
+ "hold 0xAC after synchronize, cmd: 0x%02x\n",
+ x);
+ return -EIO;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/* Read count bytes from the given bank and offset and store the
+ result in buf */
+static int abituguru3_read(struct abituguru3_data *data, u8 bank, u8 offset,
+ u8 count, u8 *buf)
+{
+ int i, x;
+
+ if ((x = abituguru3_synchronize(data)))
+ return x;
+
+ outb(0x1A, data->addr + ABIT_UGURU3_DATA);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+ "sending 0x1A, status: 0x%02x\n", (unsigned int)bank,
+ (unsigned int)offset, x);
+ return -EIO;
+ }
+
+ outb(bank, data->addr + ABIT_UGURU3_CMD);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+ "sending the bank, status: 0x%02x\n",
+ (unsigned int)bank, (unsigned int)offset, x);
+ return -EIO;
+ }
+
+ outb(offset, data->addr + ABIT_UGURU3_CMD);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+ "sending the offset, status: 0x%02x\n",
+ (unsigned int)bank, (unsigned int)offset, x);
+ return -EIO;
+ }
+
+ outb(count, data->addr + ABIT_UGURU3_CMD);
+ if ((x = abituguru3_wait_while_busy(data)) != ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("read from 0x%02x:0x%02x timed out after "
+ "sending the count, status: 0x%02x\n",
+ (unsigned int)bank, (unsigned int)offset, x);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if ((x = abituguru3_wait_for_read(data)) !=
+ ABIT_UGURU3_SUCCESS) {
+ ABIT_UGURU3_DEBUG("timeout reading byte %d from "
+ "0x%02x:0x%02x, status: 0x%02x\n", i,
+ (unsigned int)bank, (unsigned int)offset, x);
+ break;
+ }
+ buf[i] = inb(data->addr + ABIT_UGURU3_CMD);
+ }
+ return i;
+}
+
+/* Sensor settings are stored 1 byte per offset with the bytes
+ placed at consecutive offsets. */
+static int abituguru3_read_increment_offset(struct abituguru3_data *data,
+ u8 bank, u8 offset, u8 count, u8 *buf, int offset_count)
+{
+ int i, x;
+
+ for (i = 0; i < offset_count; i++)
+ if ((x = abituguru3_read(data, bank, offset + i, count,
+ buf + i * count)) != count)
+ return i * count + ((i && (x < 0)) ? 0 : x);
+
+ return i * count;
+}
+
+/* Following are the sysfs callback functions. These functions expect:
+ sensor_device_attribute_2->index: index into the data->sensors array
+ sensor_device_attribute_2->nr: register offset, bitmask or NA. */
+static struct abituguru3_data *abituguru3_update_device(struct device *dev);
+
+static ssize_t show_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int value;
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ struct abituguru3_data *data = abituguru3_update_device(dev);
+ const struct abituguru3_sensor_info *sensor;
+
+ if (!data)
+ return -EIO;
+
+ sensor = &data->sensors[attr->index];
+
+ /* are we reading a setting, or is this a normal read? */
+ if (attr->nr)
+ value = data->settings[sensor->port][attr->nr];
+ else
+ value = data->value[sensor->port];
+
+ /* convert the value */
+ value = (value * sensor->multiplier) / sensor->divisor +
+ sensor->offset;
+
+ /* alternatively we could update the sensors settings struct for this,
+ but then its contents would differ from the windows sw ini files */
+ if (sensor->type == ABIT_UGURU3_TEMP_SENSOR)
+ value *= 1000;
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int port;
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ struct abituguru3_data *data = abituguru3_update_device(dev);
+
+ if (!data)
+ return -EIO;
+
+ port = data->sensors[attr->index].port;
+
+ /* See if the alarm bit for this sensor is set and if a bitmask is
+ given in attr->nr also check if the alarm matches the type of alarm
+ we're looking for (for volt it can be either low or high). The type
+ is stored in a few readonly bits in the settings of the sensor. */
+ if ((data->alarms[port / 8] & (0x01 << (port % 8))) &&
+ (!attr->nr || (data->settings[port][0] & attr->nr)))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t show_mask(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ struct abituguru3_data *data = dev_get_drvdata(dev);
+
+ if (data->settings[data->sensors[attr->index].port][0] & attr->nr)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ struct abituguru3_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->sensors[attr->index].name);
+}
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return sprintf(buf, "%s\n", ABIT_UGURU3_NAME);
+}
+
+/* Sysfs attr templates; the real entries are generated automatically. */
+static const
+struct sensor_device_attribute_2 abituguru3_sysfs_templ[3][10] = { {
+ SENSOR_ATTR_2(in%d_input, 0444, show_value, NULL, 0, 0),
+ SENSOR_ATTR_2(in%d_min, 0444, show_value, NULL, 1, 0),
+ SENSOR_ATTR_2(in%d_max, 0444, show_value, NULL, 2, 0),
+ SENSOR_ATTR_2(in%d_min_alarm, 0444, show_alarm, NULL,
+ ABIT_UGURU3_VOLT_LOW_ALARM_FLAG, 0),
+ SENSOR_ATTR_2(in%d_max_alarm, 0444, show_alarm, NULL,
+ ABIT_UGURU3_VOLT_HIGH_ALARM_FLAG, 0),
+ SENSOR_ATTR_2(in%d_beep, 0444, show_mask, NULL,
+ ABIT_UGURU3_BEEP_ENABLE, 0),
+ SENSOR_ATTR_2(in%d_shutdown, 0444, show_mask, NULL,
+ ABIT_UGURU3_SHUTDOWN_ENABLE, 0),
+ SENSOR_ATTR_2(in%d_min_alarm_enable, 0444, show_mask, NULL,
+ ABIT_UGURU3_VOLT_LOW_ALARM_ENABLE, 0),
+ SENSOR_ATTR_2(in%d_max_alarm_enable, 0444, show_mask, NULL,
+ ABIT_UGURU3_VOLT_HIGH_ALARM_ENABLE, 0),
+ SENSOR_ATTR_2(in%d_label, 0444, show_label, NULL, 0, 0)
+ }, {
+ SENSOR_ATTR_2(temp%d_input, 0444, show_value, NULL, 0, 0),
+ SENSOR_ATTR_2(temp%d_max, 0444, show_value, NULL, 1, 0),
+ SENSOR_ATTR_2(temp%d_crit, 0444, show_value, NULL, 2, 0),
+ SENSOR_ATTR_2(temp%d_alarm, 0444, show_alarm, NULL, 0, 0),
+ SENSOR_ATTR_2(temp%d_beep, 0444, show_mask, NULL,
+ ABIT_UGURU3_BEEP_ENABLE, 0),
+ SENSOR_ATTR_2(temp%d_shutdown, 0444, show_mask, NULL,
+ ABIT_UGURU3_SHUTDOWN_ENABLE, 0),
+ SENSOR_ATTR_2(temp%d_alarm_enable, 0444, show_mask, NULL,
+ ABIT_UGURU3_TEMP_HIGH_ALARM_ENABLE, 0),
+ SENSOR_ATTR_2(temp%d_label, 0444, show_label, NULL, 0, 0)
+ }, {
+ SENSOR_ATTR_2(fan%d_input, 0444, show_value, NULL, 0, 0),
+ SENSOR_ATTR_2(fan%d_min, 0444, show_value, NULL, 1, 0),
+ SENSOR_ATTR_2(fan%d_alarm, 0444, show_alarm, NULL, 0, 0),
+ SENSOR_ATTR_2(fan%d_beep, 0444, show_mask, NULL,
+ ABIT_UGURU3_BEEP_ENABLE, 0),
+ SENSOR_ATTR_2(fan%d_shutdown, 0444, show_mask, NULL,
+ ABIT_UGURU3_SHUTDOWN_ENABLE, 0),
+ SENSOR_ATTR_2(fan%d_alarm_enable, 0444, show_mask, NULL,
+ ABIT_UGURU3_FAN_LOW_ALARM_ENABLE, 0),
+ SENSOR_ATTR_2(fan%d_label, 0444, show_label, NULL, 0, 0)
+} };
+
+static struct sensor_device_attribute_2 abituguru3_sysfs_attr[] = {
+ SENSOR_ATTR_2(name, 0444, show_name, NULL, 0, 0),
+};
+
+static int __devinit abituguru3_probe(struct platform_device *pdev)
+{
+ const int no_sysfs_attr[3] = { 10, 8, 7 };
+ int sensor_index[3] = { 0, 1, 1 };
+ struct abituguru3_data *data;
+ int i, j, type, used, sysfs_names_free, sysfs_attr_i, res = -ENODEV;
+ char *sysfs_filename;
+ u8 buf[2];
+ u16 id;
+
+ if (!(data = kzalloc(sizeof(struct abituguru3_data), GFP_KERNEL)))
+ return -ENOMEM;
+
+ data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ mutex_init(&data->update_lock);
+ platform_set_drvdata(pdev, data);
+
+ /* Read the motherboard ID */
+ if ((i = abituguru3_read(data, ABIT_UGURU3_MISC_BANK,
+ ABIT_UGURU3_BOARD_ID, 2, buf)) != 2) {
+ goto abituguru3_probe_error;
+ }
+
+ /* Completely read the uGuru to see if one really is there */
+ if (!abituguru3_update_device(&pdev->dev))
+ goto abituguru3_probe_error;
+
+ /* lookup the ID in our motherboard table */
+ id = ((u16)buf[0] << 8) | (u16)buf[1];
+ for (i = 0; abituguru3_motherboards[i].id; i++)
+ if (abituguru3_motherboards[i].id == id)
+ break;
+ if (!abituguru3_motherboards[i].id) {
+ printk(KERN_ERR ABIT_UGURU3_NAME ": error unknown motherboard "
+ "ID: %04X. Please report this to the abituguru3 "
+ "maintainer (see MAINTAINERS)\n", (unsigned int)id);
+ goto abituguru3_probe_error;
+ }
+ data->sensors = abituguru3_motherboards[i].sensors;
+ printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
+ "ID: %04X (%s)\n", (unsigned int)id,
+ abituguru3_motherboards[i].name);
+
+ /* Fill the sysfs attr array */
+ sysfs_attr_i = 0;
+ sysfs_filename = data->sysfs_names;
+ sysfs_names_free = ABIT_UGURU3_SYSFS_NAMES_LENGTH;
+ for (i = 0; data->sensors[i].name; i++) {
+ /* Fail safe check, this should never happen! */
+ if (i >= ABIT_UGURU3_MAX_NO_SENSORS) {
+ printk(KERN_ERR ABIT_UGURU3_NAME
+ ": Fatal error motherboard has more sensors "
+ "then ABIT_UGURU3_MAX_NO_SENSORS. This should "
+ "never happen please report to the abituguru3 "
+ "maintainer (see MAINTAINERS)\n");
+ res = -ENAMETOOLONG;
+ goto abituguru3_probe_error;
+ }
+ type = data->sensors[i].type;
+ for (j = 0; j < no_sysfs_attr[type]; j++) {
+ used = snprintf(sysfs_filename, sysfs_names_free,
+ abituguru3_sysfs_templ[type][j].dev_attr.attr.
+ name, sensor_index[type]) + 1;
+ data->sysfs_attr[sysfs_attr_i] =
+ abituguru3_sysfs_templ[type][j];
+ data->sysfs_attr[sysfs_attr_i].dev_attr.attr.name =
+ sysfs_filename;
+ data->sysfs_attr[sysfs_attr_i].index = i;
+ sysfs_filename += used;
+ sysfs_names_free -= used;
+ sysfs_attr_i++;
+ }
+ sensor_index[type]++;
+ }
+ /* Fail safe check, this should never happen! */
+ if (sysfs_names_free < 0) {
+ printk(KERN_ERR ABIT_UGURU3_NAME
+ ": Fatal error ran out of space for sysfs attr names. "
+ "This should never happen please report to the "
+ "abituguru3 maintainer (see MAINTAINERS)\n");
+ res = -ENAMETOOLONG;
+ goto abituguru3_probe_error;
+ }
+
+ /* Register sysfs hooks */
+ for (i = 0; i < sysfs_attr_i; i++)
+ if (device_create_file(&pdev->dev,
+ &data->sysfs_attr[i].dev_attr))
+ goto abituguru3_probe_error;
+ for (i = 0; i < ARRAY_SIZE(abituguru3_sysfs_attr); i++)
+ if (device_create_file(&pdev->dev,
+ &abituguru3_sysfs_attr[i].dev_attr))
+ goto abituguru3_probe_error;
+
+ data->class_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->class_dev)) {
+ res = PTR_ERR(data->class_dev);
+ goto abituguru3_probe_error;
+ }
+
+ return 0; /* success */
+
+abituguru3_probe_error:
+ for (i = 0; data->sysfs_attr[i].dev_attr.attr.name; i++)
+ device_remove_file(&pdev->dev, &data->sysfs_attr[i].dev_attr);
+ for (i = 0; i < ARRAY_SIZE(abituguru3_sysfs_attr); i++)
+ device_remove_file(&pdev->dev,
+ &abituguru3_sysfs_attr[i].dev_attr);
+ kfree(data);
+ return res;
+}
+
+static int __devexit abituguru3_remove(struct platform_device *pdev)
+{
+ int i;
+ struct abituguru3_data *data = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ hwmon_device_unregister(data->class_dev);
+ for (i = 0; data->sysfs_attr[i].dev_attr.attr.name; i++)
+ device_remove_file(&pdev->dev, &data->sysfs_attr[i].dev_attr);
+ for (i = 0; i < ARRAY_SIZE(abituguru3_sysfs_attr); i++)
+ device_remove_file(&pdev->dev,
+ &abituguru3_sysfs_attr[i].dev_attr);
+ kfree(data);
+
+ return 0;
+}
+
+static struct abituguru3_data *abituguru3_update_device(struct device *dev)
+{
+ int i;
+ struct abituguru3_data *data = dev_get_drvdata(dev);
+
+ mutex_lock(&data->update_lock);
+ if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
+ /* Clear data->valid while updating */
+ data->valid = 0;
+ /* Read alarms */
+ if (abituguru3_read_increment_offset(data,
+ ABIT_UGURU3_SETTINGS_BANK,
+ ABIT_UGURU3_ALARMS_START,
+ 1, data->alarms, 48/8) != (48/8))
+ goto LEAVE_UPDATE;
+ /* Read in and temp sensors (3 byte settings / sensor) */
+ for (i = 0; i < 32; i++) {
+ if (abituguru3_read(data, ABIT_UGURU3_SENSORS_BANK,
+ ABIT_UGURU3_VALUES_START + i,
+ 1, &data->value[i]) != 1)
+ goto LEAVE_UPDATE;
+ if (abituguru3_read_increment_offset(data,
+ ABIT_UGURU3_SETTINGS_BANK,
+ ABIT_UGURU3_SETTINGS_START + i * 3,
+ 1,
+ data->settings[i], 3) != 3)
+ goto LEAVE_UPDATE;
+ }
+ /* Read temp sensors (2 byte settings / sensor) */
+ for (i = 0; i < 16; i++) {
+ if (abituguru3_read(data, ABIT_UGURU3_SENSORS_BANK,
+ ABIT_UGURU3_VALUES_START + 32 + i,
+ 1, &data->value[32 + i]) != 1)
+ goto LEAVE_UPDATE;
+ if (abituguru3_read_increment_offset(data,
+ ABIT_UGURU3_SETTINGS_BANK,
+ ABIT_UGURU3_SETTINGS_START + 32 * 3 +
+ i * 2, 1,
+ data->settings[32 + i], 2) != 2)
+ goto LEAVE_UPDATE;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+LEAVE_UPDATE:
+ mutex_unlock(&data->update_lock);
+ if (data->valid)
+ return data;
+ else
+ return NULL;
+}
+
+#ifdef CONFIG_PM
+static int abituguru3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct abituguru3_data *data = platform_get_drvdata(pdev);
+ /* make sure all communications with the uguru3 are done and no new
+ ones are started */
+ mutex_lock(&data->update_lock);
+ return 0;
+}
+
+static int abituguru3_resume(struct platform_device *pdev)
+{
+ struct abituguru3_data *data = platform_get_drvdata(pdev);
+ mutex_unlock(&data->update_lock);
+ return 0;
+}
+#else
+#define abituguru3_suspend NULL
+#define abituguru3_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver abituguru3_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = ABIT_UGURU3_NAME,
+ },
+ .probe = abituguru3_probe,
+ .remove = __devexit_p(abituguru3_remove),
+ .suspend = abituguru3_suspend,
+ .resume = abituguru3_resume
+};
+
+static int __init abituguru3_detect(void)
+{
+ /* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or
+ 0x08 at DATA and 0xAC at CMD. Sometimes the uGuru3 will hold 0x05
+ at CMD instead, for unknown reasons, so we test for 0x05 too. */
+ u8 data_val = inb_p(ABIT_UGURU3_BASE + ABIT_UGURU3_DATA);
+ u8 cmd_val = inb_p(ABIT_UGURU3_BASE + ABIT_UGURU3_CMD);
+ if (((data_val == 0x00) || (data_val == 0x08)) &&
+ ((cmd_val == 0xAC) || (cmd_val == 0x05)))
+ return ABIT_UGURU3_BASE;
+
+ ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = "
+ "0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
+
+ if (force) {
+ printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
+ "present because of \"force\" parameter\n");
+ return ABIT_UGURU3_BASE;
+ }
+
+ /* No uGuru3 found */
+ return -ENODEV;
+}
+
+static struct platform_device *abituguru3_pdev;
+
+static int __init abituguru3_init(void)
+{
+ int address, err;
+ struct resource res = { .flags = IORESOURCE_IO };
+
+ address = abituguru3_detect();
+ if (address < 0)
+ return address;
+
+ err = platform_driver_register(&abituguru3_driver);
+ if (err)
+ goto exit;
+
+ abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, address);
+ if (!abituguru3_pdev) {
+ printk(KERN_ERR ABIT_UGURU3_NAME
+ ": Device allocation failed\n");
+ err = -ENOMEM;
+ goto exit_driver_unregister;
+ }
+
+ res.start = address;
+ res.end = address + ABIT_UGURU3_REGION_LENGTH - 1;
+ res.name = ABIT_UGURU3_NAME;
+
+ err = platform_device_add_resources(abituguru3_pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR ABIT_UGURU3_NAME
+ ": Device resource addition failed (%d)\n", err);
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(abituguru3_pdev);
+ if (err) {
+ printk(KERN_ERR ABIT_UGURU3_NAME
+ ": Device addition failed (%d)\n", err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(abituguru3_pdev);
+exit_driver_unregister:
+ platform_driver_unregister(&abituguru3_driver);
+exit:
+ return err;
+}
+
+static void __exit abituguru3_exit(void)
+{
+ platform_device_unregister(abituguru3_pdev);
+ platform_driver_unregister(&abituguru3_driver);
+}
+
+MODULE_AUTHOR("Hans de Goede <j.w.r.degoede@hhs.nl>");
+MODULE_DESCRIPTION("Abit uGuru3 Sensor device");
+MODULE_LICENSE("GPL");
+
+module_init(abituguru3_init);
+module_exit(abituguru3_exit);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6d54c8caed79..7c1795225b06 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -318,7 +318,7 @@ exit:
}
#ifdef CONFIG_HOTPLUG_CPU
-void coretemp_device_remove(unsigned int cpu)
+static void coretemp_device_remove(unsigned int cpu)
{
struct pdev_entry *p, *n;
mutex_lock(&pdev_list_mutex);
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
new file mode 100644
index 000000000000..be3aaa5d0b91
--- /dev/null
+++ b/drivers/hwmon/dme1737.c
@@ -0,0 +1,2080 @@
+/*
+ * dme1737.c - driver for the SMSC DME1737 and Asus A8000 Super-I/O chips'
+ * integrated hardware monitoring features.
+ * Copyright (c) 2007 Juerg Haefliger <juergh@gmail.com>
+ *
+ * This driver is based on the LM85 driver. The hardware monitoring
+ * capabilities of the DME1737 are very similar to the LM85 with some
+ * additional features. Even though the DME1737 is a Super-I/O chip, the
+ * hardware monitoring registers are only accessible via SMBus.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <asm/io.h>
+
+/* Module load parameters */
+static int force_start;
+module_param(force_start, bool, 0);
+MODULE_PARM_DESC(force_start, "Force the chip to start monitoring inputs");
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_1(dme1737);
+
+/* ---------------------------------------------------------------------
+ * Registers
+ *
+ * The sensors are defined as follows:
+ *
+ * Voltages Temperatures
+ * -------- ------------
+ * in0 +5VTR (+5V stdby) temp1 Remote diode 1
+ * in1 Vccp (proc core) temp2 Internal temp
+ * in2 VCC (internal +3.3V) temp3 Remote diode 2
+ * in3 +5V
+ * in4 +12V
+ * in5 VTR (+3.3V stby)
+ * in6 Vbat
+ *
+ * --------------------------------------------------------------------- */
+
+/* Voltages (in) numbered 0-6 (ix) */
+#define DME1737_REG_IN(ix) ((ix) < 5 ? 0x20 + (ix) \
+ : 0x94 + (ix))
+#define DME1737_REG_IN_MIN(ix) ((ix) < 5 ? 0x44 + (ix) * 2 \
+ : 0x91 + (ix) * 2)
+#define DME1737_REG_IN_MAX(ix) ((ix) < 5 ? 0x45 + (ix) * 2 \
+ : 0x92 + (ix) * 2)
+
+/* Temperatures (temp) numbered 0-2 (ix) */
+#define DME1737_REG_TEMP(ix) (0x25 + (ix))
+#define DME1737_REG_TEMP_MIN(ix) (0x4e + (ix) * 2)
+#define DME1737_REG_TEMP_MAX(ix) (0x4f + (ix) * 2)
+#define DME1737_REG_TEMP_OFFSET(ix) ((ix) == 0 ? 0x1f \
+ : 0x1c + (ix))
+
+/* Voltage and temperature LSBs
+ * The LSBs (4 bits each) are stored in 5 registers with the following layouts:
+ * IN_TEMP_LSB(0) = [in5, in6]
+ * IN_TEMP_LSB(1) = [temp3, temp1]
+ * IN_TEMP_LSB(2) = [in4, temp2]
+ * IN_TEMP_LSB(3) = [in3, in0]
+ * IN_TEMP_LSB(4) = [in2, in1] */
+#define DME1737_REG_IN_TEMP_LSB(ix) (0x84 + (ix))
+static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0};
+static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4};
+static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1};
+static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
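
[Editor's illustration, not part of the patch.] A worked reading of the nibble tables above: for in6, DME1737_REG_IN_LSB[6] == 0 and DME1737_REG_IN_LSB_SHL[6] == 4, so its 4-bit LSB sits in the low nibble of IN_TEMP_LSB(0) (register 0x84). The update routine further below reassembles the full 16-bit reading along the lines of

	/* msb is shorthand for the value read from DME1737_REG_IN(6) */
	data->in[6] = (msb << 8) | ((lsb[0] << 4) & 0xf0);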
+
+/* Fans numbered 0-5 (ix) */
+#define DME1737_REG_FAN(ix) ((ix) < 4 ? 0x28 + (ix) * 2 \
+ : 0xa1 + (ix) * 2)
+#define DME1737_REG_FAN_MIN(ix) ((ix) < 4 ? 0x54 + (ix) * 2 \
+ : 0xa5 + (ix) * 2)
+#define DME1737_REG_FAN_OPT(ix) ((ix) < 4 ? 0x90 + (ix) \
+ : 0xb2 + (ix))
+#define DME1737_REG_FAN_MAX(ix) (0xb4 + (ix)) /* only for fan[4-5] */
+
+/* PWMs numbered 0-2, 4-5 (ix) */
+#define DME1737_REG_PWM(ix) ((ix) < 3 ? 0x30 + (ix) \
+ : 0xa1 + (ix))
+#define DME1737_REG_PWM_CONFIG(ix) (0x5c + (ix)) /* only for pwm[0-2] */
+#define DME1737_REG_PWM_MIN(ix) (0x64 + (ix)) /* only for pwm[0-2] */
+#define DME1737_REG_PWM_FREQ(ix) ((ix) < 3 ? 0x5f + (ix) \
+ : 0xa3 + (ix))
+/* The layout of the ramp rate registers is different from the other pwm
+ * registers. The bits for the 3 PWMs are stored in 2 registers:
+ * PWM_RR(0) = [OFF3, OFF2, OFF1, RES, RR1E, RR1-2, RR1-1, RR1-0]
+ * PWM_RR(1) = [RR2E, RR2-2, RR2-1, RR2-0, RR3E, RR3-2, RR3-1, RR3-0] */
+#define DME1737_REG_PWM_RR(ix) (0x62 + (ix)) /* only for pwm[0-2] */
+
+/* Thermal zones 0-2 */
+#define DME1737_REG_ZONE_LOW(ix) (0x67 + (ix))
+#define DME1737_REG_ZONE_ABS(ix) (0x6a + (ix))
+/* The layout of the hysteresis registers is different from the other zone
+ * registers. The bits for the 3 zones are stored in 2 registers:
+ * ZONE_HYST(0) = [H1-3, H1-2, H1-1, H1-0, H2-3, H2-2, H2-1, H2-0]
+ * ZONE_HYST(1) = [H3-3, H3-2, H3-1, H3-0, RES, RES, RES, RES] */
+#define DME1737_REG_ZONE_HYST(ix) (0x6d + (ix))
+
+/* Alarm registers and bit mapping
+ * The 3 8-bit alarm registers will be concatenated to a single 32-bit
+ * alarm value [0, ALARM3, ALARM2, ALARM1]. */
+#define DME1737_REG_ALARM1 0x41
+#define DME1737_REG_ALARM2 0x42
+#define DME1737_REG_ALARM3 0x83
+static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17};
+static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6};
+static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
+
+/* Miscellaneous registers */
+#define DME1737_REG_COMPANY 0x3e
+#define DME1737_REG_VERSTEP 0x3f
+#define DME1737_REG_CONFIG 0x40
+#define DME1737_REG_CONFIG2 0x7f
+#define DME1737_REG_VID 0x43
+#define DME1737_REG_TACH_PWM 0x81
+
+/* ---------------------------------------------------------------------
+ * Misc defines
+ * --------------------------------------------------------------------- */
+
+/* Chip identification */
+#define DME1737_COMPANY_SMSC 0x5c
+#define DME1737_VERSTEP 0x88
+#define DME1737_VERSTEP_MASK 0xf8
+
+/* ---------------------------------------------------------------------
+ * Data structures and manipulation thereof
+ * --------------------------------------------------------------------- */
+
+struct dme1737_data {
+ struct i2c_client client;
+ struct class_device *class_dev;
+
+ struct mutex update_lock;
+ int valid; /* !=0 if following fields are valid */
+ unsigned long last_update; /* in jiffies */
+ unsigned long last_vbat; /* in jiffies */
+
+ u8 vid;
+ u8 pwm_rr_en;
+ u8 has_pwm;
+ u8 has_fan;
+
+ /* Register values */
+ u16 in[7];
+ u8 in_min[7];
+ u8 in_max[7];
+ s16 temp[3];
+ s8 temp_min[3];
+ s8 temp_max[3];
+ s8 temp_offset[3];
+ u8 config;
+ u8 config2;
+ u8 vrm;
+ u16 fan[6];
+ u16 fan_min[6];
+ u8 fan_max[2];
+ u8 fan_opt[6];
+ u8 pwm[6];
+ u8 pwm_min[3];
+ u8 pwm_config[3];
+ u8 pwm_acz[3];
+ u8 pwm_freq[6];
+ u8 pwm_rr[2];
+ u8 zone_low[3];
+ u8 zone_abs[3];
+ u8 zone_hyst[2];
+ u32 alarms;
+};
+
+/* Nominal voltage values */
+static const int IN_NOMINAL[] = {5000, 2250, 3300, 5000, 12000, 3300, 3300};
+
+/* Voltage input
+ * Voltage inputs have 16 bits resolution, limit values have 8 bits
+ * resolution. */
+static inline int IN_FROM_REG(int reg, int ix, int res)
+{
+ return (reg * IN_NOMINAL[ix] + (3 << (res - 3))) / (3 << (res - 2));
+}
+
+static inline int IN_TO_REG(int val, int ix)
+{
+ return SENSORS_LIMIT((val * 192 + IN_NOMINAL[ix] / 2) /
+ IN_NOMINAL[ix], 0, 255);
+}
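
[Editor's illustration, not part of the patch.] A worked example of the scaling above for the +12V input (ix == 4, IN_NOMINAL[4] == 12000): the nominal voltage sits at 3/4 of full scale, so

	IN_FROM_REG(192, 4, 8)     = (192 * 12000 + 96) / 192          = 12000
	IN_FROM_REG(0xc000, 4, 16) = (49152 * 12000 + 24576) / 49152   = 12000
	IN_TO_REG(12000, 4)        = (12000 * 192 + 6000) / 12000      = 192

with results in millivolts, following the usual hwmon sysfs convention.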
+
+/* Temperature input
+ * The register values represent temperatures in 2's complement notation from
+ * -127 degrees C to +127 degrees C. Temp inputs have 16 bits resolution, limit
+ * values have 8 bits resolution. */
+static inline int TEMP_FROM_REG(int reg, int res)
+{
+ return (reg * 1000) >> (res - 8);
+}
+
+static inline int TEMP_TO_REG(int val)
+{
+ return SENSORS_LIMIT((val < 0 ? val - 500 : val + 500) / 1000,
+ -128, 127);
+}
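
[Editor's illustration, not part of the patch.] Using the 16-bit representation described above (temp MSB in bits 15:8, LSB nibble in bits 7:4), a raw reading of 0x2a80 decodes to 42.5 degrees C, and a 43 degree limit is encoded back into 8 bits:

	TEMP_FROM_REG(0x2a80, 16) = (10880 * 1000) >> 8  = 42500
	TEMP_TO_REG(43000)        = (43000 + 500) / 1000 = 43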
+
+/* Temperature range */
+static const int TEMP_RANGE[] = {2000, 2500, 3333, 4000, 5000, 6666, 8000,
+ 10000, 13333, 16000, 20000, 26666, 32000,
+ 40000, 53333, 80000};
+
+static inline int TEMP_RANGE_FROM_REG(int reg)
+{
+ return TEMP_RANGE[(reg >> 4) & 0x0f];
+}
+
+static int TEMP_RANGE_TO_REG(int val, int reg)
+{
+ int i;
+
+ for (i = 15; i > 0; i--) {
+ if (val > (TEMP_RANGE[i] + TEMP_RANGE[i - 1] + 1) / 2) {
+ break;
+ }
+ }
+
+ return (reg & 0x0f) | (i << 4);
+}
+
+/* Temperature hysteresis
+ * Register layout:
+ * reg[0] = [H1-3, H1-2, H1-1, H1-0, H2-3, H2-2, H2-1, H2-0]
+ * reg[1] = [H3-3, H3-2, H3-1, H3-0, xxxx, xxxx, xxxx, xxxx] */
+static inline int TEMP_HYST_FROM_REG(int reg, int ix)
+{
+ return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
+}
+
+static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
+{
+ int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15);
+
+ return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
+}
+
+/* Fan input RPM */
+static inline int FAN_FROM_REG(int reg, int tpc)
+{
+ return (reg == 0 || reg == 0xffff) ? 0 :
+ (tpc == 0) ? 90000 * 60 / reg : tpc * reg;
+}
+
+static inline int FAN_TO_REG(int val, int tpc)
+{
+ return SENSORS_LIMIT((tpc == 0) ? 90000 * 60 / val : val / tpc,
+ 0, 0xffff);
+}
+
+/* Fan TPC (tach pulse count)
+ * Converts a register value to a TPC multiplier or returns 0 if the tachometer
+ * is configured in legacy (non-tpc) mode */
+static inline int FAN_TPC_FROM_REG(int reg)
+{
+ return (reg & 0x20) ? 0 : 60 >> (reg & 0x03);
+}
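
[Editor's illustration, not part of the patch.] In legacy (non-TPC) mode the constant 90000 * 60 suggests a 90 kHz tach clock counting ticks per revolution, so for example

	FAN_FROM_REG(2700, 0) = 90000 * 60 / 2700 = 2000 RPM
	FAN_TO_REG(2000, 0)   = 90000 * 60 / 2000 = 2700

while in TPC mode the reported value is simply reg * tpc.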
+
+/* Fan type
+ * The type of a fan is expressed in number of pulses-per-revolution that it
+ * emits */
+static inline int FAN_TYPE_FROM_REG(int reg)
+{
+ int edge = (reg >> 1) & 0x03;
+
+ return (edge > 0) ? 1 << (edge - 1) : 0;
+}
+
+static inline int FAN_TYPE_TO_REG(int val, int reg)
+{
+ int edge = (val == 4) ? 3 : val;
+
+ return (reg & 0xf9) | (edge << 1);
+}
+
+/* Fan max RPM */
+static const int FAN_MAX[] = {0x54, 0x38, 0x2a, 0x21, 0x1c, 0x18, 0x15, 0x12,
+ 0x11, 0x0f, 0x0e};
+
+static int FAN_MAX_FROM_REG(int reg)
+{
+ int i;
+
+ for (i = 10; i > 0; i--) {
+ if (reg == FAN_MAX[i]) {
+ break;
+ }
+ }
+
+ return 1000 + i * 500;
+}
+
+static int FAN_MAX_TO_REG(int val)
+{
+ int i;
+
+ for (i = 10; i > 0; i--) {
+ if (val > (1000 + (i - 1) * 500)) {
+ break;
+ }
+ }
+
+ return FAN_MAX[i];
+}
+
+/* PWM enable
+ * Register to enable mapping:
+ * 000: 2 fan on zone 1 auto
+ * 001: 2 fan on zone 2 auto
+ * 010: 2 fan on zone 3 auto
+ * 011: 0 fan full on
+ * 100: -1 fan disabled
+ * 101: 2 fan on hottest of zones 2,3 auto
+ * 110: 2 fan on hottest of zones 1,2,3 auto
+ * 111: 1 fan in manual mode */
+static inline int PWM_EN_FROM_REG(int reg)
+{
+ static const int en[] = {2, 2, 2, 0, -1, 2, 2, 1};
+
+ return en[(reg >> 5) & 0x07];
+}
+
+static inline int PWM_EN_TO_REG(int val, int reg)
+{
+ int en = (val == 1) ? 7 : 3;
+
+ return (reg & 0x1f) | ((en & 0x07) << 5);
+}
+
+/* PWM auto channels zone
+ * Register to auto channels zone mapping (ACZ is a bitfield with bit x
+ * corresponding to zone x+1):
+ * 000: 001 fan on zone 1 auto
+ * 001: 010 fan on zone 2 auto
+ * 010: 100 fan on zone 3 auto
+ * 011: 000 fan full on
+ * 100: 000 fan disabled
+ * 101: 110 fan on hottest of zones 2,3 auto
+ * 110: 111 fan on hottest of zones 1,2,3 auto
+ * 111: 000 fan in manual mode */
+static inline int PWM_ACZ_FROM_REG(int reg)
+{
+ static const int acz[] = {1, 2, 4, 0, 0, 6, 7, 0};
+
+ return acz[(reg >> 5) & 0x07];
+}
+
+static inline int PWM_ACZ_TO_REG(int val, int reg)
+{
+ int acz = (val == 4) ? 2 : val - 1;
+
+ return (reg & 0x1f) | ((acz & 0x07) << 5);
+}
+
+/* PWM frequency */
+static const int PWM_FREQ[] = {11, 15, 22, 29, 35, 44, 59, 88,
+ 15000, 20000, 30000, 25000, 0, 0, 0, 0};
+
+static inline int PWM_FREQ_FROM_REG(int reg)
+{
+ return PWM_FREQ[reg & 0x0f];
+}
+
+static int PWM_FREQ_TO_REG(int val, int reg)
+{
+ int i;
+
+ /* the first two cases are special - stupid chip design! */
+ if (val > 27500) {
+ i = 10;
+ } else if (val > 22500) {
+ i = 11;
+ } else {
+ for (i = 9; i > 0; i--) {
+ if (val > (PWM_FREQ[i] + PWM_FREQ[i - 1] + 1) / 2) {
+ break;
+ }
+ }
+ }
+
+ return (reg & 0xf0) | i;
+}
+
+/* PWM ramp rate
+ * Register layout:
+ * reg[0] = [OFF3, OFF2, OFF1, RES, RR1-E, RR1-2, RR1-1, RR1-0]
+ * reg[1] = [RR2-E, RR2-2, RR2-1, RR2-0, RR3-E, RR3-2, RR3-1, RR3-0] */
+static const u8 PWM_RR[] = {206, 104, 69, 41, 26, 18, 10, 5};
+
+static inline int PWM_RR_FROM_REG(int reg, int ix)
+{
+ int rr = (ix == 1) ? reg >> 4 : reg;
+
+ return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
+}
+
+static int PWM_RR_TO_REG(int val, int ix, int reg)
+{
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ if (val > (PWM_RR[i] + PWM_RR[i + 1] + 1) / 2) {
+ break;
+ }
+ }
+
+ return (ix == 1) ? (reg & 0x8f) | (i << 4) : (reg & 0xf8) | i;
+}
+
+/* PWM ramp rate enable */
+static inline int PWM_RR_EN_FROM_REG(int reg, int ix)
+{
+ return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
+}
+
+static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
+{
+ int en = (ix == 1) ? 0x80 : 0x08;
+
+ return val ? reg | en : reg & ~en;
+}
+
+/* PWM min/off
+ * The PWM min/off bits are part of the PWM ramp rate register 0 (see above for
+ * the register layout). */
+static inline int PWM_OFF_FROM_REG(int reg, int ix)
+{
+ return (reg >> (ix + 5)) & 0x01;
+}
+
+static inline int PWM_OFF_TO_REG(int val, int ix, int reg)
+{
+ return (reg & ~(1 << (ix + 5))) | ((val & 0x01) << (ix + 5));
+}
+
+/* ---------------------------------------------------------------------
+ * Device I/O access
+ * --------------------------------------------------------------------- */
+
+static u8 dme1737_read(struct i2c_client *client, u8 reg)
+{
+ s32 val = i2c_smbus_read_byte_data(client, reg);
+
+ if (val < 0) {
+ dev_warn(&client->dev, "Read from register 0x%02x failed! "
+ "Please report to the driver maintainer.\n", reg);
+ }
+
+ return val;
+}
+
+static s32 dme1737_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ s32 res = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (res < 0) {
+ dev_warn(&client->dev, "Write to register 0x%02x failed! "
+ "Please report to the driver maintainer.\n", reg);
+ }
+
+ return res;
+}
+
+static struct dme1737_data *dme1737_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ int ix;
+ u8 lsb[5];
+
+ mutex_lock(&data->update_lock);
+
+ /* Enable a Vbat monitoring cycle every 10 mins */
+ if (time_after(jiffies, data->last_vbat + 600 * HZ) || !data->valid) {
+ dme1737_write(client, DME1737_REG_CONFIG, dme1737_read(client,
+ DME1737_REG_CONFIG) | 0x10);
+ data->last_vbat = jiffies;
+ }
+
+ /* Sample register contents every 1 sec */
+ if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
+ data->vid = dme1737_read(client, DME1737_REG_VID) & 0x3f;
+
+ /* In (voltage) registers */
+ for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+ /* Voltage inputs are stored as 16 bit values even
+ * though they have only 12 bits resolution. This is
+ * to make it consistent with the temp inputs. */
+ data->in[ix] = dme1737_read(client,
+ DME1737_REG_IN(ix)) << 8;
+ data->in_min[ix] = dme1737_read(client,
+ DME1737_REG_IN_MIN(ix));
+ data->in_max[ix] = dme1737_read(client,
+ DME1737_REG_IN_MAX(ix));
+ }
+
+ /* Temp registers */
+ for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) {
+ /* Temp inputs are stored as 16 bit values even
+ * though they have only 12 bits resolution. This is
+ * to take advantage of implicit conversions between
+ * register values (2's complement) and temp values
+ * (signed decimal). */
+ data->temp[ix] = dme1737_read(client,
+ DME1737_REG_TEMP(ix)) << 8;
+ data->temp_min[ix] = dme1737_read(client,
+ DME1737_REG_TEMP_MIN(ix));
+ data->temp_max[ix] = dme1737_read(client,
+ DME1737_REG_TEMP_MAX(ix));
+ data->temp_offset[ix] = dme1737_read(client,
+ DME1737_REG_TEMP_OFFSET(ix));
+ }
+
+ /* In and temp LSB registers
+ * The LSBs are latched when the MSBs are read, so the order in
+ * which the registers are read (MSB first, then LSB) is
+ * important! */
+ for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) {
+ lsb[ix] = dme1737_read(client,
+ DME1737_REG_IN_TEMP_LSB(ix));
+ }
+ for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+ data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] <<
+ DME1737_REG_IN_LSB_SHL[ix]) & 0xf0;
+ }
+ for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) {
+ data->temp[ix] |= (lsb[DME1737_REG_TEMP_LSB[ix]] <<
+ DME1737_REG_TEMP_LSB_SHL[ix]) & 0xf0;
+ }
+
+ /* Fan registers */
+ for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
+ /* Skip reading registers if optional fans are not
+ * present */
+ if (!(data->has_fan & (1 << ix))) {
+ continue;
+ }
+ data->fan[ix] = dme1737_read(client,
+ DME1737_REG_FAN(ix));
+ data->fan[ix] |= dme1737_read(client,
+ DME1737_REG_FAN(ix) + 1) << 8;
+ data->fan_min[ix] = dme1737_read(client,
+ DME1737_REG_FAN_MIN(ix));
+ data->fan_min[ix] |= dme1737_read(client,
+ DME1737_REG_FAN_MIN(ix) + 1) << 8;
+ data->fan_opt[ix] = dme1737_read(client,
+ DME1737_REG_FAN_OPT(ix));
+ /* fan_max exists only for fan[5-6] */
+ if (ix > 3) {
+ data->fan_max[ix - 4] = dme1737_read(client,
+ DME1737_REG_FAN_MAX(ix));
+ }
+ }
+
+ /* PWM registers */
+ for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) {
+ /* Skip reading registers if optional PWMs are not
+ * present */
+ if (!(data->has_pwm & (1 << ix))) {
+ continue;
+ }
+ data->pwm[ix] = dme1737_read(client,
+ DME1737_REG_PWM(ix));
+ data->pwm_freq[ix] = dme1737_read(client,
+ DME1737_REG_PWM_FREQ(ix));
+ /* pwm_config and pwm_min exist only for pwm[1-3] */
+ if (ix < 3) {
+ data->pwm_config[ix] = dme1737_read(client,
+ DME1737_REG_PWM_CONFIG(ix));
+ data->pwm_min[ix] = dme1737_read(client,
+ DME1737_REG_PWM_MIN(ix));
+ }
+ }
+ for (ix = 0; ix < ARRAY_SIZE(data->pwm_rr); ix++) {
+ data->pwm_rr[ix] = dme1737_read(client,
+ DME1737_REG_PWM_RR(ix));
+ }
+
+ /* Thermal zone registers */
+ for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) {
+ data->zone_low[ix] = dme1737_read(client,
+ DME1737_REG_ZONE_LOW(ix));
+ data->zone_abs[ix] = dme1737_read(client,
+ DME1737_REG_ZONE_ABS(ix));
+ }
+ for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
+ data->zone_hyst[ix] = dme1737_read(client,
+ DME1737_REG_ZONE_HYST(ix));
+ }
+
+ /* Alarm registers */
+ data->alarms = dme1737_read(client,
+ DME1737_REG_ALARM1);
+ /* Bit 7 tells us if the other alarm registers are non-zero and
+ * therefore also need to be read */
+ if (data->alarms & 0x80) {
+ data->alarms |= dme1737_read(client,
+ DME1737_REG_ALARM2) << 8;
+ data->alarms |= dme1737_read(client,
+ DME1737_REG_ALARM3) << 16;
+ }
+
+ data->last_update = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+/* ---------------------------------------------------------------------
+ * Voltage sysfs attributes
+ * ix = [0-6]
+ * --------------------------------------------------------------------- */
+
+#define SYS_IN_INPUT 0
+#define SYS_IN_MIN 1
+#define SYS_IN_MAX 2
+#define SYS_IN_ALARM 3
+
+static ssize_t show_in(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dme1737_data *data = dme1737_update_device(dev);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ int res;
+
+ switch (fn) {
+ case SYS_IN_INPUT:
+ res = IN_FROM_REG(data->in[ix], ix, 16);
+ break;
+ case SYS_IN_MIN:
+ res = IN_FROM_REG(data->in_min[ix], ix, 8);
+ break;
+ case SYS_IN_MAX:
+ res = IN_FROM_REG(data->in_max[ix], ix, 8);
+ break;
+ case SYS_IN_ALARM:
+ res = (data->alarms >> DME1737_BIT_ALARM_IN[ix]) & 0x01;
+ break;
+ default:
+ res = 0;
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+
+ return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_in(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ long val = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ switch (fn) {
+ case SYS_IN_MIN:
+ data->in_min[ix] = IN_TO_REG(val, ix);
+ dme1737_write(client, DME1737_REG_IN_MIN(ix),
+ data->in_min[ix]);
+ break;
+ case SYS_IN_MAX:
+ data->in_max[ix] = IN_TO_REG(val, ix);
+ dme1737_write(client, DME1737_REG_IN_MAX(ix),
+ data->in_max[ix]);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Temperature sysfs attributes
+ * ix = [0-2]
+ * --------------------------------------------------------------------- */
+
+#define SYS_TEMP_INPUT 0
+#define SYS_TEMP_MIN 1
+#define SYS_TEMP_MAX 2
+#define SYS_TEMP_OFFSET 3
+#define SYS_TEMP_ALARM 4
+#define SYS_TEMP_FAULT 5
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dme1737_data *data = dme1737_update_device(dev);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ int res;
+
+ switch (fn) {
+ case SYS_TEMP_INPUT:
+ res = TEMP_FROM_REG(data->temp[ix], 16);
+ break;
+ case SYS_TEMP_MIN:
+ res = TEMP_FROM_REG(data->temp_min[ix], 8);
+ break;
+ case SYS_TEMP_MAX:
+ res = TEMP_FROM_REG(data->temp_max[ix], 8);
+ break;
+ case SYS_TEMP_OFFSET:
+ res = TEMP_FROM_REG(data->temp_offset[ix], 8);
+ break;
+ case SYS_TEMP_ALARM:
+ res = (data->alarms >> DME1737_BIT_ALARM_TEMP[ix]) & 0x01;
+ break;
+ case SYS_TEMP_FAULT:
+ res = (data->temp[ix] == 0x0800);
+ break;
+ default:
+ res = 0;
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+
+ return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ long val = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ switch (fn) {
+ case SYS_TEMP_MIN:
+ data->temp_min[ix] = TEMP_TO_REG(val);
+ dme1737_write(client, DME1737_REG_TEMP_MIN(ix),
+ data->temp_min[ix]);
+ break;
+ case SYS_TEMP_MAX:
+ data->temp_max[ix] = TEMP_TO_REG(val);
+ dme1737_write(client, DME1737_REG_TEMP_MAX(ix),
+ data->temp_max[ix]);
+ break;
+ case SYS_TEMP_OFFSET:
+ data->temp_offset[ix] = TEMP_TO_REG(val);
+ dme1737_write(client, DME1737_REG_TEMP_OFFSET(ix),
+ data->temp_offset[ix]);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Zone sysfs attributes
+ * ix = [0-2]
+ * --------------------------------------------------------------------- */
+
+#define SYS_ZONE_AUTO_CHANNELS_TEMP 0
+#define SYS_ZONE_AUTO_POINT1_TEMP_HYST 1
+#define SYS_ZONE_AUTO_POINT1_TEMP 2
+#define SYS_ZONE_AUTO_POINT2_TEMP 3
+#define SYS_ZONE_AUTO_POINT3_TEMP 4
+
+static ssize_t show_zone(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dme1737_data *data = dme1737_update_device(dev);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ int res;
+
+ switch (fn) {
+ case SYS_ZONE_AUTO_CHANNELS_TEMP:
+ /* check config2 for non-standard temp-to-zone mapping */
+ if ((ix == 1) && (data->config2 & 0x02)) {
+ res = 4;
+ } else {
+ res = 1 << ix;
+ }
+ break;
+ case SYS_ZONE_AUTO_POINT1_TEMP_HYST:
+ res = TEMP_FROM_REG(data->zone_low[ix], 8) -
+ TEMP_HYST_FROM_REG(data->zone_hyst[ix == 2], ix);
+ break;
+ case SYS_ZONE_AUTO_POINT1_TEMP:
+ res = TEMP_FROM_REG(data->zone_low[ix], 8);
+ break;
+ case SYS_ZONE_AUTO_POINT2_TEMP:
+ /* pwm_freq holds the temp range bits in the upper nibble */
+ res = TEMP_FROM_REG(data->zone_low[ix], 8) +
+ TEMP_RANGE_FROM_REG(data->pwm_freq[ix]);
+ break;
+ case SYS_ZONE_AUTO_POINT3_TEMP:
+ res = TEMP_FROM_REG(data->zone_abs[ix], 8);
+ break;
+ default:
+ res = 0;
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+
+ return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_zone(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ long val = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ switch (fn) {
+ case SYS_ZONE_AUTO_POINT1_TEMP_HYST:
+ /* Refresh the cache */
+ data->zone_low[ix] = dme1737_read(client,
+ DME1737_REG_ZONE_LOW(ix));
+ /* Modify the temp hyst value */
+ data->zone_hyst[ix == 2] = TEMP_HYST_TO_REG(
+ TEMP_FROM_REG(data->zone_low[ix], 8) -
+ val, ix, dme1737_read(client,
+ DME1737_REG_ZONE_HYST(ix == 2)));
+ dme1737_write(client, DME1737_REG_ZONE_HYST(ix == 2),
+ data->zone_hyst[ix == 2]);
+ break;
+ case SYS_ZONE_AUTO_POINT1_TEMP:
+ data->zone_low[ix] = TEMP_TO_REG(val);
+ dme1737_write(client, DME1737_REG_ZONE_LOW(ix),
+ data->zone_low[ix]);
+ break;
+ case SYS_ZONE_AUTO_POINT2_TEMP:
+ /* Refresh the cache */
+ data->zone_low[ix] = dme1737_read(client,
+ DME1737_REG_ZONE_LOW(ix));
+ /* Modify the temp range value (which is stored in the upper
+ * nibble of the pwm_freq register) */
+ data->pwm_freq[ix] = TEMP_RANGE_TO_REG(val -
+ TEMP_FROM_REG(data->zone_low[ix], 8),
+ dme1737_read(client,
+ DME1737_REG_PWM_FREQ(ix)));
+ dme1737_write(client, DME1737_REG_PWM_FREQ(ix),
+ data->pwm_freq[ix]);
+ break;
+ case SYS_ZONE_AUTO_POINT3_TEMP:
+ data->zone_abs[ix] = TEMP_TO_REG(val);
+ dme1737_write(client, DME1737_REG_ZONE_ABS(ix),
+ data->zone_abs[ix]);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Fan sysfs attributes
+ * ix = [0-5]
+ * --------------------------------------------------------------------- */
+
+#define SYS_FAN_INPUT 0
+#define SYS_FAN_MIN 1
+#define SYS_FAN_MAX 2
+#define SYS_FAN_ALARM 3
+#define SYS_FAN_TYPE 4
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dme1737_data *data = dme1737_update_device(dev);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ int res;
+
+ switch (fn) {
+ case SYS_FAN_INPUT:
+ res = FAN_FROM_REG(data->fan[ix],
+ ix < 4 ? 0 :
+ FAN_TPC_FROM_REG(data->fan_opt[ix]));
+ break;
+ case SYS_FAN_MIN:
+ res = FAN_FROM_REG(data->fan_min[ix],
+ ix < 4 ? 0 :
+ FAN_TPC_FROM_REG(data->fan_opt[ix]));
+ break;
+ case SYS_FAN_MAX:
+ /* only valid for fan[5-6] */
+ res = FAN_MAX_FROM_REG(data->fan_max[ix - 4]);
+ break;
+ case SYS_FAN_ALARM:
+ res = (data->alarms >> DME1737_BIT_ALARM_FAN[ix]) & 0x01;
+ break;
+ case SYS_FAN_TYPE:
+ /* only valid for fan[1-4] */
+ res = FAN_TYPE_FROM_REG(data->fan_opt[ix]);
+ break;
+ default:
+ res = 0;
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+
+ return sprintf(buf, "%d\n", res);
+}
+
+static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ long val = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ switch (fn) {
+ case SYS_FAN_MIN:
+ if (ix < 4) {
+ data->fan_min[ix] = FAN_TO_REG(val, 0);
+ } else {
+ /* Refresh the cache */
+ data->fan_opt[ix] = dme1737_read(client,
+ DME1737_REG_FAN_OPT(ix));
+ /* Modify the fan min value */
+ data->fan_min[ix] = FAN_TO_REG(val,
+ FAN_TPC_FROM_REG(data->fan_opt[ix]));
+ }
+ dme1737_write(client, DME1737_REG_FAN_MIN(ix),
+ data->fan_min[ix] & 0xff);
+ dme1737_write(client, DME1737_REG_FAN_MIN(ix) + 1,
+ data->fan_min[ix] >> 8);
+ break;
+ case SYS_FAN_MAX:
+ /* Only valid for fan[5-6] */
+ data->fan_max[ix - 4] = FAN_MAX_TO_REG(val);
+ dme1737_write(client, DME1737_REG_FAN_MAX(ix),
+ data->fan_max[ix - 4]);
+ break;
+ case SYS_FAN_TYPE:
+ /* Only valid for fan[1-4] */
+ if (!(val == 1 || val == 2 || val == 4)) {
+ count = -EINVAL;
+ dev_warn(&client->dev, "Fan type value %ld not "
+ "supported. Choose one of 1, 2, or 4.\n",
+ val);
+ goto exit;
+ }
+ data->fan_opt[ix] = FAN_TYPE_TO_REG(val, dme1737_read(client,
+ DME1737_REG_FAN_OPT(ix)));
+ dme1737_write(client, DME1737_REG_FAN_OPT(ix),
+ data->fan_opt[ix]);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+exit:
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ---------------------------------------------------------------------
+ * PWM sysfs attributes
+ * ix = [0-2, 4-5]
+ * --------------------------------------------------------------------- */
+
+#define SYS_PWM 0
+#define SYS_PWM_FREQ 1
+#define SYS_PWM_ENABLE 2
+#define SYS_PWM_RAMP_RATE 3
+#define SYS_PWM_AUTO_CHANNELS_ZONE 4
+#define SYS_PWM_AUTO_PWM_MIN 5
+#define SYS_PWM_AUTO_POINT1_PWM 6
+#define SYS_PWM_AUTO_POINT2_PWM 7
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dme1737_data *data = dme1737_update_device(dev);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ int res;
+
+ switch (fn) {
+ case SYS_PWM:
+ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 0) {
+ res = 255;
+ } else {
+ res = data->pwm[ix];
+ }
+ break;
+ case SYS_PWM_FREQ:
+ res = PWM_FREQ_FROM_REG(data->pwm_freq[ix]);
+ break;
+ case SYS_PWM_ENABLE:
+ if (ix > 3) {
+ res = 1; /* pwm[5-6] hard-wired to manual mode */
+ } else {
+ res = PWM_EN_FROM_REG(data->pwm_config[ix]);
+ }
+ break;
+ case SYS_PWM_RAMP_RATE:
+ /* Only valid for pwm[1-3] */
+ res = PWM_RR_FROM_REG(data->pwm_rr[ix > 0], ix);
+ break;
+ case SYS_PWM_AUTO_CHANNELS_ZONE:
+ /* Only valid for pwm[1-3] */
+ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+ res = PWM_ACZ_FROM_REG(data->pwm_config[ix]);
+ } else {
+ res = data->pwm_acz[ix];
+ }
+ break;
+ case SYS_PWM_AUTO_PWM_MIN:
+ /* Only valid for pwm[1-3] */
+ if (PWM_OFF_FROM_REG(data->pwm_rr[0], ix)) {
+ res = data->pwm_min[ix];
+ } else {
+ res = 0;
+ }
+ break;
+ case SYS_PWM_AUTO_POINT1_PWM:
+ /* Only valid for pwm[1-3] */
+ res = data->pwm_min[ix];
+ break;
+ case SYS_PWM_AUTO_POINT2_PWM:
+ /* Only valid for pwm[1-3] */
+ res = 255; /* hard-wired */
+ break;
+ default:
+ res = 0;
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+
+ return sprintf(buf, "%d\n", res);
+}
+
+static struct attribute *dme1737_attr_pwm[];
+static void dme1737_chmod_file(struct i2c_client*, struct attribute*, mode_t);
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute_2
+ *sensor_attr_2 = to_sensor_dev_attr_2(attr);
+ int ix = sensor_attr_2->index;
+ int fn = sensor_attr_2->nr;
+ long val = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ switch (fn) {
+ case SYS_PWM:
+ data->pwm[ix] = SENSORS_LIMIT(val, 0, 255);
+ dme1737_write(client, DME1737_REG_PWM(ix), data->pwm[ix]);
+ break;
+ case SYS_PWM_FREQ:
+ data->pwm_freq[ix] = PWM_FREQ_TO_REG(val, dme1737_read(client,
+ DME1737_REG_PWM_FREQ(ix)));
+ dme1737_write(client, DME1737_REG_PWM_FREQ(ix),
+ data->pwm_freq[ix]);
+ break;
+ case SYS_PWM_ENABLE:
+ /* Only valid for pwm[1-3] */
+ if (val < 0 || val > 2) {
+ count = -EINVAL;
+ dev_warn(&client->dev, "PWM enable %ld not "
+ "supported. Choose one of 0, 1, or 2.\n",
+ val);
+ goto exit;
+ }
+ /* Refresh the cache */
+ data->pwm_config[ix] = dme1737_read(client,
+ DME1737_REG_PWM_CONFIG(ix));
+ if (val == PWM_EN_FROM_REG(data->pwm_config[ix])) {
+ /* Bail out if no change */
+ goto exit;
+ }
+ /* Do some housekeeping if we are currently in auto mode */
+ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+ /* Save the current zone channel assignment */
+ data->pwm_acz[ix] = PWM_ACZ_FROM_REG(
+ data->pwm_config[ix]);
+ /* Save the current ramp rate state and disable it */
+ data->pwm_rr[ix > 0] = dme1737_read(client,
+ DME1737_REG_PWM_RR(ix > 0));
+ data->pwm_rr_en &= ~(1 << ix);
+ if (PWM_RR_EN_FROM_REG(data->pwm_rr[ix > 0], ix)) {
+ data->pwm_rr_en |= (1 << ix);
+ data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(0, ix,
+ data->pwm_rr[ix > 0]);
+ dme1737_write(client,
+ DME1737_REG_PWM_RR(ix > 0),
+ data->pwm_rr[ix > 0]);
+ }
+ }
+ /* Set the new PWM mode */
+ switch (val) {
+ case 0:
+ /* Change permissions of pwm[ix] to read-only */
+ dme1737_chmod_file(client, dme1737_attr_pwm[ix],
+ S_IRUGO);
+ /* Turn fan fully on */
+ data->pwm_config[ix] = PWM_EN_TO_REG(0,
+ data->pwm_config[ix]);
+ dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+ data->pwm_config[ix]);
+ break;
+ case 1:
+ /* Turn on manual mode */
+ data->pwm_config[ix] = PWM_EN_TO_REG(1,
+ data->pwm_config[ix]);
+ dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+ data->pwm_config[ix]);
+ /* Change permissions of pwm[ix] to read-writeable */
+ dme1737_chmod_file(client, dme1737_attr_pwm[ix],
+ S_IRUGO | S_IWUSR);
+ break;
+ case 2:
+ /* Change permissions of pwm[ix] to read-only */
+ dme1737_chmod_file(client, dme1737_attr_pwm[ix],
+ S_IRUGO);
+ /* Turn on auto mode using the saved zone channel
+ * assignment */
+ data->pwm_config[ix] = PWM_ACZ_TO_REG(
+ data->pwm_acz[ix],
+ data->pwm_config[ix]);
+ dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+ data->pwm_config[ix]);
+ /* Enable PWM ramp rate if previously enabled */
+ if (data->pwm_rr_en & (1 << ix)) {
+ data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(1, ix,
+ dme1737_read(client,
+ DME1737_REG_PWM_RR(ix > 0)));
+ dme1737_write(client,
+ DME1737_REG_PWM_RR(ix > 0),
+ data->pwm_rr[ix > 0]);
+ }
+ break;
+ }
+ break;
+ case SYS_PWM_RAMP_RATE:
+ /* Only valid for pwm[1-3] */
+ /* Refresh the cache */
+ data->pwm_config[ix] = dme1737_read(client,
+ DME1737_REG_PWM_CONFIG(ix));
+ data->pwm_rr[ix > 0] = dme1737_read(client,
+ DME1737_REG_PWM_RR(ix > 0));
+ /* Set the ramp rate value */
+ if (val > 0) {
+ data->pwm_rr[ix > 0] = PWM_RR_TO_REG(val, ix,
+ data->pwm_rr[ix > 0]);
+ }
+ /* Enable/disable the feature only if the associated PWM
+ * output is in automatic mode. */
+ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+ data->pwm_rr[ix > 0] = PWM_RR_EN_TO_REG(val > 0, ix,
+ data->pwm_rr[ix > 0]);
+ }
+ dme1737_write(client, DME1737_REG_PWM_RR(ix > 0),
+ data->pwm_rr[ix > 0]);
+ break;
+ case SYS_PWM_AUTO_CHANNELS_ZONE:
+ /* Only valid for pwm[1-3] */
+ if (!(val == 1 || val == 2 || val == 4 ||
+ val == 6 || val == 7)) {
+ count = -EINVAL;
+ dev_warn(&client->dev, "PWM auto channels zone %ld "
+ "not supported. Choose one of 1, 2, 4, 6, "
+ "or 7.\n", val);
+ goto exit;
+ }
+ /* Refresh the cache */
+ data->pwm_config[ix] = dme1737_read(client,
+ DME1737_REG_PWM_CONFIG(ix));
+ if (PWM_EN_FROM_REG(data->pwm_config[ix]) == 2) {
+ /* PWM is already in auto mode so update the temp
+ * channel assignment */
+ data->pwm_config[ix] = PWM_ACZ_TO_REG(val,
+ data->pwm_config[ix]);
+ dme1737_write(client, DME1737_REG_PWM_CONFIG(ix),
+ data->pwm_config[ix]);
+ } else {
+ /* PWM is not in auto mode so we save the temp
+ * channel assignment for later use */
+ data->pwm_acz[ix] = val;
+ }
+ break;
+ case SYS_PWM_AUTO_PWM_MIN:
+ /* Only valid for pwm[1-3] */
+ /* Refresh the cache */
+ data->pwm_min[ix] = dme1737_read(client,
+ DME1737_REG_PWM_MIN(ix));
+ /* There are only 2 values supported for the auto_pwm_min
+ * value: 0 or auto_point1_pwm. So if the temperature drops
+ * below the auto_point1_temp_hyst value, the fan either turns
+ * off or runs at auto_point1_pwm duty-cycle. */
+ if (val > ((data->pwm_min[ix] + 1) / 2)) {
+ data->pwm_rr[0] = PWM_OFF_TO_REG(1, ix,
+ dme1737_read(client,
+ DME1737_REG_PWM_RR(0)));
+
+ } else {
+ data->pwm_rr[0] = PWM_OFF_TO_REG(0, ix,
+ dme1737_read(client,
+ DME1737_REG_PWM_RR(0)));
+
+ }
+ dme1737_write(client, DME1737_REG_PWM_RR(0),
+ data->pwm_rr[0]);
+ break;
+ case SYS_PWM_AUTO_POINT1_PWM:
+ /* Only valid for pwm[1-3] */
+ data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255);
+ dme1737_write(client, DME1737_REG_PWM_MIN(ix),
+ data->pwm_min[ix]);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
+ }
+exit:
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ---------------------------------------------------------------------
+ * Miscellaneous sysfs attributes
+ * --------------------------------------------------------------------- */
+
+static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d\n", data->vrm);
+}
+
+static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ long val = simple_strtol(buf, NULL, 10);
+
+ data->vrm = val;
+ return count;
+}
+
+static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dme1737_data *data = dme1737_update_device(dev);
+
+ return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
+}
+
+/* ---------------------------------------------------------------------
+ * Sysfs device attribute defines and structs
+ * --------------------------------------------------------------------- */
+
+/* Voltages 0-6 */
+
+#define SENSOR_DEVICE_ATTR_IN(ix) \
+static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \
+ show_in, NULL, SYS_IN_INPUT, ix); \
+static SENSOR_DEVICE_ATTR_2(in##ix##_min, S_IRUGO | S_IWUSR, \
+ show_in, set_in, SYS_IN_MIN, ix); \
+static SENSOR_DEVICE_ATTR_2(in##ix##_max, S_IRUGO | S_IWUSR, \
+ show_in, set_in, SYS_IN_MAX, ix); \
+static SENSOR_DEVICE_ATTR_2(in##ix##_alarm, S_IRUGO, \
+ show_in, NULL, SYS_IN_ALARM, ix)
+
+SENSOR_DEVICE_ATTR_IN(0);
+SENSOR_DEVICE_ATTR_IN(1);
+SENSOR_DEVICE_ATTR_IN(2);
+SENSOR_DEVICE_ATTR_IN(3);
+SENSOR_DEVICE_ATTR_IN(4);
+SENSOR_DEVICE_ATTR_IN(5);
+SENSOR_DEVICE_ATTR_IN(6);
+
+/* Temperatures 1-3 */
+
+#define SENSOR_DEVICE_ATTR_TEMP(ix) \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_input, S_IRUGO, \
+ show_temp, NULL, SYS_TEMP_INPUT, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_min, S_IRUGO | S_IWUSR, \
+ show_temp, set_temp, SYS_TEMP_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_max, S_IRUGO | S_IWUSR, \
+ show_temp, set_temp, SYS_TEMP_MAX, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_offset, S_IRUGO, \
+ show_temp, set_temp, SYS_TEMP_OFFSET, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_alarm, S_IRUGO, \
+ show_temp, NULL, SYS_TEMP_ALARM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(temp##ix##_fault, S_IRUGO, \
+ show_temp, NULL, SYS_TEMP_FAULT, ix-1)
+
+SENSOR_DEVICE_ATTR_TEMP(1);
+SENSOR_DEVICE_ATTR_TEMP(2);
+SENSOR_DEVICE_ATTR_TEMP(3);
+
+/* Zones 1-3 */
+
+#define SENSOR_DEVICE_ATTR_ZONE(ix) \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_channels_temp, S_IRUGO, \
+ show_zone, NULL, SYS_ZONE_AUTO_CHANNELS_TEMP, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp_hyst, S_IRUGO, \
+ show_zone, set_zone, SYS_ZONE_AUTO_POINT1_TEMP_HYST, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp, S_IRUGO, \
+ show_zone, set_zone, SYS_ZONE_AUTO_POINT1_TEMP, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point2_temp, S_IRUGO, \
+ show_zone, set_zone, SYS_ZONE_AUTO_POINT2_TEMP, ix-1); \
+static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point3_temp, S_IRUGO, \
+ show_zone, set_zone, SYS_ZONE_AUTO_POINT3_TEMP, ix-1)
+
+SENSOR_DEVICE_ATTR_ZONE(1);
+SENSOR_DEVICE_ATTR_ZONE(2);
+SENSOR_DEVICE_ATTR_ZONE(3);
+
+/* Fans 1-4 */
+
+#define SENSOR_DEVICE_ATTR_FAN_1TO4(ix) \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_input, S_IRUGO, \
+ show_fan, NULL, SYS_FAN_INPUT, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \
+ show_fan, set_fan, SYS_FAN_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_alarm, S_IRUGO, \
+ show_fan, NULL, SYS_FAN_ALARM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_type, S_IRUGO | S_IWUSR, \
+ show_fan, set_fan, SYS_FAN_TYPE, ix-1)
+
+SENSOR_DEVICE_ATTR_FAN_1TO4(1);
+SENSOR_DEVICE_ATTR_FAN_1TO4(2);
+SENSOR_DEVICE_ATTR_FAN_1TO4(3);
+SENSOR_DEVICE_ATTR_FAN_1TO4(4);
+
+/* Fans 5-6 */
+
+#define SENSOR_DEVICE_ATTR_FAN_5TO6(ix) \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_input, S_IRUGO, \
+ show_fan, NULL, SYS_FAN_INPUT, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \
+ show_fan, set_fan, SYS_FAN_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_alarm, S_IRUGO, \
+ show_fan, NULL, SYS_FAN_ALARM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(fan##ix##_max, S_IRUGO | S_IWUSR, \
+ show_fan, set_fan, SYS_FAN_MAX, ix-1)
+
+SENSOR_DEVICE_ATTR_FAN_5TO6(5);
+SENSOR_DEVICE_ATTR_FAN_5TO6(6);
+
+/* PWMs 1-3 */
+
+#define SENSOR_DEVICE_ATTR_PWM_1TO3(ix) \
+static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM_ENABLE, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_ramp_rate, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM_RAMP_RATE, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_channels_zone, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM_AUTO_CHANNELS_ZONE, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_pwm_min, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM_AUTO_PWM_MIN, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_point1_pwm, S_IRUGO, \
+ show_pwm, set_pwm, SYS_PWM_AUTO_POINT1_PWM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_auto_point2_pwm, S_IRUGO, \
+ show_pwm, NULL, SYS_PWM_AUTO_POINT2_PWM, ix-1)
+
+SENSOR_DEVICE_ATTR_PWM_1TO3(1);
+SENSOR_DEVICE_ATTR_PWM_1TO3(2);
+SENSOR_DEVICE_ATTR_PWM_1TO3(3);
+
+/* PWMs 5-6 */
+
+#define SENSOR_DEVICE_ATTR_PWM_5TO6(ix) \
+static SENSOR_DEVICE_ATTR_2(pwm##ix, S_IRUGO | S_IWUSR, \
+ show_pwm, set_pwm, SYS_PWM, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_freq, S_IRUGO | S_IWUSR, \
+ show_pwm, set_pwm, SYS_PWM_FREQ, ix-1); \
+static SENSOR_DEVICE_ATTR_2(pwm##ix##_enable, S_IRUGO, \
+ show_pwm, NULL, SYS_PWM_ENABLE, ix-1)
+
+SENSOR_DEVICE_ATTR_PWM_5TO6(5);
+SENSOR_DEVICE_ATTR_PWM_5TO6(6);
+
+/* Misc */
+
+static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
+static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+
+#define SENSOR_DEV_ATTR_IN(ix) \
+&sensor_dev_attr_in##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_in##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_in##ix##_max.dev_attr.attr, \
+&sensor_dev_attr_in##ix##_alarm.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_TEMP_LOCK(ix) \
+&sensor_dev_attr_temp##ix##_offset.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_TEMP(ix) \
+SENSOR_DEV_ATTR_TEMP_LOCK(ix), \
+&sensor_dev_attr_temp##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_max.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_alarm.dev_attr.attr, \
+&sensor_dev_attr_temp##ix##_fault.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_ZONE_LOCK(ix) \
+&sensor_dev_attr_zone##ix##_auto_point1_temp_hyst.dev_attr.attr, \
+&sensor_dev_attr_zone##ix##_auto_point1_temp.dev_attr.attr, \
+&sensor_dev_attr_zone##ix##_auto_point2_temp.dev_attr.attr, \
+&sensor_dev_attr_zone##ix##_auto_point3_temp.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_ZONE(ix) \
+SENSOR_DEV_ATTR_ZONE_LOCK(ix), \
+&sensor_dev_attr_zone##ix##_auto_channels_temp.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_FAN_1TO4(ix) \
+&sensor_dev_attr_fan##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_alarm.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_type.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_FAN_5TO6(ix) \
+&sensor_dev_attr_fan##ix##_input.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_min.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_alarm.dev_attr.attr, \
+&sensor_dev_attr_fan##ix##_max.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_PWM_1TO3_LOCK(ix) \
+&sensor_dev_attr_pwm##ix##_freq.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_enable.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_ramp_rate.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_channels_zone.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_pwm_min.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_point1_pwm.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_PWM_1TO3(ix) \
+SENSOR_DEV_ATTR_PWM_1TO3_LOCK(ix), \
+&sensor_dev_attr_pwm##ix.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_auto_point2_pwm.dev_attr.attr
+
+/* These attributes are read-writeable only if the chip is *not* locked */
+#define SENSOR_DEV_ATTR_PWM_5TO6_LOCK(ix) \
+&sensor_dev_attr_pwm##ix.dev_attr.attr, \
+&sensor_dev_attr_pwm##ix##_freq.dev_attr.attr
+
+#define SENSOR_DEV_ATTR_PWM_5TO6(ix) \
+SENSOR_DEV_ATTR_PWM_5TO6_LOCK(ix), \
+&sensor_dev_attr_pwm##ix##_enable.dev_attr.attr
+
+/* This struct holds all the attributes that are always present and need to be
+ * created unconditionally. The attributes that need modification of their
+ * permissions are created read-only and write permissions are added or removed
+ * on the fly when required */
+static struct attribute *dme1737_attr[] = {
+ /* Voltages */
+ SENSOR_DEV_ATTR_IN(0),
+ SENSOR_DEV_ATTR_IN(1),
+ SENSOR_DEV_ATTR_IN(2),
+ SENSOR_DEV_ATTR_IN(3),
+ SENSOR_DEV_ATTR_IN(4),
+ SENSOR_DEV_ATTR_IN(5),
+ SENSOR_DEV_ATTR_IN(6),
+ /* Temperatures */
+ SENSOR_DEV_ATTR_TEMP(1),
+ SENSOR_DEV_ATTR_TEMP(2),
+ SENSOR_DEV_ATTR_TEMP(3),
+ /* Zones */
+ SENSOR_DEV_ATTR_ZONE(1),
+ SENSOR_DEV_ATTR_ZONE(2),
+ SENSOR_DEV_ATTR_ZONE(3),
+ /* Misc */
+ &dev_attr_vrm.attr,
+ &dev_attr_cpu0_vid.attr,
+ NULL
+};
+
+static const struct attribute_group dme1737_group = {
+ .attrs = dme1737_attr,
+};
+
+/* The following structs hold the PWM attributes, some of which are optional.
+ * Their creation depends on the chip configuration which is determined during
+ * module load. */
+static struct attribute *dme1737_attr_pwm1[] = {
+ SENSOR_DEV_ATTR_PWM_1TO3(1),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm2[] = {
+ SENSOR_DEV_ATTR_PWM_1TO3(2),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm3[] = {
+ SENSOR_DEV_ATTR_PWM_1TO3(3),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm5[] = {
+ SENSOR_DEV_ATTR_PWM_5TO6(5),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm6[] = {
+ SENSOR_DEV_ATTR_PWM_5TO6(6),
+ NULL
+};
+
+static const struct attribute_group dme1737_pwm_group[] = {
+ { .attrs = dme1737_attr_pwm1 },
+ { .attrs = dme1737_attr_pwm2 },
+ { .attrs = dme1737_attr_pwm3 },
+ { .attrs = NULL },
+ { .attrs = dme1737_attr_pwm5 },
+ { .attrs = dme1737_attr_pwm6 },
+};
+
+/* The following structs hold the fan attributes, some of which are optional.
+ * Their creation depends on the chip configuration which is determined during
+ * module load. */
+static struct attribute *dme1737_attr_fan1[] = {
+ SENSOR_DEV_ATTR_FAN_1TO4(1),
+ NULL
+};
+static struct attribute *dme1737_attr_fan2[] = {
+ SENSOR_DEV_ATTR_FAN_1TO4(2),
+ NULL
+};
+static struct attribute *dme1737_attr_fan3[] = {
+ SENSOR_DEV_ATTR_FAN_1TO4(3),
+ NULL
+};
+static struct attribute *dme1737_attr_fan4[] = {
+ SENSOR_DEV_ATTR_FAN_1TO4(4),
+ NULL
+};
+static struct attribute *dme1737_attr_fan5[] = {
+ SENSOR_DEV_ATTR_FAN_5TO6(5),
+ NULL
+};
+static struct attribute *dme1737_attr_fan6[] = {
+ SENSOR_DEV_ATTR_FAN_5TO6(6),
+ NULL
+};
+
+static const struct attribute_group dme1737_fan_group[] = {
+ { .attrs = dme1737_attr_fan1 },
+ { .attrs = dme1737_attr_fan2 },
+ { .attrs = dme1737_attr_fan3 },
+ { .attrs = dme1737_attr_fan4 },
+ { .attrs = dme1737_attr_fan5 },
+ { .attrs = dme1737_attr_fan6 },
+};
+
+/* The permissions of all of the following attributes are changed to read-
+ * writeable if the chip is *not* locked. Otherwise they stay read-only. */
+static struct attribute *dme1737_attr_lock[] = {
+ /* Temperatures */
+ SENSOR_DEV_ATTR_TEMP_LOCK(1),
+ SENSOR_DEV_ATTR_TEMP_LOCK(2),
+ SENSOR_DEV_ATTR_TEMP_LOCK(3),
+ /* Zones */
+ SENSOR_DEV_ATTR_ZONE_LOCK(1),
+ SENSOR_DEV_ATTR_ZONE_LOCK(2),
+ SENSOR_DEV_ATTR_ZONE_LOCK(3),
+ NULL
+};
+
+static const struct attribute_group dme1737_lock_group = {
+ .attrs = dme1737_attr_lock,
+};
+
+/* The permissions of the following PWM attributes are changed to read-
+ * writeable if the chip is *not* locked and the respective PWM is available.
+ * Otherwise they stay read-only. */
+static struct attribute *dme1737_attr_pwm1_lock[] = {
+ SENSOR_DEV_ATTR_PWM_1TO3_LOCK(1),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm2_lock[] = {
+ SENSOR_DEV_ATTR_PWM_1TO3_LOCK(2),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm3_lock[] = {
+ SENSOR_DEV_ATTR_PWM_1TO3_LOCK(3),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm5_lock[] = {
+ SENSOR_DEV_ATTR_PWM_5TO6_LOCK(5),
+ NULL
+};
+static struct attribute *dme1737_attr_pwm6_lock[] = {
+ SENSOR_DEV_ATTR_PWM_5TO6_LOCK(6),
+ NULL
+};
+
+static const struct attribute_group dme1737_pwm_lock_group[] = {
+ { .attrs = dme1737_attr_pwm1_lock },
+ { .attrs = dme1737_attr_pwm2_lock },
+ { .attrs = dme1737_attr_pwm3_lock },
+ { .attrs = NULL },
+ { .attrs = dme1737_attr_pwm5_lock },
+ { .attrs = dme1737_attr_pwm6_lock },
+};
+
+/* Pwm[1-3] are read-writeable if the associated pwm is in manual mode and the
+ * chip is not locked. Otherwise they are read-only. */
+static struct attribute *dme1737_attr_pwm[] = {
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
+};
+
+/* ---------------------------------------------------------------------
+ * Super-IO functions
+ * --------------------------------------------------------------------- */
+
+static inline int dme1737_sio_inb(int sio_cip, int reg)
+{
+ outb(reg, sio_cip);
+ return inb(sio_cip + 1);
+}
+
+static inline void dme1737_sio_outb(int sio_cip, int reg, int val)
+{
+ outb(reg, sio_cip);
+ outb(val, sio_cip + 1);
+}
+
+static int dme1737_sio_get_features(int sio_cip, struct i2c_client *client)
+{
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ int err = 0, reg;
+ u16 addr;
+
+ /* Enter configuration mode */
+ outb(0x55, sio_cip);
+
+ /* Check device ID
+ * The DME1737 can return either 0x78 or 0x77 as its device ID. */
+ reg = dme1737_sio_inb(sio_cip, 0x20);
+ if (!(reg == 0x77 || reg == 0x78)) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /* Select logical device A (runtime registers) */
+ dme1737_sio_outb(sio_cip, 0x07, 0x0a);
+
+ /* Get the base address of the runtime registers */
+ if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+ dme1737_sio_inb(sio_cip, 0x61))) {
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /* Read the runtime registers to determine which optional features
+ * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set
+ * to '10' if the respective feature is enabled. */
+ if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */
+ data->has_fan |= (1 << 5);
+ }
+ if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */
+ data->has_pwm |= (1 << 5);
+ }
+ if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */
+ data->has_fan |= (1 << 4);
+ }
+ if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */
+ data->has_pwm |= (1 << 4);
+ }
+
+exit:
+ /* Exit configuration mode */
+ outb(0xaa, sio_cip);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------
+ * Device detection, registration and initialization
+ * --------------------------------------------------------------------- */
+
+static struct i2c_driver dme1737_driver;
+
+static void dme1737_chmod_file(struct i2c_client *client,
+ struct attribute *attr, mode_t mode)
+{
+ if (sysfs_chmod_file(&client->dev.kobj, attr, mode)) {
+ dev_warn(&client->dev, "Failed to change permissions of %s.\n",
+ attr->name);
+ }
+}
+
+static void dme1737_chmod_group(struct i2c_client *client,
+ const struct attribute_group *group,
+ mode_t mode)
+{
+ struct attribute **attr;
+
+ for (attr = group->attrs; *attr; attr++) {
+ dme1737_chmod_file(client, *attr, mode);
+ }
+}
+
+static int dme1737_init_client(struct i2c_client *client)
+{
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ int ix;
+ u8 reg;
+
+ data->config = dme1737_read(client, DME1737_REG_CONFIG);
+ /* Inform if part is not monitoring/started */
+ if (!(data->config & 0x01)) {
+ if (!force_start) {
+ dev_err(&client->dev, "Device is not monitoring. "
+ "Use the force_start load parameter to "
+ "override.\n");
+ return -EFAULT;
+ }
+
+ /* Force monitoring */
+ data->config |= 0x01;
+ dme1737_write(client, DME1737_REG_CONFIG, data->config);
+ }
+ /* Inform if part is not ready */
+ if (!(data->config & 0x04)) {
+ dev_err(&client->dev, "Device is not ready.\n");
+ return -EFAULT;
+ }
+
+ data->config2 = dme1737_read(client, DME1737_REG_CONFIG2);
+ /* Check if optional fan3 input is enabled */
+ if (data->config2 & 0x04) {
+ data->has_fan |= (1 << 2);
+ }
+
+ /* Fan4 and pwm3 are only available if the client's I2C address
+ * is the default 0x2e. Otherwise the I/Os associated with these
+ * functions are used for addr enable/select. */
+ if (client->addr == 0x2e) {
+ data->has_fan |= (1 << 3);
+ data->has_pwm |= (1 << 2);
+ }
+
+ /* Determine if the optional fan[5-6] and/or pwm[5-6] are enabled.
+ * For this, we need to query the runtime registers through the
+ * Super-IO LPC interface. Try both config ports 0x2e and 0x4e. */
+ if (dme1737_sio_get_features(0x2e, client) &&
+ dme1737_sio_get_features(0x4e, client)) {
+ dev_warn(&client->dev, "Failed to query Super-IO for optional "
+ "features.\n");
+ }
+
+ /* Fan1, fan2, pwm1, and pwm2 are always present */
+ data->has_fan |= 0x03;
+ data->has_pwm |= 0x03;
+
+ dev_info(&client->dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, "
+ "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n",
+ (data->has_pwm & (1 << 2)) ? "yes" : "no",
+ (data->has_pwm & (1 << 4)) ? "yes" : "no",
+ (data->has_pwm & (1 << 5)) ? "yes" : "no",
+ (data->has_fan & (1 << 2)) ? "yes" : "no",
+ (data->has_fan & (1 << 3)) ? "yes" : "no",
+ (data->has_fan & (1 << 4)) ? "yes" : "no",
+ (data->has_fan & (1 << 5)) ? "yes" : "no");
+
+ reg = dme1737_read(client, DME1737_REG_TACH_PWM);
+ /* Inform if fan-to-pwm mapping differs from the default */
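+ /* The default 0xa4 decodes to fan1->pwm1, fan2->pwm2, fan3->pwm3,
+ * fan4->pwm3 (two bits per fan, as printed below) */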
+ if (reg != 0xa4) {
+ dev_warn(&client->dev, "Non-standard fan to pwm mapping: "
+ "fan1->pwm%d, fan2->pwm%d, fan3->pwm%d, "
+ "fan4->pwm%d. Please report to the driver "
+ "maintainer.\n",
+ (reg & 0x03) + 1, ((reg >> 2) & 0x03) + 1,
+ ((reg >> 4) & 0x03) + 1, ((reg >> 6) & 0x03) + 1);
+ }
+
+ /* Switch pwm[1-3] to manual mode if they are currently disabled and
+ * set the duty-cycles to 0% (which is identical to the PWMs being
+ * disabled). */
+ if (!(data->config & 0x02)) {
+ for (ix = 0; ix < 3; ix++) {
+ data->pwm_config[ix] = dme1737_read(client,
+ DME1737_REG_PWM_CONFIG(ix));
+ if ((data->has_pwm & (1 << ix)) &&
+ (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) {
+ dev_info(&client->dev, "Switching pwm%d to "
+ "manual mode.\n", ix + 1);
+ data->pwm_config[ix] = PWM_EN_TO_REG(1,
+ data->pwm_config[ix]);
+ dme1737_write(client, DME1737_REG_PWM(ix), 0);
+ dme1737_write(client,
+ DME1737_REG_PWM_CONFIG(ix),
+ data->pwm_config[ix]);
+ }
+ }
+ }
+
+ /* Initialize the default PWM auto channels zone (acz) assignments */
+ data->pwm_acz[0] = 1; /* pwm1 -> zone1 */
+ data->pwm_acz[1] = 2; /* pwm2 -> zone2 */
+ data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
+
+ /* Set VRM */
+ data->vrm = vid_which_vrm();
+
+ return 0;
+}
+
+static int dme1737_detect(struct i2c_adapter *adapter, int address,
+ int kind)
+{
+ u8 company, verstep = 0;
+ struct i2c_client *client;
+ struct dme1737_data *data;
+ int ix, err = 0;
+ const char *name;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ goto exit;
+ }
+
+ if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ client = &data->client;
+ i2c_set_clientdata(client, data);
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &dme1737_driver;
+
+ /* A negative kind means that the driver was loaded with no force
+ * parameter (default), so we must identify the chip. */
+ if (kind < 0) {
+ company = dme1737_read(client, DME1737_REG_COMPANY);
+ verstep = dme1737_read(client, DME1737_REG_VERSTEP);
+
+ if (!((company == DME1737_COMPANY_SMSC) &&
+ ((verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP))) {
+ err = -ENODEV;
+ goto exit_kfree;
+ }
+ }
+
+ kind = dme1737;
+ name = "dme1737";
+
+ /* Fill in the remaining client fields and put it into the global
+ * list */
+ strlcpy(client->name, name, I2C_NAME_SIZE);
+ mutex_init(&data->update_lock);
+
+ /* Tell the I2C layer a new client has arrived */
+ if ((err = i2c_attach_client(client))) {
+ goto exit_kfree;
+ }
+
+ /* Initialize the DME1737 chip */
+ if ((err = dme1737_init_client(client))) {
+ goto exit_detach;
+ }
+
+ /* Create standard sysfs attributes */
+ if ((err = sysfs_create_group(&client->dev.kobj, &dme1737_group))) {
+ goto exit_detach;
+ }
+
+ /* Create fan sysfs attributes */
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
+ if (data->has_fan & (1 << ix)) {
+ if ((err = sysfs_create_group(&client->dev.kobj,
+ &dme1737_fan_group[ix]))) {
+ goto exit_remove;
+ }
+ }
+ }
+
+ /* Create PWM sysfs attributes */
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
+ if (data->has_pwm & (1 << ix)) {
+ if ((err = sysfs_create_group(&client->dev.kobj,
+ &dme1737_pwm_group[ix]))) {
+ goto exit_remove;
+ }
+ }
+ }
+
+ /* Inform if the device is locked. Otherwise change the permissions of
+ * selected attributes from read-only to read-writeable. */
+ if (data->config & 0x02) {
+ dev_info(&client->dev, "Device is locked. Some attributes "
+ "will be read-only.\n");
+ } else {
+ /* Change permissions of standard attributes */
+ dme1737_chmod_group(client, &dme1737_lock_group,
+ S_IRUGO | S_IWUSR);
+
+ /* Change permissions of PWM attributes */
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_lock_group); ix++) {
+ if (data->has_pwm & (1 << ix)) {
+ dme1737_chmod_group(client,
+ &dme1737_pwm_lock_group[ix],
+ S_IRUGO | S_IWUSR);
+ }
+ }
+
+ /* Change permissions of pwm[1-3] if in manual mode */
+ for (ix = 0; ix < 3; ix++) {
+ if ((data->has_pwm & (1 << ix)) &&
+ (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
+ dme1737_chmod_file(client,
+ dme1737_attr_pwm[ix],
+ S_IRUGO | S_IWUSR);
+ }
+ }
+ }
+
+ /* Register device */
+ data->class_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->class_dev)) {
+ err = PTR_ERR(data->class_dev);
+ goto exit_remove;
+ }
+
+ dev_info(&adapter->dev, "Found a DME1737 chip at 0x%02x "
+ "(rev 0x%02x)\n", client->addr, verstep);
+
+ return 0;
+
+exit_remove:
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
+ if (data->has_fan & (1 << ix)) {
+ sysfs_remove_group(&client->dev.kobj,
+ &dme1737_fan_group[ix]);
+ }
+ }
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
+ if (data->has_pwm & (1 << ix)) {
+ sysfs_remove_group(&client->dev.kobj,
+ &dme1737_pwm_group[ix]);
+ }
+ }
+ sysfs_remove_group(&client->dev.kobj, &dme1737_group);
+exit_detach:
+ i2c_detach_client(client);
+exit_kfree:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int dme1737_attach_adapter(struct i2c_adapter *adapter)
+{
+ if (!(adapter->class & I2C_CLASS_HWMON)) {
+ return 0;
+ }
+
+ return i2c_probe(adapter, &addr_data, dme1737_detect);
+}
+
+static int dme1737_detach_client(struct i2c_client *client)
+{
+ struct dme1737_data *data = i2c_get_clientdata(client);
+ int ix, err;
+
+ hwmon_device_unregister(data->class_dev);
+
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
+ if (data->has_fan & (1 << ix)) {
+ sysfs_remove_group(&client->dev.kobj,
+ &dme1737_fan_group[ix]);
+ }
+ }
+ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
+ if (data->has_pwm & (1 << ix)) {
+ sysfs_remove_group(&client->dev.kobj,
+ &dme1737_pwm_group[ix]);
+ }
+ }
+ sysfs_remove_group(&client->dev.kobj, &dme1737_group);
+
+ if ((err = i2c_detach_client(client))) {
+ return err;
+ }
+
+ kfree(data);
+ return 0;
+}
+
+static struct i2c_driver dme1737_driver = {
+ .driver = {
+ .name = "dme1737",
+ },
+ .attach_adapter = dme1737_attach_adapter,
+ .detach_client = dme1737_detach_client,
+};
+
+static int __init dme1737_init(void)
+{
+ return i2c_add_driver(&dme1737_driver);
+}
+
+static void __exit dme1737_exit(void)
+{
+ i2c_del_driver(&dme1737_driver);
+}
+
+MODULE_AUTHOR("Juerg Haefliger <juergh@gmail.com>");
+MODULE_DESCRIPTION("DME1737 sensors");
+MODULE_LICENSE("GPL");
+
+module_init(dme1737_init);
+module_exit(dme1737_exit);
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index d5ac422d73b2..1212d6b7f316 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -27,6 +27,7 @@
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
@@ -52,9 +53,11 @@ MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low")
#define DS1621_REG_CONFIG_DONE 0x80
/* The DS1621 registers */
-#define DS1621_REG_TEMP 0xAA /* word, RO */
-#define DS1621_REG_TEMP_MIN 0xA2 /* word, RW */
-#define DS1621_REG_TEMP_MAX 0xA1 /* word, RW */
+static const u8 DS1621_REG_TEMP[3] = {
+ 0xAA, /* input, word, RO */
+ 0xA2, /* min, word, RW */
+ 0xA1, /* max, word, RW */
+};
#define DS1621_REG_CONF 0xAC /* byte, RW */
#define DS1621_COM_START 0xEE /* no data */
#define DS1621_COM_STOP 0x22 /* no data */
@@ -63,10 +66,7 @@ MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low")
#define DS1621_ALARM_TEMP_HIGH 0x40
#define DS1621_ALARM_TEMP_LOW 0x20
-/* Conversions. Rounding and limit checking is only done on the TO_REG
- variants. Note that you should be a bit careful with which arguments
- these macros are called: arguments may be evaluated more than once.
- Fixing this is just not worth it. */
+/* Conversions */
#define ALARMS_FROM_REG(val) ((val) & \
(DS1621_ALARM_TEMP_HIGH | DS1621_ALARM_TEMP_LOW))
@@ -78,7 +78,7 @@ struct ds1621_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- u16 temp, temp_min, temp_max; /* Register values, word */
+ u16 temp[3]; /* Register values, word */
u8 conf; /* Register encoding, combined */
};
@@ -101,7 +101,7 @@ static struct i2c_driver ds1621_driver = {
/* All registers are word-sized, except for the configuration register.
DS1621 uses a high-byte first convention, which is exactly opposite to
- the usual practice. */
+ the SMBus standard. */
static int ds1621_read_value(struct i2c_client *client, u8 reg)
{
if (reg == DS1621_REG_CONF)
@@ -110,9 +110,6 @@ static int ds1621_read_value(struct i2c_client *client, u8 reg)
return swab16(i2c_smbus_read_word_data(client, reg));
}
-/* All registers are word-sized, except for the configuration register.
- DS1621 uses a high-byte first convention, which is exactly opposite to
- the usual practice. */
static int ds1621_write_value(struct i2c_client *client, u8 reg, u16 value)
{
if (reg == DS1621_REG_CONF)
@@ -139,50 +136,61 @@ static void ds1621_init_client(struct i2c_client *client)
i2c_smbus_write_byte(client, DS1621_COM_START);
}
-#define show(value) \
-static ssize_t show_##value(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- struct ds1621_data *data = ds1621_update_client(dev); \
- return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->value)); \
+static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ds1621_data *data = ds1621_update_client(dev);
+ return sprintf(buf, "%d\n",
+ LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
-show(temp);
-show(temp_min);
-show(temp_max);
-
-#define set_temp(suffix, value, reg) \
-static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
-{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct ds1621_data *data = ds1621_update_client(dev); \
- u16 val = LM75_TEMP_TO_REG(simple_strtoul(buf, NULL, 10)); \
- \
- mutex_lock(&data->update_lock); \
- data->value = val; \
- ds1621_write_value(client, reg, data->value); \
- mutex_unlock(&data->update_lock); \
- return count; \
-}
+static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ds1621_data *data = ds1621_update_client(dev);
+ u16 val = LM75_TEMP_TO_REG(simple_strtoul(buf, NULL, 10));
-set_temp(min, temp_min, DS1621_REG_TEMP_MIN);
-set_temp(max, temp_max, DS1621_REG_TEMP_MAX);
+ mutex_lock(&data->update_lock);
+ data->temp[attr->index] = val;
+ ds1621_write_value(client, DS1621_REG_TEMP[attr->index],
+ data->temp[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarms(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct ds1621_data *data = ds1621_update_client(dev);
return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->conf));
}
+static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ds1621_data *data = ds1621_update_client(dev);
+ return sprintf(buf, "%d\n", !!(data->conf & attr->index));
+}
+
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
-static DEVICE_ATTR(temp1_input, S_IRUGO , show_temp, NULL);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO , show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
+ DS1621_ALARM_TEMP_LOW);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
+ DS1621_ALARM_TEMP_HIGH);
static struct attribute *ds1621_attributes[] = {
- &dev_attr_temp1_input.attr,
- &dev_attr_temp1_min.attr,
- &dev_attr_temp1_max.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
NULL
};
@@ -204,9 +212,9 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address,
int kind)
{
int conf, temp;
- struct i2c_client *new_client;
+ struct i2c_client *client;
struct ds1621_data *data;
- int err = 0;
+ int i, err = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
| I2C_FUNC_SMBUS_WORD_DATA
@@ -221,55 +229,44 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address,
goto exit;
}
- new_client = &data->client;
- i2c_set_clientdata(new_client, data);
- new_client->addr = address;
- new_client->adapter = adapter;
- new_client->driver = &ds1621_driver;
- new_client->flags = 0;
-
+ client = &data->client;
+ i2c_set_clientdata(client, data);
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &ds1621_driver;
/* Now, we do the remaining detection. It is lousy. */
if (kind < 0) {
/* The NVB bit should be low if no EEPROM write has been
requested during the latest 10ms, which is highly
improbable in our case. */
- conf = ds1621_read_value(new_client, DS1621_REG_CONF);
+ conf = ds1621_read_value(client, DS1621_REG_CONF);
if (conf & DS1621_REG_CONFIG_NVB)
goto exit_free;
/* The 7 lowest bits of a temperature should always be 0. */
- temp = ds1621_read_value(new_client, DS1621_REG_TEMP);
- if (temp & 0x007f)
- goto exit_free;
- temp = ds1621_read_value(new_client, DS1621_REG_TEMP_MIN);
- if (temp & 0x007f)
- goto exit_free;
- temp = ds1621_read_value(new_client, DS1621_REG_TEMP_MAX);
- if (temp & 0x007f)
- goto exit_free;
+ for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
+ temp = ds1621_read_value(client, DS1621_REG_TEMP[i]);
+ if (temp & 0x007f)
+ goto exit_free;
+ }
}
- /* Determine the chip type - only one kind supported! */
- if (kind <= 0)
- kind = ds1621;
-
/* Fill in remaining client fields and put it into the global list */
- strlcpy(new_client->name, "ds1621", I2C_NAME_SIZE);
- data->valid = 0;
+ strlcpy(client->name, "ds1621", I2C_NAME_SIZE);
mutex_init(&data->update_lock);
/* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(new_client)))
+ if ((err = i2c_attach_client(client)))
goto exit_free;
/* Initialize the DS1621 chip */
- ds1621_init_client(new_client);
+ ds1621_init_client(client);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &ds1621_group)))
+ if ((err = sysfs_create_group(&client->dev.kobj, &ds1621_group)))
goto exit_detach;
- data->class_dev = hwmon_device_register(&new_client->dev);
+ data->class_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
goto exit_remove_files;
@@ -278,9 +275,9 @@ static int ds1621_detect(struct i2c_adapter *adapter, int address,
return 0;
exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &ds1621_group);
+ sysfs_remove_group(&client->dev.kobj, &ds1621_group);
exit_detach:
- i2c_detach_client(new_client);
+ i2c_detach_client(client);
exit_free:
kfree(data);
exit:
@@ -314,23 +311,21 @@ static struct ds1621_data *ds1621_update_client(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
+ int i;
dev_dbg(&client->dev, "Starting ds1621 update\n");
data->conf = ds1621_read_value(client, DS1621_REG_CONF);
- data->temp = ds1621_read_value(client, DS1621_REG_TEMP);
-
- data->temp_min = ds1621_read_value(client,
- DS1621_REG_TEMP_MIN);
- data->temp_max = ds1621_read_value(client,
- DS1621_REG_TEMP_MAX);
+ for (i = 0; i < ARRAY_SIZE(data->temp); i++)
+ data->temp[i] = ds1621_read_value(client,
+ DS1621_REG_TEMP[i]);
/* reset alarms if necessary */
new_conf = data->conf;
- if (data->temp > data->temp_min)
+ if (data->temp[0] > data->temp[1]) /* input > min */
new_conf &= ~DS1621_ALARM_TEMP_LOW;
- if (data->temp < data->temp_max)
+ if (data->temp[0] < data->temp[2]) /* input < max */
new_conf &= ~DS1621_ALARM_TEMP_HIGH;
if (data->conf != new_conf)
ds1621_write_value(client, DS1621_REG_CONF,
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index cdbe309b8fc4..6f60715f34f8 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -127,6 +127,13 @@ superio_exit(int base)
#define F71805F_REG_TEMP_HIGH(nr) (0x54 + 2 * (nr))
#define F71805F_REG_TEMP_HYST(nr) (0x55 + 2 * (nr))
#define F71805F_REG_TEMP_MODE 0x01
+/* pwm/fan pwmnr from 0 to 2, auto point apnr from 0 to 2 */
+/* map Fintek numbers to our numbers as follows: 9->0, 5->1, 1->2 */
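+/* the (2 - (apnr)) term in the macros below reverses the index accordingly */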
+#define F71805F_REG_PWM_AUTO_POINT_TEMP(pwmnr, apnr) \
+ (0xA0 + 0x10 * (pwmnr) + (2 - (apnr)))
+#define F71805F_REG_PWM_AUTO_POINT_FAN(pwmnr, apnr) \
+ (0xA4 + 0x10 * (pwmnr) + \
+ 2 * (2 - (apnr)))
#define F71805F_REG_START 0x00
/* status nr from 0 to 2 */
@@ -144,6 +151,11 @@ superio_exit(int base)
* Data structures and manipulation thereof
*/
+struct f71805f_auto_point {
+ u8 temp[3];
+ u16 fan[3];
+};
+
struct f71805f_data {
unsigned short addr;
const char *name;
@@ -170,6 +182,7 @@ struct f71805f_data {
u8 temp_hyst[3];
u8 temp_mode;
unsigned long alarms;
+ struct f71805f_auto_point auto_points[3];
};
struct f71805f_sio_data {
@@ -312,7 +325,7 @@ static void f71805f_write16(struct f71805f_data *data, u8 reg, u16 val)
static struct f71805f_data *f71805f_update_device(struct device *dev)
{
struct f71805f_data *data = dev_get_drvdata(dev);
- int nr;
+ int nr, apnr;
mutex_lock(&data->update_lock);
@@ -342,6 +355,18 @@ static struct f71805f_data *f71805f_update_device(struct device *dev)
F71805F_REG_TEMP_HYST(nr));
}
data->temp_mode = f71805f_read8(data, F71805F_REG_TEMP_MODE);
+ for (nr = 0; nr < 3; nr++) {
+ for (apnr = 0; apnr < 3; apnr++) {
+ data->auto_points[nr].temp[apnr] =
+ f71805f_read8(data,
+ F71805F_REG_PWM_AUTO_POINT_TEMP(nr,
+ apnr));
+ data->auto_points[nr].fan[apnr] =
+ f71805f_read16(data,
+ F71805F_REG_PWM_AUTO_POINT_FAN(nr,
+ apnr));
+ }
+ }
data->last_limits = jiffies;
}
@@ -705,6 +730,70 @@ static ssize_t set_pwm_freq(struct device *dev, struct device_attribute
return count;
}
+static ssize_t show_pwm_auto_point_temp(struct device *dev,
+ struct device_attribute *devattr,
+ char* buf)
+{
+ struct f71805f_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ int pwmnr = attr->nr;
+ int apnr = attr->index;
+
+ return sprintf(buf, "%ld\n",
+ temp_from_reg(data->auto_points[pwmnr].temp[apnr]));
+}
+
+static ssize_t set_pwm_auto_point_temp(struct device *dev,
+ struct device_attribute *devattr,
+ const char* buf, size_t count)
+{
+ struct f71805f_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ int pwmnr = attr->nr;
+ int apnr = attr->index;
+ unsigned long val = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->auto_points[pwmnr].temp[apnr] = temp_to_reg(val);
+ f71805f_write8(data, F71805F_REG_PWM_AUTO_POINT_TEMP(pwmnr, apnr),
+ data->auto_points[pwmnr].temp[apnr]);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_fan(struct device *dev,
+ struct device_attribute *devattr,
+ char* buf)
+{
+ struct f71805f_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ int pwmnr = attr->nr;
+ int apnr = attr->index;
+
+ return sprintf(buf, "%ld\n",
+ fan_from_reg(data->auto_points[pwmnr].fan[apnr]));
+}
+
+static ssize_t set_pwm_auto_point_fan(struct device *dev,
+ struct device_attribute *devattr,
+ const char* buf, size_t count)
+{
+ struct f71805f_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
+ int pwmnr = attr->nr;
+ int apnr = attr->index;
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->auto_points[pwmnr].fan[apnr] = fan_to_reg(val);
+ f71805f_write16(data, F71805F_REG_PWM_AUTO_POINT_FAN(pwmnr, apnr),
+ data->auto_points[pwmnr].fan[apnr]);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
@@ -932,6 +1021,63 @@ static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO | S_IWUSR,
show_pwm_freq, set_pwm_freq, 2);
static SENSOR_DEVICE_ATTR(pwm3_mode, S_IRUGO, show_pwm_mode, NULL, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 0, 1);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 0, 2);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 0, 2);
+
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 1, 1);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 1, 2);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 2, 1);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_temp, set_pwm_auto_point_temp,
+ 2, 2);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_fan, S_IRUGO | S_IWUSR,
+ show_pwm_auto_point_fan, set_pwm_auto_point_fan,
+ 2, 2);
+
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
@@ -1014,6 +1160,25 @@ static struct attribute *f71805f_attributes[] = {
&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp3_type.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point1_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_fan.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_fan.dev_attr.attr,
+
&sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
@@ -1242,12 +1407,12 @@ static int __devexit f71805f_remove(struct platform_device *pdev)
struct resource *res;
int i;
- platform_set_drvdata(pdev, NULL);
hwmon_device_unregister(data->class_dev);
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group);
for (i = 0; i < 4; i++)
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1290,15 +1455,12 @@ static int __init f71805f_device_add(unsigned short address,
goto exit_device_put;
}
- pdev->dev.platform_data = kmalloc(sizeof(struct f71805f_sio_data),
- GFP_KERNEL);
- if (!pdev->dev.platform_data) {
- err = -ENOMEM;
+ err = platform_device_add_data(pdev, sio_data,
+ sizeof(struct f71805f_sio_data));
+ if (err) {
printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
goto exit_device_put;
}
- memcpy(pdev->dev.platform_data, sio_data,
- sizeof(struct f71805f_sio_data));
err = platform_device_add(pdev);
if (err) {
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 62afc63708a5..eff6036e15c0 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -6,6 +6,7 @@
IT8712F Super I/O chip w/LPC interface
IT8716F Super I/O chip w/LPC interface
IT8718F Super I/O chip w/LPC interface
+ IT8726F Super I/O chip w/LPC interface
Sis950 A clone of the IT8705F
Copyright (C) 2001 Chris Gauthron <chrisg@0-in.com>
@@ -30,8 +31,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
@@ -40,10 +40,12 @@
#include <linux/sysfs.h>
#include <asm/io.h>
+#define DRVNAME "it87"
-static unsigned short isa_address;
enum chips { it87, it8712, it8716, it8718 };
+static struct platform_device *pdev;
+
#define REG 0x2e /* The register to read/write */
#define DEV 0x07 /* Register: Logical device select */
#define VAL 0x2f /* The value to read/write */
@@ -97,6 +99,7 @@ superio_exit(void)
#define IT8705F_DEVID 0x8705
#define IT8716F_DEVID 0x8716
#define IT8718F_DEVID 0x8718
+#define IT8726F_DEVID 0x8726
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -110,10 +113,6 @@ static int update_vbat;
/* Not all BIOSes properly configure the PWM registers */
static int fix_pwm_polarity;
-/* Values read from Super-I/O config space */
-static u16 chip_type;
-static u8 vid_value;
-
/* Many IT87 constants specified below */
/* Length of ISA address segment */
@@ -214,13 +213,20 @@ static const unsigned int pwm_freq[8] = {
};
+struct it87_sio_data {
+ enum chips type;
+ /* Values read from Super-I/O config space */
+ u8 vid_value;
+};
+
/* For each registered chip, we need to keep some data in memory.
The structure is dynamically allocated. */
struct it87_data {
- struct i2c_client client;
struct class_device *class_dev;
enum chips type;
+ unsigned short addr;
+ const char *name;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
@@ -245,26 +251,25 @@ struct it87_data {
};
-static int it87_detect(struct i2c_adapter *adapter);
-static int it87_detach_client(struct i2c_client *client);
+static int it87_probe(struct platform_device *pdev);
+static int it87_remove(struct platform_device *pdev);
-static int it87_read_value(struct i2c_client *client, u8 reg);
-static void it87_write_value(struct i2c_client *client, u8 reg, u8 value);
+static int it87_read_value(struct it87_data *data, u8 reg);
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value);
static struct it87_data *it87_update_device(struct device *dev);
-static int it87_check_pwm(struct i2c_client *client);
-static void it87_init_client(struct i2c_client *client, struct it87_data *data);
+static int it87_check_pwm(struct device *dev);
+static void it87_init_device(struct platform_device *pdev);
-static struct i2c_driver it87_isa_driver = {
+static struct platform_driver it87_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "it87-isa",
+ .name = DRVNAME,
},
- .attach_adapter = it87_detect,
- .detach_client = it87_detach_client,
+ .probe = it87_probe,
+ .remove = __devexit_p(it87_remove),
};
-
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -301,13 +306,12 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_min[nr] = IN_TO_REG(val);
- it87_write_value(client, IT87_REG_VIN_MIN(nr),
+ it87_write_value(data, IT87_REG_VIN_MIN(nr),
data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -318,13 +322,12 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_max[nr] = IN_TO_REG(val);
- it87_write_value(client, IT87_REG_VIN_MAX(nr),
+ it87_write_value(data, IT87_REG_VIN_MAX(nr),
data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -392,13 +395,12 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_high[nr] = TEMP_TO_REG(val);
- it87_write_value(client, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
+ it87_write_value(data, IT87_REG_TEMP_HIGH(nr), data->temp_high[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -408,13 +410,12 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_low[nr] = TEMP_TO_REG(val);
- it87_write_value(client, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
+ it87_write_value(data, IT87_REG_TEMP_LOW(nr), data->temp_low[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -451,8 +452,7 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -468,7 +468,7 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return -EINVAL;
}
- it87_write_value(client, IT87_REG_TEMP_ENABLE, data->sensor);
+ it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
mutex_unlock(&data->update_lock);
return count;
}
@@ -542,13 +542,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
u8 reg;
mutex_lock(&data->update_lock);
- reg = it87_read_value(client, IT87_REG_FAN_DIV);
+ reg = it87_read_value(data, IT87_REG_FAN_DIV);
switch (nr) {
case 0: data->fan_div[nr] = reg & 0x07; break;
case 1: data->fan_div[nr] = (reg >> 3) & 0x07; break;
@@ -556,7 +555,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
}
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(client, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
+ it87_write_value(data, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -566,14 +565,13 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
int min;
u8 old;
mutex_lock(&data->update_lock);
- old = it87_read_value(client, IT87_REG_FAN_DIV);
+ old = it87_read_value(data, IT87_REG_FAN_DIV);
/* Save fan min limit */
min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
@@ -594,11 +592,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
val |= (data->fan_div[1] & 0x07) << 3;
if (data->fan_div[2] == 3)
val |= 0x1 << 6;
- it87_write_value(client, IT87_REG_FAN_DIV, val);
+ it87_write_value(data, IT87_REG_FAN_DIV, val);
/* Restore fan min limit */
data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
- it87_write_value(client, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
+ it87_write_value(data, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -609,8 +607,7 @@ static ssize_t set_pwm_enable(struct device *dev,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -618,17 +615,17 @@ static ssize_t set_pwm_enable(struct device *dev,
if (val == 0) {
int tmp;
/* make sure the fan is on when in on/off mode */
- tmp = it87_read_value(client, IT87_REG_FAN_CTL);
- it87_write_value(client, IT87_REG_FAN_CTL, tmp | (1 << nr));
+ tmp = it87_read_value(data, IT87_REG_FAN_CTL);
+ it87_write_value(data, IT87_REG_FAN_CTL, tmp | (1 << nr));
/* set on/off mode */
data->fan_main_ctrl &= ~(1 << nr);
- it87_write_value(client, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
} else if (val == 1) {
/* set SmartGuardian mode */
data->fan_main_ctrl |= (1 << nr);
- it87_write_value(client, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
/* set saved pwm value, clear FAN_CTLX PWM mode bit */
- it87_write_value(client, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
+ it87_write_value(data, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
} else {
mutex_unlock(&data->update_lock);
return -EINVAL;
@@ -643,8 +640,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
if (val < 0 || val > 255)
@@ -653,15 +649,14 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->manual_pwm_ctl[nr] = val;
if (data->fan_main_ctrl & (1 << nr))
- it87_write_value(client, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
+ it87_write_value(data, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_pwm_freq(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
int i;
@@ -672,9 +667,9 @@ static ssize_t set_pwm_freq(struct device *dev,
}
mutex_lock(&data->update_lock);
- data->fan_ctl = it87_read_value(client, IT87_REG_FAN_CTL) & 0x8f;
+ data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
data->fan_ctl |= i << 4;
- it87_write_value(client, IT87_REG_FAN_CTL, data->fan_ctl);
+ it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
mutex_unlock(&data->update_lock);
return count;
@@ -729,15 +724,14 @@ static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN16_TO_REG(val);
- it87_write_value(client, IT87_REG_FAN_MIN(nr),
+ it87_write_value(data, IT87_REG_FAN_MIN(nr),
data->fan_min[nr] & 0xff);
- it87_write_value(client, IT87_REG_FANX_MIN(nr),
+ it87_write_value(data, IT87_REG_FANX_MIN(nr),
data->fan_min[nr] >> 8);
mutex_unlock(&data->update_lock);
return count;
@@ -775,8 +769,7 @@ show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t
store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
u32 val;
val = simple_strtoul(buf, NULL, 10);
@@ -794,6 +787,14 @@ show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
static struct attribute *it87_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -835,6 +836,7 @@ static struct attribute *it87_attributes[] = {
&sensor_dev_attr_temp3_type.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_name.attr,
NULL
};
@@ -877,17 +879,36 @@ static const struct attribute_group it87_group_opt = {
};
/* SuperIO detection - will change isa_address if a chip is found */
-static int __init it87_find(unsigned short *address)
+static int __init it87_find(unsigned short *address,
+ struct it87_sio_data *sio_data)
{
int err = -ENODEV;
+ u16 chip_type;
superio_enter();
chip_type = superio_inw(DEVID);
- if (chip_type != IT8712F_DEVID
- && chip_type != IT8716F_DEVID
- && chip_type != IT8718F_DEVID
- && chip_type != IT8705F_DEVID)
- goto exit;
+
+ switch (chip_type) {
+ case IT8705F_DEVID:
+ sio_data->type = it87;
+ break;
+ case IT8712F_DEVID:
+ sio_data->type = it8712;
+ break;
+ case IT8716F_DEVID:
+ case IT8726F_DEVID:
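+ /* the IT8726F is handled as an IT8716F-compatible chip */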
+ sio_data->type = it8716;
+ break;
+ case IT8718F_DEVID:
+ sio_data->type = it8718;
+ break;
+ case 0xffff: /* No device at all */
+ goto exit;
+ default:
+ pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n",
+ chip_type);
+ goto exit;
+ }
superio_select(PME);
if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
@@ -911,7 +932,7 @@ static int __init it87_find(unsigned short *address)
superio_select(GPIO);
if (chip_type == it8718)
- vid_value = superio_inb(IT87_SIO_VID_REG);
+ sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
reg = superio_inb(IT87_SIO_PINX2_REG);
if (reg & (1 << 0))
@@ -925,18 +946,26 @@ exit:
return err;
}
-/* This function is called by i2c_probe */
-static int it87_detect(struct i2c_adapter *adapter)
+static int __devinit it87_probe(struct platform_device *pdev)
{
- struct i2c_client *new_client;
struct it87_data *data;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct it87_sio_data *sio_data = dev->platform_data;
int err = 0;
- const char *name;
int enable_pwm_interface;
-
- /* Reserve the ISA region */
- if (!request_region(isa_address, IT87_EXTENT,
- it87_isa_driver.driver.name)){
+ static const char *names[] = {
+ "it87",
+ "it8712",
+ "it8716",
+ "it8718",
+ };
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!request_region(res->start, IT87_EXTENT, DRVNAME)) {
+ dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+ (unsigned long)res->start,
+ (unsigned long)(res->start + IT87_EXTENT - 1));
err = -EBUSY;
goto ERROR0;
}
@@ -946,129 +975,104 @@ static int it87_detect(struct i2c_adapter *adapter)
goto ERROR1;
}
- new_client = &data->client;
- i2c_set_clientdata(new_client, data);
- new_client->addr = isa_address;
- new_client->adapter = adapter;
- new_client->driver = &it87_isa_driver;
+ data->addr = res->start;
+ data->type = sio_data->type;
+ data->name = names[sio_data->type];
/* Now, we do the remaining detection. */
- if ((it87_read_value(new_client, IT87_REG_CONFIG) & 0x80)
- || it87_read_value(new_client, IT87_REG_CHIPID) != 0x90) {
+ if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
+ || it87_read_value(data, IT87_REG_CHIPID) != 0x90) {
err = -ENODEV;
goto ERROR2;
}
- /* Determine the chip type. */
- switch (chip_type) {
- case IT8712F_DEVID:
- data->type = it8712;
- name = "it8712";
- break;
- case IT8716F_DEVID:
- data->type = it8716;
- name = "it8716";
- break;
- case IT8718F_DEVID:
- data->type = it8718;
- name = "it8718";
- break;
- default:
- data->type = it87;
- name = "it87";
- }
+ platform_set_drvdata(pdev, data);
- /* Fill in the remaining client fields and put it into the global list */
- strlcpy(new_client->name, name, I2C_NAME_SIZE);
mutex_init(&data->update_lock);
- /* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(new_client)))
- goto ERROR2;
-
/* Check PWM configuration */
- enable_pwm_interface = it87_check_pwm(new_client);
+ enable_pwm_interface = it87_check_pwm(dev);
/* Initialize the IT87 chip */
- it87_init_client(new_client, data);
+ it87_init_device(pdev);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &it87_group)))
- goto ERROR3;
+ if ((err = sysfs_create_group(&dev->kobj, &it87_group)))
+ goto ERROR2;
/* Do not create fan files for disabled fans */
if (data->type == it8716 || data->type == it8718) {
/* 16-bit tachometers */
if (data->has_fan & (1 << 0)) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_fan1_input16.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan1_min16.dev_attr)))
goto ERROR4;
}
if (data->has_fan & (1 << 1)) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_fan2_input16.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan2_min16.dev_attr)))
goto ERROR4;
}
if (data->has_fan & (1 << 2)) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_fan3_input16.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan3_min16.dev_attr)))
goto ERROR4;
}
} else {
/* 8-bit tachometers with clock divider */
if (data->has_fan & (1 << 0)) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_fan1_input.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan1_min.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan1_div.dev_attr)))
goto ERROR4;
}
if (data->has_fan & (1 << 1)) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_fan2_input.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan2_min.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan2_div.dev_attr)))
goto ERROR4;
}
if (data->has_fan & (1 << 2)) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_fan3_input.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan3_min.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_fan3_div.dev_attr)))
goto ERROR4;
}
}
if (enable_pwm_interface) {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(dev,
&sensor_dev_attr_pwm1_enable.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_pwm2_enable.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_pwm3_enable.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_pwm1.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_pwm2.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&sensor_dev_attr_pwm3.dev_attr))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&dev_attr_pwm1_freq))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&dev_attr_pwm2_freq))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&dev_attr_pwm3_freq)))
goto ERROR4;
}
@@ -1077,15 +1081,15 @@ static int it87_detect(struct i2c_adapter *adapter)
|| data->type == it8718) {
data->vrm = vid_which_vrm();
/* VID reading from Super-I/O config space if available */
- data->vid = vid_value;
- if ((err = device_create_file(&new_client->dev,
+ data->vid = sio_data->vid_value;
+ if ((err = device_create_file(dev,
&dev_attr_vrm))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(dev,
&dev_attr_cpu0_vid)))
goto ERROR4;
}
- data->class_dev = hwmon_device_register(&new_client->dev);
+ data->class_dev = hwmon_device_register(dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
goto ERROR4;
@@ -1094,31 +1098,27 @@ static int it87_detect(struct i2c_adapter *adapter)
return 0;
ERROR4:
- sysfs_remove_group(&new_client->dev.kobj, &it87_group);
- sysfs_remove_group(&new_client->dev.kobj, &it87_group_opt);
-ERROR3:
- i2c_detach_client(new_client);
+ sysfs_remove_group(&dev->kobj, &it87_group);
+ sysfs_remove_group(&dev->kobj, &it87_group_opt);
ERROR2:
+ platform_set_drvdata(pdev, NULL);
kfree(data);
ERROR1:
- release_region(isa_address, IT87_EXTENT);
+ release_region(res->start, IT87_EXTENT);
ERROR0:
return err;
}
-static int it87_detach_client(struct i2c_client *client)
+static int __devexit it87_remove(struct platform_device *pdev)
{
- struct it87_data *data = i2c_get_clientdata(client);
- int err;
+ struct it87_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->class_dev);
- sysfs_remove_group(&client->dev.kobj, &it87_group);
- sysfs_remove_group(&client->dev.kobj, &it87_group_opt);
+ sysfs_remove_group(&pdev->dev.kobj, &it87_group);
+ sysfs_remove_group(&pdev->dev.kobj, &it87_group_opt);
- if ((err = i2c_detach_client(client)))
- return err;
-
- release_region(client->addr, IT87_EXTENT);
+ release_region(data->addr, IT87_EXTENT);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
@@ -1127,28 +1127,29 @@ static int it87_detach_client(struct i2c_client *client)
/* Must be called with data->update_lock held, except during initialization.
We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
would slow down the IT87 access and should not be necessary. */
-static int it87_read_value(struct i2c_client *client, u8 reg)
+static int it87_read_value(struct it87_data *data, u8 reg)
{
- outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET);
- return inb_p(client->addr + IT87_DATA_REG_OFFSET);
+ outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+ return inb_p(data->addr + IT87_DATA_REG_OFFSET);
}
/* Must be called with data->update_lock held, except during initialization.
We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
would slow down the IT87 access and should not be necessary. */
-static void it87_write_value(struct i2c_client *client, u8 reg, u8 value)
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
{
- outb_p(reg, client->addr + IT87_ADDR_REG_OFFSET);
- outb_p(value, client->addr + IT87_DATA_REG_OFFSET);
+ outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+ outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
}
/* Return 1 if and only if the PWM interface is safe to use */
-static int it87_check_pwm(struct i2c_client *client)
+static int __devinit it87_check_pwm(struct device *dev)
{
+ struct it87_data *data = dev_get_drvdata(dev);
/* Some BIOSes fail to correctly configure the IT87 fans. All fans off
* and polarity set to active low is a sign that this is the case, so we
* disable pwm control to protect the user. */
- int tmp = it87_read_value(client, IT87_REG_FAN_CTL);
+ int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
if ((tmp & 0x87) == 0) {
if (fix_pwm_polarity) {
/* The user asks us to attempt a chip reconfiguration.
@@ -1158,7 +1159,7 @@ static int it87_check_pwm(struct i2c_client *client)
u8 pwm[3];
for (i = 0; i < 3; i++)
- pwm[i] = it87_read_value(client,
+ pwm[i] = it87_read_value(data,
IT87_REG_PWM(i));
/* If any fan is in automatic pwm mode, the polarity
@@ -1166,26 +1167,26 @@ static int it87_check_pwm(struct i2c_client *client)
* better don't change anything (but still disable the
* PWM interface). */
if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
- dev_info(&client->dev, "Reconfiguring PWM to "
+ dev_info(dev, "Reconfiguring PWM to "
"active high polarity\n");
- it87_write_value(client, IT87_REG_FAN_CTL,
+ it87_write_value(data, IT87_REG_FAN_CTL,
tmp | 0x87);
for (i = 0; i < 3; i++)
- it87_write_value(client,
+ it87_write_value(data,
IT87_REG_PWM(i),
0x7f & ~pwm[i]);
return 1;
}
- dev_info(&client->dev, "PWM configuration is "
+ dev_info(dev, "PWM configuration is "
"too broken to be fixed\n");
}
- dev_info(&client->dev, "Detected broken BIOS "
+ dev_info(dev, "Detected broken BIOS "
"defaults, disabling PWM interface\n");
return 0;
} else if (fix_pwm_polarity) {
- dev_info(&client->dev, "PWM configuration looks "
+ dev_info(dev, "PWM configuration looks "
"sane, won't touch\n");
}
@@ -1193,8 +1194,9 @@ static int it87_check_pwm(struct i2c_client *client)
}
/* Called when we have found a new IT87. */
-static void it87_init_client(struct i2c_client *client, struct it87_data *data)
+static void __devinit it87_init_device(struct platform_device *pdev)
{
+ struct it87_data *data = platform_get_drvdata(pdev);
int tmp, i;
/* initialize to sane defaults:
@@ -1214,48 +1216,48 @@ static void it87_init_client(struct i2c_client *client, struct it87_data *data)
* means -1 degree C, which surprisingly doesn't trigger an alarm,
* but is still confusing, so change to 127 degrees C. */
for (i = 0; i < 8; i++) {
- tmp = it87_read_value(client, IT87_REG_VIN_MIN(i));
+ tmp = it87_read_value(data, IT87_REG_VIN_MIN(i));
if (tmp == 0xff)
- it87_write_value(client, IT87_REG_VIN_MIN(i), 0);
+ it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
}
for (i = 0; i < 3; i++) {
- tmp = it87_read_value(client, IT87_REG_TEMP_HIGH(i));
+ tmp = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
if (tmp == 0xff)
- it87_write_value(client, IT87_REG_TEMP_HIGH(i), 127);
+ it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
}
/* Check if temperature channels are reset manually or for some reason */
- tmp = it87_read_value(client, IT87_REG_TEMP_ENABLE);
+ tmp = it87_read_value(data, IT87_REG_TEMP_ENABLE);
if ((tmp & 0x3f) == 0) {
/* Temp1,Temp3=thermistor; Temp2=thermal diode */
tmp = (tmp & 0xc0) | 0x2a;
- it87_write_value(client, IT87_REG_TEMP_ENABLE, tmp);
+ it87_write_value(data, IT87_REG_TEMP_ENABLE, tmp);
}
data->sensor = tmp;
/* Check if voltage monitors are reset manually or by some reason */
- tmp = it87_read_value(client, IT87_REG_VIN_ENABLE);
+ tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
if ((tmp & 0xff) == 0) {
/* Enable all voltage monitors */
- it87_write_value(client, IT87_REG_VIN_ENABLE, 0xff);
+ it87_write_value(data, IT87_REG_VIN_ENABLE, 0xff);
}
/* Check if tachometers are reset manually or by some reason */
- data->fan_main_ctrl = it87_read_value(client, IT87_REG_FAN_MAIN_CTRL);
+ data->fan_main_ctrl = it87_read_value(data, IT87_REG_FAN_MAIN_CTRL);
if ((data->fan_main_ctrl & 0x70) == 0) {
/* Enable all fan tachometers */
data->fan_main_ctrl |= 0x70;
- it87_write_value(client, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
}
data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
/* Set tachometers to 16-bit mode if needed */
if (data->type == it8716 || data->type == it8718) {
- tmp = it87_read_value(client, IT87_REG_FAN_16BIT);
+ tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
if (~tmp & 0x07 & data->has_fan) {
- dev_dbg(&client->dev,
+ dev_dbg(&pdev->dev,
"Setting fan1-3 to 16-bit mode\n");
- it87_write_value(client, IT87_REG_FAN_16BIT,
+ it87_write_value(data, IT87_REG_FAN_16BIT,
tmp | 0x07);
}
}
@@ -1265,7 +1267,7 @@ static void it87_init_client(struct i2c_client *client, struct it87_data *data)
for (i = 0; i < 3; i++) {
if (data->fan_main_ctrl & (1 << i)) {
/* pwm mode */
- tmp = it87_read_value(client, IT87_REG_PWM(i));
+ tmp = it87_read_value(data, IT87_REG_PWM(i));
if (tmp & 0x80) {
/* automatic pwm - not yet implemented, but
* leave the settings made by the BIOS alone
@@ -1279,15 +1281,14 @@ static void it87_init_client(struct i2c_client *client, struct it87_data *data)
}
/* Start monitoring */
- it87_write_value(client, IT87_REG_CONFIG,
- (it87_read_value(client, IT87_REG_CONFIG) & 0x36)
+ it87_write_value(data, IT87_REG_CONFIG,
+ (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
| (update_vbat ? 0x41 : 0x01));
}
static struct it87_data *it87_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct it87_data *data = i2c_get_clientdata(client);
+ struct it87_data *data = dev_get_drvdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -1298,20 +1299,20 @@ static struct it87_data *it87_update_device(struct device *dev)
if (update_vbat) {
/* Cleared after each update, so reenable. Value
returned by this read will be previous value */
- it87_write_value(client, IT87_REG_CONFIG,
- it87_read_value(client, IT87_REG_CONFIG) | 0x40);
+ it87_write_value(data, IT87_REG_CONFIG,
+ it87_read_value(data, IT87_REG_CONFIG) | 0x40);
}
for (i = 0; i <= 7; i++) {
data->in[i] =
- it87_read_value(client, IT87_REG_VIN(i));
+ it87_read_value(data, IT87_REG_VIN(i));
data->in_min[i] =
- it87_read_value(client, IT87_REG_VIN_MIN(i));
+ it87_read_value(data, IT87_REG_VIN_MIN(i));
data->in_max[i] =
- it87_read_value(client, IT87_REG_VIN_MAX(i));
+ it87_read_value(data, IT87_REG_VIN_MAX(i));
}
/* in8 (battery) has no limit registers */
data->in[8] =
- it87_read_value(client, IT87_REG_VIN(8));
+ it87_read_value(data, IT87_REG_VIN(8));
for (i = 0; i < 3; i++) {
/* Skip disabled fans */
@@ -1319,46 +1320,47 @@ static struct it87_data *it87_update_device(struct device *dev)
continue;
data->fan_min[i] =
- it87_read_value(client, IT87_REG_FAN_MIN(i));
- data->fan[i] = it87_read_value(client,
+ it87_read_value(data, IT87_REG_FAN_MIN(i));
+ data->fan[i] = it87_read_value(data,
IT87_REG_FAN(i));
/* Add high byte if in 16-bit mode */
if (data->type == it8716 || data->type == it8718) {
- data->fan[i] |= it87_read_value(client,
+ data->fan[i] |= it87_read_value(data,
IT87_REG_FANX(i)) << 8;
- data->fan_min[i] |= it87_read_value(client,
+ data->fan_min[i] |= it87_read_value(data,
IT87_REG_FANX_MIN(i)) << 8;
}
}
for (i = 0; i < 3; i++) {
data->temp[i] =
- it87_read_value(client, IT87_REG_TEMP(i));
+ it87_read_value(data, IT87_REG_TEMP(i));
data->temp_high[i] =
- it87_read_value(client, IT87_REG_TEMP_HIGH(i));
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
data->temp_low[i] =
- it87_read_value(client, IT87_REG_TEMP_LOW(i));
+ it87_read_value(data, IT87_REG_TEMP_LOW(i));
}
/* Newer chips don't have clock dividers */
if ((data->has_fan & 0x07) && data->type != it8716
&& data->type != it8718) {
- i = it87_read_value(client, IT87_REG_FAN_DIV);
+ i = it87_read_value(data, IT87_REG_FAN_DIV);
data->fan_div[0] = i & 0x07;
data->fan_div[1] = (i >> 3) & 0x07;
data->fan_div[2] = (i & 0x40) ? 3 : 1;
}
data->alarms =
- it87_read_value(client, IT87_REG_ALARM1) |
- (it87_read_value(client, IT87_REG_ALARM2) << 8) |
- (it87_read_value(client, IT87_REG_ALARM3) << 16);
- data->fan_main_ctrl = it87_read_value(client, IT87_REG_FAN_MAIN_CTRL);
- data->fan_ctl = it87_read_value(client, IT87_REG_FAN_CTL);
-
- data->sensor = it87_read_value(client, IT87_REG_TEMP_ENABLE);
+ it87_read_value(data, IT87_REG_ALARM1) |
+ (it87_read_value(data, IT87_REG_ALARM2) << 8) |
+ (it87_read_value(data, IT87_REG_ALARM3) << 16);
+ data->fan_main_ctrl = it87_read_value(data,
+ IT87_REG_FAN_MAIN_CTRL);
+ data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
+
+ data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
/* The 8705 does not have VID capability */
if (data->type == it8712 || data->type == it8716) {
- data->vid = it87_read_value(client, IT87_REG_VID);
+ data->vid = it87_read_value(data, IT87_REG_VID);
/* The older IT8712F revisions had only 5 VID pins,
but we assume it is always safe to read 6 bits. */
data->vid &= 0x3f;
@@ -1372,24 +1374,85 @@ static struct it87_data *it87_update_device(struct device *dev)
return data;
}
+static int __init it87_device_add(unsigned short address,
+ const struct it87_sio_data *sio_data)
+{
+ struct resource res = {
+ .start = address,
+ .end = address + IT87_EXTENT - 1,
+ .name = DRVNAME,
+ .flags = IORESOURCE_IO,
+ };
+ int err;
+
+ pdev = platform_device_alloc(DRVNAME, address);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ goto exit;
+ }
+
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device resource addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
+ }
+
+ err = platform_device_add_data(pdev, sio_data,
+ sizeof(struct it87_sio_data));
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+ err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
static int __init sm_it87_init(void)
{
- int res;
+ int err;
+ unsigned short isa_address = 0;
+ struct it87_sio_data sio_data;
+
+ err = it87_find(&isa_address, &sio_data);
+ if (err)
+ return err;
+ err = platform_driver_register(&it87_driver);
+ if (err)
+ return err;
- if ((res = it87_find(&isa_address)))
- return res;
- return i2c_isa_add_driver(&it87_isa_driver);
+ err = it87_device_add(isa_address, &sio_data);
+ if (err) {
+ platform_driver_unregister(&it87_driver);
+ return err;
+ }
+
+ return 0;
}
static void __exit sm_it87_exit(void)
{
- i2c_isa_del_driver(&it87_isa_driver);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&it87_driver);
}
MODULE_AUTHOR("Chris Gauthron <chrisg@0-in.com>, "
"Jean Delvare <khali@linux-fr.org>");
-MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F, SiS950 driver");
+MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F/8726F, SiS950 driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
module_param(fix_pwm_polarity, bool, 0);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index d69f3cf07122..2162d69a8c06 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -364,7 +364,7 @@ static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
@@ -383,7 +383,7 @@ static struct attribute *lm63_attributes[] = {
&dev_attr_temp2_crit_hyst.attr,
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 7eaae3834e15..275d392eca61 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -96,6 +96,10 @@ static int __devinit lm70_probe(struct spi_device *spi)
struct lm70 *p_lm70;
int status;
+ /* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
+ if ((spi->mode & (SPI_CPOL|SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
+ return -EINVAL;
+
p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
if (!p_lm70)
return -ENOMEM;
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index feb87b41e986..654c0f73464d 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -223,14 +223,14 @@ static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp, NULL, 8);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp4_input_fault, S_IRUGO, show_alarm, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
@@ -245,7 +245,7 @@ static struct attribute *lm83_attributes[] = {
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
@@ -266,9 +266,9 @@ static struct attribute *lm83_attributes_opt[] = {
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
};
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 6882ce75feee..48833fff4920 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -43,6 +43,13 @@
* variants. The extra address and features of the MAX6659 are not
* supported by this driver.
*
+ * This driver also supports the MAX6680 and MAX6681, two other sensor
+ * chips made by Maxim. These are quite similar to the other Maxim
+ * chips. Complete datasheet can be obtained at:
+ * http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+ * The MAX6680 and MAX6681 only differ in the pinout so they can be
+ * treated identically.
+ *
* This driver also supports the ADT7461 chip from Analog Devices but
* only in its "compatibility mode". If an ADT7461 chip is found but
* is configured in non-compatible mode (where its temperature
@@ -84,20 +91,25 @@
/*
* Addresses to scan
* Address is fully defined internally and cannot be changed except for
- * MAX6659.
+ * MAX6659, MAX6680 and MAX6681.
* LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, MAX6657 and MAX6658
* have address 0x4c.
* ADM1032-2, ADT7461-2, LM89-1, and LM99-1 have address 0x4d.
* MAX6659 can have address 0x4c, 0x4d or 0x4e (unsupported).
+ * MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
+ * 0x4c, 0x4d or 0x4e.
*/
-static unsigned short normal_i2c[] = { 0x4c, 0x4d, I2C_CLIENT_END };
+static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a,
+ 0x29, 0x2a, 0x2b,
+ 0x4c, 0x4d, 0x4e,
+ I2C_CLIENT_END };
/*
* Insmod parameters
*/
-I2C_CLIENT_INSMOD_6(lm90, adm1032, lm99, lm86, max6657, adt7461);
+I2C_CLIENT_INSMOD_7(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680);
/*
* The LM90 registers
@@ -359,7 +371,7 @@ static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, show_temphyst, NULL, 4);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 5);
@@ -381,7 +393,7 @@ static struct attribute *lm90_attributes[] = {
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
@@ -429,7 +441,7 @@ static DEVICE_ATTR(pec, S_IWUSR | S_IRUGO, show_pec, set_pec);
*/
/* The ADM1032 supports PEC but not on write byte transactions, so we need
- to explicitely ask for a transaction without PEC. */
+ to explicitly ask for a transaction without PEC. */
static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
{
return i2c_smbus_xfer(client->adapter, client->addr,
@@ -525,7 +537,8 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
&reg_convrate) < 0)
goto exit_free;
- if (man_id == 0x01) { /* National Semiconductor */
+ if ((address == 0x4C || address == 0x4D)
+ && man_id == 0x01) { /* National Semiconductor */
u8 reg_config2;
if (lm90_read_reg(new_client, LM90_REG_R_CONFIG2,
@@ -548,7 +561,8 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
}
}
} else
- if (man_id == 0x41) { /* Analog Devices */
+ if ((address == 0x4C || address == 0x4D)
+ && man_id == 0x41) { /* Analog Devices */
if ((chip_id & 0xF0) == 0x40 /* ADM1032 */
&& (reg_config1 & 0x3F) == 0x00
&& reg_convrate <= 0x0A) {
@@ -562,18 +576,30 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
} else
if (man_id == 0x4D) { /* Maxim */
/*
- * The Maxim variants do NOT have a chip_id register.
- * Reading from that address will return the last read
- * value, which in our case is those of the man_id
- * register. Likewise, the config1 register seems to
- * lack a low nibble, so the value will be those of the
- * previous read, so in our case those of the man_id
- * register.
+ * The MAX6657, MAX6658 and MAX6659 do NOT have a
+ * chip_id register. Reading from that address will
+ * return the last read value, which in our case is
+ * those of the man_id register. Likewise, the config1
+ * register seems to lack a low nibble, so the value
+ * will be those of the previous read, so in our case
+ * those of the man_id register.
*/
if (chip_id == man_id
+ && (address == 0x4C || address == 0x4D)
&& (reg_config1 & 0x1F) == (man_id & 0x0F)
&& reg_convrate <= 0x09) {
kind = max6657;
+ } else
+ /* The chip_id register of the MAX6680 and MAX6681
+ * holds the revision of the chip.
+ * The lowest bit of the config1 register is unused
+ * and should return zero when read, so should the
+ * second to last bit of config1 (software reset)
+ */
+ if (chip_id == 0x01
+ && (reg_config1 & 0x03) == 0x00
+ && reg_convrate <= 0x07) {
+ kind = max6680;
}
}
@@ -599,6 +625,8 @@ static int lm90_detect(struct i2c_adapter *adapter, int address, int kind)
name = "lm86";
} else if (kind == max6657) {
name = "max6657";
+ } else if (kind == max6680) {
+ name = "max6680";
} else if (kind == adt7461) {
name = "adt7461";
}
@@ -646,7 +674,8 @@ exit:
static void lm90_init_client(struct i2c_client *client)
{
- u8 config;
+ u8 config, config_orig;
+ struct lm90_data *data = i2c_get_clientdata(client);
/*
* Start the conversions.
@@ -657,9 +686,20 @@ static void lm90_init_client(struct i2c_client *client)
dev_warn(&client->dev, "Initialization failed!\n");
return;
}
- if (config & 0x40)
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
- config & 0xBF); /* run */
+ config_orig = config;
+
+ /*
+ * Put MAX6680/MAX6681 into extended resolution (bit 0x10,
+ * 0.125 degree resolution) and range (0x08, extend range
+ * to -64 degree) mode for the remote temperature sensor.
+ */
+ if (data->kind == max6680) {
+ config |= 0x18;
+ }
+
+ config &= 0xBF; /* run */
+ if (config != config_orig) /* Only write if changed */
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
}
static int lm90_detach_client(struct i2c_client *client)
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
new file mode 100644
index 000000000000..23edf4fe4221
--- /dev/null
+++ b/drivers/hwmon/lm93.c
@@ -0,0 +1,2655 @@
+/*
+ lm93.c - Part of lm_sensors, Linux kernel modules for hardware monitoring
+
+ Author/Maintainer: Mark M. Hoffman <mhoffman@lightlink.com>
+ Copyright (c) 2004 Utilitek Systems, Inc.
+
+ derived in part from lm78.c:
+ Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
+
+ derived in part from lm85.c:
+ Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com>
+ Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de>
+
+ derived in part from w83l785ts.c:
+ Copyright (c) 2003-2004 Jean Delvare <khali@linux-fr.org>
+
+ Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com>
+ Copyright (c) 2005 Aspen Systems, Inc.
+
+ Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org>
+ Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab
+
+ Modified for mainline integration by Hans J. Koch <hjk@linutronix.de>
+ Copyright (c) 2007 Hans J. Koch, Linutronix GmbH
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+/* LM93 REGISTER ADDRESSES */
+
+/* miscellaneous */
+#define LM93_REG_MFR_ID 0x3e
+#define LM93_REG_VER 0x3f
+#define LM93_REG_STATUS_CONTROL 0xe2
+#define LM93_REG_CONFIG 0xe3
+#define LM93_REG_SLEEP_CONTROL 0xe4
+
+/* alarm values start here */
+#define LM93_REG_HOST_ERROR_1 0x48
+
+/* voltage inputs: in1-in16 (nr => 0-15) */
+#define LM93_REG_IN(nr) (0x56 + (nr))
+#define LM93_REG_IN_MIN(nr) (0x90 + (nr) * 2)
+#define LM93_REG_IN_MAX(nr) (0x91 + (nr) * 2)
+
+/* temperature inputs: temp1-temp4 (nr => 0-3) */
+#define LM93_REG_TEMP(nr) (0x50 + (nr))
+#define LM93_REG_TEMP_MIN(nr) (0x78 + (nr) * 2)
+#define LM93_REG_TEMP_MAX(nr) (0x79 + (nr) * 2)
+
+/* temp[1-4]_auto_boost (nr => 0-3) */
+#define LM93_REG_BOOST(nr) (0x80 + (nr))
+
+/* #PROCHOT inputs: prochot1-prochot2 (nr => 0-1) */
+#define LM93_REG_PROCHOT_CUR(nr) (0x67 + (nr) * 2)
+#define LM93_REG_PROCHOT_AVG(nr) (0x68 + (nr) * 2)
+#define LM93_REG_PROCHOT_MAX(nr) (0xb0 + (nr))
+
+/* fan tach inputs: fan1-fan4 (nr => 0-3) */
+#define LM93_REG_FAN(nr) (0x6e + (nr) * 2)
+#define LM93_REG_FAN_MIN(nr) (0xb4 + (nr) * 2)
+
+/* pwm outputs: pwm1-pwm2 (nr => 0-1, reg => 0-3) */
+#define LM93_REG_PWM_CTL(nr,reg) (0xc8 + (reg) + (nr) * 4)
+#define LM93_PWM_CTL1 0x0
+#define LM93_PWM_CTL2 0x1
+#define LM93_PWM_CTL3 0x2
+#define LM93_PWM_CTL4 0x3
+
+/* GPIO input state */
+#define LM93_REG_GPI 0x6b
+
+/* vid inputs: vid1-vid2 (nr => 0-1) */
+#define LM93_REG_VID(nr) (0x6c + (nr))
+
+/* vccp1 & vccp2: VID relative inputs (nr => 0-1) */
+#define LM93_REG_VCCP_LIMIT_OFF(nr) (0xb2 + (nr))
+
+/* temp[1-4]_auto_boost_hyst */
+#define LM93_REG_BOOST_HYST_12 0xc0
+#define LM93_REG_BOOST_HYST_34 0xc1
+#define LM93_REG_BOOST_HYST(nr) (0xc0 + (nr)/2)
+
+/* temp[1-4]_auto_pwm_[min|hyst] */
+#define LM93_REG_PWM_MIN_HYST_12 0xc3
+#define LM93_REG_PWM_MIN_HYST_34 0xc4
+#define LM93_REG_PWM_MIN_HYST(nr) (0xc3 + (nr)/2)
+
+/* prochot_override & prochot_interval */
+#define LM93_REG_PROCHOT_OVERRIDE 0xc6
+#define LM93_REG_PROCHOT_INTERVAL 0xc7
+
+/* temp[1-4]_auto_base (nr => 0-3) */
+#define LM93_REG_TEMP_BASE(nr) (0xd0 + (nr))
+
+/* temp[1-4]_auto_offsets (step => 0-11) */
+#define LM93_REG_TEMP_OFFSET(step) (0xd4 + (step))
+
+/* #PROCHOT & #VRDHOT PWM ramp control */
+#define LM93_REG_PWM_RAMP_CTL 0xbf
+
+/* miscellaneous */
+#define LM93_REG_SFC1 0xbc
+#define LM93_REG_SFC2 0xbd
+#define LM93_REG_GPI_VID_CTL 0xbe
+#define LM93_REG_SF_TACH_TO_PWM 0xe0
+
+/* error masks */
+#define LM93_REG_GPI_ERR_MASK 0xec
+#define LM93_REG_MISC_ERR_MASK 0xed
+
+/* LM93 REGISTER VALUES */
+#define LM93_MFR_ID 0x73
+#define LM93_MFR_ID_PROTOTYPE 0x72
+
+/* SMBus capabilities */
+#define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA)
+#define LM93_SMBUS_FUNC_MIN (I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA)
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
+
+/* Insmod parameters */
+I2C_CLIENT_INSMOD_1(lm93);
+
+static int disable_block;
+module_param(disable_block, bool, 0);
+MODULE_PARM_DESC(disable_block,
+ "Set to non-zero to disable SMBus block data transactions.");
+
+static int init;
+module_param(init, bool, 0);
+MODULE_PARM_DESC(init, "Set to non-zero to force chip initialization.");
+
+static int vccp_limit_type[2] = {0,0};
+module_param_array(vccp_limit_type, int, NULL, 0);
+MODULE_PARM_DESC(vccp_limit_type, "Configures in7 and in8 limit modes.");
+
+static int vid_agtl;
+module_param(vid_agtl, int, 0);
+MODULE_PARM_DESC(vid_agtl, "Configures VID pin input thresholds.");
+
+/* Driver data */
+static struct i2c_driver lm93_driver;
+
+/* LM93 BLOCK READ COMMANDS */
+static const struct { u8 cmd; u8 len; } lm93_block_read_cmds[12] = {
+ { 0xf2, 8 },
+ { 0xf3, 8 },
+ { 0xf4, 6 },
+ { 0xf5, 16 },
+ { 0xf6, 4 },
+ { 0xf7, 8 },
+ { 0xf8, 12 },
+ { 0xf9, 32 },
+ { 0xfa, 8 },
+ { 0xfb, 8 },
+ { 0xfc, 16 },
+ { 0xfd, 9 },
+};
+
+/* ALARMS: SYSCTL format described further below
+ REG: 64 bits in 8 registers, as immediately below */
+struct block1_t {
+ u8 host_status_1;
+ u8 host_status_2;
+ u8 host_status_3;
+ u8 host_status_4;
+ u8 p1_prochot_status;
+ u8 p2_prochot_status;
+ u8 gpi_status;
+ u8 fan_status;
+};
+
+/*
+ * Client-specific data
+ */
+struct lm93_data {
+ struct i2c_client client;
+ struct class_device *class_dev;
+
+ struct mutex update_lock;
+ unsigned long last_updated; /* In jiffies */
+
+ /* client update function */
+ void (*update)(struct lm93_data *, struct i2c_client *);
+
+ char valid; /* !=0 if following fields are valid */
+
+ /* register values, arranged by block read groups */
+ struct block1_t block1;
+
+ /* temp1 - temp4: unfiltered readings
+ temp1 - temp2: filtered readings */
+ u8 block2[6];
+
+ /* vin1 - vin16: readings */
+ u8 block3[16];
+
+ /* prochot1 - prochot2: readings */
+ struct {
+ u8 cur;
+ u8 avg;
+ } block4[2];
+
+ /* fan counts 1-4 => 14-bits, LE, *left* justified */
+ u16 block5[4];
+
+ /* block6 has a lot of data we don't need */
+ struct {
+ u8 min;
+ u8 max;
+ } temp_lim[3];
+
+ /* vin1 - vin16: low and high limits */
+ struct {
+ u8 min;
+ u8 max;
+ } block7[16];
+
+ /* fan count limits 1-4 => same format as block5 */
+ u16 block8[4];
+
+ /* pwm control registers (2 pwms, 4 regs) */
+ u8 block9[2][4];
+
+ /* auto/pwm base temp and offset temp registers */
+ struct {
+ u8 base[4];
+ u8 offset[12];
+ } block10;
+
+ /* master config register */
+ u8 config;
+
+ /* VID1 & VID2 => register format, 6-bits, right justified */
+ u8 vid[2];
+
+ /* prochot1 - prochot2: limits */
+ u8 prochot_max[2];
+
+ /* vccp1 & vccp2 (in7 & in8): VID relative limits (register format) */
+ u8 vccp_limits[2];
+
+ /* GPIO input state (register format, i.e. inverted) */
+ u8 gpi;
+
+ /* #PROCHOT override (register format) */
+ u8 prochot_override;
+
+ /* #PROCHOT intervals (register format) */
+ u8 prochot_interval;
+
+ /* Fan Boost Temperatures (register format) */
+ u8 boost[4];
+
+ /* Fan Boost Hysteresis (register format) */
+ u8 boost_hyst[2];
+
+ /* Temperature Zone Min. PWM & Hysteresis (register format) */
+ u8 auto_pwm_min_hyst[2];
+
+ /* #PROCHOT & #VRDHOT PWM Ramp Control */
+ u8 pwm_ramp_ctl;
+
+ /* miscellaneous setup regs */
+ u8 sfc1;
+ u8 sfc2;
+ u8 sf_tach_to_pwm;
+
+ /* The two PWM CTL2 registers can read something other than what was
+ last written for the OVR_DC field (duty cycle override). So, we
+ save the user-commanded value here. */
+ u8 pwm_override[2];
+};
+
+/* VID: mV
+ REG: 6-bits, right justified, *always* using Intel VRM/VRD 10 */
+static int LM93_VID_FROM_REG(u8 reg)
+{
+ return vid_from_reg((reg & 0x3f), 100);
+}
+
+/* min, max, and nominal register values, per channel (u8) */
+static const u8 lm93_vin_reg_min[16] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xae,
+};
+static const u8 lm93_vin_reg_max[16] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1,
+};
+/* Values from the datasheet. They're here for documentation only.
+static const u8 lm93_vin_reg_nom[16] = {
+ 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0,
+ 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x40, 0xc0,
+};
+*/
+
+/* min, max, and nominal voltage readings, per channel (mV)*/
+static const unsigned long lm93_vin_val_min[16] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 3000,
+};
+
+static const unsigned long lm93_vin_val_max[16] = {
+ 1236, 1236, 1236, 1600, 2000, 2000, 1600, 1600,
+ 4400, 6500, 3333, 2625, 1312, 1312, 1236, 3600,
+};
+/* Values from the datasheet. They're here for documentation only.
+static const unsigned long lm93_vin_val_nom[16] = {
+ 927, 927, 927, 1200, 1500, 1500, 1200, 1200,
+ 3300, 5000, 2500, 1969, 984, 984, 309, 3300,
+};
+*/
+
+static unsigned LM93_IN_FROM_REG(int nr, u8 reg)
+{
+ const long uV_max = lm93_vin_val_max[nr] * 1000;
+ const long uV_min = lm93_vin_val_min[nr] * 1000;
+
+ const long slope = (uV_max - uV_min) /
+ (lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]);
+ const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
+
+ return (slope * reg + intercept + 500) / 1000;
+}
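+
+/* Illustrative worked example (derived from the conversion above, not a
+   datasheet value): for in4 (nr == 3, 0..1600 mV over registers 0x00..0xff)
+   the slope is 1600000 / 255 = 6274 uV per LSB, so the nominal register
+   value 0xc0 (192) reads back as (6274 * 192 + 500) / 1000 = 1205 mV,
+   close to the 1200 mV nominal listed above. */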
+
+/* IN: mV, limits determined by channel nr
+ REG: scaling determined by channel nr */
+static u8 LM93_IN_TO_REG(int nr, unsigned val)
+{
+ /* range limit */
+ const long mV = SENSORS_LIMIT(val,
+ lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
+
+ /* try not to lose too much precision here */
+ const long uV = mV * 1000;
+ const long uV_max = lm93_vin_val_max[nr] * 1000;
+ const long uV_min = lm93_vin_val_min[nr] * 1000;
+
+ /* convert */
+ const long slope = (uV_max - uV_min) /
+ (lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]);
+ const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
+
+ u8 result = ((uV - intercept + (slope/2)) / slope);
+ result = SENSORS_LIMIT(result,
+ lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
+ return result;
+}
+
+/* vid in mV, upper == 0 indicates low limit, otherwise upper limit */
+static unsigned LM93_IN_REL_FROM_REG(u8 reg, int upper, int vid)
+{
+ const long uV_offset = upper ? (((reg >> 4 & 0x0f) + 1) * 12500) :
+ (((reg >> 0 & 0x0f) + 1) * -25000);
+ const long uV_vid = vid * 1000;
+ return (uV_vid + uV_offset + 5000) / 10000;
+}
+
+#define LM93_IN_MIN_FROM_REG(reg,vid) LM93_IN_REL_FROM_REG(reg,0,vid)
+#define LM93_IN_MAX_FROM_REG(reg,vid) LM93_IN_REL_FROM_REG(reg,1,vid)
+
+/* vid in mV, upper == 0 indicates low limit, otherwise upper limit
+ upper also determines which nibble of the register is returned
+ (the other nibble will be 0x0) */
+static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid)
+{
+ long uV_offset = vid * 1000 - val * 10000;
+ if (upper) {
+ uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000);
+ return (u8)((uV_offset / 12500 - 1) << 4);
+ } else {
+ uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000);
+ return (u8)((uV_offset / -25000 - 1) << 0);
+ }
+}
+
+/* TEMP: 1/1000 degrees C (-128C to +127C)
+ REG: 1C/bit, two's complement */
+static int LM93_TEMP_FROM_REG(u8 reg)
+{
+ return (s8)reg * 1000;
+}
+
+#define LM93_TEMP_MIN (-128000)
+#define LM93_TEMP_MAX ( 127000)
+
+/* TEMP: 1/1000 degrees C (-128C to +127C)
+ REG: 1C/bit, two's complement */
+static u8 LM93_TEMP_TO_REG(int temp)
+{
+ int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
+ ntemp += (ntemp<0 ? -500 : 500);
+ return (u8)(ntemp / 1000);
+}
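+
+/* Illustrative: 25500 (25.5 C) is clamped and rounded to (25500 + 500) /
+   1000 = 26 (0x1a); -1500 (-1.5 C) becomes (-1500 - 500) / 1000 = -2,
+   stored as the two's complement value 0xfe. */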
+
+/* Determine 4-bit temperature offset resolution */
+static int LM93_TEMP_OFFSET_MODE_FROM_REG(u8 sfc2, int nr)
+{
+ /* mode: 0 => 1C/bit, nonzero => 0.5C/bit */
+ return sfc2 & (nr < 2 ? 0x10 : 0x20);
+}
+
+/* This function is common to all 4-bit temperature offsets
+ reg is 4 bits right justified
+ mode 0 => 1C/bit, mode !0 => 0.5C/bit */
+static int LM93_TEMP_OFFSET_FROM_REG(u8 reg, int mode)
+{
+ return (reg & 0x0f) * (mode ? 5 : 10);
+}
+
+#define LM93_TEMP_OFFSET_MIN ( 0)
+#define LM93_TEMP_OFFSET_MAX0 (150)
+#define LM93_TEMP_OFFSET_MAX1 ( 75)
+
+/* This function is common to all 4-bit temperature offsets
+ returns 4 bits right justified
+ mode 0 => 1C/bit, mode !0 => 0.5C/bit */
+static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode)
+{
+ int factor = mode ? 5 : 10;
+
+ off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN,
+ mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0);
+ return (u8)((off + factor/2) / factor);
+}
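+
+/* Illustrative: an offset of 25 (2.5 C) in 0.5C/bit mode (factor 5) maps to
+   (25 + 2) / 5 = 5, and LM93_TEMP_OFFSET_FROM_REG(5, 1) gives back 25. */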
+
+/* 0 <= nr <= 3 */
+static int LM93_TEMP_AUTO_OFFSET_FROM_REG(u8 reg, int nr, int mode)
+{
+ /* temp1-temp2 (nr=0,1) use lower nibble */
+ if (nr < 2)
+ return LM93_TEMP_OFFSET_FROM_REG(reg & 0x0f, mode);
+
+ /* temp3-temp4 (nr=2,3) use upper nibble */
+ else
+ return LM93_TEMP_OFFSET_FROM_REG(reg >> 4 & 0x0f, mode);
+}
+
+/* TEMP: 1/10 degrees C (0C to +15C (mode 0) or +7.5C (mode non-zero))
+ REG: 1.0C/bit (mode 0) or 0.5C/bit (mode non-zero)
+ 0 <= nr <= 3 */
+static u8 LM93_TEMP_AUTO_OFFSET_TO_REG(u8 old, int off, int nr, int mode)
+{
+ u8 new = LM93_TEMP_OFFSET_TO_REG(off, mode);
+
+ /* temp1-temp2 (nr=0,1) use lower nibble */
+ if (nr < 2)
+ return (old & 0xf0) | (new & 0x0f);
+
+ /* temp3-temp4 (nr=2,3) use upper nibble */
+ else
+ return (new << 4 & 0xf0) | (old & 0x0f);
+}
+
+static int LM93_AUTO_BOOST_HYST_FROM_REGS(struct lm93_data *data, int nr,
+ int mode)
+{
+ u8 reg;
+
+ switch (nr) {
+ case 0:
+ reg = data->boost_hyst[0] & 0x0f;
+ break;
+ case 1:
+ reg = data->boost_hyst[0] >> 4 & 0x0f;
+ break;
+ case 2:
+ reg = data->boost_hyst[1] & 0x0f;
+ break;
+ case 3:
+ default:
+ reg = data->boost_hyst[1] >> 4 & 0x0f;
+ break;
+ }
+
+ return LM93_TEMP_FROM_REG(data->boost[nr]) -
+ LM93_TEMP_OFFSET_FROM_REG(reg, mode);
+}
+
+static u8 LM93_AUTO_BOOST_HYST_TO_REG(struct lm93_data *data, long hyst,
+ int nr, int mode)
+{
+ u8 reg = LM93_TEMP_OFFSET_TO_REG(
+ (LM93_TEMP_FROM_REG(data->boost[nr]) - hyst), mode);
+
+ switch (nr) {
+ case 0:
+ reg = (data->boost_hyst[0] & 0xf0) | (reg & 0x0f);
+ break;
+ case 1:
+ reg = (reg << 4 & 0xf0) | (data->boost_hyst[0] & 0x0f);
+ break;
+ case 2:
+ reg = (data->boost_hyst[1] & 0xf0) | (reg & 0x0f);
+ break;
+ case 3:
+ default:
+ reg = (reg << 4 & 0xf0) | (data->boost_hyst[1] & 0x0f);
+ break;
+ }
+
+ return reg;
+}
+
+/* PWM: 0-255 per sensors documentation
+ REG: 0-13 as mapped below... right justified */
+typedef enum { LM93_PWM_MAP_HI_FREQ, LM93_PWM_MAP_LO_FREQ } pwm_freq_t;
+static int lm93_pwm_map[2][16] = {
+ {
+ 0x00, /* 0.00% */ 0x40, /* 25.00% */
+ 0x50, /* 31.25% */ 0x60, /* 37.50% */
+ 0x70, /* 43.75% */ 0x80, /* 50.00% */
+ 0x90, /* 56.25% */ 0xa0, /* 62.50% */
+ 0xb0, /* 68.75% */ 0xc0, /* 75.00% */
+ 0xd0, /* 81.25% */ 0xe0, /* 87.50% */
+ 0xf0, /* 93.75% */ 0xff, /* 100.00% */
+ 0xff, 0xff, /* 14, 15 are reserved and should never occur */
+ },
+ {
+ 0x00, /* 0.00% */ 0x40, /* 25.00% */
+ 0x49, /* 28.57% */ 0x52, /* 32.14% */
+ 0x5b, /* 35.71% */ 0x64, /* 39.29% */
+ 0x6d, /* 42.86% */ 0x76, /* 46.43% */
+ 0x80, /* 50.00% */ 0x89, /* 53.57% */
+ 0x92, /* 57.14% */ 0xb6, /* 71.43% */
+ 0xdb, /* 85.71% */ 0xff, /* 100.00% */
+ 0xff, 0xff, /* 14, 15 are reserved and should never occur */
+ },
+};
+
+static int LM93_PWM_FROM_REG(u8 reg, pwm_freq_t freq)
+{
+ return lm93_pwm_map[freq][reg & 0x0f];
+}
+
+/* round up to nearest match */
+static u8 LM93_PWM_TO_REG(int pwm, pwm_freq_t freq)
+{
+ int i;
+ for (i = 0; i < 13; i++)
+ if (pwm <= lm93_pwm_map[freq][i])
+ break;
+
+ /* can fall through with i==13 */
+ return (u8)i;
+}
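+
+/* Illustrative: with the high frequency map, a call such as
+   LM93_PWM_TO_REG(128, LM93_PWM_MAP_HI_FREQ) stops at index 5 (0x80,
+   50.00%), and LM93_PWM_FROM_REG(5, LM93_PWM_MAP_HI_FREQ) returns 128. */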
+
+static int LM93_FAN_FROM_REG(u16 regs)
+{
+ const u16 count = le16_to_cpu(regs) >> 2;
+ return count==0 ? -1 : count==0x3fff ? 0 : 1350000 / count;
+}
+
+/*
+ * RPM: (82.5 to 1350000)
+ * REG: 14-bits, LE, *left* justified
+ */
+static u16 LM93_FAN_TO_REG(long rpm)
+{
+ u16 count, regs;
+
+ if (rpm == 0) {
+ count = 0x3fff;
+ } else {
+ rpm = SENSORS_LIMIT(rpm, 1, 1000000);
+ count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe);
+ }
+
+ regs = count << 2;
+ return cpu_to_le16(regs);
+}
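+
+/* Illustrative round trip: 1000 RPM encodes as a count of (1350000 + 1000) /
+   1000 = 1351, stored left justified as 1351 << 2; reading it back yields
+   1350000 / 1351 = 999 RPM (integer rounding). */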
+
+/* PWM FREQ: HZ
+ REG: 0-7 as mapped below */
+static int lm93_pwm_freq_map[8] = {
+ 22500, 96, 84, 72, 60, 48, 36, 12
+};
+
+static int LM93_PWM_FREQ_FROM_REG(u8 reg)
+{
+ return lm93_pwm_freq_map[reg & 0x07];
+}
+
+/* round up to nearest match */
+static u8 LM93_PWM_FREQ_TO_REG(int freq)
+{
+ int i;
+ for (i = 7; i > 0; i--)
+ if (freq <= lm93_pwm_freq_map[i])
+ break;
+
+ /* can fall through with i==0 */
+ return (u8)i;
+}
+
+/* TIME: 1/100 seconds
+ * REG: 0-7 as mapped below */
+static int lm93_spinup_time_map[8] = {
+ 0, 10, 25, 40, 70, 100, 200, 400,
+};
+
+static int LM93_SPINUP_TIME_FROM_REG(u8 reg)
+{
+ return lm93_spinup_time_map[reg >> 5 & 0x07];
+}
+
+/* round up to nearest match */
+static u8 LM93_SPINUP_TIME_TO_REG(int time)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ if (time <= lm93_spinup_time_map[i])
+ break;
+
+ /* can fall through with i==7 */
+ return (u8)i;
+}
+
+#define LM93_RAMP_MIN 0
+#define LM93_RAMP_MAX 75
+
+static int LM93_RAMP_FROM_REG(u8 reg)
+{
+ return (reg & 0x0f) * 5;
+}
+
+/* RAMP: 1/100 seconds
+ REG: 50mS/bit 4-bits right justified */
+static u8 LM93_RAMP_TO_REG(int ramp)
+{
+ ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
+ return (u8)((ramp + 2) / 5);
+}
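+
+/* Illustrative: a ramp of 30 (0.30 seconds) maps to (30 + 2) / 5 = 6, and
+   LM93_RAMP_FROM_REG(6) gives back 30. */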
+
+/* PROCHOT: 0-255, 0 => 0%, 255 => > 96.6%
+ * REG: (same) */
+static u8 LM93_PROCHOT_TO_REG(long prochot)
+{
+ prochot = SENSORS_LIMIT(prochot, 0, 255);
+ return (u8)prochot;
+}
+
+/* PROCHOT-INTERVAL: 73 - 37200 (1/100 seconds)
+ * REG: 0-9 as mapped below */
+static int lm93_interval_map[10] = {
+ 73, 146, 290, 580, 1170, 2330, 4660, 9320, 18600, 37200,
+};
+
+static int LM93_INTERVAL_FROM_REG(u8 reg)
+{
+ return lm93_interval_map[reg & 0x0f];
+}
+
+/* round up to nearest match */
+static u8 LM93_INTERVAL_TO_REG(long interval)
+{
+ int i;
+ for (i = 0; i < 9; i++)
+ if (interval <= lm93_interval_map[i])
+ break;
+
+ /* can fall through with i==9 */
+ return (u8)i;
+}
+
+/* GPIO: 0-255, GPIO0 is LSB
+ * REG: inverted */
+static unsigned LM93_GPI_FROM_REG(u8 reg)
+{
+ return ~reg & 0xff;
+}
+
+/* alarm bitmask definitions
+ The LM93 has nearly 64 bits of error status... I've pared that down to
+ what I think is a useful subset in order to fit it into 32 bits.
+
+ Especially note that the #VRD_HOT alarms are missing because we provide
+ that information as values in another sysfs file.
+
+ If libsensors is extended to support 64 bit values, this could be revisited.
+*/
+#define LM93_ALARM_IN1 0x00000001
+#define LM93_ALARM_IN2 0x00000002
+#define LM93_ALARM_IN3 0x00000004
+#define LM93_ALARM_IN4 0x00000008
+#define LM93_ALARM_IN5 0x00000010
+#define LM93_ALARM_IN6 0x00000020
+#define LM93_ALARM_IN7 0x00000040
+#define LM93_ALARM_IN8 0x00000080
+#define LM93_ALARM_IN9 0x00000100
+#define LM93_ALARM_IN10 0x00000200
+#define LM93_ALARM_IN11 0x00000400
+#define LM93_ALARM_IN12 0x00000800
+#define LM93_ALARM_IN13 0x00001000
+#define LM93_ALARM_IN14 0x00002000
+#define LM93_ALARM_IN15 0x00004000
+#define LM93_ALARM_IN16 0x00008000
+#define LM93_ALARM_FAN1 0x00010000
+#define LM93_ALARM_FAN2 0x00020000
+#define LM93_ALARM_FAN3 0x00040000
+#define LM93_ALARM_FAN4 0x00080000
+#define LM93_ALARM_PH1_ERR 0x00100000
+#define LM93_ALARM_PH2_ERR 0x00200000
+#define LM93_ALARM_SCSI1_ERR 0x00400000
+#define LM93_ALARM_SCSI2_ERR 0x00800000
+#define LM93_ALARM_DVDDP1_ERR 0x01000000
+#define LM93_ALARM_DVDDP2_ERR 0x02000000
+#define LM93_ALARM_D1_ERR 0x04000000
+#define LM93_ALARM_D2_ERR 0x08000000
+#define LM93_ALARM_TEMP1 0x10000000
+#define LM93_ALARM_TEMP2 0x20000000
+#define LM93_ALARM_TEMP3 0x40000000
+
+static unsigned LM93_ALARMS_FROM_REG(struct block1_t b1)
+{
+ unsigned result;
+ result = b1.host_status_2 & 0x3f;
+
+ if (vccp_limit_type[0])
+ result |= (b1.host_status_4 & 0x10) << 2;
+ else
+ result |= b1.host_status_2 & 0x40;
+
+ if (vccp_limit_type[1])
+ result |= (b1.host_status_4 & 0x20) << 2;
+ else
+ result |= b1.host_status_2 & 0x80;
+
+ result |= b1.host_status_3 << 8;
+ result |= (b1.fan_status & 0x0f) << 16;
+ result |= (b1.p1_prochot_status & 0x80) << 13;
+ result |= (b1.p2_prochot_status & 0x80) << 14;
+ result |= (b1.host_status_4 & 0xfc) << 20;
+ result |= (b1.host_status_1 & 0x07) << 28;
+ return result;
+}
+
+#define MAX_RETRIES 5
+
+static u8 lm93_read_byte(struct i2c_client *client, u8 reg)
+{
+ int value, i;
+
+ /* retry in case of read errors */
+ for (i=1; i<=MAX_RETRIES; i++) {
+ if ((value = i2c_smbus_read_byte_data(client, reg)) >= 0) {
+ return value;
+ } else {
+ dev_warn(&client->dev,"lm93: read byte data failed, "
+ "address 0x%02x.\n", reg);
+ mdelay(i + 3);
+ }
+
+ }
+
+ /* <TODO> what to return in case of error? */
+ dev_err(&client->dev,"lm93: All read byte retries failed!!\n");
+ return 0;
+}
+
+static int lm93_write_byte(struct i2c_client *client, u8 reg, u8 value)
+{
+ int result;
+
+ /* <TODO> how to handle write errors? */
+ result = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (result < 0)
+ dev_warn(&client->dev,"lm93: write byte data failed, "
+ "0x%02x at address 0x%02x.\n", value, reg);
+
+ return result;
+}
+
+static u16 lm93_read_word(struct i2c_client *client, u8 reg)
+{
+ int value, i;
+
+ /* retry in case of read errors */
+ for (i=1; i<=MAX_RETRIES; i++) {
+ if ((value = i2c_smbus_read_word_data(client, reg)) >= 0) {
+ return value;
+ } else {
+ dev_warn(&client->dev,"lm93: read word data failed, "
+ "address 0x%02x.\n", reg);
+ mdelay(i + 3);
+ }
+
+ }
+
+ /* <TODO> what to return in case of error? */
+ dev_err(&client->dev,"lm93: All read word retries failed!!\n");
+ return 0;
+}
+
+static int lm93_write_word(struct i2c_client *client, u8 reg, u16 value)
+{
+ int result;
+
+ /* <TODO> how to handle write errors? */
+ result = i2c_smbus_write_word_data(client, reg, value);
+
+ if (result < 0)
+ dev_warn(&client->dev,"lm93: write word data failed, "
+ "0x%04x at address 0x%02x.\n", value, reg);
+
+ return result;
+}
+
+static u8 lm93_block_buffer[I2C_SMBUS_BLOCK_MAX];
+
+/*
+ read block data into values, retry if not expected length
+ fbn => index to lm93_block_read_cmds table
+ (Fixed Block Number - section 14.5.2 of LM93 datasheet)
+*/
+static void lm93_read_block(struct i2c_client *client, u8 fbn, u8 *values)
+{
+ int i, result=0;
+
+ for (i = 1; i <= MAX_RETRIES; i++) {
+ result = i2c_smbus_read_block_data(client,
+ lm93_block_read_cmds[fbn].cmd, lm93_block_buffer);
+
+ if (result == lm93_block_read_cmds[fbn].len) {
+ break;
+ } else {
+ dev_warn(&client->dev,"lm93: block read data failed, "
+ "command 0x%02x.\n",
+ lm93_block_read_cmds[fbn].cmd);
+ mdelay(i + 3);
+ }
+ }
+
+ if (result == lm93_block_read_cmds[fbn].len) {
+ memcpy(values,lm93_block_buffer,lm93_block_read_cmds[fbn].len);
+ } else {
+ /* <TODO> what to do in case of error? */
+ }
+}
+
+static struct lm93_data *lm93_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ const unsigned long interval = HZ + (HZ / 2);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + interval) ||
+ !data->valid) {
+
+ data->update(data, client);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+/* update routine for data that has no corresponding SMBus block command */
+static void lm93_update_client_common(struct lm93_data *data,
+ struct i2c_client *client)
+{
+ int i;
+ u8 *ptr;
+
+ /* temp1 - temp4: limits */
+ for (i = 0; i < 4; i++) {
+ data->temp_lim[i].min =
+ lm93_read_byte(client, LM93_REG_TEMP_MIN(i));
+ data->temp_lim[i].max =
+ lm93_read_byte(client, LM93_REG_TEMP_MAX(i));
+ }
+
+ /* config register */
+ data->config = lm93_read_byte(client, LM93_REG_CONFIG);
+
+ /* vid1 - vid2: values */
+ for (i = 0; i < 2; i++)
+ data->vid[i] = lm93_read_byte(client, LM93_REG_VID(i));
+
+ /* prochot1 - prochot2: limits */
+ for (i = 0; i < 2; i++)
+ data->prochot_max[i] = lm93_read_byte(client,
+ LM93_REG_PROCHOT_MAX(i));
+
+ /* vccp1 - vccp2: VID relative limits */
+ for (i = 0; i < 2; i++)
+ data->vccp_limits[i] = lm93_read_byte(client,
+ LM93_REG_VCCP_LIMIT_OFF(i));
+
+ /* GPIO input state */
+ data->gpi = lm93_read_byte(client, LM93_REG_GPI);
+
+ /* #PROCHOT override state */
+ data->prochot_override = lm93_read_byte(client,
+ LM93_REG_PROCHOT_OVERRIDE);
+
+ /* #PROCHOT intervals */
+ data->prochot_interval = lm93_read_byte(client,
+ LM93_REG_PROCHOT_INTERVAL);
+
+ /* Fan Boost Temperature registers */
+ for (i = 0; i < 4; i++)
+ data->boost[i] = lm93_read_byte(client, LM93_REG_BOOST(i));
+
+ /* Fan Boost Temperature Hyst. registers */
+ data->boost_hyst[0] = lm93_read_byte(client, LM93_REG_BOOST_HYST_12);
+ data->boost_hyst[1] = lm93_read_byte(client, LM93_REG_BOOST_HYST_34);
+
+ /* Temperature Zone Min. PWM & Hysteresis registers */
+ data->auto_pwm_min_hyst[0] =
+ lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_12);
+ data->auto_pwm_min_hyst[1] =
+ lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_34);
+
+ /* #PROCHOT & #VRDHOT PWM Ramp Control register */
+ data->pwm_ramp_ctl = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);
+
+ /* misc setup registers */
+ data->sfc1 = lm93_read_byte(client, LM93_REG_SFC1);
+ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+ data->sf_tach_to_pwm = lm93_read_byte(client,
+ LM93_REG_SF_TACH_TO_PWM);
+
+ /* write back alarm values to clear */
+ for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++)
+ lm93_write_byte(client, LM93_REG_HOST_ERROR_1 + i, *(ptr + i));
+}
+
+/* update routine which uses SMBus block data commands */
+static void lm93_update_client_full(struct lm93_data *data,
+ struct i2c_client *client)
+{
+ dev_dbg(&client->dev,"starting device update (block data enabled)\n");
+
+ /* in1 - in16: values & limits */
+ lm93_read_block(client, 3, (u8 *)(data->block3));
+ lm93_read_block(client, 7, (u8 *)(data->block7));
+
+ /* temp1 - temp4: values */
+ lm93_read_block(client, 2, (u8 *)(data->block2));
+
+ /* prochot1 - prochot2: values */
+ lm93_read_block(client, 4, (u8 *)(data->block4));
+
+ /* fan1 - fan4: values & limits */
+ lm93_read_block(client, 5, (u8 *)(data->block5));
+ lm93_read_block(client, 8, (u8 *)(data->block8));
+
+ /* pwm control registers */
+ lm93_read_block(client, 9, (u8 *)(data->block9));
+
+ /* alarm values */
+ lm93_read_block(client, 1, (u8 *)(&data->block1));
+
+ /* auto/pwm registers */
+ lm93_read_block(client, 10, (u8 *)(&data->block10));
+
+ lm93_update_client_common(data, client);
+}
+
+/* update routine which uses SMBus byte/word data commands only */
+static void lm93_update_client_min(struct lm93_data *data,
+ struct i2c_client *client)
+{
+ int i,j;
+ u8 *ptr;
+
+ dev_dbg(&client->dev,"starting device update (block data disabled)\n");
+
+ /* in1 - in16: values & limits */
+ for (i = 0; i < 16; i++) {
+ data->block3[i] =
+ lm93_read_byte(client, LM93_REG_IN(i));
+ data->block7[i].min =
+ lm93_read_byte(client, LM93_REG_IN_MIN(i));
+ data->block7[i].max =
+ lm93_read_byte(client, LM93_REG_IN_MAX(i));
+ }
+
+ /* temp1 - temp4: values */
+ for (i = 0; i < 4; i++) {
+ data->block2[i] =
+ lm93_read_byte(client, LM93_REG_TEMP(i));
+ }
+
+ /* prochot1 - prochot2: values */
+ for (i = 0; i < 2; i++) {
+ data->block4[i].cur =
+ lm93_read_byte(client, LM93_REG_PROCHOT_CUR(i));
+ data->block4[i].avg =
+ lm93_read_byte(client, LM93_REG_PROCHOT_AVG(i));
+ }
+
+ /* fan1 - fan4: values & limits */
+ for (i = 0; i < 4; i++) {
+ data->block5[i] =
+ lm93_read_word(client, LM93_REG_FAN(i));
+ data->block8[i] =
+ lm93_read_word(client, LM93_REG_FAN_MIN(i));
+ }
+
+ /* pwm control registers */
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 4; j++) {
+ data->block9[i][j] =
+ lm93_read_byte(client, LM93_REG_PWM_CTL(i,j));
+ }
+ }
+
+ /* alarm values */
+ for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++) {
+ *(ptr + i) =
+ lm93_read_byte(client, LM93_REG_HOST_ERROR_1 + i);
+ }
+
+ /* auto/pwm (base temp) registers */
+ for (i = 0; i < 4; i++) {
+ data->block10.base[i] =
+ lm93_read_byte(client, LM93_REG_TEMP_BASE(i));
+ }
+
+ /* auto/pwm (offset temp) registers */
+ for (i = 0; i < 12; i++) {
+ data->block10.offset[i] =
+ lm93_read_byte(client, LM93_REG_TEMP_OFFSET(i));
+ }
+
+ lm93_update_client_common(data, client);
+}
+
+/* following are the sysfs callback functions */
+static ssize_t show_in(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf, "%d\n", LM93_IN_FROM_REG(nr, data->block3[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 3);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 4);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 5);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 6);
+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 7);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in, NULL, 8);
+static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in, NULL, 9);
+static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_in, NULL, 10);
+static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_in, NULL, 11);
+static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_in, NULL, 12);
+static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, show_in, NULL, 13);
+static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, show_in, NULL, 14);
+static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in, NULL, 15);
+
+static ssize_t show_in_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ int vccp = nr - 6;
+ long rc, vid;
+
+ if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+ vid = LM93_VID_FROM_REG(data->vid[vccp]);
+ rc = LM93_IN_MIN_FROM_REG(data->vccp_limits[vccp], vid);
+ }
+ else {
+ rc = LM93_IN_FROM_REG(nr, data->block7[nr].min);
+ }
+ return sprintf(buf, "%ld\n", rc);
+}
+
+static ssize_t store_in_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ int vccp = nr - 6;
+ long vid;
+
+ mutex_lock(&data->update_lock);
+ if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+ vid = LM93_VID_FROM_REG(data->vid[vccp]);
+ data->vccp_limits[vccp] = (data->vccp_limits[vccp] & 0xf0) |
+ LM93_IN_REL_TO_REG(val, 0, vid);
+ lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp),
+ data->vccp_limits[vccp]);
+ }
+ else {
+ data->block7[nr].min = LM93_IN_TO_REG(nr,val);
+ lm93_write_byte(client, LM93_REG_IN_MIN(nr),
+ data->block7[nr].min);
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 0);
+static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 1);
+static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 2);
+static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 3);
+static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 4);
+static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 5);
+static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 6);
+static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 7);
+static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 8);
+static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 9);
+static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 10);
+static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 11);
+static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 12);
+static SENSOR_DEVICE_ATTR(in14_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 13);
+static SENSOR_DEVICE_ATTR(in15_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 14);
+static SENSOR_DEVICE_ATTR(in16_min, S_IWUSR | S_IRUGO,
+ show_in_min, store_in_min, 15);
+
+static ssize_t show_in_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ int vccp = nr - 6;
+ long rc, vid;
+
+ if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+ vid = LM93_VID_FROM_REG(data->vid[vccp]);
+ rc = LM93_IN_MAX_FROM_REG(data->vccp_limits[vccp],vid);
+ }
+ else {
+ rc = LM93_IN_FROM_REG(nr, data->block7[nr].max);
+ }
+ return sprintf(buf, "%ld\n", rc);
+}
+
+static ssize_t store_in_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ int vccp = nr - 6;
+ long vid;
+
+ mutex_lock(&data->update_lock);
+ if ((nr==6 || nr==7) && (vccp_limit_type[vccp])) {
+ vid = LM93_VID_FROM_REG(data->vid[vccp]);
+ data->vccp_limits[vccp] = (data->vccp_limits[vccp] & 0x0f) |
+ LM93_IN_REL_TO_REG(val, 1, vid);
+ lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp),
+ data->vccp_limits[vccp]);
+ }
+ else {
+ data->block7[nr].max = LM93_IN_TO_REG(nr,val);
+ lm93_write_byte(client, LM93_REG_IN_MAX(nr),
+ data->block7[nr].max);
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 0);
+static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 1);
+static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 2);
+static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 3);
+static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 4);
+static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 5);
+static SENSOR_DEVICE_ATTR(in7_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 6);
+static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 7);
+static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 8);
+static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 9);
+static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 10);
+static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 11);
+static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 12);
+static SENSOR_DEVICE_ATTR(in14_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 13);
+static SENSOR_DEVICE_ATTR(in15_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 14);
+static SENSOR_DEVICE_ATTR(in16_max, S_IWUSR | S_IRUGO,
+ show_in_max, store_in_max, 15);
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->block2[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+
+static ssize_t show_temp_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->temp_lim[nr].min));
+}
+
+static ssize_t store_temp_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->temp_lim[nr].min = LM93_TEMP_TO_REG(val);
+ lm93_write_byte(client, LM93_REG_TEMP_MIN(nr), data->temp_lim[nr].min);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+ show_temp_min, store_temp_min, 0);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO,
+ show_temp_min, store_temp_min, 1);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO,
+ show_temp_min, store_temp_min, 2);
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->temp_lim[nr].max));
+}
+
+static ssize_t store_temp_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->temp_lim[nr].max = LM93_TEMP_TO_REG(val);
+ lm93_write_byte(client, LM93_REG_TEMP_MAX(nr), data->temp_lim[nr].max);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+ show_temp_max, store_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO,
+ show_temp_max, store_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO,
+ show_temp_max, store_temp_max, 2);
+
+static ssize_t show_temp_auto_base(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->block10.base[nr]));
+}
+
+static ssize_t store_temp_auto_base(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->block10.base[nr] = LM93_TEMP_TO_REG(val);
+ lm93_write_byte(client, LM93_REG_TEMP_BASE(nr), data->block10.base[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_base, S_IWUSR | S_IRUGO,
+ show_temp_auto_base, store_temp_auto_base, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_base, S_IWUSR | S_IRUGO,
+ show_temp_auto_base, store_temp_auto_base, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_base, S_IWUSR | S_IRUGO,
+ show_temp_auto_base, store_temp_auto_base, 2);
+
+static ssize_t show_temp_auto_boost(struct device *dev,
+ struct device_attribute *attr,char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_TEMP_FROM_REG(data->boost[nr]));
+}
+
+static ssize_t store_temp_auto_boost(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->boost[nr] = LM93_TEMP_TO_REG(val);
+ lm93_write_byte(client, LM93_REG_BOOST(nr), data->boost[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_boost, S_IWUSR | S_IRUGO,
+ show_temp_auto_boost, store_temp_auto_boost, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_boost, S_IWUSR | S_IRUGO,
+ show_temp_auto_boost, store_temp_auto_boost, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_boost, S_IWUSR | S_IRUGO,
+ show_temp_auto_boost, store_temp_auto_boost, 2);
+
+static ssize_t show_temp_auto_boost_hyst(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
+ return sprintf(buf,"%d\n",
+ LM93_AUTO_BOOST_HYST_FROM_REGS(data, nr, mode));
+}
+
+static ssize_t store_temp_auto_boost_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ /* force 0.5C/bit mode */
+ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+ data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
+ lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+ data->boost_hyst[nr/2] = LM93_AUTO_BOOST_HYST_TO_REG(data, val, nr, 1);
+ lm93_write_byte(client, LM93_REG_BOOST_HYST(nr),
+ data->boost_hyst[nr/2]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_boost_hyst, S_IWUSR | S_IRUGO,
+ show_temp_auto_boost_hyst,
+ store_temp_auto_boost_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_boost_hyst, S_IWUSR | S_IRUGO,
+ show_temp_auto_boost_hyst,
+ store_temp_auto_boost_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_boost_hyst, S_IWUSR | S_IRUGO,
+ show_temp_auto_boost_hyst,
+ store_temp_auto_boost_hyst, 2);
+
+static ssize_t show_temp_auto_offset(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
+ int nr = s_attr->index;
+ int ofs = s_attr->nr;
+ struct lm93_data *data = lm93_update_device(dev);
+ int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
+ return sprintf(buf,"%d\n",
+ LM93_TEMP_AUTO_OFFSET_FROM_REG(data->block10.offset[ofs],
+ nr,mode));
+}
+
+static ssize_t store_temp_auto_offset(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
+ int nr = s_attr->index;
+ int ofs = s_attr->nr;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ /* force 0.5C/bit mode */
+ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+ data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
+ lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+ data->block10.offset[ofs] = LM93_TEMP_AUTO_OFFSET_TO_REG(
+ data->block10.offset[ofs], val, nr, 1);
+ lm93_write_byte(client, LM93_REG_TEMP_OFFSET(ofs),
+ data->block10.offset[ofs]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset1, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 0, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset2, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset3, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset4, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 3, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset5, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 4, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset6, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 5, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset7, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 6, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset8, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 7, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset9, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 8, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset10, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 9, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset11, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 10, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_offset12, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 11, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset1, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 0, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset2, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset3, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset4, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 3, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset5, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 4, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset6, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 5, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset7, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 6, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset8, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 7, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset9, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 8, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset10, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 9, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset11, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 10, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_offset12, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 11, 1);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset1, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 0, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset2, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 1, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset3, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 2, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset4, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 3, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset5, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 4, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset6, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 5, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset7, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 6, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset8, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 7, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset9, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 8, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset10, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 9, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset11, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 10, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_auto_offset12, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset, store_temp_auto_offset, 11, 2);
+
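+/* note: the PWM_MIN_HYST register packs the minimum auto PWM value in its
+ upper nibble and the temperature offset hysteresis in its lower nibble
+ (see show/store_temp_auto_offset_hyst below) */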
+static ssize_t show_temp_auto_pwm_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ u8 reg, ctl4;
+ struct lm93_data *data = lm93_update_device(dev);
+ reg = data->auto_pwm_min_hyst[nr/2] >> 4 & 0x0f;
+ ctl4 = data->block9[nr][LM93_PWM_CTL4];
+ return sprintf(buf,"%d\n",LM93_PWM_FROM_REG(reg, (ctl4 & 0x07) ?
+ LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
+}
+
+static ssize_t store_temp_auto_pwm_min(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 reg, ctl4;
+
+ mutex_lock(&data->update_lock);
+ reg = lm93_read_byte(client, LM93_REG_PWM_MIN_HYST(nr));
+ ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4));
+ reg = (reg & 0x0f) |
+ LM93_PWM_TO_REG(val, (ctl4 & 0x07) ?
+ LM93_PWM_MAP_LO_FREQ :
+ LM93_PWM_MAP_HI_FREQ) << 4;
+ data->auto_pwm_min_hyst[nr/2] = reg;
+ lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_pwm_min, S_IWUSR | S_IRUGO,
+ show_temp_auto_pwm_min,
+ store_temp_auto_pwm_min, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_pwm_min, S_IWUSR | S_IRUGO,
+ show_temp_auto_pwm_min,
+ store_temp_auto_pwm_min, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_pwm_min, S_IWUSR | S_IRUGO,
+ show_temp_auto_pwm_min,
+ store_temp_auto_pwm_min, 2);
+
+static ssize_t show_temp_auto_offset_hyst(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
+ return sprintf(buf,"%d\n",LM93_TEMP_OFFSET_FROM_REG(
+ data->auto_pwm_min_hyst[nr/2], mode));
+}
+
+static ssize_t store_temp_auto_offset_hyst(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 reg;
+
+ mutex_lock(&data->update_lock);
+ /* force 0.5C/bit mode */
+ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+ data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
+ lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+ reg = data->auto_pwm_min_hyst[nr/2];
+ reg = (reg & 0xf0) | (LM93_TEMP_OFFSET_TO_REG(val, 1) & 0x0f);
+ data->auto_pwm_min_hyst[nr/2] = reg;
+ lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_auto_offset_hyst, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset_hyst,
+ store_temp_auto_offset_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp2_auto_offset_hyst, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset_hyst,
+ store_temp_auto_offset_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp3_auto_offset_hyst, S_IWUSR | S_IRUGO,
+ show_temp_auto_offset_hyst,
+ store_temp_auto_offset_hyst, 2);
+
+static ssize_t show_fan_input(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *s_attr = to_sensor_dev_attr(attr);
+ int nr = s_attr->index;
+ struct lm93_data *data = lm93_update_device(dev);
+
+ return sprintf(buf,"%d\n",LM93_FAN_FROM_REG(data->block5[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);
+
+static ssize_t show_fan_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+
+ return sprintf(buf,"%d\n",LM93_FAN_FROM_REG(data->block8[nr]));
+}
+
+static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->block8[nr] = LM93_FAN_TO_REG(val);
+ lm93_write_word(client,LM93_REG_FAN_MIN(nr),data->block8[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
+ show_fan_min, store_fan_min, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
+ show_fan_min, store_fan_min, 1);
+static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
+ show_fan_min, store_fan_min, 2);
+static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
+ show_fan_min, store_fan_min, 3);
+
+/* some tedious bit-twiddling here to deal with the register format:
+
+ data->sf_tach_to_pwm: (tach to pwm mapping bits)
+
+ bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ T4:P2 T4:P1 T3:P2 T3:P1 T2:P2 T2:P1 T1:P2 T1:P1
+
+ data->sfc2: (enable bits)
+
+ bit | 3 | 2 | 1 | 0
+ T4 T3 T2 T1
+*/
+
+static ssize_t show_fan_smart_tach(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ long rc = 0;
+ int mapping;
+
+ /* extract the relevant mapping */
+ mapping = (data->sf_tach_to_pwm >> (nr * 2)) & 0x03;
+
+ /* if there's a mapping and it's enabled */
+ if (mapping && ((data->sfc2 >> nr) & 0x01))
+ rc = mapping;
+ return sprintf(buf,"%ld\n",rc);
+}
+
+/* helper function - must grab data->update_lock before calling
+ fan is 0-3, indicating fan1-fan4 */
+static void lm93_write_fan_smart_tach(struct i2c_client *client,
+ struct lm93_data *data, int fan, long value)
+{
+ /* insert the new mapping and write it out */
+ data->sf_tach_to_pwm = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
+ data->sf_tach_to_pwm &= ~(0x3 << fan * 2);
+ data->sf_tach_to_pwm |= value << fan * 2;
+ lm93_write_byte(client, LM93_REG_SF_TACH_TO_PWM, data->sf_tach_to_pwm);
+
+ /* insert the enable bit and write it out */
+ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+ if (value)
+ data->sfc2 |= 1 << fan;
+ else
+ data->sfc2 &= ~(1 << fan);
+ lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+}
+
+static ssize_t store_fan_smart_tach(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ /* sanity test, ignore the write otherwise */
+ if (val <= 2) {
+ /* can't enable if pwm freq is 22.5KHz */
+ if (val) {
+ u8 ctl4 = lm93_read_byte(client,
+ LM93_REG_PWM_CTL(val-1,LM93_PWM_CTL4));
+ if ((ctl4 & 0x07) == 0)
+ val = 0;
+ }
+ lm93_write_fan_smart_tach(client, data, nr, val);
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_smart_tach, S_IWUSR | S_IRUGO,
+ show_fan_smart_tach, store_fan_smart_tach, 0);
+static SENSOR_DEVICE_ATTR(fan2_smart_tach, S_IWUSR | S_IRUGO,
+ show_fan_smart_tach, store_fan_smart_tach, 1);
+static SENSOR_DEVICE_ATTR(fan3_smart_tach, S_IWUSR | S_IRUGO,
+ show_fan_smart_tach, store_fan_smart_tach, 2);
+static SENSOR_DEVICE_ATTR(fan4_smart_tach, S_IWUSR | S_IRUGO,
+ show_fan_smart_tach, store_fan_smart_tach, 3);
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ u8 ctl2, ctl4;
+ long rc;
+
+ ctl2 = data->block9[nr][LM93_PWM_CTL2];
+ ctl4 = data->block9[nr][LM93_PWM_CTL4];
+ if (ctl2 & 0x01) /* show user commanded value if enabled */
+ rc = data->pwm_override[nr];
+ else /* show present h/w value if manual pwm disabled */
+ rc = LM93_PWM_FROM_REG(ctl2 >> 4, (ctl4 & 0x07) ?
+ LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ);
+ return sprintf(buf,"%ld\n",rc);
+}
+
+static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ctl2, ctl4;
+
+ mutex_lock(&data->update_lock);
+ ctl2 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2));
+ ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4));
+ ctl2 = (ctl2 & 0x0f) | LM93_PWM_TO_REG(val,(ctl4 & 0x07) ?
+ LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ) << 4;
+ /* save user commanded value */
+ data->pwm_override[nr] = LM93_PWM_FROM_REG(ctl2 >> 4,
+ (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ :
+ LM93_PWM_MAP_HI_FREQ);
+ lm93_write_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2),ctl2);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
+
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ u8 ctl2;
+ long rc;
+
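+ /* sysfs convention: 0 = no control (full speed), 1 = manual, 2 = auto */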
+ ctl2 = data->block9[nr][LM93_PWM_CTL2];
+ if (ctl2 & 0x01) /* manual override enabled ? */
+ rc = ((ctl2 & 0xF0) == 0xF0) ? 0 : 1;
+ else
+ rc = 2;
+ return sprintf(buf,"%ld\n",rc);
+}
+
+static ssize_t store_pwm_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ctl2;
+
+ mutex_lock(&data->update_lock);
+ ctl2 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2));
+
+ switch (val) {
+ case 0:
+ ctl2 |= 0xF1; /* enable manual override, set PWM to max */
+ break;
+ case 1: ctl2 |= 0x01; /* enable manual override */
+ break;
+ case 2: ctl2 &= ~0x01; /* disable manual override */
+ break;
+ default:
+ mutex_unlock(&data->update_lock);
+ return -EINVAL;
+ }
+
+ lm93_write_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL2),ctl2);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+ show_pwm_enable, store_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
+ show_pwm_enable, store_pwm_enable, 1);
+
+static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ u8 ctl4;
+
+ ctl4 = data->block9[nr][LM93_PWM_CTL4];
+ return sprintf(buf,"%d\n",LM93_PWM_FREQ_FROM_REG(ctl4));
+}
+
+/* helper function - must grab data->update_lock before calling
+ pwm is 0-1, indicating pwm1-pwm2
+ this disables smart tach for all tach channels bound to the given pwm */
+static void lm93_disable_fan_smart_tach(struct i2c_client *client,
+ struct lm93_data *data, int pwm)
+{
+ int mapping = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
+ int mask;
+
+ /* collapse the mapping into a mask of enable bits */
+ mapping = (mapping >> pwm) & 0x55;
+ mask = mapping & 0x01;
+ mask |= (mapping & 0x04) >> 1;
+ mask |= (mapping & 0x10) >> 2;
+ mask |= (mapping & 0x40) >> 3;
+
+ /* disable smart tach according to the mask */
+ data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
+ data->sfc2 &= ~mask;
+ lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
+}
+
+static ssize_t store_pwm_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ctl4;
+
+ mutex_lock(&data->update_lock);
+ ctl4 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4));
+ ctl4 = (ctl4 & 0xf8) | LM93_PWM_FREQ_TO_REG(val);
+ data->block9[nr][LM93_PWM_CTL4] = ctl4;
+ /* ctl4 == 0 -> 22.5KHz -> disable smart tach */
+ if (!ctl4)
+ lm93_disable_fan_smart_tach(client, data, nr);
+ lm93_write_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL4), ctl4);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO,
+ show_pwm_freq, store_pwm_freq, 0);
+static SENSOR_DEVICE_ATTR(pwm2_freq, S_IWUSR | S_IRUGO,
+ show_pwm_freq, store_pwm_freq, 1);
+
+static ssize_t show_pwm_auto_channels(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",data->block9[nr][LM93_PWM_CTL1]);
+}
+
+static ssize_t store_pwm_auto_channels(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255);
+ lm93_write_byte(client, LM93_REG_PWM_CTL(nr,LM93_PWM_CTL1),
+ data->block9[nr][LM93_PWM_CTL1]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels, S_IWUSR | S_IRUGO,
+ show_pwm_auto_channels, store_pwm_auto_channels, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels, S_IWUSR | S_IRUGO,
+ show_pwm_auto_channels, store_pwm_auto_channels, 1);
+
+static ssize_t show_pwm_auto_spinup_min(struct device *dev,
+ struct device_attribute *attr,char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ u8 ctl3, ctl4;
+
+ ctl3 = data->block9[nr][LM93_PWM_CTL3];
+ ctl4 = data->block9[nr][LM93_PWM_CTL4];
+ return sprintf(buf,"%d\n",
+ LM93_PWM_FROM_REG(ctl3 & 0x0f, (ctl4 & 0x07) ?
+ LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
+}
+
+static ssize_t store_pwm_auto_spinup_min(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ctl3, ctl4;
+
+ mutex_lock(&data->update_lock);
+ ctl3 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3));
+ ctl4 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4));
+ ctl3 = (ctl3 & 0xf0) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ?
+ LM93_PWM_MAP_LO_FREQ :
+ LM93_PWM_MAP_HI_FREQ);
+ data->block9[nr][LM93_PWM_CTL3] = ctl3;
+ lm93_write_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_min, S_IWUSR | S_IRUGO,
+ show_pwm_auto_spinup_min,
+ store_pwm_auto_spinup_min, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_min, S_IWUSR | S_IRUGO,
+ show_pwm_auto_spinup_min,
+ store_pwm_auto_spinup_min, 1);
+
+static ssize_t show_pwm_auto_spinup_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_SPINUP_TIME_FROM_REG(
+ data->block9[nr][LM93_PWM_CTL3]));
+}
+
+static ssize_t store_pwm_auto_spinup_time(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ctl3;
+
+ mutex_lock(&data->update_lock);
+ ctl3 = lm93_read_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3));
+ ctl3 = (ctl3 & 0x1f) | (LM93_SPINUP_TIME_TO_REG(val) << 5 & 0xe0);
+ data->block9[nr][LM93_PWM_CTL3] = ctl3;
+ lm93_write_byte(client,LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_time, S_IWUSR | S_IRUGO,
+ show_pwm_auto_spinup_time,
+ store_pwm_auto_spinup_time, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO,
+ show_pwm_auto_spinup_time,
+ store_pwm_auto_spinup_time, 1);
+
+static ssize_t show_pwm_auto_prochot_ramp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",
+ LM93_RAMP_FROM_REG(data->pwm_ramp_ctl >> 4 & 0x0f));
+}
+
+static ssize_t store_pwm_auto_prochot_ramp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ramp;
+
+ mutex_lock(&data->update_lock);
+ ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);
+ ramp = (ramp & 0x0f) | (LM93_RAMP_TO_REG(val) << 4 & 0xf0);
+ lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static DEVICE_ATTR(pwm_auto_prochot_ramp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_prochot_ramp,
+ store_pwm_auto_prochot_ramp);
+
+static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",
+ LM93_RAMP_FROM_REG(data->pwm_ramp_ctl & 0x0f));
+}
+
+static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 ramp;
+
+ mutex_lock(&data->update_lock);
+ ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);
+ ramp = (ramp & 0xf0) | (LM93_RAMP_TO_REG(val) & 0x0f);
+ lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static DEVICE_ATTR(pwm_auto_vrdhot_ramp, S_IRUGO | S_IWUSR,
+ show_pwm_auto_vrdhot_ramp,
+ store_pwm_auto_vrdhot_ramp);
+
+static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_VID_FROM_REG(data->vid[nr]));
+}
+
+static SENSOR_DEVICE_ATTR(vid1, S_IRUGO, show_vid, NULL, 0);
+static SENSOR_DEVICE_ATTR(vid2, S_IRUGO, show_vid, NULL, 1);
+
+static ssize_t show_prochot(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",data->block4[nr].cur);
+}
+
+static SENSOR_DEVICE_ATTR(prochot1, S_IRUGO, show_prochot, NULL, 0);
+static SENSOR_DEVICE_ATTR(prochot2, S_IRUGO, show_prochot, NULL, 1);
+
+static ssize_t show_prochot_avg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",data->block4[nr].avg);
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_avg, S_IRUGO, show_prochot_avg, NULL, 0);
+static SENSOR_DEVICE_ATTR(prochot2_avg, S_IRUGO, show_prochot_avg, NULL, 1);
+
+static ssize_t show_prochot_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",data->prochot_max[nr]);
+}
+
+static ssize_t store_prochot_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->prochot_max[nr] = LM93_PROCHOT_TO_REG(val);
+ lm93_write_byte(client, LM93_REG_PROCHOT_MAX(nr),
+ data->prochot_max[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_max, S_IWUSR | S_IRUGO,
+ show_prochot_max, store_prochot_max, 0);
+static SENSOR_DEVICE_ATTR(prochot2_max, S_IWUSR | S_IRUGO,
+ show_prochot_max, store_prochot_max, 1);
+
+static const u8 prochot_override_mask[] = { 0x80, 0x40 };
+
+static ssize_t show_prochot_override(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",
+ (data->prochot_override & prochot_override_mask[nr]) ? 1 : 0);
+}
+
+static ssize_t store_prochot_override(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ if (val)
+ data->prochot_override |= prochot_override_mask[nr];
+ else
+ data->prochot_override &= (~prochot_override_mask[nr]);
+ lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
+ data->prochot_override);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_override, S_IWUSR | S_IRUGO,
+ show_prochot_override, store_prochot_override, 0);
+static SENSOR_DEVICE_ATTR(prochot2_override, S_IWUSR | S_IRUGO,
+ show_prochot_override, store_prochot_override, 1);
+
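+/* the PROCHOT interval register packs the prochot2 interval in its upper
+ nibble and the prochot1 interval in its lower nibble */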
+static ssize_t show_prochot_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
+ u8 tmp;
+ if (nr==1)
+ tmp = (data->prochot_interval & 0xf0) >> 4;
+ else
+ tmp = data->prochot_interval & 0x0f;
+ return sprintf(buf,"%d\n",LM93_INTERVAL_FROM_REG(tmp));
+}
+
+static ssize_t store_prochot_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+ u8 tmp;
+
+ mutex_lock(&data->update_lock);
+ tmp = lm93_read_byte(client, LM93_REG_PROCHOT_INTERVAL);
+ if (nr==1)
+ tmp = (tmp & 0x0f) | (LM93_INTERVAL_TO_REG(val) << 4);
+ else
+ tmp = (tmp & 0xf0) | LM93_INTERVAL_TO_REG(val);
+ data->prochot_interval = tmp;
+ lm93_write_byte(client, LM93_REG_PROCHOT_INTERVAL, tmp);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO,
+ show_prochot_interval, store_prochot_interval, 0);
+static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO,
+ show_prochot_interval, store_prochot_interval, 1);
+
+static ssize_t show_prochot_override_duty_cycle(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",data->prochot_override & 0x0f);
+}
+
+static ssize_t store_prochot_override_duty_cycle(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ data->prochot_override = (data->prochot_override & 0xf0) |
+ SENSORS_LIMIT(val, 0, 15);
+ lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
+ data->prochot_override);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static DEVICE_ATTR(prochot_override_duty_cycle, S_IRUGO | S_IWUSR,
+ show_prochot_override_duty_cycle,
+ store_prochot_override_duty_cycle);
+
+static ssize_t show_prochot_short(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",(data->config & 0x10) ? 1 : 0);
+}
+
+static ssize_t store_prochot_short(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm93_data *data = i2c_get_clientdata(client);
+ u32 val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+ if (val)
+ data->config |= 0x10;
+ else
+ data->config &= ~0x10;
+ lm93_write_byte(client, LM93_REG_CONFIG, data->config);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static DEVICE_ATTR(prochot_short, S_IRUGO | S_IWUSR,
+ show_prochot_short, store_prochot_short);
+
+static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int nr = (to_sensor_dev_attr(attr))->index;
+ struct lm93_data *data = lm93_update_device(dev);
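+ /* #VRDHOT1 and #VRDHOT2 are reported in bits 4 and 5 of host status 1 */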
+ return sprintf(buf,"%d\n",
+ data->block1.host_status_1 & (1 << (nr+4)) ? 1 : 0);
+}
+
+static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0);
+static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1);
+
+static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_GPI_FROM_REG(data->gpi));
+}
+
+static DEVICE_ATTR(gpio, S_IRUGO, show_gpio, NULL);
+
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm93_data *data = lm93_update_device(dev);
+ return sprintf(buf,"%d\n",LM93_ALARMS_FROM_REG(data->block1));
+}
+
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+static struct attribute *lm93_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_in10_input.dev_attr.attr,
+ &sensor_dev_attr_in11_input.dev_attr.attr,
+ &sensor_dev_attr_in12_input.dev_attr.attr,
+ &sensor_dev_attr_in13_input.dev_attr.attr,
+ &sensor_dev_attr_in14_input.dev_attr.attr,
+ &sensor_dev_attr_in15_input.dev_attr.attr,
+ &sensor_dev_attr_in16_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in5_min.dev_attr.attr,
+ &sensor_dev_attr_in6_min.dev_attr.attr,
+ &sensor_dev_attr_in7_min.dev_attr.attr,
+ &sensor_dev_attr_in8_min.dev_attr.attr,
+ &sensor_dev_attr_in9_min.dev_attr.attr,
+ &sensor_dev_attr_in10_min.dev_attr.attr,
+ &sensor_dev_attr_in11_min.dev_attr.attr,
+ &sensor_dev_attr_in12_min.dev_attr.attr,
+ &sensor_dev_attr_in13_min.dev_attr.attr,
+ &sensor_dev_attr_in14_min.dev_attr.attr,
+ &sensor_dev_attr_in15_min.dev_attr.attr,
+ &sensor_dev_attr_in16_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in5_max.dev_attr.attr,
+ &sensor_dev_attr_in6_max.dev_attr.attr,
+ &sensor_dev_attr_in7_max.dev_attr.attr,
+ &sensor_dev_attr_in8_max.dev_attr.attr,
+ &sensor_dev_attr_in9_max.dev_attr.attr,
+ &sensor_dev_attr_in10_max.dev_attr.attr,
+ &sensor_dev_attr_in11_max.dev_attr.attr,
+ &sensor_dev_attr_in12_max.dev_attr.attr,
+ &sensor_dev_attr_in13_max.dev_attr.attr,
+ &sensor_dev_attr_in14_max.dev_attr.attr,
+ &sensor_dev_attr_in15_max.dev_attr.attr,
+ &sensor_dev_attr_in16_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_base.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_base.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_base.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_boost.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_boost.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_boost.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_boost_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_boost_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_boost_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset1.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset2.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset3.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset4.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset5.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset6.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset7.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset8.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset9.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset10.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset11.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset12.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset1.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset2.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset3.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset4.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset5.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset6.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset7.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset8.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset9.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset10.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset11.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset12.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset1.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset2.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset3.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset4.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset5.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset6.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset7.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset8.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset9.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset10.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset11.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset12.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_pwm_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_pwm_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_pwm_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_offset_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_offset_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_offset_hyst.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan3_min.dev_attr.attr,
+ &sensor_dev_attr_fan4_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_smart_tach.dev_attr.attr,
+ &sensor_dev_attr_fan2_smart_tach.dev_attr.attr,
+ &sensor_dev_attr_fan3_smart_tach.dev_attr.attr,
+ &sensor_dev_attr_fan4_smart_tach.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm2_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_channels.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_channels.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_spinup_min.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_spinup_min.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_spinup_time.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_spinup_time.dev_attr.attr,
+ &dev_attr_pwm_auto_prochot_ramp.attr,
+ &dev_attr_pwm_auto_vrdhot_ramp.attr,
+ &sensor_dev_attr_vid1.dev_attr.attr,
+ &sensor_dev_attr_vid2.dev_attr.attr,
+ &sensor_dev_attr_prochot1.dev_attr.attr,
+ &sensor_dev_attr_prochot2.dev_attr.attr,
+ &sensor_dev_attr_prochot1_avg.dev_attr.attr,
+ &sensor_dev_attr_prochot2_avg.dev_attr.attr,
+ &sensor_dev_attr_prochot1_max.dev_attr.attr,
+ &sensor_dev_attr_prochot2_max.dev_attr.attr,
+ &sensor_dev_attr_prochot1_override.dev_attr.attr,
+ &sensor_dev_attr_prochot2_override.dev_attr.attr,
+ &sensor_dev_attr_prochot1_interval.dev_attr.attr,
+ &sensor_dev_attr_prochot2_interval.dev_attr.attr,
+ &dev_attr_prochot_override_duty_cycle.attr,
+ &dev_attr_prochot_short.attr,
+ &sensor_dev_attr_vrdhot1.dev_attr.attr,
+ &sensor_dev_attr_vrdhot2.dev_attr.attr,
+ &dev_attr_gpio.attr,
+ &dev_attr_alarms.attr,
+ NULL
+};
+
+static struct attribute_group lm93_attr_grp = {
+ .attrs = lm93_attrs,
+};
+
+static void lm93_init_client(struct i2c_client *client)
+{
+ int i;
+ u8 reg;
+
+ /* configure VID pin input thresholds */
+ reg = lm93_read_byte(client, LM93_REG_GPI_VID_CTL);
+ lm93_write_byte(client, LM93_REG_GPI_VID_CTL,
+ reg | (vid_agtl ? 0x03 : 0x00));
+
+ if (init) {
+ /* enable #ALERT pin */
+ reg = lm93_read_byte(client, LM93_REG_CONFIG);
+ lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x08);
+
+ /* enable ASF mode for BMC status registers */
+ reg = lm93_read_byte(client, LM93_REG_STATUS_CONTROL);
+ lm93_write_byte(client, LM93_REG_STATUS_CONTROL, reg | 0x02);
+
+ /* set sleep state to S0 */
+ lm93_write_byte(client, LM93_REG_SLEEP_CONTROL, 0);
+
+ /* unmask #VRDHOT and dynamic VCCP (if nec) error events */
+ reg = lm93_read_byte(client, LM93_REG_MISC_ERR_MASK);
+ reg &= ~0x03;
+ reg &= ~(vccp_limit_type[0] ? 0x10 : 0);
+ reg &= ~(vccp_limit_type[1] ? 0x20 : 0);
+ lm93_write_byte(client, LM93_REG_MISC_ERR_MASK, reg);
+ }
+
+ /* start monitoring */
+ reg = lm93_read_byte(client, LM93_REG_CONFIG);
+ lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x01);
+
+ /* spin until ready */
+ for (i=0; i<20; i++) {
+ msleep(10);
+ if ((lm93_read_byte(client, LM93_REG_CONFIG) & 0x80) == 0x80)
+ return;
+ }
+
+ dev_warn(&client->dev,"timed out waiting for sensor "
+ "chip to signal ready!\n");
+}
+
+static int lm93_detect(struct i2c_adapter *adapter, int address, int kind)
+{
+ struct lm93_data *data;
+ struct i2c_client *client;
+
+ int err = -ENODEV, func;
+ void (*update)(struct lm93_data *, struct i2c_client *);
+
+ /* choose update routine based on bus capabilities */
+ func = i2c_get_functionality(adapter);
+ if ( ((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) &&
+ (!disable_block) ) {
+ dev_dbg(&adapter->dev,"using SMBus block data transactions\n");
+ update = lm93_update_client_full;
+ } else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) {
+ dev_dbg(&adapter->dev,"disabled SMBus block data "
+ "transactions\n");
+ update = lm93_update_client_min;
+ } else {
+ dev_dbg(&adapter->dev,"detect failed, "
+ "smbus byte and/or word data not supported!\n");
+ goto err_out;
+ }
+
+ /* OK. For now, we presume we have a valid client. We now create the
+ client structure, even though we cannot fill it completely yet.
+ But it allows us to access lm93_read_byte. */
+
+ if ( !(data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL))) {
+ dev_dbg(&adapter->dev,"out of memory!\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ client = &data->client;
+ i2c_set_clientdata(client, data);
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &lm93_driver;
+
+ /* detection */
+ if (kind < 0) {
+ int mfr = lm93_read_byte(client, LM93_REG_MFR_ID);
+
+ if (mfr != 0x01) {
+ dev_dbg(&adapter->dev,"detect failed, "
+ "bad manufacturer id 0x%02x!\n", mfr);
+ goto err_free;
+ }
+ }
+
+ if (kind <= 0) {
+ int ver = lm93_read_byte(client, LM93_REG_VER);
+
+ if ((ver == LM93_MFR_ID) || (ver == LM93_MFR_ID_PROTOTYPE)) {
+ kind = lm93;
+ } else {
+ dev_dbg(&adapter->dev,"detect failed, "
+ "bad version id 0x%02x!\n", ver);
+ if (kind == 0)
+ dev_dbg(&adapter->dev,
+ "(ignored 'force' parameter)\n");
+ goto err_free;
+ }
+ }
+
+ /* fill in remaining client fields */
+ strlcpy(client->name, "lm93", I2C_NAME_SIZE);
+ dev_dbg(&adapter->dev,"loading %s at %d,0x%02x\n",
+ client->name, i2c_adapter_id(client->adapter),
+ client->addr);
+
+ /* housekeeping */
+ data->valid = 0;
+ data->update = update;
+ mutex_init(&data->update_lock);
+
+ /* tell the I2C layer a new client has arrived */
+ if ((err = i2c_attach_client(client)))
+ goto err_free;
+
+ /* initialize the chip */
+ lm93_init_client(client);
+
+ err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp);
+ if (err)
+ goto err_detach;
+
+ /* Register hwmon driver class */
+ data->class_dev = hwmon_device_register(&client->dev);
+ if ( !IS_ERR(data->class_dev))
+ return 0;
+
+ err = PTR_ERR(data->class_dev);
+ dev_err(&client->dev, "error registering hwmon device.\n");
+ sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
+err_detach:
+ i2c_detach_client(client);
+err_free:
+ kfree(data);
+err_out:
+ return err;
+}
+
+/* This function is called when:
+ * lm93_driver is inserted (when this module is loaded), for each
+ available adapter
+ * when a new adapter is inserted (and lm93_driver is still present) */
+static int lm93_attach_adapter(struct i2c_adapter *adapter)
+{
+ return i2c_probe(adapter, &addr_data, lm93_detect);
+}
+
+static int lm93_detach_client(struct i2c_client *client)
+{
+ struct lm93_data *data = i2c_get_clientdata(client);
+ int err = 0;
+
+ hwmon_device_unregister(data->class_dev);
+ sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
+
+ err = i2c_detach_client(client);
+ if (!err)
+ kfree(data);
+ return err;
+}
+
+static struct i2c_driver lm93_driver = {
+ .driver = {
+ .name = "lm93",
+ },
+ .attach_adapter = lm93_attach_adapter,
+ .detach_client = lm93_detach_client,
+};
+
+static int __init lm93_init(void)
+{
+ return i2c_add_driver(&lm93_driver);
+}
+
+static void __exit lm93_exit(void)
+{
+ i2c_del_driver(&lm93_driver);
+}
+
+MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, "
+ "Hans J. Koch <hjk@linutronix.de");
+MODULE_DESCRIPTION("LM93 driver");
+MODULE_LICENSE("GPL");
+
+module_init(lm93_init);
+module_exit(lm93_exit);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index c8a21be09d87..cb72526c346a 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1,7 +1,7 @@
/*
* pc87360.c - Part of lm_sensors, Linux kernel modules
* for hardware monitoring
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2007 Jean Delvare <khali@linux-fr.org>
*
* Copied from smsc47m1.c:
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
@@ -37,8 +37,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
@@ -47,12 +46,10 @@
#include <asm/io.h>
static u8 devid;
-static unsigned short address;
+static struct platform_device *pdev;
static unsigned short extra_isa[3];
static u8 confreg[4];
-enum chips { any_chip, pc87360, pc87363, pc87364, pc87365, pc87366 };
-
static int init = 1;
module_param(init, int, 0);
MODULE_PARM_DESC(init,
@@ -178,11 +175,11 @@ static inline u8 PWM_TO_REG(int val, int inv)
((val) + 500) / 1000)
/*
- * Client data (each client gets its own)
+ * Device data
*/
struct pc87360_data {
- struct i2c_client client;
+ const char *name;
struct class_device *class_dev;
struct mutex lock;
struct mutex update_lock;
@@ -222,27 +219,28 @@ struct pc87360_data {
* Functions declaration
*/
-static int pc87360_detect(struct i2c_adapter *adapter);
-static int pc87360_detach_client(struct i2c_client *client);
+static int pc87360_probe(struct platform_device *pdev);
+static int pc87360_remove(struct platform_device *pdev);
static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
u8 reg);
static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
u8 reg, u8 value);
-static void pc87360_init_client(struct i2c_client *client, int use_thermistors);
+static void pc87360_init_device(struct platform_device *pdev,
+ int use_thermistors);
static struct pc87360_data *pc87360_update_device(struct device *dev);
/*
- * Driver data (common to all clients)
+ * Driver data
*/
-static struct i2c_driver pc87360_driver = {
+static struct platform_driver pc87360_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "pc87360",
},
- .attach_adapter = pc87360_detect,
- .detach_client = pc87360_detach_client,
+ .probe = pc87360_probe,
+ .remove = __devexit_p(pc87360_remove),
};
/*
@@ -281,8 +279,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *devattr,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long fan_min = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -347,8 +344,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, con
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -410,8 +406,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *devattr,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -425,8 +420,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *devattr,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -511,8 +505,7 @@ static ssize_t show_vrm(struct device *dev, struct device_attribute *attr, char
}
static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
data->vrm = simple_strtoul(buf, NULL, 10);
return count;
}
@@ -584,8 +577,7 @@ static ssize_t set_therm_min(struct device *dev, struct device_attribute *devatt
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -599,8 +591,7 @@ static ssize_t set_therm_max(struct device *dev, struct device_attribute *devatt
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -614,8 +605,7 @@ static ssize_t set_therm_crit(struct device *dev, struct device_attribute *devat
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -715,8 +705,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *devattr
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -730,8 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *devattr
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -745,8 +733,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *devatt
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
@@ -818,6 +805,14 @@ static const struct attribute_group pc8736x_temp_group = {
.attrs = pc8736x_temp_attr_array,
};
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
/*
* Device detection, registration and update
*/
@@ -912,28 +907,18 @@ static int __init pc87360_find(int sioaddr, u8 *devid, unsigned short *addresses
return 0;
}
-static int pc87360_detect(struct i2c_adapter *adapter)
+static int __devinit pc87360_probe(struct platform_device *pdev)
{
int i;
- struct i2c_client *client;
struct pc87360_data *data;
int err = 0;
const char *name = "pc87360";
int use_thermistors = 0;
- struct device *dev;
+ struct device *dev = &pdev->dev;
if (!(data = kzalloc(sizeof(struct pc87360_data), GFP_KERNEL)))
return -ENOMEM;
- client = &data->client;
- dev = &client->dev;
- i2c_set_clientdata(client, data);
- client->addr = address;
- mutex_init(&data->lock);
- client->adapter = adapter;
- client->driver = &pc87360_driver;
- client->flags = 0;
-
data->fannr = 2;
data->innr = 0;
data->tempnr = 0;
@@ -960,15 +945,17 @@ static int pc87360_detect(struct i2c_adapter *adapter)
break;
}
- strlcpy(client->name, name, sizeof(client->name));
+ data->name = name;
data->valid = 0;
+ mutex_init(&data->lock);
mutex_init(&data->update_lock);
+ platform_set_drvdata(pdev, data);
for (i = 0; i < 3; i++) {
if (((data->address[i] = extra_isa[i]))
&& !request_region(extra_isa[i], PC87360_EXTENT,
pc87360_driver.driver.name)) {
- dev_err(&client->dev, "Region 0x%x-0x%x already "
+ dev_err(dev, "Region 0x%x-0x%x already "
"in use!\n", extra_isa[i],
extra_isa[i]+PC87360_EXTENT-1);
for (i--; i >= 0; i--)
@@ -982,9 +969,6 @@ static int pc87360_detect(struct i2c_adapter *adapter)
if (data->fannr)
data->fan_conf = confreg[0] | (confreg[1] << 8);
- if ((err = i2c_attach_client(client)))
- goto ERROR2;
-
/* Use the correct reference voltage
Unless both the VLM and the TMS logical devices agree to
use an external Vref, the internal one is used. */
@@ -996,7 +980,7 @@ static int pc87360_detect(struct i2c_adapter *adapter)
PC87365_REG_TEMP_CONFIG);
}
data->in_vref = (i&0x02) ? 3025 : 2966;
- dev_dbg(&client->dev, "Using %s reference voltage\n",
+ dev_dbg(dev, "Using %s reference voltage\n",
(i&0x02) ? "external" : "internal");
data->vid_conf = confreg[3];
@@ -1015,18 +999,18 @@ static int pc87360_detect(struct i2c_adapter *adapter)
if (devid == 0xe9 && data->address[1]) /* PC87366 */
use_thermistors = confreg[2] & 0x40;
- pc87360_init_client(client, use_thermistors);
+ pc87360_init_device(pdev, use_thermistors);
}
/* Register all-or-nothing sysfs groups */
if (data->innr &&
- (err = sysfs_create_group(&client->dev.kobj,
+ (err = sysfs_create_group(&dev->kobj,
&pc8736x_vin_group)))
goto ERROR3;
if (data->innr == 14 &&
- (err = sysfs_create_group(&client->dev.kobj,
+ (err = sysfs_create_group(&dev->kobj,
&pc8736x_therm_group)))
goto ERROR3;
@@ -1067,7 +1051,10 @@ static int pc87360_detect(struct i2c_adapter *adapter)
goto ERROR3;
}
- data->class_dev = hwmon_device_register(&client->dev);
+ if ((err = device_create_file(dev, &dev_attr_name)))
+ goto ERROR3;
+
+ data->class_dev = hwmon_device_register(dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
goto ERROR3;
@@ -1075,14 +1062,12 @@ static int pc87360_detect(struct i2c_adapter *adapter)
return 0;
ERROR3:
+ device_remove_file(dev, &dev_attr_name);
/* can still remove groups whose members were added individually */
- sysfs_remove_group(&client->dev.kobj, &pc8736x_temp_group);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_fan_group);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_therm_group);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_vin_group);
-
- i2c_detach_client(client);
-ERROR2:
+ sysfs_remove_group(&dev->kobj, &pc8736x_temp_group);
+ sysfs_remove_group(&dev->kobj, &pc8736x_fan_group);
+ sysfs_remove_group(&dev->kobj, &pc8736x_therm_group);
+ sysfs_remove_group(&dev->kobj, &pc8736x_vin_group);
for (i = 0; i < 3; i++) {
if (data->address[i]) {
release_region(data->address[i], PC87360_EXTENT);
@@ -1093,20 +1078,18 @@ ERROR1:
return err;
}
-static int pc87360_detach_client(struct i2c_client *client)
+static int __devexit pc87360_remove(struct platform_device *pdev)
{
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = platform_get_drvdata(pdev);
int i;
hwmon_device_unregister(data->class_dev);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_temp_group);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_fan_group);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_therm_group);
- sysfs_remove_group(&client->dev.kobj, &pc8736x_vin_group);
-
- if ((i = i2c_detach_client(client)))
- return i;
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ sysfs_remove_group(&pdev->dev.kobj, &pc8736x_temp_group);
+ sysfs_remove_group(&pdev->dev.kobj, &pc8736x_fan_group);
+ sysfs_remove_group(&pdev->dev.kobj, &pc8736x_therm_group);
+ sysfs_remove_group(&pdev->dev.kobj, &pc8736x_vin_group);
for (i = 0; i < 3; i++) {
if (data->address[i]) {
@@ -1144,9 +1127,10 @@ static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
mutex_unlock(&(data->lock));
}
-static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
+static void pc87360_init_device(struct platform_device *pdev,
+ int use_thermistors)
{
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = platform_get_drvdata(pdev);
int i, nr;
const u8 init_in[14] = { 2, 2, 2, 2, 2, 2, 2, 1, 1, 3, 1, 2, 2, 2 };
const u8 init_temp[3] = { 2, 2, 1 };
@@ -1155,7 +1139,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
if (init >= 2 && data->innr) {
reg = pc87360_read_value(data, LD_IN, NO_BANK,
PC87365_REG_IN_CONVRATE);
- dev_info(&client->dev, "VLM conversion set to "
+ dev_info(&pdev->dev, "VLM conversion set to "
"1s period, 160us delay\n");
pc87360_write_value(data, LD_IN, NO_BANK,
PC87365_REG_IN_CONVRATE,
@@ -1169,7 +1153,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
reg = pc87360_read_value(data, LD_IN, i,
PC87365_REG_IN_STATUS);
if (!(reg & 0x01)) {
- dev_dbg(&client->dev, "Forcibly "
+ dev_dbg(&pdev->dev, "Forcibly "
"enabling in%d\n", i);
pc87360_write_value(data, LD_IN, i,
PC87365_REG_IN_STATUS,
@@ -1193,7 +1177,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
reg = pc87360_read_value(data, LD_TEMP, i,
PC87365_REG_TEMP_STATUS);
if (!(reg & 0x01)) {
- dev_dbg(&client->dev, "Forcibly "
+ dev_dbg(&pdev->dev, "Forcibly "
"enabling temp%d\n", i+1);
pc87360_write_value(data, LD_TEMP, i,
PC87365_REG_TEMP_STATUS,
@@ -1210,7 +1194,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
reg = pc87360_read_value(data, LD_TEMP,
(i-11)/2, PC87365_REG_TEMP_STATUS);
if (reg & 0x01) {
- dev_dbg(&client->dev, "Skipping "
+ dev_dbg(&pdev->dev, "Skipping "
"temp%d, pin already in use "
"by temp%d\n", i-7, (i-11)/2);
continue;
@@ -1220,7 +1204,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
reg = pc87360_read_value(data, LD_IN, i,
PC87365_REG_IN_STATUS);
if (!(reg & 0x01)) {
- dev_dbg(&client->dev, "Forcibly "
+ dev_dbg(&pdev->dev, "Forcibly "
"enabling temp%d\n", i-7);
pc87360_write_value(data, LD_IN, i,
PC87365_REG_TEMP_STATUS,
@@ -1234,7 +1218,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
reg = pc87360_read_value(data, LD_IN, NO_BANK,
PC87365_REG_IN_CONFIG);
if (reg & 0x01) {
- dev_dbg(&client->dev, "Forcibly "
+ dev_dbg(&pdev->dev, "Forcibly "
"enabling monitoring (VLM)\n");
pc87360_write_value(data, LD_IN, NO_BANK,
PC87365_REG_IN_CONFIG,
@@ -1246,7 +1230,7 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
PC87365_REG_TEMP_CONFIG);
if (reg & 0x01) {
- dev_dbg(&client->dev, "Forcibly enabling "
+ dev_dbg(&pdev->dev, "Forcibly enabling "
"monitoring (TMS)\n");
pc87360_write_value(data, LD_TEMP, NO_BANK,
PC87365_REG_TEMP_CONFIG,
@@ -1268,9 +1252,9 @@ static void pc87360_init_client(struct i2c_client *client, int use_thermistors)
}
}
-static void pc87360_autodiv(struct i2c_client *client, int nr)
+static void pc87360_autodiv(struct device *dev, int nr)
{
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
u8 old_min = data->fan_min[nr];
/* Increase clock divider if needed and possible */
@@ -1280,7 +1264,7 @@ static void pc87360_autodiv(struct i2c_client *client, int nr)
data->fan_status[nr] += 0x20;
data->fan_min[nr] >>= 1;
data->fan[nr] >>= 1;
- dev_dbg(&client->dev, "Increasing "
+ dev_dbg(dev, "Increasing "
"clock divider to %d for fan %d\n",
FAN_DIV_FROM_REG(data->fan_status[nr]), nr+1);
}
@@ -1292,7 +1276,7 @@ static void pc87360_autodiv(struct i2c_client *client, int nr)
data->fan_status[nr] -= 0x20;
data->fan_min[nr] <<= 1;
data->fan[nr] <<= 1;
- dev_dbg(&client->dev, "Decreasing "
+ dev_dbg(dev, "Decreasing "
"clock divider to %d for fan %d\n",
FAN_DIV_FROM_REG(data->fan_status[nr]),
nr+1);
@@ -1309,14 +1293,13 @@ static void pc87360_autodiv(struct i2c_client *client, int nr)
static struct pc87360_data *pc87360_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct pc87360_data *data = i2c_get_clientdata(client);
+ struct pc87360_data *data = dev_get_drvdata(dev);
u8 i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
- dev_dbg(&client->dev, "Data update\n");
+ dev_dbg(dev, "Data update\n");
/* Fans */
for (i = 0; i < data->fannr; i++) {
@@ -1330,7 +1313,7 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
LD_FAN, NO_BANK,
PC87360_REG_FAN_MIN(i));
/* Change clock divider if needed */
- pc87360_autodiv(client, i);
+ pc87360_autodiv(dev, i);
/* Clear bits and write new divider */
pc87360_write_value(data, LD_FAN, NO_BANK,
PC87360_REG_FAN_STATUS(i),
@@ -1418,9 +1401,53 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
return data;
}
+static int __init pc87360_device_add(unsigned short address)
+{
+ struct resource res = {
+ .name = "pc87360",
+ .flags = IORESOURCE_IO,
+ };
+ int err, i;
+
+ pdev = platform_device_alloc("pc87360", address);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR "pc87360: Device allocation failed\n");
+ goto exit;
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (!extra_isa[i])
+ continue;
+ res.start = extra_isa[i];
+ res.end = extra_isa[i] + PC87360_EXTENT - 1;
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR "pc87360: Device resource[%d] "
+ "addition failed (%d)\n", i, err);
+ goto exit_device_put;
+ }
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR "pc87360: Device addition failed (%d)\n",
+ err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
static int __init pc87360_init(void)
{
- int i;
+ int err, i;
+ unsigned short address = 0;
if (pc87360_find(0x2e, &devid, extra_isa)
&& pc87360_find(0x4e, &devid, extra_isa)) {
@@ -1443,12 +1470,27 @@ static int __init pc87360_init(void)
return -ENODEV;
}
- return i2c_isa_add_driver(&pc87360_driver);
+ err = platform_driver_register(&pc87360_driver);
+ if (err)
+ goto exit;
+
+ /* Sets global pdev as a side effect */
+ err = pc87360_device_add(address);
+ if (err)
+ goto exit_driver;
+
+ return 0;
+
+ exit_driver:
+ platform_driver_unregister(&pc87360_driver);
+ exit:
+ return err;
}
static void __exit pc87360_exit(void)
{
- i2c_isa_del_driver(&pc87360_driver);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&pc87360_driver);
}
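The pc87360 hunks above drop the i2c-isa attach/detach model: the driver becomes a platform_driver and the module instantiates a matching platform device itself at load time. A minimal sketch of that registration pattern, assuming a hypothetical "example_hwmon" driver (the names, the 0x290 base address and the I/O extent are illustrative, not taken from the patch):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>

#define EXAMPLE_EXTENT	8		/* illustrative I/O region length */

static struct platform_device *pdev;	/* file-scope, set as a side effect, as in the patch */

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* a real probe requests the region, allocates data, creates sysfs files */
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example_hwmon",
	},
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + EXAMPLE_EXTENT - 1,
		.name	= "example_hwmon",
		.flags	= IORESOURCE_IO,
	};
	int err;

	pdev = platform_device_alloc("example_hwmon", address);
	if (!pdev)
		return -ENOMEM;

	err = platform_device_add_resources(pdev, &res, 1);
	if (err)
		goto exit_device_put;

	err = platform_device_add(pdev);
	if (err)
		goto exit_device_put;
	return 0;

exit_device_put:
	platform_device_put(pdev);
	return err;
}

static int __init example_init(void)
{
	int err = platform_driver_register(&example_driver);
	if (err)
		return err;

	err = example_device_add(0x290);	/* illustrative base address */
	if (err)
		platform_driver_unregister(&example_driver);
	return err;
}

static void __exit example_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

platform_device_put() is only used on the error path before platform_device_add() has succeeded; once the device is added, teardown goes through platform_device_unregister(), as in pc87360_exit() above.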
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 29354fa26f81..2915bc4ad0d5 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -484,7 +484,6 @@ static int __devexit pc87427_remove(struct platform_device *pdev)
struct resource *res;
int i;
- platform_set_drvdata(pdev, NULL);
hwmon_device_unregister(data->class_dev);
device_remove_file(&pdev->dev, &dev_attr_name);
for (i = 0; i < 8; i++) {
@@ -492,6 +491,7 @@ static int __devexit pc87427_remove(struct platform_device *pdev)
continue;
sysfs_remove_group(&pdev->dev.kobj, &pc87427_group_fan[i]);
}
+ platform_set_drvdata(pdev, NULL);
kfree(data);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
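The pc87427 hunk is purely about teardown ordering: the driver data pointer must stay reachable through dev_get_drvdata() until every sysfs attribute that could dereference it has been removed, and only then may it be cleared and freed. A sketch of the resulting remove() ordering, with example_data, example_group and EXAMPLE_EXTENT as placeholders rather than symbols from the patch:

static int __devexit example_remove(struct platform_device *pdev)
{
	struct example_data *data = platform_get_drvdata(pdev);
	struct resource *res;

	hwmon_device_unregister(data->class_dev);
	/* attribute callbacks dereference drvdata, so remove them first */
	sysfs_remove_group(&pdev->dev.kobj, &example_group);
	platform_set_drvdata(pdev, NULL);	/* clear only once no reader is left */
	kfree(data);

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	release_region(res->start, EXAMPLE_EXTENT);
	return 0;
}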
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 3f400263fc0f..83321b28cf0e 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -54,9 +54,9 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/pci.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
@@ -72,17 +72,13 @@ module_param(force_addr, ushort, 0);
MODULE_PARM_DESC(force_addr,
"Initialize the base address of the sensors");
-/* Device address
- Note that we can't determine the ISA address until we have initialized
- our module */
-static unsigned short address;
+static struct platform_device *pdev;
/* Many SIS5595 constants specified below */
/* Length of ISA address segment */
#define SIS5595_EXTENT 8
/* PCI Config Registers */
-#define SIS5595_REVISION_REG 0x08
#define SIS5595_BASE_REG 0x68
#define SIS5595_PIN_REG 0x7A
#define SIS5595_ENABLE_REG 0x7B
@@ -165,7 +161,8 @@ static inline u8 DIV_TO_REG(int val)
/* For each registered chip, we need to keep some data in memory.
The structure is dynamically allocated. */
struct sis5595_data {
- struct i2c_client client;
+ unsigned short addr;
+ const char *name;
struct class_device *class_dev;
struct mutex lock;
@@ -189,102 +186,88 @@ struct sis5595_data {
static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */
-static int sis5595_detect(struct i2c_adapter *adapter);
-static int sis5595_detach_client(struct i2c_client *client);
+static int sis5595_probe(struct platform_device *pdev);
+static int sis5595_remove(struct platform_device *pdev);
-static int sis5595_read_value(struct i2c_client *client, u8 reg);
-static int sis5595_write_value(struct i2c_client *client, u8 reg, u8 value);
+static int sis5595_read_value(struct sis5595_data *data, u8 reg);
+static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value);
static struct sis5595_data *sis5595_update_device(struct device *dev);
-static void sis5595_init_client(struct i2c_client *client);
+static void sis5595_init_device(struct sis5595_data *data);
-static struct i2c_driver sis5595_driver = {
+static struct platform_driver sis5595_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "sis5595",
},
- .attach_adapter = sis5595_detect,
- .detach_client = sis5595_detach_client,
+ .probe = sis5595_probe,
+ .remove = __devexit_p(sis5595_remove),
};
/* 4 Voltages */
-static ssize_t show_in(struct device *dev, char *buf, int nr)
+static ssize_t show_in(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, char *buf, int nr)
+static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr]));
}
-static ssize_t show_in_max(struct device *dev, char *buf, int nr)
+static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr]));
}
-static ssize_t set_in_min(struct device *dev, const char *buf,
- size_t count, int nr)
+static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_min[nr] = IN_TO_REG(val);
- sis5595_write_value(client, SIS5595_REG_IN_MIN(nr), data->in_min[nr]);
+ sis5595_write_value(data, SIS5595_REG_IN_MIN(nr), data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_in_max(struct device *dev, const char *buf,
- size_t count, int nr)
+static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_max[nr] = IN_TO_REG(val);
- sis5595_write_value(client, SIS5595_REG_IN_MAX(nr), data->in_max[nr]);
+ sis5595_write_value(data, SIS5595_REG_IN_MAX(nr), data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define show_in_offset(offset) \
-static ssize_t \
- show_in##offset (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_in(dev, buf, offset); \
-} \
-static DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in##offset, NULL); \
-static ssize_t \
- show_in##offset##_min (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_in_min(dev, buf, offset); \
-} \
-static ssize_t \
- show_in##offset##_max (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_in_max(dev, buf, offset); \
-} \
-static ssize_t set_in##offset##_min (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_in_min(dev, buf, count, offset); \
-} \
-static ssize_t set_in##offset##_max (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_in_max(dev, buf, count, offset); \
-} \
-static DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in##offset##_min, set_in##offset##_min); \
-static DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in##offset##_max, set_in##offset##_max);
+static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
+ show_in, NULL, offset); \
+static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
+ show_in_min, set_in_min, offset); \
+static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
+ show_in_max, set_in_max, offset);
show_in_offset(0);
show_in_offset(1);
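The sis5595 conversion above also switches from macro-generated per-channel wrapper functions to SENSOR_DEVICE_ATTR from <linux/hwmon-sysfs.h>, which stores the channel number next to the device_attribute so a single callback can serve every channel. A minimal sketch of the pattern; struct example_data and its in[] array are illustrative, not from the patch:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

struct example_data {
	u8 in[5];			/* raw voltage readings, illustrative */
};

static ssize_t show_in(struct device *dev, struct device_attribute *da,
		       char *buf)
{
	struct example_data *data = dev_get_drvdata(dev);
	int nr = to_sensor_dev_attr(da)->index;	/* channel stored in the attribute */

	return sprintf(buf, "%d\n", data->in[nr]);
}

/* one attribute per channel, all sharing the same callback */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1);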
@@ -307,13 +290,12 @@ static ssize_t show_temp_over(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_over(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_over = TEMP_TO_REG(val);
- sis5595_write_value(client, SIS5595_REG_TEMP_OVER, data->temp_over);
+ sis5595_write_value(data, SIS5595_REG_TEMP_OVER, data->temp_over);
mutex_unlock(&data->update_lock);
return count;
}
@@ -326,13 +308,12 @@ static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
long val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_hyst = TEMP_TO_REG(val);
- sis5595_write_value(client, SIS5595_REG_TEMP_HYST, data->temp_hyst);
+ sis5595_write_value(data, SIS5595_REG_TEMP_HYST, data->temp_hyst);
mutex_unlock(&data->update_lock);
return count;
}
@@ -344,37 +325,47 @@ static DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
show_temp_hyst, set_temp_hyst);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, char *buf, int nr)
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])) );
}
-static ssize_t show_fan_min(struct device *dev, char *buf, int nr)
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf,"%d\n", FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr])) );
}
-static ssize_t set_fan_min(struct device *dev, const char *buf,
- size_t count, int nr)
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- sis5595_write_value(client, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
+ sis5595_write_value(data, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
+static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+ char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]) );
}
@@ -382,11 +373,12 @@ static ssize_t show_fan_div(struct device *dev, char *buf, int nr)
determined in part by the fan divisor. This follows the principle of
least surprise; the user doesn't expect the fan minimum to change just
because the divisor changed. */
-static ssize_t set_fan_div(struct device *dev, const char *buf,
- size_t count, int nr)
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
unsigned long min;
unsigned long val = simple_strtoul(buf, NULL, 10);
int reg;
@@ -394,7 +386,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf,
mutex_lock(&data->update_lock);
min = FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr]));
- reg = sis5595_read_value(client, SIS5595_REG_FANDIV);
+ reg = sis5595_read_value(data, SIS5595_REG_FANDIV);
switch (val) {
case 1: data->fan_div[nr] = 0; break;
@@ -402,7 +394,7 @@ static ssize_t set_fan_div(struct device *dev, const char *buf,
case 4: data->fan_div[nr] = 2; break;
case 8: data->fan_div[nr] = 3; break;
default:
- dev_err(&client->dev, "fan_div value %ld not "
+ dev_err(dev, "fan_div value %ld not "
"supported. Choose one of 1, 2, 4 or 8!\n", val);
mutex_unlock(&data->update_lock);
return -EINVAL;
@@ -416,55 +408,25 @@ static ssize_t set_fan_div(struct device *dev, const char *buf,
reg = (reg & 0x3f) | (data->fan_div[nr] << 6);
break;
}
- sis5595_write_value(client, SIS5595_REG_FANDIV, reg);
+ sis5595_write_value(data, SIS5595_REG_FANDIV, reg);
data->fan_min[nr] =
FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
- sis5595_write_value(client, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
+ sis5595_write_value(data, SIS5595_REG_FAN_MIN(nr), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define show_fan_offset(offset) \
-static ssize_t show_fan_##offset (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_fan(dev, buf, offset - 1); \
-} \
-static ssize_t show_fan_##offset##_min (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_fan_min(dev, buf, offset - 1); \
-} \
-static ssize_t show_fan_##offset##_div (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_fan_div(dev, buf, offset - 1); \
-} \
-static ssize_t set_fan_##offset##_min (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_fan_min(dev, buf, count, offset - 1); \
-} \
-static DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan_##offset, NULL);\
-static DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_##offset##_min, set_fan_##offset##_min);
+static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
+ show_fan, NULL, offset - 1); \
+static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
+ show_fan_min, set_fan_min, offset - 1); \
+static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
+ show_fan_div, set_fan_div, offset - 1);
show_fan_offset(1);
show_fan_offset(2);
-static ssize_t set_fan_1_div(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count)
-{
- return set_fan_div(dev, buf, count, 0) ;
-}
-
-static ssize_t set_fan_2_div(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count)
-{
- return set_fan_div(dev, buf, count, 1) ;
-}
-static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
- show_fan_1_div, set_fan_1_div);
-static DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
- show_fan_2_div, set_fan_2_div);
-
/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -473,28 +435,37 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, ch
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
static struct attribute *sis5595_attributes[] = {
- &dev_attr_in0_input.attr,
- &dev_attr_in0_min.attr,
- &dev_attr_in0_max.attr,
- &dev_attr_in1_input.attr,
- &dev_attr_in1_min.attr,
- &dev_attr_in1_max.attr,
- &dev_attr_in2_input.attr,
- &dev_attr_in2_min.attr,
- &dev_attr_in2_max.attr,
- &dev_attr_in3_input.attr,
- &dev_attr_in3_min.attr,
- &dev_attr_in3_max.attr,
-
- &dev_attr_fan1_input.attr,
- &dev_attr_fan1_min.attr,
- &dev_attr_fan1_div.attr,
- &dev_attr_fan2_input.attr,
- &dev_attr_fan2_min.attr,
- &dev_attr_fan2_div.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_div.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_div.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_name.attr,
NULL
};
@@ -503,9 +474,9 @@ static const struct attribute_group sis5595_group = {
};
static struct attribute *sis5595_attributes_opt[] = {
- &dev_attr_in4_input.attr,
- &dev_attr_in4_min.attr,
- &dev_attr_in4_max.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
&dev_attr_temp1_input.attr,
&dev_attr_temp1_max.attr,
@@ -518,68 +489,35 @@ static const struct attribute_group sis5595_group_opt = {
};
/* This is called when the module is loaded */
-static int sis5595_detect(struct i2c_adapter *adapter)
+static int __devinit sis5595_probe(struct platform_device *pdev)
{
int err = 0;
int i;
- struct i2c_client *new_client;
struct sis5595_data *data;
+ struct resource *res;
char val;
- u16 a;
- if (force_addr)
- address = force_addr & ~(SIS5595_EXTENT - 1);
/* Reserve the ISA region */
- if (!request_region(address, SIS5595_EXTENT,
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!request_region(res->start, SIS5595_EXTENT,
sis5595_driver.driver.name)) {
err = -EBUSY;
goto exit;
}
- if (force_addr) {
- dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n", address);
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(s_bridge, SIS5595_BASE_REG, address))
- goto exit_release;
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(s_bridge, SIS5595_BASE_REG, &a))
- goto exit_release;
- if ((a & ~(SIS5595_EXTENT - 1)) != address)
- /* doesn't work for some chips? */
- goto exit_release;
- }
-
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_byte(s_bridge, SIS5595_ENABLE_REG, &val)) {
- goto exit_release;
- }
- if ((val & 0x80) == 0) {
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_byte(s_bridge, SIS5595_ENABLE_REG,
- val | 0x80))
- goto exit_release;
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_byte(s_bridge, SIS5595_ENABLE_REG, &val))
- goto exit_release;
- if ((val & 0x80) == 0)
- /* doesn't work for some chips! */
- goto exit_release;
- }
if (!(data = kzalloc(sizeof(struct sis5595_data), GFP_KERNEL))) {
err = -ENOMEM;
goto exit_release;
}
- new_client = &data->client;
- new_client->addr = address;
mutex_init(&data->lock);
- i2c_set_clientdata(new_client, data);
- new_client->adapter = adapter;
- new_client->driver = &sis5595_driver;
- new_client->flags = 0;
+ mutex_init(&data->update_lock);
+ data->addr = res->start;
+ data->name = "sis5595";
+ platform_set_drvdata(pdev, data);
/* Check revision and pin registers to determine whether 4 or 5 voltages */
- pci_read_config_byte(s_bridge, SIS5595_REVISION_REG, &(data->revision));
+ pci_read_config_byte(s_bridge, PCI_REVISION_ID, &data->revision);
/* 4 voltages, 1 temp */
data->maxins = 3;
if (data->revision >= REV2MIN) {
@@ -589,47 +527,37 @@ static int sis5595_detect(struct i2c_adapter *adapter)
data->maxins = 4;
}
- /* Fill in the remaining client fields and put it into the global list */
- strlcpy(new_client->name, "sis5595", I2C_NAME_SIZE);
-
- data->valid = 0;
- mutex_init(&data->update_lock);
-
- /* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(new_client)))
- goto exit_free;
-
/* Initialize the SIS5595 chip */
- sis5595_init_client(new_client);
+ sis5595_init_device(data);
/* A few vars need to be filled upon startup */
for (i = 0; i < 2; i++) {
- data->fan_min[i] = sis5595_read_value(new_client,
+ data->fan_min[i] = sis5595_read_value(data,
SIS5595_REG_FAN_MIN(i));
}
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &sis5595_group)))
- goto exit_detach;
+ if ((err = sysfs_create_group(&pdev->dev.kobj, &sis5595_group)))
+ goto exit_free;
if (data->maxins == 4) {
- if ((err = device_create_file(&new_client->dev,
- &dev_attr_in4_input))
- || (err = device_create_file(&new_client->dev,
- &dev_attr_in4_min))
- || (err = device_create_file(&new_client->dev,
- &dev_attr_in4_max)))
+ if ((err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_in4_input.dev_attr))
+ || (err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_in4_min.dev_attr))
+ || (err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_in4_max.dev_attr)))
goto exit_remove_files;
} else {
- if ((err = device_create_file(&new_client->dev,
+ if ((err = device_create_file(&pdev->dev,
&dev_attr_temp1_input))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(&pdev->dev,
&dev_attr_temp1_max))
- || (err = device_create_file(&new_client->dev,
+ || (err = device_create_file(&pdev->dev,
&dev_attr_temp1_max_hyst)))
goto exit_remove_files;
}
- data->class_dev = hwmon_device_register(&new_client->dev);
+ data->class_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
goto exit_remove_files;
@@ -638,32 +566,26 @@ static int sis5595_detect(struct i2c_adapter *adapter)
return 0;
exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &sis5595_group);
- sysfs_remove_group(&new_client->dev.kobj, &sis5595_group_opt);
-exit_detach:
- i2c_detach_client(new_client);
+ sysfs_remove_group(&pdev->dev.kobj, &sis5595_group);
+ sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_opt);
exit_free:
kfree(data);
exit_release:
- release_region(address, SIS5595_EXTENT);
+ release_region(res->start, SIS5595_EXTENT);
exit:
return err;
}
-static int sis5595_detach_client(struct i2c_client *client)
+static int __devexit sis5595_remove(struct platform_device *pdev)
{
- struct sis5595_data *data = i2c_get_clientdata(client);
- int err;
+ struct sis5595_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->class_dev);
- sysfs_remove_group(&client->dev.kobj, &sis5595_group);
- sysfs_remove_group(&client->dev.kobj, &sis5595_group_opt);
-
- if ((err = i2c_detach_client(client)))
- return err;
-
- release_region(client->addr, SIS5595_EXTENT);
+ sysfs_remove_group(&pdev->dev.kobj, &sis5595_group);
+ sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_opt);
+ release_region(data->addr, SIS5595_EXTENT);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
@@ -671,41 +593,37 @@ static int sis5595_detach_client(struct i2c_client *client)
/* ISA access must be locked explicitly. */
-static int sis5595_read_value(struct i2c_client *client, u8 reg)
+static int sis5595_read_value(struct sis5595_data *data, u8 reg)
{
int res;
- struct sis5595_data *data = i2c_get_clientdata(client);
mutex_lock(&data->lock);
- outb_p(reg, client->addr + SIS5595_ADDR_REG_OFFSET);
- res = inb_p(client->addr + SIS5595_DATA_REG_OFFSET);
+ outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+ res = inb_p(data->addr + SIS5595_DATA_REG_OFFSET);
mutex_unlock(&data->lock);
return res;
}
-static int sis5595_write_value(struct i2c_client *client, u8 reg, u8 value)
+static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
{
- struct sis5595_data *data = i2c_get_clientdata(client);
mutex_lock(&data->lock);
- outb_p(reg, client->addr + SIS5595_ADDR_REG_OFFSET);
- outb_p(value, client->addr + SIS5595_DATA_REG_OFFSET);
+ outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+ outb_p(value, data->addr + SIS5595_DATA_REG_OFFSET);
mutex_unlock(&data->lock);
- return 0;
}
/* Called when we have found a new SIS5595. */
-static void sis5595_init_client(struct i2c_client *client)
+static void __devinit sis5595_init_device(struct sis5595_data *data)
{
- u8 config = sis5595_read_value(client, SIS5595_REG_CONFIG);
+ u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
if (!(config & 0x01))
- sis5595_write_value(client, SIS5595_REG_CONFIG,
+ sis5595_write_value(data, SIS5595_REG_CONFIG,
(config & 0xf7) | 0x01);
}
static struct sis5595_data *sis5595_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct sis5595_data *data = i2c_get_clientdata(client);
+ struct sis5595_data *data = dev_get_drvdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -715,35 +633,35 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
for (i = 0; i <= data->maxins; i++) {
data->in[i] =
- sis5595_read_value(client, SIS5595_REG_IN(i));
+ sis5595_read_value(data, SIS5595_REG_IN(i));
data->in_min[i] =
- sis5595_read_value(client,
+ sis5595_read_value(data,
SIS5595_REG_IN_MIN(i));
data->in_max[i] =
- sis5595_read_value(client,
+ sis5595_read_value(data,
SIS5595_REG_IN_MAX(i));
}
for (i = 0; i < 2; i++) {
data->fan[i] =
- sis5595_read_value(client, SIS5595_REG_FAN(i));
+ sis5595_read_value(data, SIS5595_REG_FAN(i));
data->fan_min[i] =
- sis5595_read_value(client,
+ sis5595_read_value(data,
SIS5595_REG_FAN_MIN(i));
}
if (data->maxins == 3) {
data->temp =
- sis5595_read_value(client, SIS5595_REG_TEMP);
+ sis5595_read_value(data, SIS5595_REG_TEMP);
data->temp_over =
- sis5595_read_value(client, SIS5595_REG_TEMP_OVER);
+ sis5595_read_value(data, SIS5595_REG_TEMP_OVER);
data->temp_hyst =
- sis5595_read_value(client, SIS5595_REG_TEMP_HYST);
+ sis5595_read_value(data, SIS5595_REG_TEMP_HYST);
}
- i = sis5595_read_value(client, SIS5595_REG_FANDIV);
+ i = sis5595_read_value(data, SIS5595_REG_FANDIV);
data->fan_div[0] = (i >> 4) & 0x03;
data->fan_div[1] = i >> 6;
data->alarms =
- sis5595_read_value(client, SIS5595_REG_ALARM1) |
- (sis5595_read_value(client, SIS5595_REG_ALARM2) << 8);
+ sis5595_read_value(data, SIS5595_REG_ALARM1) |
+ (sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
data->last_updated = jiffies;
data->valid = 1;
}
@@ -774,10 +692,50 @@ static int blacklist[] __devinitdata = {
PCI_DEVICE_ID_SI_5598,
0 };
+static int __devinit sis5595_device_add(unsigned short address)
+{
+ struct resource res = {
+ .start = address,
+ .end = address + SIS5595_EXTENT - 1,
+ .name = "sis5595",
+ .flags = IORESOURCE_IO,
+ };
+ int err;
+
+ pdev = platform_device_alloc("sis5595", address);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR "sis5595: Device allocation failed\n");
+ goto exit;
+ }
+
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR "sis5595: Device resource addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR "sis5595: Device addition failed (%d)\n",
+ err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
static int __devinit sis5595_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
- u16 val;
+ u16 address;
+ u8 enable;
int *i;
for (i = blacklist; *i != 0; i++) {
@@ -790,27 +748,68 @@ static int __devinit sis5595_pci_probe(struct pci_dev *dev,
}
}
+ force_addr &= ~(SIS5595_EXTENT - 1);
+ if (force_addr) {
+ dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", force_addr);
+ pci_write_config_word(dev, SIS5595_BASE_REG, force_addr);
+ }
+
if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(dev, SIS5595_BASE_REG, &val))
+ pci_read_config_word(dev, SIS5595_BASE_REG, &address)) {
+ dev_err(&dev->dev, "Failed to read ISA address\n");
return -ENODEV;
+ }
- address = val & ~(SIS5595_EXTENT - 1);
- if (address == 0 && force_addr == 0) {
+ address &= ~(SIS5595_EXTENT - 1);
+ if (!address) {
dev_err(&dev->dev, "Base address not set - upgrade BIOS or use force_addr=0xaddr\n");
return -ENODEV;
}
+ if (force_addr && address != force_addr) {
+ /* doesn't work for some chips? */
+ dev_err(&dev->dev, "Failed to force ISA address\n");
+ return -ENODEV;
+ }
- s_bridge = pci_dev_get(dev);
- if (i2c_isa_add_driver(&sis5595_driver)) {
- pci_dev_put(s_bridge);
- s_bridge = NULL;
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable)) {
+ dev_err(&dev->dev, "Failed to read enable register\n");
+ return -ENODEV;
+ }
+ if (!(enable & 0x80)) {
+ if ((PCIBIOS_SUCCESSFUL !=
+ pci_write_config_byte(dev, SIS5595_ENABLE_REG,
+ enable | 0x80))
+ || (PCIBIOS_SUCCESSFUL !=
+ pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable))
+ || (!(enable & 0x80))) {
+ /* doesn't work for some chips! */
+ dev_err(&dev->dev, "Failed to enable HWM device\n");
+ return -ENODEV;
+ }
}
+ if (platform_driver_register(&sis5595_driver)) {
+ dev_dbg(&dev->dev, "Failed to register sis5595 driver\n");
+ goto exit;
+ }
+
+ s_bridge = pci_dev_get(dev);
+ /* Sets global pdev as a side effect */
+ if (sis5595_device_add(address))
+ goto exit_unregister;
+
/* Always return failure here. This is to allow other drivers to bind
* to this pci device. We don't really want to have control over the
* pci device, we only wanted to read as few register values from it.
*/
return -ENODEV;
+
+exit_unregister:
+ pci_dev_put(dev);
+ platform_driver_unregister(&sis5595_driver);
+exit:
+ return -ENODEV;
}
static struct pci_driver sis5595_pci_driver = {
@@ -828,7 +827,8 @@ static void __exit sm_sis5595_exit(void)
{
pci_unregister_driver(&sis5595_pci_driver);
if (s_bridge != NULL) {
- i2c_isa_del_driver(&sis5595_driver);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&sis5595_driver);
pci_dev_put(s_bridge);
s_bridge = NULL;
}
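The sis5595 PCI probe above keeps returning -ENODEV even on success so the PCI core never binds the driver to the southbridge; it only reads the ISA base address, spawns the platform device, and steps aside. A condensed sketch of that shape, reusing the hypothetical example_driver and example_device_add() from the pc87360 note above (EXAMPLE_BASE_REG mirrors SIS5595_BASE_REG and is illustrative here):

#include <linux/pci.h>
#include <linux/init.h>

#define EXAMPLE_BASE_REG	0x68	/* mirrors SIS5595_BASE_REG, illustrative */
#define EXAMPLE_EXTENT		8

static int __devinit example_pci_probe(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	u16 address;

	if (pci_read_config_word(dev, EXAMPLE_BASE_REG, &address)
	    != PCIBIOS_SUCCESSFUL)
		return -ENODEV;
	address &= ~(EXAMPLE_EXTENT - 1);
	if (!address)
		return -ENODEV;			/* BIOS did not assign a base address */

	if (platform_driver_register(&example_driver))
		return -ENODEV;
	if (example_device_add(address)) {
		platform_driver_unregister(&example_driver);
		return -ENODEV;
	}

	/* Deliberately report failure so the PCI core does not bind this
	 * driver to the southbridge; only the platform device stays. */
	return -ENODEV;
}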
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 943abbd95ab5..45266b30ce1d 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -174,6 +174,8 @@ static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
REG: count of 90kHz pulses / revolution */
static int fan_from_reg(u16 reg)
{
+ if (reg == 0 || reg == 0xffff)
+ return 0;
return 90000 * 60 / reg;
}
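The smsc47b397 hunk guards the tachometer conversion: the register counts 90 kHz pulses per fan revolution, so RPM = 90000 * 60 / reg; a reading of 0 would divide by zero and 0xffff means the fan is stopped or below the measurable range. A standalone restatement with a worked value:

#include <linux/types.h>

/* register counts 90 kHz pulses per fan revolution */
static int fan_from_reg(u16 reg)
{
	if (reg == 0 || reg == 0xffff)	/* 0 would divide by zero, 0xffff = stopped fan */
		return 0;
	return 90000 * 60 / reg;	/* e.g. reg = 1200 -> 5400000 / 1200 = 4500 RPM */
}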
@@ -333,7 +335,7 @@ static int __init smsc47b397_find(unsigned short *addr)
superio_enter();
id = superio_inb(SUPERIO_REG_DEVID);
- if ((id != 0x6f) && (id != 0x81)) {
+ if ((id != 0x6f) && (id != 0x81) && (id != 0x85)) {
superio_exit();
return -ENODEV;
}
@@ -346,7 +348,8 @@ static int __init smsc47b397_find(unsigned short *addr)
printk(KERN_INFO DRVNAME ": found SMSC %s "
"(base address 0x%04x, revision %u)\n",
- id == 0x81 ? "SCH5307-NS" : "LPC47B397-NC", *addr, rev);
+ id == 0x81 ? "SCH5307-NS" : id == 0x85 ? "SCH5317" :
+ "LPC47B397-NC", *addr, rev);
superio_exit();
return 0;
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 1e21c8cc948f..1de2f2be8708 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -597,6 +597,7 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
error_remove_files:
sysfs_remove_group(&dev->kobj, &smsc47m1_group);
error_free:
+ platform_set_drvdata(pdev, NULL);
kfree(data);
error_release:
release_region(res->start, SMSC_EXTENT);
@@ -608,12 +609,12 @@ static int __devexit smsc47m1_remove(struct platform_device *pdev)
struct smsc47m1_data *data = platform_get_drvdata(pdev);
struct resource *res;
- platform_set_drvdata(pdev, NULL);
hwmon_device_unregister(data->class_dev);
sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
release_region(res->start, SMSC_EXTENT);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
@@ -693,15 +694,12 @@ static int __init smsc47m1_device_add(unsigned short address,
goto exit_device_put;
}
- pdev->dev.platform_data = kmalloc(sizeof(struct smsc47m1_sio_data),
- GFP_KERNEL);
- if (!pdev->dev.platform_data) {
- err = -ENOMEM;
+ err = platform_device_add_data(pdev, sio_data,
+ sizeof(struct smsc47m1_sio_data));
+ if (err) {
printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
goto exit_device_put;
}
- memcpy(pdev->dev.platform_data, sio_data,
- sizeof(struct smsc47m1_sio_data));
err = platform_device_add(pdev);
if (err) {
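The smsc47m1 hunk above replaces the hand-rolled kmalloc()/memcpy() of platform data with platform_device_add_data(), which duplicates the caller's buffer into memory owned by the device core so the copy is released together with the device. A sketch of the idiom with illustrative names (pdev is assumed to be a file-scope pointer, as in the drivers above):

#include <linux/platform_device.h>
#include <linux/types.h>

struct example_sio_data {
	u8 activate;			/* illustrative field */
};

static int __init example_device_add(unsigned short address,
				     const struct example_sio_data *sio_data)
{
	int err;

	pdev = platform_device_alloc("example_hwmon", address);
	if (!pdev)
		return -ENOMEM;

	/* the core duplicates the buffer; the copy is freed with the device */
	err = platform_device_add_data(pdev, sio_data,
				       sizeof(struct example_sio_data));
	if (err)
		goto exit_device_put;

	err = platform_device_add(pdev);
	if (err)
		goto exit_device_put;
	return 0;

exit_device_put:
	platform_device_put(pdev);
	return err;
}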
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index a012f396f354..d3a3ba04cb0f 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -31,6 +31,7 @@
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/sysfs.h>
+#include <linux/mutex.h>
/* Addresses to scan */
static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
@@ -97,7 +98,7 @@ static inline int TEMP_FROM_REG(s8 val)
struct smsc47m192_data {
struct i2c_client client;
struct class_device *class_dev;
- struct semaphore update_lock;
+ struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
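The smsc47m192 hunks replace a semaphore used as a mutex with a real struct mutex: init_MUTEX()/down()/up() become mutex_init()/mutex_lock()/mutex_unlock(). A minimal sketch of the replacement, with example_data as a placeholder:

#include <linux/mutex.h>

struct example_data {
	struct mutex update_lock;	/* was: struct semaphore update_lock */
	int value;
};

static void example_set_value(struct example_data *data, int val)
{
	mutex_lock(&data->update_lock);		/* was: down(&data->update_lock) */
	data->value = val;
	mutex_unlock(&data->update_lock);	/* was: up(&data->update_lock) */
}

/* at probe time: mutex_init(&data->update_lock);  (was: init_MUTEX) */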
@@ -164,11 +165,11 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
struct smsc47m192_data *data = i2c_get_clientdata(client);
unsigned long val = simple_strtoul(buf, NULL, 10);
- down(&data->update_lock);
+ mutex_lock(&data->update_lock);
data->in_min[nr] = IN_TO_REG(val, nr);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(nr),
data->in_min[nr]);
- up(&data->update_lock);
+ mutex_unlock(&data->update_lock);
return count;
}
@@ -181,11 +182,11 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
struct smsc47m192_data *data = i2c_get_clientdata(client);
unsigned long val = simple_strtoul(buf, NULL, 10);
- down(&data->update_lock);
+ mutex_lock(&data->update_lock);
data->in_max[nr] = IN_TO_REG(val, nr);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(nr),
data->in_max[nr]);
- up(&data->update_lock);
+ mutex_unlock(&data->update_lock);
return count;
}
@@ -243,11 +244,11 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct smsc47m192_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
- down(&data->update_lock);
+ mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[nr],
data->temp_min[nr]);
- up(&data->update_lock);
+ mutex_unlock(&data->update_lock);
return count;
}
@@ -260,11 +261,11 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct smsc47m192_data *data = i2c_get_clientdata(client);
long val = simple_strtol(buf, NULL, 10);
- down(&data->update_lock);
+ mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[nr],
data->temp_max[nr]);
- up(&data->update_lock);
+ mutex_unlock(&data->update_lock);
return count;
}
@@ -287,7 +288,7 @@ static ssize_t set_temp_offset(struct device *dev, struct device_attribute
u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);
long val = simple_strtol(buf, NULL, 10);
- down(&data->update_lock);
+ mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_TO_REG(val);
if (nr>1)
i2c_smbus_write_byte_data(client,
@@ -303,7 +304,7 @@ static ssize_t set_temp_offset(struct device *dev, struct device_attribute
} else if ((sfr & 0x10) == (nr==0 ? 0x10 : 0))
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_TEMP_OFFSET(nr), 0);
- up(&data->update_lock);
+ mutex_unlock(&data->update_lock);
return count;
}
@@ -360,8 +361,8 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
-static SENSOR_DEVICE_ATTR(temp2_input_fault, S_IRUGO, show_alarm, NULL, 0x4000);
-static SENSOR_DEVICE_ATTR(temp3_input_fault, S_IRUGO, show_alarm, NULL, 0x8000);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
@@ -411,13 +412,13 @@ static struct attribute *smsc47m192_attributes[] = {
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_input_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
&dev_attr_cpu0_vid.attr,
&dev_attr_vrm.attr,
@@ -531,7 +532,7 @@ static int smsc47m192_detect(struct i2c_adapter *adapter, int address,
/* Fill in the remaining client fields and put into the global list */
strlcpy(client->name, "smsc47m192", I2C_NAME_SIZE);
data->vrm = vid_which_vrm();
- init_MUTEX(&data->update_lock);
+ mutex_init(&data->update_lock);
/* Tell the I2C layer a new client has arrived */
if ((err = i2c_attach_client(client)))
@@ -594,7 +595,7 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
struct smsc47m192_data *data = i2c_get_clientdata(client);
int i, config;
- down(&data->update_lock);
+ mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
@@ -645,7 +646,7 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
data->valid = 1;
}
- up(&data->update_lock);
+ mutex_unlock(&data->update_lock);
return data;
}
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 9a440c8cc520..24a6851491d0 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -34,9 +34,9 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mutex.h>
@@ -51,10 +51,7 @@ module_param(force_addr, ushort, 0);
MODULE_PARM_DESC(force_addr,
"Initialize the base address of the sensors");
-/* Device address
- Note that we can't determine the ISA address until we have initialized
- our module */
-static unsigned short address;
+static struct platform_device *pdev;
/*
The Via 686a southbridge has a LM78-like chip integrated on the same IC.
@@ -295,7 +292,8 @@ static inline long TEMP_FROM_REG10(u16 val)
/* For each registered chip, we need to keep some data in memory.
The structure is dynamically allocated. */
struct via686a_data {
- struct i2c_client client;
+ unsigned short addr;
+ const char *name;
struct class_device *class_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
@@ -315,98 +313,85 @@ struct via686a_data {
static struct pci_dev *s_bridge; /* pointer to the (only) via686a */
-static int via686a_detect(struct i2c_adapter *adapter);
-static int via686a_detach_client(struct i2c_client *client);
+static int via686a_probe(struct platform_device *pdev);
+static int via686a_remove(struct platform_device *pdev);
-static inline int via686a_read_value(struct i2c_client *client, u8 reg)
+static inline int via686a_read_value(struct via686a_data *data, u8 reg)
{
- return (inb_p(client->addr + reg));
+ return inb_p(data->addr + reg);
}
-static inline void via686a_write_value(struct i2c_client *client, u8 reg,
+static inline void via686a_write_value(struct via686a_data *data, u8 reg,
u8 value)
{
- outb_p(value, client->addr + reg);
+ outb_p(value, data->addr + reg);
}
static struct via686a_data *via686a_update_device(struct device *dev);
-static void via686a_init_client(struct i2c_client *client);
+static void via686a_init_device(struct via686a_data *data);
/* following are the sysfs callback functions */
/* 7 voltage sensors */
-static ssize_t show_in(struct device *dev, char *buf, int nr) {
+static ssize_t show_in(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, char *buf, int nr) {
+static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, char *buf, int nr) {
+static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, const char *buf,
- size_t count, int nr) {
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
+ struct via686a_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_min[nr] = IN_TO_REG(val, nr);
- via686a_write_value(client, VIA686A_REG_IN_MIN(nr),
+ via686a_write_value(data, VIA686A_REG_IN_MIN(nr),
data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_in_max(struct device *dev, const char *buf,
- size_t count, int nr) {
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
+ struct via686a_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_max[nr] = IN_TO_REG(val, nr);
- via686a_write_value(client, VIA686A_REG_IN_MAX(nr),
+ via686a_write_value(data, VIA686A_REG_IN_MAX(nr),
data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define show_in_offset(offset) \
-static ssize_t \
- show_in##offset (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_in(dev, buf, offset); \
-} \
-static ssize_t \
- show_in##offset##_min (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_in_min(dev, buf, offset); \
-} \
-static ssize_t \
- show_in##offset##_max (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_in_max(dev, buf, offset); \
-} \
-static ssize_t set_in##offset##_min (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_in_min(dev, buf, count, offset); \
-} \
-static ssize_t set_in##offset##_max (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_in_max(dev, buf, count, offset); \
-} \
-static DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in##offset, NULL);\
-static DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in##offset##_min, set_in##offset##_min); \
-static DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in##offset##_max, set_in##offset##_max);
+static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
+ show_in, NULL, offset); \
+static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
+ show_in_min, set_in_min, offset); \
+static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
+ show_in_max, set_in_max, offset);
show_in_offset(0);
show_in_offset(1);
@@ -415,150 +400,128 @@ show_in_offset(3);
show_in_offset(4);
/* 3 temperatures */
-static ssize_t show_temp(struct device *dev, char *buf, int nr) {
+static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG10(data->temp[nr]));
}
-static ssize_t show_temp_over(struct device *dev, char *buf, int nr) {
+static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_over[nr]));
}
-static ssize_t show_temp_hyst(struct device *dev, char *buf, int nr) {
+static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_hyst[nr]));
}
-static ssize_t set_temp_over(struct device *dev, const char *buf,
- size_t count, int nr) {
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
+ struct via686a_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_over[nr] = TEMP_TO_REG(val);
- via686a_write_value(client, VIA686A_REG_TEMP_OVER[nr],
+ via686a_write_value(data, VIA686A_REG_TEMP_OVER[nr],
data->temp_over[nr]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_hyst(struct device *dev, const char *buf,
- size_t count, int nr) {
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
+ struct via686a_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_hyst[nr] = TEMP_TO_REG(val);
- via686a_write_value(client, VIA686A_REG_TEMP_HYST[nr],
+ via686a_write_value(data, VIA686A_REG_TEMP_HYST[nr],
data->temp_hyst[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define show_temp_offset(offset) \
-static ssize_t show_temp_##offset (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_temp(dev, buf, offset - 1); \
-} \
-static ssize_t \
-show_temp_##offset##_over (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_temp_over(dev, buf, offset - 1); \
-} \
-static ssize_t \
-show_temp_##offset##_hyst (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_temp_hyst(dev, buf, offset - 1); \
-} \
-static ssize_t set_temp_##offset##_over (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_temp_over(dev, buf, count, offset - 1); \
-} \
-static ssize_t set_temp_##offset##_hyst (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_temp_hyst(dev, buf, count, offset - 1); \
-} \
-static DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp_##offset, NULL);\
-static DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_##offset##_over, set_temp_##offset##_over); \
-static DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_##offset##_hyst, set_temp_##offset##_hyst);
+static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
+ show_temp, NULL, offset - 1); \
+static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
+ show_temp_over, set_temp_over, offset - 1); \
+static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
+ show_temp_hyst, set_temp_hyst, offset - 1);
show_temp_offset(1);
show_temp_offset(2);
show_temp_offset(3);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, char *buf, int nr) {
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])) );
}
-static ssize_t show_fan_min(struct device *dev, char *buf, int nr) {
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n",
FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])) );
}
-static ssize_t show_fan_div(struct device *dev, char *buf, int nr) {
+static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]) );
}
-static ssize_t set_fan_min(struct device *dev, const char *buf,
- size_t count, int nr) {
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
+ struct via686a_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- via686a_write_value(client, VIA686A_REG_FAN_MIN(nr+1), data->fan_min[nr]);
+ via686a_write_value(data, VIA686A_REG_FAN_MIN(nr+1), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, const char *buf,
- size_t count, int nr) {
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
+ struct via686a_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int nr = attr->index;
int val = simple_strtol(buf, NULL, 10);
int old;
mutex_lock(&data->update_lock);
- old = via686a_read_value(client, VIA686A_REG_FANDIV);
+ old = via686a_read_value(data, VIA686A_REG_FANDIV);
data->fan_div[nr] = DIV_TO_REG(val);
old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4);
- via686a_write_value(client, VIA686A_REG_FANDIV, old);
+ via686a_write_value(data, VIA686A_REG_FANDIV, old);
mutex_unlock(&data->update_lock);
return count;
}
#define show_fan_offset(offset) \
-static ssize_t show_fan_##offset (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_fan(dev, buf, offset - 1); \
-} \
-static ssize_t show_fan_##offset##_min (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_fan_min(dev, buf, offset - 1); \
-} \
-static ssize_t show_fan_##offset##_div (struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return show_fan_div(dev, buf, offset - 1); \
-} \
-static ssize_t set_fan_##offset##_min (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_fan_min(dev, buf, count, offset - 1); \
-} \
-static ssize_t set_fan_##offset##_div (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- return set_fan_div(dev, buf, count, offset - 1); \
-} \
-static DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan_##offset, NULL);\
-static DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_##offset##_min, set_fan_##offset##_min); \
-static DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_##offset##_div, set_fan_##offset##_div);
+static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
+ show_fan, NULL, offset - 1); \
+static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
+ show_fan_min, set_fan_min, offset - 1); \
+static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
+ show_fan_div, set_fan_div, offset - 1);
show_fan_offset(1);
show_fan_offset(2);
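
The SENSOR_DEVICE_ATTR() declarations above replace one wrapper function per channel with a single callback that recovers its channel at run time via to_sensor_dev_attr(). Roughly, the first fan attribute expands to an ordinary device_attribute bundled with an integer index, which is also why the attribute table below references sensor_dev_attr_*.dev_attr.attr (sketch only, close to but not quoting the hwmon-sysfs header):

static struct sensor_device_attribute sensor_dev_attr_fan1_input = {
	.dev_attr = __ATTR(fan1_input, S_IRUGO, show_fan, NULL),
	.index = 0,	/* handed to the callback as attr->index */
};
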
@@ -570,41 +533,50 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, ch
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct via686a_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
static struct attribute *via686a_attributes[] = {
- &dev_attr_in0_input.attr,
- &dev_attr_in1_input.attr,
- &dev_attr_in2_input.attr,
- &dev_attr_in3_input.attr,
- &dev_attr_in4_input.attr,
- &dev_attr_in0_min.attr,
- &dev_attr_in1_min.attr,
- &dev_attr_in2_min.attr,
- &dev_attr_in3_min.attr,
- &dev_attr_in4_min.attr,
- &dev_attr_in0_max.attr,
- &dev_attr_in1_max.attr,
- &dev_attr_in2_max.attr,
- &dev_attr_in3_max.attr,
- &dev_attr_in4_max.attr,
-
- &dev_attr_temp1_input.attr,
- &dev_attr_temp2_input.attr,
- &dev_attr_temp3_input.attr,
- &dev_attr_temp1_max.attr,
- &dev_attr_temp2_max.attr,
- &dev_attr_temp3_max.attr,
- &dev_attr_temp1_max_hyst.attr,
- &dev_attr_temp2_max_hyst.attr,
- &dev_attr_temp3_max_hyst.attr,
-
- &dev_attr_fan1_input.attr,
- &dev_attr_fan2_input.attr,
- &dev_attr_fan1_min.attr,
- &dev_attr_fan2_min.attr,
- &dev_attr_fan1_div.attr,
- &dev_attr_fan2_div.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in1_min.dev_attr.attr,
+ &sensor_dev_attr_in2_min.dev_attr.attr,
+ &sensor_dev_attr_in3_min.dev_attr.attr,
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
+ &sensor_dev_attr_in1_max.dev_attr.attr,
+ &sensor_dev_attr_in2_max.dev_attr.attr,
+ &sensor_dev_attr_in3_max.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan2_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_div.dev_attr.attr,
+ &sensor_dev_attr_fan2_div.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_name.attr,
NULL
};
@@ -612,58 +584,29 @@ static const struct attribute_group via686a_group = {
.attrs = via686a_attributes,
};
-/* The driver. I choose to use type i2c_driver, as at is identical to both
- smbus_driver and isa_driver, and clients could be of either kind */
-static struct i2c_driver via686a_driver = {
+static struct platform_driver via686a_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "via686a",
},
- .attach_adapter = via686a_detect,
- .detach_client = via686a_detach_client,
+ .probe = via686a_probe,
+ .remove = __devexit_p(via686a_remove),
};
/* This is called when the module is loaded */
-static int via686a_detect(struct i2c_adapter *adapter)
+static int __devinit via686a_probe(struct platform_device *pdev)
{
- struct i2c_client *new_client;
struct via686a_data *data;
- int err = 0;
- const char client_name[] = "via686a";
- u16 val;
-
- /* 8231 requires multiple of 256, we enforce that on 686 as well */
- if (force_addr) {
- address = force_addr & 0xFF00;
- dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n",
- address);
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(s_bridge, VIA686A_BASE_REG, address))
- return -ENODEV;
- }
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(s_bridge, VIA686A_ENABLE_REG, &val))
- return -ENODEV;
- if (!(val & 0x0001)) {
- if (force_addr) {
- dev_info(&adapter->dev, "enabling sensors\n");
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(s_bridge, VIA686A_ENABLE_REG,
- val | 0x0001))
- return -ENODEV;
- } else {
- dev_warn(&adapter->dev, "sensors disabled - enable "
- "with force_addr=0x%x\n", address);
- return -ENODEV;
- }
- }
+ struct resource *res;
+ int err;
/* Reserve the ISA region */
- if (!request_region(address, VIA686A_EXTENT,
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!request_region(res->start, VIA686A_EXTENT,
via686a_driver.driver.name)) {
- dev_err(&adapter->dev, "region 0x%x already in use!\n",
- address);
+ dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
+ (unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
}
@@ -672,30 +615,19 @@ static int via686a_detect(struct i2c_adapter *adapter)
goto exit_release;
}
- new_client = &data->client;
- i2c_set_clientdata(new_client, data);
- new_client->addr = address;
- new_client->adapter = adapter;
- new_client->driver = &via686a_driver;
- new_client->flags = 0;
-
- /* Fill in the remaining client fields and put into the global list */
- strlcpy(new_client->name, client_name, I2C_NAME_SIZE);
-
- data->valid = 0;
+ platform_set_drvdata(pdev, data);
+ data->addr = res->start;
+ data->name = "via686a";
mutex_init(&data->update_lock);
- /* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(new_client)))
- goto exit_free;
/* Initialize the VIA686A chip */
- via686a_init_client(new_client);
+ via686a_init_device(data);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj, &via686a_group)))
- goto exit_detach;
+ if ((err = sysfs_create_group(&pdev->dev.kobj, &via686a_group)))
+ goto exit_free;
- data->class_dev = hwmon_device_register(&new_client->dev);
+ data->class_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
goto exit_remove_files;
@@ -704,51 +636,46 @@ static int via686a_detect(struct i2c_adapter *adapter)
return 0;
exit_remove_files:
- sysfs_remove_group(&new_client->dev.kobj, &via686a_group);
-exit_detach:
- i2c_detach_client(new_client);
+ sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
exit_free:
kfree(data);
exit_release:
- release_region(address, VIA686A_EXTENT);
+ release_region(res->start, VIA686A_EXTENT);
return err;
}
-static int via686a_detach_client(struct i2c_client *client)
+static int __devexit via686a_remove(struct platform_device *pdev)
{
- struct via686a_data *data = i2c_get_clientdata(client);
- int err;
+ struct via686a_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->class_dev);
- sysfs_remove_group(&client->dev.kobj, &via686a_group);
+ sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
- if ((err = i2c_detach_client(client)))
- return err;
-
- release_region(client->addr, VIA686A_EXTENT);
+ release_region(data->addr, VIA686A_EXTENT);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
}
-static void via686a_init_client(struct i2c_client *client)
+static void __devinit via686a_init_device(struct via686a_data *data)
{
u8 reg;
/* Start monitoring */
- reg = via686a_read_value(client, VIA686A_REG_CONFIG);
- via686a_write_value(client, VIA686A_REG_CONFIG, (reg|0x01)&0x7F);
+ reg = via686a_read_value(data, VIA686A_REG_CONFIG);
+ via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F);
/* Configure temp interrupt mode for continuous-interrupt operation */
- via686a_write_value(client, VIA686A_REG_TEMP_MODE,
- via686a_read_value(client, VIA686A_REG_TEMP_MODE) &
- !(VIA686A_TEMP_MODE_MASK | VIA686A_TEMP_MODE_CONTINUOUS));
+ reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE);
+ via686a_write_value(data, VIA686A_REG_TEMP_MODE,
+ (reg & ~VIA686A_TEMP_MODE_MASK)
+ | VIA686A_TEMP_MODE_CONTINUOUS);
}
static struct via686a_data *via686a_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct via686a_data *data = i2c_get_clientdata(client);
+ struct via686a_data *data = dev_get_drvdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -757,27 +684,27 @@ static struct via686a_data *via686a_update_device(struct device *dev)
|| !data->valid) {
for (i = 0; i <= 4; i++) {
data->in[i] =
- via686a_read_value(client, VIA686A_REG_IN(i));
- data->in_min[i] = via686a_read_value(client,
+ via686a_read_value(data, VIA686A_REG_IN(i));
+ data->in_min[i] = via686a_read_value(data,
VIA686A_REG_IN_MIN
(i));
data->in_max[i] =
- via686a_read_value(client, VIA686A_REG_IN_MAX(i));
+ via686a_read_value(data, VIA686A_REG_IN_MAX(i));
}
for (i = 1; i <= 2; i++) {
data->fan[i - 1] =
- via686a_read_value(client, VIA686A_REG_FAN(i));
- data->fan_min[i - 1] = via686a_read_value(client,
+ via686a_read_value(data, VIA686A_REG_FAN(i));
+ data->fan_min[i - 1] = via686a_read_value(data,
VIA686A_REG_FAN_MIN(i));
}
for (i = 0; i <= 2; i++) {
- data->temp[i] = via686a_read_value(client,
+ data->temp[i] = via686a_read_value(data,
VIA686A_REG_TEMP[i]) << 2;
data->temp_over[i] =
- via686a_read_value(client,
+ via686a_read_value(data,
VIA686A_REG_TEMP_OVER[i]);
data->temp_hyst[i] =
- via686a_read_value(client,
+ via686a_read_value(data,
VIA686A_REG_TEMP_HYST[i]);
}
/* add in lower 2 bits
@@ -785,23 +712,23 @@ static struct via686a_data *via686a_update_device(struct device *dev)
temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23
temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23
*/
- data->temp[0] |= (via686a_read_value(client,
+ data->temp[0] |= (via686a_read_value(data,
VIA686A_REG_TEMP_LOW1)
& 0xc0) >> 6;
data->temp[1] |=
- (via686a_read_value(client, VIA686A_REG_TEMP_LOW23) &
+ (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
0x30) >> 4;
data->temp[2] |=
- (via686a_read_value(client, VIA686A_REG_TEMP_LOW23) &
+ (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
0xc0) >> 6;
- i = via686a_read_value(client, VIA686A_REG_FANDIV);
+ i = via686a_read_value(data, VIA686A_REG_FANDIV);
data->fan_div[0] = (i >> 4) & 0x03;
data->fan_div[1] = i >> 6;
data->alarms =
- via686a_read_value(client,
+ via686a_read_value(data,
VIA686A_REG_ALARM1) |
- (via686a_read_value(client, VIA686A_REG_ALARM2) << 8);
+ (via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
data->last_updated = jiffies;
data->valid = 1;
}
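
All three drivers touched by this series cache the chip registers behind the same update helper: a mutex, a jiffies timestamp and a valid flag, so sysfs reads hit the hardware at most once per refresh interval. A minimal sketch of that shape, with foo_data, foo_update_device and the 1.5 s interval as placeholders (the exact interval differs per driver):

static struct foo_data *foo_update_device(struct device *dev)
{
	struct foo_data *data = dev_get_drvdata(dev);

	mutex_lock(&data->update_lock);
	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
	    || !data->valid) {
		/* ... re-read every monitored register into *data ... */
		data->last_updated = jiffies;
		data->valid = 1;
	}
	mutex_unlock(&data->update_lock);

	return data;
}
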
@@ -818,32 +745,102 @@ static struct pci_device_id via686a_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
+static int __devinit via686a_device_add(unsigned short address)
+{
+ struct resource res = {
+ .start = address,
+ .end = address + VIA686A_EXTENT - 1,
+ .name = "via686a",
+ .flags = IORESOURCE_IO,
+ };
+ int err;
+
+ pdev = platform_device_alloc("via686a", address);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR "via686a: Device allocation failed\n");
+ goto exit;
+ }
+
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR "via686a: Device resource addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR "via686a: Device addition failed (%d)\n",
+ err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
static int __devinit via686a_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
- u16 val;
+ u16 address, val;
+ if (force_addr) {
+ address = force_addr & ~(VIA686A_EXTENT - 1);
+ dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address);
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_write_config_word(dev, VIA686A_BASE_REG, address | 1))
+ return -ENODEV;
+ }
if (PCIBIOS_SUCCESSFUL !=
pci_read_config_word(dev, VIA686A_BASE_REG, &val))
return -ENODEV;
address = val & ~(VIA686A_EXTENT - 1);
- if (address == 0 && force_addr == 0) {
+ if (address == 0) {
dev_err(&dev->dev, "base address not set - upgrade BIOS "
"or use force_addr=0xaddr\n");
return -ENODEV;
}
- s_bridge = pci_dev_get(dev);
- if (i2c_isa_add_driver(&via686a_driver)) {
- pci_dev_put(s_bridge);
- s_bridge = NULL;
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_read_config_word(dev, VIA686A_ENABLE_REG, &val))
+ return -ENODEV;
+ if (!(val & 0x0001)) {
+ if (!force_addr) {
+ dev_warn(&dev->dev, "Sensors disabled, enable "
+ "with force_addr=0x%x\n", address);
+ return -ENODEV;
+ }
+
+ dev_warn(&dev->dev, "Enabling sensors\n");
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_write_config_word(dev, VIA686A_ENABLE_REG,
+ val | 0x0001))
+ return -ENODEV;
}
+ if (platform_driver_register(&via686a_driver))
+ goto exit;
+
+ /* Sets global pdev as a side effect */
+ if (via686a_device_add(address))
+ goto exit_unregister;
+
/* Always return failure here. This is to allow other drivers to bind
* to this pci device. We don't really want to have control over the
 * pci device, we only wanted to read a few register values from it.
*/
+ s_bridge = pci_dev_get(dev);
+ return -ENODEV;
+
+exit_unregister:
+ platform_driver_unregister(&via686a_driver);
+exit:
return -ENODEV;
}
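
The control flow of the new PCI probe is easy to misread: it returns -ENODEV even on success, so the PCI core never binds the bridge to this driver; the pci_dev_get() reference is what keeps the bridge pinned for the module's lifetime. A condensed sketch of the intended sequence (hypothetical helper name, error paths as in the patch):

static int __devinit via686a_pci_probe_sketch(struct pci_dev *dev,
					      const struct pci_device_id *id)
{
	/* ... read and, if requested, force/enable the sensor I/O base ... */
	if (platform_driver_register(&via686a_driver))
		return -ENODEV;
	if (via686a_device_add(address)) {	/* sets the global pdev */
		platform_driver_unregister(&via686a_driver);
		return -ENODEV;
	}
	s_bridge = pci_dev_get(dev);	/* pin the bridge for module lifetime */
	return -ENODEV;			/* decline the PCI binding on purpose */
}
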
@@ -862,7 +859,8 @@ static void __exit sm_via686a_exit(void)
{
pci_unregister_driver(&via686a_pci_driver);
if (s_bridge != NULL) {
- i2c_isa_del_driver(&via686a_driver);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&via686a_driver);
pci_dev_put(s_bridge);
s_bridge = NULL;
}
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index a6a4aa0eee16..c604972f0186 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -29,8 +29,7 @@
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
@@ -42,10 +41,7 @@ static int force_addr;
module_param(force_addr, int, 0);
MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors");
-/* Device address
- Note that we can't determine the ISA address until we have initialized
- our module */
-static unsigned short isa_address;
+static struct platform_device *pdev;
#define VT8231_EXTENT 0x80
#define VT8231_BASE_REG 0x70
@@ -148,7 +144,9 @@ static inline u8 FAN_TO_REG(long rpm, int div)
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
struct vt8231_data {
- struct i2c_client client;
+ unsigned short addr;
+ const char *name;
+
struct mutex update_lock;
struct class_device *class_dev;
char valid; /* !=0 if following fields are valid */
@@ -168,20 +166,20 @@ struct vt8231_data {
};
static struct pci_dev *s_bridge;
-static int vt8231_detect(struct i2c_adapter *adapter);
-static int vt8231_detach_client(struct i2c_client *client);
+static int vt8231_probe(struct platform_device *pdev);
+static int vt8231_remove(struct platform_device *pdev);
static struct vt8231_data *vt8231_update_device(struct device *dev);
-static void vt8231_init_client(struct i2c_client *client);
+static void vt8231_init_device(struct vt8231_data *data);
-static inline int vt8231_read_value(struct i2c_client *client, u8 reg)
+static inline int vt8231_read_value(struct vt8231_data *data, u8 reg)
{
- return inb_p(client->addr + reg);
+ return inb_p(data->addr + reg);
}
-static inline void vt8231_write_value(struct i2c_client *client, u8 reg,
+static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
u8 value)
{
- outb_p(value, client->addr + reg);
+ outb_p(value, data->addr + reg);
}
/* following are the sysfs callback functions */
@@ -220,13 +218,12 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
- vt8231_write_value(client, regvoltmin[nr], data->in_min[nr]);
+ vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -236,13 +233,12 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
- vt8231_write_value(client, regvoltmax[nr], data->in_max[nr]);
+ vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -278,14 +274,13 @@ static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
0, 255);
- vt8231_write_value(client, regvoltmin[5], data->in_min[5]);
+ vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -293,14 +288,13 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
unsigned long val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
0, 255);
- vt8231_write_value(client, regvoltmax[5], data->in_max[5]);
+ vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -348,26 +342,24 @@ static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
- vt8231_write_value(client, regtempmax[0], data->temp_max[0]);
+ vt8231_write_value(data, regtempmax[0], data->temp_max[0]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
- vt8231_write_value(client, regtempmin[0], data->temp_min[0]);
+ vt8231_write_value(data, regtempmin[0], data->temp_min[0]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -404,13 +396,12 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
- vt8231_write_value(client, regtempmax[nr], data->temp_max[nr]);
+ vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -419,13 +410,12 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
int val = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
- vt8231_write_value(client, regtempmin[nr], data->temp_min[nr]);
+ vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -486,13 +476,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
int val = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
+ vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -500,12 +489,11 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
unsigned long val = simple_strtoul(buf, NULL, 10);
int nr = sensor_attr->index;
- int old = vt8231_read_value(client, VT8231_REG_FANDIV);
+ int old = vt8231_read_value(data, VT8231_REG_FANDIV);
long min = FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr]));
@@ -516,7 +504,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
case 4: data->fan_div[nr] = 2; break;
case 8: data->fan_div[nr] = 3; break;
default:
- dev_err(&client->dev, "fan_div value %ld not supported."
+ dev_err(dev, "fan_div value %ld not supported."
"Choose one of 1, 2, 4 or 8!\n", val);
mutex_unlock(&data->update_lock);
return -EINVAL;
@@ -524,10 +512,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
/* Correct the fan minimum speed */
data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
- vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
+ vt8231_write_value(data, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4);
- vt8231_write_value(client, VT8231_REG_FANDIV, old);
+ vt8231_write_value(data, VT8231_REG_FANDIV, old);
mutex_unlock(&data->update_lock);
return count;
}
@@ -551,9 +539,16 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
-
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct vt8231_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
static struct attribute *vt8231_attributes_temps[6][4] = {
{
&dev_attr_temp1_input.attr,
@@ -648,6 +643,7 @@ static struct attribute *vt8231_attributes[] = {
&sensor_dev_attr_fan1_div.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_name.attr,
NULL
};
@@ -655,13 +651,13 @@ static const struct attribute_group vt8231_group = {
.attrs = vt8231_attributes,
};
-static struct i2c_driver vt8231_driver = {
+static struct platform_driver vt8231_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "vt8231",
},
- .attach_adapter = vt8231_detect,
- .detach_client = vt8231_detach_client,
+ .probe = vt8231_probe,
+ .remove = __devexit_p(vt8231_remove),
};
static struct pci_device_id vt8231_pci_ids[] = {
@@ -680,40 +676,18 @@ static struct pci_driver vt8231_pci_driver = {
.probe = vt8231_pci_probe,
};
-int vt8231_detect(struct i2c_adapter *adapter)
+int vt8231_probe(struct platform_device *pdev)
{
- struct i2c_client *client;
+ struct resource *res;
struct vt8231_data *data;
int err = 0, i;
- u16 val;
-
- /* 8231 requires multiple of 256 */
- if (force_addr) {
- isa_address = force_addr & 0xFF00;
- dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n",
- isa_address);
- if (PCIBIOS_SUCCESSFUL != pci_write_config_word(s_bridge,
- VT8231_BASE_REG, isa_address))
- return -ENODEV;
- }
-
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(s_bridge, VT8231_ENABLE_REG, &val))
- return -ENODEV;
-
- if (!(val & 0x0001)) {
- dev_warn(&adapter->dev, "enabling sensors\n");
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(s_bridge, VT8231_ENABLE_REG,
- val | 0x0001))
- return -ENODEV;
- }
/* Reserve the ISA region */
- if (!request_region(isa_address, VT8231_EXTENT,
- vt8231_pci_driver.name)) {
- dev_err(&adapter->dev, "region 0x%x already in use!\n",
- isa_address);
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!request_region(res->start, VT8231_EXTENT,
+ vt8231_driver.driver.name)) {
+ dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
+ (unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
}
@@ -722,33 +696,23 @@ int vt8231_detect(struct i2c_adapter *adapter)
goto exit_release;
}
- client = &data->client;
- i2c_set_clientdata(client, data);
- client->addr = isa_address;
- client->adapter = adapter;
- client->driver = &vt8231_driver;
-
- /* Fill in the remaining client fields and put into the global list */
- strlcpy(client->name, "vt8231", I2C_NAME_SIZE);
+ platform_set_drvdata(pdev, data);
+ data->addr = res->start;
+ data->name = "vt8231";
mutex_init(&data->update_lock);
-
- /* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(client)))
- goto exit_free;
-
- vt8231_init_client(client);
+ vt8231_init_device(data);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&client->dev.kobj, &vt8231_group)))
- goto exit_detach;
+ if ((err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group)))
+ goto exit_free;
/* Must update device information to find out the config field */
- data->uch_config = vt8231_read_value(client, VT8231_REG_UCH_CONFIG);
+ data->uch_config = vt8231_read_value(data, VT8231_REG_UCH_CONFIG);
for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) {
if (ISTEMP(i, data->uch_config)) {
- if ((err = sysfs_create_group(&client->dev.kobj,
+ if ((err = sysfs_create_group(&pdev->dev.kobj,
&vt8231_group_temps[i])))
goto exit_remove_files;
}
@@ -756,13 +720,13 @@ int vt8231_detect(struct i2c_adapter *adapter)
for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) {
if (ISVOLT(i, data->uch_config)) {
- if ((err = sysfs_create_group(&client->dev.kobj,
+ if ((err = sysfs_create_group(&pdev->dev.kobj,
&vt8231_group_volts[i])))
goto exit_remove_files;
}
}
- data->class_dev = hwmon_device_register(&client->dev);
+ data->class_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
goto exit_remove_files;
@@ -771,56 +735,52 @@ int vt8231_detect(struct i2c_adapter *adapter)
exit_remove_files:
for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++)
- sysfs_remove_group(&client->dev.kobj, &vt8231_group_volts[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]);
for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++)
- sysfs_remove_group(&client->dev.kobj, &vt8231_group_temps[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]);
+
+ sysfs_remove_group(&pdev->dev.kobj, &vt8231_group);
- sysfs_remove_group(&client->dev.kobj, &vt8231_group);
-exit_detach:
- i2c_detach_client(client);
exit_free:
+ platform_set_drvdata(pdev, NULL);
kfree(data);
+
exit_release:
- release_region(isa_address, VT8231_EXTENT);
+ release_region(res->start, VT8231_EXTENT);
return err;
}
-static int vt8231_detach_client(struct i2c_client *client)
+static int vt8231_remove(struct platform_device *pdev)
{
- struct vt8231_data *data = i2c_get_clientdata(client);
- int err, i;
+ struct vt8231_data *data = platform_get_drvdata(pdev);
+ int i;
hwmon_device_unregister(data->class_dev);
for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++)
- sysfs_remove_group(&client->dev.kobj, &vt8231_group_volts[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]);
for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++)
- sysfs_remove_group(&client->dev.kobj, &vt8231_group_temps[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]);
- sysfs_remove_group(&client->dev.kobj, &vt8231_group);
+ sysfs_remove_group(&pdev->dev.kobj, &vt8231_group);
- if ((err = i2c_detach_client(client))) {
- return err;
- }
-
- release_region(client->addr, VT8231_EXTENT);
+ release_region(data->addr, VT8231_EXTENT);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
-
return 0;
}
-static void vt8231_init_client(struct i2c_client *client)
+static void vt8231_init_device(struct vt8231_data *data)
{
- vt8231_write_value(client, VT8231_REG_TEMP1_CONFIG, 0);
- vt8231_write_value(client, VT8231_REG_TEMP2_CONFIG, 0);
+ vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
+ vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
}
static struct vt8231_data *vt8231_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct vt8231_data *data = i2c_get_clientdata(client);
+ struct vt8231_data *data = dev_get_drvdata(dev);
int i;
u16 low;
@@ -830,41 +790,41 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
|| !data->valid) {
for (i = 0; i < 6; i++) {
if (ISVOLT(i, data->uch_config)) {
- data->in[i] = vt8231_read_value(client,
+ data->in[i] = vt8231_read_value(data,
regvolt[i]);
- data->in_min[i] = vt8231_read_value(client,
+ data->in_min[i] = vt8231_read_value(data,
regvoltmin[i]);
- data->in_max[i] = vt8231_read_value(client,
+ data->in_max[i] = vt8231_read_value(data,
regvoltmax[i]);
}
}
for (i = 0; i < 2; i++) {
- data->fan[i] = vt8231_read_value(client,
+ data->fan[i] = vt8231_read_value(data,
VT8231_REG_FAN(i));
- data->fan_min[i] = vt8231_read_value(client,
+ data->fan_min[i] = vt8231_read_value(data,
VT8231_REG_FAN_MIN(i));
}
- low = vt8231_read_value(client, VT8231_REG_TEMP_LOW01);
+ low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
low = (low >> 6) | ((low & 0x30) >> 2)
- | (vt8231_read_value(client, VT8231_REG_TEMP_LOW25) << 4);
+ | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
for (i = 0; i < 6; i++) {
if (ISTEMP(i, data->uch_config)) {
- data->temp[i] = (vt8231_read_value(client,
+ data->temp[i] = (vt8231_read_value(data,
regtemp[i]) << 2)
| ((low >> (2 * i)) & 0x03);
- data->temp_max[i] = vt8231_read_value(client,
+ data->temp_max[i] = vt8231_read_value(data,
regtempmax[i]);
- data->temp_min[i] = vt8231_read_value(client,
+ data->temp_min[i] = vt8231_read_value(data,
regtempmin[i]);
}
}
- i = vt8231_read_value(client, VT8231_REG_FANDIV);
+ i = vt8231_read_value(data, VT8231_REG_FANDIV);
data->fan_div[0] = (i >> 4) & 0x03;
data->fan_div[1] = i >> 6;
- data->alarms = vt8231_read_value(client, VT8231_REG_ALARM1) |
- (vt8231_read_value(client, VT8231_REG_ALARM2) << 8);
+ data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
+ (vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
/* Set alarm flags correctly */
if (!data->fan[0] && data->fan_min[0]) {
@@ -888,33 +848,102 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
return data;
}
+static int __devinit vt8231_device_add(unsigned short address)
+{
+ struct resource res = {
+ .start = address,
+ .end = address + VT8231_EXTENT - 1,
+ .name = "vt8231",
+ .flags = IORESOURCE_IO,
+ };
+ int err;
+
+ pdev = platform_device_alloc("vt8231", address);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR "vt8231: Device allocation failed\n");
+ goto exit;
+ }
+
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR "vt8231: Device resource addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR "vt8231: Device addition failed (%d)\n",
+ err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
static int __devinit vt8231_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
- u16 val;
+ u16 address, val;
+ if (force_addr) {
+ address = force_addr & 0xff00;
+ dev_warn(&dev->dev, "Forcing ISA address 0x%x\n",
+ address);
+
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_write_config_word(dev, VT8231_BASE_REG, address | 1))
+ return -ENODEV;
+ }
if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG,
&val))
return -ENODEV;
- isa_address = val & ~(VT8231_EXTENT - 1);
- if (isa_address == 0 && force_addr == 0) {
+ address = val & ~(VT8231_EXTENT - 1);
+ if (address == 0) {
dev_err(&dev->dev, "base address not set -\
upgrade BIOS or use force_addr=0xaddr\n");
return -ENODEV;
}
- s_bridge = pci_dev_get(dev);
+ if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_ENABLE_REG,
+ &val))
+ return -ENODEV;
- if (i2c_isa_add_driver(&vt8231_driver)) {
- pci_dev_put(s_bridge);
- s_bridge = NULL;
+ if (!(val & 0x0001)) {
+ dev_warn(&dev->dev, "enabling sensors\n");
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_write_config_word(dev, VT8231_ENABLE_REG,
+ val | 0x0001))
+ return -ENODEV;
}
+ if (platform_driver_register(&vt8231_driver))
+ goto exit;
+
+ /* Sets global pdev as a side effect */
+ if (vt8231_device_add(address))
+ goto exit_unregister;
+
/* Always return failure here. This is to allow other drivers to bind
* to this pci device. We don't really want to have control over the
 * pci device, we only wanted to read a few register values from it.
*/
+
+ /* We do, however, mark ourselves as using the PCI device to stop it
+ getting unloaded. */
+ s_bridge = pci_dev_get(dev);
+ return -ENODEV;
+
+exit_unregister:
+ platform_driver_unregister(&vt8231_driver);
+exit:
return -ENODEV;
}
@@ -927,7 +956,8 @@ static void __exit sm_vt8231_exit(void)
{
pci_unregister_driver(&vt8231_pci_driver);
if (s_bridge != NULL) {
- i2c_isa_del_driver(&vt8231_driver);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&vt8231_driver);
pci_dev_put(s_bridge);
s_bridge = NULL;
}
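
Both conversions above, and the w83627ehf one below, end up with the same probe skeleton: claim the I/O region named by the platform resource, allocate the private data, attach it with platform_set_drvdata(), initialize the chip, create the sysfs group, then register with the hwmon class, unwinding in reverse order on failure. A condensed sketch with foo_*, FOO_EXTENT and DRVNAME as placeholders for the driver-specific names:

static int __devinit foo_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	struct foo_data *data;
	int err;

	if (!request_region(res->start, FOO_EXTENT, DRVNAME))
		return -ENODEV;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto exit_release;
	}

	data->addr = res->start;
	data->name = DRVNAME;
	mutex_init(&data->update_lock);
	platform_set_drvdata(pdev, data);

	foo_init_device(data);		/* talk to the chip directly now */

	err = sysfs_create_group(&pdev->dev.kobj, &foo_group);
	if (err)
		goto exit_free;

	data->class_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->class_dev)) {
		err = PTR_ERR(data->class_dev);
		goto exit_remove;
	}
	return 0;

exit_remove:
	sysfs_remove_group(&pdev->dev.kobj, &foo_group);
exit_free:
	kfree(data);
exit_release:
	release_region(res->start, FOO_EXTENT);
	return err;
}
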
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 30a76404f0af..c51ae2e17758 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -41,41 +41,39 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <asm/io.h>
#include "lm75.h"
-/* The actual ISA address is read from Super-I/O configuration space */
-static unsigned short address;
+enum kinds { w83627ehf, w83627dhg };
-/*
- * Super-I/O constants and functions
- */
+/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
+static const char * w83627ehf_device_names[] = {
+ "w83627ehf",
+ "w83627dhg",
+};
+
+#define DRVNAME "w83627ehf"
/*
- * The three following globals are initialized in w83627ehf_find(), before
- * the i2c-isa device is created. Otherwise, they could be stored in
- * w83627ehf_data. This is ugly, but necessary, and when the driver is next
- * updated to become a platform driver, the globals will disappear.
+ * Super-I/O constants and functions
*/
-static int REG; /* The register to read/write */
-static int VAL; /* The value to read/write */
-/* The w83627ehf/ehg have 10 voltage inputs, but the w83627dhg has 9. This
- * value is also used in w83627ehf_detect() to export a device name in sysfs
- * (e.g. w83627ehf or w83627dhg) */
-static int w83627ehf_num_in;
#define W83627EHF_LD_HWM 0x0b
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
+#define SIO_REG_EN_VRM10 0x2C /* GPIO3, GPIO4 selection */
#define SIO_REG_ENABLE 0x30 /* Logical device enable */
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
+#define SIO_REG_VID_CTRL 0xF0 /* VID control */
+#define SIO_REG_VID_DATA 0xF1 /* VID data */
#define SIO_W83627EHF_ID 0x8850
#define SIO_W83627EHG_ID 0x8860
@@ -83,38 +81,38 @@ static int w83627ehf_num_in;
#define SIO_ID_MASK 0xFFF0
static inline void
-superio_outb(int reg, int val)
+superio_outb(int ioreg, int reg, int val)
{
- outb(reg, REG);
- outb(val, VAL);
+ outb(reg, ioreg);
+ outb(val, ioreg + 1);
}
static inline int
-superio_inb(int reg)
+superio_inb(int ioreg, int reg)
{
- outb(reg, REG);
- return inb(VAL);
+ outb(reg, ioreg);
+ return inb(ioreg + 1);
}
static inline void
-superio_select(int ld)
+superio_select(int ioreg, int ld)
{
- outb(SIO_REG_LDSEL, REG);
- outb(ld, VAL);
+ outb(SIO_REG_LDSEL, ioreg);
+ outb(ld, ioreg + 1);
}
static inline void
-superio_enter(void)
+superio_enter(int ioreg)
{
- outb(0x87, REG);
- outb(0x87, REG);
+ outb(0x87, ioreg);
+ outb(0x87, ioreg);
}
static inline void
-superio_exit(void)
+superio_exit(int ioreg)
{
- outb(0x02, REG);
- outb(0x02, VAL);
+ outb(0x02, ioreg);
+ outb(0x02, ioreg + 1);
}
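
With the configuration-port number now passed explicitly, a Super-I/O transaction follows the usual enter/select/access/exit sequence against either of the standard config ports (0x2e or 0x4e). A minimal sketch, using read_hwm_base() as a hypothetical helper, of how the hardware-monitor base address is fetched from logical device 0x0b:

static u16 read_hwm_base(int ioreg)
{
	u16 base;

	superio_enter(ioreg);			/* unlock config space */
	superio_select(ioreg, W83627EHF_LD_HWM);	/* logical device 0x0b */
	base = (superio_inb(ioreg, SIO_REG_ADDR) << 8)
	     | superio_inb(ioreg, SIO_REG_ADDR + 1);
	superio_exit(ioreg);			/* lock config space again */

	return base & IOREGION_ALIGNMENT;
}
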
/*
@@ -124,8 +122,8 @@ superio_exit(void)
#define IOREGION_ALIGNMENT ~7
#define IOREGION_OFFSET 5
#define IOREGION_LENGTH 2
-#define ADDR_REG_OFFSET 5
-#define DATA_REG_OFFSET 6
+#define ADDR_REG_OFFSET 0
+#define DATA_REG_OFFSET 1
#define W83627EHF_REG_BANK 0x4E
#define W83627EHF_REG_CONFIG 0x40
@@ -255,7 +253,9 @@ static inline u8 in_to_reg(u32 val, u8 nr)
*/
struct w83627ehf_data {
- struct i2c_client client;
+ int addr; /* IO base of hw monitor block */
+ const char *name;
+
struct class_device *class_dev;
struct mutex lock;
@@ -264,6 +264,7 @@ struct w83627ehf_data {
unsigned long last_updated; /* In jiffies */
/* Register values */
+ u8 in_num; /* number of in inputs we have */
u8 in[10]; /* Register value */
u8 in_max[10]; /* Register value */
u8 in_min[10]; /* Register value */
@@ -271,6 +272,7 @@ struct w83627ehf_data {
u8 fan_min[5];
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
+ u8 temp_type[3];
s8 temp1;
s8 temp1_max;
s8 temp1_max_hyst;
@@ -288,6 +290,14 @@ struct w83627ehf_data {
u8 fan_min_output[4]; /* minimum fan speed */
u8 fan_stop_time[4];
+
+ u8 vid;
+ u8 vrm;
+};
+
+struct w83627ehf_sio_data {
+ int sioreg;
+ enum kinds kind;
};
static inline int is_word_sized(u16 reg)
@@ -303,156 +313,152 @@ static inline int is_word_sized(u16 reg)
nothing for registers which live in bank 0. For others, they respectively
set the bank register to the correct value (before the register is
accessed), and back to 0 (afterwards). */
-static inline void w83627ehf_set_bank(struct i2c_client *client, u16 reg)
+static inline void w83627ehf_set_bank(struct w83627ehf_data *data, u16 reg)
{
if (reg & 0xff00) {
- outb_p(W83627EHF_REG_BANK, client->addr + ADDR_REG_OFFSET);
- outb_p(reg >> 8, client->addr + DATA_REG_OFFSET);
+ outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
+ outb_p(reg >> 8, data->addr + DATA_REG_OFFSET);
}
}
-static inline void w83627ehf_reset_bank(struct i2c_client *client, u16 reg)
+static inline void w83627ehf_reset_bank(struct w83627ehf_data *data, u16 reg)
{
if (reg & 0xff00) {
- outb_p(W83627EHF_REG_BANK, client->addr + ADDR_REG_OFFSET);
- outb_p(0, client->addr + DATA_REG_OFFSET);
+ outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
+ outb_p(0, data->addr + DATA_REG_OFFSET);
}
}
-static u16 w83627ehf_read_value(struct i2c_client *client, u16 reg)
+static u16 w83627ehf_read_value(struct w83627ehf_data *data, u16 reg)
{
- struct w83627ehf_data *data = i2c_get_clientdata(client);
int res, word_sized = is_word_sized(reg);
mutex_lock(&data->lock);
- w83627ehf_set_bank(client, reg);
- outb_p(reg & 0xff, client->addr + ADDR_REG_OFFSET);
- res = inb_p(client->addr + DATA_REG_OFFSET);
+ w83627ehf_set_bank(data, reg);
+ outb_p(reg & 0xff, data->addr + ADDR_REG_OFFSET);
+ res = inb_p(data->addr + DATA_REG_OFFSET);
if (word_sized) {
outb_p((reg & 0xff) + 1,
- client->addr + ADDR_REG_OFFSET);
- res = (res << 8) + inb_p(client->addr + DATA_REG_OFFSET);
+ data->addr + ADDR_REG_OFFSET);
+ res = (res << 8) + inb_p(data->addr + DATA_REG_OFFSET);
}
- w83627ehf_reset_bank(client, reg);
+ w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
return res;
}
-static int w83627ehf_write_value(struct i2c_client *client, u16 reg, u16 value)
+static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value)
{
- struct w83627ehf_data *data = i2c_get_clientdata(client);
int word_sized = is_word_sized(reg);
mutex_lock(&data->lock);
- w83627ehf_set_bank(client, reg);
- outb_p(reg & 0xff, client->addr + ADDR_REG_OFFSET);
+ w83627ehf_set_bank(data, reg);
+ outb_p(reg & 0xff, data->addr + ADDR_REG_OFFSET);
if (word_sized) {
- outb_p(value >> 8, client->addr + DATA_REG_OFFSET);
+ outb_p(value >> 8, data->addr + DATA_REG_OFFSET);
outb_p((reg & 0xff) + 1,
- client->addr + ADDR_REG_OFFSET);
+ data->addr + ADDR_REG_OFFSET);
}
- outb_p(value & 0xff, client->addr + DATA_REG_OFFSET);
- w83627ehf_reset_bank(client, reg);
+ outb_p(value & 0xff, data->addr + DATA_REG_OFFSET);
+ w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
return 0;
}
/* This function assumes that the caller holds data->update_lock */
-static void w83627ehf_write_fan_div(struct i2c_client *client, int nr)
+static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
- struct w83627ehf_data *data = i2c_get_clientdata(client);
u8 reg;
switch (nr) {
case 0:
- reg = (w83627ehf_read_value(client, W83627EHF_REG_FANDIV1) & 0xcf)
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_FANDIV1) & 0xcf)
| ((data->fan_div[0] & 0x03) << 4);
/* fan5 input control bit is write only, compute the value */
reg |= (data->has_fan & (1 << 4)) ? 1 : 0;
- w83627ehf_write_value(client, W83627EHF_REG_FANDIV1, reg);
- reg = (w83627ehf_read_value(client, W83627EHF_REG_VBAT) & 0xdf)
+ w83627ehf_write_value(data, W83627EHF_REG_FANDIV1, reg);
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_VBAT) & 0xdf)
| ((data->fan_div[0] & 0x04) << 3);
- w83627ehf_write_value(client, W83627EHF_REG_VBAT, reg);
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, reg);
break;
case 1:
- reg = (w83627ehf_read_value(client, W83627EHF_REG_FANDIV1) & 0x3f)
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_FANDIV1) & 0x3f)
| ((data->fan_div[1] & 0x03) << 6);
/* fan5 input control bit is write only, compute the value */
reg |= (data->has_fan & (1 << 4)) ? 1 : 0;
- w83627ehf_write_value(client, W83627EHF_REG_FANDIV1, reg);
- reg = (w83627ehf_read_value(client, W83627EHF_REG_VBAT) & 0xbf)
+ w83627ehf_write_value(data, W83627EHF_REG_FANDIV1, reg);
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_VBAT) & 0xbf)
| ((data->fan_div[1] & 0x04) << 4);
- w83627ehf_write_value(client, W83627EHF_REG_VBAT, reg);
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, reg);
break;
case 2:
- reg = (w83627ehf_read_value(client, W83627EHF_REG_FANDIV2) & 0x3f)
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_FANDIV2) & 0x3f)
| ((data->fan_div[2] & 0x03) << 6);
- w83627ehf_write_value(client, W83627EHF_REG_FANDIV2, reg);
- reg = (w83627ehf_read_value(client, W83627EHF_REG_VBAT) & 0x7f)
+ w83627ehf_write_value(data, W83627EHF_REG_FANDIV2, reg);
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_VBAT) & 0x7f)
| ((data->fan_div[2] & 0x04) << 5);
- w83627ehf_write_value(client, W83627EHF_REG_VBAT, reg);
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, reg);
break;
case 3:
- reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0xfc)
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_DIODE) & 0xfc)
| (data->fan_div[3] & 0x03);
- w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg);
- reg = (w83627ehf_read_value(client, W83627EHF_REG_SMI_OVT) & 0x7f)
+ w83627ehf_write_value(data, W83627EHF_REG_DIODE, reg);
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_SMI_OVT) & 0x7f)
| ((data->fan_div[3] & 0x04) << 5);
- w83627ehf_write_value(client, W83627EHF_REG_SMI_OVT, reg);
+ w83627ehf_write_value(data, W83627EHF_REG_SMI_OVT, reg);
break;
case 4:
- reg = (w83627ehf_read_value(client, W83627EHF_REG_DIODE) & 0x73)
+ reg = (w83627ehf_read_value(data, W83627EHF_REG_DIODE) & 0x73)
| ((data->fan_div[4] & 0x03) << 2)
| ((data->fan_div[4] & 0x04) << 5);
- w83627ehf_write_value(client, W83627EHF_REG_DIODE, reg);
+ w83627ehf_write_value(data, W83627EHF_REG_DIODE, reg);
break;
}
}
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
int i;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ)
+ if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- i = w83627ehf_read_value(client, W83627EHF_REG_FANDIV1);
+ i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
data->fan_div[0] = (i >> 4) & 0x03;
data->fan_div[1] = (i >> 6) & 0x03;
- i = w83627ehf_read_value(client, W83627EHF_REG_FANDIV2);
+ i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV2);
data->fan_div[2] = (i >> 6) & 0x03;
- i = w83627ehf_read_value(client, W83627EHF_REG_VBAT);
+ i = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
data->fan_div[0] |= (i >> 3) & 0x04;
data->fan_div[1] |= (i >> 4) & 0x04;
data->fan_div[2] |= (i >> 5) & 0x04;
if (data->has_fan & ((1 << 3) | (1 << 4))) {
- i = w83627ehf_read_value(client, W83627EHF_REG_DIODE);
+ i = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
data->fan_div[3] = i & 0x03;
data->fan_div[4] = ((i >> 2) & 0x03)
| ((i >> 5) & 0x04);
}
if (data->has_fan & (1 << 3)) {
- i = w83627ehf_read_value(client, W83627EHF_REG_SMI_OVT);
+ i = w83627ehf_read_value(data, W83627EHF_REG_SMI_OVT);
data->fan_div[3] |= (i >> 5) & 0x04;
}
/* Measured voltages and limits */
- for (i = 0; i < w83627ehf_num_in; i++) {
- data->in[i] = w83627ehf_read_value(client,
+ for (i = 0; i < data->in_num; i++) {
+ data->in[i] = w83627ehf_read_value(data,
W83627EHF_REG_IN(i));
- data->in_min[i] = w83627ehf_read_value(client,
+ data->in_min[i] = w83627ehf_read_value(data,
W83627EHF_REG_IN_MIN(i));
- data->in_max[i] = w83627ehf_read_value(client,
+ data->in_max[i] = w83627ehf_read_value(data,
W83627EHF_REG_IN_MAX(i));
}
@@ -461,9 +467,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (!(data->has_fan & (1 << i)))
continue;
- data->fan[i] = w83627ehf_read_value(client,
+ data->fan[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN[i]);
- data->fan_min[i] = w83627ehf_read_value(client,
+ data->fan_min[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN_MIN[i]);
/* If we failed to measure the fan speed and clock
@@ -471,16 +477,16 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
time */
if (data->fan[i] == 0xff
&& data->fan_div[i] < 0x07) {
- dev_dbg(&client->dev, "Increasing fan%d "
+ dev_dbg(dev, "Increasing fan%d "
"clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div(client, i);
+ w83627ehf_write_fan_div(data, i);
/* Preserve min limit if possible */
if (data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
- w83627ehf_write_value(client,
+ w83627ehf_write_value(data,
W83627EHF_REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
@@ -489,9 +495,9 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
for (i = 0; i < 4; i++) {
 			/* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
if (i != 1) {
- pwmcfg = w83627ehf_read_value(client,
+ pwmcfg = w83627ehf_read_value(data,
W83627EHF_REG_PWM_ENABLE[i]);
- tolerance = w83627ehf_read_value(client,
+ tolerance = w83627ehf_read_value(data,
W83627EHF_REG_TOLERANCE[i]);
}
data->pwm_mode[i] =
@@ -500,14 +506,14 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
data->pwm_enable[i] =
((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
& 3) + 1;
- data->pwm[i] = w83627ehf_read_value(client,
+ data->pwm[i] = w83627ehf_read_value(data,
W83627EHF_REG_PWM[i]);
- data->fan_min_output[i] = w83627ehf_read_value(client,
+ data->fan_min_output[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN_MIN_OUTPUT[i]);
- data->fan_stop_time[i] = w83627ehf_read_value(client,
+ data->fan_stop_time[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN_STOP_TIME[i]);
data->target_temp[i] =
- w83627ehf_read_value(client,
+ w83627ehf_read_value(data,
W83627EHF_REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0))
@@ -515,26 +521,26 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
}
/* Measured temperatures and limits */
- data->temp1 = w83627ehf_read_value(client,
+ data->temp1 = w83627ehf_read_value(data,
W83627EHF_REG_TEMP1);
- data->temp1_max = w83627ehf_read_value(client,
+ data->temp1_max = w83627ehf_read_value(data,
W83627EHF_REG_TEMP1_OVER);
- data->temp1_max_hyst = w83627ehf_read_value(client,
+ data->temp1_max_hyst = w83627ehf_read_value(data,
W83627EHF_REG_TEMP1_HYST);
for (i = 0; i < 2; i++) {
- data->temp[i] = w83627ehf_read_value(client,
+ data->temp[i] = w83627ehf_read_value(data,
W83627EHF_REG_TEMP[i]);
- data->temp_max[i] = w83627ehf_read_value(client,
+ data->temp_max[i] = w83627ehf_read_value(data,
W83627EHF_REG_TEMP_OVER[i]);
- data->temp_max_hyst[i] = w83627ehf_read_value(client,
+ data->temp_max_hyst[i] = w83627ehf_read_value(data,
W83627EHF_REG_TEMP_HYST[i]);
}
- data->alarms = w83627ehf_read_value(client,
+ data->alarms = w83627ehf_read_value(data,
W83627EHF_REG_ALARM1) |
- (w83627ehf_read_value(client,
+ (w83627ehf_read_value(data,
W83627EHF_REG_ALARM2) << 8) |
- (w83627ehf_read_value(client,
+ (w83627ehf_read_value(data,
W83627EHF_REG_ALARM3) << 16);
data->last_updated = jiffies;
@@ -567,15 +573,14 @@ static ssize_t \
store_in_##reg (struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct w83627ehf_data *data = i2c_get_clientdata(client); \
+ struct w83627ehf_data *data = dev_get_drvdata(dev); \
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
u32 val = simple_strtoul(buf, NULL, 10); \
\
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = in_to_reg(val, nr); \
- w83627ehf_write_value(client, W83627EHF_REG_IN_##REG(nr), \
+ w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
return count; \
@@ -673,8 +678,7 @@ static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
unsigned int val = simple_strtoul(buf, NULL, 10);
@@ -716,18 +720,25 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
/* Write both the fan clock divider (if it changed) and the new
fan min (unconditionally) */
if (new_div != data->fan_div[nr]) {
- if (new_div > data->fan_div[nr])
- data->fan[nr] >>= (data->fan_div[nr] - new_div);
- else
- data->fan[nr] <<= (new_div - data->fan_div[nr]);
+ /* Preserve the fan speed reading */
+ if (data->fan[nr] != 0xff) {
+ if (new_div > data->fan_div[nr])
+ data->fan[nr] >>= new_div - data->fan_div[nr];
+ else if (data->fan[nr] & 0x80)
+ data->fan[nr] = 0xff;
+ else
+ data->fan[nr] <<= data->fan_div[nr] - new_div;
+ }
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
nr + 1, div_from_reg(data->fan_div[nr]),
div_from_reg(new_div));
data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div(client, nr);
+ w83627ehf_write_fan_div(data, nr);
+ /* Give the chip time to sample a new speed value */
+ data->last_updated = jiffies;
}
- w83627ehf_write_value(client, W83627EHF_REG_FAN_MIN[nr],
+ w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[nr],
data->fan_min[nr]);
mutex_unlock(&data->update_lock);
@@ -788,13 +799,12 @@ static ssize_t \
store_temp1_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct w83627ehf_data *data = i2c_get_clientdata(client); \
+ struct w83627ehf_data *data = dev_get_drvdata(dev); \
u32 val = simple_strtoul(buf, NULL, 10); \
\
mutex_lock(&data->update_lock); \
data->temp1_##reg = temp1_to_reg(val, -128000, 127000); \
- w83627ehf_write_value(client, W83627EHF_REG_TEMP1_##REG, \
+ w83627ehf_write_value(data, W83627EHF_REG_TEMP1_##REG, \
data->temp1_##reg); \
mutex_unlock(&data->update_lock); \
return count; \
@@ -822,15 +832,14 @@ static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct w83627ehf_data *data = i2c_get_clientdata(client); \
+ struct w83627ehf_data *data = dev_get_drvdata(dev); \
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
u32 val = simple_strtoul(buf, NULL, 10); \
\
mutex_lock(&data->update_lock); \
data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_value(client, W83627EHF_REG_TEMP_##REG[nr], \
+ w83627ehf_write_value(data, W83627EHF_REG_TEMP_##REG[nr], \
data->reg[nr]); \
mutex_unlock(&data->update_lock); \
return count; \
@@ -838,6 +847,15 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
store_temp_reg(OVER, temp_max);
store_temp_reg(HYST, temp_max_hyst);
+static ssize_t
+show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n", (int)data->temp_type[nr]);
+}
+
static struct sensor_device_attribute sda_temp[] = {
SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0),
SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0),
@@ -857,6 +875,9 @@ static struct sensor_device_attribute sda_temp[] = {
SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
+ SENSOR_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0),
+ SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1),
+ SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2),
};
#define show_pwm_reg(reg) \
@@ -877,8 +898,7 @@ static ssize_t
store_pwm_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u32 val = simple_strtoul(buf, NULL, 10);
@@ -887,12 +907,12 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(client, W83627EHF_REG_PWM_ENABLE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
data->pwm_mode[nr] = val;
reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]);
if (!val)
reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr];
- w83627ehf_write_value(client, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -901,15 +921,14 @@ static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
- w83627ehf_write_value(client, W83627EHF_REG_PWM[nr], val);
+ w83627ehf_write_value(data, W83627EHF_REG_PWM[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -918,8 +937,7 @@ static ssize_t
store_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u32 val = simple_strtoul(buf, NULL, 10);
@@ -928,11 +946,11 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
if (!val || (val > 2)) /* only modes 1 and 2 are supported */
return -EINVAL;
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(client, W83627EHF_REG_PWM_ENABLE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
data->pwm_enable[nr] = val;
reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(client, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -955,15 +973,14 @@ static ssize_t
store_target_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 127000);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(client, W83627EHF_REG_TARGET[nr], val);
+ w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -972,8 +989,7 @@ static ssize_t
store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct w83627ehf_data *data = i2c_get_clientdata(client);
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
@@ -981,13 +997,13 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 15000);
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(client, W83627EHF_REG_TOLERANCE[nr]);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
data->tolerance[nr] = val;
if (nr == 1)
reg = (reg & 0x0f) | (val << 4);
else
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(client, W83627EHF_REG_TOLERANCE[nr], reg);
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1058,14 +1074,13 @@ static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{\
- struct i2c_client *client = to_i2c_client(dev); \
- struct w83627ehf_data *data = i2c_get_clientdata(client); \
+ struct w83627ehf_data *data = dev_get_drvdata(dev); \
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(client, W83627EHF_REG_##REG[nr], val); \
+ w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
}
@@ -1087,21 +1102,28 @@ static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct i2c_client *client = to_i2c_client(dev); \
- struct w83627ehf_data *data = i2c_get_clientdata(client); \
+ struct w83627ehf_data *data = dev_get_drvdata(dev); \
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
u8 val = step_time_to_reg(simple_strtoul(buf, NULL, 10), \
data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
- w83627ehf_write_value(client, W83627EHF_REG_##REG[nr], val); \
+ w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
mutex_unlock(&data->update_lock); \
return count; \
} \
fan_time_functions(fan_stop_time, FAN_STOP_TIME)
+static ssize_t show_name(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
@@ -1125,8 +1147,16 @@ static struct sensor_device_attribute sda_sf3_arrays[] = {
store_fan_min_output, 2),
};
+static ssize_t
+show_vid(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
+}
+static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
+
/*
- * Driver and client management
+ * Driver and device management
*/
static void w83627ehf_device_remove_files(struct device *dev)
@@ -1134,12 +1164,13 @@ static void w83627ehf_device_remove_files(struct device *dev)
/* some entries in the following arrays may not have been used in
* device_create_file(), but device_remove_file() will ignore them */
int i;
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
device_remove_file(dev, &sda_sf3_arrays[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr);
- for (i = 0; i < w83627ehf_num_in; i++) {
+ for (i = 0; i < data->in_num; i++) {
device_remove_file(dev, &sda_in_input[i].dev_attr);
device_remove_file(dev, &sda_in_alarm[i].dev_attr);
device_remove_file(dev, &sda_in_min[i].dev_attr);
@@ -1160,43 +1191,64 @@ static void w83627ehf_device_remove_files(struct device *dev)
}
for (i = 0; i < ARRAY_SIZE(sda_temp); i++)
device_remove_file(dev, &sda_temp[i].dev_attr);
-}
-static struct i2c_driver w83627ehf_driver;
+ device_remove_file(dev, &dev_attr_name);
+ if (data->vid != 0x3f)
+ device_remove_file(dev, &dev_attr_cpu0_vid);
+}
-static void w83627ehf_init_client(struct i2c_client *client)
+/* Get the monitoring functions started */
+static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
{
int i;
- u8 tmp;
+ u8 tmp, diode;
	/* Start monitoring if needed */
- tmp = w83627ehf_read_value(client, W83627EHF_REG_CONFIG);
+ tmp = w83627ehf_read_value(data, W83627EHF_REG_CONFIG);
if (!(tmp & 0x01))
- w83627ehf_write_value(client, W83627EHF_REG_CONFIG,
+ w83627ehf_write_value(data, W83627EHF_REG_CONFIG,
tmp | 0x01);
/* Enable temp2 and temp3 if needed */
for (i = 0; i < 2; i++) {
- tmp = w83627ehf_read_value(client,
+ tmp = w83627ehf_read_value(data,
W83627EHF_REG_TEMP_CONFIG[i]);
if (tmp & 0x01)
- w83627ehf_write_value(client,
+ w83627ehf_write_value(data,
W83627EHF_REG_TEMP_CONFIG[i],
tmp & 0xfe);
}
+
+ /* Enable VBAT monitoring if needed */
+ tmp = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
+ if (!(tmp & 0x01))
+ w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
+
+ /* Get thermal sensor types */
+ diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
+ for (i = 0; i < 3; i++) {
+ if ((tmp & (0x02 << i)))
+ data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
+ else
+ data->temp_type[i] = 4; /* thermistor */
+ }
}
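As an aside, the sensor-type decode above maps two register bit fields onto the usual hwmon tempN_type values (1 = CPU diode, 2 = 3904 transistor, 4 = thermistor). Below is a minimal standalone sketch of the same decode, with the register semantics inferred from the code above and purely hypothetical register contents:

/* Standalone sketch (not part of the patch): the sensor-type decode used
 * in w83627ehf_init_device() above.  Returned values follow the usual
 * hwmon tempN_type convention: 1 = CPU diode, 2 = 3904 transistor,
 * 4 = thermistor. */
#include <stdio.h>

static int temp_type(unsigned char vbat, unsigned char diode, int channel)
{
	if (!(vbat & (0x02 << channel)))
		return 4;				/* thermistor */
	return (diode & (0x10 << channel)) ? 1 : 2;	/* diode source */
}

int main(void)
{
	/* hypothetical register contents */
	printf("%d\n", temp_type(0x0f, 0x10, 0));	/* 1: CPU diode */
	printf("%d\n", temp_type(0x0f, 0x10, 1));	/* 2: 3904 transistor */
	printf("%d\n", temp_type(0x01, 0x00, 2));	/* 4: thermistor */
	return 0;
}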
-static int w83627ehf_detect(struct i2c_adapter *adapter)
+static int __devinit w83627ehf_probe(struct platform_device *pdev)
{
- struct i2c_client *client;
+ struct device *dev = &pdev->dev;
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct w83627ehf_data *data;
- struct device *dev;
- u8 fan4pin, fan5pin;
+ struct resource *res;
+ u8 fan4pin, fan5pin, en_vrm10;
int i, err = 0;
- if (!request_region(address + IOREGION_OFFSET, IOREGION_LENGTH,
- w83627ehf_driver.driver.name)) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) {
err = -EBUSY;
+ dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+ (unsigned long)res->start,
+ (unsigned long)res->start + IOREGION_LENGTH - 1);
goto exit;
}
@@ -1205,41 +1257,47 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
goto exit_release;
}
- client = &data->client;
- i2c_set_clientdata(client, data);
- client->addr = address;
+ data->addr = res->start;
mutex_init(&data->lock);
- client->adapter = adapter;
- client->driver = &w83627ehf_driver;
- client->flags = 0;
- dev = &client->dev;
-
- if (w83627ehf_num_in == 9)
- strlcpy(client->name, "w83627dhg", I2C_NAME_SIZE);
- else /* just say ehf. 627EHG is 627EHF in lead-free packaging. */
- strlcpy(client->name, "w83627ehf", I2C_NAME_SIZE);
-
- data->valid = 0;
mutex_init(&data->update_lock);
+ data->name = w83627ehf_device_names[sio_data->kind];
+ platform_set_drvdata(pdev, data);
- /* Tell the i2c layer a new client has arrived */
- if ((err = i2c_attach_client(client)))
- goto exit_free;
+ /* 627EHG and 627EHF have 10 voltage inputs; DHG has 9 */
+ data->in_num = (sio_data->kind == w83627dhg) ? 9 : 10;
/* Initialize the chip */
- w83627ehf_init_client(client);
-
- /* A few vars need to be filled upon startup */
- for (i = 0; i < 5; i++)
- data->fan_min[i] = w83627ehf_read_value(client,
- W83627EHF_REG_FAN_MIN[i]);
+ w83627ehf_init_device(data);
+
+ data->vrm = vid_which_vrm();
+ superio_enter(sio_data->sioreg);
+	/* Set VID input voltage level if needed. In theory the BIOS should
+ have set it, but in practice it's not always the case. */
+ en_vrm10 = superio_inb(sio_data->sioreg, SIO_REG_EN_VRM10);
+ if ((en_vrm10 & 0x08) && data->vrm != 100) {
+ dev_warn(dev, "Setting VID input voltage to TTL\n");
+ superio_outb(sio_data->sioreg, SIO_REG_EN_VRM10,
+ en_vrm10 & ~0x08);
+ } else if (!(en_vrm10 & 0x08) && data->vrm == 100) {
+ dev_warn(dev, "Setting VID input voltage to VRM10\n");
+ superio_outb(sio_data->sioreg, SIO_REG_EN_VRM10,
+ en_vrm10 | 0x08);
+ }
+ /* Read VID value */
+ superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+ if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80)
+ data->vid = superio_inb(sio_data->sioreg, SIO_REG_VID_DATA) & 0x3f;
+ else {
+ dev_info(dev, "VID pins in output mode, CPU VID not "
+ "available\n");
+ data->vid = 0x3f;
+ }
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- superio_enter();
- fan5pin = superio_inb(0x24) & 0x2;
- fan4pin = superio_inb(0x29) & 0x6;
- superio_exit();
+ fan5pin = superio_inb(sio_data->sioreg, 0x24) & 0x2;
+ fan4pin = superio_inb(sio_data->sioreg, 0x29) & 0x6;
+ superio_exit(sio_data->sioreg);
/* It looks like fan4 and fan5 pins can be alternatively used
as fan on/off switches, but fan5 control is write only :/
@@ -1248,7 +1306,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
is not the default. */
data->has_fan = 0x07; /* fan1, fan2 and fan3 */
- i = w83627ehf_read_value(client, W83627EHF_REG_FANDIV1);
+ i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
if ((i & (1 << 2)) && (!fan4pin))
data->has_fan |= (1 << 3);
if (!(i & (1 << 1)) && (!fan5pin))
@@ -1268,7 +1326,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
goto exit_remove;
}
- for (i = 0; i < w83627ehf_num_in; i++)
+ for (i = 0; i < data->in_num; i++)
if ((err = device_create_file(dev, &sda_in_input[i].dev_attr))
|| (err = device_create_file(dev,
&sda_in_alarm[i].dev_attr))
@@ -1308,6 +1366,16 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
if ((err = device_create_file(dev, &sda_temp[i].dev_attr)))
goto exit_remove;
+ err = device_create_file(dev, &dev_attr_name);
+ if (err)
+ goto exit_remove;
+
+ if (data->vid != 0x3f) {
+ err = device_create_file(dev, &dev_attr_cpu0_vid);
+ if (err)
+ goto exit_remove;
+ }
+
data->class_dev = hwmon_device_register(dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
@@ -1318,95 +1386,172 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
exit_remove:
w83627ehf_device_remove_files(dev);
- i2c_detach_client(client);
-exit_free:
kfree(data);
+ platform_set_drvdata(pdev, NULL);
exit_release:
- release_region(address + IOREGION_OFFSET, IOREGION_LENGTH);
+ release_region(res->start, IOREGION_LENGTH);
exit:
return err;
}
-static int w83627ehf_detach_client(struct i2c_client *client)
+static int __devexit w83627ehf_remove(struct platform_device *pdev)
{
- struct w83627ehf_data *data = i2c_get_clientdata(client);
- int err;
+ struct w83627ehf_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->class_dev);
- w83627ehf_device_remove_files(&client->dev);
-
- if ((err = i2c_detach_client(client)))
- return err;
- release_region(client->addr + IOREGION_OFFSET, IOREGION_LENGTH);
+ w83627ehf_device_remove_files(&pdev->dev);
+ release_region(data->addr, IOREGION_LENGTH);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
}
-static struct i2c_driver w83627ehf_driver = {
+static struct platform_driver w83627ehf_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = "w83627ehf",
+ .name = DRVNAME,
},
- .attach_adapter = w83627ehf_detect,
- .detach_client = w83627ehf_detach_client,
+ .probe = w83627ehf_probe,
+ .remove = __devexit_p(w83627ehf_remove),
};
-static int __init w83627ehf_find(int sioaddr, unsigned short *addr)
+/* w83627ehf_find() looks for a '627 in the Super-I/O config space */
+static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
+ struct w83627ehf_sio_data *sio_data)
{
+ static const char __initdata sio_name_W83627EHF[] = "W83627EHF";
+ static const char __initdata sio_name_W83627EHG[] = "W83627EHG";
+ static const char __initdata sio_name_W83627DHG[] = "W83627DHG";
+
u16 val;
+ const char *sio_name;
- REG = sioaddr;
- VAL = sioaddr + 1;
- superio_enter();
+ superio_enter(sioaddr);
- val = (superio_inb(SIO_REG_DEVID) << 8)
- | superio_inb(SIO_REG_DEVID + 1);
+ val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8)
+ | superio_inb(sioaddr, SIO_REG_DEVID + 1);
switch (val & SIO_ID_MASK) {
- case SIO_W83627DHG_ID:
- w83627ehf_num_in = 9;
- break;
case SIO_W83627EHF_ID:
+ sio_data->kind = w83627ehf;
+ sio_name = sio_name_W83627EHF;
+ break;
case SIO_W83627EHG_ID:
- w83627ehf_num_in = 10;
+ sio_data->kind = w83627ehf;
+ sio_name = sio_name_W83627EHG;
+ break;
+ case SIO_W83627DHG_ID:
+ sio_data->kind = w83627dhg;
+ sio_name = sio_name_W83627DHG;
break;
default:
- printk(KERN_WARNING "w83627ehf: unsupported chip ID: 0x%04x\n",
- val);
- superio_exit();
+ if (val != 0xffff)
+ pr_debug(DRVNAME ": unsupported chip ID: 0x%04x\n",
+ val);
+ superio_exit(sioaddr);
return -ENODEV;
}
- superio_select(W83627EHF_LD_HWM);
- val = (superio_inb(SIO_REG_ADDR) << 8)
- | superio_inb(SIO_REG_ADDR + 1);
+ /* We have a known chip, find the HWM I/O address */
+ superio_select(sioaddr, W83627EHF_LD_HWM);
+ val = (superio_inb(sioaddr, SIO_REG_ADDR) << 8)
+ | superio_inb(sioaddr, SIO_REG_ADDR + 1);
*addr = val & IOREGION_ALIGNMENT;
if (*addr == 0) {
- superio_exit();
+ printk(KERN_ERR DRVNAME ": Refusing to enable a Super-I/O "
+ "device with a base I/O port 0.\n");
+ superio_exit(sioaddr);
return -ENODEV;
}
/* Activate logical device if needed */
- val = superio_inb(SIO_REG_ENABLE);
- if (!(val & 0x01))
- superio_outb(SIO_REG_ENABLE, val | 0x01);
+ val = superio_inb(sioaddr, SIO_REG_ENABLE);
+ if (!(val & 0x01)) {
+ printk(KERN_WARNING DRVNAME ": Forcibly enabling Super-I/O. "
+ "Sensor is probably unusable.\n");
+ superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
+ }
+
+ superio_exit(sioaddr);
+ pr_info(DRVNAME ": Found %s chip at %#x\n", sio_name, *addr);
+ sio_data->sioreg = sioaddr;
- superio_exit();
return 0;
}
+/* When the Super-I/O functions move to a separate file, the Super-I/O
+ * bus will manage the lifetime of the device and this module will only keep
+ * track of the w83627ehf driver. But since we call platform_device_alloc()
+ * here, we must keep track of the device ourselves. */
+static struct platform_device *pdev;
+
static int __init sensors_w83627ehf_init(void)
{
- if (w83627ehf_find(0x2e, &address)
- && w83627ehf_find(0x4e, &address))
+ int err;
+ unsigned short address;
+ struct resource res;
+ struct w83627ehf_sio_data sio_data;
+
+	/* Initialize sio_data->kind and sio_data->sioreg.
+	 *
+	 * When the Super-I/O functions move to a separate file, the Super-I/O
+	 * driver will probe 0x2e and 0x4e, auto-detect the presence of a
+	 * w83627ehf hardware monitor, and call probe(). */
+ if (w83627ehf_find(0x2e, &address, &sio_data) &&
+ w83627ehf_find(0x4e, &address, &sio_data))
return -ENODEV;
- return i2c_isa_add_driver(&w83627ehf_driver);
+ err = platform_driver_register(&w83627ehf_driver);
+ if (err)
+ goto exit;
+
+ if (!(pdev = platform_device_alloc(DRVNAME, address))) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ goto exit_unregister;
+ }
+
+ err = platform_device_add_data(pdev, &sio_data,
+ sizeof(struct w83627ehf_sio_data));
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+ goto exit_device_put;
+ }
+
+ memset(&res, 0, sizeof(res));
+ res.name = DRVNAME;
+ res.start = address + IOREGION_OFFSET;
+ res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
+ res.flags = IORESOURCE_IO;
+ err = platform_device_add_resources(pdev, &res, 1);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device resource addition failed "
+ "(%d)\n", err);
+ goto exit_device_put;
+ }
+
+ /* platform_device_add calls probe() */
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+ err);
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(pdev);
+exit_unregister:
+ platform_driver_unregister(&w83627ehf_driver);
+exit:
+ return err;
}
static void __exit sensors_w83627ehf_exit(void)
{
- i2c_isa_del_driver(&w83627ehf_driver);
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&w83627ehf_driver);
}
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 12cb40a975de..1ce78179b005 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -220,6 +220,18 @@ static const u8 regpwm[] = { W83627THF_REG_PWM1, W83627THF_REG_PWM2,
#define W836X7HF_REG_PWM(type, nr) (((type) == w83627hf) ? \
regpwm_627hf[(nr) - 1] : regpwm[(nr) - 1])
+#define W83627HF_REG_PWM_FREQ 0x5C /* Only for the 627HF */
+
+#define W83637HF_REG_PWM_FREQ1 0x00 /* 697HF/687THF too */
+#define W83637HF_REG_PWM_FREQ2 0x02 /* 697HF/687THF too */
+#define W83637HF_REG_PWM_FREQ3 0x10 /* 687THF too */
+
+static const u8 W83637HF_REG_PWM_FREQ[] = { W83637HF_REG_PWM_FREQ1,
+ W83637HF_REG_PWM_FREQ2,
+ W83637HF_REG_PWM_FREQ3 };
+
+#define W83627HF_BASE_PWM_FREQ 46870
+
#define W83781D_REG_I2C_ADDR 0x48
#define W83781D_REG_I2C_SUBADDR 0x4A
@@ -267,6 +279,49 @@ static int TEMP_FROM_REG(u8 reg)
#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))
+static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
+{
+ unsigned long freq;
+ freq = W83627HF_BASE_PWM_FREQ >> reg;
+ return freq;
+}
+static inline u8 pwm_freq_to_reg_627hf(unsigned long val)
+{
+ u8 i;
+ /* Only 5 dividers (1 2 4 8 16)
+ Search for the nearest available frequency */
+ for (i = 0; i < 4; i++) {
+ if (val > (((W83627HF_BASE_PWM_FREQ >> i) +
+ (W83627HF_BASE_PWM_FREQ >> (i+1))) / 2))
+ break;
+ }
+ return i;
+}
+
+static inline unsigned long pwm_freq_from_reg(u8 reg)
+{
+ /* Clock bit 8 -> 180 kHz or 24 MHz */
+ unsigned long clock = (reg & 0x80) ? 180000UL : 24000000UL;
+
+ reg &= 0x7f;
+ /* This should not happen but anyway... */
+ if (reg == 0)
+ reg++;
+ return (clock / (reg << 8));
+}
+static inline u8 pwm_freq_to_reg(unsigned long val)
+{
+ /* Minimum divider value is 0x01 and maximum is 0x7F */
+ if (val >= 93750) /* The highest we can do */
+ return 0x01;
+ if (val >= 720) /* Use 24 MHz clock */
+ return (24000000UL / (val << 8));
+ if (val < 6) /* The lowest we can do */
+ return 0xFF;
+ else /* Use 180 kHz clock */
+ return (0x80 | (180000UL / (val << 8)));
+}
+
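To sanity-check the math above: the 627HF divides a fixed 46870 Hz base clock by a power of two, while the 637HF/687THF divide either a 24 MHz or a 180 kHz clock by (reg & 0x7f) * 256. A standalone sketch (not part of the patch) exercising the from-register direction with hypothetical register values:

/* Standalone sketch (not part of the patch): worked examples for the two
 * PWM frequency conversions introduced above. */
#include <stdio.h>

#define W83627HF_BASE_PWM_FREQ 46870

static unsigned long pwm_freq_from_reg_627hf(unsigned char reg)
{
	return W83627HF_BASE_PWM_FREQ >> reg;
}

static unsigned long pwm_freq_from_reg(unsigned char reg)
{
	/* Clock bit 8 -> 180 kHz or 24 MHz */
	unsigned long clock = (reg & 0x80) ? 180000UL : 24000000UL;

	reg &= 0x7f;
	if (reg == 0)
		reg = 1;
	return clock / (reg << 8);
}

int main(void)
{
	printf("%lu\n", pwm_freq_from_reg_627hf(2));	/* 46870 / 4 = 11717 Hz */
	printf("%lu\n", pwm_freq_from_reg(0x01));	/* 24000000 / 256 = 93750 Hz */
	printf("%lu\n", pwm_freq_from_reg(0x83));	/* 180000 / 768 = 234 Hz */
	return 0;
}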
#define BEEP_MASK_FROM_REG(val) (val)
#define BEEP_MASK_TO_REG(val) ((val) & 0xffffff)
#define BEEP_ENABLE_TO_REG(val) ((val)?1:0)
@@ -316,6 +371,7 @@ struct w83627hf_data {
u32 beep_mask; /* Register encoding, combined */
u8 beep_enable; /* Boolean */
u8 pwm[3]; /* Register value */
+ u8 pwm_freq[3]; /* Register value */
u16 sens[3]; /* 782D/783S only.
1 = pentium diode; 2 = 3904 diode;
3000-5000 = thermistor beta.
@@ -852,6 +908,64 @@ sysfs_pwm(2);
sysfs_pwm(3);
static ssize_t
+show_pwm_freq_reg(struct device *dev, char *buf, int nr)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ if (data->type == w83627hf)
+ return sprintf(buf, "%ld\n",
+ pwm_freq_from_reg_627hf(data->pwm_freq[nr - 1]));
+ else
+ return sprintf(buf, "%ld\n",
+ pwm_freq_from_reg(data->pwm_freq[nr - 1]));
+}
+
+static ssize_t
+store_pwm_freq_reg(struct device *dev, const char *buf, size_t count, int nr)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+	static const u8 mask[] = { 0xF8, 0x8F };
+ u32 val;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ mutex_lock(&data->update_lock);
+
+ if (data->type == w83627hf) {
+ data->pwm_freq[nr - 1] = pwm_freq_to_reg_627hf(val);
+ w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
+ (data->pwm_freq[nr - 1] << ((nr - 1)*4)) |
+ (w83627hf_read_value(data,
+ W83627HF_REG_PWM_FREQ) & mask[nr - 1]));
+ } else {
+ data->pwm_freq[nr - 1] = pwm_freq_to_reg(val);
+ w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr - 1],
+ data->pwm_freq[nr - 1]);
+ }
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
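The mask[] table above exists because, judging from the masks and shifts in store_pwm_freq_reg(), the 627HF packs both PWM frequency dividers into the single register 0x5C: PWM1 in bits 2:0 and PWM2 in bits 6:4. A standalone read-modify-write sketch with hypothetical values (not part of the patch):

/* Standalone sketch (not part of the patch): packing a new PWM frequency
 * divider into the shared 627HF register without disturbing the other
 * channel, mirroring the mask[]/shift logic above. */
#include <stdio.h>

static unsigned char pack_pwm_freq(unsigned char reg, int nr, unsigned char div)
{
	static const unsigned char mask[] = { 0xF8, 0x8F };

	return (reg & mask[nr - 1]) | (div << ((nr - 1) * 4));
}

int main(void)
{
	/* hypothetical: register currently 0x21, set PWM2 divider field to 3 */
	printf("%#x\n", pack_pwm_freq(0x21, 2, 3));	/* -> 0x31 */
	return 0;
}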
+#define sysfs_pwm_freq(offset) \
+static ssize_t show_regs_pwm_freq_##offset(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return show_pwm_freq_reg(dev, buf, offset); \
+} \
+static ssize_t \
+store_regs_pwm_freq_##offset(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ return store_pwm_freq_reg(dev, buf, count, offset); \
+} \
+static DEVICE_ATTR(pwm##offset##_freq, S_IRUGO | S_IWUSR, \
+ show_regs_pwm_freq_##offset, store_regs_pwm_freq_##offset);
+
+sysfs_pwm_freq(1);
+sysfs_pwm_freq(2);
+sysfs_pwm_freq(3);
+
+static ssize_t
show_sensor_reg(struct device *dev, char *buf, int nr)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1077,6 +1191,9 @@ static struct attribute *w83627hf_attributes_opt[] = {
&dev_attr_pwm3.attr,
+ &dev_attr_pwm1_freq.attr,
+ &dev_attr_pwm2_freq.attr,
+ &dev_attr_pwm3_freq.attr,
NULL
};
@@ -1139,7 +1256,9 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
|| (err = device_create_file(dev, &dev_attr_in5_max))
|| (err = device_create_file(dev, &dev_attr_in6_input))
|| (err = device_create_file(dev, &dev_attr_in6_min))
- || (err = device_create_file(dev, &dev_attr_in6_max)))
+ || (err = device_create_file(dev, &dev_attr_in6_max))
+ || (err = device_create_file(dev, &dev_attr_pwm1_freq))
+ || (err = device_create_file(dev, &dev_attr_pwm2_freq)))
goto ERROR4;
if (data->type != w83697hf)
@@ -1169,6 +1288,12 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
if ((err = device_create_file(dev, &dev_attr_pwm3)))
goto ERROR4;
+ if (data->type == w83637hf || data->type == w83687thf)
+ if ((err = device_create_file(dev, &dev_attr_pwm1_freq))
+ || (err = device_create_file(dev, &dev_attr_pwm2_freq))
+ || (err = device_create_file(dev, &dev_attr_pwm3_freq)))
+ goto ERROR4;
+
data->class_dev = hwmon_device_register(dev);
if (IS_ERR(data->class_dev)) {
err = PTR_ERR(data->class_dev);
@@ -1181,6 +1306,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
sysfs_remove_group(&dev->kobj, &w83627hf_group);
sysfs_remove_group(&dev->kobj, &w83627hf_group_opt);
ERROR3:
+ platform_set_drvdata(pdev, NULL);
kfree(data);
ERROR1:
release_region(res->start, WINB_REGION_SIZE);
@@ -1193,11 +1319,11 @@ static int __devexit w83627hf_remove(struct platform_device *pdev)
struct w83627hf_data *data = platform_get_drvdata(pdev);
struct resource *res;
- platform_set_drvdata(pdev, NULL);
hwmon_device_unregister(data->class_dev);
sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group);
sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt);
+ platform_set_drvdata(pdev, NULL);
kfree(data);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1472,6 +1598,20 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
(data->type == w83627hf || data->type == w83697hf))
break;
}
+ if (data->type == w83627hf) {
+ u8 tmp = w83627hf_read_value(data,
+ W83627HF_REG_PWM_FREQ);
+ data->pwm_freq[0] = tmp & 0x07;
+ data->pwm_freq[1] = (tmp >> 4) & 0x07;
+ } else if (data->type != w83627thf) {
+ for (i = 1; i <= 3; i++) {
+ data->pwm_freq[i - 1] =
+ w83627hf_read_value(data,
+ W83637HF_REG_PWM_FREQ[i - 1]);
+ if (i == 2 && (data->type == w83697hf))
+ break;
+ }
+ }
data->temp = w83627hf_read_value(data, W83781D_REG_TEMP(1));
data->temp_max =
@@ -1548,15 +1688,12 @@ static int __init w83627hf_device_add(unsigned short address,
goto exit_device_put;
}
- pdev->dev.platform_data = kmalloc(sizeof(struct w83627hf_sio_data),
- GFP_KERNEL);
- if (!pdev->dev.platform_data) {
- err = -ENOMEM;
+ err = platform_device_add_data(pdev, sio_data,
+ sizeof(struct w83627hf_sio_data));
+ if (err) {
printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
goto exit_device_put;
}
- memcpy(pdev->dev.platform_data, sio_data,
- sizeof(struct w83627hf_sio_data));
err = platform_device_add(pdev);
if (err) {
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 58899078810b..014dfa575be7 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -34,10 +34,6 @@ config I2C_ALGOPCA
This support is also available as a module. If so, the module
will be called i2c-algo-pca.
-config I2C_ALGO8XX
- tristate "MPC8xx CPM I2C interface"
- depends on 8xx
-
config I2C_ALGO_SGI
tristate "I2C SGI interfaces"
depends on SGI_IP22 || SGI_IP32 || X86_VISWS
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 838dc1c19d61..da1647869f91 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -207,6 +207,7 @@ config I2C_PIIX4
ATI IXP300
ATI IXP400
ATI SB600
+ ATI SB700
Serverworks OSB4
Serverworks CSB5
Serverworks CSB6
@@ -236,9 +237,6 @@ config I2C_IOP3XX
This driver can also be built as a module. If so, the module
will be called i2c-iop3xx.
-config I2C_ISA
- tristate
-
config I2C_IXP4XX
tristate "IXP4xx GPIO-Based I2C Interface (DEPRECATED)"
depends on ARCH_IXP4XX
@@ -390,11 +388,6 @@ config I2C_PROSAVAGE
This support is also available as a module. If so, the module
will be called i2c-prosavage.
-config I2C_RPXLITE
- tristate "Embedded Planet RPX Lite/Classic support"
- depends on RPXLITE || RPXCLASSIC
- select I2C_ALGO8XX
-
config I2C_S3C2410
tristate "S3C2410 I2C Driver"
depends on ARCH_S3C2410
@@ -512,6 +505,22 @@ config I2C_SIS96X
This driver can also be built as a module. If so, the module
will be called i2c-sis96x.
+config I2C_TAOS_EVM
+ tristate "TAOS evaluation module"
+ depends on EXPERIMENTAL
+ select SERIO
+ select SERIO_SERPORT
+ default n
+ help
+	  This supports TAOS evaluation modules on a serial port. In order to
+ use this driver, you will need the inputattach tool, which is part
+ of the input-utils package.
+
+ If unsure, say N.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-taos-evm.
+
config I2C_STUB
tristate "I2C/SMBus Test Stub"
depends on EXPERIMENTAL && m
@@ -548,7 +557,7 @@ config I2C_VERSATILE
will be called i2c-versatile.
config I2C_ACORN
- bool "Acorn IOC/IOMD I2C bus support"
+ tristate "Acorn IOC/IOMD I2C bus support"
depends on ARCH_ACORN
default y
select I2C_ALGOBIT
@@ -635,4 +644,13 @@ config I2C_PNX
This driver can also be built as a module. If so, the module
will be called i2c-pnx.
+config I2C_PMCMSP
+ tristate "PMC MSP I2C TWI Controller"
+ depends on PMC_MSP
+ help
+ This driver supports the PMC TWI controller on MSP devices.
+
+	  This driver can also be built as a module. If so, the module
+ will be called i2c-pmcmsp.
+
endmenu
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 14d1432f698b..5b752e4e1918 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_I2C_I801) += i2c-i801.o
obj-$(CONFIG_I2C_I810) += i2c-i810.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
-obj-$(CONFIG_I2C_ISA) += i2c-isa.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
@@ -32,10 +31,10 @@ obj-$(CONFIG_I2C_PARPORT_LIGHT) += i2c-parport-light.o
obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
+obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PROSAVAGE) += i2c-prosavage.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
-obj-$(CONFIG_I2C_RPXLITE) += i2c-rpx.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_SAVAGE4) += i2c-savage4.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
@@ -44,6 +43,7 @@ obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
+obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 09bd7f40b90c..7c2be3558a24 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -94,4 +94,4 @@ static int __init i2c_ioc_init(void)
return i2c_bit_add_bus(&ioc_ops);
}
-__initcall(i2c_ioc_init);
+module_init(i2c_ioc_init);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index a7dd54654a9a..025f19423faf 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -63,14 +63,14 @@ static void i2c_gpio_setscl_val(void *data, int state)
gpio_set_value(pdata->scl_pin, state);
}
-int i2c_gpio_getsda(void *data)
+static int i2c_gpio_getsda(void *data)
{
struct i2c_gpio_platform_data *pdata = data;
return gpio_get_value(pdata->sda_pin);
}
-int i2c_gpio_getscl(void *data)
+static int i2c_gpio_getscl(void *data)
{
struct i2c_gpio_platform_data *pdata = data;
@@ -142,7 +142,13 @@ static int __init i2c_gpio_probe(struct platform_device *pdev)
adap->algo_data = bit_data;
adap->dev.parent = &pdev->dev;
- ret = i2c_bit_add_bus(adap);
+	/*
+	 * If "pdev->id" is negative we treat it as zero.
+	 * The reason to do so is to avoid sysfs names that only make
+	 * sense when there are multiple adapters.
+	 */
+ adap->nr = pdev->id >= 0 ? pdev->id : 0;
+ ret = i2c_bit_add_numbered_bus(adap);
if (ret)
goto err_add_bus;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 611b57192c96..8f5c686123b8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -22,12 +22,12 @@
/*
SUPPORTED DEVICES PCI ID
- 82801AA 2413
- 82801AB 2423
- 82801BA 2443
- 82801CA/CAM 2483
- 82801DB 24C3 (HW PEC supported, 32 byte buffer not supported)
- 82801EB 24D3 (HW PEC supported, 32 byte buffer not supported)
+ 82801AA 2413
+ 82801AB 2423
+ 82801BA 2443
+ 82801CA/CAM 2483
+ 82801DB 24C3 (HW PEC supported)
+ 82801EB 24D3 (HW PEC supported)
6300ESB 25A4
ICH6 266A
ICH7 27DA
@@ -74,6 +74,13 @@
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+/* Auxiliary control register bits, ICH4+ only */
+#define SMBAUXCTL_CRC 1
+#define SMBAUXCTL_E32B 2
+
+/* kill bit for SMBHSTCNT */
+#define SMBHSTCNT_KILL 2
+
/* Other settings */
#define MAX_TIMEOUT 100
#define ENABLE_INT9 0 /* set to 0x01 to enable - untested */
@@ -91,10 +98,15 @@
#define I801_START 0x40
#define I801_PEC_EN 0x80 /* ICH4 only */
-
-static int i801_transaction(void);
-static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
- int command, int hwpec);
+/* I801 Hosts Status register bits */
+#define SMBHSTSTS_BYTE_DONE 0x80
+#define SMBHSTSTS_INUSE_STS 0x40
+#define SMBHSTSTS_SMBALERT_STS 0x20
+#define SMBHSTSTS_FAILED 0x10
+#define SMBHSTSTS_BUS_ERR 0x08
+#define SMBHSTSTS_DEV_ERR 0x04
+#define SMBHSTSTS_INTR 0x02
+#define SMBHSTSTS_HOST_BUSY 0x01
static unsigned long i801_smba;
static unsigned char i801_original_hstcfg;
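The named status bits introduced above replace the magic constants used further down in the transaction code. A standalone sketch (not part of the patch) decoding a hypothetical status byte with the same names and the driver's error wording:

/* Standalone sketch (not part of the patch): decoding an I801 host status
 * byte with the bit names introduced above.  0x44 is a hypothetical value
 * (INUSE_STS | DEV_ERR). */
#include <stdio.h>

#define SMBHSTSTS_FAILED	0x10
#define SMBHSTSTS_BUS_ERR	0x08
#define SMBHSTSTS_DEV_ERR	0x04
#define SMBHSTSTS_HOST_BUSY	0x01

int main(void)
{
	unsigned char sts = 0x44;	/* hypothetical register value */

	if (sts & SMBHSTSTS_FAILED)
		printf("failed bus transaction\n");
	if (sts & SMBHSTSTS_BUS_ERR)
		printf("bus collision\n");
	if (sts & SMBHSTSTS_DEV_ERR)
		printf("no response from device\n");
	if (sts & SMBHSTSTS_HOST_BUSY)
		printf("host busy\n");
	return 0;
}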
@@ -102,7 +114,7 @@ static struct pci_driver i801_driver;
static struct pci_dev *I801_dev;
static int isich4;
-static int i801_transaction(void)
+static int i801_transaction(int xact)
{
int temp;
int result = 0;
@@ -127,33 +139,40 @@ static int i801_transaction(void)
}
}
- outb_p(inb(SMBHSTCNT) | I801_START, SMBHSTCNT);
+ /* the current contents of SMBHSTCNT can be overwritten, since PEC,
+ * INTREN, SMBSCMD are passed in xact */
+ outb_p(xact | I801_START, SMBHSTCNT);
/* We will always wait for a fraction of a second! */
do {
msleep(1);
temp = inb_p(SMBHSTSTS);
- } while ((temp & 0x01) && (timeout++ < MAX_TIMEOUT));
+ } while ((temp & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout >= MAX_TIMEOUT) {
dev_dbg(&I801_dev->dev, "SMBus Timeout!\n");
result = -1;
+ /* try to stop the current command */
+ dev_dbg(&I801_dev->dev, "Terminating the current operation\n");
+ outb_p(inb_p(SMBHSTCNT) | SMBHSTCNT_KILL, SMBHSTCNT);
+ msleep(1);
+ outb_p(inb_p(SMBHSTCNT) & (~SMBHSTCNT_KILL), SMBHSTCNT);
}
- if (temp & 0x10) {
+ if (temp & SMBHSTSTS_FAILED) {
result = -1;
dev_dbg(&I801_dev->dev, "Error: Failed bus transaction\n");
}
- if (temp & 0x08) {
+ if (temp & SMBHSTSTS_BUS_ERR) {
result = -1;
dev_err(&I801_dev->dev, "Bus collision! SMBus may be locked "
"until next hard reset. (sorry!)\n");
/* Clock stops and slave is stuck in mid-transmission */
}
- if (temp & 0x04) {
+ if (temp & SMBHSTSTS_DEV_ERR) {
result = -1;
dev_dbg(&I801_dev->dev, "Error: no response!\n");
}
@@ -172,44 +191,70 @@ static int i801_transaction(void)
return result;
}
-/* All-inclusive block transaction function */
-static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
- int command, int hwpec)
+/* wait for INTR bit as advised by Intel */
+static void i801_wait_hwpec(void)
+{
+ int timeout = 0;
+ int temp;
+
+ do {
+ msleep(1);
+ temp = inb_p(SMBHSTSTS);
+ } while ((!(temp & SMBHSTSTS_INTR))
+ && (timeout++ < MAX_TIMEOUT));
+
+ if (timeout >= MAX_TIMEOUT) {
+ dev_dbg(&I801_dev->dev, "PEC Timeout!\n");
+ }
+ outb_p(temp, SMBHSTSTS);
+}
+
+static int i801_block_transaction_by_block(union i2c_smbus_data *data,
+ char read_write, int hwpec)
+{
+ int i, len;
+
+ inb_p(SMBHSTCNT); /* reset the data buffer index */
+
+ /* Use 32-byte buffer to process this transaction */
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ outb_p(len, SMBHSTDAT0);
+ for (i = 0; i < len; i++)
+ outb_p(data->block[i+1], SMBBLKDAT);
+ }
+
+ if (i801_transaction(I801_BLOCK_DATA | ENABLE_INT9 |
+ I801_PEC_EN * hwpec))
+ return -1;
+
+ if (read_write == I2C_SMBUS_READ) {
+ len = inb_p(SMBHSTDAT0);
+ if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
+ return -1;
+
+ data->block[0] = len;
+ for (i = 0; i < len; i++)
+ data->block[i + 1] = inb_p(SMBBLKDAT);
+ }
+ return 0;
+}
+
+static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+ char read_write, int hwpec)
{
int i, len;
int smbcmd;
int temp;
int result = 0;
int timeout;
- unsigned char hostc, errmask;
+ unsigned char errmask;
- if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
- if (read_write == I2C_SMBUS_WRITE) {
- /* set I2C_EN bit in configuration register */
- pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
- pci_write_config_byte(I801_dev, SMBHSTCFG,
- hostc | SMBHSTCFG_I2C_EN);
- } else {
- dev_err(&I801_dev->dev,
- "I2C_SMBUS_I2C_BLOCK_READ not DB!\n");
- return -1;
- }
- }
+ len = data->block[0];
if (read_write == I2C_SMBUS_WRITE) {
- len = data->block[0];
- if (len < 1)
- len = 1;
- if (len > 32)
- len = 32;
outb_p(len, SMBHSTDAT0);
outb_p(data->block[1], SMBBLKDAT);
- } else {
- len = 32; /* max for reads */
- }
-
- if(isich4 && command != I2C_SMBUS_I2C_BLOCK_DATA) {
- /* set 32 byte buffer */
}
for (i = 1; i <= len; i++) {
@@ -227,13 +272,13 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
/* Make sure the SMBus host is ready to start transmitting */
temp = inb_p(SMBHSTSTS);
if (i == 1) {
- /* Erronenous conditions before transaction:
+		/* Erroneous conditions before transaction:
* Byte_Done, Failed, Bus_Err, Dev_Err, Intr, Host_Busy */
- errmask=0x9f;
+ errmask = 0x9f;
} else {
- /* Erronenous conditions during transaction:
+		/* Erroneous conditions during transaction:
* Failed, Bus_Err, Dev_Err, Intr */
- errmask=0x1e;
+ errmask = 0x1e;
}
if (temp & errmask) {
dev_dbg(&I801_dev->dev, "SMBus busy (%02x). "
@@ -242,14 +287,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
if (((temp = inb_p(SMBHSTSTS)) & errmask) != 0x00) {
dev_err(&I801_dev->dev,
"Reset failed! (%02x)\n", temp);
- result = -1;
- goto END;
+ return -1;
}
- if (i != 1) {
+ if (i != 1)
/* if die in middle of block transaction, fail */
- result = -1;
- goto END;
- }
+ return -1;
}
if (i == 1)
@@ -261,33 +303,38 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
msleep(1);
temp = inb_p(SMBHSTSTS);
}
- while ((!(temp & 0x80))
- && (timeout++ < MAX_TIMEOUT));
+ while ((!(temp & SMBHSTSTS_BYTE_DONE))
+ && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout >= MAX_TIMEOUT) {
+ /* try to stop the current command */
+ dev_dbg(&I801_dev->dev, "Terminating the current "
+ "operation\n");
+ outb_p(inb_p(SMBHSTCNT) | SMBHSTCNT_KILL, SMBHSTCNT);
+ msleep(1);
+ outb_p(inb_p(SMBHSTCNT) & (~SMBHSTCNT_KILL),
+ SMBHSTCNT);
result = -1;
dev_dbg(&I801_dev->dev, "SMBus Timeout!\n");
}
- if (temp & 0x10) {
+ if (temp & SMBHSTSTS_FAILED) {
result = -1;
dev_dbg(&I801_dev->dev,
"Error: Failed bus transaction\n");
- } else if (temp & 0x08) {
+ } else if (temp & SMBHSTSTS_BUS_ERR) {
result = -1;
dev_err(&I801_dev->dev, "Bus collision!\n");
- } else if (temp & 0x04) {
+ } else if (temp & SMBHSTSTS_DEV_ERR) {
result = -1;
dev_dbg(&I801_dev->dev, "Error: no response!\n");
}
if (i == 1 && read_write == I2C_SMBUS_READ) {
len = inb_p(SMBHSTDAT0);
- if (len < 1)
- len = 1;
- if (len > 32)
- len = 32;
+ if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
+ return -1;
data->block[0] = len;
}
@@ -310,25 +357,58 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
inb_p(SMBHSTDAT0), inb_p(SMBBLKDAT));
if (result < 0)
- goto END;
+ return result;
}
+ return result;
+}
- if (hwpec) {
- /* wait for INTR bit as advised by Intel */
- timeout = 0;
- do {
- msleep(1);
- temp = inb_p(SMBHSTSTS);
- } while ((!(temp & 0x02))
- && (timeout++ < MAX_TIMEOUT));
+static int i801_set_block_buffer_mode(void)
+{
+ outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_E32B, SMBAUXCTL);
+ if ((inb_p(SMBAUXCTL) & SMBAUXCTL_E32B) == 0)
+ return -1;
+ return 0;
+}
- if (timeout >= MAX_TIMEOUT) {
- dev_dbg(&I801_dev->dev, "PEC Timeout!\n");
+/* Block transaction function */
+static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
+ int command, int hwpec)
+{
+ int result = 0;
+ unsigned char hostc;
+
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (read_write == I2C_SMBUS_WRITE) {
+ /* set I2C_EN bit in configuration register */
+ pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
+ pci_write_config_byte(I801_dev, SMBHSTCFG,
+ hostc | SMBHSTCFG_I2C_EN);
+ } else {
+ dev_err(&I801_dev->dev,
+ "I2C_SMBUS_I2C_BLOCK_READ not DB!\n");
+ return -1;
}
- outb_p(temp, SMBHSTSTS);
}
- result = 0;
-END:
+
+ if (read_write == I2C_SMBUS_WRITE) {
+ if (data->block[0] < 1)
+ data->block[0] = 1;
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ data->block[0] = I2C_SMBUS_BLOCK_MAX;
+ } else {
+ data->block[0] = 32; /* max for reads */
+ }
+
+	if (isich4 && i801_set_block_buffer_mode() == 0)
+ result = i801_block_transaction_by_block(data, read_write,
+ hwpec);
+ else
+ result = i801_block_transaction_byte_by_byte(data, read_write,
+ hwpec);
+
+ if (result == 0 && hwpec)
+ i801_wait_hwpec();
+
if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
/* restore saved configuration register value */
pci_write_config_byte(I801_dev, SMBHSTCFG, hostc);
@@ -393,19 +473,22 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
return -1;
}
- outb_p(hwpec, SMBAUXCTL); /* enable/disable hardware PEC */
+ if (hwpec) /* enable/disable hardware PEC */
+ outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_CRC, SMBAUXCTL);
+ else
+ outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
if(block)
ret = i801_block_transaction(data, read_write, size, hwpec);
- else {
- outb_p(xact | ENABLE_INT9, SMBHSTCNT);
- ret = i801_transaction();
- }
+ else
+ ret = i801_transaction(xact | ENABLE_INT9);
/* Some BIOSes don't like it when PEC is enabled at reboot or resume
- time, so we forcibly disable it after every transaction. */
+ time, so we forcibly disable it after every transaction. Turn off
+ E32B for the same reason. */
if (hwpec)
- outb_p(0, SMBAUXCTL);
+ outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
+ SMBAUXCTL);
if(block)
return ret;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 90e2d9350c1b..440342bc62e1 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -491,6 +491,7 @@ iop3xx_i2c_probe(struct platform_device *pdev)
new_adapter->id = I2C_HW_IOP3XX;
new_adapter->owner = THIS_MODULE;
new_adapter->dev.parent = &pdev->dev;
+ new_adapter->nr = pdev->id;
/*
* Default values...should these come in from board code?
@@ -508,7 +509,7 @@ iop3xx_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, new_adapter);
new_adapter->algo_data = adapter_data;
- i2c_add_adapter(new_adapter);
+ i2c_add_numbered_adapter(new_adapter);
return 0;
diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c
deleted file mode 100644
index b0e1370075de..000000000000
--- a/drivers/i2c/busses/i2c-isa.c
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- i2c-isa.c - an i2c-core-like thing for ISA hardware monitoring chips
- Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
-
- Based on the i2c-isa pseudo-adapter from the lm_sensors project
- Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-/* This implements an i2c-core-like thing for ISA hardware monitoring
- chips. Such chips are linked to the i2c subsystem for historical
- reasons (because the early ISA hardware monitoring chips such as the
- LM78 had both an I2C and an ISA interface). They used to be
- registered with the main i2c-core, but as a first step in the
- direction of a clean separation between I2C and ISA chip drivers,
- we now have this separate core for ISA ones. It is significantly
- more simple than the real one, of course, because we don't have to
- handle multiple busses: there is only one (fake) ISA adapter.
- It is worth noting that we still rely on i2c-core for some things
- at the moment - but hopefully this won't last. */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/i2c-isa.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-
-/* Exported by i2c-core for i2c-isa only */
-extern void i2c_adapter_dev_release(struct device *dev);
-extern struct class i2c_adapter_class;
-
-static u32 isa_func(struct i2c_adapter *adapter);
-
-/* This is the actual algorithm we define */
-static const struct i2c_algorithm isa_algorithm = {
- .functionality = isa_func,
-};
-
-/* There can only be one... */
-static struct i2c_adapter isa_adapter = {
- .owner = THIS_MODULE,
- .id = I2C_HW_ISA,
- .class = I2C_CLASS_HWMON,
- .algo = &isa_algorithm,
- .name = "ISA main adapter",
-};
-
-/* We can't do a thing... */
-static u32 isa_func(struct i2c_adapter *adapter)
-{
- return 0;
-}
-
-
-/* We implement an interface which resembles i2c_{add,del}_driver,
- but for i2c-isa drivers. We don't have to remember and handle lists
- of drivers and adapters so this is much more simple, of course. */
-
-int i2c_isa_add_driver(struct i2c_driver *driver)
-{
- int res;
-
- /* Add the driver to the list of i2c drivers in the driver core */
- driver->driver.bus = &i2c_bus_type;
- res = driver_register(&driver->driver);
- if (res)
- return res;
- dev_dbg(&isa_adapter.dev, "Driver %s registered\n", driver->driver.name);
-
- /* Now look for clients */
- res = driver->attach_adapter(&isa_adapter);
- if (res) {
- dev_dbg(&isa_adapter.dev,
- "Driver %s failed to attach adapter, unregistering\n",
- driver->driver.name);
- driver_unregister(&driver->driver);
- }
- return res;
-}
-
-int i2c_isa_del_driver(struct i2c_driver *driver)
-{
- struct list_head *item, *_n;
- struct i2c_client *client;
- int res;
-
- /* Detach all clients belonging to this one driver */
- list_for_each_safe(item, _n, &isa_adapter.clients) {
- client = list_entry(item, struct i2c_client, list);
- if (client->driver != driver)
- continue;
- dev_dbg(&isa_adapter.dev, "Detaching client %s at 0x%x\n",
- client->name, client->addr);
- if ((res = driver->detach_client(client))) {
- dev_err(&isa_adapter.dev, "Failed, driver "
- "%s not unregistered!\n",
- driver->driver.name);
- return res;
- }
- }
-
- /* Get the driver off the core list */
- driver_unregister(&driver->driver);
- dev_dbg(&isa_adapter.dev, "Driver %s unregistered\n", driver->driver.name);
-
- return 0;
-}
-
-
-static int __init i2c_isa_init(void)
-{
- int err;
-
- mutex_init(&isa_adapter.clist_lock);
- INIT_LIST_HEAD(&isa_adapter.clients);
-
- isa_adapter.nr = ANY_I2C_ISA_BUS;
- isa_adapter.dev.parent = &platform_bus;
- sprintf(isa_adapter.dev.bus_id, "i2c-%d", isa_adapter.nr);
- isa_adapter.dev.release = &i2c_adapter_dev_release;
- isa_adapter.dev.class = &i2c_adapter_class;
- err = device_register(&isa_adapter.dev);
- if (err) {
- printk(KERN_ERR "i2c-isa: Failed to register device\n");
- goto exit;
- }
-
- dev_dbg(&isa_adapter.dev, "%s registered\n", isa_adapter.name);
-
- return 0;
-
-exit:
- return err;
-}
-
-static void __exit i2c_isa_exit(void)
-{
-#ifdef DEBUG
- struct list_head *item, *_n;
- struct i2c_client *client = NULL;
-#endif
-
- /* There should be no more active client */
-#ifdef DEBUG
- dev_dbg(&isa_adapter.dev, "Looking for clients\n");
- list_for_each_safe(item, _n, &isa_adapter.clients) {
- client = list_entry(item, struct i2c_client, list);
- dev_err(&isa_adapter.dev, "Driver %s still has an active "
- "ISA client at 0x%x\n", client->driver->driver.name,
- client->addr);
- }
- if (client != NULL)
- return;
-#endif
-
- /* Clean up the sysfs representation */
- dev_dbg(&isa_adapter.dev, "Unregistering from sysfs\n");
- init_completion(&isa_adapter.dev_released);
- device_unregister(&isa_adapter.dev);
-
- /* Wait for sysfs to drop all references */
- dev_dbg(&isa_adapter.dev, "Waiting for sysfs completion\n");
- wait_for_completion(&isa_adapter.dev_released);
-
- dev_dbg(&isa_adapter.dev, "%s unregistered\n", isa_adapter.name);
-}
-
-EXPORT_SYMBOL(i2c_isa_add_driver);
-EXPORT_SYMBOL(i2c_isa_del_driver);
-
-MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
-MODULE_DESCRIPTION("ISA bus access through i2c");
-MODULE_LICENSE("GPL");
-
-module_init(i2c_isa_init);
-module_exit(i2c_isa_exit);
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index c6b6898592b1..851c3ed513d0 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -74,6 +74,25 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* Sometimes the 9th clock pulse isn't generated, and the slave doesn't
+ * release the bus because it still wants to send an ACK.
+ * The following sequence of enabling/disabling the controller and sending
+ * start/stop generates the missing pulse, so the bus is released.
+ */
+static void mpc_i2c_fixup(struct mpc_i2c *i2c)
+{
+ writeccr(i2c, 0);
+ udelay(30);
+ writeccr(i2c, CCR_MEN);
+ udelay(30);
+ writeccr(i2c, CCR_MSTA | CCR_MTX);
+ udelay(30);
+ writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
+ udelay(30);
+ writeccr(i2c, CCR_MEN);
+ udelay(30);
+}
+
static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
{
unsigned long orig_jiffies = jiffies;
@@ -153,6 +172,7 @@ static void mpc_i2c_start(struct mpc_i2c *i2c)
static void mpc_i2c_stop(struct mpc_i2c *i2c)
{
writeccr(i2c, CCR_MEN);
+ writeccr(i2c, 0);
}
static int mpc_write(struct mpc_i2c *i2c, int target,
@@ -245,6 +265,9 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
}
if (time_after(jiffies, orig_jiffies + HZ)) {
pr_debug("I2C: timeout\n");
+ if (readb(i2c->base + MPC_I2C_SR) ==
+ (CSR_MCF | CSR_MBB | CSR_RXAK))
+ mpc_i2c_fixup(i2c);
return -EIO;
}
schedule();
@@ -327,9 +350,10 @@ static int fsl_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
i2c->adap = mpc_ops;
+ i2c->adap.nr = pdev->id;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
- if ((result = i2c_add_adapter(&i2c->adap)) < 0) {
+ if ((result = i2c_add_numbered_adapter(&i2c->adap)) < 0) {
printk(KERN_ERR "i2c-mpc - failed to add adapter\n");
goto fail_add;
}
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index a55b3335d1be..251154ae5d97 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -527,6 +527,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
drv_data->adapter.class = I2C_CLASS_HWMON;
drv_data->adapter.timeout = pdata->timeout;
drv_data->adapter.retries = pdata->retries;
+ drv_data->adapter.nr = pd->id;
platform_set_drvdata(pd, drv_data);
i2c_set_adapdata(&drv_data->adapter, drv_data);
@@ -539,7 +540,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
drv_data->irq);
rc = -EINVAL;
goto exit_unmap_regs;
- } else if ((rc = i2c_add_adapter(&drv_data->adapter)) != 0) {
+ } else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) {
dev_err(&drv_data->adapter.dev,
"mv64xxx: Can't add i2c adapter, rc: %d\n", -rc);
goto exit_free_irq;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 3cd0d63e7b50..c48140f782d0 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -61,6 +61,7 @@ struct nforce2_smbus {
struct i2c_adapter adapter;
int base;
int size;
+ int blockops;
};
@@ -80,6 +81,8 @@ struct nforce2_smbus {
#define NVIDIA_SMB_ADDR (smbus->base + 0x02) /* address */
#define NVIDIA_SMB_CMD (smbus->base + 0x03) /* command */
#define NVIDIA_SMB_DATA (smbus->base + 0x04) /* 32 data registers */
+#define NVIDIA_SMB_BCNT (smbus->base + 0x24) /* number of data
+ bytes */
#define NVIDIA_SMB_STS_DONE 0x80
#define NVIDIA_SMB_STS_ALRM 0x40
@@ -92,6 +95,7 @@ struct nforce2_smbus {
#define NVIDIA_SMB_PRTCL_BYTE 0x04
#define NVIDIA_SMB_PRTCL_BYTE_DATA 0x06
#define NVIDIA_SMB_PRTCL_WORD_DATA 0x08
+#define NVIDIA_SMB_PRTCL_BLOCK_DATA 0x0a
#define NVIDIA_SMB_PRTCL_PEC 0x80
static struct pci_driver nforce2_driver;
@@ -103,6 +107,8 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
{
struct nforce2_smbus *smbus = adap->algo_data;
unsigned char protocol, pec, temp;
+ u8 len;
+ int i;
protocol = (read_write == I2C_SMBUS_READ) ? NVIDIA_SMB_PRTCL_READ :
NVIDIA_SMB_PRTCL_WRITE;
@@ -137,6 +143,25 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
protocol |= NVIDIA_SMB_PRTCL_WORD_DATA | pec;
break;
+ case I2C_SMBUS_BLOCK_DATA:
+ outb_p(command, NVIDIA_SMB_CMD);
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ if ((len == 0) || (len > I2C_SMBUS_BLOCK_MAX)) {
+ dev_err(&adap->dev,
+ "Transaction failed "
+ "(requested block size: %d)\n",
+ len);
+ return -1;
+ }
+ outb_p(len, NVIDIA_SMB_BCNT);
+ for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++)
+ outb_p(data->block[i + 1],
+ NVIDIA_SMB_DATA+i);
+ }
+ protocol |= NVIDIA_SMB_PRTCL_BLOCK_DATA | pec;
+ break;
+
default:
dev_err(&adap->dev, "Unsupported transaction %d\n", size);
return -1;
@@ -174,6 +199,14 @@ static s32 nforce2_access(struct i2c_adapter * adap, u16 addr,
case I2C_SMBUS_WORD_DATA:
data->word = inb_p(NVIDIA_SMB_DATA) | (inb_p(NVIDIA_SMB_DATA+1) << 8);
break;
+
+ case I2C_SMBUS_BLOCK_DATA:
+ len = inb_p(NVIDIA_SMB_BCNT);
+ len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX);
+ for (i = 0; i < len; i++)
+ data->block[i+1] = inb_p(NVIDIA_SMB_DATA + i);
+ data->block[0] = len;
+ break;
}
return 0;
@@ -184,7 +217,9 @@ static u32 nforce2_func(struct i2c_adapter *adapter)
{
/* other functionality might be possible, but is not tested */
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA;
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ (((struct nforce2_smbus*)adapter->algo_data)->blockops ?
+ I2C_FUNC_SMBUS_BLOCK_DATA : 0);
}
static struct i2c_algorithm smbus_algorithm = {
@@ -268,6 +303,13 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
return -ENOMEM;
pci_set_drvdata(dev, smbuses);
+	switch (dev->device) {
+ case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS:
+ case PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS:
+ smbuses[0].blockops = 1;
+ smbuses[1].blockops = 1;
+ }
+
/* SMBus adapter 1 */
res1 = nforce2_probe_smb(dev, 4, NFORCE_PCI_SMB1, &smbuses[0], "SMB1");
if (res1 < 0) {
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 5a52bf5e3fb0..debc76cd2161 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -23,7 +23,7 @@
Supports:
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000
- ATI IXP200, IXP300, IXP400, SB600
+ ATI IXP200, IXP300, IXP400, SB600, SB700
SMSC Victory66
Note: we assume there can only be one device, with one SMBus interface.
@@ -399,6 +399,8 @@ static struct pci_device_id piix4_ids[] = {
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SMBUS),
.driver_data = 0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SMBUS),
+ .driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4),
.driver_data = 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5),
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
new file mode 100644
index 000000000000..17cecf1ea797
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -0,0 +1,653 @@
+/*
+ * Specific bus support for PMC-TWI compliant implementation on MSP71xx.
+ *
+ * Copyright 2005-2007 PMC-Sierra, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#define DRV_NAME "pmcmsptwi"
+
+#define MSP_TWI_SF_CLK_REG_OFFSET 0x00
+#define MSP_TWI_HS_CLK_REG_OFFSET 0x04
+#define MSP_TWI_CFG_REG_OFFSET 0x08
+#define MSP_TWI_CMD_REG_OFFSET 0x0c
+#define MSP_TWI_ADD_REG_OFFSET 0x10
+#define MSP_TWI_DAT_0_REG_OFFSET 0x14
+#define MSP_TWI_DAT_1_REG_OFFSET 0x18
+#define MSP_TWI_INT_STS_REG_OFFSET 0x1c
+#define MSP_TWI_INT_MSK_REG_OFFSET 0x20
+#define MSP_TWI_BUSY_REG_OFFSET 0x24
+
+#define MSP_TWI_INT_STS_DONE (1 << 0)
+#define MSP_TWI_INT_STS_LOST_ARBITRATION (1 << 1)
+#define MSP_TWI_INT_STS_NO_RESPONSE (1 << 2)
+#define MSP_TWI_INT_STS_DATA_COLLISION (1 << 3)
+#define MSP_TWI_INT_STS_BUSY (1 << 4)
+#define MSP_TWI_INT_STS_ALL 0x1f
+
+#define MSP_MAX_BYTES_PER_RW 8
+#define MSP_MAX_POLL 5
+#define MSP_POLL_DELAY 10
+#define MSP_IRQ_TIMEOUT (MSP_MAX_POLL * MSP_POLL_DELAY)
+
+/* IO Operation macros */
+#define pmcmsptwi_readl __raw_readl
+#define pmcmsptwi_writel __raw_writel
+
+/* TWI command type */
+enum pmcmsptwi_cmd_type {
+ MSP_TWI_CMD_WRITE = 0, /* Write only */
+ MSP_TWI_CMD_READ = 1, /* Read only */
+ MSP_TWI_CMD_WRITE_READ = 2, /* Write then Read */
+};
+
+/* The possible results of the xferCmd */
+enum pmcmsptwi_xfer_result {
+ MSP_TWI_XFER_OK = 0,
+ MSP_TWI_XFER_TIMEOUT,
+ MSP_TWI_XFER_BUSY,
+ MSP_TWI_XFER_DATA_COLLISION,
+ MSP_TWI_XFER_NO_RESPONSE,
+ MSP_TWI_XFER_LOST_ARBITRATION,
+};
+
+/* Corresponds to a PMCTWI clock configuration register */
+struct pmcmsptwi_clock {
+ u8 filter; /* Bits 15:12, default = 0x03 */
+ u16 clock; /* Bits 9:0, default = 0x001f */
+};
+
+struct pmcmsptwi_clockcfg {
+ struct pmcmsptwi_clock standard; /* The standard/fast clock config */
+ struct pmcmsptwi_clock highspeed; /* The highspeed clock config */
+};
+
+/* Corresponds to the main TWI configuration register */
+struct pmcmsptwi_cfg {
+ u8 arbf; /* Bits 15:12, default=0x03 */
+ u8 nak; /* Bits 11:8, default=0x03 */
+ u8 add10; /* Bit 7, default=0x00 */
+ u8 mst_code; /* Bits 6:4, default=0x00 */
+ u8 arb; /* Bit 1, default=0x01 */
+ u8 highspeed; /* Bit 0, default=0x00 */
+};
+
+/* A single pmctwi command to issue */
+struct pmcmsptwi_cmd {
+ u16 addr; /* The slave address (7 or 10 bits) */
+ enum pmcmsptwi_cmd_type type; /* The command type */
+ u8 write_len; /* Number of bytes in the write buffer */
+ u8 read_len; /* Number of bytes in the read buffer */
+ u8 *write_data; /* Buffer of characters to send */
+ u8 *read_data; /* Buffer to fill with incoming data */
+};
+
+/* The private data */
+struct pmcmsptwi_data {
+ void __iomem *iobase; /* iomapped base for IO */
+ int irq; /* IRQ to use (0 disables) */
+ struct completion wait; /* Completion for xfer */
+ struct mutex lock; /* Used for threadsafeness */
+ enum pmcmsptwi_xfer_result last_result; /* result of last xfer */
+};
+
+/* The default settings */
+static const struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = {
+ .standard = {
+ .filter = 0x3,
+ .clock = 0x1f,
+ },
+ .highspeed = {
+ .filter = 0x3,
+ .clock = 0x1f,
+ },
+};
+
+static const struct pmcmsptwi_cfg pmcmsptwi_defcfg = {
+ .arbf = 0x03,
+ .nak = 0x03,
+ .add10 = 0x00,
+ .mst_code = 0x00,
+ .arb = 0x01,
+ .highspeed = 0x00,
+};
+
+static struct pmcmsptwi_data pmcmsptwi_data;
+
+static struct i2c_adapter pmcmsptwi_adapter;
+
+/* inline helper functions */
+static inline u32 pmcmsptwi_clock_to_reg(
+ const struct pmcmsptwi_clock *clock)
+{
+ return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
+}
+
+static inline void pmcmsptwi_reg_to_clock(
+ u32 reg, struct pmcmsptwi_clock *clock)
+{
+ clock->filter = (reg >> 12) & 0xf;
+ clock->clock = reg & 0x03ff;
+}
+
+static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
+{
+ return ((cfg->arbf & 0xf) << 12) |
+ ((cfg->nak & 0xf) << 8) |
+ ((cfg->add10 & 0x1) << 7) |
+ ((cfg->mst_code & 0x7) << 4) |
+ ((cfg->arb & 0x1) << 1) |
+ (cfg->highspeed & 0x1);
+}
+
+static inline void pmcmsptwi_reg_to_cfg(u32 reg, struct pmcmsptwi_cfg *cfg)
+{
+ cfg->arbf = (reg >> 12) & 0xf;
+ cfg->nak = (reg >> 8) & 0xf;
+ cfg->add10 = (reg >> 7) & 0x1;
+ cfg->mst_code = (reg >> 4) & 0x7;
+ cfg->arb = (reg >> 1) & 0x1;
+ cfg->highspeed = reg & 0x1;
+}
+
+/*
+ * Sets the current clock configuration
+ */
+static void pmcmsptwi_set_clock_config(const struct pmcmsptwi_clockcfg *cfg,
+ struct pmcmsptwi_data *data)
+{
+ mutex_lock(&data->lock);
+ pmcmsptwi_writel(pmcmsptwi_clock_to_reg(&cfg->standard),
+ data->iobase + MSP_TWI_SF_CLK_REG_OFFSET);
+ pmcmsptwi_writel(pmcmsptwi_clock_to_reg(&cfg->highspeed),
+ data->iobase + MSP_TWI_HS_CLK_REG_OFFSET);
+ mutex_unlock(&data->lock);
+}
+
+/*
+ * Gets the current TWI bus configuration
+ */
+static void pmcmsptwi_get_twi_config(struct pmcmsptwi_cfg *cfg,
+ struct pmcmsptwi_data *data)
+{
+ mutex_lock(&data->lock);
+ pmcmsptwi_reg_to_cfg(pmcmsptwi_readl(
+ data->iobase + MSP_TWI_CFG_REG_OFFSET), cfg);
+ mutex_unlock(&data->lock);
+}
+
+/*
+ * Sets the current TWI bus configuration
+ */
+static void pmcmsptwi_set_twi_config(const struct pmcmsptwi_cfg *cfg,
+ struct pmcmsptwi_data *data)
+{
+ mutex_lock(&data->lock);
+ pmcmsptwi_writel(pmcmsptwi_cfg_to_reg(cfg),
+ data->iobase + MSP_TWI_CFG_REG_OFFSET);
+ mutex_unlock(&data->lock);
+}
+
+/*
+ * Parses the 'int_sts' register and returns a well-defined error code
+ */
+static enum pmcmsptwi_xfer_result pmcmsptwi_get_result(u32 reg)
+{
+ if (reg & MSP_TWI_INT_STS_LOST_ARBITRATION) {
+ dev_dbg(&pmcmsptwi_adapter.dev,
+ "Result: Lost arbitration\n");
+ return MSP_TWI_XFER_LOST_ARBITRATION;
+ } else if (reg & MSP_TWI_INT_STS_NO_RESPONSE) {
+ dev_dbg(&pmcmsptwi_adapter.dev,
+ "Result: No response\n");
+ return MSP_TWI_XFER_NO_RESPONSE;
+ } else if (reg & MSP_TWI_INT_STS_DATA_COLLISION) {
+ dev_dbg(&pmcmsptwi_adapter.dev,
+ "Result: Data collision\n");
+ return MSP_TWI_XFER_DATA_COLLISION;
+ } else if (reg & MSP_TWI_INT_STS_BUSY) {
+ dev_dbg(&pmcmsptwi_adapter.dev,
+ "Result: Bus busy\n");
+ return MSP_TWI_XFER_BUSY;
+ }
+
+ dev_dbg(&pmcmsptwi_adapter.dev, "Result: Operation succeeded\n");
+ return MSP_TWI_XFER_OK;
+}
+
+/*
+ * In interrupt mode, handle the interrupt.
+ * NOTE: Assumes data->lock is held.
+ */
+static irqreturn_t pmcmsptwi_interrupt(int irq, void *ptr)
+{
+ struct pmcmsptwi_data *data = ptr;
+
+ u32 reason = pmcmsptwi_readl(data->iobase +
+ MSP_TWI_INT_STS_REG_OFFSET);
+ pmcmsptwi_writel(reason, data->iobase + MSP_TWI_INT_STS_REG_OFFSET);
+
+ dev_dbg(&pmcmsptwi_adapter.dev, "Got interrupt 0x%08x\n", reason);
+ if (!(reason & MSP_TWI_INT_STS_DONE))
+ return IRQ_NONE;
+
+ data->last_result = pmcmsptwi_get_result(reason);
+ complete(&data->wait);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probe for and register the device and return 0 if there is one.
+ */
+static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
+{
+ struct resource *res;
+ int rc = -ENODEV;
+
+ /* get the static platform resources */
+ res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pldev->dev, "IOMEM resource not found\n");
+ goto ret_err;
+ }
+
+ /* reserve the memory region */
+ if (!request_mem_region(res->start, res->end - res->start + 1,
+ pldev->name)) {
+ dev_err(&pldev->dev,
+ "Unable to get memory/io address region 0x%08x\n",
+ res->start);
+ rc = -EBUSY;
+ goto ret_err;
+ }
+
+ /* remap the memory */
+ pmcmsptwi_data.iobase = ioremap_nocache(res->start,
+ res->end - res->start + 1);
+ if (!pmcmsptwi_data.iobase) {
+ dev_err(&pldev->dev,
+ "Unable to ioremap address 0x%08x\n", res->start);
+ rc = -EIO;
+ goto ret_unreserve;
+ }
+
+ /* request the irq */
+ pmcmsptwi_data.irq = platform_get_irq(pldev, 0);
+ if (pmcmsptwi_data.irq) {
+ rc = request_irq(pmcmsptwi_data.irq, &pmcmsptwi_interrupt,
+ IRQF_SHARED | IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
+ pldev->name, &pmcmsptwi_data);
+ if (rc == 0) {
+ /*
+ * Enable 'DONE' interrupt only.
+ *
+ * If you enable all interrupts, you will get one on
+ * error and another when the operation completes.
+ * This way you only have to handle one interrupt,
+ * but you can still check all result flags.
+ */
+ pmcmsptwi_writel(MSP_TWI_INT_STS_DONE,
+ pmcmsptwi_data.iobase +
+ MSP_TWI_INT_MSK_REG_OFFSET);
+ } else {
+ dev_warn(&pldev->dev,
+ "Could not assign TWI IRQ handler "
+ "to irq %d (continuing with poll)\n",
+ pmcmsptwi_data.irq);
+ pmcmsptwi_data.irq = 0;
+ }
+ }
+
+ init_completion(&pmcmsptwi_data.wait);
+ mutex_init(&pmcmsptwi_data.lock);
+
+ pmcmsptwi_set_clock_config(&pmcmsptwi_defclockcfg, &pmcmsptwi_data);
+ pmcmsptwi_set_twi_config(&pmcmsptwi_defcfg, &pmcmsptwi_data);
+
+ printk(KERN_INFO DRV_NAME ": Registering MSP71xx I2C adapter\n");
+
+ pmcmsptwi_adapter.dev.parent = &pldev->dev;
+ platform_set_drvdata(pldev, &pmcmsptwi_adapter);
+ i2c_set_adapdata(&pmcmsptwi_adapter, &pmcmsptwi_data);
+
+ rc = i2c_add_adapter(&pmcmsptwi_adapter);
+ if (rc) {
+ dev_err(&pldev->dev, "Unable to register I2C adapter\n");
+ goto ret_unmap;
+ }
+
+ return 0;
+
+ret_unmap:
+ platform_set_drvdata(pldev, NULL);
+ if (pmcmsptwi_data.irq) {
+ pmcmsptwi_writel(0,
+ pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
+ free_irq(pmcmsptwi_data.irq, &pmcmsptwi_data);
+ }
+
+ iounmap(pmcmsptwi_data.iobase);
+
+ret_unreserve:
+ release_mem_region(res->start, res->end - res->start + 1);
+
+ret_err:
+ return rc;
+}
+
+/*
+ * Release the device and return 0 if there is one.
+ */
+static int __devexit pmcmsptwi_remove(struct platform_device *pldev)
+{
+ struct resource *res;
+
+ i2c_del_adapter(&pmcmsptwi_adapter);
+
+ platform_set_drvdata(pldev, NULL);
+ if (pmcmsptwi_data.irq) {
+ pmcmsptwi_writel(0,
+ pmcmsptwi_data.iobase + MSP_TWI_INT_MSK_REG_OFFSET);
+ free_irq(pmcmsptwi_data.irq, &pmcmsptwi_data);
+ }
+
+ iounmap(pmcmsptwi_data.iobase);
+
+ res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, res->end - res->start + 1);
+
+ return 0;
+}
+
+/*
+ * Polls the 'busy' register until the command is complete.
+ * NOTE: Assumes data->lock is held.
+ */
+static void pmcmsptwi_poll_complete(struct pmcmsptwi_data *data)
+{
+ int i;
+
+ for (i = 0; i < MSP_MAX_POLL; i++) {
+ u32 val = pmcmsptwi_readl(data->iobase +
+ MSP_TWI_BUSY_REG_OFFSET);
+ if (val == 0) {
+ u32 reason = pmcmsptwi_readl(data->iobase +
+ MSP_TWI_INT_STS_REG_OFFSET);
+ pmcmsptwi_writel(reason, data->iobase +
+ MSP_TWI_INT_STS_REG_OFFSET);
+ data->last_result = pmcmsptwi_get_result(reason);
+ return;
+ }
+ udelay(MSP_POLL_DELAY);
+ }
+
+ dev_dbg(&pmcmsptwi_adapter.dev, "Result: Poll timeout\n");
+ data->last_result = MSP_TWI_XFER_TIMEOUT;
+}
+
+/*
+ * Do the transfer (low level):
+ * May use interrupt-driven or polling, depending on if an IRQ is
+ * presently registered.
+ * NOTE: Assumes data->lock is held.
+ */
+static enum pmcmsptwi_xfer_result pmcmsptwi_do_xfer(
+ u32 reg, struct pmcmsptwi_data *data)
+{
+ dev_dbg(&pmcmsptwi_adapter.dev, "Writing cmd reg 0x%08x\n", reg);
+ pmcmsptwi_writel(reg, data->iobase + MSP_TWI_CMD_REG_OFFSET);
+ if (data->irq) {
+ unsigned long timeleft = wait_for_completion_timeout(
+ &data->wait, MSP_IRQ_TIMEOUT);
+ if (timeleft == 0) {
+ dev_dbg(&pmcmsptwi_adapter.dev,
+ "Result: IRQ timeout\n");
+ complete(&data->wait);
+ data->last_result = MSP_TWI_XFER_TIMEOUT;
+ }
+ } else
+ pmcmsptwi_poll_complete(data);
+
+ return data->last_result;
+}
+
+/*
+ * Helper routine, converts 'pmctwi_cmd' struct to register format
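+ *
+ * Register layout (derived from the shifts below): bits 9:8 hold the
+ * command type, bits 6:4 hold (write_len - 1) and bits 2:0 hold
+ * (read_len - 1).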
+ */
+static inline u32 pmcmsptwi_cmd_to_reg(const struct pmcmsptwi_cmd *cmd)
+{
+ return ((cmd->type & 0x3) << 8) |
+ (((cmd->write_len - 1) & 0x7) << 4) |
+ ((cmd->read_len - 1) & 0x7);
+}
+
+/*
+ * Do the transfer (high level)
+ */
+static enum pmcmsptwi_xfer_result pmcmsptwi_xfer_cmd(
+ struct pmcmsptwi_cmd *cmd,
+ struct pmcmsptwi_data *data)
+{
+ enum pmcmsptwi_xfer_result retval;
+
+ if ((cmd->type == MSP_TWI_CMD_WRITE && cmd->write_len == 0) ||
+ (cmd->type == MSP_TWI_CMD_READ && cmd->read_len == 0) ||
+ (cmd->type == MSP_TWI_CMD_WRITE_READ &&
+ (cmd->read_len == 0 || cmd->write_len == 0))) {
+ dev_err(&pmcmsptwi_adapter.dev,
+ "%s: Cannot transfer less than 1 byte\n",
+			__func__);
+ return -EINVAL;
+ }
+
+ if (cmd->read_len > MSP_MAX_BYTES_PER_RW ||
+ cmd->write_len > MSP_MAX_BYTES_PER_RW) {
+ dev_err(&pmcmsptwi_adapter.dev,
+ "%s: Cannot transfer more than %d bytes\n",
+			__func__, MSP_MAX_BYTES_PER_RW);
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->lock);
+ dev_dbg(&pmcmsptwi_adapter.dev,
+ "Setting address to 0x%04x\n", cmd->addr);
+ pmcmsptwi_writel(cmd->addr, data->iobase + MSP_TWI_ADD_REG_OFFSET);
+
+ if (cmd->type == MSP_TWI_CMD_WRITE ||
+ cmd->type == MSP_TWI_CMD_WRITE_READ) {
+ __be64 tmp = cpu_to_be64p((u64 *)cmd->write_data);
+ tmp >>= (MSP_MAX_BYTES_PER_RW - cmd->write_len) * 8;
+ dev_dbg(&pmcmsptwi_adapter.dev, "Writing 0x%016llx\n", tmp);
+ pmcmsptwi_writel(tmp & 0x00000000ffffffffLL,
+ data->iobase + MSP_TWI_DAT_0_REG_OFFSET);
+ if (cmd->write_len > 4)
+ pmcmsptwi_writel(tmp >> 32,
+ data->iobase + MSP_TWI_DAT_1_REG_OFFSET);
+ }
+
+ retval = pmcmsptwi_do_xfer(pmcmsptwi_cmd_to_reg(cmd), data);
+ if (retval != MSP_TWI_XFER_OK)
+ goto xfer_err;
+
+ if (cmd->type == MSP_TWI_CMD_READ ||
+ cmd->type == MSP_TWI_CMD_WRITE_READ) {
+ int i;
+ u64 rmsk = ~(0xffffffffffffffffLL << (cmd->read_len * 8));
+ u64 tmp = (u64)pmcmsptwi_readl(data->iobase +
+ MSP_TWI_DAT_0_REG_OFFSET);
+ if (cmd->read_len > 4)
+ tmp |= (u64)pmcmsptwi_readl(data->iobase +
+ MSP_TWI_DAT_1_REG_OFFSET) << 32;
+ tmp &= rmsk;
+ dev_dbg(&pmcmsptwi_adapter.dev, "Read 0x%016llx\n", tmp);
+
+		/* Unpack the result one byte per loop, low byte first */
+		for (i = 0; i < cmd->read_len; i++)
+			cmd->read_data[i] = tmp >> (i * 8);
+ }
+
+xfer_err:
+ mutex_unlock(&data->lock);
+
+ return retval;
+}
+
+/* -- Algorithm functions -- */
+
+/*
+ * Sends an i2c command out on the adapter
+ */
+static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msg, int num)
+{
+ struct pmcmsptwi_data *data = i2c_get_adapdata(adap);
+ struct pmcmsptwi_cmd cmd;
+ struct pmcmsptwi_cfg oldcfg, newcfg;
+ int ret;
+
+ if (num > 2) {
+ dev_dbg(&adap->dev, "%d messages unsupported\n", num);
+ return -EINVAL;
+ } else if (num == 2) {
+ /* Check for a dual write-then-read command */
+ struct i2c_msg *nextmsg = msg + 1;
+ if (!(msg->flags & I2C_M_RD) &&
+ (nextmsg->flags & I2C_M_RD) &&
+ msg->addr == nextmsg->addr) {
+ cmd.type = MSP_TWI_CMD_WRITE_READ;
+ cmd.write_len = msg->len;
+ cmd.write_data = msg->buf;
+ cmd.read_len = nextmsg->len;
+ cmd.read_data = nextmsg->buf;
+ } else {
+ dev_dbg(&adap->dev,
+ "Non write-read dual messages unsupported\n");
+ return -EINVAL;
+ }
+ } else if (msg->flags & I2C_M_RD) {
+ cmd.type = MSP_TWI_CMD_READ;
+ cmd.read_len = msg->len;
+ cmd.read_data = msg->buf;
+ cmd.write_len = 0;
+ cmd.write_data = NULL;
+ } else {
+ cmd.type = MSP_TWI_CMD_WRITE;
+ cmd.read_len = 0;
+ cmd.read_data = NULL;
+ cmd.write_len = msg->len;
+ cmd.write_data = msg->buf;
+ }
+
+ if (msg->len == 0) {
+ dev_err(&adap->dev, "Zero-byte messages unsupported\n");
+ return -EINVAL;
+ }
+
+ cmd.addr = msg->addr;
+
+ if (msg->flags & I2C_M_TEN) {
+ pmcmsptwi_get_twi_config(&newcfg, data);
+ memcpy(&oldcfg, &newcfg, sizeof(oldcfg));
+
+ /* Set the special 10-bit address flag */
+ newcfg.add10 = 1;
+
+ pmcmsptwi_set_twi_config(&newcfg, data);
+ }
+
+ /* Execute the command */
+ ret = pmcmsptwi_xfer_cmd(&cmd, data);
+
+ if (msg->flags & I2C_M_TEN)
+ pmcmsptwi_set_twi_config(&oldcfg, data);
+
+ dev_dbg(&adap->dev, "I2C %s of %d bytes ",
+ (msg->flags & I2C_M_RD) ? "read" : "write", msg->len);
+ if (ret != MSP_TWI_XFER_OK) {
+ /*
+ * TODO: We could potentially loop and retry in the case
+ * of MSP_TWI_XFER_TIMEOUT.
+ */
+ dev_dbg(&adap->dev, "failed\n");
+ return -1;
+ }
+
+ dev_dbg(&adap->dev, "succeeded\n");
+ return 0;
+}
+
+static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL;
+}
+
+/* -- Initialization -- */
+
+static struct i2c_algorithm pmcmsptwi_algo = {
+ .master_xfer = pmcmsptwi_master_xfer,
+ .functionality = pmcmsptwi_i2c_func,
+};
+
+static struct i2c_adapter pmcmsptwi_adapter = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_HWMON,
+ .algo = &pmcmsptwi_algo,
+ .name = DRV_NAME,
+};
+
+static struct platform_driver pmcmsptwi_driver = {
+ .probe = pmcmsptwi_probe,
+ .remove = __devexit_p(pmcmsptwi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pmcmsptwi_init(void)
+{
+ return platform_driver_register(&pmcmsptwi_driver);
+}
+
+static void __exit pmcmsptwi_exit(void)
+{
+ platform_driver_unregister(&pmcmsptwi_driver);
+}
+
+MODULE_DESCRIPTION("PMC MSP TWI/SMBus/I2C driver");
+MODULE_LICENSE("GPL");
+
+module_init(pmcmsptwi_init);
+module_exit(pmcmsptwi_exit);
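
For reference, a rough sketch of the board-side registration this driver expects (one IORESOURCE_MEM region covering the register block plus an optional IRQ); the base address and IRQ number below are placeholders, not values taken from a real MSP71xx board file:

	static struct resource pmcmsptwi_resources[] = {
		{
			.start	= 0x1c000000,		/* placeholder base */
			.end	= 0x1c000000 + 0x27,	/* covers offsets 0x00-0x24 */
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 9,			/* placeholder IRQ */
			.end	= 9,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device pmcmsptwi_device = {
		.name		= "pmcmsptwi",		/* must match DRV_NAME */
		.id		= 0,
		.resource	= pmcmsptwi_resources,
		.num_resources	= ARRAY_SIZE(pmcmsptwi_resources),
	};

	/* registered from board setup code via platform_device_register() */
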
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 1425d2245c82..0ab4f2627c26 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -121,8 +121,7 @@ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap,
if (rc)
goto bail;
rc = pmac_i2c_xfer(bus, addrdir, 1, command,
- read ? data->block : &data->block[1],
- data->block[0]);
+ &data->block[1], data->block[0]);
break;
default:
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 28e7b91a4553..9d6b790d4321 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -921,7 +921,14 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.class = plat->class;
}
- ret = i2c_add_adapter(&i2c->adap);
+ /*
+ * If "dev->id" is negative we consider it as zero.
+ * The reason to do so is to avoid sysfs names that only make
+ * sense when there are multiple adapters.
+ */
+ i2c->adap.nr = dev->id >= 0 ? dev->id : 0;
+
+ ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
printk(KERN_INFO "I2C: Failed to add bus\n");
goto eadapt;
diff --git a/drivers/i2c/busses/i2c-rpx.c b/drivers/i2c/busses/i2c-rpx.c
deleted file mode 100644
index 8764df06f51d..000000000000
--- a/drivers/i2c/busses/i2c-rpx.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Embedded Planet RPX Lite MPC8xx CPM I2C interface.
- * Copyright (c) 1999 Dan Malek (dmalek@jlc.net).
- *
- * moved into proper i2c interface;
- * Brad Parker (brad@heeltoe.com)
- *
- * RPX lite specific parts of the i2c interface
- * Update: There actually isn't anything RPXLite-specific about this module.
- * This should work for most any 8xx board. The console messages have been
- * changed to eliminate RPXLite references.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/stddef.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-8xx.h>
-#include <asm/mpc8xx.h>
-#include <asm/commproc.h>
-
-
-static void
-rpx_iic_init(struct i2c_algo_8xx_data *data)
-{
- volatile cpm8xx_t *cp;
- volatile immap_t *immap;
-
- cp = cpmp; /* Get pointer to Communication Processor */
- immap = (immap_t *)IMAP_ADDR; /* and to internal registers */
-
- data->iip = (iic_t *)&cp->cp_dparam[PROFF_IIC];
-
- /* Check for and use a microcode relocation patch.
- */
- if ((data->reloc = data->iip->iic_rpbase))
- data->iip = (iic_t *)&cp->cp_dpmem[data->iip->iic_rpbase];
-
- data->i2c = (i2c8xx_t *)&(immap->im_i2c);
- data->cp = cp;
-
- /* Initialize Port B IIC pins.
- */
- cp->cp_pbpar |= 0x00000030;
- cp->cp_pbdir |= 0x00000030;
- cp->cp_pbodr |= 0x00000030;
-
- /* Allocate space for two transmit and two receive buffer
- * descriptors in the DP ram.
- */
- data->dp_addr = cpm_dpalloc(sizeof(cbd_t) * 4, 8);
-
- /* ptr to i2c area */
- data->i2c = (i2c8xx_t *)&(((immap_t *)IMAP_ADDR)->im_i2c);
-}
-
-static int rpx_install_isr(int irq, void (*func)(void *), void *data)
-{
- /* install interrupt handler */
- cpm_install_handler(irq, func, data);
-
- return 0;
-}
-
-static struct i2c_algo_8xx_data rpx_data = {
- .setisr = rpx_install_isr
-};
-
-static struct i2c_adapter rpx_ops = {
- .owner = THIS_MODULE,
- .name = "m8xx",
- .id = I2C_HW_MPC8XX_EPON,
- .algo_data = &rpx_data,
-};
-
-int __init i2c_rpx_init(void)
-{
- printk(KERN_INFO "i2c-rpx: i2c MPC8xx driver\n");
-
- /* reset hardware to sane state */
- rpx_iic_init(&rpx_data);
-
- if (i2c_8xx_add_bus(&rpx_ops) < 0) {
- printk(KERN_ERR "i2c-rpx: Unable to register with I2C\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-void __exit i2c_rpx_exit(void)
-{
- i2c_8xx_del_bus(&rpx_ops);
-}
-
-MODULE_AUTHOR("Dan Malek <dmalek@jlc.net>");
-MODULE_DESCRIPTION("I2C-Bus adapter routines for MPC8xx boards");
-
-module_init(i2c_rpx_init);
-module_exit(i2c_rpx_exit);
diff --git a/drivers/i2c/busses/i2c-savage4.c b/drivers/i2c/busses/i2c-savage4.c
index b7fb65c30112..8adf4abaa035 100644
--- a/drivers/i2c/busses/i2c-savage4.c
+++ b/drivers/i2c/busses/i2c-savage4.c
@@ -25,8 +25,6 @@
/* This interfaces to the I2C bus of the Savage4 to gain access to
the BT869 and possibly other I2C devices. The DDC bus is not
yet supported because its register is not memory-mapped.
- However we leave the DDC code here, commented out, to make
- it easier to add later.
*/
#include <linux/kernel.h>
@@ -37,36 +35,19 @@
#include <linux/i2c-algo-bit.h>
#include <asm/io.h>
-/* 3DFX defines */
-#define PCI_CHIP_SAVAGE3D 0x8A20
-#define PCI_CHIP_SAVAGE3D_MV 0x8A21
+/* device IDs */
#define PCI_CHIP_SAVAGE4 0x8A22
#define PCI_CHIP_SAVAGE2000 0x9102
-#define PCI_CHIP_PROSAVAGE_PM 0x8A25
-#define PCI_CHIP_PROSAVAGE_KM 0x8A26
-#define PCI_CHIP_SAVAGE_MX_MV 0x8c10
-#define PCI_CHIP_SAVAGE_MX 0x8c11
-#define PCI_CHIP_SAVAGE_IX_MV 0x8c12
-#define PCI_CHIP_SAVAGE_IX 0x8c13
#define REG 0xff20 /* Serial Port 1 Register */
/* bit locations in the register */
-#define DDC_ENAB 0x00040000
-#define DDC_SCL_OUT 0x00080000
-#define DDC_SDA_OUT 0x00100000
-#define DDC_SCL_IN 0x00200000
-#define DDC_SDA_IN 0x00400000
#define I2C_ENAB 0x00000020
#define I2C_SCL_OUT 0x00000001
#define I2C_SDA_OUT 0x00000002
#define I2C_SCL_IN 0x00000008
#define I2C_SDA_IN 0x00000010
-/* initialization states */
-#define INIT2 0x20
-#define INIT3 0x04
-
/* delays */
#define CYCLE_DELAY 10
#define TIMEOUT (HZ / 2)
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index a6feed449dbe..283769cecee2 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -129,6 +129,7 @@ MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"
static struct pci_driver sis5595_driver;
static unsigned short sis5595_base;
+static struct pci_dev *sis5595_pdev;
static u8 sis5595_read(u8 reg)
{
@@ -379,6 +380,8 @@ MODULE_DEVICE_TABLE (pci, sis5595_ids);
static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int err;
+
if (sis5595_setup(dev)) {
dev_err(&dev->dev, "SIS5595 not detected, module not inserted.\n");
return -ENODEV;
@@ -389,20 +392,24 @@ static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_
sprintf(sis5595_adapter.name, "SMBus SIS5595 adapter at %04x",
sis5595_base + SMB_INDEX);
- return i2c_add_adapter(&sis5595_adapter);
-}
+ err = i2c_add_adapter(&sis5595_adapter);
+ if (err) {
+ release_region(sis5595_base + SMB_INDEX, 2);
+ return err;
+ }
-static void __devexit sis5595_remove(struct pci_dev *dev)
-{
- i2c_del_adapter(&sis5595_adapter);
- release_region(sis5595_base + SMB_INDEX, 2);
+ /* Always return failure here. This is to allow other drivers to bind
+ * to this pci device. We don't really want to have control over the
+	 * pci device, we only want to read a few register values from it.
+ */
+ sis5595_pdev = pci_dev_get(dev);
+ return -ENODEV;
}
static struct pci_driver sis5595_driver = {
.name = "sis5595_smbus",
.id_table = sis5595_ids,
.probe = sis5595_probe,
- .remove = __devexit_p(sis5595_remove),
};
static int __init i2c_sis5595_init(void)
@@ -413,6 +420,12 @@ static int __init i2c_sis5595_init(void)
static void __exit i2c_sis5595_exit(void)
{
pci_unregister_driver(&sis5595_driver);
+ if (sis5595_pdev) {
+ i2c_del_adapter(&sis5595_adapter);
+ release_region(sis5595_base + SMB_INDEX, 2);
+ pci_dev_put(sis5595_pdev);
+ sis5595_pdev = NULL;
+ }
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
new file mode 100644
index 000000000000..1b0cfd5472fd
--- /dev/null
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -0,0 +1,330 @@
+/*
+ * Driver for the TAOS evaluation modules
+ * These devices include an I2C master which can be controlled over the
+ * serial port.
+ *
+ * Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+
+#define TAOS_BUFFER_SIZE 63
+
+#define TAOS_STATE_INIT 0
+#define TAOS_STATE_IDLE 1
+#define TAOS_STATE_SEND 2
+#define TAOS_STATE_RECV 3
+
+#define TAOS_CMD_RESET 0x12
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+struct taos_data {
+ struct i2c_adapter adapter;
+ struct i2c_client *client;
+ int state;
+ u8 addr; /* last used address */
+ unsigned char buffer[TAOS_BUFFER_SIZE];
+ unsigned int pos; /* position inside the buffer */
+};
+
+/* TAOS TSL2550 EVM */
+static struct i2c_board_info tsl2550_info = {
+ I2C_BOARD_INFO("tsl2550", 0x39),
+ .type = "tsl2550",
+};
+
+/* Instantiate i2c devices based on the adapter name */
+static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter)
+{
+ if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) {
+ dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n",
+ tsl2550_info.driver_name, tsl2550_info.addr);
+ return i2c_new_device(adapter, &tsl2550_info);
+ }
+
+ return NULL;
+}
+
+static int taos_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct serio *serio = adapter->algo_data;
+ struct taos_data *taos = serio_get_drvdata(serio);
+ char *p;
+
+ /* Encode our transaction. "@" is for the device address, "$" for the
+ SMBus command and "#" for the data. */
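+	/* For instance, a byte-data write of 0x12 to command register 0x03
+	   on a device at address 0x39 encodes as "@39$03#12"; the values
+	   here are purely illustrative. */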
+ p = taos->buffer;
+
+ /* The device remembers the last used address, no need to send it
+ again if it's the same */
+ if (addr != taos->addr)
+ p += sprintf(p, "@%02X", addr);
+
+ switch (size) {
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE)
+ sprintf(p, "$#%02X", command);
+ else
+ sprintf(p, "$");
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE)
+ sprintf(p, "$%02X#%02X", command, data->byte);
+ else
+ sprintf(p, "$%02X", command);
+ break;
+ default:
+ dev_dbg(&adapter->dev, "Unsupported transaction size %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ /* Send the transaction to the TAOS EVM */
+ dev_dbg(&adapter->dev, "Command buffer: %s\n", taos->buffer);
+ taos->pos = 0;
+ taos->state = TAOS_STATE_SEND;
+ serio_write(serio, taos->buffer[0]);
+ wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
+ msecs_to_jiffies(250));
+ if (taos->state != TAOS_STATE_IDLE) {
+ dev_err(&adapter->dev, "Transaction failed "
+ "(state=%d, pos=%d)\n", taos->state, taos->pos);
+ taos->addr = 0;
+ return -EIO;
+ }
+ taos->addr = addr;
+
+ /* Start the transaction and read the answer */
+ taos->pos = 0;
+ taos->state = TAOS_STATE_RECV;
+ serio_write(serio, read_write == I2C_SMBUS_WRITE ? '>' : '<');
+ wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
+ msecs_to_jiffies(150));
+ if (taos->state != TAOS_STATE_IDLE
+ || taos->pos != 6) {
+ dev_err(&adapter->dev, "Transaction timeout (pos=%d)\n",
+ taos->pos);
+ return -EIO;
+ }
+ dev_dbg(&adapter->dev, "Answer buffer: %s\n", taos->buffer);
+
+ /* Interpret the returned string */
+ p = taos->buffer + 2;
+ p[3] = '\0';
+ if (!strcmp(p, "NAK"))
+ return -ENODEV;
+
+ if (read_write == I2C_SMBUS_WRITE) {
+ if (!strcmp(p, "ACK"))
+ return 0;
+ } else {
+ if (p[0] == 'x') {
+ data->byte = simple_strtol(p + 1, NULL, 16);
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+static u32 taos_smbus_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static const struct i2c_algorithm taos_algorithm = {
+ .smbus_xfer = taos_smbus_xfer,
+ .functionality = taos_smbus_func,
+};
+
+static irqreturn_t taos_interrupt(struct serio *serio, unsigned char data,
+ unsigned int flags)
+{
+ struct taos_data *taos = serio_get_drvdata(serio);
+
+ switch (taos->state) {
+ case TAOS_STATE_INIT:
+ taos->buffer[taos->pos++] = data;
+ if (data == ':'
+ || taos->pos == TAOS_BUFFER_SIZE - 1) {
+ taos->buffer[taos->pos] = '\0';
+ taos->state = TAOS_STATE_IDLE;
+ wake_up_interruptible(&wq);
+ }
+ break;
+ case TAOS_STATE_SEND:
+ if (taos->buffer[++taos->pos])
+ serio_write(serio, taos->buffer[taos->pos]);
+ else {
+ taos->state = TAOS_STATE_IDLE;
+ wake_up_interruptible(&wq);
+ }
+ break;
+ case TAOS_STATE_RECV:
+ taos->buffer[taos->pos++] = data;
+ if (data == ']') {
+ taos->buffer[taos->pos] = '\0';
+ taos->state = TAOS_STATE_IDLE;
+ wake_up_interruptible(&wq);
+ }
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Extract the adapter name from the buffer received after reset.
+ The buffer is modified and a pointer inside the buffer is returned. */
+static char *taos_adapter_name(char *buffer)
+{
+ char *start, *end;
+
+ start = strstr(buffer, "TAOS ");
+ if (!start)
+ return NULL;
+
+ end = strchr(start, '\r');
+ if (!end)
+ return NULL;
+ *end = '\0';
+
+ return start;
+}
+
+static int taos_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct taos_data *taos;
+ struct i2c_adapter *adapter;
+ char *name;
+ int err;
+
+ taos = kzalloc(sizeof(struct taos_data), GFP_KERNEL);
+ if (!taos) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ taos->state = TAOS_STATE_INIT;
+ serio_set_drvdata(serio, taos);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto exit_kfree;
+
+ adapter = &taos->adapter;
+ adapter->owner = THIS_MODULE;
+ adapter->algo = &taos_algorithm;
+ adapter->algo_data = serio;
+ adapter->dev.parent = &serio->dev;
+
+ /* Reset the TAOS evaluation module to identify it */
+ serio_write(serio, TAOS_CMD_RESET);
+ wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
+ msecs_to_jiffies(2000));
+
+ if (taos->state != TAOS_STATE_IDLE) {
+ err = -ENODEV;
+ dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
+ "pos=%d)\n", taos->state, taos->pos);
+ goto exit_close;
+ }
+
+ name = taos_adapter_name(taos->buffer);
+ if (!name) {
+ err = -ENODEV;
+ dev_err(&serio->dev, "TAOS EVM identification failed\n");
+ goto exit_close;
+ }
+ strlcpy(adapter->name, name, sizeof(adapter->name));
+
+ err = i2c_add_adapter(adapter);
+ if (err)
+ goto exit_close;
+ dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
+
+ taos->client = taos_instantiate_device(adapter);
+ return 0;
+
+ exit_close:
+ serio_close(serio);
+ exit_kfree:
+ serio_set_drvdata(serio, NULL);
+ kfree(taos);
+ exit:
+ return err;
+}
+
+static void taos_disconnect(struct serio *serio)
+{
+ struct taos_data *taos = serio_get_drvdata(serio);
+
+ if (taos->client)
+ i2c_unregister_device(taos->client);
+ i2c_del_adapter(&taos->adapter);
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ kfree(taos);
+
+ dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
+}
+
+static struct serio_device_id taos_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_TAOSEVM,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(serio, taos_serio_ids);
+
+static struct serio_driver taos_drv = {
+ .driver = {
+ .name = "taos-evm",
+ },
+ .description = "TAOS evaluation module driver",
+ .id_table = taos_serio_ids,
+ .connect = taos_connect,
+ .disconnect = taos_disconnect,
+ .interrupt = taos_interrupt,
+};
+
+static int __init taos_init(void)
+{
+ return serio_register_driver(&taos_drv);
+}
+
+static void __exit taos_exit(void)
+{
+ serio_unregister_driver(&taos_drv);
+}
+
+MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("TAOS evaluation module driver");
+MODULE_LICENSE("GPL");
+
+module_init(taos_init);
+module_exit(taos_exit);
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 7a2bc06304fc..edc275002f80 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -235,7 +235,7 @@ static s32 vt596_access(struct i2c_adapter *adap, u16 addr,
if (!(vt596_features & FEATURE_I2CBLOCK))
goto exit_unsupported;
if (read_write == I2C_SMBUS_READ)
- outb_p(I2C_SMBUS_BLOCK_MAX, SMBHSTDAT0);
+ outb_p(data->block[0], SMBHSTDAT0);
/* Fall through */
case I2C_SMBUS_BLOCK_DATA:
outb_p(command, SMBHSTCMD);
@@ -397,8 +397,7 @@ found:
case PCI_DEVICE_ID_VIA_82C686_4:
/* The VT82C686B (rev 0x40) does support I2C block
transactions, but the VT82C686A (rev 0x30) doesn't */
- if (!pci_read_config_byte(pdev, PCI_REVISION_ID, &temp)
- && temp >= 0x40)
+ if (pdev->revision >= 0x40)
vt596_features |= FEATURE_I2CBLOCK;
break;
}
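
The SMBHSTDAT0 change above reflects the reworked I2C block read interface: the caller now passes the number of bytes to read in data->block[0] rather than the driver always requesting I2C_SMBUS_BLOCK_MAX. A minimal sketch of a hypothetical caller using the matching helper (the client, register 0x00 and 8-byte length are illustrative):

	#include <linux/i2c.h>

	static int example_read_block(struct i2c_client *client)
	{
		u8 buf[8];

		/* Reads 8 bytes starting at device register 0x00 */
		return i2c_smbus_read_i2c_block_data(client, 0x00,
						     sizeof(buf), buf);
	}
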
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 0d6bd4f7b7fa..e6c4a2b762ec 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -310,8 +310,6 @@ static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter,
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- if (rw == I2C_SMBUS_READ)
- data->block[0] = I2C_SMBUS_BLOCK_MAX; /* For now */
len = data->block[0];
if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
@@ -388,7 +386,7 @@ static const struct i2c_algorithm scx200_acb_algorithm = {
};
static struct scx200_acb_iface *scx200_acb_list;
-static DECLARE_MUTEX(scx200_acb_list_mutex);
+static DEFINE_MUTEX(scx200_acb_list_mutex);
static __init int scx200_acb_probe(struct scx200_acb_iface *iface)
{
@@ -472,10 +470,10 @@ static int __init scx200_acb_create(struct scx200_acb_iface *iface)
return -ENODEV;
}
- down(&scx200_acb_list_mutex);
+ mutex_lock(&scx200_acb_list_mutex);
iface->next = scx200_acb_list;
scx200_acb_list = iface;
- up(&scx200_acb_list_mutex);
+ mutex_unlock(&scx200_acb_list_mutex);
return 0;
}
@@ -633,10 +631,10 @@ static void __exit scx200_acb_cleanup(void)
{
struct scx200_acb_iface *iface;
- down(&scx200_acb_list_mutex);
+ mutex_lock(&scx200_acb_list_mutex);
while ((iface = scx200_acb_list) != NULL) {
scx200_acb_list = iface->next;
- up(&scx200_acb_list_mutex);
+ mutex_unlock(&scx200_acb_list_mutex);
i2c_del_adapter(&iface->adapter);
@@ -648,9 +646,9 @@ static void __exit scx200_acb_cleanup(void)
release_region(iface->base, 8);
kfree(iface);
- down(&scx200_acb_list_mutex);
+ mutex_lock(&scx200_acb_list_mutex);
}
- up(&scx200_acb_list_mutex);
+ mutex_unlock(&scx200_acb_list_mutex);
}
module_init(scx200_acb_init);
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index ea085a006ead..2e1c24f671cf 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -5,7 +5,7 @@
menu "Miscellaneous I2C Chip support"
config SENSORS_DS1337
- tristate "Dallas Semiconductor DS1337 and DS1339 Real Time Clock"
+ tristate "Dallas DS1337 and DS1339 Real Time Clock (DEPRECATED)"
depends on EXPERIMENTAL
help
If you say yes here you get support for Dallas Semiconductor
@@ -14,8 +14,11 @@ config SENSORS_DS1337
This driver can also be built as a module. If so, the module
will be called ds1337.
+ This driver is deprecated and will be dropped soon. Use
+ rtc-ds1307 instead.
+
config SENSORS_DS1374
- tristate "Maxim/Dallas Semiconductor DS1374 Real Time Clock"
+ tristate "Dallas DS1374 Real Time Clock (DEPRECATED)"
depends on EXPERIMENTAL
help
If you say yes here you get support for Dallas Semiconductor
@@ -24,6 +27,19 @@ config SENSORS_DS1374
This driver can also be built as a module. If so, the module
will be called ds1374.
+ This driver is deprecated and will be dropped soon. Use
+ rtc-ds1374 instead.
+
+config DS1682
+ tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
+ depends on EXPERIMENTAL
+ help
+ If you say yes here you get support for Dallas Semiconductor
+ DS1682 Total Elapsed Time Recorder.
+
+ This driver can also be built as a module. If so, the module
+ will be called ds1682.
+
config SENSORS_EEPROM
tristate "EEPROM reader"
depends on EXPERIMENTAL
@@ -101,7 +117,7 @@ config TPS65010
will be called tps65010.
config SENSORS_M41T00
- tristate "ST M41T00 RTC chip"
+ tristate "ST M41T00 RTC chip (DEPRECATED)"
depends on PPC32
help
If you say yes here you get support for the ST M41T00 RTC chip.
@@ -109,6 +125,9 @@ config SENSORS_M41T00
This driver can also be built as a module. If so, the module
will be called m41t00.
+ This driver is deprecated and will be dropped soon. Use
+ rtc-ds1307 or rtc-m41t80 instead.
+
config SENSORS_MAX6875
tristate "Maxim MAX6875 Power supply supervisor"
depends on EXPERIMENTAL
@@ -124,4 +143,24 @@ config SENSORS_MAX6875
This driver can also be built as a module. If so, the module
will be called max6875.
+config SENSORS_TSL2550
+ tristate "Taos TSL2550 ambient light sensor"
+ depends on EXPERIMENTAL
+ help
+ If you say yes here you get support for the Taos TSL2550
+ ambient light sensor.
+
+ This driver can also be built as a module. If so, the module
+ will be called tsl2550.
+
+config MENELAUS
+ bool "TWL92330/Menelaus PM chip"
+ depends on I2C=y && ARCH_OMAP24XX
+ help
+ If you say yes here you get support for the Texas Instruments
+	  TWL92330/Menelaus Power Management chip. This includes voltage
+	  regulators, dual-slot memory card transceivers, a real-time clock
+	  and other features that are often used in portable devices like
+ cell phones and PDAs.
+
endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 779868ef2e26..ca924e105959 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_SENSORS_DS1337) += ds1337.o
obj-$(CONFIG_SENSORS_DS1374) += ds1374.o
+obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o
obj-$(CONFIG_SENSORS_MAX6875) += max6875.o
obj-$(CONFIG_SENSORS_M41T00) += m41t00.o
@@ -12,6 +13,8 @@ obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TPS65010) += tps65010.o
+obj-$(CONFIG_MENELAUS) += menelaus.o
+obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/i2c/chips/ds1682.c b/drivers/i2c/chips/ds1682.c
new file mode 100644
index 000000000000..5879f0f25495
--- /dev/null
+++ b/drivers/i2c/chips/ds1682.c
@@ -0,0 +1,259 @@
+/*
+ * Dallas Semiconductor DS1682 Elapsed Time Recorder device driver
+ *
+ * Written by: Grant Likely <grant.likely@secretlab.ca>
+ *
+ * Copyright (C) 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The DS1682 elapsed timer recorder is a simple device that implements
+ * one elapsed time counter, one event counter, an alarm signal and 10
+ * bytes of general purpose EEPROM.
+ *
+ * This driver provides access to the DS1682 counters and user data via
+ * the sysfs. The following attributes are added to the device node:
+ * elapsed_time (u32): Total elapsed event time in ms resolution
+ * alarm_time (u32): When elapsed time exceeds the value in alarm_time,
+ * then the alarm pin is asserted.
+ * event_count (u16): number of times the event pin has gone low.
+ * eeprom (u8[10]): general purpose EEPROM
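+ *
+ * The hardware counts elapsed time in quarter seconds; for example a raw
+ * count of 4 is reported through elapsed_time as 1000 (ms), and writing
+ * 1000 to alarm_time stores a count of 4.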
+ *
+ * Counter registers and user data are both read/write unless the device
+ * has been write protected. Write protection cannot be turned off once it
+ * has been enabled, so this driver deliberately leaves out support for
+ * turning it on, to avoid enabling it by accident; adding write protect
+ * support would be trivial.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <linux/hwmon-sysfs.h>
+
+/* Device registers */
+#define DS1682_REG_CONFIG 0x00
+#define DS1682_REG_ALARM 0x01
+#define DS1682_REG_ELAPSED 0x05
+#define DS1682_REG_EVT_CNTR 0x09
+#define DS1682_REG_EEPROM 0x0b
+#define DS1682_REG_RESET 0x1d
+#define DS1682_REG_WRITE_DISABLE 0x1e
+#define DS1682_REG_WRITE_MEM_DISABLE 0x1f
+
+#define DS1682_EEPROM_SIZE 10
+
+/*
+ * Generic counter attributes
+ */
+static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ __le32 val = 0;
+ int rc;
+
+ dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name);
+
+ /* Read the register */
+ rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr,
+					    (u8 *)&val);
+ if (rc < 0)
+ return -EIO;
+
+ /* Special case: the 32 bit regs are time values with 1/4s
+ * resolution, scale them up to milliseconds */
+ if (sattr->nr == 4)
+ return sprintf(buf, "%llu\n", ((u64) le32_to_cpu(val)) * 250);
+
+ /* Format the output string and return # of bytes */
+ return sprintf(buf, "%li\n", (long)le32_to_cpu(val));
+}
+
+static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ char *endp;
+ u64 val;
+ __le32 val_le;
+ int rc;
+
+ dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
+
+ /* Decode input */
+ val = simple_strtoull(buf, &endp, 0);
+ if (buf == endp) {
+ dev_dbg(dev, "input string not a number\n");
+ return -EINVAL;
+ }
+
+ /* Special case: the 32 bit regs are time values with 1/4s
+ * resolution, scale input down to quarter-seconds */
+ if (sattr->nr == 4)
+ do_div(val, 250);
+
+ /* write out the value */
+ val_le = cpu_to_le32(val);
+ rc = i2c_smbus_write_i2c_block_data(client, sattr->index, sattr->nr,
+					     (u8 *)&val_le);
+ if (rc < 0) {
+ dev_err(dev, "register write failed; reg=0x%x, size=%i\n",
+ sattr->index, sattr->nr);
+ return -EIO;
+ }
+
+ return count;
+}
+
+/*
+ * Simple register attributes
+ */
+static SENSOR_DEVICE_ATTR_2(elapsed_time, S_IRUGO | S_IWUSR, ds1682_show,
+ ds1682_store, 4, DS1682_REG_ELAPSED);
+static SENSOR_DEVICE_ATTR_2(alarm_time, S_IRUGO | S_IWUSR, ds1682_show,
+ ds1682_store, 4, DS1682_REG_ALARM);
+static SENSOR_DEVICE_ATTR_2(event_count, S_IRUGO | S_IWUSR, ds1682_show,
+ ds1682_store, 2, DS1682_REG_EVT_CNTR);
+
+static const struct attribute_group ds1682_group = {
+ .attrs = (struct attribute *[]) {
+ &sensor_dev_attr_elapsed_time.dev_attr.attr,
+ &sensor_dev_attr_alarm_time.dev_attr.attr,
+ &sensor_dev_attr_event_count.dev_attr.attr,
+ NULL,
+ },
+};
+
+/*
+ * User data attribute
+ */
+static ssize_t ds1682_eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
+ int rc;
+
+ dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
+ buf, off, count);
+
+ if (off >= DS1682_EEPROM_SIZE)
+ return 0;
+
+ if (off + count > DS1682_EEPROM_SIZE)
+ count = DS1682_EEPROM_SIZE - off;
+
+ rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
+ count, buf);
+ if (rc < 0)
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t ds1682_eeprom_write(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = kobj_to_i2c_client(kobj);
+
+ dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
+ buf, off, count);
+
+ if (off >= DS1682_EEPROM_SIZE)
+ return -ENOSPC;
+
+ if (off + count > DS1682_EEPROM_SIZE)
+ count = DS1682_EEPROM_SIZE - off;
+
+ /* Write out to the device */
+ if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
+ count, buf) < 0)
+ return -EIO;
+
+ return count;
+}
+
+static struct bin_attribute ds1682_eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = S_IRUGO | S_IWUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = DS1682_EEPROM_SIZE,
+ .read = ds1682_eeprom_read,
+ .write = ds1682_eeprom_write,
+};
+
+/*
+ * Called when a ds1682 device is matched with this driver
+ */
+static int ds1682_probe(struct i2c_client *client)
+{
+ int rc;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(&client->dev, "i2c bus does not support the ds1682\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ rc = sysfs_create_group(&client->dev.kobj, &ds1682_group);
+ if (rc)
+ goto exit;
+
+ rc = sysfs_create_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
+ if (rc)
+ goto exit_bin_attr;
+
+ return 0;
+
+ exit_bin_attr:
+ sysfs_remove_group(&client->dev.kobj, &ds1682_group);
+ exit:
+ return rc;
+}
+
+static int ds1682_remove(struct i2c_client *client)
+{
+ sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
+ sysfs_remove_group(&client->dev.kobj, &ds1682_group);
+ return 0;
+}
+
+static struct i2c_driver ds1682_driver = {
+ .driver = {
+ .name = "ds1682",
+ },
+ .probe = ds1682_probe,
+ .remove = ds1682_remove,
+};
+
+static int __init ds1682_init(void)
+{
+ return i2c_add_driver(&ds1682_driver);
+}
+
+static void __exit ds1682_exit(void)
+{
+ i2c_del_driver(&ds1682_driver);
+}
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver");
+MODULE_LICENSE("GPL");
+
+module_init(ds1682_init);
+module_exit(ds1682_exit);
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
index bfce13c8f1ff..d3da1fb05b9b 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/i2c/chips/eeprom.c
@@ -88,8 +88,10 @@ static void eeprom_update_client(struct i2c_client *client, u8 slice)
dev_dbg(&client->dev, "Starting eeprom update, slice %u\n", slice);
if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
- for (i = slice << 5; i < (slice + 1) << 5; i += I2C_SMBUS_BLOCK_MAX)
- if (i2c_smbus_read_i2c_block_data(client, i, data->data + i) != I2C_SMBUS_BLOCK_MAX)
+ for (i = slice << 5; i < (slice + 1) << 5; i += 32)
+ if (i2c_smbus_read_i2c_block_data(client, i,
+ 32, data->data + i)
+ != 32)
goto exit;
} else {
if (i2c_smbus_write_byte(client, slice << 5)) {
@@ -110,7 +112,8 @@ exit:
mutex_unlock(&data->update_lock);
}
-static ssize_t eeprom_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
struct eeprom_data *data = i2c_get_clientdata(client);
@@ -143,7 +146,6 @@ static struct bin_attribute eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
- .owner = THIS_MODULE,
},
.size = EEPROM_SIZE,
.read = eeprom_read,
diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c
index 76645c142977..64692f666372 100644
--- a/drivers/i2c/chips/max6875.c
+++ b/drivers/i2c/chips/max6875.c
@@ -106,6 +106,7 @@ static void max6875_update_slice(struct i2c_client *client, int slice)
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
if (i2c_smbus_read_i2c_block_data(client,
MAX6875_CMD_BLK_READ,
+ SLICE_SIZE,
buf) != SLICE_SIZE) {
goto exit_up;
}
@@ -125,8 +126,9 @@ exit_up:
mutex_unlock(&data->update_lock);
}
-static ssize_t max6875_read(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+static ssize_t max6875_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
struct max6875_data *data = i2c_get_clientdata(client);
@@ -152,7 +154,6 @@ static struct bin_attribute user_eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
- .owner = THIS_MODULE,
},
.size = USER_EEPROM_SIZE,
.read = max6875_read,
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
new file mode 100644
index 000000000000..48a7e2f0bdd3
--- /dev/null
+++ b/drivers/i2c/chips/menelaus.c
@@ -0,0 +1,1281 @@
+#define DEBUG
+/*
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ *
+ * Some parts based tps65010.c:
+ * Copyright (C) 2004 Texas Instruments and
+ * Copyright (C) 2004-2005 David Brownell
+ *
+ * Some parts based on tlv320aic24.c:
+ * Copyright (C) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Changes for interrupt handling and clean-up by
+ * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com>
+ * Cleanup and generalized support for voltage setting by
+ * Juha Yrjola
+ * Added support for controlling VCORE and regulator sleep states,
+ * Amit Kucheria <amit.kucheria@nokia.com>
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/menelaus.h>
+
+#define DRIVER_NAME "menelaus"
+
+#define pr_err(fmt, arg...)	printk(KERN_ERR DRIVER_NAME ": " fmt "\n", ## arg)
+
+#define MENELAUS_I2C_ADDRESS 0x72
+
+#define MENELAUS_REV 0x01
+#define MENELAUS_VCORE_CTRL1 0x02
+#define MENELAUS_VCORE_CTRL2 0x03
+#define MENELAUS_VCORE_CTRL3 0x04
+#define MENELAUS_VCORE_CTRL4 0x05
+#define MENELAUS_VCORE_CTRL5 0x06
+#define MENELAUS_DCDC_CTRL1 0x07
+#define MENELAUS_DCDC_CTRL2 0x08
+#define MENELAUS_DCDC_CTRL3 0x09
+#define MENELAUS_LDO_CTRL1 0x0A
+#define MENELAUS_LDO_CTRL2 0x0B
+#define MENELAUS_LDO_CTRL3 0x0C
+#define MENELAUS_LDO_CTRL4 0x0D
+#define MENELAUS_LDO_CTRL5 0x0E
+#define MENELAUS_LDO_CTRL6 0x0F
+#define MENELAUS_LDO_CTRL7 0x10
+#define MENELAUS_LDO_CTRL8 0x11
+#define MENELAUS_SLEEP_CTRL1 0x12
+#define MENELAUS_SLEEP_CTRL2 0x13
+#define MENELAUS_DEVICE_OFF 0x14
+#define MENELAUS_OSC_CTRL 0x15
+#define MENELAUS_DETECT_CTRL 0x16
+#define MENELAUS_INT_MASK1 0x17
+#define MENELAUS_INT_MASK2 0x18
+#define MENELAUS_INT_STATUS1 0x19
+#define MENELAUS_INT_STATUS2 0x1A
+#define MENELAUS_INT_ACK1 0x1B
+#define MENELAUS_INT_ACK2 0x1C
+#define MENELAUS_GPIO_CTRL 0x1D
+#define MENELAUS_GPIO_IN 0x1E
+#define MENELAUS_GPIO_OUT 0x1F
+#define MENELAUS_BBSMS 0x20
+#define MENELAUS_RTC_CTRL 0x21
+#define MENELAUS_RTC_UPDATE 0x22
+#define MENELAUS_RTC_SEC 0x23
+#define MENELAUS_RTC_MIN 0x24
+#define MENELAUS_RTC_HR 0x25
+#define MENELAUS_RTC_DAY 0x26
+#define MENELAUS_RTC_MON 0x27
+#define MENELAUS_RTC_YR 0x28
+#define MENELAUS_RTC_WKDAY 0x29
+#define MENELAUS_RTC_AL_SEC 0x2A
+#define MENELAUS_RTC_AL_MIN 0x2B
+#define MENELAUS_RTC_AL_HR 0x2C
+#define MENELAUS_RTC_AL_DAY 0x2D
+#define MENELAUS_RTC_AL_MON 0x2E
+#define MENELAUS_RTC_AL_YR 0x2F
+#define MENELAUS_RTC_COMP_MSB 0x30
+#define MENELAUS_RTC_COMP_LSB 0x31
+#define MENELAUS_S1_PULL_EN 0x32
+#define MENELAUS_S1_PULL_DIR 0x33
+#define MENELAUS_S2_PULL_EN 0x34
+#define MENELAUS_S2_PULL_DIR 0x35
+#define MENELAUS_MCT_CTRL1 0x36
+#define MENELAUS_MCT_CTRL2 0x37
+#define MENELAUS_MCT_CTRL3 0x38
+#define MENELAUS_MCT_PIN_ST 0x39
+#define MENELAUS_DEBOUNCE1 0x3A
+
+#define IH_MENELAUS_IRQS 12
+#define MENELAUS_MMC_S1CD_IRQ 0 /* MMC slot 1 card change */
+#define MENELAUS_MMC_S2CD_IRQ 1 /* MMC slot 2 card change */
+#define MENELAUS_MMC_S1D1_IRQ 2 /* MMC DAT1 low in slot 1 */
+#define MENELAUS_MMC_S2D1_IRQ 3 /* MMC DAT1 low in slot 2 */
+#define MENELAUS_LOWBAT_IRQ 4 /* Low battery */
+#define MENELAUS_HOTDIE_IRQ 5 /* Hot die detect */
+#define MENELAUS_UVLO_IRQ 6 /* UVLO detect */
+#define MENELAUS_TSHUT_IRQ 7 /* Thermal shutdown */
+#define MENELAUS_RTCTMR_IRQ 8 /* RTC timer */
+#define MENELAUS_RTCALM_IRQ 9 /* RTC alarm */
+#define MENELAUS_RTCERR_IRQ 10 /* RTC error */
+#define MENELAUS_PSHBTN_IRQ 11 /* Push button */
+#define MENELAUS_RESERVED12_IRQ 12 /* Reserved */
+#define MENELAUS_RESERVED13_IRQ 13 /* Reserved */
+#define MENELAUS_RESERVED14_IRQ 14 /* Reserved */
+#define MENELAUS_RESERVED15_IRQ 15 /* Reserved */
+
+static void menelaus_work(struct work_struct *_menelaus);
+
+struct menelaus_chip {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct work_struct work;
+#ifdef CONFIG_RTC_DRV_TWL92330
+ struct rtc_device *rtc;
+ u8 rtc_control;
+ unsigned uie:1;
+#endif
+ unsigned vcore_hw_mode:1;
+ u8 mask1, mask2;
+ void (*handlers[16])(struct menelaus_chip *);
+ void (*mmc_callback)(void *data, u8 mask);
+ void *mmc_callback_data;
+};
+
+static struct menelaus_chip *the_menelaus;
+
+static int menelaus_write_reg(int reg, u8 value)
+{
+ int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value);
+
+ if (val < 0) {
+ pr_err("write error");
+ return val;
+ }
+
+ return 0;
+}
+
+static int menelaus_read_reg(int reg)
+{
+ int val = i2c_smbus_read_byte_data(the_menelaus->client, reg);
+
+ if (val < 0)
+ pr_err("read error");
+
+ return val;
+}
+
+static int menelaus_enable_irq(int irq)
+{
+ if (irq > 7) {
+ irq -= 8;
+ the_menelaus->mask2 &= ~(1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK2,
+ the_menelaus->mask2);
+ } else {
+ the_menelaus->mask1 &= ~(1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK1,
+ the_menelaus->mask1);
+ }
+}
+
+static int menelaus_disable_irq(int irq)
+{
+ if (irq > 7) {
+ irq -= 8;
+ the_menelaus->mask2 |= (1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK2,
+ the_menelaus->mask2);
+ } else {
+ the_menelaus->mask1 |= (1 << irq);
+ return menelaus_write_reg(MENELAUS_INT_MASK1,
+ the_menelaus->mask1);
+ }
+}
+
+static int menelaus_ack_irq(int irq)
+{
+ if (irq > 7)
+ return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8));
+ else
+ return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq);
+}
+
+/* Adds a handler for an interrupt. Does not run in interrupt context */
+static int menelaus_add_irq_work(int irq,
+ void (*handler)(struct menelaus_chip *))
+{
+ int ret = 0;
+
+ mutex_lock(&the_menelaus->lock);
+ the_menelaus->handlers[irq] = handler;
+ ret = menelaus_enable_irq(irq);
+ mutex_unlock(&the_menelaus->lock);
+
+ return ret;
+}
+
+/* Removes handler for an interrupt */
+static int menelaus_remove_irq_work(int irq)
+{
+ int ret = 0;
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_disable_irq(irq);
+ the_menelaus->handlers[irq] = NULL;
+ mutex_unlock(&the_menelaus->lock);
+
+ return ret;
+}
+
+/*
+ * Gets scheduled when a card-detect interrupt happens. Note that in some
+ * cases this line is wired to the card cover switch rather than the
+ * card-detect switch in each slot; in that case the cards are not seen by
+ * Menelaus.
+ * FIXME: Add handling for D1 too
+ */
+static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
+{
+ int reg;
+ unsigned char card_mask = 0;
+
+ reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST);
+ if (reg < 0)
+ return;
+
+ if (!(reg & 0x1))
+ card_mask |= (1 << 0);
+
+ if (!(reg & 0x2))
+ card_mask |= (1 << 1);
+
+ if (menelaus_hw->mmc_callback)
+ menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
+ card_mask);
+}
+
+/*
+ * Toggles the MMC slots between open-drain and push-pull mode.
+ */
+int menelaus_set_mmc_opendrain(int slot, int enable)
+{
+ int ret, val;
+
+ if (slot != 1 && slot != 2)
+ return -EINVAL;
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_read_reg(MENELAUS_MCT_CTRL1);
+ if (ret < 0) {
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+ }
+ val = ret;
+ if (slot == 1) {
+ if (enable)
+ val |= 1 << 2;
+ else
+ val &= ~(1 << 2);
+ } else {
+ if (enable)
+ val |= 1 << 3;
+ else
+ val &= ~(1 << 3);
+ }
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
+ mutex_unlock(&the_menelaus->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_set_mmc_opendrain);
+
+int menelaus_set_slot_sel(int enable)
+{
+ int ret;
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
+ if (ret < 0)
+ goto out;
+ ret |= 0x02;
+ if (enable)
+ ret |= 1 << 5;
+ else
+ ret &= ~(1 << 5);
+ ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_set_slot_sel);
+
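+/*
+ * menelaus_set_mmc_slot - configure one MMC slot
+ * @slot: 1 or 2
+ * @enable: nonzero enables the slot (MCT_CTRL3 bit 0 or bit 1)
+ * @power: supply selection 0..2, written to the low bits of MCT_CTRL2
+ *         (only applied on the slot 2 path below)
+ * @cd_en: nonzero sets the slot's detect enable bits in MCT_CTRL2
+ */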
+int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
+{
+ int ret, val;
+
+ if (slot != 1 && slot != 2)
+ return -EINVAL;
+ if (power >= 3)
+ return -EINVAL;
+
+ mutex_lock(&the_menelaus->lock);
+
+ ret = menelaus_read_reg(MENELAUS_MCT_CTRL2);
+ if (ret < 0)
+ goto out;
+ val = ret;
+ if (slot == 1) {
+ if (cd_en)
+ val |= (1 << 4) | (1 << 6);
+ else
+ val &= ~((1 << 4) | (1 << 6));
+ } else {
+ if (cd_en)
+ val |= (1 << 5) | (1 << 7);
+ else
+ val &= ~((1 << 5) | (1 << 7));
+ }
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
+ if (ret < 0)
+ goto out;
+
+ ret = menelaus_read_reg(MENELAUS_MCT_CTRL3);
+ if (ret < 0)
+ goto out;
+ val = ret;
+ if (slot == 1) {
+ if (enable)
+ val |= 1 << 0;
+ else
+ val &= ~(1 << 0);
+ } else {
+ int b;
+
+ if (enable)
+ val |= 1 << 1;
+ else
+ val &= ~(1 << 1);
+ b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
+ b &= ~0x03;
+ b |= power;
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
+ if (ret < 0)
+ goto out;
+ }
+ /* Disable autonomous shutdown */
+ val &= ~(0x03 << 2);
+ ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_set_mmc_slot);
+
+int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
+ void *data)
+{
+ int ret = 0;
+
+ the_menelaus->mmc_callback_data = data;
+ the_menelaus->mmc_callback = callback;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ,
+ menelaus_mmc_cd_work);
+ if (ret < 0)
+ return ret;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ,
+ menelaus_mmc_cd_work);
+ if (ret < 0)
+ return ret;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ,
+ menelaus_mmc_cd_work);
+ if (ret < 0)
+ return ret;
+ ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ,
+ menelaus_mmc_cd_work);
+
+ return ret;
+}
+EXPORT_SYMBOL(menelaus_register_mmc_callback);
+
+void menelaus_unregister_mmc_callback(void)
+{
+ menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ);
+ menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ);
+ menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ);
+ menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
+
+ the_menelaus->mmc_callback = NULL;
+ the_menelaus->mmc_callback_data = NULL;
+}
+EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
+
+struct menelaus_vtg {
+ const char *name;
+ u8 vtg_reg;
+ u8 vtg_shift;
+ u8 vtg_bits;
+ u8 mode_reg;
+};
+
+struct menelaus_vtg_value {
+ u16 vtg;
+ u16 val;
+};
+
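+/*
+ * Program one supply: update the voltage select field in @vtg->vtg_reg,
+ * then write @mode to the matching mode register (callers pass mode 0 to
+ * switch the supply off). A NULL @vtg skips the voltage update.
+ */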
+static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
+ int vtg_val, int mode)
+{
+ int val, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ mutex_lock(&the_menelaus->lock);
+ if (!vtg)
+ goto set_voltage;
+
+ ret = menelaus_read_reg(vtg->vtg_reg);
+ if (ret < 0)
+ goto out;
+ val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift);
+ val |= vtg_val << vtg->vtg_shift;
+
+ dev_dbg(&c->dev, "Setting voltage '%s'"
+ "to %d mV (reg 0x%02x, val 0x%02x)\n",
+ vtg->name, mV, vtg->vtg_reg, val);
+
+ ret = menelaus_write_reg(vtg->vtg_reg, val);
+ if (ret < 0)
+ goto out;
+set_voltage:
+ ret = menelaus_write_reg(vtg->mode_reg, mode);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ if (ret == 0) {
+ /* Wait for voltage to stabilize */
+ msleep(1);
+ }
+ return ret;
+}
+
+static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
+ int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++, tbl++)
+ if (tbl->vtg == vtg)
+ return tbl->val;
+ return -EINVAL;
+}
+
+/*
+ * Vcore can be programmed in two ways:
+ * SW-controlled: Required voltage is programmed into VCORE_CTRL1
+ * HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3
+ * and VCORE_CTRL4
+ *
+ * Call correct 'set' function accordingly
+ */
+
+static const struct menelaus_vtg_value vcore_values[] = {
+ { 1000, 0 },
+ { 1025, 1 },
+ { 1050, 2 },
+ { 1075, 3 },
+ { 1100, 4 },
+ { 1125, 5 },
+ { 1150, 6 },
+ { 1175, 7 },
+ { 1200, 8 },
+ { 1225, 9 },
+ { 1250, 10 },
+ { 1275, 11 },
+ { 1300, 12 },
+ { 1325, 13 },
+ { 1350, 14 },
+ { 1375, 15 },
+ { 1400, 16 },
+ { 1425, 17 },
+ { 1450, 18 },
+};
+
+int menelaus_set_vcore_sw(unsigned int mV)
+{
+ int val, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ val = menelaus_get_vtg_value(mV, vcore_values,
+ ARRAY_SIZE(vcore_values));
+ if (val < 0)
+ return -EINVAL;
+
+ dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
+
+ /* Set SW mode and the voltage in one go. */
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
+ if (ret == 0)
+ the_menelaus->vcore_hw_mode = 0;
+ mutex_unlock(&the_menelaus->lock);
+ msleep(1);
+
+ return ret;
+}
+
+int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
+{
+ int fval, rval, val, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ rval = menelaus_get_vtg_value(roof_mV, vcore_values,
+ ARRAY_SIZE(vcore_values));
+ if (rval < 0)
+ return -EINVAL;
+ fval = menelaus_get_vtg_value(floor_mV, vcore_values,
+ ARRAY_SIZE(vcore_values));
+ if (fval < 0)
+ return -EINVAL;
+
+ dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n",
+ floor_mV, roof_mV);
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval);
+ if (ret < 0)
+ goto out;
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval);
+ if (ret < 0)
+ goto out;
+ if (!the_menelaus->vcore_hw_mode) {
+ val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
+ /* HW mode, turn OFF byte comparator */
+ val |= ((1 << 7) | (1 << 5));
+ ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
+ the_menelaus->vcore_hw_mode = 1;
+ }
+ msleep(1);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+
+static const struct menelaus_vtg vmem_vtg = {
+ .name = "VMEM",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 0,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL3,
+};
+
+static const struct menelaus_vtg_value vmem_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 1900, 2 },
+ { 2500, 3 },
+};
+
+int menelaus_set_vmem(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vmem_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vmem);
+
+static const struct menelaus_vtg vio_vtg = {
+ .name = "VIO",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 2,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL4,
+};
+
+static const struct menelaus_vtg_value vio_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 2500, 2 },
+ { 2800, 3 },
+};
+
+int menelaus_set_vio(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vio_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vio_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vio);
+
+static const struct menelaus_vtg_value vdcdc_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 2000, 2 },
+ { 2200, 3 },
+ { 2400, 4 },
+ { 2800, 5 },
+ { 3000, 6 },
+ { 3300, 7 },
+};
+
+static const struct menelaus_vtg vdcdc2_vtg = {
+ .name = "VDCDC2",
+ .vtg_reg = MENELAUS_DCDC_CTRL1,
+ .vtg_shift = 0,
+ .vtg_bits = 3,
+ .mode_reg = MENELAUS_DCDC_CTRL2,
+};
+
+static const struct menelaus_vtg vdcdc3_vtg = {
+ .name = "VDCDC3",
+ .vtg_reg = MENELAUS_DCDC_CTRL1,
+ .vtg_shift = 3,
+ .vtg_bits = 3,
+ .mode_reg = MENELAUS_DCDC_CTRL3,
+};
+
+int menelaus_set_vdcdc(int dcdc, unsigned int mV)
+{
+ const struct menelaus_vtg *vtg;
+ int val;
+
+ if (dcdc != 2 && dcdc != 3)
+ return -EINVAL;
+ if (dcdc == 2)
+ vtg = &vdcdc2_vtg;
+ else
+ vtg = &vdcdc3_vtg;
+
+ if (mV == 0)
+ return menelaus_set_voltage(vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vdcdc_values,
+ ARRAY_SIZE(vdcdc_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(vtg, mV, val, 0x03);
+}
+
+static const struct menelaus_vtg_value vmmc_values[] = {
+ { 1850, 0 },
+ { 2800, 1 },
+ { 3000, 2 },
+ { 3100, 3 },
+};
+
+static const struct menelaus_vtg vmmc_vtg = {
+ .name = "VMMC",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 6,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL7,
+};
+
+int menelaus_set_vmmc(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vmmc);
+
+
+static const struct menelaus_vtg_value vaux_values[] = {
+ { 1500, 0 },
+ { 1800, 1 },
+ { 2500, 2 },
+ { 2800, 3 },
+};
+
+static const struct menelaus_vtg vaux_vtg = {
+ .name = "VAUX",
+ .vtg_reg = MENELAUS_LDO_CTRL1,
+ .vtg_shift = 4,
+ .vtg_bits = 2,
+ .mode_reg = MENELAUS_LDO_CTRL6,
+};
+
+int menelaus_set_vaux(unsigned int mV)
+{
+ int val;
+
+ if (mV == 0)
+ return menelaus_set_voltage(&vaux_vtg, 0, 0, 0);
+
+ val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values));
+ if (val < 0)
+ return -EINVAL;
+ return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02);
+}
+EXPORT_SYMBOL(menelaus_set_vaux);
+
+int menelaus_get_slot_pin_states(void)
+{
+ return menelaus_read_reg(MENELAUS_MCT_PIN_ST);
+}
+EXPORT_SYMBOL(menelaus_get_slot_pin_states);
+
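+/*
+ * Write the per-regulator sleep configuration @val to SLEEP_CTRL2, then
+ * set or clear the sleep-control bits (bit 6 and bit 2) in GPIO_CTRL
+ * according to @enable.
+ */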
+int menelaus_set_regulator_sleep(int enable, u32 val)
+{
+ int t, ret;
+ struct i2c_client *c = the_menelaus->client;
+
+ mutex_lock(&the_menelaus->lock);
+ ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val);
+ if (ret < 0)
+ goto out;
+
+ dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val);
+
+ ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
+ if (ret < 0)
+ goto out;
+ t = ((1 << 6) | 0x04);
+ if (enable)
+ ret |= t;
+ else
+ ret &= ~t;
+ ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
+out:
+ mutex_unlock(&the_menelaus->lock);
+ return ret;
+}
+
+/*-----------------------------------------------------------------------*/
+
+/* Handles Menelaus interrupts. Does not run in interrupt context */
+static void menelaus_work(struct work_struct *_menelaus)
+{
+ struct menelaus_chip *menelaus =
+ container_of(_menelaus, struct menelaus_chip, work);
+ void (*handler)(struct menelaus_chip *menelaus);
+
+ while (1) {
+ unsigned isr;
+
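+ /*
+ * Build a 16-bit pending-IRQ word: INT_STATUS2 (IRQs 8..15) in the
+ * high byte, INT_STATUS1 (IRQs 0..7) in the low byte, with masked
+ * sources filtered out.
+ */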
+ isr = (menelaus_read_reg(MENELAUS_INT_STATUS2)
+ & ~menelaus->mask2) << 8;
+ isr |= menelaus_read_reg(MENELAUS_INT_STATUS1)
+ & ~menelaus->mask1;
+ if (!isr)
+ break;
+
+ while (isr) {
+ int irq = fls(isr) - 1;
+ isr &= ~(1 << irq);
+
+ mutex_lock(&menelaus->lock);
+ menelaus_disable_irq(irq);
+ menelaus_ack_irq(irq);
+ handler = menelaus->handlers[irq];
+ if (handler)
+ handler(menelaus);
+ menelaus_enable_irq(irq);
+ mutex_unlock(&menelaus->lock);
+ }
+ }
+ enable_irq(menelaus->client->irq);
+}
+
+/*
+ * We cannot use I2C in interrupt context, so we just schedule work.
+ */
+static irqreturn_t menelaus_irq(int irq, void *_menelaus)
+{
+ struct menelaus_chip *menelaus = _menelaus;
+
+ disable_irq_nosync(irq);
+ (void)schedule_work(&menelaus->work);
+
+ return IRQ_HANDLED;
+}
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * The RTC needs to be set once, then it runs on backup battery power.
+ * It supports alarms, including system wake alarms (from some modes);
+ * and 1/second IRQs if requested.
+ */
+#ifdef CONFIG_RTC_DRV_TWL92330
+
+#define RTC_CTRL_RTC_EN (1 << 0)
+#define RTC_CTRL_AL_EN (1 << 1)
+#define RTC_CTRL_MODE12 (1 << 2)
+#define RTC_CTRL_EVERY_MASK (3 << 3)
+#define RTC_CTRL_EVERY_SEC (0 << 3)
+#define RTC_CTRL_EVERY_MIN (1 << 3)
+#define RTC_CTRL_EVERY_HR (2 << 3)
+#define RTC_CTRL_EVERY_DAY (3 << 3)
+
+#define RTC_UPDATE_EVERY 0x08
+
+#define RTC_HR_PM (1 << 7)
+
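+/*
+ * Convert six consecutive BCD RTC registers (SEC, MIN, HR, DAY, MON, YR)
+ * into a struct rtc_time, honouring the chip's 12/24 hour mode.
+ */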
+static void menelaus_to_time(char *regs, struct rtc_time *t)
+{
+ t->tm_sec = BCD2BIN(regs[0]);
+ t->tm_min = BCD2BIN(regs[1]);
+ if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
+ t->tm_hour = BCD2BIN(regs[2] & 0x1f) - 1;
+ if (regs[2] & RTC_HR_PM)
+ t->tm_hour += 12;
+ } else
+ t->tm_hour = BCD2BIN(regs[2] & 0x3f);
+ t->tm_mday = BCD2BIN(regs[3]);
+ t->tm_mon = BCD2BIN(regs[4]) - 1;
+ t->tm_year = BCD2BIN(regs[5]) + 100;
+}
+
+static int time_to_menelaus(struct rtc_time *t, int regnum)
+{
+ int hour, status;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_sec));
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_min));
+ if (status < 0)
+ goto fail;
+
+ if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
+ hour = t->tm_hour + 1;
+ if (hour > 12)
+ hour = RTC_HR_PM | BIN2BCD(hour - 12);
+ else
+ hour = BIN2BCD(hour);
+ } else
+ hour = BIN2BCD(t->tm_hour);
+ status = menelaus_write_reg(regnum++, hour);
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mday));
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mon + 1));
+ if (status < 0)
+ goto fail;
+
+ status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_year - 100));
+ if (status < 0)
+ goto fail;
+
+ return 0;
+fail:
+ dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n",
+ --regnum, status);
+ return status;
+}
+
+static int menelaus_read_time(struct device *dev, struct rtc_time *t)
+{
+ struct i2c_msg msg[2];
+ char regs[7];
+ int status;
+
+ /* block read date and time registers */
+ regs[0] = MENELAUS_RTC_SEC;
+
+ msg[0].addr = MENELAUS_I2C_ADDRESS;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = regs;
+
+ msg[1].addr = MENELAUS_I2C_ADDRESS;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = sizeof(regs);
+ msg[1].buf = regs;
+
+ status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
+ if (status != 2) {
+ dev_err(dev, "%s error %d\n", "read", status);
+ return -EIO;
+ }
+
+ menelaus_to_time(regs, t);
+ t->tm_wday = BCD2BIN(regs[6]);
+
+ return 0;
+}
+
+static int menelaus_set_time(struct device *dev, struct rtc_time *t)
+{
+ int status;
+
+ /* write date and time registers */
+ status = time_to_menelaus(t, MENELAUS_RTC_SEC);
+ if (status < 0)
+ return status;
+ status = menelaus_write_reg(MENELAUS_RTC_WKDAY, BIN2BCD(t->tm_wday));
+ if (status < 0) {
+ dev_err(&the_menelaus->client->dev, "rtc write reg %02x",
+ "err %d\n", MENELAUS_RTC_WKDAY, status);
+ return status;
+ }
+
+ /* now commit the write */
+ status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY);
+ if (status < 0)
+ dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n",
+ status);
+
+ return 0;
+}
+
+static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w)
+{
+ struct i2c_msg msg[2];
+ char regs[6];
+ int status;
+
+ /* block read alarm registers */
+ regs[0] = MENELAUS_RTC_AL_SEC;
+
+ msg[0].addr = MENELAUS_I2C_ADDRESS;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = regs;
+
+ msg[1].addr = MENELAUS_I2C_ADDRESS;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = sizeof(regs);
+ msg[1].buf = regs;
+
+ status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
+ if (status != 2) {
+ dev_err(dev, "%s error %d\n", "alarm read", status);
+ return -EIO;
+ }
+
+ menelaus_to_time(regs, &w->time);
+
+ w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN);
+
+ /* NOTE we *could* check if actually pending... */
+ w->pending = 0;
+
+ return 0;
+}
+
+static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w)
+{
+ int status;
+
+ if (the_menelaus->client->irq <= 0 && w->enabled)
+ return -ENODEV;
+
+ /* clear previous alarm enable */
+ if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) {
+ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+ status = menelaus_write_reg(MENELAUS_RTC_CTRL,
+ the_menelaus->rtc_control);
+ if (status < 0)
+ return status;
+ }
+
+ /* write alarm registers */
+ status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC);
+ if (status < 0)
+ return status;
+
+ /* enable alarm if requested */
+ if (w->enabled) {
+ the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
+ status = menelaus_write_reg(MENELAUS_RTC_CTRL,
+ the_menelaus->rtc_control);
+ }
+
+ return status;
+}
+
+#ifdef CONFIG_RTC_INTF_DEV
+
+static void menelaus_rtc_update_work(struct menelaus_chip *m)
+{
+ /* report 1/sec update */
+ local_irq_disable();
+ rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF);
+ local_irq_enable();
+}
+
+static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+{
+ int status;
+
+ if (the_menelaus->client->irq <= 0)
+ return -ENOIOCTLCMD;
+
+ switch (cmd) {
+ /* alarm IRQ */
+ case RTC_AIE_ON:
+ if (the_menelaus->rtc_control & RTC_CTRL_AL_EN)
+ return 0;
+ the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
+ break;
+ case RTC_AIE_OFF:
+ if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN))
+ return 0;
+ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+ break;
+ /* 1/second "update" IRQ */
+ case RTC_UIE_ON:
+ if (the_menelaus->uie)
+ return 0;
+ status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
+ status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ,
+ menelaus_rtc_update_work);
+ if (status == 0)
+ the_menelaus->uie = 1;
+ return status;
+ case RTC_UIE_OFF:
+ if (!the_menelaus->uie)
+ return 0;
+ status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
+ if (status == 0)
+ the_menelaus->uie = 0;
+ return status;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
+}
+
+#else
+#define menelaus_ioctl NULL
+#endif
+
+/* REVISIT no compensation register support ... */
+
+static const struct rtc_class_ops menelaus_rtc_ops = {
+ .ioctl = menelaus_ioctl,
+ .read_time = menelaus_read_time,
+ .set_time = menelaus_set_time,
+ .read_alarm = menelaus_read_alarm,
+ .set_alarm = menelaus_set_alarm,
+};
+
+static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
+{
+ /* report alarm */
+ local_irq_disable();
+ rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF);
+ local_irq_enable();
+
+ /* then disable it; alarms are oneshot */
+ the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
+ menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
+}
+
+static inline void menelaus_rtc_init(struct menelaus_chip *m)
+{
+ int alarm = (m->client->irq > 0);
+
+ /* assume 32KDETEN pin is pulled high */
+ if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
+ dev_dbg(&m->client->dev, "no 32k oscillator\n");
+ return;
+ }
+
+ /* support RTC alarm; it can issue wakeups */
+ if (alarm) {
+ if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
+ menelaus_rtc_alarm_work) < 0) {
+ dev_err(&m->client->dev, "can't handle RTC alarm\n");
+ return;
+ }
+ device_init_wakeup(&m->client->dev, 1);
+ }
+
+ /* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */
+ m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL);
+ if (!(m->rtc_control & RTC_CTRL_RTC_EN)
+ || (m->rtc_control & RTC_CTRL_AL_EN)
+ || (m->rtc_control & RTC_CTRL_EVERY_MASK)) {
+ if (!(m->rtc_control & RTC_CTRL_RTC_EN)) {
+ dev_warn(&m->client->dev, "rtc clock needs setting\n");
+ m->rtc_control |= RTC_CTRL_RTC_EN;
+ }
+ m->rtc_control &= ~RTC_CTRL_EVERY_MASK;
+ m->rtc_control &= ~RTC_CTRL_AL_EN;
+ menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
+ }
+
+ m->rtc = rtc_device_register(DRIVER_NAME,
+ &m->client->dev,
+ &menelaus_rtc_ops, THIS_MODULE);
+ if (IS_ERR(m->rtc)) {
+ if (alarm) {
+ menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
+ device_init_wakeup(&m->client->dev, 0);
+ }
+ dev_err(&m->client->dev, "can't register RTC: %d\n",
+ (int) PTR_ERR(m->rtc));
+ the_menelaus->rtc = NULL;
+ }
+}
+
+#else
+
+static inline void menelaus_rtc_init(struct menelaus_chip *m)
+{
+ /* nothing */
+}
+
+#endif
+
+/*-----------------------------------------------------------------------*/
+
+static struct i2c_driver menelaus_i2c_driver;
+
+static int menelaus_probe(struct i2c_client *client)
+{
+ struct menelaus_chip *menelaus;
+ int rev = 0, val;
+ int err = 0;
+ struct menelaus_platform_data *menelaus_pdata =
+ client->dev.platform_data;
+
+ if (the_menelaus) {
+ dev_dbg(&client->dev, "only one %s for now\n",
+ DRIVER_NAME);
+ return -ENODEV;
+ }
+
+ menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
+ if (!menelaus)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, menelaus);
+
+ the_menelaus = menelaus;
+ menelaus->client = client;
+
+ /* If this is a true probe, verify that the device responds */
+ rev = menelaus_read_reg(MENELAUS_REV);
+ if (rev < 0) {
+ pr_err("device not found");
+ err = -ENODEV;
+ goto fail1;
+ }
+
+ /* Ack and disable all Menelaus interrupts */
+ menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
+ menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
+ menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
+ menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
+ menelaus->mask1 = 0xff;
+ menelaus->mask2 = 0xff;
+
+ /* Set output buffer strengths */
+ menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
+
+ if (client->irq > 0) {
+ err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
+ DRIVER_NAME, menelaus);
+ if (err) {
+ dev_dbg(&client->dev, "can't get IRQ %d, err %d",
+ client->irq, err);
+ goto fail1;
+ }
+ }
+
+ mutex_init(&menelaus->lock);
+ INIT_WORK(&menelaus->work, menelaus_work);
+
+ pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);
+
+ val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
+ if (val < 0)
+ goto fail2;
+ if (val & (1 << 7))
+ menelaus->vcore_hw_mode = 1;
+ else
+ menelaus->vcore_hw_mode = 0;
+
+ if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
+ err = menelaus_pdata->late_init(&client->dev);
+ if (err < 0)
+ goto fail2;
+ }
+
+ menelaus_rtc_init(menelaus);
+
+ return 0;
+fail2:
+ free_irq(client->irq, menelaus);
+ flush_scheduled_work();
+fail1:
+ kfree(menelaus);
+ return err;
+}
+
+static int __exit menelaus_remove(struct i2c_client *client)
+{
+ struct menelaus_chip *menelaus = i2c_get_clientdata(client);
+
+ free_irq(client->irq, menelaus);
+ kfree(menelaus);
+ i2c_set_clientdata(client, NULL);
+ the_menelaus = NULL;
+ return 0;
+}
+
+static struct i2c_driver menelaus_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = menelaus_probe,
+ .remove = __exit_p(menelaus_remove),
+};
+
+static int __init menelaus_init(void)
+{
+ int res;
+
+ res = i2c_add_driver(&menelaus_i2c_driver);
+ if (res < 0) {
+ pr_err("driver registration failed\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static void __exit menelaus_exit(void)
+{
+ i2c_del_driver(&menelaus_i2c_driver);
+
+ /* FIXME: Shutdown menelaus parts that can be shut down */
+}
+
+MODULE_AUTHOR("Texas Instruments, Inc. (and others)");
+MODULE_DESCRIPTION("I2C interface for Menelaus.");
+MODULE_LICENSE("GPL");
+
+module_init(menelaus_init);
+module_exit(menelaus_exit);
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
new file mode 100644
index 000000000000..3de4b19ba08f
--- /dev/null
+++ b/drivers/i2c/chips/tsl2550.c
@@ -0,0 +1,460 @@
+/*
+ * tsl2550.c - Linux kernel modules for ambient light sensor
+ *
+ * Copyright (C) 2007 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (C) 2007 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define TSL2550_DRV_NAME "tsl2550"
+#define DRIVER_VERSION "1.1.1"
+
+/*
+ * Defines
+ */
+
+#define TSL2550_POWER_DOWN 0x00
+#define TSL2550_POWER_UP 0x03
+#define TSL2550_STANDARD_RANGE 0x18
+#define TSL2550_EXTENDED_RANGE 0x1d
+#define TSL2550_READ_ADC0 0x43
+#define TSL2550_READ_ADC1 0x83
+
+/*
+ * Structs
+ */
+
+struct tsl2550_data {
+ struct i2c_client *client;
+ struct mutex update_lock;
+
+ unsigned int power_state : 1;
+ unsigned int operating_mode : 1;
+};
+
+/*
+ * Global data
+ */
+
+static const u8 TSL2550_MODE_RANGE[2] = {
+ TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE,
+};
+
+/*
+ * Management functions
+ */
+
+static int tsl2550_set_operating_mode(struct i2c_client *client, int mode)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+
+ int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]);
+
+ data->operating_mode = mode;
+
+ return ret;
+}
+
+static int tsl2550_set_power_state(struct i2c_client *client, int state)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ if (state == 0)
+ ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN);
+ else {
+ ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
+
+ /* On power-up, also restore the operating mode */
+ tsl2550_set_operating_mode(client, data->operating_mode);
+ }
+
+ data->power_state = state;
+
+ return ret;
+}
+
+static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd)
+{
+ unsigned long end;
+ int loop = 0, ret = 0;
+
+ /*
+ * Read the ADC channel, waiting at most 400 ms (see the data sheet
+ * for details). To avoid a long busy-wait we spin for a few
+ * milliseconds and then start sleeping.
+ */
+ end = jiffies + msecs_to_jiffies(400);
+ while (time_before(jiffies, end)) {
+ i2c_smbus_write_byte(client, cmd);
+
+ if (loop++ < 5)
+ mdelay(1);
+ else
+ msleep(1);
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0)
+ return ret;
+ else if (ret & 0x0080)
+ break;
+ }
+ if (!(ret & 0x80))
+ return -EIO;
+ return ret & 0x7f; /* remove the "valid" bit */
+}
+
+/*
+ * LUX calculation
+ */
+
+#define TSL2550_MAX_LUX 1846
+
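+/*
+ * Lookup tables for the lux calculation below: count_lut maps the
+ * logarithmically encoded 7-bit ADC reading to a linear count, and
+ * ratio_lut maps the scaled channel ratio (128 * ch1 / ch0) to the
+ * attenuation factor applied to the count difference.
+ */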
+static const u8 ratio_lut[] = {
+ 100, 100, 100, 100, 100, 100, 100, 100,
+ 100, 100, 100, 100, 100, 100, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 98, 98, 98, 98, 98,
+ 98, 98, 97, 97, 97, 97, 97, 96,
+ 96, 96, 96, 95, 95, 95, 94, 94,
+ 93, 93, 93, 92, 92, 91, 91, 90,
+ 89, 89, 88, 87, 87, 86, 85, 84,
+ 83, 82, 81, 80, 79, 78, 77, 75,
+ 74, 73, 71, 69, 68, 66, 64, 62,
+ 60, 58, 56, 54, 52, 49, 47, 44,
+ 42, 41, 40, 40, 39, 39, 38, 38,
+ 37, 37, 37, 36, 36, 36, 35, 35,
+ 35, 35, 34, 34, 34, 34, 33, 33,
+ 33, 33, 32, 32, 32, 32, 32, 31,
+ 31, 31, 31, 31, 30, 30, 30, 30,
+ 30,
+};
+
+static const u16 count_lut[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 26, 28, 30,
+ 32, 34, 36, 38, 40, 42, 44, 46,
+ 49, 53, 57, 61, 65, 69, 73, 77,
+ 81, 85, 89, 93, 97, 101, 105, 109,
+ 115, 123, 131, 139, 147, 155, 163, 171,
+ 179, 187, 195, 203, 211, 219, 227, 235,
+ 247, 263, 279, 295, 311, 327, 343, 359,
+ 375, 391, 407, 423, 439, 455, 471, 487,
+ 511, 543, 575, 607, 639, 671, 703, 735,
+ 767, 799, 831, 863, 895, 927, 959, 991,
+ 1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487,
+ 1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999,
+ 2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991,
+ 3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015,
+};
+
+/*
+ * This function is described in the TAOS TSL2550 Designer's Notebook,
+ * pages 2 and 3.
+ */
+static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
+{
+ unsigned int lux;
+
+ /* Look up count from channel values */
+ u16 c0 = count_lut[ch0];
+ u16 c1 = count_lut[ch1];
+
+ /*
+ * Calculate ratio.
+ * Note: the "128" is a scaling factor
+ */
+ u8 r = 128;
+
+ /* Avoid division by zero; the channel 1 count cannot exceed channel 0 */
+ if (c0 && (c1 <= c0))
+ r = c1 * 128 / c0;
+ else
+ return -1;
+
+ /* Calculate LUX */
+ lux = ((c0 - c1) * ratio_lut[r]) / 256;
+
+ /* LUX range check */
+ return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
+}
+
+/*
+ * SysFS support
+ */
+
+static ssize_t tsl2550_show_power_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sprintf(buf, "%u\n", data->power_state);
+}
+
+static ssize_t tsl2550_store_power_state(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+ int ret;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = tsl2550_set_power_state(client, val);
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+ tsl2550_show_power_state, tsl2550_store_power_state);
+
+static ssize_t tsl2550_show_operating_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sprintf(buf, "%u\n", data->operating_mode);
+}
+
+static ssize_t tsl2550_store_operating_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+ int ret;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ if (data->power_state == 0)
+ return -EBUSY;
+
+ mutex_lock(&data->update_lock);
+ ret = tsl2550_set_operating_mode(client, val);
+ mutex_unlock(&data->update_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
+ tsl2550_show_operating_mode, tsl2550_store_operating_mode);
+
+static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
+{
+ u8 ch0, ch1;
+ int ret;
+
+ ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0);
+ if (ret < 0)
+ return ret;
+ ch0 = ret;
+
+ mdelay(1);
+
+ ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1);
+ if (ret < 0)
+ return ret;
+ ch1 = ret;
+
+ /* Do the job */
+ ret = tsl2550_calculate_lux(ch0, ch1);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t tsl2550_show_lux1_input(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ /* No LUX data if not operational */
+ if (!data->power_state)
+ return -EBUSY;
+
+ mutex_lock(&data->update_lock);
+ ret = __tsl2550_show_lux(client, buf);
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR(lux1_input, S_IRUGO,
+ tsl2550_show_lux1_input, NULL);
+
+static struct attribute *tsl2550_attributes[] = {
+ &dev_attr_power_state.attr,
+ &dev_attr_operating_mode.attr,
+ &dev_attr_lux1_input.attr,
+ NULL
+};
+
+static const struct attribute_group tsl2550_attr_group = {
+ .attrs = tsl2550_attributes,
+};
+
+/*
+ * Initialization function
+ */
+
+static int tsl2550_init_client(struct i2c_client *client)
+{
+ struct tsl2550_data *data = i2c_get_clientdata(client);
+ int err;
+
+ /*
+ * Probe the chip. To do so we try to power up the device and then
+ * read back the 0x03 power-up code.
+ */
+ err = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
+ if (err < 0)
+ return err;
+ mdelay(1);
+ if (i2c_smbus_read_byte(client) != TSL2550_POWER_UP)
+ return -ENODEV;
+ data->power_state = 1;
+
+ /* Set the default operating mode */
+ err = i2c_smbus_write_byte(client,
+ TSL2550_MODE_RANGE[data->operating_mode]);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * I2C init/probing/exit functions
+ */
+
+static struct i2c_driver tsl2550_driver;
+static int __devinit tsl2550_probe(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct tsl2550_data *data;
+ int *opmode, err = 0;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
+ err = -EIO;
+ goto exit;
+ }
+
+ data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ /* Check platform data */
+ opmode = client->dev.platform_data;
+ if (opmode) {
+ if (*opmode < 0 || *opmode > 1) {
+ dev_err(&client->dev, "invalid operating_mode (%d)\n",
+ *opmode);
+ err = -EINVAL;
+ goto exit_kfree;
+ }
+ data->operating_mode = *opmode;
+ } else
+ data->operating_mode = 0; /* default mode is standard */
+ dev_info(&client->dev, "%s operating mode\n",
+ data->operating_mode ? "extended" : "standard");
+
+ mutex_init(&data->update_lock);
+
+ /* Initialize the TSL2550 chip */
+ err = tsl2550_init_client(client);
+ if (err)
+ goto exit_kfree;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group);
+ if (err)
+ goto exit_kfree;
+
+ dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+
+ return 0;
+
+exit_kfree:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit tsl2550_remove(struct i2c_client *client)
+{
+ sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
+
+ /* Power down the device */
+ tsl2550_set_power_state(client, 0);
+
+ kfree(i2c_get_clientdata(client));
+
+ return 0;
+}
+
+static struct i2c_driver tsl2550_driver = {
+ .driver = {
+ .name = TSL2550_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tsl2550_probe,
+ .remove = __devexit_p(tsl2550_remove),
+};
+
+static int __init tsl2550_init(void)
+{
+ return i2c_add_driver(&tsl2550_driver);
+}
+
+static void __exit tsl2550_exit(void)
+{
+ i2c_del_driver(&tsl2550_driver);
+}
+
+MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
+MODULE_DESCRIPTION("TSL2550 ambient light sensor driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(tsl2550_init);
+module_exit(tsl2550_exit);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 435925eba437..d663e6960d93 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -207,6 +207,7 @@ EXPORT_SYMBOL_GPL(i2c_bus_type);
* i2c_new_device - instantiate an i2c device for use with a new style driver
* @adap: the adapter managing the device
* @info: describes one I2C device; bus_num is ignored
+ * Context: can sleep
*
* Create a device to work with a new style i2c driver, where binding is
* handled through driver model probe()/remove() methods. This call is not
@@ -255,6 +256,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
/**
* i2c_unregister_device - reverse effect of i2c_new_device()
* @client: value returned from i2c_new_device()
+ * Context: can sleep
*/
void i2c_unregister_device(struct i2c_client *client)
{
@@ -286,7 +288,6 @@ void i2c_adapter_dev_release(struct device *dev)
struct i2c_adapter *adap = to_i2c_adapter(dev);
complete(&adap->dev_released);
}
-EXPORT_SYMBOL_GPL(i2c_adapter_dev_release); /* exported to i2c-isa */
static ssize_t
show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -305,7 +306,6 @@ struct class i2c_adapter_class = {
.name = "i2c-adapter",
.dev_attrs = i2c_adapter_attrs,
};
-EXPORT_SYMBOL_GPL(i2c_adapter_class); /* exported to i2c-isa */
static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
{
@@ -379,6 +379,7 @@ out_list:
/**
* i2c_add_adapter - declare i2c adapter, use dynamic bus number
* @adapter: the adapter to add
+ * Context: can sleep
*
* This routine is used to declare an I2C adapter when its bus number
* doesn't matter. Examples: for I2C adapters dynamically added by
@@ -416,6 +417,7 @@ EXPORT_SYMBOL(i2c_add_adapter);
/**
* i2c_add_numbered_adapter - declare i2c adapter, use static bus number
* @adap: the adapter to register (with adap->nr initialized)
+ * Context: can sleep
*
* This routine is used to declare an I2C adapter when its bus number
* matters. Example: for I2C adapters from system-on-chip CPUs, or
@@ -463,6 +465,14 @@ retry:
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
+/**
+ * i2c_del_adapter - unregister I2C adapter
+ * @adap: the adapter being unregistered
+ * Context: can sleep
+ *
+ * This unregisters an I2C adapter which was previously registered
+ * by i2c_add_adapter() or i2c_add_numbered_adapter().
+ */
int i2c_del_adapter(struct i2c_adapter *adap)
{
struct list_head *item, *_n;
@@ -598,6 +608,7 @@ EXPORT_SYMBOL(i2c_register_driver);
/**
* i2c_del_driver - unregister I2C driver
* @driver: the driver being unregistered
+ * Context: can sleep
*/
void i2c_del_driver(struct i2c_driver *driver)
{
@@ -1331,10 +1342,14 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
EXPORT_SYMBOL(i2c_smbus_write_block_data);
/* Returns the number of read bytes */
-s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command, u8 *values)
+s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
+ u8 length, u8 *values)
{
union i2c_smbus_data data;
+ if (length > I2C_SMBUS_BLOCK_MAX)
+ length = I2C_SMBUS_BLOCK_MAX;
+ data.block[0] = length;
if (i2c_smbus_xfer(client->adapter,client->addr,client->flags,
I2C_SMBUS_READ,command,
I2C_SMBUS_I2C_BLOCK_DATA,&data))
@@ -1455,7 +1470,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
if (read_write == I2C_SMBUS_READ) {
- msg[1].len = I2C_SMBUS_BLOCK_MAX;
+ msg[1].len = data->block[0];
} else {
msg[0].len = data->block[0] + 1;
if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
@@ -1511,9 +1526,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter * adapter, u16 addr,
data->word = msgbuf1[0] | (msgbuf1[1] << 8);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- /* fixed at 32 for now */
- data->block[0] = I2C_SMBUS_BLOCK_MAX;
- for (i = 0; i < I2C_SMBUS_BLOCK_MAX; i++)
+ for (i = 0; i < data->block[0]; i++)
data->block[i+1] = msgbuf1[i];
break;
case I2C_SMBUS_BLOCK_DATA:
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index e7a709710592..64eee9551b22 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -283,6 +283,7 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
(data_arg.size != I2C_SMBUS_WORD_DATA) &&
(data_arg.size != I2C_SMBUS_PROC_CALL) &&
(data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
+ (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
(data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
(data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
dev_dbg(&client->adapter->dev,
@@ -329,10 +330,18 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
(data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
(data_arg.read_write == I2C_SMBUS_WRITE)) {
if (copy_from_user(&temp, data_arg.data, datasize))
return -EFAULT;
}
+ if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
+ /* Convert old I2C block commands to the new
+ convention. This preserves binary compatibility. */
+ data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
+ if (data_arg.read_write == I2C_SMBUS_READ)
+ temp.block[0] = I2C_SMBUS_BLOCK_MAX;
+ }
res = i2c_smbus_xfer(client->adapter,client->addr,client->flags,
data_arg.read_write,
data_arg.command,data_arg.size,&temp);
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 444a0b84f5bd..c89b5f4b2d04 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -18,10 +18,10 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include <asm/ecard.h>
-#include <asm/io.h>
#define ICS_IDENT_OFFSET 0x2280
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 886091bc7db0..fbfea46a34f2 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -414,12 +414,6 @@ cris_ide_reset(unsigned val)
#ifdef CONFIG_ETRAX_IDE_G27_RESET
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, val);
#endif
-#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
- REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, val);
-#endif
-#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
- REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, val);
-#endif
#ifdef CONFIG_ETRAX_IDE_PB7_RESET
port_pb_dir_shadow = port_pb_dir_shadow |
IO_STATE(R_PORT_PB_DIR, dir7, output);
@@ -690,6 +684,8 @@ static void tune_cris_ide(ide_drive_t *drive, u8 pio)
{
int setup, strobe, hold;
+ pio = ide_get_best_pio_mode(drive, pio, 4);
+
switch(pio)
{
case 0:
@@ -820,6 +816,7 @@ init_e100_ide (void)
hwif->dma_host_on = &cris_dma_on;
hwif->dma_off_quietly = &cris_dma_off;
hwif->cbl = ATA_CBL_PATA40;
+ hwif->pio_mask = ATA_PIO4,
hwif->ultra_mask = cris_ultra_mask;
hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
hwif->autodma = 1;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f429be88c4f9..ae8e1a64b8ad 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -99,6 +99,8 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
+#include <scsi/scsi_ioctl.h>
+
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
@@ -1258,19 +1260,25 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
-static int
+static void
idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc, struct request *rq)
{
- /*
- * just support eject for now, it would not be hard to make the
- * REQ_BLOCK_PC support fully-featured
- */
- if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
- return 1;
-
idefloppy_init_pc(pc);
+ pc->callback = &idefloppy_rw_callback;
memcpy(pc->c, rq->cmd, sizeof(pc->c));
- return 0;
+ pc->rq = rq;
+ pc->b_count = rq->data_len;
+ if (rq->data_len && rq_data_dir(rq) == WRITE)
+ set_bit(PC_WRITING, &pc->flags);
+ pc->buffer = rq->data;
+ if (rq->bio)
+ set_bit(PC_DMA_RECOMMENDED, &pc->flags);
+
+ /*
+ * possibly problematic, doesn't look like ide-floppy correctly
+ * handled scattered requests if dma fails...
+ */
+ pc->request_transfer = pc->buffer_size = rq->data_len;
}
/*
@@ -1317,10 +1325,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
pc = (idefloppy_pc_t *) rq->buffer;
} else if (blk_pc_request(rq)) {
pc = idefloppy_next_pc_storage(drive);
- if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
- idefloppy_do_end_request(drive, 0, 0);
- return ide_stopped;
- }
+ idefloppy_blockpc_cmd(floppy, pc, rq);
} else {
blk_dump_rq_flags(rq,
"ide-floppy: unsupported command in queue");
@@ -2096,7 +2101,21 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
return idefloppy_get_format_progress(drive, argp);
}
- return generic_ide_ioctl(drive, file, bdev, cmd, arg);
+
+ /*
+ * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
+ * and CDROM_SEND_PACKET (legacy) ioctls
+ */
+ if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+ err = scsi_cmd_ioctl(file, bdev->bd_disk->queue,
+ bdev->bd_disk, cmd, argp);
+ else
+ err = -ENOTTY;
+
+ if (err == -ENOTTY)
+ err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
+
+ return err;
}
static int idefloppy_media_changed(struct gendisk *disk)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index c5b5011da56e..484c50e71446 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,7 +55,7 @@
#include <asm/bitops.h>
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
- int uptodate, int nr_sectors)
+ int uptodate, unsigned int nr_bytes)
{
int ret = 1;
@@ -64,7 +64,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
* complete the whole request right now
*/
if (blk_noretry_request(rq) && end_io_error(uptodate))
- nr_sectors = rq->hard_nr_sectors;
+ nr_bytes = rq->hard_nr_sectors << 9;
if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
rq->errors = -EIO;
@@ -78,7 +78,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
HWGROUP(drive)->hwif->ide_dma_on(drive);
}
- if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+ if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
add_disk_randomness(rq->rq_disk);
if (!list_empty(&rq->queuelist))
blkdev_dequeue_request(rq);
@@ -103,6 +103,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
+ unsigned int nr_bytes = nr_sectors << 9;
struct request *rq;
unsigned long flags;
int ret = 1;
@@ -114,10 +115,14 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
spin_lock_irqsave(&ide_lock, flags);
rq = HWGROUP(drive)->rq;
- if (!nr_sectors)
- nr_sectors = rq->hard_cur_sectors;
+ if (!nr_bytes) {
+ if (blk_pc_request(rq))
+ nr_bytes = rq->data_len;
+ else
+ nr_bytes = rq->hard_cur_sectors << 9;
+ }
- ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+ ret = __ide_end_request(drive, rq, uptodate, nr_bytes);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;
@@ -219,11 +224,12 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
- if ((drive->id->capability & 1) == 0)
- break;
if (drive->hwif->ide_dma_check == NULL)
break;
drive->hwif->dma_off_quietly(drive);
+ /*
+ * TODO: respect ->using_dma setting
+ */
ide_set_dma(drive);
break;
}
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 074bb32a4a40..92a6c7bcf527 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -249,12 +249,34 @@ static int ide_scan_pio_blacklist (char *model)
return -1;
}
+unsigned int ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
+{
+ struct hd_driveid *id = drive->id;
+ int cycle_time = 0;
+
+ if (id->field_valid & 2) {
+ if (id->capability & 8)
+ cycle_time = id->eide_pio_iordy;
+ else
+ cycle_time = id->eide_pio;
+ }
+
+ /* conservative "downgrade" for all pre-ATA2 drives */
+ if (pio < 3) {
+ if (cycle_time && cycle_time < ide_pio_timings[pio].cycle_time)
+ cycle_time = 0; /* use standard timing */
+ }
+
+ return cycle_time ? cycle_time : ide_pio_timings[pio].cycle_time;
+}
+
+EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
+
/**
* ide_get_best_pio_mode - get PIO mode from drive
* @drive: drive to consider
* @mode_wanted: preferred mode
* @max_mode: highest allowed mode
- * @d: PIO data
*
* This routine returns the recommended PIO settings for a given drive,
* based on the drive->id information and the ide_pio_blacklist[].
@@ -263,22 +285,18 @@ static int ide_scan_pio_blacklist (char *model)
* This is used by most chipset support modules when "auto-tuning".
*/
-u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_pio_data_t *d)
+u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
{
int pio_mode;
- int cycle_time = 0;
- int use_iordy = 0;
struct hd_driveid* id = drive->id;
int overridden = 0;
- if (mode_wanted != 255) {
- pio_mode = mode_wanted;
- use_iordy = (pio_mode > 2);
- } else if (!drive->id) {
- pio_mode = 0;
- } else if ((pio_mode = ide_scan_pio_blacklist(id->model)) != -1) {
- overridden = 1;
- use_iordy = (pio_mode > 2);
+ if (mode_wanted != 255)
+ return min_t(u8, mode_wanted, max_mode);
+
+ if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0 &&
+ (pio_mode = ide_scan_pio_blacklist(id->model)) != -1) {
+ printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
} else {
pio_mode = id->tPIO;
if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
@@ -286,9 +304,7 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_p
overridden = 1;
}
if (id->field_valid & 2) { /* drive implements ATA2? */
- if (id->capability & 8) { /* drive supports use_iordy? */
- use_iordy = 1;
- cycle_time = id->eide_pio_iordy;
+ if (id->capability & 8) { /* IORDY supported? */
if (id->eide_pio_modes & 7) {
overridden = 0;
if (id->eide_pio_modes & 4)
@@ -298,31 +314,27 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode, ide_p
else
pio_mode = 3;
}
- } else {
- cycle_time = id->eide_pio;
}
}
+ if (overridden)
+ printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
+ drive->name);
+
/*
* Conservative "downgrade" for all pre-ATA2 drives
*/
- if (pio_mode && pio_mode < 4) {
+ if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_DOWNGRADE) == 0 &&
+ pio_mode && pio_mode < 4) {
pio_mode--;
- overridden = 1;
- if (cycle_time && cycle_time < ide_pio_timings[pio_mode].cycle_time)
- cycle_time = 0; /* use standard timing */
+ printk(KERN_INFO "%s: applying conservative "
+ "PIO \"downgrade\"\n", drive->name);
}
}
- if (pio_mode > max_mode) {
+
+ if (pio_mode > max_mode)
pio_mode = max_mode;
- cycle_time = 0;
- }
- if (d) {
- d->pio_mode = pio_mode;
- d->cycle_time = cycle_time ? cycle_time : ide_pio_timings[pio_mode].cycle_time;
- d->use_iordy = use_iordy;
- d->overridden = overridden;
- }
+
return pio_mode;
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc5801399467..5a4c5ea12f89 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
hwgroup->hwif->next = hwif;
spin_unlock_irq(&ide_lock);
} else {
- hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+ hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+ GFP_KERNEL | __GFP_ZERO,
hwif_to_node(hwif->drives[0].hwif));
if (!hwgroup)
goto out_up;
hwif->hwgroup = hwgroup;
- memset(hwgroup, 0, sizeof(ide_hwgroup_t));
hwgroup->hwif = hwif->next = hwif;
hwgroup->rq = NULL;
hwgroup->handler = NULL;
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h
index e6cb8593b5ba..daffbb9797e1 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timing.h
@@ -106,23 +106,6 @@ static struct ide_timing ide_timing[] = {
#define XFER_EPIO 0x01
#define XFER_PIO 0x00
-static short ide_find_best_pio_mode(ide_drive_t *drive)
-{
- struct hd_driveid *id = drive->id;
- short best = 0;
-
- if (id->field_valid & 2) { /* EIDE PIO modes */
-
- if ((best = (drive->id->eide_pio_modes & 4) ? XFER_PIO_5 :
- (drive->id->eide_pio_modes & 2) ? XFER_PIO_4 :
- (drive->id->eide_pio_modes & 1) ? XFER_PIO_3 : 0)) return best;
- }
-
- return (drive->id->tPIO == 2) ? XFER_PIO_2 :
- (drive->id->tPIO == 1) ? XFER_PIO_1 :
- (drive->id->tPIO == 0) ? XFER_PIO_0 : XFER_PIO_SLOW;
-}
-
static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT)
{
q->setup = EZ(t->setup * 1000, T);
@@ -212,7 +195,8 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
*/
if ((speed & XFER_MODE) != XFER_PIO) {
- ide_timing_compute(drive, ide_find_best_pio_mode(drive), &p, T, UT);
+ u8 pio = ide_get_best_pio_mode(drive, 255, 5);
+ ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT);
ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index c948a5c17a5d..5e88a060df06 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -16,10 +16,6 @@
* (usually 14 & 15).
* There can be up to two drives per interface, as per the ATA-2 spec.
*
- * Primary: ide0, port 0x1f0; major=3; hda is minor=0; hdb is minor=64
- * Secondary: ide1, port 0x170; major=22; hdc is minor=0; hdd is minor=64
- * Tertiary: ide2, port 0x???; major=33; hde is minor=0; hdf is minor=64
- * Quaternary: ide3, port 0x???; major=34; hdg is minor=0; hdh is minor=64
* ...
*
* From hd.c:
@@ -47,80 +43,6 @@
* This was a rewrite of just about everything from hd.c, though some original
* code is still sprinkled about. Think of it as a major evolution, with
* inspiration from lots of linux users, esp. hamish@zot.apana.org.au
- *
- * Version 1.0 ALPHA initial code, primary i/f working okay
- * Version 1.3 BETA dual i/f on shared irq tested & working!
- * Version 1.4 BETA added auto probing for irq(s)
- * Version 1.5 BETA added ALPHA (untested) support for IDE cd-roms,
- * ...
- * Version 5.50 allow values as small as 20 for idebus=
- * Version 5.51 force non io_32bit in drive_cmd_intr()
- * change delay_10ms() to delay_50ms() to fix problems
- * Version 5.52 fix incorrect invalidation of removable devices
- * add "hdx=slow" command line option
- * Version 5.60 start to modularize the driver; the disk and ATAPI
- * drivers can be compiled as loadable modules.
- * move IDE probe code to ide-probe.c
- * move IDE disk code to ide-disk.c
- * add support for generic IDE device subdrivers
- * add m68k code from Geert Uytterhoeven
- * probe all interfaces by default
- * add ioctl to (re)probe an interface
- * Version 6.00 use per device request queues
- * attempt to optimize shared hwgroup performance
- * add ioctl to manually adjust bandwidth algorithms
- * add kerneld support for the probe module
- * fix bug in ide_error()
- * fix bug in the first ide_get_lock() call for Atari
- * don't flush leftover data for ATAPI devices
- * Version 6.01 clear hwgroup->active while the hwgroup sleeps
- * support HDIO_GETGEO for floppies
- * Version 6.02 fix ide_ack_intr() call
- * check partition table on floppies
- * Version 6.03 handle bad status bit sequencing in ide_wait_stat()
- * Version 6.10 deleted old entries from this list of updates
- * replaced triton.c with ide-dma.c generic PCI DMA
- * added support for BIOS-enabled UltraDMA
- * rename all "promise" things to "pdc4030"
- * fix EZ-DRIVE handling on small disks
- * Version 6.11 fix probe error in ide_scan_devices()
- * fix ancient "jiffies" polling bugs
- * mask all hwgroup interrupts on each irq entry
- * Version 6.12 integrate ioctl and proc interfaces
- * fix parsing of "idex=" command line parameter
- * Version 6.13 add support for ide4/ide5 courtesy rjones@orchestream.com
- * Version 6.14 fixed IRQ sharing among PCI devices
- * Version 6.15 added SMP awareness to IDE drivers
- * Version 6.16 fixed various bugs; even more SMP friendly
- * Version 6.17 fix for newest EZ-Drive problem
- * Version 6.18 default unpartitioned-disk translation now "BIOS LBA"
- * Version 6.19 Re-design for a UNIFORM driver for all platforms,
- * model based on suggestions from Russell King and
- * Geert Uytterhoeven
- * Promise DC4030VL now supported.
- * add support for ide6/ide7
- * delay_50ms() changed to ide_delay_50ms() and exported.
- * Version 6.20 Added/Fixed Generic ATA-66 support and hwif detection.
- * Added hdx=flash to allow for second flash disk
- * detection w/o the hang loop.
- * Added support for ide8/ide9
- * Added idex=ata66 for the quirky chipsets that are
- * ATA-66 compliant, but have yet to determine a method
- * of verification of the 80c cable presence.
- * Specifically Promise's PDC20262 chipset.
- * Version 6.21 Fixing/Fixed SMP spinlock issue with insight from an old
- * hat that clarified original low level driver design.
- * Version 6.30 Added SMP support; fixed multmode issues. -ml
- * Version 6.31 Debug Share INTR's and request queue streaming
- * Native ATA-100 support
- * Prep for Cascades Project
- * Version 7.00alpha First named revision of ide rearrange
- *
- * Some additional driver compile-time options are in ./include/linux/ide.h
- *
- * To do, in likely order of completion:
- * - modify kernel to obtain BIOS geometry for drives on 2nd/3rd/4th i/f
- *
*/
#define REVISION "Revision: 7.00alpha2"
@@ -455,6 +377,10 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
hwif->straight8 = tmp_hwif->straight8;
hwif->bus_state = tmp_hwif->bus_state;
+ hwif->host_flags = tmp_hwif->host_flags;
+
+ hwif->pio_mask = tmp_hwif->pio_mask;
+
hwif->atapi_dma = tmp_hwif->atapi_dma;
hwif->ultra_mask = tmp_hwif->ultra_mask;
hwif->mwdma_mask = tmp_hwif->mwdma_mask;
@@ -1171,10 +1097,6 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
return 0;
}
- case CDROMEJECT:
- case CDROMCLOSETRAY:
- return scsi_cmd_ioctl(file, bdev->bd_disk, cmd, p);
-
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index df17ed68c0bc..9b9c4761cb7d 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -115,13 +115,12 @@ static void ali14xx_tune_drive (ide_drive_t *drive, u8 pio)
int time1, time2;
u8 param1, param2, param3, param4;
unsigned long flags;
- ide_pio_data_t d;
int bus_speed = system_bus_clock();
- pio = ide_get_best_pio_mode(drive, pio, ALI_MAX_PIO, &d);
+ pio = ide_get_best_pio_mode(drive, pio, ALI_MAX_PIO);
/* calculate timing, according to PIO mode */
- time1 = d.cycle_time;
+ time1 = ide_pio_cycle_time(drive, pio);
time2 = ide_pio_timings[pio].active_time;
param3 = param1 = (time2 * bus_speed + 999) / 1000;
param4 = param2 = (time1 * bus_speed + 999) / 1000 - param1;
@@ -212,10 +211,12 @@ static int __init ali14xx_probe(void)
mate = &ide_hwifs[1];
hwif->chipset = ide_ali14xx;
+ hwif->pio_mask = ATA_PIO4;
hwif->tuneproc = &ali14xx_tune_drive;
hwif->mate = mate;
mate->chipset = ide_ali14xx;
+ mate->pio_mask = ATA_PIO4;
mate->tuneproc = &ali14xx_tune_drive;
mate->mate = hwif;
mate->channel = 1;
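
A pattern repeated across these legacy host drivers: ide_get_best_pio_mode() has lost its ide_pio_data_t out-parameter, and callers that still need the cycle time now ask ide_pio_cycle_time() for it. A minimal sketch of the new calling convention, assuming a hypothetical tuneproc (only the two ide_* helpers and ide_pio_timings[] are taken from the patch; everything else is illustrative):

static void example_tune_drive(ide_drive_t *drive, u8 pio)
{
	unsigned int cycle_time, active_time;

	/* 255 still means "autotune"; the helper clamps to the max PIO given */
	pio = ide_get_best_pio_mode(drive, pio, 4);

	/* the cycle time used to come back via ide_pio_data_t.cycle_time */
	cycle_time = ide_pio_cycle_time(drive, pio);
	active_time = ide_pio_timings[pio].active_time;

	/* ... program the chipset timings from cycle_time/active_time ... */
}
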
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 36a3f0ac6162..6c01d951d074 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -71,7 +71,7 @@ static void tune_dtc2278 (ide_drive_t *drive, u8 pio)
{
unsigned long flags;
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
if (pio >= 3) {
spin_lock_irqsave(&ide_lock, flags);
@@ -123,6 +123,7 @@ static int __init dtc2278_probe(void)
hwif->serialized = 1;
hwif->chipset = ide_dtc2278;
+ hwif->pio_mask = ATA_PIO4;
hwif->tuneproc = &tune_dtc2278;
hwif->drives[0].no_unmask = 1;
hwif->drives[1].no_unmask = 1;
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index e1e9d9d6893f..f0829b83e970 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -8,6 +8,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -54,6 +55,7 @@ static int falconide_offsets[IDE_NR_PORTS] __initdata = {
*/
int falconide_intr_lock;
+EXPORT_SYMBOL(falconide_intr_lock);
/*
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 7f4c0a5050a1..8f2db8dd35f7 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -719,74 +719,25 @@ static int __init hd_init(void)
device_timer.function = hd_times_out;
blk_queue_hardsect_size(hd_queue, 512);
-#ifdef __i386__
if (!NR_HD) {
- extern struct drive_info drive_info;
- unsigned char *BIOS = (unsigned char *) &drive_info;
- unsigned long flags;
- int cmos_disks;
-
- for (drive=0 ; drive<2 ; drive++) {
- hd_info[drive].cyl = *(unsigned short *) BIOS;
- hd_info[drive].head = *(2+BIOS);
- hd_info[drive].wpcom = *(unsigned short *) (5+BIOS);
- hd_info[drive].ctl = *(8+BIOS);
- hd_info[drive].lzone = *(unsigned short *) (12+BIOS);
- hd_info[drive].sect = *(14+BIOS);
-#ifdef does_not_work_for_everybody_with_scsi_but_helps_ibm_vp
- if (hd_info[drive].cyl && NR_HD == drive)
- NR_HD++;
-#endif
- BIOS += 16;
- }
-
- /*
- We query CMOS about hard disks : it could be that
- we have a SCSI/ESDI/etc controller that is BIOS
- compatible with ST-506, and thus showing up in our
- BIOS table, but not register compatible, and therefore
- not present in CMOS.
-
- Furthermore, we will assume that our ST-506 drives
- <if any> are the primary drives in the system, and
- the ones reflected as drive 1 or 2.
-
- The first drive is stored in the high nibble of CMOS
- byte 0x12, the second in the low nibble. This will be
- either a 4 bit drive type or 0xf indicating use byte 0x19
- for an 8 bit type, drive 1, 0x1a for drive 2 in CMOS.
-
- Needless to say, a non-zero value means we have
- an AT controller hard disk for that drive.
-
- Currently the rtc_lock is a bit academic since this
- driver is non-modular, but someday... ? Paul G.
- */
-
- spin_lock_irqsave(&rtc_lock, flags);
- cmos_disks = CMOS_READ(0x12);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- if (cmos_disks & 0xf0) {
- if (cmos_disks & 0x0f)
- NR_HD = 2;
- else
- NR_HD = 1;
- }
- }
-#endif /* __i386__ */
-#ifdef __arm__
- if (!NR_HD) {
- /* We don't know anything about the drive. This means
+ /*
+ * We don't know anything about the drive. This means
* that you *MUST* specify the drive parameters to the
* kernel yourself.
+ *
+ * If we were on an i386, we used to read this info from
+ * the BIOS or CMOS. This doesn't work all that well,
+ * since this assumes that this is a primary or secondary
+ * drive, and if we're using this legacy driver, it's
+ * probably an auxiliary controller added to recover
+ * legacy data off an ST-506 drive. Either way, it's
+ * definitely safest to have the user explicitly specify
+ * the information.
*/
printk("hd: no drives specified - use hd=cyl,head,sectors"
" on kernel command line\n");
- }
-#endif
- if (!NR_HD)
goto out;
+ }
for (drive=0 ; drive < NR_HD ; drive++) {
struct gendisk *disk = alloc_disk(64);
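
For completeness, a hypothetical kernel command line matching the message above; the geometry values are purely illustrative:

	hd=1024,16,63
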
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index c8f353b1296f..bfaa2025173b 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -203,19 +203,21 @@ static u8 ht_pio2timings(ide_drive_t *drive, u8 pio)
{
int active_time, recovery_time;
int active_cycles, recovery_cycles;
- ide_pio_data_t d;
int bus_speed = system_bus_clock();
if (pio) {
- pio = ide_get_best_pio_mode(drive, pio, 5, &d);
-
+ unsigned int cycle_time;
+
+ pio = ide_get_best_pio_mode(drive, pio, 5);
+ cycle_time = ide_pio_cycle_time(drive, pio);
+
/*
* Just like opti621.c we try to calculate the
* actual cycle time for recovery and activity
* according system bus speed.
*/
active_time = ide_pio_timings[pio].active_time;
- recovery_time = d.cycle_time
+ recovery_time = cycle_time
- active_time
- ide_pio_timings[pio].setup_time;
/*
@@ -331,12 +333,14 @@ int __init ht6560b_init(void)
hwif->chipset = ide_ht6560b;
hwif->selectproc = &ht6560b_selectproc;
+ hwif->pio_mask = ATA_PIO5;
hwif->tuneproc = &tune_ht6560b;
hwif->serialized = 1; /* is this needed? */
hwif->mate = mate;
mate->chipset = ide_ht6560b;
mate->selectproc = &ht6560b_selectproc;
+ mate->pio_mask = ATA_PIO5;
mate->tuneproc = &tune_ht6560b;
mate->serialized = 1; /* is this needed? */
mate->mate = hwif;
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 2f3977f195b7..4cdb519f9832 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -386,6 +386,7 @@ static struct pcmcia_device_id ide_ids[] = {
PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 7783745dd167..8b87a424094a 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -252,38 +252,38 @@ static void qd6500_tune_drive (ide_drive_t *drive, u8 pio)
static void qd6580_tune_drive (ide_drive_t *drive, u8 pio)
{
- ide_pio_data_t d;
int base = HWIF(drive)->select_data;
+ unsigned int cycle_time;
int active_time = 175;
int recovery_time = 415; /* worst case values from the dos driver */
if (drive->id && !qd_find_disk_type(drive, &active_time, &recovery_time)) {
- pio = ide_get_best_pio_mode(drive, pio, 4, &d);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
+ cycle_time = ide_pio_cycle_time(drive, pio);
switch (pio) {
case 0: break;
case 3:
- if (d.cycle_time >= 110) {
+ if (cycle_time >= 110) {
active_time = 86;
- recovery_time = d.cycle_time - 102;
+ recovery_time = cycle_time - 102;
} else
printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
break;
case 4:
- if (d.cycle_time >= 69) {
+ if (cycle_time >= 69) {
active_time = 70;
- recovery_time = d.cycle_time - 61;
+ recovery_time = cycle_time - 61;
} else
printk(KERN_WARNING "%s: Strange recovery time !\n",drive->name);
break;
default:
- if (d.cycle_time >= 180) {
+ if (cycle_time >= 180) {
active_time = 110;
- recovery_time = d.cycle_time - 120;
+ recovery_time = cycle_time - 120;
} else {
active_time = ide_pio_timings[pio].active_time;
- recovery_time = d.cycle_time
- -active_time;
+ recovery_time = cycle_time - active_time;
}
}
printk(KERN_INFO "%s: PIO mode%d\n", drive->name,pio);
@@ -346,6 +346,7 @@ static void __init qd_setup(ide_hwif_t *hwif, int base, int config,
hwif->drives[1].drive_data = data1;
hwif->drives[0].io_32bit =
hwif->drives[1].io_32bit = 1;
+ hwif->pio_mask = ATA_PIO4;
hwif->tuneproc = tuneproc;
probe_hwif_init(hwif);
}
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index ddc403a0bd82..d2862e638bc5 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -110,7 +110,7 @@ static void tune_umc (ide_drive_t *drive, u8 pio)
unsigned long flags;
ide_hwgroup_t *hwgroup = ide_hwifs[HWIF(drive)->index^1].hwgroup;
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
drive->name, pio, pio_to_umc[pio]);
spin_lock_irqsave(&ide_lock, flags);
@@ -149,10 +149,12 @@ static int __init umc8672_probe(void)
mate = &ide_hwifs[1];
hwif->chipset = ide_umc8672;
+ hwif->pio_mask = ATA_PIO4;
hwif->tuneproc = &tune_umc;
hwif->mate = mate;
mate->chipset = ide_umc8672;
+ mate->pio_mask = ATA_PIO4;
mate->tuneproc = &tune_umc;
mate->mate = hwif;
mate->channel = 1;
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index 2e7013a2a7f6..2ba6a054b861 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -106,7 +106,7 @@ static void auide_tune_drive(ide_drive_t *drive, byte pio)
u8 speed;
/* get the best pio mode for the drive */
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
printk(KERN_INFO "%s: setting Au1XXX IDE to PIO mode%d\n",
drive->name, pio);
@@ -692,6 +692,8 @@ static int au_ide_probe(struct device *dev)
hwif->swdma_mask = 0x0;
#endif
+ hwif->pio_mask = ATA_PIO4;
+
hwif->noprobe = 0;
hwif->drives[0].unmask = 1;
hwif->drives[1].unmask = 1;
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 6e935d7c63fd..c2e29571b007 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -165,12 +165,11 @@ static int __devinit swarm_ide_init_module(void)
goto out;
}
- if (!(pldev = kmalloc(sizeof (*pldev), GFP_KERNEL))) {
+ if (!(pldev = kzalloc(sizeof (*pldev), GFP_KERNEL))) {
err = -ENOMEM;
goto out_unregister_driver;
}
- memset (pldev, 0, sizeof (*pldev));
pldev->name = swarm_ide_string;
pldev->id = 0;
pldev->dev.release = swarm_ide_platform_release;
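
The swarm.c hunk above is a straightforward conversion from allocate-then-clear to kzalloc(); the two equivalent forms, shown side by side (error handling omitted):

	/* before: allocate, then zero explicitly */
	pldev = kmalloc(sizeof(*pldev), GFP_KERNEL);
	memset(pldev, 0, sizeof(*pldev));

	/* after: kzalloc() returns zeroed memory in one call */
	pldev = kzalloc(sizeof(*pldev), GFP_KERNEL);
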
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index e5d09367627e..74432830abf7 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -142,7 +142,7 @@ static int aec6260_tune_chipset (ide_drive_t *drive, u8 xferspeed)
static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
(void) HWIF(drive)->speedproc(drive, pio + XFER_PIO_0);
}
@@ -174,12 +174,6 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch
{
int bus_speed = system_bus_clock();
- if (dev->resource[PCI_ROM_RESOURCE].start) {
- pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name,
- (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
- }
-
if (bus_speed <= 33)
pci_set_drvdata(dev, (void *) aec6xxx_33_base);
else
@@ -271,48 +265,48 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
.init_setup = init_setup_aec62xx,
.init_chipset = init_chipset_aec62xx,
.init_hwif = init_hwif_aec62xx,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x07, /* udma0-2 */
},{ /* 1 */
.name = "AEC6260",
.init_setup = init_setup_aec62xx,
.init_chipset = init_chipset_aec62xx,
.init_hwif = init_hwif_aec62xx,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x1f, /* udma0-4 */
},{ /* 2 */
.name = "AEC6260R",
.init_setup = init_setup_aec62xx,
.init_chipset = init_chipset_aec62xx,
.init_hwif = init_hwif_aec62xx,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = NEVER_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x1f, /* udma0-4 */
},{ /* 3 */
.name = "AEC6280",
.init_setup = init_setup_aec6x80,
.init_chipset = init_chipset_aec62xx,
.init_hwif = init_hwif_aec62xx,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x3f, /* udma0-5 */
},{ /* 4 */
.name = "AEC6280R",
.init_setup = init_setup_aec6x80,
.init_chipset = init_chipset_aec62xx,
.init_hwif = init_hwif_aec62xx,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x3f, /* udma0-5 */
}
};
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 8a6b27b3bcc3..5511c86733dc 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -295,7 +295,6 @@ static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
static u8 ali15x3_tune_pio (ide_drive_t *drive, u8 pio)
{
- ide_pio_data_t d;
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
int s_time, a_time, c_time;
@@ -307,7 +306,7 @@ static u8 ali15x3_tune_pio (ide_drive_t *drive, u8 pio)
u8 cd_dma_fifo = 0;
int unit = drive->select.b.unit & 1;
- pio = ide_get_best_pio_mode(drive, pio, 5, &d);
+ pio = ide_get_best_pio_mode(drive, pio, 5);
s_time = ide_pio_timings[pio].setup_time;
a_time = ide_pio_timings[pio].active_time;
if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8)
@@ -508,7 +507,7 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
u8 tmpbyte;
struct pci_dev *north = pci_get_slot(dev->bus, PCI_DEVFN(0,0));
- pci_read_config_byte(dev, PCI_REVISION_ID, &m5229_revision);
+ m5229_revision = dev->revision;
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
@@ -817,9 +816,9 @@ static ide_pci_device_t ali15x3_chipset __devinitdata = {
.init_chipset = init_chipset_ali15x3,
.init_hwif = init_hwif_ali15x3,
.init_dma = init_dma_ali15x3,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO5,
};
/**
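
alim15x3 is the first of several drivers in this patch that stop reading PCI_REVISION_ID from config space by hand; the PCI core now caches the value in struct pci_dev, so the two forms below are equivalent (the variable name is illustrative):

	u8 rev;

	/* old: explicit config-space read */
	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);

	/* new: use the value cached by the PCI core at enumeration time */
	rev = dev->revision;
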
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 84ed30cdb324..06c15a6a3e7d 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -1,5 +1,5 @@
/*
- * Version 2.20
+ * Version 2.21
*
* AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04
* IDE driver for Linux.
@@ -123,8 +123,7 @@ static int amd74xx_get_info(char *buffer, char **addr, off_t offset, int count)
amd_print("Driver Version: 2.13");
amd_print("South Bridge: %s", pci_name(bmide_dev));
- pci_read_config_byte(dev, PCI_REVISION_ID, &t);
- amd_print("Revision: IDE %#x", t);
+ amd_print("Revision: IDE %#x", dev->revision);
amd_print("Highest DMA rate: UDMA%s", amd_dma[fls(amd_config->udma_mask) - 1]);
amd_print("BM-DMA base: %#lx", amd_base);
@@ -273,10 +272,8 @@ static int amd_set_drive(ide_drive_t *drive, u8 speed)
static void amd74xx_tune_drive(ide_drive_t *drive, u8 pio)
{
- if (pio == 255) {
- amd_set_drive(drive, ide_find_best_pio_mode(drive));
- return;
- }
+ if (pio == 255)
+ pio = ide_get_best_pio_mode(drive, 255, 5);
amd_set_drive(drive, XFER_PIO_0 + min_t(byte, pio, 5));
}
@@ -285,12 +282,14 @@ static int amd74xx_ide_dma_check(ide_drive_t *drive)
{
u8 speed = ide_max_dma_mode(drive);
- if (speed == 0)
- speed = ide_find_best_pio_mode(drive);
+ if (speed == 0) {
+ amd74xx_tune_drive(drive, 255);
+ return -1;
+ }
amd_set_drive(drive, speed);
- if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
+ if (drive->autodma)
return 0;
return -1;
@@ -312,8 +311,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
*/
if (amd_config->flags & AMD_CHECK_SWDMA) {
- pci_read_config_byte(dev, PCI_REVISION_ID, &t);
- if (t <= 7)
+ if (dev->revision <= 7)
amd_config->flags |= AMD_BAD_SWDMA;
}
@@ -383,7 +381,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
pci_read_config_byte(dev, PCI_REVISION_ID, &t);
printk(KERN_INFO "%s: %s (rev %02x) UDMA%s controller\n",
- amd_chipset->name, pci_name(dev), t,
+ amd_chipset->name, pci_name(dev), dev->revision,
amd_dma[fls(amd_config->udma_mask) - 1]);
/*
@@ -450,10 +448,12 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
.name = name_str, \
.init_chipset = init_chipset_amd74xx, \
.init_hwif = init_hwif_amd74xx, \
- .channels = 2, \
.autodma = AUTODMA, \
.enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
.bootable = ON_BOARD, \
+ .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST \
+ | IDE_HFLAG_PIO_NO_DOWNGRADE, \
+ .pio_mask = ATA_PIO5, \
}
#define DECLARE_NV_DEV(name_str) \
@@ -461,10 +461,12 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
.name = name_str, \
.init_chipset = init_chipset_amd74xx, \
.init_hwif = init_hwif_amd74xx, \
- .channels = 2, \
.autodma = AUTODMA, \
.enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
.bootable = ON_BOARD, \
+ .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST \
+ | IDE_HFLAG_PIO_NO_DOWNGRADE, \
+ .pio_mask = ATA_PIO5, \
}
static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 2761510309b3..1725aa402d98 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -1,9 +1,8 @@
/*
- * linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
+ * linux/drivers/ide/pci/atiixp.c Version 0.02 Jun 16 2007
*
* Copyright (C) 2003 ATI Inc. <hyu@ati.com>
- * Copyright (C) 2004 Bartlomiej Zolnierkiewicz
- *
+ * Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
*/
#include <linux/types.h>
@@ -123,14 +122,14 @@ static void atiixp_dma_host_off(ide_drive_t *drive)
}
/**
- * atiixp_tune_drive - tune a drive attached to a ATIIXP
+ * atiixp_tune_pio - tune a drive attached to a ATIIXP
* @drive: drive to tune
* @pio: desired PIO mode
*
* Set the interface PIO mode.
*/
-static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
+static void atiixp_tune_pio(ide_drive_t *drive, u8 pio)
{
struct pci_dev *dev = drive->hwif->pci_dev;
unsigned long flags;
@@ -154,6 +153,13 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
spin_unlock_irqrestore(&atiixp_lock, flags);
}
+static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
+{
+ pio = ide_get_best_pio_mode(drive, pio, 4);
+ atiixp_tune_pio(drive, pio);
+ (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
+}
+
/**
* atiixp_tune_chipset - tune a ATIIXP interface
* @drive: IDE drive to tune
@@ -175,6 +181,11 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
speed = ide_rate_filter(drive, xferspeed);
+ if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
+ atiixp_tune_pio(drive, speed - XFER_PIO_0);
+ return ide_config_drive_speed(drive, speed);
+ }
+
spin_lock_irqsave(&atiixp_lock, flags);
save_mdma_mode[drive->dn] = 0;
@@ -201,7 +212,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
else
pio = speed - XFER_PIO_0;
- atiixp_tuneproc(drive, pio);
+ atiixp_tune_pio(drive, pio);
return ide_config_drive_speed(drive, speed);
}
@@ -216,18 +227,13 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
static int atiixp_dma_check(ide_drive_t *drive)
{
- u8 tspeed, speed;
-
drive->init_speed = 0;
if (ide_tune_dma(drive))
return 0;
- if (ide_use_fast_pio(drive)) {
- tspeed = ide_get_best_pio_mode(drive, 255, 5, NULL);
- speed = atiixp_dma_2_pio(XFER_PIO_0 + tspeed) + XFER_PIO_0;
- atiixp_speedproc(drive, speed);
- }
+ if (ide_use_fast_pio(drive))
+ atiixp_tuneproc(drive, 255);
return -1;
}
@@ -285,17 +291,18 @@ static ide_pci_device_t atiixp_pci_info[] __devinitdata = {
{ /* 0 */
.name = "ATIIXP",
.init_hwif = init_hwif_atiixp,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
},{ /* 1 */
.name = "SB600_PATA",
.init_hwif = init_hwif_atiixp,
- .channels = 1,
.autodma = AUTODMA,
.enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
+ .pio_mask = ATA_PIO4,
},
};
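
The ide_pci_device_t tables are converted the same way throughout: the .channels field disappears, single-channel controllers set IDE_HFLAG_SINGLE in .host_flags, and the supported PIO modes become an explicit .pio_mask. A condensed sketch of the new shape, with the name and init hook purely illustrative:

static ide_pci_device_t example_chipset __devinitdata = {
	.name		= "EXAMPLE",
	.init_hwif	= init_hwif_example,	/* illustrative */
	.autodma	= AUTODMA,
	.bootable	= ON_BOARD,
	.host_flags	= IDE_HFLAG_SINGLE,	/* replaces .channels = 1 */
	.pio_mask	= ATA_PIO4,
};
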
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index dc43f009acab..9689494efa24 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -633,9 +633,8 @@ static void cmd640_set_mode (unsigned int index, u8 pio_mode, unsigned int cycle
*/
static void cmd640_tune_drive (ide_drive_t *drive, u8 mode_wanted)
{
+ unsigned int index = 0, cycle_time;
u8 b;
- ide_pio_data_t d;
- unsigned int index = 0;
while (drive != cmd_drives[index]) {
if (++index > 3) {
@@ -662,16 +661,14 @@ static void cmd640_tune_drive (ide_drive_t *drive, u8 mode_wanted)
return;
}
- (void) ide_get_best_pio_mode (drive, mode_wanted, 5, &d);
- cmd640_set_mode (index, d.pio_mode, d.cycle_time);
+ mode_wanted = ide_get_best_pio_mode(drive, mode_wanted, 5);
+ cycle_time = ide_pio_cycle_time(drive, mode_wanted);
+ cmd640_set_mode(index, mode_wanted, cycle_time);
+
+ printk("%s: selected cmd640 PIO mode%d (%dns)",
+ drive->name, mode_wanted, cycle_time);
- printk ("%s: selected cmd640 PIO mode%d (%dns)%s",
- drive->name,
- d.pio_mode,
- d.cycle_time,
- d.overridden ? " (overriding vendor mode)" : "");
display_clocks(index);
- return;
}
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
@@ -769,6 +766,7 @@ int __init ide_probe_for_cmd640x (void)
cmd_hwif0->name, 'a' + cmd640_chip_version - 1, bus_type, cfr);
cmd_hwif0->chipset = ide_cmd640;
#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ cmd_hwif0->pio_mask = ATA_PIO5;
cmd_hwif0->tuneproc = &cmd640_tune_drive;
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
@@ -824,6 +822,7 @@ int __init ide_probe_for_cmd640x (void)
cmd_hwif1->mate = cmd_hwif0;
cmd_hwif1->channel = 1;
#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+ cmd_hwif1->pio_mask = ATA_PIO5;
cmd_hwif1->tuneproc = &cmd640_tune_drive;
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
}
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 8631b6c8aa15..19633c5aba15 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -88,7 +88,6 @@ static char * print_cmd64x_get_info (char *buf, struct pci_dev *dev, int index)
u8 reg72 = 0, reg73 = 0; /* primary */
u8 reg7a = 0, reg7b = 0; /* secondary */
u8 reg50 = 1, reg51 = 1, reg57 = 0, reg71 = 0; /* extra */
- u8 rev = 0;
p += sprintf(p, "\nController: %d\n", index);
p += sprintf(p, "PCI-%x Chipset.\n", dev->device);
@@ -103,9 +102,8 @@ static char * print_cmd64x_get_info (char *buf, struct pci_dev *dev, int index)
(void) pci_read_config_byte(dev, UDIDETCR1, &reg7b);
/* PCI0643/6 originally didn't have the primary channel enable bit */
- (void) pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
if ((dev->device == PCI_DEVICE_ID_CMD_643) ||
- (dev->device == PCI_DEVICE_ID_CMD_646 && rev < 3))
+ (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 3))
reg51 |= CNTRL_ENA_1ST;
p += sprintf(p, "---------------- Primary Channel "
@@ -223,17 +221,18 @@ static u8 cmd64x_tune_pio (ide_drive_t *drive, u8 mode_wanted)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
- ide_pio_data_t pio;
+ unsigned int cycle_time;
u8 pio_mode, setup_count, arttim = 0;
static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
- pio_mode = ide_get_best_pio_mode(drive, mode_wanted, 5, &pio);
- cmdprintk("%s: PIO mode wanted %d, selected %d (%d ns)%s\n",
- drive->name, mode_wanted, pio_mode, pio.cycle_time,
- pio.overridden ? " (overriding vendor mode)" : "");
+ pio_mode = ide_get_best_pio_mode(drive, mode_wanted, 5);
+ cycle_time = ide_pio_cycle_time(drive, pio_mode);
- program_cycle_times(drive, pio.cycle_time,
+ cmdprintk("%s: PIO mode wanted %d, selected %d (%d ns)\n",
+ drive->name, mode_wanted, pio_mode, cycle_time);
+
+ program_cycle_times(drive, cycle_time,
ide_pio_timings[pio_mode].active_time);
setup_count = quantize_timing(ide_pio_timings[pio_mode].setup_time,
@@ -604,14 +603,11 @@ static int __devinit init_setup_cmd64x(struct pci_dev *dev, ide_pci_device_t *d)
static int __devinit init_setup_cmd646(struct pci_dev *dev, ide_pci_device_t *d)
{
- u8 rev = 0;
-
/*
* The original PCI0646 didn't have the primary channel enable bit,
* it appeared starting with PCI0646U (i.e. revision ID 3).
*/
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
- if (rev < 3)
+ if (dev->revision < 3)
d->enablebits[0].reg = 0;
return ide_setup_pci_device(dev, d);
@@ -623,40 +619,40 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
.init_setup = init_setup_cmd64x,
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO5,
.udma_mask = 0x00, /* no udma */
},{ /* 1 */
.name = "CMD646",
.init_setup = init_setup_cmd646,
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO5,
.udma_mask = 0x07, /* udma0-2 */
},{ /* 2 */
.name = "CMD648",
.init_setup = init_setup_cmd64x,
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO5,
.udma_mask = 0x1f, /* udma0-4 */
},{ /* 3 */
.name = "CMD649",
.init_setup = init_setup_cmd64x,
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO5,
.udma_mask = 0x3f, /* udma0-5 */
}
};
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 3b88a3a56116..bccedf9b8b28 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -126,7 +126,7 @@ static int cs5520_tune_chipset(ide_drive_t *drive, u8 xferspeed)
static void cs5520_tune_drive(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
cs5520_tune_chipset(drive, (XFER_PIO_0 + pio));
}
@@ -194,10 +194,10 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
.name = name_str, \
.init_setup_dma = cs5520_init_setup_dma, \
.init_hwif = init_hwif_cs5520, \
- .channels = 2, \
.autodma = AUTODMA, \
.bootable = ON_BOARD, \
- .flags = IDEPCI_FLAG_ISA_PORTS, \
+ .host_flags = IDE_HFLAG_ISA_PORTS, \
+ .pio_mask = ATA_PIO4, \
}
static ide_pci_device_t cyrix_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 1eec1f308d16..acaf71fd4c09 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -82,7 +82,7 @@ static void cs5530_tunepio(ide_drive_t *drive, u8 pio)
static void cs5530_tuneproc (ide_drive_t *drive, u8 pio) /* pio=255 means "autotune" */
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
if (cs5530_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0)
cs5530_tunepio(drive, pio);
@@ -236,7 +236,7 @@ static unsigned int __devinit init_chipset_cs5530 (struct pci_dev *dev, const ch
*/
pci_set_master(cs5530_0);
- pci_set_mwi(cs5530_0);
+ pci_try_set_mwi(cs5530_0);
/*
* Set PCI CacheLineSize to 16-bytes:
@@ -341,9 +341,9 @@ static ide_pci_device_t cs5530_chipset __devinitdata = {
.name = "CS5530",
.init_chipset = init_chipset_cs5530,
.init_hwif = init_hwif_cs5530,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
};
static int __devinit cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 10f61f38243c..ce44e38390aa 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -89,7 +89,7 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
pioa = speed - XFER_PIO_0;
piob = ide_get_best_pio_mode(&(drive->hwif->drives[!unit]),
- 255, 4, NULL);
+ 255, 4);
cmd = pioa < piob ? pioa : piob;
/* Write the speed of the current drive */
@@ -159,7 +159,7 @@ static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed)
/* cs5535 max pio is pio 4, best_pio will check the blacklist.
i think we don't need to rate_filter the incoming xferspeed
since we know we're only going to choose pio */
- xferspeed = ide_get_best_pio_mode(drive, xferspeed, 4, NULL);
+ xferspeed = ide_get_best_pio_mode(drive, xferspeed, 4);
ide_config_drive_speed(drive, modes[xferspeed]);
cs5535_set_speed(drive, xferspeed);
}
@@ -174,7 +174,7 @@ static int cs5535_dma_check(ide_drive_t *drive)
return 0;
if (ide_use_fast_pio(drive)) {
- speed = ide_get_best_pio_mode(drive, 255, 4, NULL);
+ speed = ide_get_best_pio_mode(drive, 255, 4);
cs5535_set_drive(drive, speed);
}
@@ -228,9 +228,10 @@ static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
static ide_pci_device_t cs5535_chipset __devinitdata = {
.name = "CS5535",
.init_hwif = init_hwif_cs5535,
- .channels = 1,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
+ .pio_mask = ATA_PIO4,
};
static int __devinit cs5535_init_one(struct pci_dev *dev,
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 103b9db97853..daa36fcbc8ef 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -330,7 +330,7 @@ static void cy82c693_tune_drive (ide_drive_t *drive, u8 pio)
#endif /* CY82C693_DEBUG_LOGS */
/* first let's calc the pio modes */
- pio = ide_get_best_pio_mode(drive, pio, CY82C693_MAX_PIO, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, CY82C693_MAX_PIO);
#if CY82C693_DEBUG_INFO
printk (KERN_INFO "%s: Selected PIO mode %d\n", drive->name, pio);
@@ -483,9 +483,10 @@ static ide_pci_device_t cy82c693_chipset __devinitdata = {
.init_chipset = init_chipset_cy82c693,
.init_iops = init_iops_cy82c693,
.init_hwif = init_hwif_cy82c693,
- .channels = 1,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
+ .pio_mask = ATA_PIO4,
};
static int __devinit cy82c693_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 0d51a11e81da..48caa468b762 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -95,92 +95,77 @@ static ide_pci_device_t generic_chipsets[] __devinitdata = {
{ /* 0 */
.name = "Unknown",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
},{ /* 1 */
.name = "NS87410",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x43,0x08,0x08}, {0x47,0x08,0x08}},
.bootable = ON_BOARD,
},{ /* 2 */
.name = "SAMURAI",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
},{ /* 3 */
.name = "HT6565",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
},{ /* 4 */
.name = "UM8673F",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
},{ /* 5 */
.name = "UM8886A",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
},{ /* 6 */
.name = "UM8886BF",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
},{ /* 7 */
.name = "HINT_IDE",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
},{ /* 8 */
.name = "VIA_IDE",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
},{ /* 9 */
.name = "OPTI621V",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
},{ /* 10 */
.name = "VIA8237SATA",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
},{ /* 11 */
.name = "Piccolo0102",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
},{ /* 12 */
.name = "Piccolo0103",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
},{ /* 13 */
.name = "Piccolo0105",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
},{ /* 14 */
.name = "Revolution",
.init_hwif = init_hwif_generic,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
}
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 2c24c3de8846..19778c5fe711 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -80,7 +80,7 @@ static int hpt34x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
static void hpt34x_tune_drive (ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 5);
(void) hpt34x_tune_chipset(drive, (XFER_PIO_0 + pio));
}
@@ -120,17 +120,10 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
pci_write_config_byte(dev, HPT34X_PCI_INIT_REG, 0x00);
pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (cmd & PCI_COMMAND_MEMORY) {
- if (pci_resource_start(dev, PCI_ROM_RESOURCE)) {
- pci_write_config_dword(dev, PCI_ROM_ADDRESS,
- dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "HPT345: ROM enabled at 0x%08lx\n",
- (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
- }
+ if (cmd & PCI_COMMAND_MEMORY)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
- } else {
+ else
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
- }
/*
* Since 20-23 can be assigned and are R/W, we correct them.
@@ -182,10 +175,10 @@ static ide_pci_device_t hpt34x_chipset __devinitdata = {
.name = "HPT34X",
.init_chipset = init_chipset_hpt34x,
.init_hwif = init_hwif_hpt34x,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = NEVER_BOARD,
- .extra = 16
+ .extra = 16,
+ .pio_mask = ATA_PIO5,
};
static int __devinit hpt34x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 4b6bae8eee82..2cd74c345a6c 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -652,7 +652,7 @@ static int hpt3xx_tune_chipset(ide_drive_t *drive, u8 speed)
static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
(void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio);
}
@@ -994,14 +994,6 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
*/
*info = *(struct hpt_info *)pci_get_drvdata(dev);
- /*
- * FIXME: Not portable. Also, why do we enable the ROM in the first place?
- * We don't seem to be using it.
- */
- if (dev->resource[PCI_ROM_RESOURCE].start)
- pci_write_config_dword(dev, PCI_ROM_ADDRESS,
- dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
-
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
@@ -1413,11 +1405,9 @@ static int __devinit init_setup_hpt372n(struct pci_dev *dev, ide_pci_device_t *d
static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
{
struct hpt_info *info;
- u8 rev = 0, mcr1 = 0;
-
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
+ u8 mcr1 = 0;
- if (rev > 1) {
+ if (dev->revision > 1) {
d->name = "HPT371N";
info = &hpt371n;
@@ -1442,11 +1432,8 @@ static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
static int __devinit init_setup_hpt372a(struct pci_dev *dev, ide_pci_device_t *d)
{
struct hpt_info *info;
- u8 rev = 0;
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
-
- if (rev > 1) {
+ if (dev->revision > 1) {
d->name = "HPT372N";
info = &hpt372n;
@@ -1460,11 +1447,8 @@ static int __devinit init_setup_hpt372a(struct pci_dev *dev, ide_pci_device_t *d
static int __devinit init_setup_hpt302(struct pci_dev *dev, ide_pci_device_t *d)
{
struct hpt_info *info;
- u8 rev = 0;
-
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
- if (rev > 1) {
+ if (dev->revision > 1) {
d->name = "HPT302N";
info = &hpt302n;
@@ -1478,7 +1462,7 @@ static int __devinit init_setup_hpt302(struct pci_dev *dev, ide_pci_device_t *d)
static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
{
struct pci_dev *dev2;
- u8 rev = 0;
+ u8 rev = dev->revision;
static char *chipset_names[] = { "HPT366", "HPT366", "HPT368",
"HPT370", "HPT370A", "HPT372",
"HPT372N" };
@@ -1489,8 +1473,6 @@ static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
if (PCI_FUNC(dev->devfn) & 1)
return -ENODEV;
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
-
switch (rev) {
case 0:
case 1:
@@ -1501,7 +1483,7 @@ static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
* to both functions -- really stupid design decision... :-(
* Bit 4 is for the primary channel, bit 5 for the secondary.
*/
- d->channels = 1;
+ d->host_flags |= IDE_HFLAG_SINGLE;
d->enablebits[0].mask = d->enablebits[0].val = 0x10;
d->udma_mask = HPT366_ALLOW_ATA66_3 ?
@@ -1564,71 +1546,71 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
.init_chipset = init_chipset_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.bootable = OFF_BOARD,
- .extra = 240
+ .extra = 240,
+ .pio_mask = ATA_PIO4,
},{ /* 1 */
.name = "HPT372A",
.init_setup = init_setup_hpt372a,
.init_chipset = init_chipset_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f,
.bootable = OFF_BOARD,
- .extra = 240
+ .extra = 240,
+ .pio_mask = ATA_PIO4,
},{ /* 2 */
.name = "HPT302",
.init_setup = init_setup_hpt302,
.init_chipset = init_chipset_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = HPT302_ALLOW_ATA133_6 ? 0x7f : 0x3f,
.bootable = OFF_BOARD,
- .extra = 240
+ .extra = 240,
+ .pio_mask = ATA_PIO4,
},{ /* 3 */
.name = "HPT371",
.init_setup = init_setup_hpt371,
.init_chipset = init_chipset_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = HPT371_ALLOW_ATA133_6 ? 0x7f : 0x3f,
.bootable = OFF_BOARD,
- .extra = 240
+ .extra = 240,
+ .pio_mask = ATA_PIO4,
},{ /* 4 */
.name = "HPT374",
.init_setup = init_setup_hpt374,
.init_chipset = init_chipset_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
- .channels = 2, /* 4 */
.autodma = AUTODMA,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = 0x3f,
.bootable = OFF_BOARD,
- .extra = 240
+ .extra = 240,
+ .pio_mask = ATA_PIO4,
},{ /* 5 */
.name = "HPT372N",
.init_setup = init_setup_hpt372n,
.init_chipset = init_chipset_hpt366,
.init_hwif = init_hwif_hpt366,
.init_dma = init_dma_hpt366,
- .channels = 2, /* 4 */
.autodma = AUTODMA,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f,
.bootable = OFF_BOARD,
- .extra = 240
+ .extra = 240,
+ .pio_mask = ATA_PIO4,
}
};
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index ff48c23e571e..95dbed7e6022 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -82,7 +82,7 @@ static void it8213_tuneproc (ide_drive_t *drive, u8 pio)
{ 2, 1 },
{ 2, 3 }, };
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
spin_lock_irqsave(&tune_lock, flags);
pci_read_config_word(dev, master_port, &master_data);
@@ -214,7 +214,7 @@ static int it8213_config_drive_for_dma (ide_drive_t *drive)
if (ide_tune_dma(drive))
return 0;
- pio = ide_get_best_pio_mode(drive, 255, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, 255, 4);
it8213_tune_chipset(drive, XFER_PIO_0 + pio);
return -1;
@@ -272,10 +272,11 @@ static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
{ \
.name = name_str, \
.init_hwif = init_hwif_it8213, \
- .channels = 1, \
.autodma = AUTODMA, \
.enablebits = {{0x41,0x80,0x80}}, \
.bootable = ON_BOARD, \
+ .host_flags = IDE_HFLAG_SINGLE, \
+ .pio_mask = ATA_PIO4, \
}
static ide_pci_device_t it8213_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 8197b653ba1e..9286c99e2ff0 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -255,7 +255,7 @@ static int it821x_tunepio(ide_drive_t *drive, u8 set_pio)
* on the cable.
*/
if (pair) {
- u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4, NULL);
+ u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4);
/* trim PIO to the slowest of the master/slave */
if (pair_pio < set_pio)
set_pio = pair_pio;
@@ -276,7 +276,7 @@ static int it821x_tunepio(ide_drive_t *drive, u8 set_pio)
static void it821x_tuneproc(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
(void)it821x_tunepio(drive, pio);
}
@@ -718,10 +718,10 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
.name = name_str, \
.init_chipset = init_chipset_it821x, \
.init_hwif = init_hwif_it821x, \
- .channels = 2, \
.autodma = AUTODMA, \
.bootable = ON_BOARD, \
- .fixup = it821x_fixups \
+ .fixup = it821x_fixups, \
+ .pio_mask = ATA_PIO4, \
}
static ide_pci_device_t it821x_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index a6008f63e71e..d7ce9dd8de16 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -97,7 +97,7 @@ static void jmicron_tuneproc (ide_drive_t *drive, byte mode_wanted)
static void config_jmicron_chipset_for_pio (ide_drive_t *drive, byte set_speed)
{
- u8 speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
+ u8 speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5);
if (set_speed)
(void) ide_config_drive_speed(drive, speed);
}
@@ -177,10 +177,10 @@ fallback:
{ \
.name = name_str, \
.init_hwif = init_hwif_jmicron, \
- .channels = 2, \
.autodma = AUTODMA, \
.bootable = ON_BOARD, \
.enablebits = { {0x40, 1, 1}, {0x40, 0x10, 0x10} }, \
+ .pio_mask = ATA_PIO5, \
}
static ide_pci_device_t jmicron_chipsets[] __devinitdata = {
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c
index b310c4f51077..09941f37d635 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/pci/ns87415.c
@@ -281,7 +281,6 @@ static ide_pci_device_t ns87415_chipset __devinitdata = {
.init_iops = init_iops_ns87415,
#endif
.init_hwif = init_hwif_ns87415,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
};
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c
index aede7eee9246..3a2bb2723515 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/pci/opti621.c
@@ -147,12 +147,12 @@ static void compute_pios(ide_drive_t *drive, u8 pio)
int d;
ide_hwif_t *hwif = HWIF(drive);
- drive->drive_data = ide_get_best_pio_mode(drive, pio, OPTI621_MAX_PIO, NULL);
+ drive->drive_data = ide_get_best_pio_mode(drive, pio, OPTI621_MAX_PIO);
for (d = 0; d < 2; ++d) {
drive = &hwif->drives[d];
if (drive->present) {
if (drive->drive_data == PIO_DONT_KNOW)
- drive->drive_data = ide_get_best_pio_mode(drive, 255, OPTI621_MAX_PIO, NULL);
+ drive->drive_data = ide_get_best_pio_mode(drive, 255, OPTI621_MAX_PIO);
#ifdef OPTI621_DEBUG
printk("%s: Selected PIO mode %d\n",
drive->name, drive->drive_data);
@@ -350,17 +350,17 @@ static ide_pci_device_t opti621_chipsets[] __devinitdata = {
{ /* 0 */
.name = "OPTI621",
.init_hwif = init_hwif_opti621,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO3,
},{ /* 1 */
.name = "OPTI621X",
.init_hwif = init_hwif_opti621,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x45,0x80,0x00}, {0x40,0x08,0x00}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO3,
}
};
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index ee5020df005d..8a66a2871b3a 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -219,7 +219,7 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed)
static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
(void)pdcnew_tune_chipset(drive, XFER_PIO_0 + pio);
}
@@ -378,13 +378,6 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
int f, r;
u8 pll_ctl0, pll_ctl1;
- if (dev->resource[PCI_ROM_RESOURCE].start) {
- pci_write_config_dword(dev, PCI_ROM_ADDRESS,
- dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name,
- (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
- }
-
#ifdef CONFIG_PPC_PMAC
apple_kiwi_init(dev);
#endif
@@ -573,63 +566,63 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
.init_setup = init_setup_pdcnew,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x3f, /* udma0-5 */
},{ /* 1 */
.name = "PDC20269",
.init_setup = init_setup_pdcnew,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x7f, /* udma0-6*/
},{ /* 2 */
.name = "PDC20270",
.init_setup = init_setup_pdc20270,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x3f, /* udma0-5 */
},{ /* 3 */
.name = "PDC20271",
.init_setup = init_setup_pdcnew,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x7f, /* udma0-6*/
},{ /* 4 */
.name = "PDC20275",
.init_setup = init_setup_pdcnew,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x7f, /* udma0-6*/
},{ /* 5 */
.name = "PDC20276",
.init_setup = init_setup_pdc20276,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x7f, /* udma0-6*/
},{ /* 6 */
.name = "PDC20277",
.init_setup = init_setup_pdcnew,
.init_chipset = init_chipset_pdcnew,
.init_hwif = init_hwif_pdc202new,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x7f, /* udma0-6*/
}
};
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 41ac4a94959f..fbcb0bb9c956 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -145,7 +145,7 @@ static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
static void pdc202xx_tune_drive(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
pdc202xx_tune_chipset(drive, XFER_PIO_0 + pio);
}
@@ -316,14 +316,6 @@ static void pdc202xx_reset (ide_drive_t *drive)
static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
const char *name)
{
- /* This doesn't appear needed */
- if (dev->resource[PCI_ROM_RESOURCE].start) {
- pci_write_config_dword(dev, PCI_ROM_ADDRESS,
- dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
- printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name,
- (unsigned long)dev->resource[PCI_ROM_RESOURCE].start);
- }
-
return dev->irq;
}
@@ -449,10 +441,10 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_pdc202xx,
.init_hwif = init_hwif_pdc202xx,
.init_dma = init_dma_pdc202xx,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 16,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x07, /* udma0-2 */
},{ /* 1 */
.name = "PDC20262",
@@ -460,10 +452,10 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_pdc202xx,
.init_hwif = init_hwif_pdc202xx,
.init_dma = init_dma_pdc202xx,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x1f, /* udma0-4 */
},{ /* 2 */
.name = "PDC20263",
@@ -471,10 +463,10 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_pdc202xx,
.init_hwif = init_hwif_pdc202xx,
.init_dma = init_dma_pdc202xx,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x1f, /* udma0-4 */
},{ /* 3 */
.name = "PDC20265",
@@ -482,10 +474,10 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_pdc202xx,
.init_hwif = init_hwif_pdc202xx,
.init_dma = init_dma_pdc202xx,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x3f, /* udma0-5 */
},{ /* 4 */
.name = "PDC20267",
@@ -493,10 +485,10 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
.init_chipset = init_chipset_pdc202xx,
.init_hwif = init_hwif_pdc202xx,
.init_dma = init_dma_pdc202xx,
- .channels = 2,
.autodma = AUTODMA,
.bootable = OFF_BOARD,
.extra = 48,
+ .pio_mask = ATA_PIO4,
.udma_mask = 0x3f, /* udma0-5 */
}
};
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 2e0b29ef596a..4f69cd067e5e 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -219,7 +219,7 @@ static void piix_tune_pio (ide_drive_t *drive, u8 pio)
*/
static void piix_tune_drive (ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
piix_tune_pio(drive, pio);
(void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
@@ -495,10 +495,10 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
.name = name_str, \
.init_chipset = init_chipset_piix, \
.init_hwif = init_hwif_piix, \
- .channels = 2, \
.autodma = AUTODMA, \
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
.bootable = ON_BOARD, \
+ .pio_mask = ATA_PIO4, \
.udma_mask = udma, \
}
@@ -514,11 +514,11 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = {
*/
.name = "MPIIX",
.init_hwif = init_hwif_piix,
- .channels = 2,
.autodma = NODMA,
.enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
.bootable = ON_BOARD,
- .flags = IDEPCI_FLAG_ISA_PORTS
+ .host_flags = IDE_HFLAG_ISA_PORTS,
+ .pio_mask = ATA_PIO4,
},
/* 3 */ DECLARE_PIIX_DEV("PIIX3", 0x00), /* no udma */
@@ -572,18 +572,16 @@ static void __devinit piix_check_450nx(void)
{
struct pci_dev *pdev = NULL;
u16 cfg;
- u8 rev;
while((pdev=pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL)
{
/* Look for 450NX PXB. Check for problem configurations
A PCI quirk checks bit 6 already */
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
pci_read_config_word(pdev, 0x41, &cfg);
/* Only on the original revision: IDE DMA can hang */
- if(rev == 0x00)
+ if (pdev->revision == 0x00)
no_piix_dma = 1;
/* On all revisions below 5 PXB bus lock must be disabled for IDE */
- else if(cfg & (1<<14) && rev < 5)
+ else if (cfg & (1<<14) && pdev->revision < 5)
no_piix_dma = 2;
}
if(no_piix_dma)
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/pci/rz1000.c
index f8c954690142..10e1ae7a4a02 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/pci/rz1000.c
@@ -52,7 +52,6 @@ static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif)
static ide_pci_device_t rz1000_chipset __devinitdata = {
.name = "RZ100x",
.init_hwif = init_hwif_rz1000,
- .channels = 2,
.autodma = NODMA,
.bootable = ON_BOARD,
};
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 523363c93794..9bdc9694d50d 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/ide/pci/sc1200.c Version 0.94 Mar 10 2007
+ * linux/drivers/ide/pci/sc1200.c Version 0.95 Jun 16 2007
*
* Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz
@@ -304,7 +304,7 @@ static void sc1200_tuneproc (ide_drive_t *drive, byte pio) /* mode=255 means "au
return;
}
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
printk("SC1200: %s: setting PIO mode%d\n", drive->name, pio);
if (sc1200_set_xfer_mode(drive, XFER_PIO_0 + pio) == 0)
@@ -390,7 +390,7 @@ static int sc1200_resume (struct pci_dev *dev)
// loop over all interfaces that are part of this pci device:
//
while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) {
- unsigned int basereg, r, d, format;
+ unsigned int basereg, r;
sc1200_saved_state_t *ss = (sc1200_saved_state_t *)hwif->config_data;
//
@@ -402,41 +402,6 @@ static int sc1200_resume (struct pci_dev *dev)
pci_write_config_dword(hwif->pci_dev, basereg + (r<<2), ss->regs[r]);
}
}
- //
- // Re-program drive PIO modes
- //
- pci_read_config_dword(hwif->pci_dev, basereg+4, &format);
- format = (format >> 31) & 1;
- if (format)
- format += sc1200_get_pci_clock();
- for (d = 0; d < 2; ++d) {
- ide_drive_t *drive = &(hwif->drives[d]);
- if (drive->present) {
- unsigned int pio, timings;
- pci_read_config_dword(hwif->pci_dev, basereg+(drive->select.b.unit << 3), &timings);
- for (pio = 0; pio <= 4; ++pio) {
- if (sc1200_pio_timings[format][pio] == timings)
- break;
- }
- if (pio > 4)
- pio = 255; /* autotune */
- (void)sc1200_tuneproc(drive, pio);
- }
- }
- //
- // Re-program drive DMA modes
- //
- for (d = 0; d < MAX_DRIVES; ++d) {
- ide_drive_t *drive = &(hwif->drives[d]);
- if (drive->present && !__ide_dma_bad_drive(drive)) {
- int enable_dma = drive->using_dma;
- hwif->dma_off_quietly(drive);
- if (sc1200_config_dma(drive))
- enable_dma = 0;
- if (enable_dma)
- hwif->dma_host_on(drive);
- }
- }
}
return 0;
}
@@ -471,9 +436,9 @@ static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
static ide_pci_device_t sc1200_chipset __devinitdata = {
.name = "SC1200",
.init_hwif = init_hwif_sc1200,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
};
static int __devinit sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 7b87488e3daa..f668d235e6be 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -165,9 +165,9 @@ scc_ide_outbsync(ide_drive_t * drive, u8 addr, unsigned long port)
ide_hwif_t *hwif = HWIF(drive);
out_be32((void*)port, addr);
- __asm__ __volatile__("eieio":::"memory");
+ eieio();
in_be32((void*)(hwif->dma_base + 0x01c));
- __asm__ __volatile__("eieio":::"memory");
+ eieio();
}
static void
@@ -210,7 +210,7 @@ static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted)
unsigned char speed = XFER_PIO_0;
int offset;
- mode_wanted = ide_get_best_pio_mode(drive, mode_wanted, 4, NULL);
+ mode_wanted = ide_get_best_pio_mode(drive, mode_wanted, 4);
switch (mode_wanted) {
case 4:
speed = XFER_PIO_4;
@@ -401,6 +401,33 @@ static int scc_ide_dma_end(ide_drive_t * drive)
ide_hwif_t *hwif = HWIF(drive);
unsigned long intsts_port = hwif->dma_base + 0x014;
u32 reg;
+ int dma_stat, data_loss = 0;
+ static int retry = 0;
+
+ /* errata A308 workaround: Step5 (check data loss) */
+ /* We don't check non ide_disk because it is limited to UDMA4 */
+ if (!(in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT) &&
+ drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
+ reg = in_be32((void __iomem *)intsts_port);
+ if (!(reg & INTSTS_ACTEINT)) {
+ printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
+ drive->name);
+ data_loss = 1;
+ if (retry++) {
+ struct request *rq = HWGROUP(drive)->rq;
+ int unit;
+ /* ERROR_RESET and drive->crc_count are needed
+ * to reduce DMA transfer mode in retry process.
+ */
+ if (rq)
+ rq->errors |= ERROR_RESET;
+ for (unit = 0; unit < MAX_DRIVES; unit++) {
+ ide_drive_t *drive = &hwif->drives[unit];
+ drive->crc_count++;
+ }
+ }
+ }
+ }
while (1) {
reg = in_be32((void __iomem *)intsts_port);
@@ -469,27 +496,25 @@ static int scc_ide_dma_end(ide_drive_t * drive)
break;
}
- return __ide_dma_end(drive);
+ dma_stat = __ide_dma_end(drive);
+ if (data_loss)
+ dma_stat |= 2; /* emulate DMA error (to retry command) */
+ return dma_stat;
}
/* returns 1 if dma irq issued, 0 otherwise */
static int scc_dma_test_irq(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 dma_stat = hwif->INB(hwif->dma_status);
+ ide_hwif_t *hwif = HWIF(drive);
+ u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
- /* return 1 if INTR asserted */
- if ((dma_stat & 4) == 4)
+ /* SCC errata A252,A308 workaround: Step4 */
+ if ((in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT) &&
+ (int_stat & INTSTS_INTRQ))
return 1;
- /* Workaround for PTERADD: emulate DMA_INTR when
- * - IDE_STATUS[ERR] = 1
- * - INT_STATUS[INTRQ] = 1
- * - DMA_STATUS[IORACTA] = 1
- */
- if (in_be32((void __iomem *)IDE_ALTSTATUS_REG) & ERR_STAT &&
- in_be32((void __iomem *)(hwif->dma_base + 0x014)) & INTSTS_INTRQ &&
- dma_stat & 1)
+ /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
+ if (int_stat & INTSTS_IOIRQS)
return 1;
if (!drive->waiting_for_dma)
@@ -498,6 +523,21 @@ static int scc_dma_test_irq(ide_drive_t *drive)
return 0;
}
+static u8 scc_udma_filter(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 mask = hwif->ultra_mask;
+
+ /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
+ if ((drive->media != ide_disk) && (mask & 0xE0)) {
+ printk(KERN_INFO "%s: limit %s to UDMA4\n",
+ SCC_PATA_NAME, drive->name);
+ mask = 0x1F;
+ }
+
+ return mask;
+}
+
/**
* setup_mmio_scc - map CTRL/BMID region
* @dev: PCI device we are configuring
@@ -702,6 +742,7 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
hwif->tuneproc = scc_tuneproc;
hwif->ide_dma_check = scc_config_drive_for_dma;
hwif->ide_dma_test_irq = scc_dma_test_irq;
+ hwif->udma_filter = scc_udma_filter;
hwif->drives[0].autotune = IDE_TUNE_AUTO;
hwif->drives[1].autotune = IDE_TUNE_AUTO;
@@ -731,9 +772,10 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
.init_setup = init_setup_scc, \
.init_iops = init_iops_scc, \
.init_hwif = init_hwif_scc, \
- .channels = 1, \
.autodma = AUTODMA, \
.bootable = ON_BOARD, \
+ .host_flags = IDE_HFLAG_SINGLE, \
+ .pio_mask = ATA_PIO4, \
}
static ide_pci_device_t scc_chipsets[] __devinitdata = {
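The new scc_udma_filter() above caps ATAPI devices at UDMA4 by returning a 0x1f mask whenever the full mask would allow UDMA5-7 (the 0xE0 bits). A minimal standalone sketch of that bitmask convention, assuming bit N of the mask means "UDMA mode N allowed" (the helper below is hypothetical, not a kernel function):

#include <stdio.h>

/* hypothetical helper: highest UDMA mode permitted by a mode bitmask */
static int highest_udma(unsigned char mask)
{
	int mode;

	for (mode = 7; mode >= 0; mode--)
		if (mask & (1 << mode))
			return mode;
	return -1;			/* no UDMA modes allowed */
}

int main(void)
{
	printf("0x3f -> UDMA%d\n", highest_udma(0x3f));	/* 5: unrestricted mask    */
	printf("0x1f -> UDMA%d\n", highest_udma(0x1f));	/* 4: the ATAPI cap above  */
	return 0;
}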
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 1371b5bf6bf0..9fead2e7d4c8 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/ide/pci/serverworks.c Version 0.20 Jun 3 2007
+ * linux/drivers/ide/pci/serverworks.c Version 0.22 Jun 27 2007
*
* Copyright (C) 1998-2000 Michel Aubry
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz
@@ -55,7 +55,6 @@ static const char *svwks_bad_ata100[] = {
NULL
};
-static u8 svwks_revision = 0;
static struct pci_dev *isa_dev;
static int check_in_drive_lists (ide_drive_t *drive, const char **list)
@@ -71,9 +70,6 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
struct pci_dev *dev = HWIF(drive)->pci_dev;
u8 mask = 0;
- if (!svwks_revision)
- pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
-
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
return 0x1f;
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
@@ -88,9 +84,9 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
return 0;
/* Check the OSB4 DMA33 enable bit */
return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
- } else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
+ } else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
return 0x07;
- } else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
+ } else if (dev->revision >= SVWKS_CSB5_REVISION_NEW) {
u8 btr = 0, mode;
pci_read_config_byte(dev, 0x5A, &btr);
mode = btr & 0x3;
@@ -127,23 +123,45 @@ static u8 svwks_csb_check (struct pci_dev *dev)
}
return 0;
}
+
+static void svwks_tune_pio(ide_drive_t *drive, const u8 pio)
+{
+ static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
+ static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
+
+ struct pci_dev *dev = drive->hwif->pci_dev;
+
+ pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
+
+ if (svwks_csb_check(dev)) {
+ u16 csb_pio = 0;
+
+ pci_read_config_word(dev, 0x4a, &csb_pio);
+
+ csb_pio &= ~(0x0f << (4 * drive->dn));
+ csb_pio |= (pio << (4 * drive->dn));
+
+ pci_write_config_word(dev, 0x4a, csb_pio);
+ }
+}
+
static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
- static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
- static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = hwif->pci_dev;
u8 speed = ide_rate_filter(drive, xferspeed);
- u8 pio = ide_get_best_pio_mode(drive, 255, 4, NULL);
u8 unit = (drive->select.b.unit & 0x01);
- u8 csb5 = svwks_csb_check(dev);
- u8 ultra_enable = 0, ultra_timing = 0;
- u8 dma_timing = 0, pio_timing = 0;
- u16 csb5_pio = 0;
+
+ u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
+
+ if (speed >= XFER_PIO_0 && speed <= XFER_PIO_4) {
+ svwks_tune_pio(drive, speed - XFER_PIO_0);
+ return ide_config_drive_speed(drive, speed);
+ }
/* If we are about to put a disk into UDMA mode we screwed up.
Our code assumes we never _ever_ do this on an OSB4 */
@@ -153,31 +171,15 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
BUG();
pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
- pci_read_config_word(dev, 0x4A, &csb5_pio);
pci_read_config_byte(dev, 0x54, &ultra_enable);
ultra_timing &= ~(0x0F << (4*unit));
ultra_enable &= ~(0x01 << drive->dn);
- csb5_pio &= ~(0x0F << (4*drive->dn));
switch(speed) {
- case XFER_PIO_4:
- case XFER_PIO_3:
- case XFER_PIO_2:
- case XFER_PIO_1:
- case XFER_PIO_0:
- pio_timing |= pio_modes[speed - XFER_PIO_0];
- csb5_pio |= ((speed - XFER_PIO_0) << (4*drive->dn));
- break;
-
case XFER_MW_DMA_2:
case XFER_MW_DMA_1:
case XFER_MW_DMA_0:
- /*
- * TODO: always setup PIO mode so this won't be needed
- */
- pio_timing |= pio_modes[pio];
- csb5_pio |= (pio << (4*drive->dn));
dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
break;
@@ -187,11 +189,6 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
case XFER_UDMA_2:
case XFER_UDMA_1:
case XFER_UDMA_0:
- /*
- * TODO: always setup PIO mode so this won't be needed
- */
- pio_timing |= pio_modes[pio];
- csb5_pio |= (pio << (4*drive->dn));
dma_timing |= dma_modes[2];
ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
ultra_enable |= (0x01 << drive->dn);
@@ -199,10 +196,6 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
break;
}
- pci_write_config_byte(dev, drive_pci[drive->dn], pio_timing);
- if (csb5)
- pci_write_config_word(dev, 0x4A, csb5_pio);
-
pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
pci_write_config_byte(dev, 0x54, ultra_enable);
@@ -212,8 +205,9 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
- (void)svwks_tune_chipset(drive, XFER_PIO_0 + pio);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
+ svwks_tune_pio(drive, pio);
+ (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
static int svwks_config_drive_xfer_rate (ide_drive_t *drive)
@@ -234,9 +228,6 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
unsigned int reg;
u8 btr;
- /* save revision id to determine DMA capability */
- pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
-
/* force Master Latency Timer value to 64 PCICLKs */
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);
@@ -315,7 +306,7 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
if (!(PCI_FUNC(dev->devfn) & 1))
btr |= 0x2;
else
- btr |= (svwks_revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
+ btr |= (dev->revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
pci_write_config_byte(dev, 0x5A, btr);
}
/* Setup HT1000 SouthBridge Controller - Single Channel Only */
@@ -396,8 +387,6 @@ static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
{
- u8 dma_stat = 0;
-
if (!hwif->irq)
hwif->irq = hwif->channel ? 15 : 14;
@@ -414,11 +403,11 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
hwif->autodma = 0;
- if (!hwif->dma_base) {
- hwif->drives[0].autotune = 1;
- hwif->drives[1].autotune = 1;
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+
+ if (!hwif->dma_base)
return;
- }
hwif->ide_dma_check = &svwks_config_drive_xfer_rate;
if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
@@ -428,11 +417,7 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
if (!noautodma)
hwif->autodma = 1;
- dma_stat = inb(hwif->dma_status);
- hwif->drives[0].autodma = (dma_stat & 0x20);
- hwif->drives[1].autodma = (dma_stat & 0x40);
- hwif->drives[0].autotune = (!(dma_stat & 0x20));
- hwif->drives[1].autotune = (!(dma_stat & 0x40));
+ hwif->drives[0].autodma = hwif->drives[1].autodma = 1;
}
static int __devinit init_setup_svwks (struct pci_dev *dev, ide_pci_device_t *d)
@@ -448,9 +433,12 @@ static int __devinit init_setup_csb6 (struct pci_dev *dev, ide_pci_device_t *d)
d->bootable = ON_BOARD;
}
- d->channels = ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE ||
- dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2) &&
- (!(PCI_FUNC(dev->devfn) & 1))) ? 1 : 2;
+ if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE ||
+ dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2) &&
+ (!(PCI_FUNC(dev->devfn) & 1)))
+ d->host_flags |= IDE_HFLAG_SINGLE;
+ else
+ d->host_flags &= ~IDE_HFLAG_SINGLE;
return ide_setup_pci_device(dev, d);
}
@@ -461,41 +449,43 @@ static ide_pci_device_t serverworks_chipsets[] __devinitdata = {
.init_setup = init_setup_svwks,
.init_chipset = init_chipset_svwks,
.init_hwif = init_hwif_svwks,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
},{ /* 1 */
.name = "SvrWks CSB5",
.init_setup = init_setup_svwks,
.init_chipset = init_chipset_svwks,
.init_hwif = init_hwif_svwks,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
},{ /* 2 */
.name = "SvrWks CSB6",
.init_setup = init_setup_csb6,
.init_chipset = init_chipset_svwks,
.init_hwif = init_hwif_svwks,
- .channels = 2,
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
},{ /* 3 */
.name = "SvrWks CSB6",
.init_setup = init_setup_csb6,
.init_chipset = init_chipset_svwks,
.init_hwif = init_hwif_svwks,
- .channels = 1, /* 2 */
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
+ .pio_mask = ATA_PIO4,
},{ /* 4 */
.name = "SvrWks HT1000",
.init_setup = init_setup_svwks,
.init_chipset = init_chipset_svwks,
.init_hwif = init_hwif_svwks,
- .channels = 1, /* 2 */
.autodma = AUTODMA,
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
+ .pio_mask = ATA_PIO4,
}
};
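svwks_tune_pio() above keeps one 4-bit PIO field per drive in the CSB5 register at 0x4a, clearing and rewriting only that drive's nibble. A small sketch of the packing, using made-up register contents:

#include <stdio.h>

int main(void)
{
	unsigned short csb_pio = 0xffff;	/* assumed previous register value */
	unsigned int dn = 2, pio = 3;		/* third drive, PIO mode 3 */

	csb_pio &= ~(0x0f << (4 * dn));		/* clear that drive's nibble */
	csb_pio |= pio << (4 * dn);		/* write the new mode        */

	printf("csb_pio = 0x%04x\n", csb_pio);	/* prints 0xf3ff */
	return 0;
}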
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index d396b2929ed8..57145767c3df 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -586,6 +586,7 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
hwif->ultra_mask = 0x0; /* Disable Ultra DMA */
hwif->mwdma_mask = 0x2; /* Multimode-2 DMA */
hwif->swdma_mask = 0x2;
+ hwif->pio_mask = 0x00;
hwif->tuneproc = NULL; /* Sets timing for PIO mode */
hwif->speedproc = NULL; /* Sets timing for DMA &/or PIO modes */
hwif->selectproc = NULL;/* Use the default routine to select drive */
@@ -724,10 +725,10 @@ static ide_pci_device_t sgiioc4_chipset __devinitdata = {
.name = "SGIIOC4",
.init_hwif = ide_init_sgiioc4,
.init_dma = ide_dma_sgiioc4,
- .channels = 1,
.autodma = AUTODMA,
/* SGI IOC4 doesn't have enablebits. */
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
};
int
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 1c3e35487893..50f6d172ef77 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -1,9 +1,10 @@
/*
- * linux/drivers/ide/pci/siimage.c Version 1.12 Mar 10 2007
+ * linux/drivers/ide/pci/siimage.c Version 1.15 Jun 29 2007
*
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat <alan@redhat.com>
* Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
*
* May be copied or modified under the terms of the GNU General Public License
*
@@ -31,6 +32,10 @@
* unplugging/replugging the virtual CD interface when the DRAC is reset.
* This often causes drivers/ide/siimage to panic but is ok with the rather
* smarter code in libata.
+ *
+ * TODO:
+ * - IORDY fixes
+ * - VDMA support
*/
#include <linux/types.h>
@@ -160,82 +165,45 @@ out:
}
/**
- * siimage_taskfile_timing - turn timing data to a mode
- * @hwif: interface to query
- *
- * Read the timing data for the interface and return the
- * mode that is being used.
- */
-
-static byte siimage_taskfile_timing (ide_hwif_t *hwif)
-{
- u16 timing = 0x328a;
- unsigned long addr = siimage_selreg(hwif, 2);
-
- if (hwif->mmio)
- timing = hwif->INW(addr);
- else
- pci_read_config_word(hwif->pci_dev, addr, &timing);
-
- switch (timing) {
- case 0x10c1: return 4;
- case 0x10c3: return 3;
- case 0x1104:
- case 0x1281: return 2;
- case 0x2283: return 1;
- case 0x328a:
- default: return 0;
- }
-}
-
-/**
- * simmage_tuneproc - tune a drive
+ * sil_tune_pio - tune a drive
* @drive: drive to tune
- * @mode_wanted: the target operating mode
+ * @pio: the desired PIO mode
*
* Load the timing settings for this device mode into the
* controller. If we are in PIO mode 3 or 4 turn on IORDY
* monitoring (bit 9). The TF timing is bits 31:16
*/
-
-static void siimage_tuneproc (ide_drive_t *drive, byte mode_wanted)
+
+static void sil_tune_pio(ide_drive_t *drive, u8 pio)
{
+ const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
+ const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
+
ide_hwif_t *hwif = HWIF(drive);
+ ide_drive_t *pair = &hwif->drives[drive->dn ^ 1];
u32 speedt = 0;
u16 speedp = 0;
unsigned long addr = siimage_seldev(drive, 0x04);
unsigned long tfaddr = siimage_selreg(hwif, 0x02);
-
- /* cheat for now and use the docs */
- switch (mode_wanted) {
- case 4:
- speedp = 0x10c1;
- speedt = 0x10c1;
- break;
- case 3:
- speedp = 0x10c3;
- speedt = 0x10c3;
- break;
- case 2:
- speedp = 0x1104;
- speedt = 0x1281;
- break;
- case 1:
- speedp = 0x2283;
- speedt = 0x2283;
- break;
- case 0:
- default:
- speedp = 0x328a;
- speedt = 0x328a;
- break;
+ u8 tf_pio = pio;
+
+ /* trim *taskfile* PIO to the slowest of the master/slave */
+ if (pair->present) {
+ u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4);
+
+ if (pair_pio < tf_pio)
+ tf_pio = pair_pio;
}
+ /* cheat for now and use the docs */
+ speedp = data_speed[pio];
+ speedt = tf_speed[tf_pio];
+
if (hwif->mmio) {
hwif->OUTW(speedp, addr);
hwif->OUTW(speedt, tfaddr);
/* Now set up IORDY */
- if(mode_wanted == 3 || mode_wanted == 4)
+ if (pio > 2)
hwif->OUTW(hwif->INW(tfaddr-2)|0x200, tfaddr-2);
else
hwif->OUTW(hwif->INW(tfaddr-2)&~0x200, tfaddr-2);
@@ -245,42 +213,17 @@ static void siimage_tuneproc (ide_drive_t *drive, byte mode_wanted)
pci_read_config_word(hwif->pci_dev, tfaddr-2, &speedp);
speedp &= ~0x200;
/* Set IORDY for mode 3 or 4 */
- if(mode_wanted == 3 || mode_wanted == 4)
+ if (pio > 2)
speedp |= 0x200;
pci_write_config_word(hwif->pci_dev, tfaddr-2, speedp);
}
}
-/**
- * config_siimage_chipset_for_pio - set drive timings
- * @drive: drive to tune
- * @speed we want
- *
- * Compute the best pio mode we can for a given device. Also honour
- * the timings for the driver when dealing with mixed devices. Some
- * of this is ugly but its all wrapped up here
- *
- * The SI680 can also do VDMA - we need to start using that
- *
- * FIXME: we use the BIOS channel timings to avoid driving the task
- * files too fast at the disk. We need to compute the master/slave
- * drive PIO mode properly so that we can up the speed on a hotplug
- * system.
- */
-
-static void config_siimage_chipset_for_pio (ide_drive_t *drive, byte set_speed)
+static void sil_tuneproc(ide_drive_t *drive, u8 pio)
{
- u8 channel_timings = siimage_taskfile_timing(HWIF(drive));
- u8 speed = 0, set_pio = ide_get_best_pio_mode(drive, 4, 5, NULL);
-
- /* WARNING PIO timing mess is going to happen b/w devices, argh */
- if ((channel_timings != set_pio) && (set_pio > channel_timings))
- set_pio = channel_timings;
-
- siimage_tuneproc(drive, set_pio);
- speed = XFER_PIO_0 + set_pio;
- if (set_speed)
- (void) ide_config_drive_speed(drive, speed);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
+ sil_tune_pio(drive, pio);
+ (void)ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
/**
@@ -335,7 +278,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
case XFER_PIO_2:
case XFER_PIO_1:
case XFER_PIO_0:
- siimage_tuneproc(drive, (speed - XFER_PIO_0));
+ sil_tune_pio(drive, speed - XFER_PIO_0);
mode |= ((unit) ? 0x10 : 0x01);
break;
case XFER_MW_DMA_2:
@@ -343,7 +286,6 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
case XFER_MW_DMA_0:
multi = dma[speed - XFER_MW_DMA_0];
mode |= ((unit) ? 0x20 : 0x02);
- config_siimage_chipset_for_pio(drive, 0);
break;
case XFER_UDMA_6:
case XFER_UDMA_5:
@@ -356,7 +298,6 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
ultra |= ((scsc) ? (ultra6[speed - XFER_UDMA_0]) :
(ultra5[speed - XFER_UDMA_0]));
mode |= ((unit) ? 0x30 : 0x03);
- config_siimage_chipset_for_pio(drive, 0);
break;
default:
return 1;
@@ -390,7 +331,7 @@ static int siimage_config_drive_for_dma (ide_drive_t *drive)
return 0;
if (ide_use_fast_pio(drive))
- config_siimage_chipset_for_pio(drive, 1);
+ sil_tuneproc(drive, 255);
return -1;
}
@@ -961,7 +902,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
hwif->resetproc = &siimage_reset;
hwif->speedproc = &siimage_tune_chipset;
- hwif->tuneproc = &siimage_tuneproc;
+ hwif->tuneproc = &sil_tuneproc;
hwif->reset_poll = &siimage_reset_poll;
hwif->pre_reset = &siimage_pre_reset;
hwif->udma_filter = &sil_udma_filter;
@@ -976,11 +917,11 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
first = 0;
}
}
- if (!hwif->dma_base) {
- hwif->drives[0].autotune = 1;
- hwif->drives[1].autotune = 1;
+
+ hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
+
+ if (hwif->dma_base == 0)
return;
- }
hwif->ultra_mask = 0x7f;
hwif->mwdma_mask = 0x07;
@@ -1016,9 +957,9 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
.init_iops = init_iops_siimage, \
.init_hwif = init_hwif_siimage, \
.fixup = siimage_fixup, \
- .channels = 2, \
.autodma = AUTODMA, \
.bootable = ON_BOARD, \
+ .pio_mask = ATA_PIO4, \
}
static ide_pci_device_t siimage_chipsets[] __devinitdata = {
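sil_tune_pio() above keeps per-drive data timings but trims the shared taskfile timing to the slower device on the channel. A trivial sketch of that rule with example modes:

#include <stdio.h>

int main(void)
{
	unsigned char pio = 4;		/* mode chosen for this drive   */
	unsigned char pair_pio = 2;	/* best mode of the other drive */
	unsigned char tf_pio = pio;

	if (pair_pio < tf_pio)		/* taskfile registers are shared,    */
		tf_pio = pair_pio;	/* so use the slower device's timing */

	printf("data: PIO%u, taskfile: PIO%u\n", pio, tf_pio);
	return 0;
}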
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index f875183ac8d9..63fbb79e8178 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -521,7 +521,7 @@ static void config_art_rwp_pio (ide_drive_t *drive, u8 pio)
static int sis5513_tune_drive(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
config_art_rwp_pio(drive, pio);
return ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
@@ -659,9 +659,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
/* Special case for SiS630 : 630S/ET is ATA_100a */
if (SiSHostChipInfo[i].host_id == PCI_DEVICE_ID_SI_630) {
- u8 hostrev;
- pci_read_config_byte(host, PCI_REVISION_ID, &hostrev);
- if (hostrev >= 0x30)
+ if (host->revision >= 0x30)
chipset_family = ATA_100a;
}
pci_dev_put(host);
@@ -702,7 +700,6 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
u16 trueid;
u8 prefctl;
u8 idecfg;
- u8 sbrev;
pci_read_config_byte(dev, 0x4a, &idecfg);
pci_write_config_byte(dev, 0x4a, idecfg | 0x10);
@@ -712,11 +709,10 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
if (trueid == 0x5517) { /* SiS 961/961B */
lpc_bridge = pci_get_slot(dev->bus, 0x10); /* Bus 0, Dev 2, Fn 0 */
- pci_read_config_byte(lpc_bridge, PCI_REVISION_ID, &sbrev);
pci_read_config_byte(dev, 0x49, &prefctl);
pci_dev_put(lpc_bridge);
- if (sbrev == 0x10 && (prefctl & 0x80)) {
+ if (lpc_bridge->revision == 0x10 && (prefctl & 0x80)) {
printk(KERN_INFO "SIS5513: SiS 961B MuTIOL IDE UDMA133 controller\n");
chipset_family = ATA_133a;
} else {
@@ -882,10 +878,10 @@ static ide_pci_device_t sis5513_chipset __devinitdata = {
.name = "SIS5513",
.init_chipset = init_chipset_sis5513,
.init_hwif = init_hwif_sis5513,
- .channels = 2,
.autodma = NOAUTODMA,
.enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
};
static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 487879842af4..0947cab00595 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -52,12 +52,13 @@
* Convert a PIO mode and cycle time to the required on/off times
* for the interface. This has protection against runaway timings.
*/
-static unsigned int get_pio_timings(ide_pio_data_t *p)
+static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
{
unsigned int cmd_on, cmd_off;
+ u8 iordy = 0;
- cmd_on = (ide_pio_timings[p->pio_mode].active_time + 29) / 30;
- cmd_off = (p->cycle_time - 30 * cmd_on + 29) / 30;
+ cmd_on = (ide_pio_timings[pio].active_time + 29) / 30;
+ cmd_off = (ide_pio_cycle_time(drive, pio) - 30 * cmd_on + 29) / 30;
if (cmd_on == 0)
cmd_on = 1;
@@ -65,7 +66,10 @@ static unsigned int get_pio_timings(ide_pio_data_t *p)
if (cmd_off == 0)
cmd_off = 1;
- return (cmd_on - 1) << 8 | (cmd_off - 1) | (p->use_iordy ? 0x40 : 0x00);
+ if (pio > 2 || ide_dev_has_iordy(drive->id))
+ iordy = 0x40;
+
+ return (cmd_on - 1) << 8 | (cmd_off - 1) | iordy;
}
/*
@@ -75,14 +79,13 @@ static u8 sl82c105_tune_pio(ide_drive_t *drive, u8 pio)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
int reg = 0x44 + drive->dn * 4;
- ide_pio_data_t p;
u16 drv_ctrl;
DBG(("sl82c105_tune_pio(drive:%s, pio:%u)\n", drive->name, pio));
- pio = ide_get_best_pio_mode(drive, pio, 5, &p);
+ pio = ide_get_best_pio_mode(drive, pio, 5);
- drv_ctrl = get_pio_timings(&p);
+ drv_ctrl = get_pio_timings(drive, pio);
/*
* Store the PIO timings so that we can restore them
@@ -101,7 +104,8 @@ static u8 sl82c105_tune_pio(ide_drive_t *drive, u8 pio)
}
printk(KERN_DEBUG "%s: selected %s (%dns) (%04X)\n", drive->name,
- ide_xfer_verbose(pio + XFER_PIO_0), p.cycle_time, drv_ctrl);
+ ide_xfer_verbose(pio + XFER_PIO_0),
+ ide_pio_cycle_time(drive, pio), drv_ctrl);
return pio;
}
@@ -338,7 +342,6 @@ static void sl82c105_tune_drive(ide_drive_t *drive, u8 pio)
static unsigned int sl82c105_bridge_revision(struct pci_dev *dev)
{
struct pci_dev *bridge;
- u8 rev;
/*
* The bridge should be part of the same device, but function 0.
@@ -360,10 +363,9 @@ static unsigned int sl82c105_bridge_revision(struct pci_dev *dev)
/*
* We need to find function 0's revision, not function 1
*/
- pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
pci_dev_put(bridge);
- return rev;
+ return bridge->revision;
}
/*
@@ -451,10 +453,10 @@ static ide_pci_device_t sl82c105_chipset __devinitdata = {
.name = "W82C105",
.init_chipset = init_chipset_sl82c105,
.init_hwif = init_hwif_sl82c105,
- .channels = 2,
.autodma = NOAUTODMA,
.enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO5,
};
static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
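get_pio_timings() above converts nanosecond PIO figures into 30 ns PCI-clock counts with round-up division, then packs them together with the IORDY bit into one 16-bit word. A worked sketch, assuming the usual ATA PIO4 figures of 70 ns active and 120 ns cycle time:

#include <stdio.h>

int main(void)
{
	unsigned int active = 70, cycle = 120;		/* assumed PIO4 timings, in ns */
	unsigned int cmd_on, cmd_off, drv_ctrl;

	cmd_on  = (active + 29) / 30;			/* 3 ticks, rounded up */
	cmd_off = (cycle - 30 * cmd_on + 29) / 30;	/* 1 tick of recovery  */

	if (cmd_on == 0)
		cmd_on = 1;
	if (cmd_off == 0)
		cmd_off = 1;

	/* PIO mode > 2 turns on IORDY monitoring (bit 0x40) */
	drv_ctrl = (cmd_on - 1) << 8 | (cmd_off - 1) | 0x40;
	printf("drv_ctrl = 0x%04x\n", drv_ctrl);	/* prints 0x0240 */
	return 0;
}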
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 575dbbd8b482..8e655f2db5cb 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -103,7 +103,7 @@ static void slc90e66_tune_pio (ide_drive_t *drive, u8 pio)
static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
slc90e66_tune_pio(drive, pio);
(void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
}
@@ -214,10 +214,10 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
static ide_pci_device_t slc90e66_chipset __devinitdata = {
.name = "SLC90E66",
.init_hwif = init_hwif_slc90e66,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
};
static int __devinit slc90e66_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 8de1f8e22494..ec79bacc30c2 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -47,7 +47,7 @@ static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed)
static void tc86c001_tune_drive(ide_drive_t *drive, u8 pio)
{
- pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
(void) tc86c001_tune_chipset(drive, XFER_PIO_0 + pio);
}
@@ -248,9 +248,10 @@ static ide_pci_device_t tc86c001_chipset __devinitdata = {
.name = "TC86C001",
.init_chipset = init_chipset_tc86c001,
.init_hwif = init_hwif_tc86c001,
- .channels = 1,
.autodma = AUTODMA,
- .bootable = OFF_BOARD
+ .bootable = OFF_BOARD,
+ .host_flags = IDE_HFLAG_SINGLE,
+ .pio_mask = ATA_PIO4,
};
static int __devinit tc86c001_init_one(struct pci_dev *dev,
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index 35e8c612638f..024bbfae0429 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -96,7 +96,7 @@ static int triflex_tune_chipset(ide_drive_t *drive, u8 xferspeed)
static void triflex_tune_drive(ide_drive_t *drive, u8 pio)
{
- int use_pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ int use_pio = ide_get_best_pio_mode(drive, pio, 4);
(void) triflex_tune_chipset(drive, (XFER_PIO_0 + use_pio));
}
@@ -129,10 +129,10 @@ static void __devinit init_hwif_triflex(ide_hwif_t *hwif)
static ide_pci_device_t triflex_device __devinitdata = {
.name = "TRIFLEX",
.init_hwif = init_hwif_triflex,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
.bootable = ON_BOARD,
+ .pio_mask = ATA_PIO4,
};
static int __devinit triflex_init_one(struct pci_dev *dev,
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index cbb1b11119a5..dc4f4e298e00 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -327,7 +327,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
static ide_pci_device_t trm290_chipset __devinitdata = {
.name = "TRM290",
.init_hwif = init_hwif_trm290,
- .channels = 2,
.autodma = NOAUTODMA,
.bootable = ON_BOARD,
};
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index d21dd2e7eeb3..581316f9581d 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -1,6 +1,6 @@
/*
*
- * Version 3.45
+ * Version 3.46
*
* VIA IDE driver for Linux. Supported southbridges:
*
@@ -203,10 +203,8 @@ static int via_set_drive(ide_drive_t *drive, u8 speed)
static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
{
- if (pio == 255) {
- via_set_drive(drive, ide_find_best_pio_mode(drive));
- return;
- }
+ if (pio == 255)
+ pio = ide_get_best_pio_mode(drive, 255, 5);
via_set_drive(drive, XFER_PIO_0 + min_t(u8, pio, 5));
}
@@ -223,12 +221,14 @@ static int via82cxxx_ide_dma_check (ide_drive_t *drive)
{
u8 speed = ide_max_dma_mode(drive);
- if (speed == 0)
- speed = ide_find_best_pio_mode(drive);
+ if (speed == 0) {
+ via82cxxx_tune_drive(drive, 255);
+ return -1;
+ }
via_set_drive(drive, speed);
- if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
+ if (drive->autodma)
return 0;
return -1;
@@ -237,16 +237,14 @@ static int via82cxxx_ide_dma_check (ide_drive_t *drive)
static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
{
struct via_isa_bridge *via_config;
- u8 t;
for (via_config = via_isa_bridges; via_config->id; via_config++)
if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA +
!!(via_config->flags & VIA_BAD_ID),
via_config->id, NULL))) {
- pci_read_config_byte(*isa, PCI_REVISION_ID, &t);
- if (t >= via_config->rev_min &&
- t <= via_config->rev_max)
+ if ((*isa)->revision >= via_config->rev_min &&
+ (*isa)->revision <= via_config->rev_max)
break;
pci_dev_put(*isa);
}
@@ -404,10 +402,9 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
* Print the boot message.
*/
- pci_read_config_byte(isa, PCI_REVISION_ID, &t);
printk(KERN_INFO "VP_IDE: VIA %s (rev %02x) IDE %sDMA%s "
"controller on pci%s\n",
- via_config->name, t,
+ via_config->name, isa->revision,
via_config->udma_mask ? "U" : "MW",
via_dma[via_config->udma_mask ?
(fls(via_config->udma_mask) - 1) : 0],
@@ -501,18 +498,22 @@ static ide_pci_device_t via82cxxx_chipsets[] __devinitdata = {
.name = "VP_IDE",
.init_chipset = init_chipset_via82cxxx,
.init_hwif = init_hwif_via82cxxx,
- .channels = 2,
.autodma = NOAUTODMA,
.enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}},
- .bootable = ON_BOARD
+ .bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST
+ | IDE_HFLAG_PIO_NO_DOWNGRADE,
+ .pio_mask = ATA_PIO5,
},{ /* 1 */
.name = "VP_IDE",
.init_chipset = init_chipset_via82cxxx,
.init_hwif = init_hwif_via82cxxx,
- .channels = 2,
.autodma = AUTODMA,
.enablebits = {{0x00,0x00,0x00}, {0x00,0x00,0x00}},
.bootable = ON_BOARD,
+ .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST
+ | IDE_HFLAG_PIO_NO_DOWNGRADE,
+ .pio_mask = ATA_PIO5,
}
};
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c
index 82de2d781f2e..8859fe2f5ac2 100644
--- a/drivers/ide/ppc/mpc8xx.c
+++ b/drivers/ide/ppc/mpc8xx.c
@@ -316,6 +316,7 @@ m8xx_ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
}
/* register routine to tune PIO mode */
+ ide_hwifs[data_port].pio_mask = ATA_PIO4;
ide_hwifs[data_port].tuneproc = m8xx_ide_tuneproc;
hw->ack_intr = (ide_ack_intr_t *) ide_interrupt_ack;
@@ -402,6 +403,7 @@ void m8xx_ide_init_hwif_ports (hw_regs_t *hw,
}
/* register routine to tune PIO mode */
+ ide_hwifs[data_port].pio_mask = ATA_PIO4;
ide_hwifs[data_port].tuneproc = m8xx_ide_tuneproc;
hw->ack_intr = (ide_ack_intr_t *) ide_interrupt_ack;
@@ -431,13 +433,12 @@ void m8xx_ide_init_hwif_ports (hw_regs_t *hw,
static void
m8xx_ide_tuneproc(ide_drive_t *drive, u8 pio)
{
- ide_pio_data_t d;
#if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
volatile pcmconf8xx_t *pcmp;
ulong timing, mask, reg;
#endif
- pio = ide_get_best_pio_mode(drive, pio, 4, &d);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
#if 1
printk("%s[%d] %s: best PIO mode: %d\n",
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index e46f47206542..33630ad3e794 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -615,24 +615,25 @@ out:
static void
pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
{
- ide_pio_data_t d;
u32 *timings;
unsigned accessTicks, recTicks;
unsigned accessTime, recTime;
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
-
+ unsigned int cycle_time;
+
if (pmif == NULL)
return;
/* which drive is it ? */
timings = &pmif->timings[drive->select.b.unit & 0x01];
- pio = ide_get_best_pio_mode(drive, pio, 4, &d);
+ pio = ide_get_best_pio_mode(drive, pio, 4);
+ cycle_time = ide_pio_cycle_time(drive, pio);
switch (pmif->kind) {
case controller_sh_ata6: {
/* 133Mhz cell */
- u32 tr = kauai_lookup_timing(shasta_pio_timings, d.cycle_time);
+ u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
if (tr == 0)
return;
*timings = ((*timings) & ~TR_133_PIOREG_PIO_MASK) | tr;
@@ -641,7 +642,7 @@ pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
case controller_un_ata6:
case controller_k2_ata6: {
/* 100Mhz cell */
- u32 tr = kauai_lookup_timing(kauai_pio_timings, d.cycle_time);
+ u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
if (tr == 0)
return;
*timings = ((*timings) & ~TR_100_PIOREG_PIO_MASK) | tr;
@@ -649,7 +650,7 @@ pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
}
case controller_kl_ata4:
/* 66Mhz cell */
- recTime = d.cycle_time - ide_pio_timings[pio].active_time
+ recTime = cycle_time - ide_pio_timings[pio].active_time
- ide_pio_timings[pio].setup_time;
recTime = max(recTime, 150U);
accessTime = ide_pio_timings[pio].active_time;
@@ -665,7 +666,7 @@ pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
default: {
/* 33Mhz cell */
int ebit = 0;
- recTime = d.cycle_time - ide_pio_timings[pio].active_time
+ recTime = cycle_time - ide_pio_timings[pio].active_time
- ide_pio_timings[pio].setup_time;
recTime = max(recTime, 150U);
accessTime = ide_pio_timings[pio].active_time;
@@ -1247,6 +1248,7 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
hwif->drives[0].unmask = 1;
hwif->drives[1].unmask = 1;
+ hwif->pio_mask = ATA_PIO4;
hwif->tuneproc = pmac_ide_tuneproc;
if (pmif->kind == controller_un_ata6
|| pmif->kind == controller_k2_ata6
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index c88d33225cf9..30e596c0f120 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -5,12 +5,6 @@
*
* Copyright (c) 1995-1998 Mark Lord
* May be copied or modified under the terms of the GNU General Public License
- *
- * Recent Changes
- * Split the set up function into multiple functions
- * Use pci_set_master
- * Fix misreporting of I/O v MMIO problems
- * Initial fixups for simplex devices
*/
/*
@@ -407,7 +401,7 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev, ide_pci_device_t *d,
unsigned long ctl = 0, base = 0;
ide_hwif_t *hwif;
- if ((d->flags & IDEPCI_FLAG_ISA_PORTS) == 0) {
+ if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
/* Possibly we should fail if these checks report true */
ide_pci_check_iomem(dev, d, 2*port);
ide_pci_check_iomem(dev, d, 2*port+1);
@@ -571,7 +565,7 @@ out:
void ide_pci_setup_ports(struct pci_dev *dev, ide_pci_device_t *d, int pciirq, ata_index_t *index)
{
- int port;
+ int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
int at_least_one_hwif_enabled = 0;
ide_hwif_t *hwif, *mate = NULL;
u8 tmp;
@@ -582,16 +576,13 @@ void ide_pci_setup_ports(struct pci_dev *dev, ide_pci_device_t *d, int pciirq, a
* Set up the IDE ports
*/
- for (port = 0; port <= 1; ++port) {
+ for (port = 0; port < channels; ++port) {
ide_pci_enablebit_t *e = &(d->enablebits[port]);
if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
(tmp & e->mask) != e->val))
continue; /* port not enabled */
- if (d->channels <= port)
- break;
-
if ((hwif = ide_hwif_configure(dev, d, mate, port, pciirq)) == NULL)
continue;
@@ -616,6 +607,9 @@ void ide_pci_setup_ports(struct pci_dev *dev, ide_pci_device_t *d, int pciirq, a
else
ide_hwif_setup_dma(dev, d, hwif);
bypass_legacy_dma:
+ hwif->host_flags = d->host_flags;
+ hwif->pio_mask = d->pio_mask;
+
if (d->init_hwif)
/* Call chipset-specific routine
* for each enabled hwif
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 93362eed94ed..3a9d7e2d4de6 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -1729,7 +1729,7 @@ static int __init ether1394_init_module(void)
packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct packet_task),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!packet_task_cache)
return -ENOMEM;
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 0fc8c6e559e4..ee45259573c8 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,6 +30,7 @@
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/kdev_t.h>
+#include <linux/freezer.h>
#include <linux/suspend.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
@@ -1128,8 +1129,6 @@ static int hpsbpkt_thread(void *__hi)
struct list_head tmp;
int may_schedule;
- current->flags |= PF_NOFREEZE;
-
while (!kthread_should_stop()) {
INIT_LIST_HEAD(&tmp);
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index c4d3d4131f01..2ffd53461db6 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -283,7 +283,7 @@ static ssize_t fw_show_##class##_##td_kv (struct device *dev, struct device_attr
memcpy(buf, \
CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(class->td_kv), \
len); \
- while ((buf + len - 1) == '\0') \
+ while (buf[len - 1] == '\0') \
len--; \
buf[len++] = '\n'; \
buf[len] = '\0'; \
@@ -1699,6 +1699,7 @@ static int nodemgr_host_thread(void *__hi)
unsigned int g, generation = 0;
int i, reset_cycles = 0;
+ set_freezable();
/* Setup our device-model entries */
nodemgr_create_host_dev_files(host);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index e0c385a3b450..e882cb951b47 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1509,69 +1509,6 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
}
}
-static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
- struct sbp2_fwhost_info *hi,
- struct sbp2_command_info *cmd,
- struct scatterlist *sgpnt,
- u32 orb_direction,
- unsigned int scsi_request_bufflen,
- void *scsi_request_buffer,
- enum dma_data_direction dma_dir)
-{
- cmd->dma_dir = dma_dir;
- cmd->dma_size = scsi_request_bufflen;
- cmd->dma_type = CMD_DMA_SINGLE;
- cmd->cmd_dma = dma_map_single(hi->host->device.parent,
- scsi_request_buffer,
- cmd->dma_size, cmd->dma_dir);
- orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- orb->misc |= ORB_SET_DIRECTION(orb_direction);
-
- /* handle case where we get a command w/o s/g enabled
- * (but check for transfers larger than 64K) */
- if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
-
- orb->data_descriptor_lo = cmd->cmd_dma;
- orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
-
- } else {
- /* The buffer is too large. Turn this into page tables. */
-
- struct sbp2_unrestricted_page_table *sg_element =
- &cmd->scatter_gather_element[0];
- u32 sg_count, sg_len;
- dma_addr_t sg_addr;
-
- orb->data_descriptor_lo = cmd->sge_dma;
- orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
-
- /* fill out our SBP-2 page tables; split up the large buffer */
- sg_count = 0;
- sg_len = scsi_request_bufflen;
- sg_addr = cmd->cmd_dma;
- while (sg_len) {
- sg_element[sg_count].segment_base_lo = sg_addr;
- if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
- sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
- sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
- } else {
- sg_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
- sg_len = 0;
- }
- sg_count++;
- }
-
- orb->misc |= ORB_SET_DATA_SIZE(sg_count);
-
- sbp2util_cpu_to_be32_buffer(sg_element,
- (sizeof(struct sbp2_unrestricted_page_table)) *
- sg_count);
- }
-}
-
static void sbp2_create_command_orb(struct sbp2_lu *lu,
struct sbp2_command_info *cmd,
unchar *scsi_cmd,
@@ -1615,13 +1552,9 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
orb->data_descriptor_hi = 0x0;
orb->data_descriptor_lo = 0x0;
orb->misc |= ORB_SET_DIRECTION(1);
- } else if (scsi_use_sg)
+ } else
sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
orb_direction, dma_dir);
- else
- sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
- scsi_request_bufflen,
- scsi_request_buffer, dma_dir);
sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
@@ -1710,15 +1643,15 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
- unsigned int request_bufflen = SCpnt->request_bufflen;
+ unsigned int request_bufflen = scsi_bufflen(SCpnt);
struct sbp2_command_info *cmd;
cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
if (!cmd)
return -EIO;
- sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg,
- request_bufflen, SCpnt->request_buffer,
+ sbp2_create_command_orb(lu, cmd, scsi_cmd, scsi_sg_count(SCpnt),
+ request_bufflen, scsi_sglist(SCpnt),
SCpnt->sc_data_direction);
sbp2_link_orb_command(lu, cmd);
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 994decc7bcf2..a193dfbf99d2 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,14 +1,14 @@
-menu "InfiniBand support"
- depends on HAS_IOMEM
-
-config INFINIBAND
- depends on PCI || BROKEN
+menuconfig INFINIBAND
tristate "InfiniBand support"
+ depends on PCI || BROKEN
+ depends on HAS_IOMEM
---help---
Core support for InfiniBand (IB). Make sure to also select
any protocols you wish to use as well as drivers for your
InfiniBand hardware.
+if INFINIBAND
+
config INFINIBAND_USER_MAD
tristate "InfiniBand userspace MAD support"
depends on INFINIBAND
@@ -20,7 +20,6 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
- depends on INFINIBAND
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
@@ -37,7 +36,7 @@ config INFINIBAND_USER_MEM
config INFINIBAND_ADDR_TRANS
bool
- depends on INFINIBAND && INET
+ depends on INET
default y
source "drivers/infiniband/hw/mthca/Kconfig"
@@ -54,4 +53,4 @@ source "drivers/infiniband/ulp/srp/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
-endmenu
+endif # INFINIBAND
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a91001c59b69..c5c33d35f87d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -295,10 +295,9 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
struct addr_req *req;
int ret = 0;
- req = kmalloc(sizeof *req, GFP_KERNEL);
+ req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
- memset(req, 0, sizeof *req);
if (src_addr)
memcpy(&req->src_addr, src_addr, ip_addr_size(src_addr));
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index ecd1a3057c61..db2633e4aae6 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -3,7 +3,7 @@
* Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -34,7 +34,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
*/
#include <linux/slab.h>
@@ -42,6 +41,7 @@
#include "agent.h"
#include "smi.h"
+#include "mad_priv.h"
#define SPFX "ib_agent: "
@@ -87,8 +87,13 @@ int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
struct ib_mad_send_buf *send_buf;
struct ib_ah *ah;
int ret;
+ struct ib_mad_send_wr_private *mad_send_wr;
+
+ if (device->node_type == RDMA_NODE_IB_SWITCH)
+ port_priv = ib_get_agent_port(device, 0);
+ else
+ port_priv = ib_get_agent_port(device, port_num);
- port_priv = ib_get_agent_port(device, port_num);
if (!port_priv) {
printk(KERN_ERR SPFX "Unable to find port agent\n");
return -ENODEV;
@@ -113,6 +118,14 @@ int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
memcpy(send_buf->mad, mad, sizeof *mad);
send_buf->ah = ah;
+
+ if (device->node_type == RDMA_NODE_IB_SWITCH) {
+ mad_send_wr = container_of(send_buf,
+ struct ib_mad_send_wr_private,
+ send_buf);
+ mad_send_wr->send_wr.wr.ud.port_num = port_num;
+ }
+
if ((ret = ib_post_send_mad(send_buf, NULL))) {
printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret);
goto err2;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 40c004a2697e..4df269f5d9ac 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -87,6 +87,7 @@ struct cm_port {
struct cm_device {
struct list_head list;
struct ib_device *device;
+ u8 ack_delay;
struct cm_port port[0];
};
@@ -95,7 +96,7 @@ struct cm_av {
union ib_gid dgid;
struct ib_ah_attr ah_attr;
u16 pkey_index;
- u8 packet_life_time;
+ u8 timeout;
};
struct cm_work {
@@ -154,6 +155,7 @@ struct cm_id_private {
u8 retry_count;
u8 rnr_retry_count;
u8 service_timeout;
+ u8 target_ack_delay;
struct list_head work_list;
atomic_t work_count;
@@ -293,7 +295,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
av->port = port;
ib_init_ah_from_path(cm_dev->device, port->port_num, path,
&av->ah_attr);
- av->packet_life_time = path->packet_life_time;
+ av->timeout = path->packet_life_time + 1;
return 0;
}
@@ -318,12 +320,10 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
static void cm_free_id(__be32 local_id)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cm.lock, flags);
+ spin_lock_irq(&cm.lock);
idr_remove(&cm.local_id_table,
(__force int) (local_id ^ cm.random_id_operand));
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
}
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
@@ -345,11 +345,10 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
- unsigned long flags;
- spin_lock_irqsave(&cm.lock, flags);
+ spin_lock_irq(&cm.lock);
cm_id_priv = cm_get_id(local_id, remote_id);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
return cm_id_priv;
}
@@ -646,6 +645,25 @@ static inline int cm_convert_to_ms(int iba_time)
return 1 << max(iba_time - 8, 0);
}
+/*
+ * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
+ * Because of how ack_timeout is stored, adding one doubles the timeout.
+ * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
+ * increment it (round up) only if the other is within 50%.
+ */
+static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
+{
+ int ack_timeout = packet_life_time + 1;
+
+ if (ack_timeout >= ca_ack_delay)
+ ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
+ else
+ ack_timeout = ca_ack_delay +
+ (ack_timeout >= (ca_ack_delay - 1));
+
+ return min(31, ack_timeout);
+}
+
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
if (timewait_info->inserted_remote_id) {
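The comment on cm_ack_timeout() above describes its rounding rule in words; the standalone harness below (an illustration, not kernel code) runs the same arithmetic on concrete values, where both arguments and the result are log2 units of 4.096 microseconds:

#include <stdio.h>

static int ack_timeout(int ca_ack_delay, int packet_life_time)
{
	int t = packet_life_time + 1;		/* +1 doubles the encoded timeout */

	if (t >= ca_ack_delay)
		t += (ca_ack_delay >= t - 1);	/* round up if delay is within 50% */
	else
		t = ca_ack_delay + (t >= ca_ack_delay - 1);

	return t < 31 ? t : 31;			/* the field is 5 bits wide */
}

int main(void)
{
	printf("%d\n", ack_timeout(15, 14));	/* 16: comparable values round up */
	printf("%d\n", ack_timeout(5, 14));	/* 15: packet life time dominates */
	printf("%d\n", ack_timeout(20, 10));	/* 20: CA ack delay dominates     */
	return 0;
}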
@@ -689,7 +707,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
* timewait before notifying the user that we've exited timewait.
*/
cm_id_priv->id.state = IB_CM_TIMEWAIT;
- wait_time = cm_convert_to_ms(cm_id_priv->av.packet_life_time + 1);
+ wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
cm_id_priv->timewait_info = NULL;
@@ -713,31 +731,30 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
- unsigned long flags;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
switch (cm_id->state) {
case IB_CM_LISTEN:
cm_id->state = IB_CM_IDLE;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- spin_lock_irqsave(&cm.lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
+ spin_lock_irq(&cm.lock);
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_SIDR_REQ_RCVD:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
break;
case IB_CM_REQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
&cm_id_priv->id.device->node_guid,
sizeof cm_id_priv->id.device->node_guid,
@@ -747,9 +764,9 @@ retest:
if (err == -ENOMEM) {
/* Do not reject to allow future retries. */
cm_reset_to_idle(cm_id_priv);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
} else {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
}
@@ -762,25 +779,25 @@ retest:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_dreq(cm_id, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_DREQ_RCVD:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_drep(cm_id, NULL, 0);
break;
default:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
break;
}
@@ -912,7 +929,8 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_primary_sl(req_msg, param->primary_path->sl);
cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
cm_req_set_primary_local_ack_timeout(req_msg,
- min(31, param->primary_path->packet_life_time + 1));
+ cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
+ param->primary_path->packet_life_time));
if (param->alternate_path) {
req_msg->alt_local_lid = param->alternate_path->slid;
@@ -927,7 +945,8 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
cm_req_set_alt_local_ack_timeout(req_msg,
- min(31, param->alternate_path->packet_life_time + 1));
+ cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
+ param->alternate_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
@@ -1169,7 +1188,6 @@ static void cm_format_req_event(struct cm_work *work,
static void cm_process_work(struct cm_id_private *cm_id_priv,
struct cm_work *work)
{
- unsigned long flags;
int ret;
/* We will typically only have the current event to report. */
@@ -1177,9 +1195,9 @@ static void cm_process_work(struct cm_id_private *cm_id_priv,
cm_free_work(work);
while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
BUG_ON(!work);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
&work->cm_event);
@@ -1250,7 +1268,6 @@ static void cm_dup_req_handler(struct cm_work *work,
struct cm_id_private *cm_id_priv)
{
struct ib_mad_send_buf *msg = NULL;
- unsigned long flags;
int ret;
/* Quick state check to discard duplicate REQs. */
@@ -1261,7 +1278,7 @@ static void cm_dup_req_handler(struct cm_work *work,
if (ret)
return;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
@@ -1276,14 +1293,14 @@ static void cm_dup_req_handler(struct cm_work *work,
default:
goto unlock;
}
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
return;
-unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
}
@@ -1293,17 +1310,16 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
- unsigned long flags;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
/* Check for possible duplicate REQ. */
- spin_lock_irqsave(&cm.lock, flags);
+ spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
if (cur_cm_id_priv) {
cm_dup_req_handler(work, cur_cm_id_priv);
cm_deref_id(cur_cm_id_priv);
@@ -1315,7 +1331,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
@@ -1328,7 +1344,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
req_msg->private_data);
if (!listen_cm_id_priv) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
@@ -1338,7 +1354,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
atomic_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
out:
return listen_cm_id_priv;
}
@@ -1440,7 +1456,8 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
rep_msg->resp_resources = param->responder_resources;
rep_msg->initiator_depth = param->initiator_depth;
- cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
+ cm_rep_set_target_ack_delay(rep_msg,
+ cm_id_priv->av.port->cm_dev->ack_delay);
cm_rep_set_failover(rep_msg, param->failover_accepted);
cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
@@ -1591,7 +1608,6 @@ static void cm_dup_rep_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
struct ib_mad_send_buf *msg = NULL;
- unsigned long flags;
int ret;
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
@@ -1604,7 +1620,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
if (ret)
goto deref;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
cm_id_priv->private_data,
@@ -1616,14 +1632,14 @@ static void cm_dup_rep_handler(struct cm_work *work)
cm_id_priv->private_data_len);
else
goto unlock;
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
goto deref;
-unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
deref: cm_deref_id(cm_id_priv);
}
@@ -1632,7 +1648,6 @@ static int cm_rep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
- unsigned long flags;
int ret;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1644,13 +1659,13 @@ static int cm_rep_handler(struct cm_work *work)
cm_format_rep_event(work);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
break;
default:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto error;
}
@@ -1663,7 +1678,7 @@ static int cm_rep_handler(struct cm_work *work)
/* Check for duplicate REP. */
if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
spin_unlock(&cm.lock);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto error;
}
@@ -1673,7 +1688,7 @@ static int cm_rep_handler(struct cm_work *work)
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
spin_unlock(&cm.lock);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
@@ -1689,6 +1704,13 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv->responder_resources = rep_msg->initiator_depth;
cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
+ cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
+ cm_id_priv->av.timeout =
+ cm_ack_timeout(cm_id_priv->target_ack_delay,
+ cm_id_priv->av.timeout - 1);
+ cm_id_priv->alt_av.timeout =
+ cm_ack_timeout(cm_id_priv->target_ack_delay,
+ cm_id_priv->alt_av.timeout - 1);
/* todo: handle peer_to_peer */
@@ -1696,7 +1718,7 @@ static int cm_rep_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -1712,7 +1734,6 @@ error:
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
- unsigned long flags;
int ret;
/* See comment in cm_establish about lookup. */
@@ -1720,9 +1741,9 @@ static int cm_establish_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
@@ -1730,7 +1751,7 @@ static int cm_establish_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -1746,7 +1767,6 @@ static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
- unsigned long flags;
int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1757,10 +1777,10 @@ static int cm_rtu_handler(struct cm_work *work)
work->cm_event.private_data = &rtu_msg->private_data;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
@@ -1769,7 +1789,7 @@ static int cm_rtu_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -1932,7 +1952,6 @@ static int cm_dreq_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
- unsigned long flags;
int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1945,7 +1964,7 @@ static int cm_dreq_handler(struct cm_work *work)
work->cm_event.private_data = &dreq_msg->private_data;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
goto unlock;
@@ -1964,7 +1983,7 @@ static int cm_dreq_handler(struct cm_work *work)
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
@@ -1977,7 +1996,7 @@ static int cm_dreq_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -1985,7 +2004,7 @@ static int cm_dreq_handler(struct cm_work *work)
cm_deref_id(cm_id_priv);
return 0;
-unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock: spin_unlock_irq(&cm_id_priv->lock);
deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
@@ -1994,7 +2013,6 @@ static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
- unsigned long flags;
int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2005,10 +2023,10 @@ static int cm_drep_handler(struct cm_work *work)
work->cm_event.private_data = &drep_msg->private_data;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_enter_timewait(cm_id_priv);
@@ -2017,7 +2035,7 @@ static int cm_drep_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -2107,17 +2125,16 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
- unsigned long flags;
__be32 remote_id;
remote_id = rej_msg->local_comm_id;
if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
- spin_lock_irqsave(&cm.lock, flags);
+ spin_lock_irq(&cm.lock);
timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
remote_id);
if (!timewait_info) {
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
return NULL;
}
cm_id_priv = idr_find(&cm.local_id_table, (__force int)
@@ -2129,7 +2146,7 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
else
cm_id_priv = NULL;
}
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
else
@@ -2142,7 +2159,6 @@ static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
- unsigned long flags;
int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2152,7 +2168,7 @@ static int cm_rej_handler(struct cm_work *work)
cm_format_rej_event(work);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
@@ -2176,7 +2192,7 @@ static int cm_rej_handler(struct cm_work *work)
cm_enter_timewait(cm_id_priv);
break;
default:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto out;
}
@@ -2184,7 +2200,7 @@ static int cm_rej_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -2295,7 +2311,6 @@ static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
- unsigned long flags;
int timeout, ret;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2307,9 +2322,9 @@ static int cm_mra_handler(struct cm_work *work)
work->cm_event.param.mra_rcvd.service_timeout =
cm_mra_get_service_timeout(mra_msg);
timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
- cm_convert_to_ms(cm_id_priv->av.packet_life_time);
+ cm_convert_to_ms(cm_id_priv->av.timeout);
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
@@ -2342,7 +2357,7 @@ static int cm_mra_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -2350,7 +2365,7 @@ static int cm_mra_handler(struct cm_work *work)
cm_deref_id(cm_id_priv);
return 0;
out:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
return -EINVAL;
}
@@ -2379,7 +2394,8 @@ static void cm_format_lap(struct cm_lap_msg *lap_msg,
cm_lap_set_sl(lap_msg, alternate_path->sl);
cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
cm_lap_set_local_ack_timeout(lap_msg,
- min(31, alternate_path->packet_life_time + 1));
+ cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
+ alternate_path->packet_life_time));
if (private_data && private_data_len)
memcpy(lap_msg->private_data, private_data, private_data_len);
@@ -2410,6 +2426,9 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
if (ret)
goto out;
+ cm_id_priv->alt_av.timeout =
+ cm_ack_timeout(cm_id_priv->target_ack_delay,
+ cm_id_priv->alt_av.timeout - 1);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
@@ -2465,7 +2484,6 @@ static int cm_lap_handler(struct cm_work *work)
struct cm_lap_msg *lap_msg;
struct ib_cm_lap_event_param *param;
struct ib_mad_send_buf *msg = NULL;
- unsigned long flags;
int ret;
/* todo: verify LAP request and send reject APR if invalid. */
@@ -2480,7 +2498,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
goto unlock;
@@ -2497,7 +2515,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
@@ -2515,7 +2533,7 @@ static int cm_lap_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -2523,7 +2541,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_deref_id(cm_id_priv);
return 0;
-unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+unlock: spin_unlock_irq(&cm_id_priv->lock);
deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
@@ -2598,7 +2616,6 @@ static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
- unsigned long flags;
int ret;
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2612,11 +2629,11 @@ static int cm_apr_handler(struct cm_work *work)
work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
work->cm_event.private_data = &apr_msg->private_data;
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
(cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
@@ -2626,7 +2643,7 @@ static int cm_apr_handler(struct cm_work *work)
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
@@ -2761,7 +2778,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
- unsigned long flags;
cm_id = ib_create_cm_id(work->port->cm_dev->device, NULL, NULL);
if (IS_ERR(cm_id))
@@ -2778,27 +2794,26 @@ static int cm_sidr_req_handler(struct cm_work *work)
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
- cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
- spin_lock_irqsave(&cm.lock, flags);
+ spin_lock_irq(&cm.lock);
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
goto out; /* Duplicate message. */
}
+ cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
cur_cm_id_priv = cm_find_listen(cm_id->device,
sidr_req_msg->service_id,
sidr_req_msg->private_data);
if (!cur_cm_id_priv) {
- rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
- spin_unlock_irqrestore(&cm.lock, flags);
- /* todo: reply with no match */
+ spin_unlock_irq(&cm.lock);
+ cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
atomic_inc(&cur_cm_id_priv->refcount);
- spin_unlock_irqrestore(&cm.lock, flags);
+ spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = cur_cm_id_priv->id.context;
@@ -2899,7 +2914,6 @@ static int cm_sidr_rep_handler(struct cm_work *work)
{
struct cm_sidr_rep_msg *sidr_rep_msg;
struct cm_id_private *cm_id_priv;
- unsigned long flags;
sidr_rep_msg = (struct cm_sidr_rep_msg *)
work->mad_recv_wc->recv_buf.mad;
@@ -2907,14 +2921,14 @@ static int cm_sidr_rep_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_format_sidr_rep_event(work);
cm_process_work(cm_id_priv, work);
@@ -2930,14 +2944,13 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
struct cm_id_private *cm_id_priv;
struct ib_cm_event cm_event;
enum ib_cm_state state;
- unsigned long flags;
int ret;
memset(&cm_event, 0, sizeof cm_event);
cm_id_priv = msg->context[0];
/* Discard old sends or ones without a response. */
- spin_lock_irqsave(&cm_id_priv->lock, flags);
+ spin_lock_irq(&cm_id_priv->lock);
state = (enum ib_cm_state) (unsigned long) msg->context[1];
if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
goto discard;
@@ -2964,7 +2977,7 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
default:
goto discard;
}
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_event.param.send_status = wc_status;
/* No other events can occur on the cm_id at this point. */
@@ -2974,7 +2987,7 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
ib_destroy_cm_id(&cm_id_priv->id);
return;
discard:
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ spin_unlock_irq(&cm_id_priv->lock);
cm_free_msg(msg);
}
@@ -3269,8 +3282,7 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask |= IB_QP_ALT_PATH;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
- qp_attr->alt_timeout =
- cm_id_priv->alt_av.packet_life_time + 1;
+ qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
}
ret = 0;
@@ -3308,8 +3320,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
IB_QP_RNR_RETRY |
IB_QP_MAX_QP_RD_ATOMIC;
- qp_attr->timeout =
- cm_id_priv->av.packet_life_time + 1;
+ qp_attr->timeout = cm_id_priv->av.timeout;
qp_attr->retry_cnt = cm_id_priv->retry_count;
qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
qp_attr->max_rd_atomic =
@@ -3323,8 +3334,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
- qp_attr->alt_timeout =
- cm_id_priv->alt_av.packet_life_time + 1;
+ qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
qp_attr->path_mig_state = IB_MIG_REARM;
}
@@ -3364,6 +3374,16 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
+static void cm_get_ack_delay(struct cm_device *cm_dev)
+{
+ struct ib_device_attr attr;
+
+ if (ib_query_device(cm_dev->device, &attr))
+ cm_dev->ack_delay = 0; /* acks will rely on packet life time */
+ else
+ cm_dev->ack_delay = attr.local_ca_ack_delay;
+}
+
static void cm_add_one(struct ib_device *device)
{
struct cm_device *cm_dev;
@@ -3388,6 +3408,7 @@ static void cm_add_one(struct ib_device *device)
return;
cm_dev->device = device;
+ cm_get_ack_delay(cm_dev);
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= device->phys_port_cnt; i++) {
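cm_get_ack_delay() above caches the HCA's local_ca_ack_delay so that REP and LAP messages, and the QP timeouts derived from them, account for it in addition to the path's packet life time. Both quantities are IBTA log2-encoded durations (4.096 us * 2^value), and the combined value kept in av.timeout is clamped to 31 because the corresponding message and QP attribute fields are 5 bits wide. A rough illustration of combining two such exponents — this is not the kernel's cm_ack_timeout(), whose body lies outside the hunks shown here:

#include <linux/types.h>

/* Illustration only: an upper bound for the sum of two log2-encoded
 * (4.096 us * 2^n) durations is the larger exponent plus one, clamped
 * to the 5-bit range used by CM messages and QP attributes. */
static u8 example_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	u8 t = (packet_life_time > ca_ack_delay ? packet_life_time : ca_ack_delay) + 1;

	return t > 31 ? 31 : t;
}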
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 4d3aee90c249..aec9c7af825d 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -35,6 +35,7 @@
#define CM_MSGS_H
#include <rdma/ib_mad.h>
+#include <rdma/ib_cm.h>
/*
* Parameters to routines below should be in network-byte order, and values
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 32a0e66d2a23..9ffb9987450a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -573,7 +573,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
break;
case RDMA_TRANSPORT_IWARP:
if (!id_priv->cm_id.iw) {
- qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ qp_attr->qp_access_flags = 0;
*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
@@ -2326,7 +2326,6 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.private_data_len = conn_param->private_data_len;
rep.responder_resources = conn_param->responder_resources;
rep.initiator_depth = conn_param->initiator_depth;
- rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
rep.failover_accepted = 0;
rep.flow_control = conn_param->flow_control;
rep.rnr_retry_count = conn_param->rnr_retry_count;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 85ccf13b8041..bc547f1d34ba 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -675,10 +675,16 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_port_private *port_priv;
struct ib_mad_agent_private *recv_mad_agent = NULL;
struct ib_device *device = mad_agent_priv->agent.device;
- u8 port_num = mad_agent_priv->agent.port_num;
+ u8 port_num;
struct ib_wc mad_wc;
struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+ if (device->node_type == RDMA_NODE_IB_SWITCH &&
+ smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ port_num = send_wr->wr.ud.port_num;
+ else
+ port_num = mad_agent_priv->agent.port_num;
+
/*
* Directed route handling starts if the initial LID routed part of
* a request or the ending LID routed part of a response is empty.
@@ -1839,6 +1845,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_mad_private *recv, *response;
struct ib_mad_list_head *mad_list;
struct ib_mad_agent_private *mad_agent;
+ int port_num;
response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
if (!response)
@@ -1872,25 +1879,50 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
goto out;
+ if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+ port_num = wc->port_num;
+ else
+ port_num = port_priv->port_num;
+
if (recv->mad.mad.mad_hdr.mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ enum smi_forward_action retsmi;
+
if (smi_handle_dr_smp_recv(&recv->mad.smp,
port_priv->device->node_type,
- port_priv->port_num,
+ port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
goto out;
- if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL)
+ retsmi = smi_check_forward_dr_smp(&recv->mad.smp);
+ if (retsmi == IB_SMI_LOCAL)
goto local;
- if (smi_handle_dr_smp_send(&recv->mad.smp,
- port_priv->device->node_type,
- port_priv->port_num) == IB_SMI_DISCARD)
- goto out;
+ if (retsmi == IB_SMI_SEND) { /* don't forward */
+ if (smi_handle_dr_smp_send(&recv->mad.smp,
+ port_priv->device->node_type,
+ port_num) == IB_SMI_DISCARD)
+ goto out;
+
+ if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
+ goto out;
+ } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+ /* forward case for switches */
+ memcpy(response, recv, sizeof(*response));
+ response->header.recv_wc.wc = &response->header.wc;
+ response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+ response->header.recv_wc.recv_buf.grh = &response->grh;
+
+ if (!agent_send_response(&response->mad.mad,
+ &response->grh, wc,
+ port_priv->device,
+ smi_get_fwd_port(&recv->mad.smp),
+ qp_info->qp->qp_num))
+ response = NULL;
- if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
goto out;
+ }
}
local:
@@ -1919,7 +1951,7 @@ local:
agent_send_response(&response->mad.mad,
&recv->grh, wc,
port_priv->device,
- port_priv->port_num,
+ port_num,
qp_info->qp->qp_num);
goto out;
}
@@ -2966,7 +2998,6 @@ static int __init ib_mad_init_module(void)
sizeof(struct ib_mad_private),
0,
SLAB_HWCACHE_ALIGN,
- NULL,
NULL);
if (!ib_mad_cache) {
printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 1e13ab42b70b..15b4c4d3606d 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index 24c93fd320fb..b1d4bbf4ce5c 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 6469406ea9d8..20ab6b3e484d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -56,6 +56,7 @@ MODULE_LICENSE("Dual BSD/GPL");
struct ib_sa_sm_ah {
struct ib_ah *ah;
struct kref ref;
+ u16 pkey_index;
u8 src_path_mask;
};
@@ -382,6 +383,13 @@ static void update_sm_ah(struct work_struct *work)
kref_init(&new_ah->ref);
new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
+ new_ah->pkey_index = 0;
+ if (ib_find_pkey(port->agent->device, port->port_num,
+ IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index) &&
+ ib_find_pkey(port->agent->device, port->port_num,
+ IB_DEFAULT_PKEY_PARTIAL, &new_ah->pkey_index))
+ printk(KERN_ERR "Couldn't find index for default PKey\n");
+
memset(&ah_attr, 0, sizeof ah_attr);
ah_attr.dlid = port_attr.sm_lid;
ah_attr.sl = port_attr.sm_sl;
@@ -512,6 +520,35 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
}
EXPORT_SYMBOL(ib_init_ah_from_path);
+static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&query->port->ah_lock, flags);
+ kref_get(&query->port->sm_ah->ref);
+ query->sm_ah = query->port->sm_ah;
+ spin_unlock_irqrestore(&query->port->ah_lock, flags);
+
+ query->mad_buf = ib_create_send_mad(query->port->agent, 1,
+ query->sm_ah->pkey_index,
+ 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
+ gfp_mask);
+ if (!query->mad_buf) {
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ return -ENOMEM;
+ }
+
+ query->mad_buf->ah = query->sm_ah->ah;
+
+ return 0;
+}
+
+static void free_mad(struct ib_sa_query *query)
+{
+ ib_free_send_mad(query->mad_buf);
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+}
+
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
unsigned long flags;
@@ -548,20 +585,11 @@ retry:
query->mad_buf->context[0] = query;
query->id = id;
- spin_lock_irqsave(&query->port->ah_lock, flags);
- kref_get(&query->port->sm_ah->ref);
- query->sm_ah = query->port->sm_ah;
- spin_unlock_irqrestore(&query->port->ah_lock, flags);
-
- query->mad_buf->ah = query->sm_ah->ah;
-
ret = ib_post_send_mad(query->mad_buf, NULL);
if (ret) {
spin_lock_irqsave(&idr_lock, flags);
idr_remove(&query_idr, id);
spin_unlock_irqrestore(&idr_lock, flags);
-
- kref_put(&query->sm_ah->ref, free_sm_ah);
}
/*
@@ -647,13 +675,10 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
+ query->sa_query.port = port;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
goto err1;
- }
ib_sa_client_get(client);
query->sa_query.client = client;
@@ -665,7 +690,6 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
query->sa_query.release = ib_sa_path_rec_release;
- query->sa_query.port = port;
mad->mad_hdr.method = IB_MGMT_METHOD_GET;
mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
mad->sa_hdr.comp_mask = comp_mask;
@@ -683,7 +707,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
- ib_free_send_mad(query->sa_query.mad_buf);
+ free_mad(&query->sa_query);
err1:
kfree(query);
@@ -773,13 +797,10 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
+ query->sa_query.port = port;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
goto err1;
- }
ib_sa_client_get(client);
query->sa_query.client = client;
@@ -791,7 +812,6 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
query->sa_query.release = ib_sa_service_rec_release;
- query->sa_query.port = port;
mad->mad_hdr.method = method;
mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
mad->sa_hdr.comp_mask = comp_mask;
@@ -810,7 +830,7 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
- ib_free_send_mad(query->sa_query.mad_buf);
+ free_mad(&query->sa_query);
err1:
kfree(query);
@@ -869,13 +889,10 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
if (!query)
return -ENOMEM;
- query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
- 0, IB_MGMT_SA_HDR,
- IB_MGMT_SA_DATA, gfp_mask);
- if (!query->sa_query.mad_buf) {
- ret = -ENOMEM;
+ query->sa_query.port = port;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
goto err1;
- }
ib_sa_client_get(client);
query->sa_query.client = client;
@@ -887,7 +904,6 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
query->sa_query.release = ib_sa_mcmember_rec_release;
- query->sa_query.port = port;
mad->mad_hdr.method = method;
mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
mad->sa_hdr.comp_mask = comp_mask;
@@ -906,7 +922,7 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
- ib_free_send_mad(query->sa_query.mad_buf);
+ free_mad(&query->sa_query);
err1:
kfree(query);
@@ -939,8 +955,7 @@ static void send_handler(struct ib_mad_agent *agent,
idr_remove(&query_idr, query->id);
spin_unlock_irqrestore(&idr_lock, flags);
- ib_free_send_mad(mad_send_wc->send_buf);
- kref_put(&query->sm_ah->ref, free_sm_ah);
+ free_mad(query);
ib_sa_client_put(query->client);
query->release(query);
}
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 2bca753eb622..87236753bce9 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -192,7 +192,7 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
}
/* smp->hop_ptr updated when sending */
return (node_type == RDMA_NODE_IB_SWITCH ?
- IB_SMI_HANDLE: IB_SMI_DISCARD);
+ IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -211,7 +211,7 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
if (!ib_get_smp_direction(smp)) {
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt)
- return IB_SMI_SEND;
+ return IB_SMI_FORWARD;
/* C14-9:3 -- at the end of the DR segment of path */
if (hop_ptr == hop_cnt)
@@ -224,7 +224,7 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
} else {
/* C14-13:2 -- intermediate hop */
if (2 <= hop_ptr && hop_ptr <= hop_cnt)
- return IB_SMI_SEND;
+ return IB_SMI_FORWARD;
/* C14-13:3 -- at the end of the DR segment of path */
if (hop_ptr == 1)
@@ -233,3 +233,13 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
}
return IB_SMI_LOCAL;
}
+
+/*
+ * Return the forwarding port number from initial_path for outgoing SMP and
+ * from return_path for returning SMP
+ */
+int smi_get_fwd_port(struct ib_smp *smp)
+{
+ return (!ib_get_smp_direction(smp) ? smp->initial_path[smp->hop_ptr+1] :
+ smp->return_path[smp->hop_ptr-1]);
+}
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index 9a4b349efc30..1cfc2984434f 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -48,10 +48,12 @@ enum smi_action {
enum smi_forward_action {
IB_SMI_LOCAL, /* SMP should be completed up the stack */
IB_SMI_SEND, /* received DR SMP should be forwarded to the send queue */
+ IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
};
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
int port_num, int phys_port_cnt);
+int smi_get_fwd_port(struct ib_smp *smp);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type, int port_num);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 08c299ebf4a8..70b77ae67422 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -311,7 +311,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
return sprintf(buf, "N/A (no PMA)\n");
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
@@ -479,7 +479,6 @@ alloc_group_attrs(ssize_t (*show)(struct ib_port *,
element->attr.attr.name = element->name;
element->attr.attr.mode = S_IRUGO;
- element->attr.attr.owner = THIS_MODULE;
element->attr.show = show;
element->index = i;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 2586a3ee8eba..424983f5b1ee 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -823,7 +823,6 @@ static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
param.private_data_len = cmd.len;
param.responder_resources = cmd.responder_resources;
param.initiator_depth = cmd.initiator_depth;
- param.target_ack_delay = cmd.target_ack_delay;
param.failover_accepted = cmd.failover_accepted;
param.flow_control = cmd.flow_control;
param.rnr_retry_count = cmd.rnr_retry_count;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index d40652a80151..26d0470eef6e 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -121,6 +121,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base = addr & PAGE_MASK;
+ ret = 0;
while (npages) {
ret = get_user_pages(current, current->mm, cur_base,
min_t(int, npages,
diff --git a/drivers/infiniband/hw/amso1100/Kconfig b/drivers/infiniband/hw/amso1100/Kconfig
index 809cb14ac6de..e6ce5f209e47 100644
--- a/drivers/infiniband/hw/amso1100/Kconfig
+++ b/drivers/infiniband/hw/amso1100/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_AMSO1100
tristate "Ammasso 1100 HCA support"
- depends on PCI && INET && INFINIBAND
+ depends on PCI && INET
---help---
This is a low-level driver for the Ammasso 1100 host
channel adapter (HCA).
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 36620a22413c..cfdacb1ec279 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -85,7 +85,7 @@ int vq_init(struct c2_dev *c2dev)
(char) ('0' + c2dev->devnum));
c2dev->host_msg_cache =
kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (c2dev->host_msg_cache == NULL) {
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
index 77977f55dca3..2acec3fadf69 100644
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ b/drivers/infiniband/hw/cxgb3/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_CXGB3
tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3 && INFINIBAND && INET
+ depends on CHELSIO_T3 && INET
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 76049afc7655..1518b41482ae 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -144,7 +144,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
}
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 1, qpid, 7);
+ build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 0, qpid, 7);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
sge_cmd = qpid << 8 | 3;
wqe->sge_cmd = cpu_to_be64(sge_cmd);
@@ -548,7 +548,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
memset(wqe, 0, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 1,
+ build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
T3_CTL_QP_TID, 7);
wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
@@ -833,7 +833,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
wqe->ird = cpu_to_be32(attr->ird);
wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
- wqe->rsvd = 0;
+ wqe->irs = cpu_to_be32(attr->irs);
skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index ff7290eacefb..c84d4ac49355 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -294,6 +294,7 @@ struct t3_rdma_init_attr {
u64 qp_dma_addr;
u32 qp_dma_size;
u32 flags;
+ u32 irs;
};
struct t3_rdma_init_wr {
@@ -314,7 +315,7 @@ struct t3_rdma_init_wr {
__be32 ird;
__be64 qp_dma_addr; /* 7 */
__be32 qp_dma_size; /* 8 */
- u32 rsvd;
+ u32 irs;
};
struct t3_genbit {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index b2faff5abce8..9574088f0d4e 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -229,9 +229,8 @@ static void *alloc_ep(int size, gfp_t gfp)
{
struct iwch_ep_common *epc;
- epc = kmalloc(size, gfp);
+ epc = kzalloc(size, gfp);
if (epc) {
- memset(epc, 0, size);
kref_init(&epc->kref);
spin_lock_init(&epc->lock);
init_waitqueue_head(&epc->waitq);
@@ -254,8 +253,6 @@ static void release_ep_resources(struct iwch_ep *ep)
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
- if (ep->com.tdev->type == T3B)
- release_tid(ep->com.tdev, ep->hwtid, NULL);
put_ep(&ep->com);
}
@@ -515,7 +512,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
req->len = htonl(len);
req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
+ req->flags = htonl(F_TX_INIT);
req->sndseq = htonl(ep->snd_seq);
BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
@@ -566,7 +563,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
req->len = htonl(mpalen);
req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT);
+ req->flags = htonl(F_TX_INIT);
req->sndseq = htonl(ep->snd_seq);
BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
@@ -618,7 +615,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
req->len = htonl(len);
req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT);
+ req->flags = htonl(F_TX_INIT);
req->sndseq = htonl(ep->snd_seq);
ep->mpa_skb = skb;
state_set(&ep->com, MPA_REP_SENT);
@@ -641,6 +638,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
ep->snd_seq = ntohl(req->snd_isn);
+ ep->rcv_seq = ntohl(req->rcv_isn);
set_emss(ep, ntohs(req->tcp_opt));
@@ -1023,6 +1021,9 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
+ ep->rcv_seq += dlen;
+ BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
+
switch (state_read(&ep->com)) {
case MPA_REQ_SENT:
process_mpa_reply(ep, skb);
@@ -1060,7 +1061,6 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *ep = ctx;
struct cpl_wr_ack *hdr = cplhdr(skb);
unsigned int credits = ntohs(hdr->credits);
- enum iwch_qp_attr_mask mask;
PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
@@ -1072,30 +1072,6 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
ep->mpa_skb = NULL;
dst_confirm(ep->dst);
if (state_read(&ep->com) == MPA_REP_SENT) {
- struct iwch_qp_attributes attrs;
-
- /* bind QP to EP and move to RTS */
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ord;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- /* bind QP and TID with INIT_WR */
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_MAX_ORD;
-
- ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
-
- if (!ep->com.rpl_err) {
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- }
-
ep->com.rpl_done = 1;
PDBG("waking up ep %p\n", ep);
wake_up(&ep->com.waitq);
@@ -1124,6 +1100,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
+/*
+ * Return whether a failed active open has allocated a TID
+ */
+static inline int act_open_has_tid(int status)
+{
+ return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
+ status != CPL_ERR_ARP_MISS;
+}
+
static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
struct iwch_ep *ep = ctx;
@@ -1133,7 +1118,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
status2errno(rpl->status));
connect_reply_upcall(ep, status2errno(rpl->status));
state_set(&ep->com, DEAD);
- if (ep->com.tdev->type == T3B)
+ if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
release_tid(ep->com.tdev, GET_TID(rpl), NULL);
cxgb3_free_atid(ep->com.tdev, ep->atid);
dst_release(ep->dst);
@@ -1378,6 +1363,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
PDBG("%s ep %p\n", __FUNCTION__, ep);
ep->snd_seq = ntohl(req->snd_isn);
+ ep->rcv_seq = ntohl(req->rcv_isn);
set_emss(ep, ntohs(req->tcp_opt));
@@ -1485,6 +1471,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
int ret;
int state;
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
+ ep->hwtid);
+ t3_l2t_send_event(ep->com.tdev, ep->l2t);
+ return CPL_RET_BUF_DONE;
+ }
+
/*
* We get 2 peer aborts from the HW. The first one must
* be ignored except for scribbling that we need one more.
@@ -1494,13 +1487,6 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
- if (is_neg_adv_abort(req->status)) {
- PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
- ep->hwtid);
- t3_l2t_send_event(ep->com.tdev, ep->l2t);
- return CPL_RET_BUF_DONE;
- }
-
state = state_read(&ep->com);
PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
switch (state) {
@@ -1732,10 +1718,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
- put_ep(&ep->com);
+ if (state_read(&ep->com) == DEAD)
return -ECONNRESET;
- }
BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
BUG_ON(!qp);
@@ -1755,17 +1739,8 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
+
get_ep(&ep->com);
- err = send_mpa_reply(ep, conn_param->private_data,
- conn_param->private_data_len);
- if (err) {
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
- abort_connection(ep, NULL, GFP_KERNEL);
- put_ep(&ep->com);
- return err;
- }
/* bind QP to EP and move to RTS */
attrs.mpa_attr = ep->mpa_attr;
@@ -1783,16 +1758,28 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = iwch_modify_qp(ep->com.qp->rhp,
ep->com.qp, mask, &attrs, 1);
+ if (err)
+ goto err;
- if (err) {
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
- abort_connection(ep, NULL, GFP_KERNEL);
- } else {
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- }
+ err = send_mpa_reply(ep, conn_param->private_data,
+ conn_param->private_data_len);
+ if (err)
+ goto err;
+
+ /* wait for wr_ack */
+ wait_event(ep->com.waitq, ep->com.rpl_done);
+ err = ep->com.rpl_err;
+ if (err)
+ goto err;
+
+ state_set(&ep->com, FPDU_MODE);
+ established_upcall(ep);
+ put_ep(&ep->com);
+ return 0;
+err:
+ ep->com.cm_id = NULL;
+ ep->com.qp = NULL;
+ cm_id->rem_ref(cm_id);
put_ep(&ep->com);
return err;
}
@@ -1926,6 +1913,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
fail3:
cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
+ cm_id->rem_ref(cm_id);
put_ep(&ep->com);
fail1:
out:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 21a388c313cf..6107e7cd9b57 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -175,6 +175,7 @@ struct iwch_ep {
unsigned int atid;
u32 hwtid;
u32 snd_seq;
+ u32 rcv_seq;
struct l2t_entry *l2t;
struct dst_entry *dst;
struct sk_buff *mpa_skb;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e7c2c3948037..f0c777589374 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1163,9 +1163,10 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.post_recv = iwch_post_receive;
- dev->ibdev.iwcm =
- (struct iw_cm_verbs *) kmalloc(sizeof(struct iw_cm_verbs),
- GFP_KERNEL);
+ dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
+ if (!dev->ibdev.iwcm)
+ return -ENOMEM;
+
dev->ibdev.iwcm->connect = iwch_connect;
dev->ibdev.iwcm->accept = iwch_accept_cr;
dev->ibdev.iwcm->reject = iwch_reject_cr;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 714dddbc9a98..dd89b6b91f9c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -628,9 +628,9 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
/* immediate data starts here. */
term = (struct terminate_message *)wqe->send.sgl;
build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
- build_fw_riwrh((void *)wqe, T3_WR_SEND,
- T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
- qhp->ep->hwtid, 5);
+ wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
+ V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
+ wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
skb->priority = CPL_PRIORITY_DATA;
return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}
@@ -732,6 +732,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
init_attr.qp_dma_addr = qhp->wq.dma_addr;
init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
+ init_attr.irs = qhp->ep->rcv_seq;
PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
"flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
init_attr.rq_addr, init_attr.rq_size,
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig
index 1a854598e0e6..59f807d8d58e 100644
--- a/drivers/infiniband/hw/ehca/Kconfig
+++ b/drivers/infiniband/hw/ehca/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_EHCA
tristate "eHCA support"
- depends on IBMEBUS && INFINIBAND
+ depends on IBMEBUS
---help---
This driver supports the IBM pSeries eHCA InfiniBand adapter.
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 0d6e2c4bb245..97d108634c58 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -79,7 +79,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
av->av.ipd = (ah_mult > 0) ?
((ehca_mult - 1) / ah_mult) : 0;
} else
- av->av.ipd = ehca_static_rate;
+ av->av.ipd = ehca_static_rate;
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
@@ -118,7 +118,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
}
memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
}
- av->av.pmtu = EHCA_MAX_MTU;
+ av->av.pmtu = shca->max_mtu;
/* dgid comes in grh.word_3 */
memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
@@ -137,6 +137,8 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
struct ehca_av *av;
struct ehca_ud_av new_ehca_av;
struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
+ ib_device);
u32 cur_pid = current->tgid;
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
@@ -192,7 +194,7 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
}
- new_ehca_av.pmtu = EHCA_MAX_MTU;
+ new_ehca_av.pmtu = shca->max_mtu;
memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
@@ -257,7 +259,7 @@ int ehca_init_av_cache(void)
av_cache = kmem_cache_create("ehca_cache_av",
sizeof(struct ehca_av), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!av_cache)
return -ENOMEM;
return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1d286d3cc2d5..3725aa8664d9 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -5,6 +5,7 @@
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
+ * Joachim Fenkes <fenkes@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
@@ -42,7 +43,6 @@
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__
-
struct ehca_module;
struct ehca_qp;
struct ehca_cq;
@@ -86,13 +86,24 @@ struct ehca_eq {
struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};
+struct ehca_sma_attr {
+ u16 lid, lmc, sm_sl, sm_lid;
+ u16 pkey_tbl_len, pkeys[16];
+};
+
struct ehca_sport {
struct ib_cq *ibcq_aqp1;
struct ib_qp *ibqp_aqp1;
enum ib_rate rate;
enum ib_port_state port_state;
+ struct ehca_sma_attr saved_attr;
};
+#define HCA_CAP_MR_PGSIZE_4K 1
+#define HCA_CAP_MR_PGSIZE_64K 2
+#define HCA_CAP_MR_PGSIZE_1M 4
+#define HCA_CAP_MR_PGSIZE_16M 8
+
struct ehca_shca {
struct ib_device ib_device;
struct ibmebus_dev *ibmebus_dev;
@@ -107,17 +118,36 @@ struct ehca_shca {
struct ehca_pd *pd;
struct h_galpas galpas;
struct mutex modify_mutex;
+ u64 hca_cap;
+ /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
+ u32 hca_cap_mr_pgsize;
+ int max_mtu;
};
struct ehca_pd {
struct ib_pd ib_pd;
struct ipz_pd fw_pd;
u32 ownpid;
+ /* small queue mgmt */
+ struct mutex lock;
+ struct list_head free[2];
+ struct list_head full[2];
+};
+
+enum ehca_ext_qp_type {
+ EQPT_NORMAL = 0,
+ EQPT_LLQP = 1,
+ EQPT_SRQBASE = 2,
+ EQPT_SRQ = 3,
};
struct ehca_qp {
- struct ib_qp ib_qp;
+ union {
+ struct ib_qp ib_qp;
+ struct ib_srq ib_srq;
+ };
u32 qp_type;
+ enum ehca_ext_qp_type ext_type;
struct ipz_queue ipz_squeue;
struct ipz_queue ipz_rqueue;
struct h_galpas galpas;
@@ -140,6 +170,10 @@ struct ehca_qp {
u32 mm_count_galpa;
};
+#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
+#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
+#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
+
/* must be power of 2 */
#define QP_HASHTAB_LEN 8
@@ -156,8 +190,8 @@ struct ehca_cq {
spinlock_t cb_lock;
struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
struct list_head entry;
- u32 nr_callbacks; /* #events assigned to cpu by scaling code */
- u32 nr_events; /* #events seen */
+ u32 nr_callbacks; /* #events assigned to cpu by scaling code */
+ atomic_t nr_events; /* #events seen */
wait_queue_head_t wait_completion;
spinlock_t task_lock;
u32 ownpid;
@@ -180,11 +214,12 @@ struct ehca_mr {
spinlock_t mrlock;
enum ehca_mr_flag flags;
- u32 num_pages; /* number of MR pages */
- u32 num_4k; /* number of 4k "page" portions to form MR */
+ u32 num_kpages; /* number of kernel pages */
+ u32 num_hwpages; /* number of hw pages to form MR */
+ u64 hwpage_size; /* hw page size used for this MR */
int acl; /* ACL (stored here for usage in reregister) */
u64 *start; /* virtual start address (stored here for */
- /* usage in reregister) */
+ /* usage in reregister) */
u64 size; /* size (stored here for usage in reregister) */
u32 fmr_page_size; /* page size for FMR */
u32 fmr_max_pages; /* max pages for FMR */
@@ -193,9 +228,6 @@ struct ehca_mr {
/* fw specific data */
struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
struct h_galpas galpas;
- /* data for userspace bridge */
- u32 nr_of_pages;
- void *pagearray;
};
struct ehca_mw {
@@ -217,26 +249,30 @@ enum ehca_mr_pgi_type {
struct ehca_mr_pginfo {
enum ehca_mr_pgi_type type;
- u64 num_pages;
- u64 page_cnt;
- u64 num_4k; /* number of 4k "page" portions */
- u64 page_4k_cnt; /* counter for 4k "page" portions */
- u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */
-
- /* type EHCA_MR_PGI_PHYS section */
- int num_phys_buf;
- struct ib_phys_buf *phys_buf_array;
- u64 next_buf;
-
- /* type EHCA_MR_PGI_USER section */
- struct ib_umem *region;
- struct ib_umem_chunk *next_chunk;
- u64 next_nmap;
-
- /* type EHCA_MR_PGI_FMR section */
- u64 *page_list;
- u64 next_listelem;
- /* next_4k also used within EHCA_MR_PGI_FMR */
+ u64 num_kpages;
+ u64 kpage_cnt;
+ u64 hwpage_size; /* hw page size used for this MR */
+ u64 num_hwpages; /* number of hw pages */
+ u64 hwpage_cnt; /* counter for hw pages */
+ u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
+
+ union {
+ struct { /* type EHCA_MR_PGI_PHYS section */
+ int num_phys_buf;
+ struct ib_phys_buf *phys_buf_array;
+ u64 next_buf;
+ } phy;
+ struct { /* type EHCA_MR_PGI_USER section */
+ struct ib_umem *region;
+ struct ib_umem_chunk *next_chunk;
+ u64 next_nmap;
+ } usr;
+ struct { /* type EHCA_MR_PGI_FMR section */
+ u64 fmr_pgsize;
+ u64 *page_list;
+ u64 next_listelem;
+ } fmr;
+ } u;
};
/* output parameters for MR/FMR hipz calls */
@@ -274,10 +310,11 @@ int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
+int ehca_init_small_qp_cache(void);
+void ehca_cleanup_small_qp_cache(void);
-extern spinlock_t ehca_qp_idr_lock;
-extern spinlock_t ehca_cq_idr_lock;
-extern spinlock_t hcall_lock;
+extern rwlock_t ehca_qp_idr_lock;
+extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
@@ -292,7 +329,7 @@ struct ipzu_queue_resp {
u32 queue_length; /* queue length allocated in bytes */
u32 pagesize;
u32 toggle_state;
- u32 dummy; /* padding for 8 byte alignment */
+ u32 offset; /* save offset within a page for small_qp */
};
struct ehca_create_cq_resp {
@@ -305,6 +342,7 @@ struct ehca_create_qp_resp {
u32 qp_num;
u32 token;
u32 qp_type;
+ u32 ext_type;
u32 qkey;
/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
u32 real_qp_num;
@@ -320,28 +358,59 @@ struct ehca_alloc_cq_parms {
struct ipz_eq_handle eq_handle;
};
+enum ehca_service_type {
+ ST_RC = 0,
+ ST_UC = 1,
+ ST_RD = 2,
+ ST_UD = 3,
+};
+
+enum ehca_ll_comp_flags {
+ LLQP_SEND_COMP = 0x20,
+ LLQP_RECV_COMP = 0x40,
+ LLQP_COMP_MASK = 0x60,
+};
+
+struct ehca_alloc_queue_parms {
+ /* input parameters */
+ int max_wr;
+ int max_sge;
+ int page_size;
+ int is_small;
+
+ /* output parameters */
+ u16 act_nr_wqes;
+ u8 act_nr_sges;
+ u32 queue_size; /* bytes for small queues, pages otherwise */
+};
+
struct ehca_alloc_qp_parms {
- int servicetype;
+ struct ehca_alloc_queue_parms squeue;
+ struct ehca_alloc_queue_parms rqueue;
+
+ /* input parameters */
+ enum ehca_service_type servicetype;
+ int qp_storage;
int sigtype;
- int daqp_ctrl;
- int max_send_sge;
- int max_recv_sge;
+ enum ehca_ext_qp_type ext_type;
+ enum ehca_ll_comp_flags ll_comp_flags;
int ud_av_l_key_ctl;
- u16 act_nr_send_wqes;
- u16 act_nr_recv_wqes;
- u8 act_nr_recv_sges;
- u8 act_nr_send_sges;
+ u32 token;
+ struct ipz_eq_handle eq_handle;
+ struct ipz_pd pd;
+ struct ipz_cq_handle send_cq_handle, recv_cq_handle;
- u32 nr_rq_pages;
- u32 nr_sq_pages;
+ u32 srq_qpn, srq_token, srq_limit;
- struct ipz_eq_handle ipz_eq_handle;
- struct ipz_pd pd;
+ /* output parameters */
+ u32 real_qp_num;
+ struct ipz_qp_handle qp_handle;
+ struct h_galpas galpas;
};
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index 5665f213b81a..1798e6466bd0 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -154,83 +154,83 @@ struct hcp_modify_qp_control_block {
u32 reserved_70_127[58]; /* 70 */
};
-#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0)
-#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2)
-#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3)
-#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4)
-#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6)
-#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8)
-#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13)
-#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15)
-#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16)
-#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17)
-#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18)
-#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19)
-#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31)
-#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31)
-#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_CURR_SQR_LIMIT EHCA_BMASK_IBM(49,49)
-#define MQPCB_CURR_SQR_LIMIT EHCA_BMASK_IBM(15,31)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
-#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51)
+#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
+#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
+#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
+#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
+#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
+#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
+#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
+#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
+#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
+#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
+#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
+#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
+#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
+#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
+#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
+#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
+#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
+#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
+#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
+#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
+#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
+#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
+#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
+#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
+#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
+#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
+#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
+#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
+#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
+#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
+#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
+#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
+#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
+#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
+#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
+#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
+#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
+#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
+#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
+#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
+#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
+#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
+#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
+#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
+#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
+#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
+#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
+#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
+#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
+#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
+#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
+#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
+#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
+#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
+#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
+#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
+#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 67f0670fe3b1..81aff36101ba 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -56,11 +56,11 @@ int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
unsigned int qp_num = qp->real_qp_num;
unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
- unsigned long spl_flags;
+ unsigned long flags;
- spin_lock_irqsave(&cq->spinlock, spl_flags);
+ spin_lock_irqsave(&cq->spinlock, flags);
hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
- spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+ spin_unlock_irqrestore(&cq->spinlock, flags);
ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
cq->cq_number, qp_num);
@@ -74,9 +74,9 @@ int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
struct hlist_node *iter;
struct ehca_qp *qp;
- unsigned long spl_flags;
+ unsigned long flags;
- spin_lock_irqsave(&cq->spinlock, spl_flags);
+ spin_lock_irqsave(&cq->spinlock, flags);
hlist_for_each(iter, &cq->qp_hashtab[key]) {
qp = hlist_entry(iter, struct ehca_qp, list_entries);
if (qp->real_qp_num == real_qp_num) {
@@ -88,7 +88,7 @@ int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
break;
}
}
- spin_unlock_irqrestore(&cq->spinlock, spl_flags);
+ spin_unlock_irqrestore(&cq->spinlock, flags);
if (ret)
ehca_err(cq->ib_cq.device,
"qp not found cq_num=%x real_qp_num=%x",
@@ -97,7 +97,7 @@ int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
return ret;
}
-struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
+struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
struct ehca_qp *ret = NULL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
@@ -146,6 +146,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
spin_lock_init(&my_cq->spinlock);
spin_lock_init(&my_cq->cb_lock);
spin_lock_init(&my_cq->task_lock);
+ atomic_set(&my_cq->nr_events, 0);
init_waitqueue_head(&my_cq->wait_completion);
my_cq->ownpid = current->tgid;
@@ -162,9 +163,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
goto create_cq_exit1;
}
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
} while (ret == -EAGAIN);
@@ -189,8 +190,8 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
goto create_cq_exit2;
}
- ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
- EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
+ ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
+ EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
if (!ipz_rc) {
ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
ipz_rc, device);
@@ -284,7 +285,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
return cq;
create_cq_exit4:
- ipz_queue_dtor(&my_cq->ipz_queue);
+ ipz_queue_dtor(NULL, &my_cq->ipz_queue);
create_cq_exit3:
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
@@ -293,9 +294,9 @@ create_cq_exit3:
"cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
create_cq_exit2:
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
idr_remove(&ehca_cq_idr, my_cq->token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
create_cq_exit1:
kmem_cache_free(cq_cache, my_cq);
@@ -303,16 +304,6 @@ create_cq_exit1:
return cq;
}
-static int get_cq_nr_events(struct ehca_cq *my_cq)
-{
- int ret;
- unsigned long flags;
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- ret = my_cq->nr_events;
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- return ret;
-}
-
int ehca_destroy_cq(struct ib_cq *cq)
{
u64 h_ret;
@@ -339,17 +330,18 @@ int ehca_destroy_cq(struct ib_cq *cq)
}
}
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- while (my_cq->nr_events) {
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- wait_event(my_cq->wait_completion, !get_cq_nr_events(my_cq));
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- /* recheck nr_events to assure no cqe has just arrived */
- }
-
+ /*
+ * remove the CQ from the idr first to make sure
+ * no more interrupt tasklets will touch this CQ
+ */
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
idr_remove(&ehca_cq_idr, my_cq->token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+ /* now wait until all pending events have completed */
+ wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
+ /* nobody's using our CQ any longer -- we can destroy it */
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
if (h_ret == H_R_STATE) {
/* cq in err: read err data and destroy it forcibly */
@@ -367,7 +359,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
"ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
return ehca2ib_return_code(h_ret);
}
- ipz_queue_dtor(&my_cq->ipz_queue);
+ ipz_queue_dtor(NULL, &my_cq->ipz_queue);
kmem_cache_free(cq_cache, my_cq);
return 0;
@@ -395,7 +387,7 @@ int ehca_init_cq_cache(void)
cq_cache = kmem_cache_create("ehca_cache_cq",
sizeof(struct ehca_cq), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!cq_cache)
return -ENOMEM;
return 0;
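
The ehca_cq.c changes above replace the old nr_events bookkeeping under the idr spinlock with an atomic counter, a waitqueue and a reader/writer lock: event handlers pin the CQ by incrementing nr_events while still holding the read lock, and the destroy path unlinks the CQ from the idr under the write lock before sleeping until the counter drains. A minimal sketch of that pattern follows; the names are invented for illustration and initialisation of the idr, counter and waitqueue is omitted.

/*
 * Sketch of the teardown pattern used above, with made-up names.
 * Event side: look up the object under the read lock and pin it by
 * bumping an atomic counter before dropping the lock.
 * Destroy side: unlink the object first, then wait for the counter
 * to drain to zero.
 */
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <asm/atomic.h>

struct demo_obj {
	int token;
	atomic_t nr_events;          /* events currently referencing us */
	wait_queue_head_t wait_idle; /* destroyer sleeps here */
};

static DEFINE_RWLOCK(demo_idr_lock);
static DEFINE_IDR(demo_idr);

static struct demo_obj *demo_event_get(int token)
{
	struct demo_obj *obj;

	read_lock(&demo_idr_lock);
	obj = idr_find(&demo_idr, token);
	if (obj)
		atomic_inc(&obj->nr_events);   /* pin before unlocking */
	read_unlock(&demo_idr_lock);
	return obj;
}

static void demo_event_put(struct demo_obj *obj)
{
	if (atomic_dec_and_test(&obj->nr_events))
		wake_up(&obj->wait_idle);
}

static void demo_destroy(struct demo_obj *obj)
{
	unsigned long flags;

	/* unlink first so no new event can pin the object */
	write_lock_irqsave(&demo_idr_lock, flags);
	idr_remove(&demo_idr, obj->token);
	write_unlock_irqrestore(&demo_idr_lock, flags);

	/* then wait until every in-flight event has dropped its pin */
	wait_event(obj->wait_idle, !atomic_read(&obj->nr_events));
}

Unlinking before waiting is what makes the wait_event() condition monotonic: once the object is out of the idr, no new lookup can raise nr_events again, so the destroyer cannot be woken into a stale state.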
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 4961eb88827c..1d41faa7a337 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -86,8 +86,8 @@ int ehca_create_eq(struct ehca_shca *shca,
return -EINVAL;
}
- ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
- EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
+ ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
+ EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
if (!ret) {
ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
goto create_eq_exit1;
@@ -96,7 +96,8 @@ int ehca_create_eq(struct ehca_shca *shca,
for (i = 0; i < nr_pages; i++) {
u64 rpage;
- if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
+ vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
+ if (!vpage) {
ret = H_RESOURCE;
goto create_eq_exit2;
}
@@ -144,7 +145,7 @@ int ehca_create_eq(struct ehca_shca *shca,
return 0;
create_eq_exit2:
- ipz_queue_dtor(&eq->ipz_queue);
+ ipz_queue_dtor(NULL, &eq->ipz_queue);
create_eq_exit1:
hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
@@ -180,7 +181,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
ehca_err(&shca->ib_device, "Can't free EQ resources.");
return -EINVAL;
}
- ipz_queue_dtor(&eq->ipz_queue);
+ ipz_queue_dtor(NULL, &eq->ipz_queue);
return 0;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 32b55a4f0e5b..fc19ef9fd963 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -45,11 +45,25 @@
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
- int ret = 0;
+ int i, ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_hca *rblock;
+ static const u32 cap_mapping[] = {
+ IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
+ IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
+ IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
+ IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
+ IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
+ IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
+ IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
+ IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
+ IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
+ IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
+ IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
+ };
+
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
@@ -96,6 +110,13 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
props->max_total_mcast_qp_attach
= min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
+ /* translate device capabilities */
+ props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
+ IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
+ for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
+ if (rblock->hca_cap_indicators & cap_mapping[i + 1])
+ props->device_cap_flags |= cap_mapping[i];
+
query_device1:
ehca_free_fw_ctrlblock(rblock);
@@ -106,6 +127,7 @@ int ehca_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
int ret = 0;
+ u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
@@ -116,7 +138,8 @@ int ehca_query_port(struct ib_device *ibdev,
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_port1;
@@ -172,12 +195,50 @@ query_port1:
return ret;
}
+int ehca_query_sma_attr(struct ehca_shca *shca,
+ u8 port, struct ehca_sma_attr *attr)
+{
+ int ret = 0;
+ u64 h_ret;
+ struct hipz_query_port *rblock;
+
+ rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
+ if (!rblock) {
+ ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
+ return -ENOMEM;
+ }
+
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "Can't query port properties");
+ ret = -EINVAL;
+ goto query_sma_attr1;
+ }
+
+ memset(attr, 0, sizeof(struct ehca_sma_attr));
+
+ attr->lid = rblock->lid;
+ attr->lmc = rblock->lmc;
+ attr->sm_sl = rblock->sm_sl;
+ attr->sm_lid = rblock->sm_lid;
+
+ attr->pkey_tbl_len = rblock->pkey_tbl_len;
+ memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
+
+query_sma_attr1:
+ ehca_free_fw_ctrlblock(rblock);
+
+ return ret;
+}
+
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
int ret = 0;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+ u64 h_ret;
+ struct ehca_shca *shca;
struct hipz_query_port *rblock;
+ shca = container_of(ibdev, struct ehca_shca, ib_device);
if (index > 16) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
@@ -189,7 +250,8 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_pkey1;
@@ -207,6 +269,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
{
int ret = 0;
+ u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
@@ -222,7 +285,8 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
return -ENOMEM;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_gid1;
@@ -247,11 +311,12 @@ int ehca_modify_port(struct ib_device *ibdev,
struct ib_port_modify *props)
{
int ret = 0;
- struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
+ struct ehca_shca *shca;
struct hipz_query_port *rblock;
u32 cap;
u64 hret;
+ shca = container_of(ibdev, struct ehca_shca, ib_device);
if ((props->set_port_cap_mask | props->clr_port_cap_mask)
& ~allowed_port_caps) {
ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
@@ -261,7 +326,7 @@ int ehca_modify_port(struct ib_device *ibdev,
}
if (mutex_lock_interruptible(&shca->modify_mutex))
- return -ERESTARTSYS;
+ return -ERESTARTSYS;
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
@@ -270,7 +335,8 @@ int ehca_modify_port(struct ib_device *ibdev,
goto modify_port1;
}
- if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
+ hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
+ if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto modify_port2;
@@ -282,7 +348,8 @@ int ehca_modify_port(struct ib_device *ibdev,
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
+ ehca_err(&shca->ib_device, "Modify port failed hret=%lx",
+ hret);
ret = -EINVAL;
}
@@ -290,7 +357,7 @@ modify_port2:
ehca_free_fw_ctrlblock(rblock);
modify_port1:
- mutex_unlock(&shca->modify_mutex);
+ mutex_unlock(&shca->modify_mutex);
return ret;
}
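
ehca_query_device() above folds firmware capability bits into device_cap_flags by walking a flat array of pairs, stepping by two: even entries hold the verbs flag, odd entries the matching firmware bit. A stand-alone sketch of that translation, using made-up flag values in place of the IB_DEVICE_* and HCA_CAP_* constants:

/*
 * Sketch of the pairwise capability translation above.  The flag
 * values are invented for illustration only.
 */
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_IB_FLAG_A	0x1
#define DEMO_IB_FLAG_B	0x2
#define DEMO_FW_BIT_A	0x100
#define DEMO_FW_BIT_B	0x200

static u32 demo_translate_caps(u64 fw_caps)
{
	/* even entries: verbs flag, odd entries: matching firmware bit */
	static const u32 cap_mapping[] = {
		DEMO_IB_FLAG_A, DEMO_FW_BIT_A,
		DEMO_IB_FLAG_B, DEMO_FW_BIT_B,
	};
	u32 flags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (fw_caps & cap_mapping[i + 1])
			flags |= cap_mapping[i];

	return flags;
}

Keeping the pairs in one flat array keeps both the table and the loop trivial to extend when firmware reports new capability bits.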
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 100329ba3343..71c0799b3500 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -5,6 +5,8 @@
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Khadija Souissi <souissi@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Joachim Fenkes <fenkes@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
@@ -47,25 +49,26 @@
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
-#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
-#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7)
-#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31)
-#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63)
-#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63)
+#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
+#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
+#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
+#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
+#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
-#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
-#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7)
-#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
+#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
+#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
+#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
+#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
+#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
-#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
static void queue_comp_task(struct ehca_cq *__cq);
-static struct ehca_comp_pool* pool;
+static struct ehca_comp_pool *pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif
@@ -82,8 +85,8 @@ static inline void comp_event_callback(struct ehca_cq *cq)
return;
}
-static void print_error_data(struct ehca_shca * shca, void* data,
- u64* rblock, int length)
+static void print_error_data(struct ehca_shca *shca, void *data,
+ u64 *rblock, int length)
{
u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
u64 resource = rblock[1];
@@ -91,7 +94,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
switch (type) {
case 0x1: /* Queue Pair */
{
- struct ehca_qp *qp = (struct ehca_qp*)data;
+ struct ehca_qp *qp = (struct ehca_qp *)data;
/* only print error data if AER is set */
if (rblock[6] == 0)
@@ -104,7 +107,7 @@ static void print_error_data(struct ehca_shca * shca, void* data,
}
case 0x4: /* Completion Queue */
{
- struct ehca_cq *cq = (struct ehca_cq*)data;
+ struct ehca_cq *cq = (struct ehca_cq *)data;
ehca_err(&shca->ib_device,
"CQ 0x%x (resource=%lx) has errors.",
@@ -172,33 +175,41 @@ error_data1:
}
-static void qp_event_callback(struct ehca_shca *shca,
- u64 eqe,
- enum ib_event_type event_type)
+static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
+ enum ib_event_type event_type, int fatal)
{
struct ib_event event;
struct ehca_qp *qp;
- unsigned long flags;
u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ read_unlock(&ehca_qp_idr_lock);
if (!qp)
return;
- ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+ if (fatal)
+ ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
- if (!qp->ib_qp.event_handler)
- return;
+ event.device = &shca->ib_device;
- event.device = &shca->ib_device;
- event.event = event_type;
- event.element.qp = &qp->ib_qp;
+ if (qp->ext_type == EQPT_SRQ) {
+ if (!qp->ib_srq.event_handler)
+ return;
- qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+ event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
+ event.element.srq = &qp->ib_srq;
+ qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
+ } else {
+ if (!qp->ib_qp.event_handler)
+ return;
+
+ event.event = event_type;
+ event.element.qp = &qp->ib_qp;
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+ }
return;
}
@@ -207,18 +218,22 @@ static void cq_event_callback(struct ehca_shca *shca,
u64 eqe)
{
struct ehca_cq *cq;
- unsigned long flags;
u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, token);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ if (cq)
+ atomic_inc(&cq->nr_events);
+ read_unlock(&ehca_cq_idr_lock);
if (!cq)
return;
ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
+ if (atomic_dec_and_test(&cq->nr_events))
+ wake_up(&cq->wait_completion);
+
return;
}
@@ -228,17 +243,17 @@ static void parse_identifier(struct ehca_shca *shca, u64 eqe)
switch (identifier) {
case 0x02: /* path migrated */
- qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
+ qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
break;
case 0x03: /* communication established */
- qp_event_callback(shca, eqe, IB_EVENT_COMM_EST);
+ qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
break;
case 0x04: /* send queue drained */
- qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED);
+ qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
break;
case 0x05: /* QP error */
case 0x06: /* QP error */
- qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL);
+ qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
break;
case 0x07: /* CQ error */
case 0x08: /* CQ error */
@@ -272,6 +287,11 @@ static void parse_identifier(struct ehca_shca *shca, u64 eqe)
ehca_err(&shca->ib_device, "Interface trace stopped.");
break;
case 0x14: /* first error capture info available */
+ ehca_info(&shca->ib_device, "First error capture available");
+ break;
+ case 0x15: /* SRQ limit reached */
+ qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
+ break;
default:
ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
identifier, shca->ib_device.name);
@@ -281,30 +301,61 @@ static void parse_identifier(struct ehca_shca *shca, u64 eqe)
return;
}
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
+static void dispatch_port_event(struct ehca_shca *shca, int port_num,
+ enum ib_event_type type, const char *msg)
{
struct ib_event event;
+
+ ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
+ event.device = &shca->ib_device;
+ event.event = type;
+ event.element.port_num = port_num;
+ ib_dispatch_event(&event);
+}
+
+static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
+{
+ struct ehca_sma_attr new_attr;
+ struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
+
+ ehca_query_sma_attr(shca, port_num, &new_attr);
+
+ if (new_attr.sm_sl != old_attr->sm_sl ||
+ new_attr.sm_lid != old_attr->sm_lid)
+ dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
+ "SM changed");
+
+ if (new_attr.lid != old_attr->lid ||
+ new_attr.lmc != old_attr->lmc)
+ dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
+ "LID changed");
+
+ if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
+ memcmp(new_attr.pkeys, old_attr->pkeys,
+ sizeof(u16) * new_attr.pkey_tbl_len))
+ dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
+ "P_Key changed");
+
+ *old_attr = new_attr;
+}
+
+static void parse_ec(struct ehca_shca *shca, u64 eqe)
+{
u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
switch (ec) {
case 0x30: /* port availability change */
if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
- ehca_info(&shca->ib_device,
- "port %x is active.", port);
- event.device = &shca->ib_device;
- event.event = IB_EVENT_PORT_ACTIVE;
- event.element.port_num = port;
shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
- ib_dispatch_event(&event);
+ dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
+ "is active");
+ ehca_query_sma_attr(shca, port,
+ &shca->sport[port - 1].saved_attr);
} else {
- ehca_info(&shca->ib_device,
- "port %x is inactive.", port);
- event.device = &shca->ib_device;
- event.event = IB_EVENT_PORT_ERR;
- event.element.port_num = port;
shca->sport[port - 1].port_state = IB_PORT_DOWN;
- ib_dispatch_event(&event);
+ dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
+ "is inactive");
}
break;
case 0x31:
@@ -312,24 +363,19 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
* disruptive change is caused by
* LID, PKEY or SM change
*/
- ehca_warn(&shca->ib_device,
- "disruptive port %x configuration change", port);
-
- ehca_info(&shca->ib_device,
- "port %x is inactive.", port);
- event.device = &shca->ib_device;
- event.event = IB_EVENT_PORT_ERR;
- event.element.port_num = port;
- shca->sport[port - 1].port_state = IB_PORT_DOWN;
- ib_dispatch_event(&event);
-
- ehca_info(&shca->ib_device,
- "port %x is active.", port);
- event.device = &shca->ib_device;
- event.event = IB_EVENT_PORT_ACTIVE;
- event.element.port_num = port;
- shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
- ib_dispatch_event(&event);
+ if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
+ ehca_warn(&shca->ib_device, "disruptive port "
+ "%d configuration change", port);
+
+ shca->sport[port - 1].port_state = IB_PORT_DOWN;
+ dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
+ "is inactive");
+
+ shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+ dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
+ "is active");
+ } else
+ notify_port_conf_change(shca, port);
break;
case 0x32: /* adapter malfunction */
ehca_err(&shca->ib_device, "Adapter malfunction.");
@@ -404,7 +450,6 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
u64 eqe_value;
u32 token;
- unsigned long flags;
struct ehca_cq *cq;
eqe_value = eqe->entry;
@@ -412,27 +457,24 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
ehca_dbg(&shca->ib_device, "Got completion event");
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, token);
+ if (cq)
+ atomic_inc(&cq->nr_events);
+ read_unlock(&ehca_cq_idr_lock);
if (cq == NULL) {
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq token=%x",
token);
return;
}
reset_eq_pending(cq);
- cq->nr_events++;
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
if (ehca_scaling_code)
queue_comp_task(cq);
else {
comp_event_callback(cq);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- cq->nr_events--;
- if (!cq->nr_events)
+ if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
}
} else {
ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -476,17 +518,17 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
eqe_value = eqe_cache[eqe_cnt].eqe->entry;
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
- spin_lock(&ehca_cq_idr_lock);
+ read_lock(&ehca_cq_idr_lock);
eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
+ if (eqe_cache[eqe_cnt].cq)
+ atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
+ read_unlock(&ehca_cq_idr_lock);
if (!eqe_cache[eqe_cnt].cq) {
- spin_unlock(&ehca_cq_idr_lock);
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq "
"token=%x", token);
continue;
}
- eqe_cache[eqe_cnt].cq->nr_events++;
- spin_unlock(&ehca_cq_idr_lock);
} else
eqe_cache[eqe_cnt].cq = NULL;
eqe_cnt++;
@@ -517,11 +559,8 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
else {
struct ehca_cq *cq = eq->eqe_cache[i].cq;
comp_event_callback(cq);
- spin_lock(&ehca_cq_idr_lock);
- cq->nr_events--;
- if (!cq->nr_events)
+ if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
- spin_unlock(&ehca_cq_idr_lock);
}
} else {
ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -547,7 +586,7 @@ void ehca_tasklet_eq(unsigned long data)
ehca_process_eq((struct ehca_shca*)data, 1);
}
-static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
+static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
int cpu;
unsigned long flags;
@@ -611,7 +650,7 @@ static void queue_comp_task(struct ehca_cq *__cq)
__queue_comp_task(__cq, cct);
}
-static void run_comp_task(struct ehca_cpu_comp_task* cct)
+static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
struct ehca_cq *cq;
unsigned long flags;
@@ -621,13 +660,10 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
while (!list_empty(&cct->cq_list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
spin_unlock_irqrestore(&cct->task_lock, flags);
- comp_event_callback(cq);
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
- cq->nr_events--;
- if (!cq->nr_events)
+ comp_event_callback(cq);
+ if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
spin_lock_irqsave(&cct->task_lock, flags);
spin_lock(&cq->task_lock);
@@ -644,12 +680,12 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
static int comp_task(void *__cct)
{
- struct ehca_cpu_comp_task* cct = __cct;
+ struct ehca_cpu_comp_task *cct = __cct;
int cql_empty;
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_INTERRUPTIBLE);
- while(!kthread_should_stop()) {
+ while (!kthread_should_stop()) {
add_wait_queue(&cct->wait_queue, &wait);
spin_lock_irq(&cct->task_lock);
@@ -723,7 +759,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
list_splice_init(&cct->cq_list, &list);
- while(!list_empty(&list)) {
+ while (!list_empty(&list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
list_del(&cq->entry);
@@ -746,7 +782,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
- if(!create_comp_task(pool, cpu)) {
+ if (!create_comp_task(pool, cpu)) {
ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
return NOTIFY_BAD;
}
@@ -816,7 +852,7 @@ int ehca_create_comp_pool(void)
#ifdef CONFIG_HOTPLUG_CPU
comp_pool_callback_nb.notifier_call = comp_pool_callback;
- comp_pool_callback_nb.priority =0;
+ comp_pool_callback_nb.priority = 0;
register_cpu_notifier(&comp_pool_callback_nb);
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
index 6ed06ee033ed..3346cb06cea6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.h
+++ b/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -47,7 +47,6 @@ struct ehca_shca;
#include <linux/interrupt.h>
#include <linux/types.h>
-#include <asm/atomic.h>
int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 37e7fe0908cf..dce503bb7d6b 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -49,6 +49,9 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
int ehca_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
+int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
+ struct ehca_sma_attr *attr);
+
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -78,8 +81,9 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
int num_phys_buf,
int mr_access_flags, u64 *iova_start);
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
- int mr_access_flags, struct ib_udata *udata);
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int mr_access_flags,
+ struct ib_udata *udata);
int ehca_rereg_phys_mr(struct ib_mr *mr,
int mr_rereg_mask,
@@ -154,6 +158,21 @@ int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
+int ehca_post_srq_recv(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
+
+struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+
+int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+
+int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+
+int ehca_destroy_srq(struct ib_srq *srq);
+
u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
struct ib_qp_init_attr *qp_init_attr);
@@ -174,7 +193,7 @@ void ehca_poll_eqs(unsigned long data);
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c3f99f33b49c..99036b65bb84 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -63,6 +63,7 @@ int ehca_port_act_time = 30;
int ehca_poll_all_eqs = 1;
int ehca_static_rate = -1;
int ehca_scaling_code = 0;
+int ehca_mr_largepage = 0;
module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
module_param_named(debug_level, ehca_debug_level, int, 0);
@@ -72,7 +73,8 @@ module_param_named(use_hp_mr, ehca_use_hp_mr, int, 0);
module_param_named(port_act_time, ehca_port_act_time, int, 0);
module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, 0);
module_param_named(static_rate, ehca_static_rate, int, 0);
-module_param_named(scaling_code, ehca_scaling_code, int, 0);
+module_param_named(scaling_code, ehca_scaling_code, int, 0);
+module_param_named(mr_largepage, ehca_mr_largepage, int, 0);
MODULE_PARM_DESC(open_aqp1,
"AQP1 on startup (0: no (default), 1: yes)");
@@ -94,22 +96,23 @@ MODULE_PARM_DESC(poll_all_eqs,
MODULE_PARM_DESC(static_rate,
"set permanent static rate (default: disabled)");
MODULE_PARM_DESC(scaling_code,
- "set scaling code (0: disabled, 1: enabled/default)");
+ "set scaling code (0: disabled/default, 1: enabled)");
+MODULE_PARM_DESC(mr_largepage,
+ "use large page for MR (0: use PAGE_SIZE (default), "
+ "1: use large page depending on MR size)");
-spinlock_t ehca_qp_idr_lock;
-spinlock_t ehca_cq_idr_lock;
-spinlock_t hcall_lock;
+DEFINE_RWLOCK(ehca_qp_idr_lock);
+DEFINE_RWLOCK(ehca_cq_idr_lock);
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
-
-static struct list_head shca_list; /* list of all registered ehcas */
-static spinlock_t shca_list_lock;
+static LIST_HEAD(shca_list); /* list of all registered ehcas */
+static DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;
#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache = NULL;
+static struct kmem_cache *ctblk_cache;
void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
@@ -127,6 +130,23 @@ void ehca_free_fw_ctrlblock(void *ptr)
}
#endif
+int ehca2ib_return_code(u64 ehca_rc)
+{
+ switch (ehca_rc) {
+ case H_SUCCESS:
+ return 0;
+ case H_RESOURCE: /* Resource in use */
+ case H_BUSY:
+ return -EBUSY;
+ case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
+ case H_CONSTRAINED: /* resource constraint */
+ case H_NO_MEM:
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ehca_create_slab_caches(void)
{
int ret;
@@ -161,19 +181,28 @@ static int ehca_create_slab_caches(void)
goto create_slab_caches5;
}
+ ret = ehca_init_small_qp_cache();
+ if (ret) {
+ ehca_gen_err("Cannot create small queue SLAB cache.");
+ goto create_slab_caches6;
+ }
+
#ifdef CONFIG_PPC_64K_PAGES
ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
EHCA_PAGESIZE, H_CB_ALIGNMENT,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!ctblk_cache) {
ehca_gen_err("Cannot create ctblk SLAB cache.");
- ehca_cleanup_mrmw_cache();
- goto create_slab_caches5;
+ ehca_cleanup_small_qp_cache();
+ goto create_slab_caches6;
}
#endif
return 0;
+create_slab_caches6:
+ ehca_cleanup_mrmw_cache();
+
create_slab_caches5:
ehca_cleanup_av_cache();
@@ -191,6 +220,7 @@ create_slab_caches2:
static void ehca_destroy_slab_caches(void)
{
+ ehca_cleanup_small_qp_cache();
ehca_cleanup_mrmw_cache();
ehca_cleanup_av_cache();
ehca_cleanup_qp_cache();
@@ -202,14 +232,38 @@ static void ehca_destroy_slab_caches(void)
#endif
}
-#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
-#define EHCA_REVID EHCA_BMASK_IBM(40,63)
+#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
+#define EHCA_REVID EHCA_BMASK_IBM(40, 63)
+
+static struct cap_descr {
+ u64 mask;
+ char *descr;
+} hca_cap_descr[] = {
+ { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
+ { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
+ { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
+ { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
+ { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
+ { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
+ { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
+ { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
+ { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
+ { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
+ { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
+ { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
+ { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
+ { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
+ { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
+ { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
+ { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
+};
int ehca_sense_attributes(struct ehca_shca *shca)
{
- int ret = 0;
+ int i, ret = 0;
u64 h_ret;
struct hipz_query_hca *rblock;
+ struct hipz_query_port *port;
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
@@ -222,7 +276,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
ehca_gen_err("Cannot query device properties. h_ret=%lx",
h_ret);
ret = -EPERM;
- goto num_ports1;
+ goto sense_attributes1;
}
if (ehca_nr_ports == 1)
@@ -241,19 +295,52 @@ int ehca_sense_attributes(struct ehca_shca *shca)
ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
- if ((hcaaver == 1) && (revid == 0))
- shca->hw_level = 0;
- else if ((hcaaver == 1) && (revid == 1))
- shca->hw_level = 1;
- else if ((hcaaver == 1) && (revid == 2))
- shca->hw_level = 2;
- }
+ if (hcaaver == 1) {
+ if (revid <= 3)
+ shca->hw_level = 0x10 | (revid + 1);
+ else
+ shca->hw_level = 0x14;
+ } else if (hcaaver == 2) {
+ if (revid == 0)
+ shca->hw_level = 0x21;
+ else if (revid == 0x10)
+ shca->hw_level = 0x22;
+ else if (revid == 0x20 || revid == 0x21)
+ shca->hw_level = 0x23;
+ }
+
+ if (!shca->hw_level) {
+ ehca_gen_warn("unknown hardware version"
+ " - assuming default level");
+ shca->hw_level = 0x22;
+ }
+ } else
+ shca->hw_level = ehca_hw_level;
ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
shca->sport[0].rate = IB_RATE_30_GBPS;
shca->sport[1].rate = IB_RATE_30_GBPS;
-num_ports1:
+ shca->hca_cap = rblock->hca_cap_indicators;
+ ehca_gen_dbg(" ... HCA capabilities:");
+ for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
+ if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
+ ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
+
+ shca->hca_cap_mr_pgsize = rblock->memory_page_size_supported;
+
+ port = (struct hipz_query_port *)rblock;
+ h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
+ if (h_ret != H_SUCCESS) {
+ ehca_gen_err("Cannot query port properties. h_ret=%lx",
+ h_ret);
+ ret = -EPERM;
+ goto sense_attributes1;
+ }
+
+ shca->max_mtu = port->max_mtu;
+
+sense_attributes1:
ehca_free_fw_ctrlblock(rblock);
return ret;
}
@@ -293,7 +380,7 @@ int ehca_init_device(struct ehca_shca *shca)
strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
shca->ib_device.owner = THIS_MODULE;
- shca->ib_device.uverbs_abi_ver = 6;
+ shca->ib_device.uverbs_abi_ver = 7;
shca->ib_device.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -361,6 +448,20 @@ int ehca_init_device(struct ehca_shca *shca)
/* shca->ib_device.process_mad = ehca_process_mad; */
shca->ib_device.mmap = ehca_mmap;
+ if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+ shca->ib_device.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+
+ shca->ib_device.create_srq = ehca_create_srq;
+ shca->ib_device.modify_srq = ehca_modify_srq;
+ shca->ib_device.query_srq = ehca_query_srq;
+ shca->ib_device.destroy_srq = ehca_destroy_srq;
+ shca->ib_device.post_srq_recv = ehca_post_srq_recv;
+ }
+
return ret;
}
@@ -377,7 +478,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
return -EPERM;
}
- ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
+ ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0);
if (IS_ERR(ibcq)) {
ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
return PTR_ERR(ibcq);
@@ -523,6 +624,14 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+static ssize_t ehca_show_mr_largepage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", ehca_mr_largepage);
+}
+static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
+
static struct attribute *ehca_dev_attrs[] = {
&dev_attr_adapter_handle.attr,
&dev_attr_num_ports.attr,
@@ -539,6 +648,7 @@ static struct attribute *ehca_dev_attrs[] = {
&dev_attr_cur_mw.attr,
&dev_attr_max_pd.attr,
&dev_attr_max_ah.attr,
+ &dev_attr_mr_largepage.attr,
NULL
};
@@ -604,7 +714,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
}
/* create internal protection domain */
- ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
+ ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
if (IS_ERR(ibpd)) {
ehca_err(&shca->ib_device, "Cannot create internal PD.");
ret = PTR_ERR(ibpd);
@@ -800,27 +910,22 @@ int __init ehca_module_init(void)
printk(KERN_INFO "eHCA Infiniband Device Driver "
"(Rel.: SVNEHCA_0023)\n");
- idr_init(&ehca_qp_idr);
- idr_init(&ehca_cq_idr);
- spin_lock_init(&ehca_qp_idr_lock);
- spin_lock_init(&ehca_cq_idr_lock);
- spin_lock_init(&hcall_lock);
- INIT_LIST_HEAD(&shca_list);
- spin_lock_init(&shca_list_lock);
-
- if ((ret = ehca_create_comp_pool())) {
+ ret = ehca_create_comp_pool();
+ if (ret) {
ehca_gen_err("Cannot create comp pool.");
return ret;
}
- if ((ret = ehca_create_slab_caches())) {
+ ret = ehca_create_slab_caches();
+ if (ret) {
ehca_gen_err("Cannot create SLAB caches");
ret = -ENOMEM;
goto module_init1;
}
- if ((ret = ibmebus_register_driver(&ehca_driver))) {
+ ret = ibmebus_register_driver(&ehca_driver);
+ if (ret) {
ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
goto module_init2;
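
The slab-cache setup in ehca_main.c above gains one more stage (the small-queue cache) plus a matching create_slab_caches6 unwind label, following the usual kernel idiom of one label per completed stage, released in reverse order on failure. A generic sketch of that idiom; the stage_*() functions are placeholders, not driver symbols:

/*
 * Sketch of the staged-init/goto-unwind idiom used above.
 * The stage_*() functions are hypothetical placeholders.
 */
int stage_one_init(void);
void stage_one_cleanup(void);
int stage_two_init(void);
void stage_two_cleanup(void);
int stage_three_init(void);

static int demo_init_stages(void)
{
	int ret;

	ret = stage_one_init();
	if (ret)
		goto out;

	ret = stage_two_init();
	if (ret)
		goto undo_one;

	ret = stage_three_init();
	if (ret)
		goto undo_two;

	return 0;

undo_two:
	stage_two_cleanup();	/* unwind in reverse order of setup */
undo_one:
	stage_one_cleanup();
out:
	return ret;
}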
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index add79bd44e39..c1b868b79d67 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -5,6 +5,7 @@
*
* Authors: Dietmar Decker <ddecker@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
+ * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
@@ -48,17 +49,53 @@
#include "hcp_if.h"
#include "hipz_hw.h"
+#define NUM_CHUNKS(length, chunk_size) \
+ (((length) + (chunk_size - 1)) / (chunk_size))
+/* max number of rpages (per hcall register_rpages) */
+#define MAX_RPAGES 512
+
static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
+enum ehca_mr_pgsize {
+ EHCA_MR_PGSIZE4K = 0x1000L,
+ EHCA_MR_PGSIZE64K = 0x10000L,
+ EHCA_MR_PGSIZE1M = 0x100000L,
+ EHCA_MR_PGSIZE16M = 0x1000000L
+};
+
+extern int ehca_mr_largepage;
+
+static u32 ehca_encode_hwpage_size(u32 pgsize)
+{
+ u32 idx = 0;
+ pgsize >>= 12;
+ /*
+ * map mr page size into hw code:
+ * 0, 1, 2, 3 for 4K, 64K, 1M, 16M
+ */
+ while (!(pgsize & 1)) {
+ idx++;
+ pgsize >>= 4;
+ }
+ return idx;
+}
+
+static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
+{
+ if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
+ return EHCA_MR_PGSIZE16M;
+ return EHCA_MR_PGSIZE4K;
+}
+
static struct ehca_mr *ehca_mr_new(void)
{
struct ehca_mr *me;
me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
- if (me) {
+ if (me)
spin_lock_init(&me->mrlock);
- } else
+ else
ehca_gen_err("alloc failed");
return me;
@@ -74,9 +111,9 @@ static struct ehca_mw *ehca_mw_new(void)
struct ehca_mw *me;
me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
- if (me) {
+ if (me)
spin_lock_init(&me->mwlock);
- } else
+ else
ehca_gen_err("alloc failed");
return me;
@@ -106,11 +143,12 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
goto get_dma_mr_exit0;
}
- ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
+ ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
mr_access_flags, e_pd,
&e_maxmr->ib.ib_mr.lkey,
&e_maxmr->ib.ib_mr.rkey);
if (ret) {
+ ehca_mr_delete(e_maxmr);
ib_mr = ERR_PTR(ret);
goto get_dma_mr_exit0;
}
@@ -144,9 +182,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
u64 size;
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- u32 num_pages_mr;
- u32 num_pages_4k; /* 4k portion "pages" */
if ((num_phys_buf <= 0) || !phys_buf_array) {
ehca_err(pd->device, "bad input values: num_phys_buf=%x "
@@ -190,12 +225,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
goto reg_phys_mr_exit0;
}
- /* determine number of MR pages */
- num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
- PAGE_SIZE - 1) / PAGE_SIZE);
- num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
- EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
-
/* register MR on HCA */
if (ehca_mr_is_maxmr(size, iova_start)) {
e_mr->flags |= EHCA_MR_FLAG_MAXMR;
@@ -207,13 +236,26 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
goto reg_phys_mr_exit1;
}
} else {
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_pages = num_pages_mr;
- pginfo.num_4k = num_pages_4k;
- pginfo.num_phys_buf = num_phys_buf;
- pginfo.phys_buf_array = phys_buf_array;
- pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
- EHCA_PAGESIZE);
+ struct ehca_mr_pginfo pginfo;
+ u32 num_kpages;
+ u32 num_hwpages;
+ u64 hw_pgsize;
+
+ num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
+ PAGE_SIZE);
+ /* for kernel space, try the largest possible page size */
+ hw_pgsize = ehca_get_max_hwpage_size(shca);
+ num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
+ hw_pgsize);
+ memset(&pginfo, 0, sizeof(pginfo));
+ pginfo.type = EHCA_MR_PGI_PHYS;
+ pginfo.num_kpages = num_kpages;
+ pginfo.hwpage_size = hw_pgsize;
+ pginfo.num_hwpages = num_hwpages;
+ pginfo.u.phy.num_phys_buf = num_phys_buf;
+ pginfo.u.phy.phys_buf_array = phys_buf_array;
+ pginfo.next_hwpage =
+ ((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
@@ -240,18 +282,20 @@ reg_phys_mr_exit0:
/*----------------------------------------------------------------------*/
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
- int mr_access_flags, struct ib_udata *udata)
+struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int mr_access_flags,
+ struct ib_udata *udata)
{
struct ib_mr *ib_mr;
struct ehca_mr *e_mr;
struct ehca_shca *shca =
container_of(pd->device, struct ehca_shca, ib_device);
struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+ struct ehca_mr_pginfo pginfo;
int ret;
- u32 num_pages_mr;
- u32 num_pages_4k; /* 4k portion "pages" */
+ u32 num_kpages;
+ u32 num_hwpages;
+ u64 hwpage_size;
if (!pd) {
ehca_gen_err("bad pd=%p", pd);
@@ -289,7 +333,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
mr_access_flags);
if (IS_ERR(e_mr->umem)) {
- ib_mr = (void *) e_mr->umem;
+ ib_mr = (void *)e_mr->umem;
goto reg_user_mr_exit1;
}
@@ -301,23 +345,52 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
}
/* determine number of MR pages */
- num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
- PAGE_SIZE);
- num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
- EHCA_PAGESIZE);
+ num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
+ /* select proper hw_pgsize */
+ if (ehca_mr_largepage &&
+ (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
+ if (length <= EHCA_MR_PGSIZE4K
+ && PAGE_SIZE == EHCA_MR_PGSIZE4K)
+ hwpage_size = EHCA_MR_PGSIZE4K;
+ else if (length <= EHCA_MR_PGSIZE64K)
+ hwpage_size = EHCA_MR_PGSIZE64K;
+ else if (length <= EHCA_MR_PGSIZE1M)
+ hwpage_size = EHCA_MR_PGSIZE1M;
+ else
+ hwpage_size = EHCA_MR_PGSIZE16M;
+ } else
+ hwpage_size = EHCA_MR_PGSIZE4K;
+ ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);
+reg_user_mr_fallback:
+ num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
/* register MR on HCA */
- pginfo.type = EHCA_MR_PGI_USER;
- pginfo.num_pages = num_pages_mr;
- pginfo.num_4k = num_pages_4k;
- pginfo.region = e_mr->umem;
- pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
- pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
- (&e_mr->umem->chunk_list),
- list);
-
- ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
- &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
+ memset(&pginfo, 0, sizeof(pginfo));
+ pginfo.type = EHCA_MR_PGI_USER;
+ pginfo.hwpage_size = hwpage_size;
+ pginfo.num_kpages = num_kpages;
+ pginfo.num_hwpages = num_hwpages;
+ pginfo.u.usr.region = e_mr->umem;
+ pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
+ pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
+ (&e_mr->umem->chunk_list),
+ list);
+
+ ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
+ e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
+ &e_mr->ib.ib_mr.rkey);
+ if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
+ ehca_warn(pd->device, "failed to register mr "
+ "with hwpage_size=%lx", hwpage_size);
+ ehca_info(pd->device, "try to register mr with "
+ "kpage_size=%lx", PAGE_SIZE);
+ /*
+ * this means kpages are not contiguous for a hw page
+ * try kernel page size as fallback solution
+ */
+ hwpage_size = PAGE_SIZE;
+ goto reg_user_mr_fallback;
+ }
if (ret) {
ib_mr = ERR_PTR(ret);
goto reg_user_mr_exit2;
@@ -360,9 +433,9 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
struct ehca_pd *new_pd;
u32 tmp_lkey, tmp_rkey;
unsigned long sl_flags;
- u32 num_pages_mr = 0;
- u32 num_pages_4k = 0; /* 4k portion "pages" */
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+ u32 num_kpages = 0;
+ u32 num_hwpages = 0;
+ struct ehca_mr_pginfo pginfo;
u32 cur_pid = current->tgid;
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
@@ -414,7 +487,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
goto rereg_phys_mr_exit0;
}
if (!phys_buf_array || num_phys_buf <= 0) {
- ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
+ ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
" phys_buf_array=%p num_phys_buf=%x",
mr_rereg_mask, phys_buf_array, num_phys_buf);
ret = -EINVAL;
@@ -438,12 +511,14 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
/* set requested values dependent on rereg request */
spin_lock_irqsave(&e_mr->mrlock, sl_flags);
- new_start = e_mr->start; /* new == old address */
- new_size = e_mr->size; /* new == old length */
- new_acl = e_mr->acl; /* new == old access control */
- new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/
+ new_start = e_mr->start;
+ new_size = e_mr->size;
+ new_acl = e_mr->acl;
+ new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+ u64 hw_pgsize = ehca_get_max_hwpage_size(shca);
+
new_start = iova_start; /* change address */
/* check physical buffer list and calculate size */
ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
@@ -458,17 +533,19 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
ret = -EINVAL;
goto rereg_phys_mr_exit1;
}
- num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
- PAGE_SIZE - 1) / PAGE_SIZE);
- num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
- EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_pages = num_pages_mr;
- pginfo.num_4k = num_pages_4k;
- pginfo.num_phys_buf = num_phys_buf;
- pginfo.phys_buf_array = phys_buf_array;
- pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
- EHCA_PAGESIZE);
+ num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
+ new_size, PAGE_SIZE);
+ num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
+ new_size, hw_pgsize);
+ memset(&pginfo, 0, sizeof(pginfo));
+ pginfo.type = EHCA_MR_PGI_PHYS;
+ pginfo.num_kpages = num_kpages;
+ pginfo.hwpage_size = hw_pgsize;
+ pginfo.num_hwpages = num_hwpages;
+ pginfo.u.phy.num_phys_buf = num_phys_buf;
+ pginfo.u.phy.phys_buf_array = phys_buf_array;
+ pginfo.next_hwpage =
+ ((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
}
if (mr_rereg_mask & IB_MR_REREG_ACCESS)
new_acl = mr_access_flags;
@@ -510,7 +587,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
u32 cur_pid = current->tgid;
unsigned long sl_flags;
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+ struct ehca_mr_hipzout_parms hipzout;
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
(my_pd->ownpid != cur_pid)) {
@@ -536,14 +613,14 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
"hca_hndl=%lx mr_hndl=%lx lkey=%x",
h_ret, mr, shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle, mr->lkey);
- ret = ehca_mrmw_map_hrc_query_mr(h_ret);
+ ret = ehca2ib_return_code(h_ret);
goto query_mr_exit1;
}
- mr_attr->pd = mr->pd;
+ mr_attr->pd = mr->pd;
mr_attr->device_virt_addr = hipzout.vaddr;
- mr_attr->size = hipzout.len;
- mr_attr->lkey = hipzout.lkey;
- mr_attr->rkey = hipzout.rkey;
+ mr_attr->size = hipzout.len;
+ mr_attr->lkey = hipzout.lkey;
+ mr_attr->rkey = hipzout.rkey;
ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
query_mr_exit1:
@@ -596,7 +673,7 @@ int ehca_dereg_mr(struct ib_mr *mr)
"e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle, mr->lkey);
- ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ ret = ehca2ib_return_code(h_ret);
goto dereg_mr_exit0;
}
@@ -622,7 +699,7 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_shca *shca =
container_of(pd->device, struct ehca_shca, ib_device);
- struct ehca_mw_hipzout_parms hipzout = {{0},0};
+ struct ehca_mw_hipzout_parms hipzout;
e_mw = ehca_mw_new();
if (!e_mw) {
@@ -636,7 +713,7 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
"shca=%p hca_hndl=%lx mw=%p",
h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
- ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
+ ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
goto alloc_mw_exit1;
}
/* successful MW allocation */
@@ -679,7 +756,7 @@ int ehca_dealloc_mw(struct ib_mw *mw)
"mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
e_mw->ipz_mw_handle.handle);
- return ehca_mrmw_map_hrc_free_mw(h_ret);
+ return ehca2ib_return_code(h_ret);
}
/* successful deallocation */
ehca_mw_delete(e_mw);
@@ -699,7 +776,8 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
struct ehca_mr *e_fmr;
int ret;
u32 tmp_lkey, tmp_rkey;
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+ struct ehca_mr_pginfo pginfo;
+ u64 hw_pgsize;
/* check other parameters */
if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
@@ -729,8 +807,8 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
ib_fmr = ERR_PTR(-EINVAL);
goto alloc_fmr_exit0;
}
- if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
- ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
+ hw_pgsize = ehca_get_max_hwpage_size(shca);
+ if ((1 << fmr_attr->page_shift) != hw_pgsize) {
ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
fmr_attr->page_shift);
ib_fmr = ERR_PTR(-EINVAL);
@@ -745,6 +823,11 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
e_fmr->flags |= EHCA_MR_FLAG_FMR;
/* register MR on HCA */
+ memset(&pginfo, 0, sizeof(pginfo));
+ /*
+	 * pginfo.num_hwpages == 0, i.e. ehca_reg_mr_rpages() is not called
+	 * here; page registration is deferred to map_phys_fmr()
+ */
ret = ehca_reg_mr(shca, e_fmr, NULL,
fmr_attr->max_pages * (1 << fmr_attr->page_shift),
mr_access_flags, e_pd, &pginfo,
@@ -755,6 +838,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
}
/* successful */
+ e_fmr->hwpage_size = hw_pgsize;
e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
e_fmr->fmr_max_pages = fmr_attr->max_pages;
e_fmr->fmr_max_maps = fmr_attr->max_maps;
@@ -783,7 +867,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
container_of(fmr->device, struct ehca_shca, ib_device);
struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+ struct ehca_mr_pginfo pginfo;
u32 tmp_lkey, tmp_rkey;
if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
@@ -809,14 +893,18 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
}
- pginfo.type = EHCA_MR_PGI_FMR;
- pginfo.num_pages = list_len;
- pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
- pginfo.page_list = page_list;
- pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
- EHCA_PAGESIZE);
+ memset(&pginfo, 0, sizeof(pginfo));
+ pginfo.type = EHCA_MR_PGI_FMR;
+ pginfo.num_kpages = list_len;
+ pginfo.hwpage_size = e_fmr->hwpage_size;
+ pginfo.num_hwpages =
+ list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
+ pginfo.u.fmr.page_list = page_list;
+ pginfo.next_hwpage =
+ (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
+ pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
- ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
+ ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
list_len * e_fmr->fmr_page_size,
e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
if (ret)
@@ -831,8 +919,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
map_phys_fmr_exit0:
if (ret)
ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
- "iova=%lx",
- ret, fmr, page_list, list_len, iova);
+ "iova=%lx", ret, fmr, page_list, list_len, iova);
return ret;
} /* end ehca_map_phys_fmr() */
@@ -922,7 +1009,7 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr)
"hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
h_ret, e_fmr, shca->ipz_hca_handle.handle,
e_fmr->ipz_mr_handle.handle, fmr->lkey);
- ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ ret = ehca2ib_return_code(h_ret);
goto free_fmr_exit0;
}
/* successful deregistration */
@@ -950,12 +1037,12 @@ int ehca_reg_mr(struct ehca_shca *shca,
int ret;
u64 h_ret;
u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+ struct ehca_mr_hipzout_parms hipzout;
ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
if (ehca_use_hp_mr == 1)
- hipz_acl |= 0x00000001;
+ hipz_acl |= 0x00000001;
h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
(u64)iova_start, size, hipz_acl,
@@ -963,7 +1050,7 @@ int ehca_reg_mr(struct ehca_shca *shca,
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
"hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
- ret = ehca_mrmw_map_hrc_alloc(h_ret);
+ ret = ehca2ib_return_code(h_ret);
goto ehca_reg_mr_exit0;
}
@@ -974,11 +1061,12 @@ int ehca_reg_mr(struct ehca_shca *shca,
goto ehca_reg_mr_exit1;
/* successful registration */
- e_mr->num_pages = pginfo->num_pages;
- e_mr->num_4k = pginfo->num_4k;
- e_mr->start = iova_start;
- e_mr->size = size;
- e_mr->acl = acl;
+ e_mr->num_kpages = pginfo->num_kpages;
+ e_mr->num_hwpages = pginfo->num_hwpages;
+ e_mr->hwpage_size = pginfo->hwpage_size;
+ e_mr->start = iova_start;
+ e_mr->size = size;
+ e_mr->acl = acl;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
return 0;
@@ -988,10 +1076,10 @@ ehca_reg_mr_exit1:
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
"iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
- "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
+ "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
h_ret, shca, e_mr, iova_start, size, acl, e_pd,
- hipzout.lkey, pginfo, pginfo->num_pages,
- pginfo->num_4k, ret);
+ hipzout.lkey, pginfo, pginfo->num_kpages,
+ pginfo->num_hwpages, ret);
ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
"not recoverable");
}
@@ -999,9 +1087,9 @@ ehca_reg_mr_exit0:
if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
"iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
- "num_pages=%lx num_4k=%lx",
+ "num_kpages=%lx num_hwpages=%lx",
ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
- pginfo->num_pages, pginfo->num_4k);
+ pginfo->num_kpages, pginfo->num_hwpages);
return ret;
} /* end ehca_reg_mr() */
@@ -1018,6 +1106,9 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
u32 i;
u64 *kpage;
+ if (!pginfo->num_hwpages) /* in case of fmr */
+ return 0;
+
kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!kpage) {
ehca_err(&shca->ib_device, "kpage alloc failed");
@@ -1025,25 +1116,25 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
goto ehca_reg_mr_rpages_exit0;
}
- /* max 512 pages per shot */
- for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {
+	/* register at most MAX_RPAGES hw pages per hipz call */
+ for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
- if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
- rnum = pginfo->num_4k % 512; /* last shot */
+ if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
+ rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
if (rnum == 0)
- rnum = 512; /* last shot is full */
+ rnum = MAX_RPAGES; /* last shot is full */
} else
- rnum = 512;
+ rnum = MAX_RPAGES;
+
+ ret = ehca_set_pagebuf(pginfo, rnum, kpage);
+ if (ret) {
+ ehca_err(&shca->ib_device, "ehca_set_pagebuf "
+ "bad rc, ret=%x rnum=%x kpage=%p",
+ ret, rnum, kpage);
+ goto ehca_reg_mr_rpages_exit1;
+ }
if (rnum > 1) {
- ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
- if (ret) {
- ehca_err(&shca->ib_device, "ehca_set_pagebuf "
- "bad rc, ret=%x rnum=%x kpage=%p",
- ret, rnum, kpage);
- ret = -EFAULT;
- goto ehca_reg_mr_rpages_exit1;
- }
rpage = virt_to_abs(kpage);
if (!rpage) {
ehca_err(&shca->ib_device, "kpage=%p i=%x",
@@ -1051,21 +1142,15 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
ret = -EFAULT;
goto ehca_reg_mr_rpages_exit1;
}
- } else { /* rnum==1 */
- ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
- if (ret) {
- ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
- "bad rc, ret=%x i=%x", ret, i);
- ret = -EFAULT;
- goto ehca_reg_mr_rpages_exit1;
- }
- }
+ } else
+ rpage = *kpage;
- h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
- 0, /* pagesize 4k */
- 0, rpage, rnum);
+ h_ret = hipz_h_register_rpage_mr(
+ shca->ipz_hca_handle, e_mr,
+ ehca_encode_hwpage_size(pginfo->hwpage_size),
+ 0, rpage, rnum);
- if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
+ if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
/*
* check for 'registration complete'==H_SUCCESS
* and for 'page registered'==H_PAGE_REGISTERED
@@ -1078,7 +1163,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle,
e_mr->ib.ib_mr.lkey);
- ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
+ ret = ehca2ib_return_code(h_ret);
break;
} else
ret = 0;
@@ -1089,7 +1174,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
e_mr->ib.ib_mr.lkey,
shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle);
- ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
+ ret = ehca2ib_return_code(h_ret);
break;
} else
ret = 0;
@@ -1101,8 +1186,8 @@ ehca_reg_mr_rpages_exit1:
ehca_reg_mr_rpages_exit0:
if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
- "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
- pginfo->num_pages, pginfo->num_4k);
+ "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
+ pginfo, pginfo->num_kpages, pginfo->num_hwpages);
return ret;
} /* end ehca_reg_mr_rpages() */
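The batching above caps each hipz_h_register_rpage_mr() call at MAX_RPAGES hardware pages. A self-contained sketch of the per-batch arithmetic, assuming MAX_RPAGES is the same 512-entry limit the old code hard-coded:

/* illustrative only: how many rpages go into batch i of the loop above */
static unsigned int rpages_in_batch(unsigned long num_hwpages,
				    unsigned int i, unsigned int max_rpages)
{
	unsigned int nbatches = (num_hwpages + max_rpages - 1) / max_rpages;

	if (i < nbatches - 1)
		return max_rpages;		/* full batch */
	/* last batch: the remainder, or a full batch if it divides evenly */
	return num_hwpages % max_rpages ? num_hwpages % max_rpages
					: max_rpages;
}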
@@ -1124,10 +1209,10 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
u64 *kpage;
u64 rpage;
struct ehca_mr_pginfo pginfo_save;
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+ struct ehca_mr_hipzout_parms hipzout;
ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!kpage) {
@@ -1137,12 +1222,12 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
}
pginfo_save = *pginfo;
- ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
+ ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
if (ret) {
ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
- "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
- e_mr, pginfo, pginfo->type, pginfo->num_pages,
- pginfo->num_4k,kpage);
+ "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
+ "kpage=%p", e_mr, pginfo, pginfo->type,
+ pginfo->num_kpages, pginfo->num_hwpages, kpage);
goto ehca_rereg_mr_rereg1_exit1;
}
rpage = virt_to_abs(kpage);
@@ -1164,7 +1249,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
"(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
*pginfo = pginfo_save;
ret = -EAGAIN;
- } else if ((u64*)hipzout.vaddr != iova_start) {
+ } else if ((u64 *)hipzout.vaddr != iova_start) {
ehca_err(&shca->ib_device, "PHYP changed iova_start in "
"rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
"mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
@@ -1176,11 +1261,12 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
* successful reregistration
* note: start and start_out are identical for eServer HCAs
*/
- e_mr->num_pages = pginfo->num_pages;
- e_mr->num_4k = pginfo->num_4k;
- e_mr->start = iova_start;
- e_mr->size = size;
- e_mr->acl = acl;
+ e_mr->num_kpages = pginfo->num_kpages;
+ e_mr->num_hwpages = pginfo->num_hwpages;
+ e_mr->hwpage_size = pginfo->hwpage_size;
+ e_mr->start = iova_start;
+ e_mr->size = size;
+ e_mr->acl = acl;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
}
@@ -1190,9 +1276,9 @@ ehca_rereg_mr_rereg1_exit1:
ehca_rereg_mr_rereg1_exit0:
if ( ret && (ret != -EAGAIN) )
ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
- "pginfo=%p num_pages=%lx num_4k=%lx",
- ret, *lkey, *rkey, pginfo, pginfo->num_pages,
- pginfo->num_4k);
+ "pginfo=%p num_kpages=%lx num_hwpages=%lx",
+ ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
+ pginfo->num_hwpages);
return ret;
} /* end ehca_rereg_mr_rereg1() */
@@ -1214,10 +1300,12 @@ int ehca_rereg_mr(struct ehca_shca *shca,
int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
/* first determine reregistration hCall(s) */
- if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
- (pginfo->num_4k > e_mr->num_4k)) {
- ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
- "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
+ if ((pginfo->num_hwpages > MAX_RPAGES) ||
+ (e_mr->num_hwpages > MAX_RPAGES) ||
+ (pginfo->num_hwpages > e_mr->num_hwpages)) {
+ ehca_dbg(&shca->ib_device, "Rereg3 case, "
+ "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
+ pginfo->num_hwpages, e_mr->num_hwpages);
rereg_1_hcall = 0;
rereg_3_hcall = 1;
}
@@ -1253,7 +1341,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
h_ret, e_mr, shca->ipz_hca_handle.handle,
e_mr->ipz_mr_handle.handle,
e_mr->ib.ib_mr.lkey);
- ret = ehca_mrmw_map_hrc_free_mr(h_ret);
+ ret = ehca2ib_return_code(h_ret);
goto ehca_rereg_mr_exit0;
}
/* clean ehca_mr_t, without changing struct ib_mr and lock */
@@ -1262,13 +1350,14 @@ int ehca_rereg_mr(struct ehca_shca *shca,
/* set some MR values */
e_mr->flags = save_mr.flags;
+ e_mr->hwpage_size = save_mr.hwpage_size;
e_mr->fmr_page_size = save_mr.fmr_page_size;
e_mr->fmr_max_pages = save_mr.fmr_max_pages;
e_mr->fmr_max_maps = save_mr.fmr_max_maps;
e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
- e_pd, pginfo, lkey, rkey);
+ e_pd, pginfo, lkey, rkey);
if (ret) {
u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
memcpy(&e_mr->flags, &(save_mr.flags),
@@ -1281,9 +1370,9 @@ ehca_rereg_mr_exit0:
if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
"iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
- "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+ "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
"rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
- acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
+ acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
rereg_1_hcall, rereg_3_hcall);
return ret;
} /* end ehca_rereg_mr() */
@@ -1295,97 +1384,84 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
{
int ret = 0;
u64 h_ret;
- int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
- int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
struct ehca_pd *e_pd =
container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
struct ehca_mr save_fmr;
u32 tmp_lkey, tmp_rkey;
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
-
- /* first check if reregistration hCall can be used for unmap */
- if (e_fmr->fmr_max_pages > 512) {
- rereg_1_hcall = 0;
- rereg_3_hcall = 1;
- }
+ struct ehca_mr_pginfo pginfo;
+ struct ehca_mr_hipzout_parms hipzout;
+ struct ehca_mr save_mr;
- if (rereg_1_hcall) {
+ if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
/*
* note: after using rereg hcall with len=0,
* rereg hcall must be used again for registering pages
*/
h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
0, 0, e_pd->fw_pd, 0, &hipzout);
- if (h_ret != H_SUCCESS) {
- /*
- * should not happen, because length checked above,
- * FMRs are not shared and no MW bound to FMRs
- */
- ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
- "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
- "mr_hndl=%lx lkey=%x lkey_out=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle,
- e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
- rereg_3_hcall = 1;
- } else {
+ if (h_ret == H_SUCCESS) {
/* successful reregistration */
e_fmr->start = NULL;
e_fmr->size = 0;
tmp_lkey = hipzout.lkey;
tmp_rkey = hipzout.rkey;
+ return 0;
}
+ /*
+ * should not happen, because length checked above,
+ * FMRs are not shared and no MW bound to FMRs
+ */
+ ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
+ "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
+ "mr_hndl=%lx lkey=%x lkey_out=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle,
+ e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
+ /* try free and rereg */
}
- if (rereg_3_hcall) {
- struct ehca_mr save_mr;
-
- /* first free old FMR */
- h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
- if (h_ret != H_SUCCESS) {
- ehca_err(&shca->ib_device, "hipz_free_mr failed, "
- "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
- "lkey=%x",
- h_ret, e_fmr, shca->ipz_hca_handle.handle,
- e_fmr->ipz_mr_handle.handle,
- e_fmr->ib.ib_fmr.lkey);
- ret = ehca_mrmw_map_hrc_free_mr(h_ret);
- goto ehca_unmap_one_fmr_exit0;
- }
- /* clean ehca_mr_t, without changing lock */
- save_fmr = *e_fmr;
- ehca_mr_deletenew(e_fmr);
-
- /* set some MR values */
- e_fmr->flags = save_fmr.flags;
- e_fmr->fmr_page_size = save_fmr.fmr_page_size;
- e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
- e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
- e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
- e_fmr->acl = save_fmr.acl;
-
- pginfo.type = EHCA_MR_PGI_FMR;
- pginfo.num_pages = 0;
- pginfo.num_4k = 0;
- ret = ehca_reg_mr(shca, e_fmr, NULL,
- (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
- e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
- &tmp_rkey);
- if (ret) {
- u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
- memcpy(&e_fmr->flags, &(save_mr.flags),
- sizeof(struct ehca_mr) - offset);
- goto ehca_unmap_one_fmr_exit0;
- }
+ /* first free old FMR */
+ h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
+ if (h_ret != H_SUCCESS) {
+ ehca_err(&shca->ib_device, "hipz_free_mr failed, "
+ "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+ "lkey=%x",
+ h_ret, e_fmr, shca->ipz_hca_handle.handle,
+ e_fmr->ipz_mr_handle.handle,
+ e_fmr->ib.ib_fmr.lkey);
+ ret = ehca2ib_return_code(h_ret);
+ goto ehca_unmap_one_fmr_exit0;
+ }
+ /* clean ehca_mr_t, without changing lock */
+ save_fmr = *e_fmr;
+ ehca_mr_deletenew(e_fmr);
+
+ /* set some MR values */
+ e_fmr->flags = save_fmr.flags;
+ e_fmr->hwpage_size = save_fmr.hwpage_size;
+ e_fmr->fmr_page_size = save_fmr.fmr_page_size;
+ e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
+ e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
+ e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
+ e_fmr->acl = save_fmr.acl;
+
+ memset(&pginfo, 0, sizeof(pginfo));
+ pginfo.type = EHCA_MR_PGI_FMR;
+ ret = ehca_reg_mr(shca, e_fmr, NULL,
+ (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
+ e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
+ &tmp_rkey);
+ if (ret) {
+ u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
+ memcpy(&e_fmr->flags, &(save_mr.flags),
+ sizeof(struct ehca_mr) - offset);
}
ehca_unmap_one_fmr_exit0:
if (ret)
ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
- "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
- ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
- rereg_1_hcall, rereg_3_hcall);
+ "fmr_max_pages=%x",
+ ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
return ret;
} /* end ehca_unmap_one_fmr() */
@@ -1403,10 +1479,10 @@ int ehca_reg_smr(struct ehca_shca *shca,
int ret = 0;
u64 h_ret;
u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+ struct ehca_mr_hipzout_parms hipzout;
ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
(u64)iova_start, hipz_acl, e_pd->fw_pd,
@@ -1419,15 +1495,16 @@ int ehca_reg_smr(struct ehca_shca *shca,
shca->ipz_hca_handle.handle,
e_origmr->ipz_mr_handle.handle,
e_origmr->ib.ib_mr.lkey);
- ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
+ ret = ehca2ib_return_code(h_ret);
goto ehca_reg_smr_exit0;
}
/* successful registration */
- e_newmr->num_pages = e_origmr->num_pages;
- e_newmr->num_4k = e_origmr->num_4k;
- e_newmr->start = iova_start;
- e_newmr->size = e_origmr->size;
- e_newmr->acl = acl;
+ e_newmr->num_kpages = e_origmr->num_kpages;
+ e_newmr->num_hwpages = e_origmr->num_hwpages;
+ e_newmr->hwpage_size = e_origmr->hwpage_size;
+ e_newmr->start = iova_start;
+ e_newmr->size = e_origmr->size;
+ e_newmr->acl = acl;
e_newmr->ipz_mr_handle = hipzout.handle;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
@@ -1453,10 +1530,11 @@ int ehca_reg_internal_maxmr(
struct ehca_mr *e_mr;
u64 *iova_start;
u64 size_maxmr;
- struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
+ struct ehca_mr_pginfo pginfo;
struct ib_phys_buf ib_pbuf;
- u32 num_pages_mr;
- u32 num_pages_4k; /* 4k portion "pages" */
+ u32 num_kpages;
+ u32 num_hwpages;
+ u64 hw_pgsize;
e_mr = ehca_mr_new();
if (!e_mr) {
@@ -1468,28 +1546,31 @@ int ehca_reg_internal_maxmr(
/* register internal max-MR on HCA */
size_maxmr = (u64)high_memory - PAGE_OFFSET;
- iova_start = (u64*)KERNELBASE;
+ iova_start = (u64 *)KERNELBASE;
ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr;
- num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
- PAGE_SIZE - 1) / PAGE_SIZE);
- num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
- EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
-
- pginfo.type = EHCA_MR_PGI_PHYS;
- pginfo.num_pages = num_pages_mr;
- pginfo.num_4k = num_pages_4k;
- pginfo.num_phys_buf = 1;
- pginfo.phys_buf_array = &ib_pbuf;
+ num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
+ PAGE_SIZE);
+ hw_pgsize = ehca_get_max_hwpage_size(shca);
+ num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
+ hw_pgsize);
+
+ memset(&pginfo, 0, sizeof(pginfo));
+ pginfo.type = EHCA_MR_PGI_PHYS;
+ pginfo.num_kpages = num_kpages;
+ pginfo.num_hwpages = num_hwpages;
+ pginfo.hwpage_size = hw_pgsize;
+ pginfo.u.phy.num_phys_buf = 1;
+ pginfo.u.phy.phys_buf_array = &ib_pbuf;
ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
&pginfo, &e_mr->ib.ib_mr.lkey,
&e_mr->ib.ib_mr.rkey);
if (ret) {
ehca_err(&shca->ib_device, "reg of internal max MR failed, "
- "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
- "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
- num_pages_mr, num_pages_4k);
+ "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
+ "num_hwpages=%x", e_mr, iova_start, size_maxmr,
+ num_kpages, num_hwpages);
goto ehca_reg_internal_maxmr_exit1;
}
@@ -1524,10 +1605,10 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
u64 h_ret;
struct ehca_mr *e_origmr = shca->maxmr;
u32 hipz_acl;
- struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
+ struct ehca_mr_hipzout_parms hipzout;
ehca_mrmw_map_acl(acl, &hipz_acl);
- ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+ ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
(u64)iova_start, hipz_acl, e_pd->fw_pd,
@@ -1538,14 +1619,15 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
h_ret, e_origmr, shca->ipz_hca_handle.handle,
e_origmr->ipz_mr_handle.handle,
e_origmr->ib.ib_mr.lkey);
- return ehca_mrmw_map_hrc_reg_smr(h_ret);
+ return ehca2ib_return_code(h_ret);
}
/* successful registration */
- e_newmr->num_pages = e_origmr->num_pages;
- e_newmr->num_4k = e_origmr->num_4k;
- e_newmr->start = iova_start;
- e_newmr->size = e_origmr->size;
- e_newmr->acl = acl;
+ e_newmr->num_kpages = e_origmr->num_kpages;
+ e_newmr->num_hwpages = e_origmr->num_hwpages;
+ e_newmr->hwpage_size = e_origmr->hwpage_size;
+ e_newmr->start = iova_start;
+ e_newmr->size = e_origmr->size;
+ e_newmr->acl = acl;
e_newmr->ipz_mr_handle = hipzout.handle;
*lkey = hipzout.lkey;
*rkey = hipzout.rkey;
@@ -1677,299 +1759,352 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
/*----------------------------------------------------------------------*/
-/* setup page buffer from page info */
-int ehca_set_pagebuf(struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo,
- u32 number,
- u64 *kpage)
+/* PAGE_SIZE >= pginfo->hwpage_size */
+static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage)
{
int ret = 0;
struct ib_umem_chunk *prev_chunk;
struct ib_umem_chunk *chunk;
- struct ib_phys_buf *pbuf;
- u64 *fmrlist;
- u64 num4k, pgaddr, offs4k;
+ u64 pgaddr;
u32 i = 0;
u32 j = 0;
-
- if (pginfo->type == EHCA_MR_PGI_PHYS) {
- /* loop over desired phys_buf_array entries */
- while (i < number) {
- pbuf = pginfo->phys_buf_array + pginfo->next_buf;
- num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
- EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
- offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
- while (pginfo->next_4k < offs4k + num4k) {
- /* sanity check */
- if ((pginfo->page_cnt >= pginfo->num_pages) ||
- (pginfo->page_4k_cnt >= pginfo->num_4k)) {
- ehca_gen_err("page_cnt >= num_pages, "
- "page_cnt=%lx "
- "num_pages=%lx "
- "page_4k_cnt=%lx "
- "num_4k=%lx i=%x",
- pginfo->page_cnt,
- pginfo->num_pages,
- pginfo->page_4k_cnt,
- pginfo->num_4k, i);
- ret = -EFAULT;
- goto ehca_set_pagebuf_exit0;
- }
- *kpage = phys_to_abs(
- (pbuf->addr & EHCA_PAGEMASK)
- + (pginfo->next_4k * EHCA_PAGESIZE));
- if ( !(*kpage) && pbuf->addr ) {
- ehca_gen_err("pbuf->addr=%lx "
- "pbuf->size=%lx "
- "next_4k=%lx", pbuf->addr,
- pbuf->size,
- pginfo->next_4k);
- ret = -EFAULT;
- goto ehca_set_pagebuf_exit0;
- }
- (pginfo->page_4k_cnt)++;
- (pginfo->next_4k)++;
- if (pginfo->next_4k %
- (PAGE_SIZE / EHCA_PAGESIZE) == 0)
- (pginfo->page_cnt)++;
- kpage++;
- i++;
- if (i >= number) break;
+ int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
+
+ /* loop over desired chunk entries */
+ chunk = pginfo->u.usr.next_chunk;
+ prev_chunk = pginfo->u.usr.next_chunk;
+ list_for_each_entry_continue(
+ chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
+ for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
+ pgaddr = page_to_pfn(chunk->page_list[i].page)
+ << PAGE_SHIFT ;
+ *kpage = phys_to_abs(pgaddr +
+ (pginfo->next_hwpage *
+ pginfo->hwpage_size));
+ if ( !(*kpage) ) {
+ ehca_gen_err("pgaddr=%lx "
+ "chunk->page_list[i]=%lx "
+ "i=%x next_hwpage=%lx",
+ pgaddr, (u64)sg_dma_address(
+ &chunk->page_list[i]),
+ i, pginfo->next_hwpage);
+ return -EFAULT;
}
- if (pginfo->next_4k >= offs4k + num4k) {
- (pginfo->next_buf)++;
- pginfo->next_4k = 0;
+ (pginfo->hwpage_cnt)++;
+ (pginfo->next_hwpage)++;
+ kpage++;
+ if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
+ (pginfo->kpage_cnt)++;
+ (pginfo->u.usr.next_nmap)++;
+ pginfo->next_hwpage = 0;
+ i++;
}
+ j++;
+ if (j >= number) break;
}
- } else if (pginfo->type == EHCA_MR_PGI_USER) {
- /* loop over desired chunk entries */
- chunk = pginfo->next_chunk;
- prev_chunk = pginfo->next_chunk;
- list_for_each_entry_continue(chunk,
- (&(pginfo->region->chunk_list)),
- list) {
- for (i = pginfo->next_nmap; i < chunk->nmap; ) {
+ if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
+ (j >= number)) {
+ pginfo->u.usr.next_nmap = 0;
+ prev_chunk = chunk;
+ break;
+ } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
+ pginfo->u.usr.next_nmap = 0;
+ prev_chunk = chunk;
+ } else if (j >= number)
+ break;
+ else
+ prev_chunk = chunk;
+ }
+ pginfo->u.usr.next_chunk =
+ list_prepare_entry(prev_chunk,
+ (&(pginfo->u.usr.region->chunk_list)),
+ list);
+ return ret;
+}
+
+/*
+ * check that the given kernel pages are physically contiguous;
+ * the last page address is returned in prev_pgaddr for the next check
+ */
+static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
+ int start_idx, int end_idx,
+ u64 *prev_pgaddr)
+{
+ int t;
+ for (t = start_idx; t <= end_idx; t++) {
+ u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT;
+ ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+ *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+ if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
+ ehca_gen_err("uncontiguous page found pgaddr=%lx "
+ "prev_pgaddr=%lx page_list_i=%x",
+ pgaddr, *prev_pgaddr, t);
+ return -EINVAL;
+ }
+ *prev_pgaddr = pgaddr;
+ }
+ return 0;
+}
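ehca_check_kpages_per_ate() enforces that every kernel page backing one hardware page is physically adjacent to its predecessor. The same invariant, reduced to a free-standing check (names local to this sketch):

/* returns 1 if n page addresses form one physically contiguous run */
static int pgaddrs_contiguous(const u64 *pgaddr, int n)
{
	int i;

	for (i = 1; i < n; i++)
		if (pgaddr[i] != pgaddr[i - 1] + PAGE_SIZE)
			return 0;
	return 1;
}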
+
+/* PAGE_SIZE < pginfo->hwpage_size */
+static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage)
+{
+ int ret = 0;
+ struct ib_umem_chunk *prev_chunk;
+ struct ib_umem_chunk *chunk;
+ u64 pgaddr, prev_pgaddr;
+ u32 i = 0;
+ u32 j = 0;
+ int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
+ int nr_kpages = kpages_per_hwpage;
+
+ /* loop over desired chunk entries */
+ chunk = pginfo->u.usr.next_chunk;
+ prev_chunk = pginfo->u.usr.next_chunk;
+ list_for_each_entry_continue(
+ chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
+ for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
+ if (nr_kpages == kpages_per_hwpage) {
pgaddr = ( page_to_pfn(chunk->page_list[i].page)
<< PAGE_SHIFT );
- *kpage = phys_to_abs(pgaddr +
- (pginfo->next_4k *
- EHCA_PAGESIZE));
+ *kpage = phys_to_abs(pgaddr);
if ( !(*kpage) ) {
- ehca_gen_err("pgaddr=%lx "
- "chunk->page_list[i]=%lx "
- "i=%x next_4k=%lx mr=%p",
- pgaddr,
- (u64)sg_dma_address(
- &chunk->
- page_list[i]),
- i, pginfo->next_4k, e_mr);
+ ehca_gen_err("pgaddr=%lx i=%x",
+ pgaddr, i);
ret = -EFAULT;
- goto ehca_set_pagebuf_exit0;
+ return ret;
}
- (pginfo->page_4k_cnt)++;
- (pginfo->next_4k)++;
- kpage++;
- if (pginfo->next_4k %
- (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
- (pginfo->page_cnt)++;
- (pginfo->next_nmap)++;
- pginfo->next_4k = 0;
- i++;
+ /*
+ * The first page in a hwpage must be aligned;
+ * the first MR page is exempt from this rule.
+ */
+ if (pgaddr & (pginfo->hwpage_size - 1)) {
+ if (pginfo->hwpage_cnt) {
+ ehca_gen_err(
+ "invalid alignment "
+ "pgaddr=%lx i=%x "
+ "mr_pgsize=%lx",
+ pgaddr, i,
+ pginfo->hwpage_size);
+ ret = -EFAULT;
+ return ret;
+ }
+ /* first MR page */
+ pginfo->kpage_cnt =
+ (pgaddr &
+ (pginfo->hwpage_size - 1)) >>
+ PAGE_SHIFT;
+ nr_kpages -= pginfo->kpage_cnt;
+ *kpage = phys_to_abs(
+ pgaddr &
+ ~(pginfo->hwpage_size - 1));
}
- j++;
- if (j >= number) break;
+ ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+ "value=%016lx", *kpage, pgaddr,
+ *(u64 *)abs_to_virt(
+ phys_to_abs(pgaddr)));
+ prev_pgaddr = pgaddr;
+ i++;
+ pginfo->kpage_cnt++;
+ pginfo->u.usr.next_nmap++;
+ nr_kpages--;
+ if (!nr_kpages)
+ goto next_kpage;
+ continue;
}
- if ((pginfo->next_nmap >= chunk->nmap) &&
- (j >= number)) {
- pginfo->next_nmap = 0;
- prev_chunk = chunk;
- break;
- } else if (pginfo->next_nmap >= chunk->nmap) {
- pginfo->next_nmap = 0;
- prev_chunk = chunk;
- } else if (j >= number)
+ if (i + nr_kpages > chunk->nmap) {
+ ret = ehca_check_kpages_per_ate(
+ chunk->page_list, i,
+ chunk->nmap - 1, &prev_pgaddr);
+ if (ret) return ret;
+ pginfo->kpage_cnt += chunk->nmap - i;
+ pginfo->u.usr.next_nmap += chunk->nmap - i;
+ nr_kpages -= chunk->nmap - i;
break;
- else
- prev_chunk = chunk;
- }
- pginfo->next_chunk =
- list_prepare_entry(prev_chunk,
- (&(pginfo->region->chunk_list)),
- list);
- } else if (pginfo->type == EHCA_MR_PGI_FMR) {
- /* loop over desired page_list entries */
- fmrlist = pginfo->page_list + pginfo->next_listelem;
- for (i = 0; i < number; i++) {
- *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
- pginfo->next_4k * EHCA_PAGESIZE);
- if ( !(*kpage) ) {
- ehca_gen_err("*fmrlist=%lx fmrlist=%p "
- "next_listelem=%lx next_4k=%lx",
- *fmrlist, fmrlist,
- pginfo->next_listelem,
- pginfo->next_4k);
- ret = -EFAULT;
- goto ehca_set_pagebuf_exit0;
}
- (pginfo->page_4k_cnt)++;
- (pginfo->next_4k)++;
+
+ ret = ehca_check_kpages_per_ate(chunk->page_list, i,
+ i + nr_kpages - 1,
+ &prev_pgaddr);
+ if (ret) return ret;
+ i += nr_kpages;
+ pginfo->kpage_cnt += nr_kpages;
+ pginfo->u.usr.next_nmap += nr_kpages;
+next_kpage:
+ nr_kpages = kpages_per_hwpage;
+ (pginfo->hwpage_cnt)++;
kpage++;
- if (pginfo->next_4k %
- (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
- (pginfo->page_cnt)++;
- (pginfo->next_listelem)++;
- fmrlist++;
- pginfo->next_4k = 0;
- }
+ j++;
+ if (j >= number) break;
}
- } else {
- ehca_gen_err("bad pginfo->type=%x", pginfo->type);
- ret = -EFAULT;
- goto ehca_set_pagebuf_exit0;
+ if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
+ (j >= number)) {
+ pginfo->u.usr.next_nmap = 0;
+ prev_chunk = chunk;
+ break;
+ } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
+ pginfo->u.usr.next_nmap = 0;
+ prev_chunk = chunk;
+ } else if (j >= number)
+ break;
+ else
+ prev_chunk = chunk;
}
-
-ehca_set_pagebuf_exit0:
- if (ret)
- ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
- "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
- "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
- "next_listelem=%lx region=%p next_chunk=%p "
- "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
- pginfo->num_pages, pginfo->num_4k,
- pginfo->next_buf, pginfo->next_4k, number, kpage,
- pginfo->page_cnt, pginfo->page_4k_cnt, i,
- pginfo->next_listelem, pginfo->region,
- pginfo->next_chunk, pginfo->next_nmap);
+ pginfo->u.usr.next_chunk =
+ list_prepare_entry(prev_chunk,
+ (&(pginfo->u.usr.region->chunk_list)),
+ list);
return ret;
-} /* end ehca_set_pagebuf() */
-
-/*----------------------------------------------------------------------*/
+}
-/* setup 1 page from page info page buffer */
-int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo,
- u64 *rpage)
+int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage)
{
int ret = 0;
- struct ib_phys_buf *tmp_pbuf;
- u64 *fmrlist;
- struct ib_umem_chunk *chunk;
- struct ib_umem_chunk *prev_chunk;
- u64 pgaddr, num4k, offs4k;
-
- if (pginfo->type == EHCA_MR_PGI_PHYS) {
- /* sanity check */
- if ((pginfo->page_cnt >= pginfo->num_pages) ||
- (pginfo->page_4k_cnt >= pginfo->num_4k)) {
- ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
- "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
- pginfo->page_cnt, pginfo->num_pages,
- pginfo->page_4k_cnt, pginfo->num_4k);
- ret = -EFAULT;
- goto ehca_set_pagebuf_1_exit0;
- }
- tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
- num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
- EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
- offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
- *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
- (pginfo->next_4k * EHCA_PAGESIZE));
- if ( !(*rpage) && tmp_pbuf->addr ) {
- ehca_gen_err("tmp_pbuf->addr=%lx"
- " tmp_pbuf->size=%lx next_4k=%lx",
- tmp_pbuf->addr, tmp_pbuf->size,
- pginfo->next_4k);
- ret = -EFAULT;
- goto ehca_set_pagebuf_1_exit0;
- }
- (pginfo->page_4k_cnt)++;
- (pginfo->next_4k)++;
- if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
- (pginfo->page_cnt)++;
- if (pginfo->next_4k >= offs4k + num4k) {
- (pginfo->next_buf)++;
- pginfo->next_4k = 0;
- }
- } else if (pginfo->type == EHCA_MR_PGI_USER) {
- chunk = pginfo->next_chunk;
- prev_chunk = pginfo->next_chunk;
- list_for_each_entry_continue(chunk,
- (&(pginfo->region->chunk_list)),
- list) {
- pgaddr = ( page_to_pfn(chunk->page_list[
- pginfo->next_nmap].page)
- << PAGE_SHIFT);
- *rpage = phys_to_abs(pgaddr +
- (pginfo->next_4k * EHCA_PAGESIZE));
- if ( !(*rpage) ) {
- ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
- " next_nmap=%lx next_4k=%lx mr=%p",
- pgaddr, (u64)sg_dma_address(
- &chunk->page_list[
- pginfo->
- next_nmap]),
- pginfo->next_nmap, pginfo->next_4k,
- e_mr);
- ret = -EFAULT;
- goto ehca_set_pagebuf_1_exit0;
- }
- (pginfo->page_4k_cnt)++;
- (pginfo->next_4k)++;
- if (pginfo->next_4k %
- (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
- (pginfo->page_cnt)++;
- (pginfo->next_nmap)++;
- pginfo->next_4k = 0;
+ struct ib_phys_buf *pbuf;
+ u64 num_hw, offs_hw;
+ u32 i = 0;
+
+ /* loop over desired phys_buf_array entries */
+ while (i < number) {
+ pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
+ num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
+ pbuf->size, pginfo->hwpage_size);
+ offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
+ pginfo->hwpage_size;
+ while (pginfo->next_hwpage < offs_hw + num_hw) {
+ /* sanity check */
+ if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
+ (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
+ ehca_gen_err("kpage_cnt >= num_kpages, "
+ "kpage_cnt=%lx num_kpages=%lx "
+ "hwpage_cnt=%lx "
+ "num_hwpages=%lx i=%x",
+ pginfo->kpage_cnt,
+ pginfo->num_kpages,
+ pginfo->hwpage_cnt,
+ pginfo->num_hwpages, i);
+ return -EFAULT;
}
- if (pginfo->next_nmap >= chunk->nmap) {
- pginfo->next_nmap = 0;
- prev_chunk = chunk;
+ *kpage = phys_to_abs(
+ (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
+ (pginfo->next_hwpage * pginfo->hwpage_size));
+ if ( !(*kpage) && pbuf->addr ) {
+ ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
+ "next_hwpage=%lx", pbuf->addr,
+ pbuf->size, pginfo->next_hwpage);
+ return -EFAULT;
}
- break;
+ (pginfo->hwpage_cnt)++;
+ (pginfo->next_hwpage)++;
+ if (PAGE_SIZE >= pginfo->hwpage_size) {
+ if (pginfo->next_hwpage %
+ (PAGE_SIZE / pginfo->hwpage_size) == 0)
+ (pginfo->kpage_cnt)++;
+ } else
+ pginfo->kpage_cnt += pginfo->hwpage_size /
+ PAGE_SIZE;
+ kpage++;
+ i++;
+ if (i >= number) break;
+ }
+ if (pginfo->next_hwpage >= offs_hw + num_hw) {
+ (pginfo->u.phy.next_buf)++;
+ pginfo->next_hwpage = 0;
}
- pginfo->next_chunk =
- list_prepare_entry(prev_chunk,
- (&(pginfo->region->chunk_list)),
- list);
- } else if (pginfo->type == EHCA_MR_PGI_FMR) {
- fmrlist = pginfo->page_list + pginfo->next_listelem;
- *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
- pginfo->next_4k * EHCA_PAGESIZE);
- if ( !(*rpage) ) {
+ }
+ return ret;
+}
+
+int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage)
+{
+ int ret = 0;
+ u64 *fmrlist;
+ u32 i;
+
+ /* loop over desired page_list entries */
+ fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
+ for (i = 0; i < number; i++) {
+ *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
+ pginfo->next_hwpage * pginfo->hwpage_size);
+ if ( !(*kpage) ) {
ehca_gen_err("*fmrlist=%lx fmrlist=%p "
- "next_listelem=%lx next_4k=%lx",
- *fmrlist, fmrlist, pginfo->next_listelem,
- pginfo->next_4k);
- ret = -EFAULT;
- goto ehca_set_pagebuf_1_exit0;
+ "next_listelem=%lx next_hwpage=%lx",
+ *fmrlist, fmrlist,
+ pginfo->u.fmr.next_listelem,
+ pginfo->next_hwpage);
+ return -EFAULT;
}
- (pginfo->page_4k_cnt)++;
- (pginfo->next_4k)++;
- if (pginfo->next_4k %
- (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
- (pginfo->page_cnt)++;
- (pginfo->next_listelem)++;
- pginfo->next_4k = 0;
+ (pginfo->hwpage_cnt)++;
+ if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
+ if (pginfo->next_hwpage %
+ (pginfo->u.fmr.fmr_pgsize /
+ pginfo->hwpage_size) == 0) {
+ (pginfo->kpage_cnt)++;
+ (pginfo->u.fmr.next_listelem)++;
+ fmrlist++;
+ pginfo->next_hwpage = 0;
+ } else
+ (pginfo->next_hwpage)++;
+ } else {
+ unsigned int cnt_per_hwpage = pginfo->hwpage_size /
+ pginfo->u.fmr.fmr_pgsize;
+ unsigned int j;
+ u64 prev = *kpage;
+			/* check if the addresses are contiguous */
+ for (j = 1; j < cnt_per_hwpage; j++) {
+ u64 p = phys_to_abs(fmrlist[j] &
+ ~(pginfo->hwpage_size - 1));
+ if (prev + pginfo->u.fmr.fmr_pgsize != p) {
+ ehca_gen_err("uncontiguous fmr pages "
+ "found prev=%lx p=%lx "
+ "idx=%x", prev, p, i + j);
+ return -EINVAL;
+ }
+ prev = p;
+ }
+ pginfo->kpage_cnt += cnt_per_hwpage;
+ pginfo->u.fmr.next_listelem += cnt_per_hwpage;
+ fmrlist += cnt_per_hwpage;
}
- } else {
+ kpage++;
+ }
+ return ret;
+}
+
+/* setup page buffer from page info */
+int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
+ u32 number,
+ u64 *kpage)
+{
+ int ret;
+
+ switch (pginfo->type) {
+ case EHCA_MR_PGI_PHYS:
+ ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
+ break;
+ case EHCA_MR_PGI_USER:
+ ret = PAGE_SIZE >= pginfo->hwpage_size ?
+ ehca_set_pagebuf_user1(pginfo, number, kpage) :
+ ehca_set_pagebuf_user2(pginfo, number, kpage);
+ break;
+ case EHCA_MR_PGI_FMR:
+ ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
+ break;
+ default:
ehca_gen_err("bad pginfo->type=%x", pginfo->type);
ret = -EFAULT;
- goto ehca_set_pagebuf_1_exit0;
+ break;
}
-
-ehca_set_pagebuf_1_exit0:
- if (ret)
- ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
- "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
- "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
- "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
- pginfo, pginfo->type, pginfo->num_pages,
- pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
- rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
- pginfo->next_listelem, pginfo->region,
- pginfo->next_chunk, pginfo->next_nmap);
return ret;
-} /* end ehca_set_pagebuf_1() */
+} /* end ehca_set_pagebuf() */
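A hedged caller-side sketch of the new ehca_set_pagebuf() interface for the physical-buffer case; pbuf, buf_phys and buf_size are stand-ins, the buffer is assumed page-aligned and small enough for one batch (larger MRs loop as in ehca_reg_mr_rpages() above):

struct ehca_mr_pginfo pginfo;
struct ib_phys_buf pbuf = { .addr = buf_phys, .size = buf_size };
u64 kpage[MAX_RPAGES];	/* the driver uses a fw ctrlblock instead */
int ret;

memset(&pginfo, 0, sizeof(pginfo));
pginfo.type = EHCA_MR_PGI_PHYS;
pginfo.hwpage_size = EHCA_MR_PGSIZE64K;	/* any size the HCA supports */
pginfo.num_kpages = NUM_CHUNKS(buf_size, PAGE_SIZE);
pginfo.num_hwpages = NUM_CHUNKS(buf_size, pginfo.hwpage_size);
pginfo.u.phy.num_phys_buf = 1;
pginfo.u.phy.phys_buf_array = &pbuf;

ret = ehca_set_pagebuf(&pginfo, pginfo.num_hwpages, kpage);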
/*----------------------------------------------------------------------*/
@@ -1982,7 +2117,7 @@ int ehca_mr_is_maxmr(u64 size,
{
/* a MR is treated as max-MR only if it fits following: */
if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
- (iova_start == (void*)KERNELBASE)) {
+ (iova_start == (void *)KERNELBASE)) {
ehca_gen_dbg("this is a max-MR");
return 1;
} else
@@ -2011,9 +2146,9 @@ void ehca_mrmw_map_acl(int ib_acl,
/*----------------------------------------------------------------------*/
/* sets page size in hipz access control for MR/MW. */
-void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
+void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
- return; /* HCA supports only 4k */
+ *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */
/*----------------------------------------------------------------------*/
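ehca_encode_hwpage_size() itself is outside this hunk; one plausible shape, assuming the encoding is simply the power-of-two exponent above the 4 KiB base size used elsewhere in this patch (4K -> 0, 64K -> 4, 1M -> 8, 16M -> 12):

/* sketch only; the driver's real encoder may differ */
static u32 ehca_encode_hwpage_size_sketch(u32 pgsize)
{
	int log = ilog2(pgsize);

	WARN_ON(log < 12);	/* nothing smaller than 4 KiB expected */
	return log - 12;
}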
@@ -2042,196 +2177,23 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
/*----------------------------------------------------------------------*/
/*
- * map HIPZ rc to IB retcodes for MR/MW allocations
- * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
- */
-int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_SUCCESS: /* successful completion */
- return 0;
- case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
- case H_CONSTRAINED: /* resource constraint */
- case H_NO_MEM:
- return -ENOMEM;
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_alloc() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for MR register rpage
- * Used for hipz_h_register_rpage_mr at registering last page
- */
-int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_SUCCESS: /* registration complete */
- return 0;
- case H_PAGE_REGISTERED: /* page registered */
- case H_ADAPTER_PARM: /* invalid adapter handle */
- case H_RH_PARM: /* invalid resource handle */
-/* case H_QT_PARM: invalid queue type */
- case H_PARAMETER: /*
- * invalid logical address,
- * or count zero or greater 512
- */
- case H_TABLE_FULL: /* page table full */
- case H_HARDWARE: /* HCA not operational */
- return -EINVAL;
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_rrpg_last() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for MR register rpage
- * Used for hipz_h_register_rpage_mr at registering one page, but not last page
- */
-int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_PAGE_REGISTERED: /* page registered */
- return 0;
- case H_SUCCESS: /* registration complete */
- case H_ADAPTER_PARM: /* invalid adapter handle */
- case H_RH_PARM: /* invalid resource handle */
-/* case H_QT_PARM: invalid queue type */
- case H_PARAMETER: /*
- * invalid logical address,
- * or count zero or greater 512
- */
- case H_TABLE_FULL: /* page table full */
- case H_HARDWARE: /* HCA not operational */
- return -EINVAL;
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
-
-/*----------------------------------------------------------------------*/
-
-/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
-int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_SUCCESS: /* successful completion */
- return 0;
- case H_ADAPTER_PARM: /* invalid adapter handle */
- case H_RH_PARM: /* invalid resource handle */
- return -EINVAL;
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_query_mr() */
-
-/*----------------------------------------------------------------------*/
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for freeing MR resource
- * Used for hipz_h_free_resource_mr
- */
-int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_SUCCESS: /* resource freed */
- return 0;
- case H_ADAPTER_PARM: /* invalid adapter handle */
- case H_RH_PARM: /* invalid resource handle */
- case H_R_STATE: /* invalid resource state */
- case H_HARDWARE: /* HCA not operational */
- return -EINVAL;
- case H_RESOURCE: /* Resource in use */
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_free_mr() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for freeing MW resource
- * Used for hipz_h_free_resource_mw
- */
-int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_SUCCESS: /* resource freed */
- return 0;
- case H_ADAPTER_PARM: /* invalid adapter handle */
- case H_RH_PARM: /* invalid resource handle */
- case H_R_STATE: /* invalid resource state */
- case H_HARDWARE: /* HCA not operational */
- return -EINVAL;
- case H_RESOURCE: /* Resource in use */
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_free_mw() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * map HIPZ rc to IB retcodes for SMR registrations
- * Used for hipz_h_register_smr.
- */
-int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
-{
- switch (hipz_rc) {
- case H_SUCCESS: /* successful completion */
- return 0;
- case H_ADAPTER_PARM: /* invalid adapter handle */
- case H_RH_PARM: /* invalid resource handle */
- case H_MEM_PARM: /* invalid MR virtual address */
- case H_MEM_ACCESS_PARM: /* invalid access controls */
- case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
- return -EINVAL;
- case H_BUSY: /* long busy */
- return -EBUSY;
- default:
- return -EINVAL;
- }
-} /* end ehca_mrmw_map_hrc_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-
-/*
* MR destructor and constructor
* used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
* except struct ib_mr and spinlock
*/
void ehca_mr_deletenew(struct ehca_mr *mr)
{
- mr->flags = 0;
- mr->num_pages = 0;
- mr->num_4k = 0;
- mr->acl = 0;
- mr->start = NULL;
+ mr->flags = 0;
+ mr->num_kpages = 0;
+ mr->num_hwpages = 0;
+ mr->acl = 0;
+ mr->start = NULL;
mr->fmr_page_size = 0;
mr->fmr_max_pages = 0;
- mr->fmr_max_maps = 0;
- mr->fmr_map_cnt = 0;
+ mr->fmr_max_maps = 0;
+ mr->fmr_map_cnt = 0;
memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
memset(&mr->galpas, 0, sizeof(mr->galpas));
- mr->nr_of_pages = 0;
- mr->pagearray = NULL;
} /* end ehca_mr_deletenew() */
int ehca_init_mrmw_cache(void)
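All of the removed per-hcall mappers now funnel through one generic helper elsewhere in the driver. Its exact table is not part of this diff; a conservative reconstruction from the switch statements deleted above (the case list is an assumption):

static inline int ehca2ib_return_code_sketch(u64 ehca_rc)
{
	switch (ehca_rc) {
	case H_SUCCESS:
		return 0;
	case H_RESOURCE:		/* resource in use */
	case H_BUSY:			/* long busy */
		return -EBUSY;
	case H_NOT_ENOUGH_RESOURCES:	/* insufficient resources */
	case H_CONSTRAINED:		/* resource constraint */
	case H_NO_MEM:
		return -ENOMEM;
	default:
		return -EINVAL;
	}
}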
@@ -2239,13 +2201,13 @@ int ehca_init_mrmw_cache(void)
mr_cache = kmem_cache_create("ehca_cache_mr",
sizeof(struct ehca_mr), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!mr_cache)
return -ENOMEM;
mw_cache = kmem_cache_create("ehca_cache_mw",
sizeof(struct ehca_mw), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!mw_cache) {
kmem_cache_destroy(mr_cache);
mr_cache = NULL;
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
index d936e40a5748..bc8f4e31c123 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -101,40 +101,21 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
u64 *page_list,
int list_len);
-int ehca_set_pagebuf(struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo,
+int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
u32 number,
u64 *kpage);
-int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
- struct ehca_mr_pginfo *pginfo,
- u64 *rpage);
-
int ehca_mr_is_maxmr(u64 size,
u64 *iova_start);
void ehca_mrmw_map_acl(int ib_acl,
u32 *hipz_acl);
-void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);
+void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
int *ib_acl);
-int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);
-
-int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);
-
void ehca_mr_deletenew(struct ehca_mr *mr);
#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
index 79d0591a8043..3dafd7ff36cd 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -49,6 +49,7 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
struct ib_ucontext *context, struct ib_udata *udata)
{
struct ehca_pd *pd;
+ int i;
pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
if (!pd) {
@@ -58,6 +59,11 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
}
pd->ownpid = current->tgid;
+ for (i = 0; i < 2; i++) {
+ INIT_LIST_HEAD(&pd->free[i]);
+ INIT_LIST_HEAD(&pd->full[i]);
+ }
+ mutex_init(&pd->lock);
/*
* Kernel PD: when device = -1, 0
@@ -81,6 +87,9 @@ int ehca_dealloc_pd(struct ib_pd *pd)
{
u32 cur_pid = current->tgid;
struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
+ int i, leftovers = 0;
+ extern struct kmem_cache *small_qp_cache;
+ struct ipz_small_queue_page *page, *tmp;
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
my_pd->ownpid != cur_pid) {
@@ -89,8 +98,20 @@ int ehca_dealloc_pd(struct ib_pd *pd)
return -EINVAL;
}
- kmem_cache_free(pd_cache,
- container_of(pd, struct ehca_pd, ib_pd));
+ for (i = 0; i < 2; i++) {
+ list_splice(&my_pd->full[i], &my_pd->free[i]);
+ list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
+ leftovers = 1;
+ free_page(page->page);
+ kmem_cache_free(small_qp_cache, page);
+ }
+ }
+
+ if (leftovers)
+ ehca_warn(pd->device,
+ "Some small queue pages were not freed");
+
+ kmem_cache_free(pd_cache, my_pd);
return 0;
}
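The teardown above splices each PD's full list into its free list and then releases every small-queue page. The same idiom, isolated with a stand-in element type (the real ipz_small_queue_page layout is only partially visible here):

struct pool_page {		/* stand-in for ipz_small_queue_page */
	unsigned long page;	/* backing page address */
	struct list_head list;
};

static void drain_pool(struct list_head *full, struct list_head *free_list,
		       struct kmem_cache *cache)
{
	struct pool_page *p, *tmp;

	list_splice(full, free_list);
	list_for_each_entry_safe(p, tmp, free_list, list) {
		free_page(p->page);		/* give back the backing page */
		kmem_cache_free(cache, p);	/* then the descriptor itself */
	}
}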
@@ -100,7 +121,7 @@ int ehca_init_pd_cache(void)
pd_cache = kmem_cache_create("ehca_cache_pd",
sizeof(struct ehca_pd), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!pd_cache)
return -ENOMEM;
return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 8707d297ce4c..818803057ebf 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -53,13 +53,13 @@ struct ehca_vsgentry {
u32 length;
};
-#define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7)
-#define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3)
-#define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12)
-#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31)
-#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47)
-#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55)
-#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63)
+#define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7)
+#define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3)
+#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12)
+#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31)
+#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47)
+#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55)
+#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63)
/*
* Unreliable Datagram Address Vector Format
@@ -206,10 +206,10 @@ struct ehca_wqe {
};
-#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
-#define WC_IMM_DATA EHCA_BMASK_IBM(1,1)
-#define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2)
-#define WC_SE_BIT EHCA_BMASK_IBM(3,3)
+#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
+#define WC_IMM_DATA EHCA_BMASK_IBM(1, 1)
+#define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2)
+#define WC_SE_BIT EHCA_BMASK_IBM(3, 3)
#define WC_STATUS_ERROR_BIT 0x80000000
#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
#define WC_STATUS_PURGE_BIT 0x10
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b5bc787c77b6..b178cba96345 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -3,7 +3,9 @@
*
* QP functions
*
- * Authors: Waleri Fomin <fomin@de.ibm.com>
+ * Authors: Joachim Fenkes <fenkes@de.ibm.com>
+ * Stefan Roscher <stefan.roscher@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
@@ -234,13 +236,6 @@ static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
return index;
}
-enum ehca_service_type {
- ST_RC = 0,
- ST_UC = 1,
- ST_RD = 2,
- ST_UD = 3
-};
-
/*
* ibqptype2servicetype returns hcp service type corresponding to given
* ib qp type used by create_qp()
@@ -268,143 +263,169 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
}
/*
- * init_qp_queues initializes/constructs r/squeue and registers queue pages.
+ * init userspace queue info from ipz_queue data
+ */
+static inline void queue2resp(struct ipzu_queue_resp *resp,
+ struct ipz_queue *queue)
+{
+ resp->qe_size = queue->qe_size;
+ resp->act_nr_of_sg = queue->act_nr_of_sg;
+ resp->queue_length = queue->queue_length;
+ resp->pagesize = queue->pagesize;
+ resp->toggle_state = queue->toggle_state;
+}
+
+/*
+ * init_qp_queue initializes/constructs r/squeue and registers queue pages.
*/
-static inline int init_qp_queues(struct ehca_shca *shca,
- struct ehca_qp *my_qp,
- int nr_sq_pages,
- int nr_rq_pages,
- int swqe_size,
- int rwqe_size,
- int nr_send_sges, int nr_receive_sges)
+static inline int init_qp_queue(struct ehca_shca *shca,
+ struct ehca_pd *pd,
+ struct ehca_qp *my_qp,
+ struct ipz_queue *queue,
+ int q_type,
+ u64 expected_hret,
+ struct ehca_alloc_queue_parms *parms,
+ int wqe_size)
{
- int ret, cnt, ipz_rc;
+ int ret, cnt, ipz_rc, nr_q_pages;
void *vpage;
u64 rpage, h_ret;
struct ib_device *ib_dev = &shca->ib_device;
struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
- ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
- nr_sq_pages,
- EHCA_PAGESIZE, swqe_size, nr_send_sges);
- if (!ipz_rc) {
- ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
- ipz_rc);
- return -EBUSY;
+ if (!parms->queue_size)
+ return 0;
+
+ if (parms->is_small) {
+ nr_q_pages = 1;
+ ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
+ 128 << parms->page_size,
+ wqe_size, parms->act_nr_sges, 1);
+ } else {
+ nr_q_pages = parms->queue_size;
+ ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
+ EHCA_PAGESIZE, wqe_size,
+ parms->act_nr_sges, 0);
}
- ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
- nr_rq_pages,
- EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
if (!ipz_rc) {
- ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
+ ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%x",
ipz_rc);
- ret = -EBUSY;
- goto init_qp_queues0;
+ return -EBUSY;
}
- /* register SQ pages */
- for (cnt = 0; cnt < nr_sq_pages; cnt++) {
- vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
+
+ /* register queue pages */
+ for (cnt = 0; cnt < nr_q_pages; cnt++) {
+ vpage = ipz_qpageit_get_inc(queue);
if (!vpage) {
- ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
+ ehca_err(ib_dev, "ipz_qpageit_get_inc() "
"failed p_vpage= %p", vpage);
ret = -EINVAL;
- goto init_qp_queues1;
+ goto init_qp_queue1;
}
rpage = virt_to_abs(vpage);
h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
my_qp->ipz_qp_handle,
- &my_qp->pf, 0, 0,
- rpage, 1,
+ NULL, 0, q_type,
+ rpage, parms->is_small ? 0 : 1,
my_qp->galpas.kernel);
- if (h_ret < H_SUCCESS) {
- ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
- " failed rc=%lx", h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto init_qp_queues1;
- }
- }
-
- ipz_qeit_reset(&my_qp->ipz_squeue);
-
- /* register RQ pages */
- for (cnt = 0; cnt < nr_rq_pages; cnt++) {
- vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
- if (!vpage) {
- ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
- "failed p_vpage = %p", vpage);
- ret = -EINVAL;
- goto init_qp_queues1;
- }
-
- rpage = virt_to_abs(vpage);
-
- h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
- my_qp->ipz_qp_handle,
- &my_qp->pf, 0, 1,
- rpage, 1,my_qp->galpas.kernel);
- if (h_ret < H_SUCCESS) {
- ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
- "rc=%lx", h_ret);
- ret = ehca2ib_return_code(h_ret);
- goto init_qp_queues1;
- }
- if (cnt == (nr_rq_pages - 1)) { /* last page! */
- if (h_ret != H_SUCCESS) {
- ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
+ if (cnt == (nr_q_pages - 1)) { /* last page! */
+ if (h_ret != expected_hret) {
+ ehca_err(ib_dev, "hipz_qp_register_rpage() "
"h_ret= %lx ", h_ret);
ret = ehca2ib_return_code(h_ret);
- goto init_qp_queues1;
+ goto init_qp_queue1;
}
vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
if (vpage) {
ehca_err(ib_dev, "ipz_qpageit_get_inc() "
"should not succeed vpage=%p", vpage);
ret = -EINVAL;
- goto init_qp_queues1;
+ goto init_qp_queue1;
}
} else {
if (h_ret != H_PAGE_REGISTERED) {
- ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
+ ehca_err(ib_dev, "hipz_qp_register_rpage() "
"h_ret= %lx ", h_ret);
ret = ehca2ib_return_code(h_ret);
- goto init_qp_queues1;
+ goto init_qp_queue1;
}
}
}
- ipz_qeit_reset(&my_qp->ipz_rqueue);
+ ipz_qeit_reset(queue);
return 0;
-init_qp_queues1:
- ipz_queue_dtor(&my_qp->ipz_rqueue);
-init_qp_queues0:
- ipz_queue_dtor(&my_qp->ipz_squeue);
+init_qp_queue1:
+ ipz_queue_dtor(pd, queue);
return ret;
}
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
+{
+ if (is_llqp)
+ return 128 << act_nr_sge;
+ else
+ return offsetof(struct ehca_wqe,
+ u.nud.sg_list[act_nr_sge]);
+}
+
+static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
+ int req_nr_sge, int is_llqp)
+{
+ u32 wqe_size, q_size;
+ int act_nr_sge = req_nr_sge;
+
+ if (!is_llqp)
+ /* round up #SGEs so WQE size is a power of 2 */
+ for (act_nr_sge = 4; act_nr_sge <= 252;
+ act_nr_sge = 4 + 2 * act_nr_sge)
+ if (act_nr_sge >= req_nr_sge)
+ break;
+
+ wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
+ q_size = wqe_size * (queue->max_wr + 1);
+
+ if (q_size <= 512)
+ queue->page_size = 2;
+ else if (q_size <= 1024)
+ queue->page_size = 3;
+ else
+ queue->page_size = 0;
+
+ queue->is_small = (queue->page_size != 0);
+}
+
+/*
+ * Create an ib_qp struct that is either a QP or an SRQ, depending on
+ * the value of the is_srq parameter. If init_attr and srq_init_attr share
+ * fields, the field out of init_attr is used.
+ */
+static struct ehca_qp *internal_create_qp(
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata, int is_srq)
{
- static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
- static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
struct ehca_qp *my_qp;
struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
struct ib_ucontext *context = NULL;
u64 h_ret;
- int max_send_sge, max_recv_sge, ret;
+ int is_llqp = 0, has_srq = 0;
+ int qp_type, max_send_sge, max_recv_sge, ret;
/* h_call's out parameters */
struct ehca_alloc_qp_parms parms;
- u32 swqe_size = 0, rwqe_size = 0;
- u8 daqp_completion, isdaqp;
+ u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
unsigned long flags;
+ memset(&parms, 0, sizeof(parms));
+ qp_type = init_attr->qp_type;
+
if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
@@ -412,41 +433,98 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- /* save daqp completion bits */
- daqp_completion = init_attr->qp_type & 0x60;
- /* save daqp bit */
- isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
- init_attr->qp_type = init_attr->qp_type & 0x1F;
+ /* save LLQP info */
+ if (qp_type & 0x80) {
+ is_llqp = 1;
+ parms.ext_type = EQPT_LLQP;
+ parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
+ }
+ qp_type &= 0x1F;
+ init_attr->qp_type &= 0x1F;
- if (init_attr->qp_type != IB_QPT_UD &&
- init_attr->qp_type != IB_QPT_SMI &&
- init_attr->qp_type != IB_QPT_GSI &&
- init_attr->qp_type != IB_QPT_UC &&
- init_attr->qp_type != IB_QPT_RC) {
- ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
- return ERR_PTR(-EINVAL);
+ /* handle SRQ base QPs */
+ if (init_attr->srq) {
+ struct ehca_qp *my_srq =
+ container_of(init_attr->srq, struct ehca_qp, ib_srq);
+
+ has_srq = 1;
+ parms.ext_type = EQPT_SRQBASE;
+ parms.srq_qpn = my_srq->real_qp_num;
+ parms.srq_token = my_srq->token;
}
- if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
- && isdaqp) {
- ehca_err(pd->device, "unsupported LL QP Type=%x",
- init_attr->qp_type);
+
+ if (is_llqp && has_srq) {
+ ehca_err(pd->device, "LLQPs can't have an SRQ");
return ERR_PTR(-EINVAL);
- } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
- (init_attr->cap.max_send_wr > 255 ||
- init_attr->cap.max_recv_wr > 255 )) {
- ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
- "or max_rq_wr=%x for QP Type=%x",
- init_attr->cap.max_send_wr,
- init_attr->cap.max_recv_wr,init_attr->qp_type);
- return ERR_PTR(-EINVAL);
- } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
- init_attr->cap.max_send_wr > 255) {
- ehca_err(pd->device,
- "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
- init_attr->cap.max_send_wr, init_attr->qp_type);
+ }
+
+ /* handle SRQs */
+ if (is_srq) {
+ parms.ext_type = EQPT_SRQ;
+ parms.srq_limit = srq_init_attr->attr.srq_limit;
+ if (init_attr->cap.max_recv_sge > 3) {
+ ehca_err(pd->device, "no more than three SGEs "
+ "supported for SRQ pd=%p max_sge=%x",
+ pd, init_attr->cap.max_recv_sge);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* check QP type */
+ if (qp_type != IB_QPT_UD &&
+ qp_type != IB_QPT_UC &&
+ qp_type != IB_QPT_RC &&
+ qp_type != IB_QPT_SMI &&
+ qp_type != IB_QPT_GSI) {
+ ehca_err(pd->device, "wrong QP Type=%x", qp_type);
return ERR_PTR(-EINVAL);
}
+ if (is_llqp) {
+ switch (qp_type) {
+ case IB_QPT_RC:
+ if ((init_attr->cap.max_send_wr > 255) ||
+ (init_attr->cap.max_recv_wr > 255)) {
+ ehca_err(pd->device,
+ "Invalid Number of max_sq_wr=%x "
+ "or max_rq_wr=%x for RC LLQP",
+ init_attr->cap.max_send_wr,
+ init_attr->cap.max_recv_wr);
+ return ERR_PTR(-EINVAL);
+ }
+ break;
+ case IB_QPT_UD:
+ if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
+ ehca_err(pd->device, "UD LLQP not supported "
+ "by this adapter");
+ return ERR_PTR(-ENOSYS);
+ }
+ if (!(init_attr->cap.max_send_sge <= 5
+ && init_attr->cap.max_send_sge >= 1
+ && init_attr->cap.max_recv_sge <= 5
+ && init_attr->cap.max_recv_sge >= 1)) {
+ ehca_err(pd->device,
+ "Invalid Number of max_send_sge=%x "
+ "or max_recv_sge=%x for UD LLQP",
+ init_attr->cap.max_send_sge,
+ init_attr->cap.max_recv_sge);
+ return ERR_PTR(-EINVAL);
+ } else if (init_attr->cap.max_send_wr > 255) {
+ ehca_err(pd->device,
+ "Invalid Number of "
+ "ax_send_wr=%x for UD QP_TYPE=%x",
+ init_attr->cap.max_send_wr, qp_type);
+ return ERR_PTR(-EINVAL);
+ }
+ break;
+ default:
+ ehca_err(pd->device, "unsupported LL QP Type=%x",
+ qp_type);
+ return ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+
if (pd->uobject && udata)
context = pd->uobject->context;
@@ -456,16 +534,17 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
}
- memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
spin_lock_init(&my_qp->spinlock_s);
spin_lock_init(&my_qp->spinlock_r);
+ my_qp->qp_type = qp_type;
+ my_qp->ext_type = parms.ext_type;
- my_qp->recv_cq =
- container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
- my_qp->send_cq =
- container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
-
- my_qp->init_attr = *init_attr;
+ if (init_attr->recv_cq)
+ my_qp->recv_cq =
+ container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
+ if (init_attr->send_cq)
+ my_qp->send_cq =
+ container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
do {
if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
@@ -474,9 +553,9 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
goto create_qp_exit0;
}
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
} while (ret == -EAGAIN);
@@ -486,10 +565,10 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
goto create_qp_exit0;
}
- parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
+ parms.servicetype = ibqptype2servicetype(qp_type);
if (parms.servicetype < 0) {
ret = -EINVAL;
- ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
+ ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
goto create_qp_exit0;
}
@@ -501,21 +580,35 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
/* UD_AV CIRCUMVENTION */
max_send_sge = init_attr->cap.max_send_sge;
max_recv_sge = init_attr->cap.max_recv_sge;
- if (IB_QPT_UD == init_attr->qp_type ||
- IB_QPT_GSI == init_attr->qp_type ||
- IB_QPT_SMI == init_attr->qp_type) {
+ if (parms.servicetype == ST_UD && !is_llqp) {
max_send_sge += 2;
max_recv_sge += 2;
}
- parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
- parms.daqp_ctrl = isdaqp | daqp_completion;
+ parms.token = my_qp->token;
+ parms.eq_handle = shca->eq.ipz_eq_handle;
parms.pd = my_pd->fw_pd;
- parms.max_recv_sge = max_recv_sge;
- parms.max_send_sge = max_send_sge;
-
- h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);
-
+ if (my_qp->send_cq)
+ parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
+ if (my_qp->recv_cq)
+ parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
+
+ parms.squeue.max_wr = init_attr->cap.max_send_wr;
+ parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
+ parms.squeue.max_sge = max_send_sge;
+ parms.rqueue.max_sge = max_recv_sge;
+
+ if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
+ && !(context && udata)) { /* no small QP support in userspace ATM */
+ ehca_determine_small_queue(
+ &parms.squeue, max_send_sge, is_llqp);
+ ehca_determine_small_queue(
+ &parms.rqueue, max_recv_sge, is_llqp);
+ parms.qp_storage =
+ (parms.squeue.is_small || parms.rqueue.is_small);
+ }
+
+ h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
if (h_ret != H_SUCCESS) {
ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
h_ret);
@@ -523,55 +616,38 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
goto create_qp_exit1;
}
- my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+ ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
+ my_qp->ipz_qp_handle = parms.qp_handle;
+ my_qp->galpas = parms.galpas;
- switch (init_attr->qp_type) {
+ swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
+ rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
+
+ switch (qp_type) {
case IB_QPT_RC:
- if (isdaqp == 0) {
- swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
- (parms.act_nr_send_sges)]);
- rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
- (parms.act_nr_recv_sges)]);
- } else { /* for daqp we need to use msg size, not wqe size */
- swqe_size = da_rc_msg_size[max_send_sge];
- rwqe_size = da_rc_msg_size[max_recv_sge];
- parms.act_nr_send_sges = 1;
- parms.act_nr_recv_sges = 1;
+ if (is_llqp) {
+ parms.squeue.act_nr_sges = 1;
+ parms.rqueue.act_nr_sges = 1;
}
break;
- case IB_QPT_UC:
- swqe_size = offsetof(struct ehca_wqe,
- u.nud.sg_list[parms.act_nr_send_sges]);
- rwqe_size = offsetof(struct ehca_wqe,
- u.nud.sg_list[parms.act_nr_recv_sges]);
- break;
-
case IB_QPT_UD:
case IB_QPT_GSI:
case IB_QPT_SMI:
/* UD circumvention */
- parms.act_nr_recv_sges -= 2;
- parms.act_nr_send_sges -= 2;
- if (isdaqp) {
- swqe_size = da_ud_sq_msg_size[max_send_sge];
- rwqe_size = da_rc_msg_size[max_recv_sge];
- parms.act_nr_send_sges = 1;
- parms.act_nr_recv_sges = 1;
+ if (is_llqp) {
+ parms.squeue.act_nr_sges = 1;
+ parms.rqueue.act_nr_sges = 1;
} else {
- swqe_size = offsetof(struct ehca_wqe,
- u.ud_av.sg_list[parms.act_nr_send_sges]);
- rwqe_size = offsetof(struct ehca_wqe,
- u.ud_av.sg_list[parms.act_nr_recv_sges]);
+ parms.squeue.act_nr_sges -= 2;
+ parms.rqueue.act_nr_sges -= 2;
}
- if (IB_QPT_GSI == init_attr->qp_type ||
- IB_QPT_SMI == init_attr->qp_type) {
- parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
- parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
- parms.act_nr_send_sges = init_attr->cap.max_send_sge;
- parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
- my_qp->ib_qp.qp_num =
- (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
+ if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
+ parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
+ parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
+ parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
+ parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
+ ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
}
break;
@@ -580,108 +656,234 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
break;
}
- /* initializes r/squeue and registers queue pages */
- ret = init_qp_queues(shca, my_qp,
- parms.nr_sq_pages, parms.nr_rq_pages,
- swqe_size, rwqe_size,
- parms.act_nr_send_sges, parms.act_nr_recv_sges);
- if (ret) {
- ehca_err(pd->device,
- "Couldn't initialize r/squeue and pages ret=%x", ret);
- goto create_qp_exit2;
+ /* initialize r/squeue and register queue pages */
+ if (HAS_SQ(my_qp)) {
+ ret = init_qp_queue(
+ shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
+ HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
+ &parms.squeue, swqe_size);
+ if (ret) {
+ ehca_err(pd->device, "Couldn't initialize squeue "
+ "and pages ret=%x", ret);
+ goto create_qp_exit2;
+ }
}
- my_qp->ib_qp.pd = &my_pd->ib_pd;
- my_qp->ib_qp.device = my_pd->ib_pd.device;
+ if (HAS_RQ(my_qp)) {
+ ret = init_qp_queue(
+ shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
+ H_SUCCESS, &parms.rqueue, rwqe_size);
+ if (ret) {
+ ehca_err(pd->device, "Couldn't initialize rqueue "
+ "and pages ret=%x", ret);
+ goto create_qp_exit3;
+ }
+ }
+
+ if (is_srq) {
+ my_qp->ib_srq.pd = &my_pd->ib_pd;
+ my_qp->ib_srq.device = my_pd->ib_pd.device;
- my_qp->ib_qp.recv_cq = init_attr->recv_cq;
- my_qp->ib_qp.send_cq = init_attr->send_cq;
+ my_qp->ib_srq.srq_context = init_attr->qp_context;
+ my_qp->ib_srq.event_handler = init_attr->event_handler;
+ } else {
+ my_qp->ib_qp.qp_num = ib_qp_num;
+ my_qp->ib_qp.pd = &my_pd->ib_pd;
+ my_qp->ib_qp.device = my_pd->ib_pd.device;
- my_qp->ib_qp.qp_type = init_attr->qp_type;
+ my_qp->ib_qp.recv_cq = init_attr->recv_cq;
+ my_qp->ib_qp.send_cq = init_attr->send_cq;
- my_qp->qp_type = init_attr->qp_type;
- my_qp->ib_qp.srq = init_attr->srq;
+ my_qp->ib_qp.qp_type = qp_type;
+ my_qp->ib_qp.srq = init_attr->srq;
- my_qp->ib_qp.qp_context = init_attr->qp_context;
- my_qp->ib_qp.event_handler = init_attr->event_handler;
+ my_qp->ib_qp.qp_context = init_attr->qp_context;
+ my_qp->ib_qp.event_handler = init_attr->event_handler;
+ }
init_attr->cap.max_inline_data = 0; /* not supported yet */
- init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
- init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
- init_attr->cap.max_send_sge = parms.act_nr_send_sges;
- init_attr->cap.max_send_wr = parms.act_nr_send_wqes;
+ init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
+ init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
+ init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
+ init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
+ my_qp->init_attr = *init_attr;
/* NOTE: define_apq0() not supported yet */
- if (init_attr->qp_type == IB_QPT_GSI) {
+ if (qp_type == IB_QPT_GSI) {
h_ret = ehca_define_sqp(shca, my_qp, init_attr);
if (h_ret != H_SUCCESS) {
ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
h_ret);
ret = ehca2ib_return_code(h_ret);
- goto create_qp_exit3;
+ goto create_qp_exit4;
}
}
- if (init_attr->send_cq) {
- struct ehca_cq *cq = container_of(init_attr->send_cq,
- struct ehca_cq, ib_cq);
- ret = ehca_cq_assign_qp(cq, my_qp);
+
+ if (my_qp->send_cq) {
+ ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
if (ret) {
- ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
- ret);
- goto create_qp_exit3;
+ ehca_err(pd->device,
+ "Couldn't assign qp to send_cq ret=%x", ret);
+ goto create_qp_exit4;
}
- my_qp->send_cq = cq;
}
+
/* copy queues, galpa data to user space */
if (context && udata) {
- struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
- struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
struct ehca_create_qp_resp resp;
memset(&resp, 0, sizeof(resp));
resp.qp_num = my_qp->real_qp_num;
resp.token = my_qp->token;
resp.qp_type = my_qp->qp_type;
+ resp.ext_type = my_qp->ext_type;
resp.qkey = my_qp->qkey;
resp.real_qp_num = my_qp->real_qp_num;
- /* rqueue properties */
- resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size;
- resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg;
- resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
- resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
- resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
- /* squeue properties */
- resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
- resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
- resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
- resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
- resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
+ resp.ipz_rqueue.offset = my_qp->ipz_rqueue.offset;
+ resp.ipz_squeue.offset = my_qp->ipz_squeue.offset;
+ if (HAS_SQ(my_qp))
+ queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
+ if (HAS_RQ(my_qp))
+ queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
+
if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
ehca_err(pd->device, "Copy to udata failed");
ret = -EINVAL;
- goto create_qp_exit3;
+ goto create_qp_exit4;
}
}
- return &my_qp->ib_qp;
+ return my_qp;
+
+create_qp_exit4:
+ if (HAS_RQ(my_qp))
+ ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
create_qp_exit3:
- ipz_queue_dtor(&my_qp->ipz_rqueue);
- ipz_queue_dtor(&my_qp->ipz_squeue);
+ if (HAS_SQ(my_qp))
+ ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
create_qp_exit2:
hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
create_qp_exit1:
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
idr_remove(&ehca_qp_idr, my_qp->token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
create_qp_exit0:
kmem_cache_free(qp_cache, my_qp);
return ERR_PTR(ret);
}
+struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct ehca_qp *ret;
+
+ ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
+ return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
+}
+
+static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+ struct ib_uobject *uobject);
+
+struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_qp_init_attr qp_init_attr;
+ struct ehca_qp *my_qp;
+ struct ib_srq *ret;
+ struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+ ib_device);
+ struct hcp_modify_qp_control_block *mqpcb;
+ u64 hret, update_mask;
+
+ /* For common attributes, internal_create_qp() takes its info
+ * out of qp_init_attr, so copy all common attrs there.
+ */
+ memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+ qp_init_attr.event_handler = srq_init_attr->event_handler;
+ qp_init_attr.qp_context = srq_init_attr->srq_context;
+ qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ qp_init_attr.qp_type = IB_QPT_RC;
+ qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
+ qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
+
+ my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
+ if (IS_ERR(my_qp))
+ return (struct ib_srq *)my_qp;
+
+ /* copy back return values */
+ srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
+ srq_init_attr->attr.max_sge = qp_init_attr.cap.max_recv_sge;
+
+ /* drive SRQ into RTR state */
+ mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+ if (!mqpcb) {
+ ehca_err(pd->device, "Could not get zeroed page for mqpcb "
+ "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
+ ret = ERR_PTR(-ENOMEM);
+ goto create_srq1;
+ }
+
+ mqpcb->qp_state = EHCA_QPS_INIT;
+ mqpcb->prim_phys_port = 1;
+ update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
+ hret = hipz_h_modify_qp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ update_mask,
+ mqpcb, my_qp->galpas.kernel);
+ if (hret != H_SUCCESS) {
+ ehca_err(pd->device, "Could not modify SRQ to INIT"
+ "ehca_qp=%p qp_num=%x hret=%lx",
+ my_qp, my_qp->real_qp_num, hret);
+ goto create_srq2;
+ }
+
+ mqpcb->qp_enable = 1;
+ update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
+ hret = hipz_h_modify_qp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ update_mask,
+ mqpcb, my_qp->galpas.kernel);
+ if (hret != H_SUCCESS) {
+ ehca_err(pd->device, "Could not enable SRQ"
+ "ehca_qp=%p qp_num=%x hret=%lx",
+ my_qp, my_qp->real_qp_num, hret);
+ goto create_srq2;
+ }
+
+ mqpcb->qp_state = EHCA_QPS_RTR;
+ update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
+ hret = hipz_h_modify_qp(shca->ipz_hca_handle,
+ my_qp->ipz_qp_handle,
+ &my_qp->pf,
+ update_mask,
+ mqpcb, my_qp->galpas.kernel);
+ if (hret != H_SUCCESS) {
+ ehca_err(pd->device, "Could not modify SRQ to RTR"
+ "ehca_qp=%p qp_num=%x hret=%lx",
+ my_qp, my_qp->real_qp_num, hret);
+ goto create_srq2;
+ }
+
+ return &my_qp->ib_srq;
+
+create_srq2:
+ ret = ERR_PTR(ehca2ib_return_code(hret));
+ ehca_free_fw_ctrlblock(mqpcb);
+
+create_srq1:
+ internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
+
+ return ret;
+}
+
/*
* prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
* set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
@@ -707,7 +909,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
- bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
+ bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
qp_num, bad_send_wqe_p);
/* convert wqe pointer to vadr */
@@ -722,7 +924,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
}
/* loop sets wqe's purge bit */
- wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
+ wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = 0;
while (wqe->optype != 0xff && wqe->wqef != 0xff) {
if (ehca_debug_level)
@@ -730,7 +932,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
wqe->nr_of_data_seg = 0; /* suppress data access */
wqe->wqef = WQEF_PURGE; /* WQE to be purged */
q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
- wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
+ wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
*bad_wqe_cnt = (*bad_wqe_cnt)+1;
}
/*
@@ -765,7 +967,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
u64 h_ret;
int bad_wqe_cnt = 0;
int squeue_locked = 0;
- unsigned long spl_flags = 0;
+ unsigned long flags = 0;
/* do query_qp to obtain current attr values */
mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
@@ -835,7 +1037,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit1;
}
- ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
+ ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
"new qp_state=%x attribute_mask=%x",
my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
@@ -851,7 +1053,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
goto modify_qp_exit1;
}
- if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
+ mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
+ if (mqpcb->qp_state)
update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
else {
ret = -EINVAL;
@@ -886,6 +1089,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
"ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
my_qp, ibqp->qp_num, statetrans);
+ /* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
+ * in non-LL UD QPs.
+ */
+ if ((my_qp->qp_type == IB_QPT_UD) &&
+ (my_qp->ext_type != EQPT_LLQP) &&
+ (statetrans == IB_QPST_INIT2RTR) &&
+ (shca->hw_level >= 0x22)) {
+ update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+ mqpcb->send_grh_flag = 1;
+ }
+
/* sqe -> rts: set purge bit of bad wqe before actual trans */
if ((my_qp->qp_type == IB_QPT_UD ||
my_qp->qp_type == IB_QPT_GSI ||
@@ -895,10 +1109,10 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (!ibqp->uobject) {
struct ehca_wqe *wqe;
/* lock send queue */
- spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+ spin_lock_irqsave(&my_qp->spinlock_s, flags);
squeue_locked = 1;
/* mark next free wqe */
- wqe = (struct ehca_wqe*)
+ wqe = (struct ehca_wqe *)
ipz_qeit_get(&my_qp->ipz_squeue);
wqe->optype = wqe->wqef = 0xff;
ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
@@ -1133,7 +1347,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
- "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
+ "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
@@ -1181,7 +1395,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
modify_qp_exit2:
if (squeue_locked) { /* this means: sqe -> rts */
- spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
+ spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
my_qp->sqerr_purgeflag = 1;
}
@@ -1232,7 +1446,7 @@ int ehca_query_qp(struct ib_qp *qp,
}
if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
- ehca_err(qp->device,"Invalid attribute mask "
+ ehca_err(qp->device, "Invalid attribute mask "
"ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
my_qp, qp->qp_num, qp_attr_mask);
return -EINVAL;
@@ -1240,7 +1454,7 @@ int ehca_query_qp(struct ib_qp *qp,
qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!qpcb) {
- ehca_err(qp->device,"Out of memory for qpcb "
+ ehca_err(qp->device, "Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
return -ENOMEM;
}
@@ -1252,7 +1466,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
- ehca_err(qp->device,"hipz_h_query_qp() failed "
+ ehca_err(qp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%lx",
my_qp, qp->qp_num, h_ret);
goto query_qp_exit1;
@@ -1263,7 +1477,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (qp_attr->cur_qp_state == -EINVAL) {
ret = -EINVAL;
- ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
+ ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
"ehca_qp=%p qp_num=%x",
qpcb->qp_state, my_qp, qp->qp_num);
goto query_qp_exit1;
@@ -1312,6 +1526,9 @@ int ehca_query_qp(struct ib_qp *qp,
qp_attr->alt_port_num = qpcb->alt_phys_port;
qp_attr->alt_timeout = qpcb->timeout_al;
+ qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
+ qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
+
/* primary av */
qp_attr->ah_attr.sl = qpcb->service_level;
@@ -1367,53 +1584,170 @@ query_qp_exit1:
return ret;
}
-int ehca_destroy_qp(struct ib_qp *ibqp)
+int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
- struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
- struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+ struct ehca_qp *my_qp =
+ container_of(ibsrq, struct ehca_qp, ib_srq);
+ struct ehca_pd *my_pd =
+ container_of(ibsrq->pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca =
+ container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
+ struct hcp_modify_qp_control_block *mqpcb;
+ u64 update_mask;
+ u64 h_ret;
+ int ret = 0;
+
+ u32 cur_pid = current->tgid;
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(ibsrq->pd->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+ if (!mqpcb) {
+ ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
+ "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
+ return -ENOMEM;
+ }
+
+ update_mask = 0;
+ if (attr_mask & IB_SRQ_LIMIT) {
+ attr_mask &= ~IB_SRQ_LIMIT;
+ update_mask |=
+ EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
+ | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
+ mqpcb->curr_srq_limit =
+ EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+ mqpcb->qp_aff_asyn_ev_log_reg =
+ EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
+ }
+
+ /* by now, all bits in attr_mask should have been cleared */
+ if (attr_mask) {
+ ehca_err(ibsrq->device, "invalid attribute mask bits set "
+ "attr_mask=%x", attr_mask);
+ ret = -EINVAL;
+ goto modify_srq_exit0;
+ }
+
+ if (ehca_debug_level)
+ ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
+
+ h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
+ NULL, update_mask, mqpcb,
+ my_qp->galpas.kernel);
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(ibsrq->device, "hipz_h_modify_qp() failed rc=%lx "
+ "ehca_qp=%p qp_num=%x",
+ h_ret, my_qp, my_qp->real_qp_num);
+ }
+
+modify_srq_exit0:
+ ehca_free_fw_ctrlblock(mqpcb);
+
+ return ret;
+}
+
+int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
+{
+ struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
+ struct ehca_pd *my_pd = container_of(srq->pd, struct ehca_pd, ib_pd);
+ struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
ib_device);
+ struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+ struct hcp_modify_qp_control_block *qpcb;
+ u32 cur_pid = current->tgid;
+ int ret = 0;
+ u64 h_ret;
+
+ if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+ my_pd->ownpid != cur_pid) {
+ ehca_err(srq->device, "Invalid caller pid=%x ownpid=%x",
+ cur_pid, my_pd->ownpid);
+ return -EINVAL;
+ }
+
+ qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+ if (!qpcb) {
+ ehca_err(srq->device, "Out of memory for qpcb "
+ "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
+ return -ENOMEM;
+ }
+
+ h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
+ NULL, qpcb, my_qp->galpas.kernel);
+
+ if (h_ret != H_SUCCESS) {
+ ret = ehca2ib_return_code(h_ret);
+ ehca_err(srq->device, "hipz_h_query_qp() failed "
+ "ehca_qp=%p qp_num=%x h_ret=%lx",
+ my_qp, my_qp->real_qp_num, h_ret);
+ goto query_srq_exit1;
+ }
+
+ srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
+ srq_attr->srq_limit = EHCA_BMASK_GET(
+ MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+
+ if (ehca_debug_level)
+ ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
+
+query_srq_exit1:
+ ehca_free_fw_ctrlblock(qpcb);
+
+ return ret;
+}
+
+static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+ struct ib_uobject *uobject)
+{
+ struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
ib_pd);
u32 cur_pid = current->tgid;
- u32 qp_num = ibqp->qp_num;
+ u32 qp_num = my_qp->real_qp_num;
int ret;
u64 h_ret;
u8 port_num;
enum ib_qp_type qp_type;
unsigned long flags;
- if (ibqp->uobject) {
+ if (uobject) {
if (my_qp->mm_count_galpa ||
my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
- ehca_err(ibqp->device, "Resources still referenced in "
- "user space qp_num=%x", ibqp->qp_num);
+ ehca_err(dev, "Resources still referenced in "
+ "user space qp_num=%x", qp_num);
return -EINVAL;
}
if (my_pd->ownpid != cur_pid) {
- ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
+ ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
cur_pid, my_pd->ownpid);
return -EINVAL;
}
}
if (my_qp->send_cq) {
- ret = ehca_cq_unassign_qp(my_qp->send_cq,
- my_qp->real_qp_num);
+ ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
if (ret) {
- ehca_err(ibqp->device, "Couldn't unassign qp from "
+ ehca_err(dev, "Couldn't unassign qp from "
"send_cq ret=%x qp_num=%x cq_num=%x", ret,
- my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
+ qp_num, my_qp->send_cq->cq_number);
return ret;
}
}
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
idr_remove(&ehca_qp_idr, my_qp->token);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
- ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
+ ehca_err(dev, "hipz_h_destroy_qp() failed rc=%lx "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
return ehca2ib_return_code(h_ret);
}
@@ -1424,7 +1758,7 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
/* no support for IB_QPT_SMI yet */
if (qp_type == IB_QPT_GSI) {
struct ib_event event;
- ehca_info(ibqp->device, "device %s: port %x is inactive.",
+ ehca_info(dev, "device %s: port %x is inactive.",
shca->ib_device.name, port_num);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
@@ -1433,18 +1767,34 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
ib_dispatch_event(&event);
}
- ipz_queue_dtor(&my_qp->ipz_rqueue);
- ipz_queue_dtor(&my_qp->ipz_squeue);
+ if (HAS_RQ(my_qp))
+ ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+ if (HAS_SQ(my_qp))
+ ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
kmem_cache_free(qp_cache, my_qp);
return 0;
}
+int ehca_destroy_qp(struct ib_qp *qp)
+{
+ return internal_destroy_qp(qp->device,
+ container_of(qp, struct ehca_qp, ib_qp),
+ qp->uobject);
+}
+
+int ehca_destroy_srq(struct ib_srq *srq)
+{
+ return internal_destroy_qp(srq->device,
+ container_of(srq, struct ehca_qp, ib_srq),
+ srq->uobject);
+}
+
int ehca_init_qp_cache(void)
{
qp_cache = kmem_cache_create("ehca_cache_qp",
sizeof(struct ehca_qp), 0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!qp_cache)
return -ENOMEM;
return 0;
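
The small-QP support added above decides per queue whether a full EHCA_PAGESIZE page is needed: ehca_determine_small_queue() rounds the SGE count up so the WQE size is a power of two, multiplies by max_wr + 1, and picks page_size 2 or 3 for queues that fit in 512 or 1024 bytes; init_qp_queue() then builds such a queue from a single (128 << page_size)-byte page. Below is a standalone sketch of that selection arithmetic; it is illustrative only, not part of the patch, and pick_small_page_size() is a hypothetical helper that mirrors the thresholds visible in the hunks above.

/*
 * Illustrative sketch only -- not part of the patch.  Mirrors the
 * page-size selection in ehca_determine_small_queue(): queues that fit
 * in 512 or 1024 bytes are marked "small" and later built from a single
 * (128 << page_size)-byte page in init_qp_queue().
 */
#include <stdio.h>

static int pick_small_page_size(unsigned int wqe_size, unsigned int max_wr)
{
        unsigned int q_size = wqe_size * (max_wr + 1);

        if (q_size <= 512)
                return 2;       /* 128 << 2 = 512-byte queue page */
        if (q_size <= 1024)
                return 3;       /* 128 << 3 = 1024-byte queue page */
        return 0;               /* 0 = not small, use a full EHCA_PAGESIZE page */
}

int main(void)
{
        /* e.g. 128-byte WQEs and max_wr = 3 -> 512 bytes -> small queue */
        int page_size = pick_small_page_size(128, 3);

        printf("page_size=%d is_small=%d\n", page_size, page_size != 0);
        return 0;
}
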
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index caec9dee09e1..94eed70fedf5 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -3,8 +3,9 @@
*
* post_send/recv, poll_cq, req_notify
*
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ * Joachim Fenkes <fenkes@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
@@ -78,7 +79,8 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
}
if (ehca_debug_level) {
- ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
+ ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
+ ipz_rqueue);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
}
@@ -98,7 +100,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
struct ib_sge *sge = send_wr->sg_list;
ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
- "send_flags=%x opcode=%x",idx, send_wr->wr_id,
+ "send_flags=%x opcode=%x", idx, send_wr->wr_id,
send_wr->num_sge, send_wr->send_flags,
send_wr->opcode);
if (mad_hdr) {
@@ -115,7 +117,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
mad_hdr->attr_mod);
}
for (j = 0; j < send_wr->num_sge; j++) {
- u8 *data = (u8 *) abs_to_virt(sge->addr);
+ u8 *data = (u8 *)abs_to_virt(sge->addr);
ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
"lkey=%x",
idx, j, data, sge->length, sge->lkey);
@@ -362,10 +364,10 @@ int ehca_post_send(struct ib_qp *qp,
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
- unsigned long spl_flags;
+ unsigned long flags;
/* LOCK the QUEUE */
- spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+ spin_lock_irqsave(&my_qp->spinlock_s, flags);
/* loop processes list of send reqs */
for (cur_send_wr = send_wr; cur_send_wr != NULL;
@@ -406,26 +408,31 @@ int ehca_post_send(struct ib_qp *qp,
} /* eof for cur_send_wr */
post_send_exit0:
- /* UNLOCK the QUEUE */
- spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt);
+ spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
return ret;
}
-int ehca_post_recv(struct ib_qp *qp,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
+static int internal_post_recv(struct ehca_qp *my_qp,
+ struct ib_device *dev,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
{
- struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
struct ib_recv_wr *cur_recv_wr;
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
- unsigned long spl_flags;
+ unsigned long flags;
+
+ if (unlikely(!HAS_RQ(my_qp))) {
+ ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
+ my_qp, my_qp->real_qp_num, my_qp->ext_type);
+ return -ENODEV;
+ }
/* LOCK the QUEUE */
- spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
+ spin_lock_irqsave(&my_qp->spinlock_r, flags);
/* loop processes list of send reqs */
for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
@@ -439,8 +446,8 @@ int ehca_post_recv(struct ib_qp *qp,
*bad_recv_wr = cur_recv_wr;
if (wqe_cnt == 0) {
ret = -ENOMEM;
- ehca_err(qp->device, "Too many posted WQEs "
- "qp_num=%x", qp->qp_num);
+ ehca_err(dev, "Too many posted WQEs "
+ "qp_num=%x", my_qp->real_qp_num);
}
goto post_recv_exit0;
}
@@ -455,23 +462,39 @@ int ehca_post_recv(struct ib_qp *qp,
*bad_recv_wr = cur_recv_wr;
if (wqe_cnt == 0) {
ret = -EINVAL;
- ehca_err(qp->device, "Could not write WQE "
- "qp_num=%x", qp->qp_num);
+ ehca_err(dev, "Could not write WQE "
+ "qp_num=%x", my_qp->real_qp_num);
}
goto post_recv_exit0;
}
wqe_cnt++;
- ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
+ ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, my_qp->real_qp_num, wqe_cnt);
} /* eof for cur_recv_wr */
post_recv_exit0:
- spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
iosync(); /* serialize GAL register access */
hipz_update_rqa(my_qp, wqe_cnt);
+ spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
return ret;
}
+int ehca_post_recv(struct ib_qp *qp,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
+ qp->device, recv_wr, bad_recv_wr);
+}
+
+int ehca_post_srq_recv(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
+ srq->device, recv_wr, bad_recv_wr);
+}
+
/*
* ib_wc_opcode table converts ehca wc opcode to ib
* Since we use zero to indicate invalid opcode, the actual ib opcode must
@@ -494,6 +517,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
int ret = 0;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
struct ehca_cqe *cqe;
+ struct ehca_qp *my_qp;
int cqe_count = 0;
poll_cq_one_read_cqe:
@@ -511,9 +535,11 @@ poll_cq_one_read_cqe:
cqe_count++;
if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
- struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
+ struct ehca_qp *qp;
int purgeflag;
- unsigned long spl_flags;
+ unsigned long flags;
+
+ qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
if (!qp) {
ehca_err(cq->device, "cq_num=%x qp_num=%x "
"could not find qp -> ignore cqe",
@@ -523,13 +549,13 @@ poll_cq_one_read_cqe:
/* ignore this purged cqe */
goto poll_cq_one_read_cqe;
}
- spin_lock_irqsave(&qp->spinlock_s, spl_flags);
+ spin_lock_irqsave(&qp->spinlock_s, flags);
purgeflag = qp->sqerr_purgeflag;
- spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
+ spin_unlock_irqrestore(&qp->spinlock_s, flags);
if (purgeflag) {
- ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
- "src_qp=%x",
+ ehca_dbg(cq->device,
+ "Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
if (ehca_debug_level)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
@@ -545,7 +571,7 @@ poll_cq_one_read_cqe:
}
/* tracing cqe */
- if (ehca_debug_level) {
+ if (unlikely(ehca_debug_level)) {
ehca_dbg(cq->device,
"Received COMPLETION ehca_cq=%p cq_num=%x -----",
my_cq, my_cq->cq_number);
@@ -579,7 +605,11 @@ poll_cq_one_read_cqe:
} else
wc->status = IB_WC_SUCCESS;
- wc->qp = NULL;
+ read_lock(&ehca_qp_idr_lock);
+ my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
+ wc->qp = &my_qp->ib_qp;
+ read_unlock(&ehca_qp_idr_lock);
+
wc->byte_len = cqe->nr_bytes_transferred;
wc->pkey_index = cqe->pkey_index;
wc->slid = cqe->rlid;
@@ -589,7 +619,7 @@ poll_cq_one_read_cqe:
wc->imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
- if (wc->status != IB_WC_SUCCESS)
+ if (unlikely(wc->status != IB_WC_SUCCESS))
ehca_dbg(cq->device,
"ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
"OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
@@ -610,7 +640,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
int nr;
struct ib_wc *current_wc = wc;
int ret = 0;
- unsigned long spl_flags;
+ unsigned long flags;
if (num_entries < 1) {
ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
@@ -619,14 +649,14 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
goto poll_cq_exit0;
}
- spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+ spin_lock_irqsave(&my_cq->spinlock, flags);
for (nr = 0; nr < num_entries; nr++) {
ret = ehca_poll_cq_one(cq, current_wc);
if (ret)
break;
current_wc++;
} /* eof for nr */
- spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+ spin_unlock_irqrestore(&my_cq->spinlock, flags);
if (ret == -EAGAIN || !ret)
ret = nr;
@@ -637,7 +667,6 @@ poll_cq_exit0:
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- unsigned long spl_flags;
int ret = 0;
switch (notify_flags & IB_CQ_SOLICITED_MASK) {
@@ -652,6 +681,7 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
}
if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ unsigned long spl_flags;
spin_lock_irqsave(&my_cq->spinlock, spl_flags);
ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 973c4b591545..57c77a715f46 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -59,6 +59,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
+#include <asm/atomic.h>
#include <asm/abs_addr.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
@@ -92,14 +93,14 @@ extern int ehca_debug_level;
#define ehca_gen_dbg(format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
- printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
+ printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
get_paca()->paca_index, __FUNCTION__, ## arg); \
} while (0)
#define ehca_gen_warn(format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
- printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
+ printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
get_paca()->paca_index, __FUNCTION__, ## arg); \
} while (0)
@@ -113,12 +114,12 @@ extern int ehca_debug_level;
* <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
*/
#define ehca_dmp(adr, len, format, args...) \
- do { \
- unsigned int x; \
+ do { \
+ unsigned int x; \
unsigned int l = (unsigned int)(len); \
- unsigned char *deb = (unsigned char*)(adr); \
+ unsigned char *deb = (unsigned char *)(adr); \
for (x = 0; x < l; x += 16) { \
- printk("EHCA_DMP:%s " format \
+ printk(KERN_INFO "EHCA_DMP:%s " format \
" adr=%p ofs=%04x %016lx %016lx\n", \
__FUNCTION__, ##args, deb, x, \
*((u64 *)&deb[0]), *((u64 *)&deb[8])); \
@@ -127,16 +128,16 @@ extern int ehca_debug_level;
} while (0)
/* define a bitmask, little endian version */
-#define EHCA_BMASK(pos,length) (((pos)<<16)+(length))
+#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
/* define a bitmask, the ibm way... */
-#define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1))
+#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
/* internal function, don't use */
-#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
+#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
/* internal function, don't use */
-#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
+#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
/**
* EHCA_BMASK_SET - return value shifted and masked by mask
@@ -144,30 +145,16 @@ extern int ehca_debug_level;
* variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
* in variable
*/
-#define EHCA_BMASK_SET(mask,value) \
- ((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
+#define EHCA_BMASK_SET(mask, value) \
+ ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
/**
* EHCA_BMASK_GET - extract a parameter from value by mask
*/
-#define EHCA_BMASK_GET(mask,value) \
- (EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
-
+#define EHCA_BMASK_GET(mask, value) \
+ (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
/* Converts ehca to ib return code */
-static inline int ehca2ib_return_code(u64 ehca_rc)
-{
- switch (ehca_rc) {
- case H_SUCCESS:
- return 0;
- case H_BUSY:
- return -EBUSY;
- case H_NO_MEM:
- return -ENOMEM;
- default:
- return -EINVAL;
- }
-}
-
+int ehca2ib_return_code(u64 ehca_rc);
#endif /* EHCA_TOOLS_H */
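
The EHCA_BMASK_IBM() helpers reformatted above encode a field descriptor as (shift << 16) + width, where the shift is derived from IBM bit numbering (bit 0 is the MSB of the 64-bit word, so a field spanning IBM bits from..to sits 63 - to bits above the LSB and is to - from + 1 bits wide). The following standalone program, not part of the patch, copies those macro definitions and shows a SET/GET round trip for GRH_TCLASS_MASK = EHCA_BMASK_IBM(4, 12) from ehca_qes.h:

/*
 * Illustrative sketch only -- macro bodies copied from the ehca_tools.h
 * hunk above, wrapped in a hosted C program.
 */
#include <stdio.h>
#include <stdint.h>

#define EHCA_BMASK_IBM(from, to)  (((63 - to) << 16) + ((to) - (from) + 1))
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
#define EHCA_BMASK_MASK(mask)     (~0ULL >> ((64 - (mask)) & 0xffff))
#define EHCA_BMASK_SET(mask, value) \
        ((EHCA_BMASK_MASK(mask) & ((uint64_t)(value))) << EHCA_BMASK_SHIFTPOS(mask))
#define EHCA_BMASK_GET(mask, value) \
        (EHCA_BMASK_MASK(mask) & (((uint64_t)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))

#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12) /* 9-bit field, IBM bits 4..12 */

int main(void)
{
        uint64_t word = EHCA_BMASK_SET(GRH_TCLASS_MASK, 0x5a);

        /* 0x5a lands in bits 51..59 (LSB numbering) and reads back unchanged */
        printf("word=%016llx tclass=%llx\n",
               (unsigned long long)word,
               (unsigned long long)EHCA_BMASK_GET(GRH_TCLASS_MASK, word));
        return 0;
}
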
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 73db920b6945..4bc687fdf531 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -70,7 +70,7 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
static void ehca_mm_open(struct vm_area_struct *vma)
{
- u32 *count = (u32*)vma->vm_private_data;
+ u32 *count = (u32 *)vma->vm_private_data;
if (!count) {
ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
@@ -86,7 +86,7 @@ static void ehca_mm_open(struct vm_area_struct *vma)
static void ehca_mm_close(struct vm_area_struct *vma)
{
- u32 *count = (u32*)vma->vm_private_data;
+ u32 *count = (u32 *)vma->vm_private_data;
if (!count) {
ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
@@ -149,7 +149,7 @@ static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
ehca_gen_err("vm_insert_page() failed rc=%x", ret);
return ret;
}
- start += PAGE_SIZE;
+ start += PAGE_SIZE;
}
vma->vm_private_data = mm_count;
(*mm_count)++;
@@ -215,7 +215,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
case 2: /* qp rqueue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
qp->ib_qp.qp_num);
- ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
+ ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
+ &qp->mm_count_rqueue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
@@ -227,7 +228,8 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
case 3: /* qp squeue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
qp->ib_qp.qp_num);
- ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
+ ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
+ &qp->mm_count_squeue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
@@ -253,16 +255,16 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
u32 cur_pid = current->tgid;
u32 ret;
- unsigned long flags;
struct ehca_cq *cq;
struct ehca_qp *qp;
struct ehca_pd *pd;
+ struct ib_uobject *uobject;
switch (q_type) {
case 1: /* CQ */
- spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+ read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, idr_handle);
- spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ read_unlock(&ehca_cq_idr_lock);
/* make sure this mmap really belongs to the authorized user */
if (!cq)
@@ -288,9 +290,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
break;
case 2: /* QP */
- spin_lock_irqsave(&ehca_qp_idr_lock, flags);
+ read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, idr_handle);
- spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ read_unlock(&ehca_qp_idr_lock);
/* make sure this mmap really belongs to the authorized user */
if (!qp)
@@ -304,7 +306,8 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return -ENOMEM;
}
- if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
+ uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
+ if (!uobject || uobject->context != context)
return -EINVAL;
ret = ehca_mmap_qp(vma, qp, rsrc_type);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 5766ae3a2029..fdbfebea7d11 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -5,6 +5,7 @@
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Joachim Fenkes <fenkes@de.ibm.com>
* Gerd Bayer <gerd.bayer@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
*
@@ -51,10 +52,13 @@
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
+#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
+#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
@@ -62,6 +66,12 @@
#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
+#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
+#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
+
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
@@ -74,10 +84,7 @@
#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
-/* direct access qp controls */
-#define DAQP_CTRL_ENABLE 0x01
-#define DAQP_CTRL_SEND_COMP 0x20
-#define DAQP_CTRL_RECV_COMP 0x40
+static DEFINE_SPINLOCK(hcall_lock);
static u32 get_longbusy_msecs(int longbusy_rc)
{
@@ -155,7 +162,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
{
long ret;
int i, sleep_msecs, lock_is_set = 0;
- unsigned long flags;
+ unsigned long flags = 0;
ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
"arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
@@ -284,68 +291,73 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
}
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_qp *qp,
struct ehca_alloc_qp_parms *parms)
{
u64 ret;
- u64 allocate_controls;
- u64 max_r10_reg;
+ u64 allocate_controls, max_r10_reg, r11, r12;
u64 outs[PLPAR_HCALL9_BUFSIZE];
- u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
- u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
- int daqp_ctrl = parms->daqp_ctrl;
allocate_controls =
- EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
- (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
+ EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
+ parms->squeue.page_size)
+ | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
+ parms->rqueue.page_size)
| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
- (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
+ !!(parms->ll_comp_flags & LLQP_RECV_COMP))
| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
- (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
+ !!(parms->ll_comp_flags & LLQP_SEND_COMP))
| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
parms->ud_av_l_key_ctl)
| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
max_r10_reg =
EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
- max_nr_send_wqes)
+ parms->squeue.max_wr + 1)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
- max_nr_receive_wqes)
+ parms->rqueue.max_wr + 1)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
- parms->max_send_sge)
+ parms->squeue.max_sge)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
- parms->max_recv_sge);
+ parms->rqueue.max_sge);
+
+ r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
+
+ if (parms->ext_type == EQPT_SRQ)
+ r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
+ else
+ r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
allocate_controls, /* r5 */
- qp->send_cq->ipz_cq_handle.handle,
- qp->recv_cq->ipz_cq_handle.handle,
- parms->ipz_eq_handle.handle,
- ((u64)qp->token << 32) | parms->pd.value,
- max_r10_reg, /* r10 */
- parms->ud_av_l_key_ctl, /* r11 */
- 0);
- qp->ipz_qp_handle.handle = outs[0];
- qp->real_qp_num = (u32)outs[1];
- parms->act_nr_send_wqes =
+ parms->send_cq_handle.handle,
+ parms->recv_cq_handle.handle,
+ parms->eq_handle.handle,
+ ((u64)parms->token << 32) | parms->pd.value,
+ max_r10_reg, r11, r12);
+
+ parms->qp_handle.handle = outs[0];
+ parms->real_qp_num = (u32)outs[1];
+ parms->squeue.act_nr_wqes =
(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
- parms->act_nr_recv_wqes =
+ parms->rqueue.act_nr_wqes =
(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
- parms->act_nr_send_sges =
+ parms->squeue.act_nr_sges =
(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
- parms->act_nr_recv_sges =
+ parms->rqueue.act_nr_sges =
(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
- parms->nr_sq_pages =
+ parms->squeue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
- parms->nr_rq_pages =
+ parms->rqueue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
if (ret == H_SUCCESS)
- hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);
+ hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lx", ret);
@@ -423,7 +435,8 @@ u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
{
return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
adapter_handle.handle, /* r4 */
- queue_type | pagesize << 8, /* r5 */
+ (u64)queue_type | ((u64)pagesize) << 8,
+ /* r5 */
resource_handle, /* r6 */
logical_address_of_page, /* r7 */
count, /* r8 */
@@ -492,13 +505,13 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
const u64 count,
const struct h_galpa galpa)
{
- if (count != 1) {
+ if (count > 1) {
ehca_gen_err("Page counter=%lx", count);
return H_PARAMETER;
}
- return hipz_h_register_rpage(adapter_handle,pagesize,queue_type,
- qp_handle.handle,logical_address_of_page,
+ return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
+ qp_handle.handle, logical_address_of_page,
count);
}
@@ -518,9 +531,9 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (log_addr_next_sq_wqe2processed)
- *log_addr_next_sq_wqe2processed = (void*)outs[0];
+ *log_addr_next_sq_wqe2processed = (void *)outs[0];
if (log_addr_next_rq_wqe2processed)
- *log_addr_next_rq_wqe2processed = (void*)outs[1];
+ *log_addr_next_rq_wqe2processed = (void *)outs[1];
return ret;
}
@@ -720,6 +733,9 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
+ ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
+ "vaddr=%lx length=%lx",
+ (u32)PAGE_SIZE, access_ctrl, vaddr, length);
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
5, /* r5 */
@@ -742,8 +758,22 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
const u64 logical_address_of_page,
const u64 count)
{
+ extern int ehca_debug_level;
u64 ret;
+ if (unlikely(ehca_debug_level >= 2)) {
+ if (count > 1) {
+ u64 *kpage;
+ int i;
+ kpage = (u64 *)abs_to_virt(logical_address_of_page);
+ for (i = 0; i < count; i++)
+ ehca_gen_dbg("kpage[%d]=%p",
+ i, (void *)kpage[i]);
+ } else
+ ehca_gen_dbg("kpage=%p",
+ (void *)logical_address_of_page);
+ }
+
if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
ehca_gen_err("logical_address_of_page not on a 4k boundary "
"adapter_handle=%lx mr=%p mr_handle=%lx "
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2869f7dd6196..60ce02b70663 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,6 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
* initialize resources, create empty QPPTs (2 rings).
*/
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
- struct ehca_qp *qp,
struct ehca_alloc_qp_parms *parms);
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 0b1a4772c78a..214821095cb1 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -50,7 +50,7 @@ int hcall_map_page(u64 physaddr, u64 *mapaddr)
int hcall_unmap_page(u64 mapaddr)
{
- iounmap((volatile void __iomem*)mapaddr);
+ iounmap((volatile void __iomem *) mapaddr);
return 0;
}
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h
index 20898a153446..868735fd3187 100644
--- a/drivers/infiniband/hw/ehca/hipz_fns_core.h
+++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h
@@ -53,10 +53,10 @@
#define hipz_galpa_load_cq(gal, offset) \
hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
-#define hipz_galpa_store_qp(gal,offset, value) \
+#define hipz_galpa_store_qp(gal, offset, value) \
hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
#define hipz_galpa_load_qp(gal, offset) \
- hipz_galpa_load(gal,QPTEMM_OFFSET(offset))
+ hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
{
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
index fad91368dc5a..d9739e554515 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -161,10 +161,11 @@ struct hipz_qptemm {
/* 0x1000 */
};
-#define QPX_SQADDER EHCA_BMASK_IBM(48,63)
-#define QPX_RQADDER EHCA_BMASK_IBM(48,63)
+#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
+#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
+#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
-#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
+#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
/* MRMWPT Entry Memory Map */
struct hipz_mrmwmm {
@@ -186,7 +187,7 @@ struct hipz_mrmwmm {
};
-#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x)
+#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
struct hipz_qpedmm {
/* 0x00 */
@@ -237,7 +238,7 @@ struct hipz_qpedmm {
u64 qpedx_rrva3;
};
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
+#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
/* CQ Table Entry Memory Map */
struct hipz_cqtemm {
@@ -262,12 +263,12 @@ struct hipz_cqtemm {
/* 0x1000 */
};
-#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32,63)
-#define CQX_FECADDER EHCA_BMASK_IBM(32,63)
-#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0)
-#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0)
+#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
+#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
+#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
+#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
+#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
/* EQ Table Entry Memory Map */
struct hipz_eqtemm {
@@ -292,7 +293,7 @@ struct hipz_eqtemm {
};
-#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x)
+#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
/* access control defines for MR/MW */
#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
@@ -360,6 +361,24 @@ struct hipz_query_hca {
u32 max_neq;
} __attribute__ ((packed));
+#define HCA_CAP_AH_PORT_NR_CHECK EHCA_BMASK_IBM( 0, 0)
+#define HCA_CAP_ATOMIC EHCA_BMASK_IBM( 1, 1)
+#define HCA_CAP_AUTO_PATH_MIG EHCA_BMASK_IBM( 2, 2)
+#define HCA_CAP_BAD_P_KEY_CTR EHCA_BMASK_IBM( 3, 3)
+#define HCA_CAP_SQD_RTS_PORT_CHANGE EHCA_BMASK_IBM( 4, 4)
+#define HCA_CAP_CUR_QP_STATE_MOD EHCA_BMASK_IBM( 5, 5)
+#define HCA_CAP_INIT_TYPE EHCA_BMASK_IBM( 6, 6)
+#define HCA_CAP_PORT_ACTIVE_EVENT EHCA_BMASK_IBM( 7, 7)
+#define HCA_CAP_Q_KEY_VIOL_CTR EHCA_BMASK_IBM( 8, 8)
+#define HCA_CAP_WQE_RESIZE EHCA_BMASK_IBM( 9, 9)
+#define HCA_CAP_RAW_PACKET_MCAST EHCA_BMASK_IBM(10, 10)
+#define HCA_CAP_SHUTDOWN_PORT EHCA_BMASK_IBM(11, 11)
+#define HCA_CAP_RC_LL_QP EHCA_BMASK_IBM(12, 12)
+#define HCA_CAP_SRQ EHCA_BMASK_IBM(13, 13)
+#define HCA_CAP_UD_LL_QP EHCA_BMASK_IBM(16, 16)
+#define HCA_CAP_RESIZE_MR EHCA_BMASK_IBM(17, 17)
+#define HCA_CAP_MINI_QP EHCA_BMASK_IBM(18, 18)
+
/* query port response block */
struct hipz_query_port {
u32 state;
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index bf7a40088f61..a090c679c397 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -40,6 +40,11 @@
#include "ehca_tools.h"
#include "ipz_pt_fn.h"
+#include "ehca_classes.h"
+
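+/* number of (4K) eHCA pages that fit into one kernel page */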
+#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
+
+struct kmem_cache *small_qp_cache;
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
@@ -49,7 +54,7 @@ void *ipz_qpageit_get_inc(struct ipz_queue *queue)
queue->current_q_offset -= queue->pagesize;
ret = NULL;
}
- if (((u64)ret) % EHCA_PAGESIZE) {
+ if (((u64)ret) % queue->pagesize) {
ehca_gen_err("ERROR!! not at PAGE-Boundary");
return NULL;
}
@@ -83,80 +88,195 @@ int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
return -EINVAL;
}
-int ipz_queue_ctor(struct ipz_queue *queue,
- const u32 nr_of_pages,
- const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
+#if PAGE_SHIFT < EHCA_PAGESHIFT
+#error Kernel pages must be at least as large as eHCA pages (4K)!
+#endif
+
+/*
+ * allocate pages for queue:
+ * outer loop allocates whole kernel pages (page aligned) and
+ * inner loop divides a kernel page into smaller hca queue pages
+ */
+static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
{
- int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
- int f;
+ int k, f = 0;
+ u8 *kpage;
- if (pagesize > PAGE_SIZE) {
- ehca_gen_err("FATAL ERROR: pagesize=%x is greater "
- "than kernel page size", pagesize);
- return 0;
- }
- if (!pages_per_kpage) {
- ehca_gen_err("FATAL ERROR: invalid kernel page size. "
- "pages_per_kpage=%x", pages_per_kpage);
- return 0;
- }
- queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
- if (!queue->queue_pages) {
- ehca_gen_err("ERROR!! didn't get the memory");
- return 0;
- }
- memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
- /*
- * allocate pages for queue:
- * outer loop allocates whole kernel pages (page aligned) and
- * inner loop divides a kernel page into smaller hca queue pages
- */
- f = 0;
while (f < nr_of_pages) {
- u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
- int k;
+ kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
if (!kpage)
- goto ipz_queue_ctor_exit0; /*NOMEM*/
- for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) {
- (queue->queue_pages)[f] = (struct ipz_page *)kpage;
+ goto out;
+
+ for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
+ queue->queue_pages[f] = (struct ipz_page *)kpage;
kpage += EHCA_PAGESIZE;
f++;
}
}
+ return 1;
- queue->current_q_offset = 0;
+out:
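+	/* free only the kernel pages actually allocated; every
+	 * PAGES_PER_KPAGE-th entry starts a new kernel page */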
+ for (f = 0; f < nr_of_pages && queue->queue_pages[f];
+ f += PAGES_PER_KPAGE)
+ free_page((unsigned long)(queue->queue_pages)[f]);
+ return 0;
+}
+
+static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
+{
+ int order = ilog2(queue->pagesize) - 9;
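+	/* order is relative to the 512-byte minimum small-page size
+	 * (order 0 = 512 bytes, order 1 = 1024 bytes, ...) */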
+ struct ipz_small_queue_page *page;
+ unsigned long bit;
+
+ mutex_lock(&pd->lock);
+
+ if (!list_empty(&pd->free[order]))
+ page = list_entry(pd->free[order].next,
+ struct ipz_small_queue_page, list);
+ else {
+ page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
+ if (!page)
+ goto out;
+
+ page->page = get_zeroed_page(GFP_KERNEL);
+ if (!page->page) {
+ kmem_cache_free(small_qp_cache, page);
+ goto out;
+ }
+
+ list_add(&page->list, &pd->free[order]);
+ }
+
+ bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
+ __set_bit(bit, page->bitmap);
+ page->fill++;
+
+ if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
+ list_move(&page->list, &pd->full[order]);
+
+ mutex_unlock(&pd->lock);
+
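+	/* chunk address = kernel page address + bit index * chunk size */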
+ queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
+ queue->small_page = page;
+ return 1;
+
+out:
+ ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
+ return 0;
+}
+
+static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
+{
+ int order = ilog2(queue->pagesize) - 9;
+ struct ipz_small_queue_page *page = queue->small_page;
+ unsigned long bit;
+ int free_page = 0;
+
+	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
+ >> (order + 9);
+
+ mutex_lock(&pd->lock);
+
+ __clear_bit(bit, page->bitmap);
+ page->fill--;
+
+ if (page->fill == 0) {
+ list_del(&page->list);
+ free_page = 1;
+ }
+
+ if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
+ /* the page was full until we freed the chunk */
+ list_move_tail(&page->list, &pd->free[order]);
+
+ mutex_unlock(&pd->lock);
+
+ if (free_page) {
+ free_page(page->page);
+ kmem_cache_free(small_qp_cache, page);
+ }
+}
+
+int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
+ const u32 nr_of_pages, const u32 pagesize,
+ const u32 qe_size, const u32 nr_of_sg,
+ int is_small)
+{
+ if (pagesize > PAGE_SIZE) {
+ ehca_gen_err("FATAL ERROR: pagesize=%x "
+ "is greater than kernel page size", pagesize);
+ return 0;
+ }
+
+ /* init queue fields */
+ queue->queue_length = nr_of_pages * pagesize;
+ queue->pagesize = pagesize;
queue->qe_size = qe_size;
queue->act_nr_of_sg = nr_of_sg;
- queue->pagesize = pagesize;
+ queue->current_q_offset = 0;
queue->toggle_state = 1;
- return 1;
+ queue->small_page = NULL;
- ipz_queue_ctor_exit0:
- ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
- queue, f, nr_of_pages);
- for (f = 0; f < nr_of_pages; f += pages_per_kpage) {
- if (!(queue->queue_pages)[f])
- break;
- free_page((unsigned long)(queue->queue_pages)[f]);
+ /* allocate queue page pointers */
+ queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+ if (!queue->queue_pages) {
+ ehca_gen_err("Couldn't allocate queue page list");
+ return 0;
}
+ memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
+
+ /* allocate actual queue pages */
+ if (is_small) {
+ if (!alloc_small_queue_page(queue, pd))
+ goto ipz_queue_ctor_exit0;
+ } else
+ if (!alloc_queue_pages(queue, nr_of_pages))
+ goto ipz_queue_ctor_exit0;
+
+ return 1;
+
+ipz_queue_ctor_exit0:
+ ehca_gen_err("Couldn't alloc pages queue=%p "
+ "nr_of_pages=%x", queue, nr_of_pages);
+ vfree(queue->queue_pages);
+
return 0;
}
-int ipz_queue_dtor(struct ipz_queue *queue)
+int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
{
- int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
- int g;
- int nr_pages;
+ int i, nr_pages;
if (!queue || !queue->queue_pages) {
ehca_gen_dbg("queue or queue_pages is NULL");
return 0;
}
- nr_pages = queue->queue_length / queue->pagesize;
- for (g = 0; g < nr_pages; g += pages_per_kpage)
- free_page((unsigned long)(queue->queue_pages)[g]);
+
+ if (queue->small_page)
+ free_small_queue_page(queue, pd);
+ else {
+ nr_pages = queue->queue_length / queue->pagesize;
+ for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
+ free_page((unsigned long)queue->queue_pages[i]);
+ }
+
vfree(queue->queue_pages);
return 1;
}
+
+int ehca_init_small_qp_cache(void)
+{
+ small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
+ sizeof(struct ipz_small_queue_page),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!small_qp_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void ehca_cleanup_small_qp_cache(void)
+{
+ kmem_cache_destroy(small_qp_cache);
+}
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 57f141a36bce..c6937a044e8a 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -51,11 +51,25 @@
#include "ehca_tools.h"
#include "ehca_qes.h"
+struct ehca_pd;
+struct ipz_small_queue_page;
+
/* struct generic ehca page */
struct ipz_page {
u8 entries[EHCA_PAGESIZE];
};
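+/* small queues are carved out of kernel pages in 512-byte chunks */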
+#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
+
+struct ipz_small_queue_page {
+ unsigned long page;
+ unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
+ int fill;
+ void *mapped_addr;
+ u32 mmap_count;
+ struct list_head list;
+};
+
/* struct generic queue in linux kernel virtual memory (kv) */
struct ipz_queue {
u64 current_q_offset; /* current queue entry */
@@ -66,7 +80,8 @@ struct ipz_queue {
u32 queue_length; /* queue length allocated in bytes */
u32 pagesize;
u32 toggle_state; /* toggle flag - per page */
- u32 dummy3; /* 64 bit alignment */
+ u32 offset; /* save offset within page for small_qp */
+ struct ipz_small_queue_page *small_page;
};
/*
@@ -105,7 +120,6 @@ void *ipz_qpageit_get_inc(struct ipz_queue *queue);
* step in struct ipz_queue, will wrap in ringbuffer
* returns address (kv) of Queue Entry BEFORE increment
* warning don't use in parallel with ipz_qpageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
*/
static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
{
@@ -121,31 +135,24 @@ static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
}
/*
+ * return a bool indicating whether current Queue Entry is valid
+ */
+static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
+{
+ struct ehca_cqe *cqe = ipz_qeit_get(queue);
+ return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
+}
+
+/*
* return current Queue Entry, increment Queue Entry iterator by one
* step in struct ipz_queue, will wrap in ringbuffer
* returns address (kv) of Queue Entry BEFORE increment
* returns 0 and does not increment, if wrong valid state
* warning don't use in parallel with ipz_qpageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
*/
static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
{
- struct ehca_cqe *cqe = ipz_qeit_get(queue);
- u32 cqe_flags = cqe->cqe_flags;
-
- if ((cqe_flags >> 7) != (queue->toggle_state & 1))
- return NULL;
-
- ipz_qeit_get_inc(queue);
- return cqe;
-}
-
-static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
-{
- struct ehca_cqe *cqe = ipz_qeit_get(queue);
- u32 cqe_flags = cqe->cqe_flags;
-
- return cqe_flags >> 7 == (queue->toggle_state & 1);
+ return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
}
/*
@@ -196,9 +203,10 @@ struct ipz_qpt {
* see ipz_qpt_ctor()
* returns true if ok, false if out of memory
*/
-int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
- const u32 pagesize, const u32 qe_size,
- const u32 nr_of_sg);
+int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
+ const u32 nr_of_pages, const u32 pagesize,
+ const u32 qe_size, const u32 nr_of_sg,
+ int is_small);
/*
* destructor for a ipz_queue_t
@@ -206,7 +214,7 @@ int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
* see ipz_queue_ctor()
* returns true if ok, false if queue was NULL-ptr of free failed
*/
-int ipz_queue_dtor(struct ipz_queue *queue);
+int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
/*
* constructor for a ipz_qpt_t,
@@ -248,7 +256,7 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
- u32 qe = *(u8 *) ret;
+ u32 qe = *(u8 *)ret;
if ((qe >> 7) != (queue->toggle_state & 1))
return NULL;
ipz_qeit_eq_get_inc(queue); /* this is a good one */
@@ -258,7 +266,7 @@ static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
- u32 qe = *(u8 *) ret;
+ u32 qe = *(u8 *)ret;
if ((qe >> 7) != (queue->toggle_state & 1))
return NULL;
return ret;
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 90c14543677d..044da5828a78 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_IPATH
tristate "QLogic InfiniPath Driver"
- depends on (PCI_MSI || HT_IRQ) && 64BIT && INFINIBAND && NET
+ depends on (PCI_MSI || HT_IRQ) && 64BIT && NET
---help---
This is a driver for QLogic InfiniPath host channel adapters,
including InfiniBand verbs support. This driver allows these
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index ec2e603ea241..fe6738826865 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -14,7 +14,6 @@ ib_ipath-y := \
ipath_init_chip.o \
ipath_intr.o \
ipath_keys.o \
- ipath_layer.o \
ipath_mad.o \
ipath_mmap.o \
ipath_mr.o \
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 10c008f22ba6..b4b786d0dfca 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -189,8 +189,7 @@ typedef enum _ipath_ureg {
#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
#define IPATH_RUNTIME_RCVHDR_COPY 0x8
#define IPATH_RUNTIME_MASTER 0x10
-#define IPATH_RUNTIME_PBC_REWRITE 0x20
-#define IPATH_RUNTIME_LOOSE_DMA_ALIGN 0x40
+/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
/*
* This structure is returned by ipath_userinit() immediately after
@@ -432,8 +431,15 @@ struct ipath_user_info {
#define IPATH_CMD_UNUSED_1 25
#define IPATH_CMD_UNUSED_2 26
#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
+#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
-#define IPATH_CMD_MAX 27
+#define IPATH_CMD_MAX 28
+
+/*
+ * Poll types
+ */
+#define IPATH_POLL_TYPE_URGENT 0x01
+#define IPATH_POLL_TYPE_OVERFLOW 0x02
struct ipath_port_info {
__u32 num_active; /* number of active units */
@@ -474,6 +480,8 @@ struct ipath_cmd {
__u16 part_key;
/* user address of __u32 bitmask of active slaves */
__u64 slave_mask_addr;
+ /* type of polling we want */
+ __u16 poll_type;
} cmd;
};
@@ -502,13 +510,30 @@ struct __ipath_sendpkt {
struct ipath_iovec sps_iov[4];
};
-/* Passed into diag data special file's ->write method. */
+/*
+ * Diagnostics can send a packet by "writing" one of the following
+ * two structs to the diag data special file.
+ * The first is the legacy version, kept for backward compatibility.
+ */
struct ipath_diag_pkt {
__u32 unit;
__u64 data;
__u32 len;
};
+/* The second diag_pkt struct is the expanded version that allows
+ * more control over the packet, specifically, by allowing a custom
+ * pbc (+ extra) qword, so that special modes and deliberate
+ * changes to CRCs can be used. The elements were also re-ordered
+ * for better alignment and to avoid padding issues.
+ */
+struct ipath_diag_xpkt {
+ __u64 data;
+ __u64 pbc_wd;
+ __u32 unit;
+ __u32 len;
+};
+
/*
* Data layout in I2C flash (for GUID, etc.)
* All fields are little-endian binary unless otherwise stated
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 3e9241badba0..a6f04d27ec57 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -90,6 +90,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
wc->queue[head].sl = entry->sl;
wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
wc->queue[head].port_num = entry->port_num;
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -139,7 +141,8 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
if (tail == wc->head)
break;
-
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
wc->queue[tail].qp_num);
entry->qp = &qp->ibqp;
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 42bfbdb0d3e6..19c56e6491eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 63e8368b0e95..a698f1949d10 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -323,13 +323,14 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
{
u32 __iomem *piobuf;
u32 plen, clen, pbufn;
- struct ipath_diag_pkt dp;
+ struct ipath_diag_pkt odp;
+ struct ipath_diag_xpkt dp;
u32 *tmpbuf = NULL;
struct ipath_devdata *dd;
ssize_t ret = 0;
u64 val;
- if (count < sizeof(dp)) {
+ if (count != sizeof(dp)) {
ret = -EINVAL;
goto bail;
}
@@ -339,6 +340,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
goto bail;
}
+ /*
+ * Due to padding/alignment issues (lessened with new struct)
+ * the old and new structs are the same length. We need to
+ * disambiguate them, which we can do because odp.len has never
+ * been less than the total of LRH+BTH+DETH so far, while
+ * dp.unit (same offset) is unlikely to get that high.
+ * Similarly, dp.data, the pointer to user at the same offset
+ * as odp.unit, is almost certainly at least one (512-byte) page
+ * "above" NULL. The if-block below can be omitted if compatibility
+ * between a new driver and older diagnostic code is unimportant.
+ * Compatibility in the other direction (new diags, old driver) is
+ * handled in the diagnostic code, with a warning.
+ */
+ if (dp.unit >= 20 && dp.data < 512) {
+ /* very probable version mismatch. Fix it up */
+ memcpy(&odp, &dp, sizeof(odp));
+ /* We got a legacy dp, copy elements to dp */
+ dp.unit = odp.unit;
+ dp.data = odp.data;
+ dp.len = odp.len;
+ dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
+ }
+
/* send count must be an exact number of dwords */
if (dp.len & 3) {
ret = -EINVAL;
@@ -371,9 +395,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
ret = -ENODEV;
goto bail;
}
+ /* Check link state, but not if we have custom PBC */
val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
- if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM &&
- val != IPATH_IBSTATE_ACTIVE) {
+ if (!dp.pbc_wd && val != IPATH_IBSTATE_INIT &&
+ val != IPATH_IBSTATE_ARM && val != IPATH_IBSTATE_ACTIVE) {
ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
dd->ipath_unit, (unsigned long long) val);
ret = -EINVAL;
@@ -419,9 +444,13 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
dd->ipath_unit, plen - 1, pbufn);
+ if (dp.pbc_wd == 0)
+ /* Legacy operation, use computed pbc_wd */
+ dp.pbc_wd = plen;
+
/* we have to flush after the PBC for correctness on some cpus
* or WC buffer can be written out of order */
- writeq(plen, piobuf);
+ writeq(dp.pbc_wd, piobuf);
ipath_flush_wc();
/* copy all but the trigger word, then flush, so it's written
* to chip before trigger word, then write trigger word, then
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index e3a223209710..09c5fd84b1e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -104,6 +104,9 @@ static int __devinit ipath_init_one(struct pci_dev *,
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
+/* Number of seconds before our card status check... */
+#define STATUS_TIMEOUT 60
+
static const struct pci_device_id ipath_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
@@ -119,6 +122,18 @@ static struct pci_driver ipath_driver = {
.id_table = ipath_pci_tbl,
};
+static void ipath_check_status(struct work_struct *work)
+{
+ struct ipath_devdata *dd = container_of(work, struct ipath_devdata,
+ status_work.work);
+
+ /*
+ * If we don't have any interrupts, let the user know and
+ * don't bother checking again.
+ */
+ if (dd->ipath_int_counter == 0)
+ dev_err(&dd->pcidev->dev, "No interrupts detected.\n");
+}
static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
u32 *bar0, u32 *bar1)
@@ -187,6 +202,8 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
+ INIT_DELAYED_WORK(&dd->status_work, ipath_check_status);
+
list_add(&dd->ipath_list, &ipath_dev_list);
bail_unlock:
@@ -270,7 +287,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
struct ipath_devdata *dd;
unsigned long long addr;
u32 bar0 = 0, bar1 = 0;
- u8 rev;
dd = ipath_alloc_devdata(pdev);
if (IS_ERR(dd)) {
@@ -432,13 +448,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
dd->ipath_deviceid = ent->device; /* save for later use */
dd->ipath_vendorid = ent->vendor;
- ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
- if (ret) {
- ipath_dev_err(dd, "Failed to read PCI revision ID unit "
- "%u: err %d\n", dd->ipath_unit, -ret);
- goto bail_regions; /* shouldn't ever happen */
- }
- dd->ipath_pcirev = rev;
+ dd->ipath_pcirev = pdev->revision;
#if defined(__powerpc__)
/* There isn't a generic way to specify writethrough mappings */
@@ -511,6 +521,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
ipath_diag_add(dd);
ipath_register_ib_device(dd);
+	/* Check the card status in STATUS_TIMEOUT seconds. */
+ schedule_delayed_work(&dd->status_work, HZ * STATUS_TIMEOUT);
+
goto bail;
bail_irqsetup:
@@ -638,6 +651,9 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
*/
ipath_shutdown_device(dd);
+ cancel_delayed_work(&dd->status_work);
+ flush_scheduled_work();
+
if (dd->verbs_dev)
ipath_unregister_ib_device(dd->verbs_dev);
@@ -706,9 +722,9 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
u64 sendctrl, sendorig;
ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
- sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
+ sendorig = dd->ipath_sendctrl;
for (i = first; i < last; i++) {
- sendctrl = sendorig |
+ sendctrl = sendorig | INFINIPATH_S_DISARM |
(i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
sendctrl);
@@ -719,12 +735,12 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
* while we were looping; no critical bits that would require
* locking.
*
- * Write a 0, and then the original value, reading scratch in
+	 * Disable PIOAVAILUPD, then re-enable, reading scratch in
* between. This seems to avoid a chip timing race that causes
* pioavail updates to memory to stop.
*/
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- 0);
+ sendorig & ~IPATH_S_PIOBUFAVAILUPD);
sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl);
@@ -1021,14 +1037,10 @@ void ipath_kreceive(struct ipath_devdata *dd)
goto bail;
}
- /* There is already a thread processing this queue. */
- if (test_and_set_bit(0, &dd->ipath_rcv_pending))
- goto bail;
-
l = dd->ipath_port0head;
hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
if (l == hdrqtail)
- goto done;
+ goto bail;
reloop:
for (i = 0; l != hdrqtail; i++) {
@@ -1163,10 +1175,6 @@ reloop:
ipath_stats.sps_avgpkts_call =
ipath_stats.sps_port0pkts / ++totcalls;
-done:
- clear_bit(0, &dd->ipath_rcv_pending);
- smp_mb__after_clear_bit();
-
bail:;
}
@@ -1596,6 +1604,35 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
return ret;
}
+
+/*
+ * Flush all sends that might be in the ready to send state, as well as any
+ * that are in the process of being sent. Used whenever we need to be
+ * sure the send side is idle. Cleans up all buffer state by canceling
+ * all pio buffers, and issuing an abort, which cleans up anything in the
+ * launch fifo. The cancel is superfluous on some chip versions, but
+ * it's safer to always do it.
+ * PIOAvail bits are updated by the chip as if normal send had happened.
+ */
+void ipath_cancel_sends(struct ipath_devdata *dd)
+{
+ ipath_dbg("Cancelling all in-progress send buffers\n");
+ dd->ipath_lastcancel = jiffies+HZ/2; /* skip armlaunch errs a bit */
+ /*
+ * the abort bit is auto-clearing. We read scratch to be sure
+ * that cancels and the abort have taken effect in the chip.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ INFINIPATH_S_ABORT);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_disarm_piobufs(dd, 0,
+ (unsigned)(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k));
+
+ /* and again, be sure all have hit the chip */
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+}
+
+
static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
{
static const char *what[4] = {
@@ -1617,14 +1654,8 @@ static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
/* flush all queued sends when going to DOWN or INIT, to be sure that
* they don't block MAD packets */
- if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- INFINIPATH_S_ABORT);
- ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
- (unsigned)(dd->ipath_piobcnt2k +
- dd->ipath_piobcnt4k) -
- dd->ipath_lastport_piobuf);
- }
+ if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT)
+ ipath_cancel_sends(dd);
ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
dd->ipath_ibcctrl | which);
@@ -1846,6 +1877,87 @@ void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
ipath_write_kreg(dd, where, value);
}
+/*
+ * The following deals with the "obviously simple" task of overriding the state
+ * of the LEDS, which normally indicate link physical and logical status.
+ * The complications arise in dealing with different hardware mappings
+ * and the board-dependent routine being called from interrupts.
+ * And then there's the requirement to _flash_ them.
+ */
+#define LED_OVER_FREQ_SHIFT 8
+#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
+/* Below is "non-zero" to force override, but both actual LEDs are off */
+#define LED_OVER_BOTH_OFF (8)
+
+static void ipath_run_led_override(unsigned long opaque)
+{
+ struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
+ int timeoff;
+ int pidx;
+ u64 lstate, ltstate, val;
+
+ if (!(dd->ipath_flags & IPATH_INITTED))
+ return;
+
+ pidx = dd->ipath_led_override_phase++ & 1;
+ dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
+ timeoff = dd->ipath_led_override_timeoff;
+
+ /*
+	 * Below potentially restores the LED values per the current status;
+	 * it should also possibly set up the traffic-blink register,
+ * but leave that to per-chip functions.
+ */
+ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
+ ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
+ INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
+ lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
+ INFINIPATH_IBCS_LINKSTATE_MASK;
+
+ dd->ipath_f_setextled(dd, lstate, ltstate);
+ mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
+}
+
+void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
+{
+ int timeoff, freq;
+
+ if (!(dd->ipath_flags & IPATH_INITTED))
+ return;
+
+ /* First check if we are blinking. If not, use 1HZ polling */
+ timeoff = HZ;
+ freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+
+ if (freq) {
+ /* For blink, set each phase from one nybble of val */
+ dd->ipath_led_override_vals[0] = val & 0xF;
+ dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
+ timeoff = (HZ << 4)/freq;
+ } else {
+		/* Non-blink: set both phases the same. */
+ dd->ipath_led_override_vals[0] = val & 0xF;
+ dd->ipath_led_override_vals[1] = val & 0xF;
+ }
+ dd->ipath_led_override_timeoff = timeoff;
+
+ /*
+ * If the timer has not already been started, do so. Use a "quick"
+ * timeout so the function will be called soon, to look at our request.
+ */
+ if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
+ /* Need to start timer */
+ init_timer(&dd->ipath_led_override_timer);
+ dd->ipath_led_override_timer.function =
+ ipath_run_led_override;
+ dd->ipath_led_override_timer.data = (unsigned long) dd;
+ dd->ipath_led_override_timer.expires = jiffies + 1;
+ add_timer(&dd->ipath_led_override_timer);
+ } else {
+ atomic_dec(&dd->ipath_led_override_timer_active);
+ }
+}
+
/**
* ipath_shutdown_device - shut down a device
* @dd: the infinipath device
@@ -1886,17 +1998,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
*/
udelay(5);
- /*
- * abort any armed or launched PIO buffers that didn't go. (self
- * clearing). Will cause any packet currently being transmitted to
- * go out with an EBP, and may also cause a short packet error on
- * the receiver.
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- INFINIPATH_S_ABORT);
-
ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
INFINIPATH_IBCC_LINKINITCMD_SHIFT);
+ ipath_cancel_sends(dd);
/* disable IBC */
dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
@@ -1909,7 +2013,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
* Turn the LEDs off explicitly for the same reason.
*/
dd->ipath_f_quiet_serdes(dd);
- dd->ipath_f_setextled(dd, 0, 0);
if (dd->ipath_stats_timer_active) {
del_timer_sync(&dd->ipath_stats_timer);
@@ -1925,6 +2028,9 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
+
+ ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
+ ipath_update_eeprom_log(dd);
}
/**
@@ -2085,6 +2191,16 @@ int ipath_reset_device(int unit)
goto bail;
}
+ if (atomic_read(&dd->ipath_led_override_timer_active)) {
+ /* Need to stop LED timer, _then_ shut off LEDs */
+ del_timer_sync(&dd->ipath_led_override_timer);
+ atomic_set(&dd->ipath_led_override_timer_active, 0);
+ }
+
+ /* Shut off LEDs after we are sure timer is not running */
+ dd->ipath_led_override = LED_OVER_BOTH_OFF;
+ dd->ipath_f_setextled(dd, 0, 0);
+
dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index 030185f90ee2..b4503e9c1e95 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -95,39 +95,37 @@ static int i2c_gpio_set(struct ipath_devdata *dd,
enum i2c_type line,
enum i2c_state new_line_state)
{
- u64 read_val, write_val, mask, *gpioval;
+ u64 out_mask, dir_mask, *gpioval;
+ unsigned long flags = 0;
gpioval = &dd->ipath_gpio_out;
- read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
- if (line == i2c_line_scl)
- mask = dd->ipath_gpio_scl;
- else
- mask = dd->ipath_gpio_sda;
- if (new_line_state == i2c_line_high)
+ if (line == i2c_line_scl) {
+ dir_mask = dd->ipath_gpio_scl;
+ out_mask = (1UL << dd->ipath_gpio_scl_num);
+ } else {
+ dir_mask = dd->ipath_gpio_sda;
+ out_mask = (1UL << dd->ipath_gpio_sda_num);
+ }
+
+ spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
+ if (new_line_state == i2c_line_high) {
/* tri-state the output rather than force high */
- write_val = read_val & ~mask;
- else
+ dd->ipath_extctrl &= ~dir_mask;
+ } else {
/* config line to be an output */
- write_val = read_val | mask;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
+ dd->ipath_extctrl |= dir_mask;
+ }
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
- /* set high and verify */
+ /* set output as well (no real verify) */
if (new_line_state == i2c_line_high)
- write_val = 0x1UL;
+ *gpioval |= out_mask;
else
- write_val = 0x0UL;
+ *gpioval &= ~out_mask;
- if (line == i2c_line_scl) {
- write_val <<= dd->ipath_gpio_scl_num;
- *gpioval = *gpioval & ~(1UL << dd->ipath_gpio_scl_num);
- *gpioval |= write_val;
- } else {
- write_val <<= dd->ipath_gpio_sda_num;
- *gpioval = *gpioval & ~(1UL << dd->ipath_gpio_sda_num);
- *gpioval |= write_val;
- }
ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
+ spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
return 0;
}
@@ -145,8 +143,9 @@ static int i2c_gpio_get(struct ipath_devdata *dd,
enum i2c_type line,
enum i2c_state *curr_statep)
{
- u64 read_val, write_val, mask;
+ u64 read_val, mask;
int ret;
+ unsigned long flags = 0;
/* check args */
if (curr_statep == NULL) {
@@ -154,15 +153,21 @@ static int i2c_gpio_get(struct ipath_devdata *dd,
goto bail;
}
- read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
/* config line to be an input */
if (line == i2c_line_scl)
mask = dd->ipath_gpio_scl;
else
mask = dd->ipath_gpio_sda;
- write_val = read_val & ~mask;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, write_val);
+
+ spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
+ dd->ipath_extctrl &= ~mask;
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
+ /*
+ * Below is very unlikely to reflect true input state if Output
+ * Enable actually changed.
+ */
read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
+ spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
if (read_val & mask)
*curr_statep = i2c_line_high;
@@ -192,6 +197,7 @@ static void i2c_wait_for_writes(struct ipath_devdata *dd)
static void scl_out(struct ipath_devdata *dd, u8 bit)
{
+ udelay(1);
i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
i2c_wait_for_writes(dd);
@@ -314,12 +320,18 @@ static int eeprom_reset(struct ipath_devdata *dd)
int clock_cycles_left = 9;
u64 *gpioval = &dd->ipath_gpio_out;
int ret;
+ unsigned long flags;
- eeprom_init = 1;
+ spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
+ /* Make sure shadows are consistent */
+ dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
*gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
+ spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
+
ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
"is %llx\n", (unsigned long long) *gpioval);
+ eeprom_init = 1;
/*
* This is to get the i2c into a known state, by first going low,
* then tristate sda (and then tristate scl as first thing
@@ -355,8 +367,8 @@ bail:
* @len: number of bytes to receive
*/
-int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
- void *buffer, int len)
+static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
+ u8 eeprom_offset, void *buffer, int len)
{
/* compiler complains unless initialized */
u8 single_byte = 0;
@@ -406,6 +418,7 @@ bail:
return ret;
}
+
/**
* ipath_eeprom_write - writes data to the eeprom via I2C
* @dd: the infinipath device
@@ -413,8 +426,8 @@ bail:
* @buffer: data to write
* @len: number of bytes to write
*/
-int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
- const void *buffer, int len)
+static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
+ const void *buffer, int len)
{
u8 single_byte;
int sub_len;
@@ -488,6 +501,38 @@ bail:
return ret;
}
+/*
+ * The public entry-points ipath_eeprom_read() and ipath_eeprom_write()
+ * are now just wrappers around the internal functions.
+ */
+int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
+ void *buff, int len)
+{
+ int ret;
+
+ ret = down_interruptible(&dd->ipath_eep_sem);
+ if (!ret) {
+ ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
+ up(&dd->ipath_eep_sem);
+ }
+
+ return ret;
+}
+
+int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
+ const void *buff, int len)
+{
+ int ret;
+
+ ret = down_interruptible(&dd->ipath_eep_sem);
+ if (!ret) {
+ ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
+ up(&dd->ipath_eep_sem);
+ }
+
+ return ret;
+}
+
static u8 flash_csum(struct ipath_flash *ifp, int adjust)
{
u8 *ip = (u8 *) ifp;
@@ -515,7 +560,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
void *buf;
struct ipath_flash *ifp;
__be64 guid;
- int len;
+ int len, eep_stat;
u8 csum, *bguid;
int t = dd->ipath_unit;
struct ipath_devdata *dd0 = ipath_lookup(0);
@@ -559,7 +604,11 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
goto bail;
}
- if (ipath_eeprom_read(dd, 0, buf, len)) {
+ down(&dd->ipath_eep_sem);
+ eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
+ up(&dd->ipath_eep_sem);
+
+ if (eep_stat) {
ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
goto done;
}
@@ -634,8 +683,192 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
(unsigned long long) be64_to_cpu(dd->ipath_guid));
+ memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
+ /*
+	 * Power-on (actually "active") hours are kept as a little-endian value
+ * in EEPROM, but as seconds in a (possibly as small as 24-bit)
+ * atomic_t while running.
+ */
+ atomic_set(&dd->ipath_active_time, 0);
+ dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
+
done:
vfree(buf);
bail:;
}
+
+/**
+ * ipath_update_eeprom_log - copy active-time and error counters to eeprom
+ * @dd: the infinipath device
+ *
+ * Although the time is kept as seconds in the ipath_devdata struct, it is
+ * rounded to hours for re-write, as we have only 16 bits in EEPROM.
+ * First-cut code reads whole (expected) struct ipath_flash, modifies,
+ * re-writes. Future direction: read/write only what we need, assuming
+ * that the EEPROM had to have been "good enough" for driver init, and
+ * if not, we aren't making it worse.
+ *
+ */
+
+int ipath_update_eeprom_log(struct ipath_devdata *dd)
+{
+ void *buf;
+ struct ipath_flash *ifp;
+ int len, hi_water;
+ uint32_t new_time, new_hrs;
+ u8 csum;
+ int ret, idx;
+ unsigned long flags;
+
+ /* first, check if we actually need to do anything. */
+ ret = 0;
+ for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
+ if (dd->ipath_eep_st_new_errs[idx]) {
+ ret = 1;
+ break;
+ }
+ }
+ new_time = atomic_read(&dd->ipath_active_time);
+
+ if (ret == 0 && new_time < 3600)
+ return 0;
+
+ /*
+ * The quick-check above determined that there is something worthy
+	 * of logging, so get the current contents and take a more detailed look.
+ */
+ len = offsetof(struct ipath_flash, if_future);
+ buf = vmalloc(len);
+ ret = 1;
+ if (!buf) {
+ ipath_dev_err(dd, "Couldn't allocate memory to read %u "
+ "bytes from eeprom for logging\n", len);
+ goto bail;
+ }
+
+ /* Grab semaphore and read current EEPROM. If we get an
+ * error, let go, but if not, keep it until we finish write.
+ */
+ ret = down_interruptible(&dd->ipath_eep_sem);
+ if (ret) {
+ ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
+ goto free_bail;
+ }
+ ret = ipath_eeprom_internal_read(dd, 0, buf, len);
+ if (ret) {
+ up(&dd->ipath_eep_sem);
+ ipath_dev_err(dd, "Unable read EEPROM for logging\n");
+ goto free_bail;
+ }
+ ifp = (struct ipath_flash *)buf;
+
+ csum = flash_csum(ifp, 0);
+ if (csum != ifp->if_csum) {
+ up(&dd->ipath_eep_sem);
+ ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
+ csum, ifp->if_csum);
+ ret = 1;
+ goto free_bail;
+ }
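+	/* hi_water tracks the highest modified offset, so only that
+	 * prefix of the flash image needs to be rewritten below */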
+ hi_water = 0;
+ spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+ for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
+ int new_val = dd->ipath_eep_st_new_errs[idx];
+ if (new_val) {
+ /*
+			 * If we have seen any errors, add to EEPROM values.
+			 * We need to saturate at 0xFF (255), and we also
+			 * would need to adjust the checksum if we were
+			 * trying to minimize EEPROM traffic.
+ * Note that we add to actual current count in EEPROM,
+ * in case it was altered while we were running.
+ */
+ new_val += ifp->if_errcntp[idx];
+ if (new_val > 0xFF)
+ new_val = 0xFF;
+ if (ifp->if_errcntp[idx] != new_val) {
+ ifp->if_errcntp[idx] = new_val;
+ hi_water = offsetof(struct ipath_flash,
+ if_errcntp) + idx;
+ }
+ /*
+ * update our shadow (used to minimize EEPROM
+ * traffic), to match what we are about to write.
+ */
+ dd->ipath_eep_st_errs[idx] = new_val;
+ dd->ipath_eep_st_new_errs[idx] = 0;
+ }
+ }
+ /*
+ * now update active-time. We would like to round to the nearest hour
+ * but unless atomic_t are sure to be proper signed ints we cannot,
+ * because we need to account for what we "transfer" to EEPROM and
+ * if we log an hour at 31 minutes, then we would need to set
+ * active_time to -29 to accurately count the _next_ hour.
+ */
+ if (new_time > 3600) {
+ new_hrs = new_time / 3600;
+ atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
+ new_hrs += dd->ipath_eep_hrs;
+ if (new_hrs > 0xFFFF)
+ new_hrs = 0xFFFF;
+ dd->ipath_eep_hrs = new_hrs;
+ if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
+ ifp->if_powerhour[0] = new_hrs & 0xFF;
+ hi_water = offsetof(struct ipath_flash, if_powerhour);
+ }
+ if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
+ ifp->if_powerhour[1] = new_hrs >> 8;
+ hi_water = offsetof(struct ipath_flash, if_powerhour)
+ + 1;
+ }
+ }
+ /*
+ * There is a tiny possibility that we could somehow fail to write
+ * the EEPROM after updating our shadows, but problems from holding
+ * the spinlock too long are a much bigger issue.
+ */
+ spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
+ if (hi_water) {
+		/* we made some change to the data, update cksum and write */
+ csum = flash_csum(ifp, 1);
+ ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
+ }
+ up(&dd->ipath_eep_sem);
+ if (ret)
+ ipath_dev_err(dd, "Failed updating EEPROM\n");
+
+free_bail:
+ vfree(buf);
+bail:
+ return ret;
+
+}
+
+/**
+ * ipath_inc_eeprom_err - increment one of the four error counters
+ * that are logged to EEPROM.
+ * @dd: the infinipath device
+ * @eidx: 0..3, the counter to increment
+ * @incr: how much to add
+ *
+ * Each counter is 8-bits, and saturates at 255 (0xFF). They
+ * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
+ * is called, but it can only be called in a context that allows sleep.
+ * This function can be called even at interrupt level.
+ */
+
+void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
+{
+ uint new_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+ new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
+ if (new_val > 255)
+ new_val = 255;
+ dd->ipath_eep_st_new_errs[eidx] = new_val;
+ spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
+ return;
+}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 1272aaf2a785..33ab0d6b80ff 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -396,7 +396,8 @@ static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
"TID %u, vaddr %lx, physaddr %llx pgp %p\n",
tid, vaddr, (unsigned long long) physaddr,
pagep[i]);
- dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
+ dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
+ physaddr);
/*
* don't check this tid in ipath_portshadow, since we
* just filled it in; start with the next one.
@@ -422,7 +423,8 @@ static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
if (dd->ipath_pageshadow[porttid + tid]) {
ipath_cdbg(VERBOSE, "Freeing TID %u\n",
tid);
- dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
+ dd->ipath_f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED,
dd->ipath_tidinvalid);
pci_unmap_page(dd->pcidev,
dd->ipath_physshadow[porttid + tid],
@@ -538,7 +540,8 @@ static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
if (dd->ipath_pageshadow[porttid + tid]) {
ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
pd->port_pid, tid);
- dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
+ dd->ipath_f_put_tid(dd, &tidbase[tid],
+ RCVHQ_RCV_TYPE_EXPECTED,
dd->ipath_tidinvalid);
pci_unmap_page(dd->pcidev,
dd->ipath_physshadow[porttid + tid],
@@ -921,7 +924,8 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
(u64 __iomem *)
((char __iomem *)
dd->ipath_kregbase +
- dd->ipath_rcvegrbase), 0, pa);
+ dd->ipath_rcvegrbase),
+ RCVHQ_RCV_TYPE_EAGER, pa);
pa += egrsize;
}
cond_resched(); /* don't hog the cpu */
@@ -1337,68 +1341,133 @@ bail:
return ret;
}
-static unsigned int ipath_poll(struct file *fp,
- struct poll_table_struct *pt)
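+/* report POLLERR on receive overflow and POLLIN/POLLRDNORM once an
+ * urgent packet has arrived; otherwise arm the wait queue and sleep */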
+static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
+ struct file *fp,
+ struct poll_table_struct *pt)
{
- struct ipath_portdata *pd;
- u32 head, tail;
- int bit;
unsigned pollflag = 0;
struct ipath_devdata *dd;
- pd = port_fp(fp);
- if (!pd)
- goto bail;
dd = pd->port_dd;
- bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
- set_bit(bit, &dd->ipath_rcvctrl);
+ if (test_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag)) {
+ pollflag |= POLLERR;
+ clear_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag);
+ }
- /*
- * Before blocking, make sure that head is still == tail,
- * reading from the chip, so we can be sure the interrupt
- * enable has made it to the chip. If not equal, disable
- * interrupt again and return immediately. This avoids races,
- * and the overhead of the chip read doesn't matter much at
- * this point, since we are waiting for something anyway.
- */
+ if (test_bit(IPATH_PORT_WAITING_URG, &pd->int_flag)) {
+ pollflag |= POLLIN | POLLRDNORM;
+ clear_bit(IPATH_PORT_WAITING_URG, &pd->int_flag);
+ }
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
+ if (!pollflag) {
+ set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
+ if (pd->poll_type & IPATH_POLL_TYPE_OVERFLOW)
+ set_bit(IPATH_PORT_WAITING_OVERFLOW,
+ &pd->port_flag);
+
+ poll_wait(fp, &pd->port_wait, pt);
+ }
+
+ return pollflag;
+}
+
+static unsigned int ipath_poll_next(struct ipath_portdata *pd,
+ struct file *fp,
+ struct poll_table_struct *pt)
+{
+ u32 head, tail;
+ unsigned pollflag = 0;
+ struct ipath_devdata *dd;
+
+ dd = pd->port_dd;
head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
- tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
+ tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr;
- if (tail == head) {
+ if (test_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag)) {
+ pollflag |= POLLERR;
+ clear_bit(IPATH_PORT_WAITING_OVERFLOW, &pd->int_flag);
+ }
+
+ if (tail != head ||
+ test_bit(IPATH_PORT_WAITING_RCV, &pd->int_flag)) {
+ pollflag |= POLLIN | POLLRDNORM;
+ clear_bit(IPATH_PORT_WAITING_RCV, &pd->int_flag);
+ }
+
+ if (!pollflag) {
set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
+ if (pd->poll_type & IPATH_POLL_TYPE_OVERFLOW)
+ set_bit(IPATH_PORT_WAITING_OVERFLOW,
+ &pd->port_flag);
+
+ set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT,
+ &dd->ipath_rcvctrl);
+
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+ dd->ipath_rcvctrl);
+
if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
- (void)ipath_write_ureg(dd, ur_rcvhdrhead,
- dd->ipath_rhdrhead_intr_off
- | head, pd->port_port);
- poll_wait(fp, &pd->port_wait, pt);
+ ipath_write_ureg(dd, ur_rcvhdrhead,
+ dd->ipath_rhdrhead_intr_off | head,
+ pd->port_port);
- if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
- /* timed out, no packets received */
- clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
- pd->port_rcvwait_to++;
- }
- else
- pollflag = POLLIN | POLLRDNORM;
- }
- else {
- /* it's already happened; don't do wait_event overhead */
- pollflag = POLLIN | POLLRDNORM;
- pd->port_rcvnowait++;
+ poll_wait(fp, &pd->port_wait, pt);
}
- clear_bit(bit, &dd->ipath_rcvctrl);
- ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
- dd->ipath_rcvctrl);
+ return pollflag;
+}
+
+static unsigned int ipath_poll(struct file *fp,
+ struct poll_table_struct *pt)
+{
+ struct ipath_portdata *pd;
+ unsigned pollflag;
+
+ pd = port_fp(fp);
+ if (!pd)
+ pollflag = 0;
+ else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
+ pollflag = ipath_poll_urgent(pd, fp, pt);
+ else
+ pollflag = ipath_poll_next(pd, fp, pt);
-bail:
return pollflag;
}
+static int ipath_supports_subports(int user_swmajor, int user_swminor)
+{
+ /* no subport implementation prior to software version 1.3 */
+ return (user_swmajor > 1) || (user_swminor >= 3);
+}
+
+static int ipath_compatible_subports(int user_swmajor, int user_swminor)
+{
+ /* this code is written long-hand for clarity */
+ if (IPATH_USER_SWMAJOR != user_swmajor) {
+ /* no promise of compatibility if major mismatch */
+ return 0;
+ }
+ if (IPATH_USER_SWMAJOR == 1) {
+ switch (IPATH_USER_SWMINOR) {
+ case 0:
+ case 1:
+ case 2:
+ /* no subport implementation so cannot be compatible */
+ return 0;
+ case 3:
+ /* 3 is only compatible with itself */
+ return user_swminor == 3;
+ default:
+ /* >= 4 are compatible (or are expected to be) */
+ return user_swminor >= 4;
+ }
+ }
+ /* make no promises yet for future major versions */
+ return 0;
+}
+
static int init_subports(struct ipath_devdata *dd,
struct ipath_portdata *pd,
const struct ipath_user_info *uinfo)
@@ -1408,20 +1477,32 @@ static int init_subports(struct ipath_devdata *dd,
size_t size;
/*
- * If the user is requesting zero or one port,
+ * If the user is requesting zero subports,
* skip the subport allocation.
*/
- if (uinfo->spu_subport_cnt <= 1)
+ if (uinfo->spu_subport_cnt <= 0)
+ goto bail;
+
+ /* Self-consistency check for ipath_compatible_subports() */
+ if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
+ !ipath_compatible_subports(IPATH_USER_SWMAJOR,
+ IPATH_USER_SWMINOR)) {
+ dev_info(&dd->pcidev->dev,
+ "Inconsistent ipath_compatible_subports()\n");
goto bail;
+ }
- /* Old user binaries don't know about new subport implementation */
- if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) {
+ /* Check for subport compatibility */
+ if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
+ uinfo->spu_userversion & 0xffff)) {
dev_info(&dd->pcidev->dev,
- "Mismatched user minor version (%d) and driver "
- "minor version (%d) while port sharing. Ensure "
+ "Mismatched user version (%d.%d) and driver "
+ "version (%d.%d) while port sharing. Ensure "
"that driver and library are from the same "
"release.\n",
+ (int) (uinfo->spu_userversion >> 16),
(int) (uinfo->spu_userversion & 0xffff),
+ IPATH_USER_SWMAJOR,
IPATH_USER_SWMINOR);
goto bail;
}
@@ -1725,14 +1806,13 @@ static int ipath_open(struct inode *in, struct file *fp)
return fp->private_data ? 0 : -ENOMEM;
}
-
/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
const struct ipath_user_info *uinfo)
{
int ret;
int i_minor;
- unsigned swminor;
+ unsigned swmajor, swminor;
/* Check to be sure we haven't already initialized this file */
if (port_fp(fp)) {
@@ -1741,7 +1821,8 @@ static int ipath_assign_port(struct file *fp,
}
/* for now, if major version is different, bail */
- if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
+ swmajor = uinfo->spu_userversion >> 16;
+ if (swmajor != IPATH_USER_SWMAJOR) {
ipath_dbg("User major version %d not same as driver "
"major %d\n", uinfo->spu_userversion >> 16,
IPATH_USER_SWMAJOR);
@@ -1756,7 +1837,8 @@ static int ipath_assign_port(struct file *fp,
mutex_lock(&ipath_mutex);
- if (swminor == IPATH_USER_SWMINOR && uinfo->spu_subport_cnt &&
+ if (ipath_compatible_subports(swmajor, swminor) &&
+ uinfo->spu_subport_cnt &&
(ret = find_shared_port(fp, uinfo))) {
mutex_unlock(&ipath_mutex);
if (ret > 0)
@@ -2020,7 +2102,8 @@ static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
info.port = pd->port_port;
info.subport = subport;
/* Don't return new fields if old library opened the port. */
- if ((pd->userversion & 0xffff) == IPATH_USER_SWMINOR) {
+ if (ipath_supports_subports(pd->userversion >> 16,
+ pd->userversion & 0xffff)) {
/* Number of user ports available for this device. */
info.num_ports = pd->port_dd->ipath_cfgports - 1;
info.num_subports = pd->port_subport_cnt;
@@ -2123,6 +2206,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
src = NULL;
dest = NULL;
break;
+ case IPATH_CMD_POLL_TYPE:
+ copy = sizeof(cmd.cmd.poll_type);
+ dest = &cmd.cmd.poll_type;
+ src = &ucmd->cmd.poll_type;
+ break;
default:
ret = -EINVAL;
goto bail;
@@ -2195,6 +2283,9 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
case IPATH_CMD_PIOAVAILUPD:
ret = ipath_force_pio_avail_update(pd->port_dd);
break;
+ case IPATH_CMD_POLL_TYPE:
+ pd->poll_type = cmd.cmd.poll_type;
+ break;
}
if (ret >= 0)
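For context, a hypothetical userspace fragment (not part of this patch) showing how the new IPATH_CMD_POLL_TYPE command could be used: select the poll type with a write() of an ipath_cmd on the already-open device fd, then poll() for the chosen event. Only the field and constant names referenced in the driver code above are taken from the source; the surrounding structure is an assumption.

	#include <poll.h>
	#include <string.h>
	#include <unistd.h>

	static int wait_for_urgent(int fd, int timeout_ms)
	{
		struct ipath_cmd cmd;
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		/* ask the driver to wake poll() on urgent packets */
		memset(&cmd, 0, sizeof(cmd));
		cmd.type = IPATH_CMD_POLL_TYPE;
		cmd.cmd.poll_type = IPATH_POLL_TYPE_URGENT;	/* or IPATH_POLL_TYPE_OVERFLOW */
		if (write(fd, &cmd, sizeof(cmd)) != (ssize_t) sizeof(cmd))
			return -1;			/* driver rejected the command */

		return poll(&pfd, 1, timeout_ms);	/* wakes when an urgent packet arrives */
	}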
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index ebd5c7bd2cdb..2e689b974e1f 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -257,9 +257,14 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
/* Notimpl InitType (actually, an SMA decision) */
/* VLHighLimit is 0 (only one VL) */
; /* VLArbitrationHighCap is 0 (only one VL) */
+ /*
+ * Note: the chips support a maximum MTU of 4096, but the driver
+ * hasn't implemented this feature yet, so set the maximum
+ * to 2048.
+ */
portinfo[10] = /* VLArbitrationLowCap is 0 (only one VL) */
/* InitTypeReply is SMA decision */
- (5 << 16) /* MTUCap 4096 */
+ (4 << 16) /* MTUCap 2048 */
| (7 << 13) /* VLStallCount */
| (0x1f << 8) /* HOQLife */
| (1 << 4)
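For reference, the MTUCap field carries the standard IB MTU encoding, so the change from 5 to 4 drops the advertised maximum from 4096 to 2048 bytes. A sketch of the mapping (illustrative only; the values follow the IB MTU enum, not this patch):

	/* IB MTU enum code -> bytes */
	static const int ib_mtu_bytes[] = {
		[1] = 256, [2] = 512, [3] = 1024, [4] = 2048, [5] = 4096,
	};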
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 4171198fc202..650745d83fac 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -36,6 +36,7 @@
* HT chip.
*/
+#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/htirq.h>
@@ -439,6 +440,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
u32 bits, ctrl;
int isfatal = 0;
char bitsmsg[64];
+ int log_idx;
hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
@@ -467,6 +469,11 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
hwerrs &= dd->ipath_hwerrmask;
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
+ ipath_inc_eeprom_err(dd, log_idx, 1);
+
/*
* make sure we get this much out, unless told to be quiet,
* it's a parity error we may recover from,
@@ -502,9 +509,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
if (!hwerrs) {
ipath_dbg("Clearing freezemode on ignored or "
"recovered hardware error\n");
- ctrl &= ~INFINIPATH_C_FREEZEMODE;
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- ctrl);
+ ipath_clear_freeze(dd);
}
}
@@ -672,10 +677,16 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
if (n)
snprintf(name, namelen, "%s", n);
+ if (dd->ipath_boardrev != 6 && dd->ipath_boardrev != 7 &&
+ dd->ipath_boardrev != 11) {
+ ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
+ ret = 1;
+ goto bail;
+ }
if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
- dd->ipath_minrev > 3)) {
+ dd->ipath_minrev > 4)) {
/*
- * This version of the driver only supports Rev 3.2 and 3.3
+ * This version of the driver only supports Rev 3.2 - 3.4
*/
ipath_dev_err(dd,
"Unsupported InfiniPath hardware revision %u.%u!\n",
@@ -689,36 +700,11 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
* copies
*/
dd->ipath_flags |= IPATH_32BITCOUNTERS;
+ dd->ipath_flags |= IPATH_GPIO_INTR;
if (dd->ipath_htspeed != 800)
ipath_dev_err(dd,
"Incorrectly configured for HT @ %uMHz\n",
dd->ipath_htspeed);
- if (dd->ipath_boardrev == 7 || dd->ipath_boardrev == 11 ||
- dd->ipath_boardrev == 6)
- dd->ipath_flags |= IPATH_GPIO_INTR;
- else
- dd->ipath_flags |= IPATH_POLL_RX_INTR;
- if (dd->ipath_boardrev == 8) { /* LS/X-1 */
- u64 val;
- val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
- if (val & INFINIPATH_EXTS_SERDESSEL) {
- /*
- * hardware disabled
- *
- * This means that the chip is hardware disabled,
- * and will not be able to bring up the link,
- * in any case. We special case this and abort
- * early, to avoid later messages. We also set
- * the DISABLED status bit
- */
- ipath_dbg("Unit %u is hardware-disabled\n",
- dd->ipath_unit);
- *dd->ipath_statusp |= IPATH_STATUS_DISABLED;
- /* this value is handled differently */
- ret = 2;
- goto bail;
- }
- }
ret = 0;
bail:
@@ -1058,12 +1044,24 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
u64 lst, u64 ltst)
{
u64 extctl;
+ unsigned long flags = 0;
/* the diags use the LED to indicate diag info, so we leave
* the external LED alone when the diags are running */
if (ipath_diag_inuse)
return;
+ /* Allow override of LED display, e.g. for locating the system in a rack */

+ if (dd->ipath_led_override) {
+ ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
+ ? INFINIPATH_IBCS_LT_STATE_LINKUP
+ : INFINIPATH_IBCS_LT_STATE_DISABLED;
+ lst = (dd->ipath_led_override & IPATH_LED_LOG)
+ ? INFINIPATH_IBCS_L_STATE_ACTIVE
+ : INFINIPATH_IBCS_L_STATE_DOWN;
+ }
+
+ spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
/*
* start by setting both LED control bits to off, then turn
* on the appropriate bit(s).
@@ -1092,6 +1090,7 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
}
dd->ipath_extctrl = extctl;
ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
}
static void ipath_init_ht_variables(struct ipath_devdata *dd)
@@ -1157,6 +1156,22 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->ipath_eep_st_masks[0].hwerrs_to_log =
+ INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
+
+ dd->ipath_eep_st_masks[1].hwerrs_to_log =
+ INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
+
+ dd->ipath_eep_st_masks[2].errs_to_log =
+ INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
+
}
/**
@@ -1372,7 +1387,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
 * ipath_ht_put_tid - write a TID in chip
 * @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
* This exists as a separate routine to allow for special locking etc.
@@ -1393,7 +1408,7 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
"40 bits, using only 40!!!\n", pa);
pa &= INFINIPATH_RT_ADDR_MASK;
}
- if (type == 0)
+ if (type == RCVHQ_RCV_TYPE_EAGER)
pa |= dd->ipath_tidtemplate;
else {
/* in words (fixed, full page). */
@@ -1433,7 +1448,8 @@ static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
port * dd->ipath_rcvtidcnt *
sizeof(*tidbase));
for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- ipath_ht_put_tid(dd, &tidbase[i], 1, dd->ipath_tidinvalid);
+ ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ dd->ipath_tidinvalid);
tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
dd->ipath_rcvegrbase +
@@ -1441,7 +1457,8 @@ static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
sizeof(*tidbase));
for (i = 0; i < dd->ipath_rcvegrcnt; i++)
- ipath_ht_put_tid(dd, &tidbase[i], 0, dd->ipath_tidinvalid);
+ ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ dd->ipath_tidinvalid);
}
/**
@@ -1528,11 +1545,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
writel(16, piobuf);
piobuf += pioincr;
}
- /*
- * self-clearing
- */
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- INFINIPATH_S_ABORT);
ipath_get_eeprom_info(dd);
if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
@@ -1543,8 +1555,10 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
* with 128, rather than 112.
*/
dd->ipath_flags |= IPATH_GPIO_INTR;
- dd->ipath_flags &= ~IPATH_POLL_RX_INTR;
- }
+ } else
+ ipath_dev_err(dd, "Unsupported InfiniPath serial "
+ "number %.16s!\n", dd->ipath_serial);
+
return 0;
}
@@ -1561,7 +1575,6 @@ static int ipath_ht_txe_recover(struct ipath_devdata *dd)
}
dev_info(&dd->pcidev->dev,
"Recovering from TXE PIO parity error\n");
- ipath_disarm_senderrbufs(dd, 1);
return 1;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 4e2e3dfeb2c8..9868ccda5f26 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -296,13 +296,6 @@ static const struct ipath_cregs ipath_pe_cregs = {
#define IPATH_GPIO_SCL (1ULL << \
(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-/*
- * Rev2 silicon allows suppressing check for ArmLaunch errors.
- * this can speed up short packet sends on systems that do
- * not guaranteee write-order.
- */
-#define INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR (1ULL<<63)
-
/* 6120 specific hardware errors... */
static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
@@ -347,6 +340,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
u32 bits, ctrl;
int isfatal = 0;
char bitsmsg[64];
+ int log_idx;
hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
if (!hwerrs) {
@@ -374,6 +368,11 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
hwerrs &= dd->ipath_hwerrmask;
+ /* We log some errors to EEPROM, check if we have any of those. */
+ for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
+ if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
+ ipath_inc_eeprom_err(dd, log_idx, 1);
+
/*
* make sure we get this much out, unless told to be quiet,
* or it's occurred within the last 5 seconds
@@ -431,10 +430,12 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
dd->ipath_flags &= ~IPATH_INITTED;
} else {
- ipath_dbg("Clearing freezemode on ignored hardware "
- "error\n");
- ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
- dd->ipath_control);
+ static u32 freeze_cnt;
+
+ freeze_cnt++;
+ ipath_dbg("Clearing freezemode on ignored or recovered "
+ "hardware error (%u)\n", freeze_cnt);
+ ipath_clear_freeze(dd);
}
}
@@ -680,17 +681,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
val |= dd->ipath_rx_pol_inv <<
INFINIPATH_XGXS_RX_POL_SHIFT;
}
- if (dd->ipath_minrev >= 2) {
- /* Rev 2. can tolerate multiple writes to PBC, and
- * allowing them can provide lower latency on some
- * CPUs, but this feature is off by default, only
- * turned on by setting D63 of XGXSconfig reg.
- * May want to make this conditional more
- * fine-grained in future. This is not exactly
- * related to XGXS, but where the bit ended up.
- */
- val |= INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR;
- }
if (val != prev_val)
ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
@@ -791,12 +781,24 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
u64 ltst)
{
u64 extctl;
+ unsigned long flags = 0;
/* the diags use the LED to indicate diag info, so we leave
* the external LED alone when the diags are running */
if (ipath_diag_inuse)
return;
+ /* Allow override of LED display, e.g. for locating the system in a rack */
+ if (dd->ipath_led_override) {
+ ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
+ ? INFINIPATH_IBCS_LT_STATE_LINKUP
+ : INFINIPATH_IBCS_LT_STATE_DISABLED;
+ lst = (dd->ipath_led_override & IPATH_LED_LOG)
+ ? INFINIPATH_IBCS_L_STATE_ACTIVE
+ : INFINIPATH_IBCS_L_STATE_DOWN;
+ }
+
+ spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
INFINIPATH_EXTC_LED2PRIPORT_ON);
@@ -806,6 +808,7 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
dd->ipath_extctrl = extctl;
ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
+ spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
}
/**
@@ -955,6 +958,27 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
+
+ /*
+ * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
+ * 2 is Some Misc, 3 is reserved for future.
+ */
+ dd->ipath_eep_st_masks[0].hwerrs_to_log =
+ INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
+
+ /* Ignore errors in PIO/PBC on systems with unordered write-combining */
+ if (ipath_unordered_wc())
+ dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
+
+ dd->ipath_eep_st_masks[1].hwerrs_to_log =
+ INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
+ INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
+
+ dd->ipath_eep_st_masks[2].errs_to_log =
+ INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
+
+
}
/* setup the MSI stuff again after a reset. I'd like to just call
@@ -1082,7 +1106,7 @@ bail:
* ipath_pe_put_tid - write a TID in chip
* @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
* This exists as a separate routine to allow for special locking etc.
@@ -1108,7 +1132,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
"BUG: Physical page address 0x%lx "
"has bits set in 31-29\n", pa);
- if (type == 0)
+ if (type == RCVHQ_RCV_TYPE_EAGER)
pa |= dd->ipath_tidtemplate;
else /* for now, always full 4KB page */
pa |= 2 << 29;
@@ -1132,7 +1156,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
* ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
* @dd: the infinipath device
 * @tidptr: pointer to the expected TID (in chip) to update
- * @tidtype: 0 for eager, 1 for expected
+ * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
* @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
*
* This exists as a separate routine to allow for selection of the
@@ -1157,7 +1181,7 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
"BUG: Physical page address 0x%lx "
"has bits set in 31-29\n", pa);
- if (type == 0)
+ if (type == RCVHQ_RCV_TYPE_EAGER)
pa |= dd->ipath_tidtemplate;
else /* for now, always full 4KB page */
pa |= 2 << 29;
@@ -1196,7 +1220,8 @@ static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
for (i = 0; i < dd->ipath_rcvtidcnt; i++)
- ipath_pe_put_tid(dd, &tidbase[i], 0, tidinv);
+ ipath_pe_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
+ tidinv);
tidbase = (u64 __iomem *)
((char __iomem *)(dd->ipath_kregbase) +
@@ -1204,7 +1229,8 @@ static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
for (i = 0; i < dd->ipath_rcvegrcnt; i++)
- ipath_pe_put_tid(dd, &tidbase[i], 1, tidinv);
+ ipath_pe_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
+ tidinv);
}
/**
@@ -1311,13 +1337,6 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
dd = pd->port_dd;
- if (dd != NULL && dd->ipath_minrev >= 2) {
- ipath_cdbg(PROC, "IBA6120 Rev2, allow multiple PBC write\n");
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_PBC_REWRITE;
- ipath_cdbg(PROC, "IBA6120 Rev2, allow loose DMA alignment\n");
- kinfo->spi_runtime_flags |= IPATH_RUNTIME_LOOSE_DMA_ALIGN;
- }
-
done:
kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
return 0;
@@ -1354,7 +1373,6 @@ static int ipath_pe_txe_recover(struct ipath_devdata *dd)
dev_info(&dd->pcidev->dev,
"Recovering from TXE PIO parity error\n");
}
- ipath_disarm_senderrbufs(dd, 1);
return 1;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 7045ba689494..49951d583804 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -133,7 +133,8 @@ static int create_port0_egr(struct ipath_devdata *dd)
dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
((char __iomem *) dd->ipath_kregbase +
- dd->ipath_rcvegrbase), 0,
+ dd->ipath_rcvegrbase),
+ RCVHQ_RCV_TYPE_EAGER,
dd->ipath_port0_skbinfo[e].phys);
}
@@ -310,7 +311,12 @@ static int init_chip_first(struct ipath_devdata *dd,
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
dd->ipath_piosize2k = val & ~0U;
dd->ipath_piosize4k = val >> 32;
- dd->ipath_ibmtu = 4096; /* default to largest legal MTU */
+ /*
+ * Note: the chips support a maximum MTU of 4096, but the driver
+ * hasn't implemented this feature yet, so set the initial value
+ * to 2048.
+ */
+ dd->ipath_ibmtu = 2048;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
dd->ipath_piobcnt2k = val & ~0U;
dd->ipath_piobcnt4k = val >> 32;
@@ -340,6 +346,10 @@ static int init_chip_first(struct ipath_devdata *dd,
spin_lock_init(&dd->ipath_tid_lock);
+ spin_lock_init(&dd->ipath_gpio_lock);
+ spin_lock_init(&dd->ipath_eep_st_lock);
+ sema_init(&dd->ipath_eep_sem, 1);
+
done:
*pdp = pd;
return ret;
@@ -646,7 +656,7 @@ static int init_housekeeping(struct ipath_devdata *dd,
ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
- "Driver %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
+ "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
"SW Compat %u\n",
IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
(unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
@@ -727,7 +737,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
if (ipath_kpiobufs == 0) {
/* not set by user (this is default) */
- if (piobufs >= (uports * IPATH_MIN_USER_PORT_BUFCNT) + 32)
+ if (piobufs > 144)
kpiobufs = 32;
else
kpiobufs = 16;
@@ -767,6 +777,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
piobufs, dd->ipath_pbufsport, uports);
dd->ipath_f_early_init(dd);
+ /*
+ * cancel any possible active sends from early driver load.
+ * Follows early_init because some chips have to initialize
+ * PIO buffers in early_init to avoid false parity errors.
+ */
+ ipath_cancel_sends(dd);
/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
* done after early_init */
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index a90d3b5699c4..1fd91c59f246 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -70,7 +70,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
* If rewrite is true, and bits are set in the sendbufferror registers,
* we'll write to the buffer, for error recovery on parity errors.
*/
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
+static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
{
u32 piobcnt;
unsigned long sbuf[4];
@@ -93,7 +93,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
int i;
- if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG)) {
+ if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
+ dd->ipath_lastcancel > jiffies) {
__IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
"SendbufErrs %lx %lx", sbuf[0],
sbuf[1]);
@@ -108,7 +109,8 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
ipath_clrpiobuf(dd, i);
ipath_disarm_piobufs(dd, i, 1);
}
- dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */
+ /* ignore armlaunch errs for a bit */
+ dd->ipath_lastcancel = jiffies+3;
}
}
@@ -131,6 +133,17 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
INFINIPATH_E_INVALIDADDR)
/*
+ * this is similar to E_SUM_ERRS, but we can't ignore armlaunch and don't
+ * ignore errors unrelated to freeze mode and cancelling buffers.  Armlaunch
+ * can't be ignored because more could occur while we are still cleaning
+ * up, and those need to be cancelled as they happen.
+ */
+#define E_SPKT_ERRS_IGNORE \
+ (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
+ INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
+ INFINIPATH_E_SPKTLEN)
+
+/*
* these are errors that can occur when the link changes state while
* a packet is being sent or received. This doesn't cover things
* like EBP or VCRC that can be the result of a sending having the
@@ -290,12 +303,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
* Flush all queued sends when link went to DOWN or INIT,
* to be sure that they don't block SMA and other MAD packets
*/
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- INFINIPATH_S_ABORT);
- ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
- (unsigned)(dd->ipath_piobcnt2k +
- dd->ipath_piobcnt4k) -
- dd->ipath_lastport_piobuf);
+ ipath_cancel_sends(dd);
}
else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
lstate == IPATH_IBSTATE_ACTIVE) {
@@ -505,6 +513,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
int i, iserr = 0;
int chkerrpkts = 0, noprint = 0;
unsigned supp_msgs;
+ int log_idx;
supp_msgs = handle_frequent_errors(dd, errs, msg, &noprint);
@@ -518,6 +527,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
if (errs & INFINIPATH_E_HARDWARE) {
/* reuse same msg buf */
dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
+ } else {
+ u64 mask;
+ for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
+ mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
+ if (errs & mask)
+ ipath_inc_eeprom_err(dd, log_idx, 1);
+ }
}
if (!noprint && (errs & ~dd->ipath_e_bitsextant))
@@ -675,6 +691,17 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
chkerrpkts = 1;
dd->ipath_lastrcvhdrqtails[i] = tl;
pd->port_hdrqfull++;
+ if (test_bit(IPATH_PORT_WAITING_OVERFLOW,
+ &pd->port_flag)) {
+ clear_bit(
+ IPATH_PORT_WAITING_OVERFLOW,
+ &pd->port_flag);
+ set_bit(
+ IPATH_PORT_WAITING_OVERFLOW,
+ &pd->int_flag);
+ wake_up_interruptible(
+ &pd->port_wait);
+ }
}
}
}
@@ -744,6 +771,72 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
return chkerrpkts;
}
+
+/*
+ * try to cleanup as much as possible for anything that might have gone
+ * wrong while in freeze mode, such as pio buffers being written by user
+ * processes (causing armlaunch), send errors due to going into freeze mode,
+ * etc., and try to avoid causing extra interrupts while doing so.
+ * Forcibly update the in-memory pioavail register copies after cleanup
+ * because the chip won't do it for anything changing while in freeze mode
+ * (we don't want to wait for the next pio buffer state change).
+ * Make sure that we don't lose any important interrupts by using the chip
+ * feature that says that writing 0 to a bit in *clear that is set in
+ * *status will cause an interrupt to be generated again (if allowed by
+ * the *mask value).
+ */
+void ipath_clear_freeze(struct ipath_devdata *dd)
+{
+ int i, im;
+ __le64 val;
+
+ /* disable error interrupts, to avoid confusion */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
+
+ /*
+ * clear all sends, because they may have been
+ * completed by usercode while in freeze mode, and
+ * therefore would not be sent, and eventually
+ * might cause the process to run out of bufs
+ */
+ ipath_cancel_sends(dd);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
+ dd->ipath_control);
+
+ /* ensure pio avail updates continue */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl & ~IPATH_S_PIOBUFAVAILUPD);
+ ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+ dd->ipath_sendctrl);
+
+ /*
+ * We just enabled pioavailupdate, so dma copy is almost certainly
+ * not yet right, so read the registers directly. Similar to init
+ */
+ for (i = 0; i < dd->ipath_pioavregs; i++) {
+ /* deal with 6110 chip bug */
+ im = i > 3 ? ((i&1) ? i-1 : i+1) : i;
+ val = ipath_read_kreg64(dd, 0x1000+(im*sizeof(u64)));
+ dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
+ = le64_to_cpu(val);
+ }
+
+ /*
+ * force new interrupt if any hwerr, error or interrupt bits are
+ * still set, and clear "safe" send packet errors related to freeze
+ * and cancelling sends. Re-enable error interrupts before possible
+ * force of re-interrupt on pending interrupts.
+ */
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
+ E_SPKT_ERRS_IGNORE);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
+ ~dd->ipath_maskederrs);
+ ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
+}
+
+
/* this is separate to allow for better optimization of ipath_intr() */
static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
@@ -872,14 +965,25 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
dd->ipath_i_rcvurg_mask);
for (i = 1; i < dd->ipath_cfgports; i++) {
struct ipath_portdata *pd = dd->ipath_pd[i];
- if (portr & (1 << i) && pd && pd->port_cnt &&
- test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
- clear_bit(IPATH_PORT_WAITING_RCV,
- &pd->port_flag);
- clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
- &dd->ipath_rcvctrl);
- wake_up_interruptible(&pd->port_wait);
- rcvdint = 1;
+ if (portr & (1 << i) && pd && pd->port_cnt) {
+ if (test_bit(IPATH_PORT_WAITING_RCV,
+ &pd->port_flag)) {
+ clear_bit(IPATH_PORT_WAITING_RCV,
+ &pd->port_flag);
+ set_bit(IPATH_PORT_WAITING_RCV,
+ &pd->int_flag);
+ clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
+ &dd->ipath_rcvctrl);
+ wake_up_interruptible(&pd->port_wait);
+ rcvdint = 1;
+ } else if (test_bit(IPATH_PORT_WAITING_URG,
+ &pd->port_flag)) {
+ clear_bit(IPATH_PORT_WAITING_URG,
+ &pd->port_flag);
+ set_bit(IPATH_PORT_WAITING_URG,
+ &pd->int_flag);
+ wake_up_interruptible(&pd->port_wait);
+ }
}
}
if (rcvdint) {
@@ -905,6 +1009,9 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_stats.sps_ints++;
+ if (dd->ipath_int_counter != (u32) -1)
+ dd->ipath_int_counter++;
+
if (!(dd->ipath_flags & IPATH_PRESENT)) {
/*
* This return value is not great, but we do not want the
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 12194f3dd8cc..ace63ef78e6f 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1,7 +1,7 @@
#ifndef _IPATH_KERNEL_H
#define _IPATH_KERNEL_H
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -57,6 +57,24 @@
extern struct infinipath_stats ipath_stats;
#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
+/*
+ * First-cut criterion for "device is active" is
+ * two thousand dwords combined Tx, Rx traffic per
+ * 5-second interval. SMA packets are 64 dwords,
+ * and occur "a few per second", presumably each way.
+ */
+#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
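To put the threshold in perspective (arithmetic from the comment's own figures): 2000 dwords per 5-second interval is 400 dwords/s, while, say, three 64-dword SMA packets per second in each direction would total 6 x 64 x 5 = 1920 dwords per interval, just under the threshold, so SMA-only background traffic alone should not mark the device active.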
+/*
+ * Struct used to indicate which errors are logged in each of the
+ * error-counters that are logged to EEPROM. A counter is incremented
+ * _once_ (saturating at 255) for each event with any bits set in
+ * the error or hwerror register masks below.
+ */
+#define IPATH_EEP_LOG_CNT (4)
+struct ipath_eep_log_mask {
+ u64 errs_to_log;
+ u64 hwerrs_to_log;
+};
struct ipath_portdata {
void **port_rcvegrbuf;
@@ -109,6 +127,8 @@ struct ipath_portdata {
u32 port_tidcursor;
/* next expected TID to check */
unsigned long port_flag;
+ /* events that have occurred, for poll() to consume */
+ unsigned long int_flag;
/* WAIT_RCV that timed out, no interrupt */
u32 port_rcvwait_to;
/* WAIT_PIO that timed out, no interrupt */
@@ -137,6 +157,8 @@ struct ipath_portdata {
u32 userversion;
/* Bitmask of active slaves */
u32 active_slaves;
+ /* Type of packets or conditions we want to poll for */
+ u16 poll_type;
};
struct sk_buff;
@@ -275,6 +297,8 @@ struct ipath_devdata {
u32 ipath_lastport_piobuf;
/* is a stats timer active */
u32 ipath_stats_timer_active;
+ /* number of interrupts for this device -- saturates... */
+ u32 ipath_int_counter;
/* dwords sent read from counter */
u32 ipath_lastsword;
/* dwords received read from counter */
@@ -369,9 +393,6 @@ struct ipath_devdata {
struct class_device *diag_class_dev;
/* timer used to prevent stats overflow, error throttling, etc. */
struct timer_list ipath_stats_timer;
- /* check for stale messages in rcv queue */
- /* only allow one intr at a time. */
- unsigned long ipath_rcv_pending;
void *ipath_dummy_hdrq; /* used after port close */
dma_addr_t ipath_dummy_hdrq_phys;
@@ -399,6 +420,8 @@ struct ipath_devdata {
u64 ipath_gpio_out;
/* shadow the gpio mask register */
u64 ipath_gpio_mask;
+ /* shadow the gpio output enable, etc... */
+ u64 ipath_extctrl;
/* kr_revision shadow */
u64 ipath_revision;
/*
@@ -473,8 +496,6 @@ struct ipath_devdata {
u32 ipath_cregbase;
/* shadow the control register contents */
u32 ipath_control;
- /* shadow the gpio output contents */
- u32 ipath_extctrl;
/* PCI revision register (HTC rev on FPGA) */
u32 ipath_pcirev;
@@ -552,6 +573,9 @@ struct ipath_devdata {
u32 ipath_overrun_thresh_errs;
u32 ipath_lli_errs;
+ /* status check work */
+ struct delayed_work status_work;
+
/*
* Not all devices managed by a driver instance are the same
* type, so these fields must be per-device.
@@ -575,6 +599,37 @@ struct ipath_devdata {
u16 ipath_gpio_scl_num;
u64 ipath_gpio_sda;
u64 ipath_gpio_scl;
+
+ /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
+ spinlock_t ipath_gpio_lock;
+
+ /* used to override LED behavior */
+ u8 ipath_led_override; /* Substituted for normal value, if non-zero */
+ u16 ipath_led_override_timeoff; /* delta to next timer event */
+ u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
+ u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
+ atomic_t ipath_led_override_timer_active;
+ /* Used to flash LEDs in override mode */
+ struct timer_list ipath_led_override_timer;
+
+ /* Support (including locks) for EEPROM logging of errors and time */
+ /* control access to actual counters, timer */
+ spinlock_t ipath_eep_st_lock;
+ /* control high-level access to EEPROM */
+ struct semaphore ipath_eep_sem;
+ /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
+ uint64_t ipath_traffic_wds;
+ /* active time is kept in seconds, but logged in hours */
+ atomic_t ipath_active_time;
+ /* Below are nominal shadow of EEPROM, new since last EEPROM update */
+ uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
+ uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
+ uint16_t ipath_eep_hrs;
+ /*
+ * masks for which bits of errs, hwerrs that cause
+ * each of the counters to increment.
+ */
+ struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
};
/* Private data for file operations */
@@ -592,6 +647,7 @@ int ipath_enable_wc(struct ipath_devdata *dd);
void ipath_disable_wc(struct ipath_devdata *dd);
int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
void ipath_shutdown_device(struct ipath_devdata *);
+void ipath_clear_freeze(struct ipath_devdata *);
struct file_operations;
int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
@@ -627,6 +683,7 @@ int ipath_unordered_wc(void);
void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
unsigned cnt);
+void ipath_cancel_sends(struct ipath_devdata *);
int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
@@ -685,7 +742,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
* are 64bit */
#define IPATH_32BITCOUNTERS 0x20000
/* can miss port0 rx interrupts */
-#define IPATH_POLL_RX_INTR 0x40000
#define IPATH_DISABLED 0x80000 /* administratively disabled */
/* Use GPIO interrupts for new counters */
#define IPATH_GPIO_ERRINTRS 0x100000
@@ -704,6 +760,10 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
#define IPATH_PORT_WAITING_PIO 3
/* master has not finished initializing */
#define IPATH_PORT_MASTER_UNINIT 4
+ /* waiting for an urgent packet to arrive */
+#define IPATH_PORT_WAITING_URG 5
+ /* waiting for a header overflow */
+#define IPATH_PORT_WAITING_OVERFLOW 6
/* free up any allocated data at closes */
void ipath_free_data(struct ipath_portdata *dd);
@@ -713,8 +773,18 @@ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
void ipath_init_iba6120_funcs(struct ipath_devdata *);
void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
+int ipath_update_eeprom_log(struct ipath_devdata *dd);
+void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
+
+/*
+ * Set LED override, only the two LSBs have "public" meaning, but
+ * any non-zero value substitutes them for the Link and LinkTrain
+ * LED states.
+ */
+#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
+#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
+void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
/*
* number of words used for protocol header if not set by ipath_userinit();
@@ -749,7 +819,6 @@ static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
int ipath_get_user_pages(unsigned long, size_t, struct page **);
-int ipath_get_user_pages_nocopy(unsigned long, struct page **);
void ipath_release_user_pages(struct page **, size_t);
void ipath_release_user_pages_on_close(struct page **, size_t);
int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index dd487c100f5b..85a4aefc6c03 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
deleted file mode 100644
index 05a1d2b01d9d..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * These are the routines used by layered drivers, currently just the
- * layered ethernet driver and verbs layer.
- */
-
-#include <linux/io.h>
-#include <asm/byteorder.h>
-
-#include "ipath_kernel.h"
-#include "ipath_layer.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-/* Acquire before ipath_devs_lock. */
-static DEFINE_MUTEX(ipath_layer_mutex);
-
-u16 ipath_layer_rcv_opcode;
-
-static int (*layer_intr)(void *, u32);
-static int (*layer_rcv)(void *, void *, struct sk_buff *);
-static int (*layer_rcv_lid)(void *, void *);
-
-static void *(*layer_add_one)(int, struct ipath_devdata *);
-static void (*layer_remove_one)(void *);
-
-int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
-{
- int ret = -ENODEV;
-
- if (dd->ipath_layer.l_arg && layer_intr)
- ret = layer_intr(dd->ipath_layer.l_arg, arg);
-
- return ret;
-}
-
-int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
-{
- int ret;
-
- mutex_lock(&ipath_layer_mutex);
-
- ret = __ipath_layer_intr(dd, arg);
-
- mutex_unlock(&ipath_layer_mutex);
-
- return ret;
-}
-
-int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
- struct sk_buff *skb)
-{
- int ret = -ENODEV;
-
- if (dd->ipath_layer.l_arg && layer_rcv)
- ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
-
- return ret;
-}
-
-int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
-{
- int ret = -ENODEV;
-
- if (dd->ipath_layer.l_arg && layer_rcv_lid)
- ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
-
- return ret;
-}
-
-void ipath_layer_lid_changed(struct ipath_devdata *dd)
-{
- mutex_lock(&ipath_layer_mutex);
-
- if (dd->ipath_layer.l_arg && layer_intr)
- layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
-
- mutex_unlock(&ipath_layer_mutex);
-}
-
-void ipath_layer_add(struct ipath_devdata *dd)
-{
- mutex_lock(&ipath_layer_mutex);
-
- if (layer_add_one)
- dd->ipath_layer.l_arg =
- layer_add_one(dd->ipath_unit, dd);
-
- mutex_unlock(&ipath_layer_mutex);
-}
-
-void ipath_layer_remove(struct ipath_devdata *dd)
-{
- mutex_lock(&ipath_layer_mutex);
-
- if (dd->ipath_layer.l_arg && layer_remove_one) {
- layer_remove_one(dd->ipath_layer.l_arg);
- dd->ipath_layer.l_arg = NULL;
- }
-
- mutex_unlock(&ipath_layer_mutex);
-}
-
-int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
- void (*l_remove)(void *),
- int (*l_intr)(void *, u32),
- int (*l_rcv)(void *, void *, struct sk_buff *),
- u16 l_rcv_opcode,
- int (*l_rcv_lid)(void *, void *))
-{
- struct ipath_devdata *dd, *tmp;
- unsigned long flags;
-
- mutex_lock(&ipath_layer_mutex);
-
- layer_add_one = l_add;
- layer_remove_one = l_remove;
- layer_intr = l_intr;
- layer_rcv = l_rcv;
- layer_rcv_lid = l_rcv_lid;
- ipath_layer_rcv_opcode = l_rcv_opcode;
-
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
- if (!(dd->ipath_flags & IPATH_INITTED))
- continue;
-
- if (dd->ipath_layer.l_arg)
- continue;
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
- spin_lock_irqsave(&ipath_devs_lock, flags);
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- mutex_unlock(&ipath_layer_mutex);
-
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_register);
-
-void ipath_layer_unregister(void)
-{
- struct ipath_devdata *dd, *tmp;
- unsigned long flags;
-
- mutex_lock(&ipath_layer_mutex);
- spin_lock_irqsave(&ipath_devs_lock, flags);
-
- list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
- if (dd->ipath_layer.l_arg && layer_remove_one) {
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
- layer_remove_one(dd->ipath_layer.l_arg);
- spin_lock_irqsave(&ipath_devs_lock, flags);
- dd->ipath_layer.l_arg = NULL;
- }
- }
-
- spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
- layer_add_one = NULL;
- layer_remove_one = NULL;
- layer_intr = NULL;
- layer_rcv = NULL;
- layer_rcv_lid = NULL;
-
- mutex_unlock(&ipath_layer_mutex);
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_unregister);
-
-int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
-{
- int ret;
- u32 intval = 0;
-
- mutex_lock(&ipath_layer_mutex);
-
- if (!dd->ipath_layer.l_arg) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
-
- if (ret < 0)
- goto bail;
-
- *pktmax = dd->ipath_ibmaxlen;
-
- if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
- intval |= IPATH_LAYER_INT_IF_UP;
- if (dd->ipath_lid)
- intval |= IPATH_LAYER_INT_LID;
- if (dd->ipath_mlid)
- intval |= IPATH_LAYER_INT_BCAST;
- /*
- * do this on open, in case low level is already up and
- * just layered driver was reloaded, etc.
- */
- if (intval)
- layer_intr(dd->ipath_layer.l_arg, intval);
-
- ret = 0;
-bail:
- mutex_unlock(&ipath_layer_mutex);
-
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_open);
-
-u16 ipath_layer_get_lid(struct ipath_devdata *dd)
-{
- return dd->ipath_lid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
-
-/**
- * ipath_layer_get_mac - get the MAC address
- * @dd: the infinipath device
- * @mac: the MAC is put here
- *
- * This is the EUID-64 OUI octets (top 3), then
- * skip the next 2 (which should both be zero or 0xff).
- * The returned MAC is in network order
- * mac points to at least 6 bytes of buffer
- * We assume that by the time the LID is set, that the GUID is as valid
- * as it's ever going to be, rather than adding yet another status bit.
- */
-
-int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
-{
- u8 *guid;
-
- guid = (u8 *) &dd->ipath_guid;
-
- mac[0] = guid[0];
- mac[1] = guid[1];
- mac[2] = guid[2];
- mac[3] = guid[5];
- mac[4] = guid[6];
- mac[5] = guid[7];
- if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
- ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
- "%x %x\n", guid[3], guid[4]);
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
-
-u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
-{
- return dd->ipath_mlid;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
-
-int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
-{
- int ret = 0;
- u32 __iomem *piobuf;
- u32 plen, *uhdr;
- size_t count;
- __be16 vlsllnh;
-
- if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
- ipath_dbg("send while not open\n");
- ret = -EINVAL;
- } else
- if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
- dd->ipath_lid == 0) {
- /*
- * lid check is for when sma hasn't yet configured
- */
- ret = -ENETDOWN;
- ipath_cdbg(VERBOSE, "send while not ready, "
- "mylid=%u, flags=0x%x\n",
- dd->ipath_lid, dd->ipath_flags);
- }
-
- vlsllnh = *((__be16 *) hdr);
- if (vlsllnh != htons(IPATH_LRH_BTH)) {
- ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
- "not sending\n", be16_to_cpu(vlsllnh),
- IPATH_LRH_BTH);
- ret = -EINVAL;
- }
- if (ret)
- goto done;
-
- /* Get a PIO buffer to use. */
- piobuf = ipath_getpiobuf(dd, NULL);
- if (piobuf == NULL) {
- ret = -EBUSY;
- goto done;
- }
-
- plen = (sizeof(*hdr) >> 2); /* actual length */
- ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
-
- writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
- ipath_flush_wc();
- piobuf += 2;
- uhdr = (u32 *)hdr;
- count = plen-1; /* amount we can copy before trigger word */
- __iowrite32_copy(piobuf, uhdr, count);
- ipath_flush_wc();
- __raw_writel(uhdr[count], piobuf + count);
- ipath_flush_wc(); /* ensure it's sent, now */
-
- ipath_stats.sps_ether_spkts++; /* ether packet sent */
-
-done:
- return ret;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
-
-int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
-{
- set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
-
- ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
- dd->ipath_sendctrl);
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
deleted file mode 100644
index 3854a4eae684..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_LAYER_H
-#define _IPATH_LAYER_H
-
-/*
- * This header file is for symbols shared between the infinipath driver
- * and drivers layered upon it (such as ipath).
- */
-
-struct sk_buff;
-struct ipath_devdata;
-struct ether_header;
-
-int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
- void (*l_remove)(void *),
- int (*l_intr)(void *, u32),
- int (*l_rcv)(void *, void *,
- struct sk_buff *),
- u16 rcv_opcode,
- int (*l_rcv_lid)(void *, void *));
-void ipath_layer_unregister(void);
-int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
-u16 ipath_layer_get_lid(struct ipath_devdata *dd);
-int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
-u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
-int ipath_layer_send_hdr(struct ipath_devdata *dd,
- struct ether_header *hdr);
-int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
-
-/* ipath_ether interrupt values */
-#define IPATH_LAYER_INT_IF_UP 0x2
-#define IPATH_LAYER_INT_IF_DOWN 0x4
-#define IPATH_LAYER_INT_LID 0x8
-#define IPATH_LAYER_INT_SEND_CONTINUE 0x10
-#define IPATH_LAYER_INT_BCAST 0x40
-
-extern unsigned ipath_debug; /* debugging bit mask */
-
-#endif /* _IPATH_LAYER_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 25908b02fbe5..d61c03044545 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -103,7 +103,7 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
/* This is already in network order */
nip->sys_guid = to_idev(ibdev)->sys_image_guid;
nip->node_guid = dd->ipath_guid;
- nip->port_guid = nip->sys_guid;
+ nip->port_guid = dd->ipath_guid;
nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
nip->device_id = cpu_to_be16(dd->ipath_deviceid);
majrev = dd->ipath_majrev;
@@ -292,7 +292,12 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
/* pip->vl_arb_high_cap; // only one VL */
/* pip->vl_arb_low_cap; // only one VL */
/* InitTypeReply = 0 */
- pip->inittypereply_mtucap = IB_MTU_4096;
+ /*
+ * Note: the chips support a maximum MTU of 4096, but the driver
+ * hasn't implemented this feature yet, so set the maximum value
+ * to 2048.
+ */
+ pip->inittypereply_mtucap = IB_MTU_2048;
// HCAs ignore VLStallCount and HOQLife
/* pip->vlstallcnt_hoqlife; */
pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index 937bc3396b53..fa830e22002f 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index bdeef8d4f279..e442470a2375 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index bfef08ecd342..1324b35ff1f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -336,7 +336,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
qp->qkey = 0;
qp->qp_access_flags = 0;
qp->s_busy = 0;
- qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
+ qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_psn = 0;
qp->r_psn = 0;
@@ -507,16 +507,13 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->port_num > ibqp->device->phys_port_cnt)
goto inval;
+ /*
+ * Note: the chips support a maximum MTU of 4096, but the driver
+ * hasn't implemented this feature yet, so don't allow Path MTU
+ * values greater than 2048.
+ */
if (attr_mask & IB_QP_PATH_MTU)
- if (attr->path_mtu > IB_MTU_4096)
- goto inval;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- if (attr->max_dest_rd_atomic > 1)
- goto inval;
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
- if (attr->max_rd_atomic > 1)
+ if (attr->path_mtu > IB_MTU_2048)
goto inval;
if (attr_mask & IB_QP_PATH_MIG_STATE)
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 1915771fd038..46744ea2babd 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -125,8 +125,10 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
if (len > pmtu) {
len = pmtu;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- } else
+ } else {
qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
+ e->sent = 1;
+ }
ohdr->u.aeth = ipath_compute_aeth(qp);
hwords++;
qp->s_ack_rdma_psn = e->psn;
@@ -143,6 +145,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
cpu_to_be32(e->atomic_data);
hwords += sizeof(ohdr->u.at) / sizeof(u32);
bth2 = e->psn;
+ e->sent = 1;
}
bth0 = qp->s_ack_state << 24;
break;
@@ -158,6 +161,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
ohdr->u.aeth = ipath_compute_aeth(qp);
hwords++;
qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
+ qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
}
bth0 = qp->s_ack_state << 24;
bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -188,7 +192,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
}
qp->s_hdrwords = hwords;
qp->s_cur_size = len;
- *bth0p = bth0;
+ *bth0p = bth0 | (1 << 22); /* Set M bit */
*bth2p = bth2;
return 1;
@@ -240,7 +244,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
- bth0 = 0;
+ bth0 = 1 << 22; /* Set M bit */
/* Send a request. */
wqe = get_swqe_ptr(qp, qp->s_cur);
@@ -604,7 +608,7 @@ static void send_rc_ack(struct ipath_qp *qp)
}
/* read pkey_index w/o lock (its atomic) */
bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
- OP(ACKNOWLEDGE) << 24;
+ (OP(ACKNOWLEDGE) << 24) | (1 << 22);
if (qp->r_nak_state)
ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
(qp->r_nak_state <<
@@ -806,13 +810,15 @@ static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
* Called at interrupt level with the QP s_lock held and interrupts disabled.
* Returns 1 if OK, 0 if current operation should be aborted (NAK).
*/
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
+static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
+ u64 val)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
struct ipath_swqe *wqe;
int ret = 0;
u32 ack_psn;
+ int diff;
/*
* Remove the QP from the timeout queue (or RNR timeout queue).
@@ -840,7 +846,19 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
* The MSN might be for a later WQE than the PSN indicates so
* only complete WQEs that the PSN finishes.
*/
- while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
+ while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
+ /*
+ * RDMA_READ_RESPONSE_ONLY is a special case since
+ * we want to generate completion events for everything
+ * before the RDMA read, copy the data, then generate
+ * the completion for the read.
+ */
+ if (wqe->wr.opcode == IB_WR_RDMA_READ &&
+ opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
+ diff == 0) {
+ ret = 1;
+ goto bail;
+ }
/*
* If this request is a RDMA read or atomic, and the ACK is
* for a later operation, this ACK NAKs the RDMA read or
@@ -851,12 +869,10 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
* is sent but before the response is received.
*/
if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
- (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
- ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
+ (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
- ipath_cmp24(wqe->psn, psn) != 0))) {
+ (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
/*
* The last valid PSN seen is the previous
* request's.
@@ -870,6 +886,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
*/
goto bail;
}
+ if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ *(u64 *) wqe->sg_list[0].vaddr = val;
if (qp->s_num_rd_atomic &&
(wqe->wr.opcode == IB_WR_RDMA_READ ||
wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
@@ -1079,6 +1098,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
int diff;
u32 pad;
u32 aeth;
+ u64 val;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1118,8 +1138,6 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
data += sizeof(__be32);
}
if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
- u64 val;
-
if (!header_in_data) {
__be32 *p = ohdr->u.at.atomic_ack_eth;
@@ -1127,12 +1145,13 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
be32_to_cpu(p[1]);
} else
val = be64_to_cpu(((__be64 *) data)[0]);
- *(u64 *) wqe->sg_list[0].vaddr = val;
- }
- if (!do_rc_ack(qp, aeth, psn, opcode) ||
+ } else
+ val = 0;
+ if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
opcode != OP(RDMA_READ_RESPONSE_FIRST))
goto ack_done;
hdrsize += 4;
+ wqe = get_swqe_ptr(qp, qp->s_last);
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
/*
@@ -1176,13 +1195,12 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
- if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
- dev->n_rdma_seq++;
- ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
+ if (!header_in_data)
+ aeth = be32_to_cpu(ohdr->u.aeth);
+ else
+ aeth = be32_to_cpu(((__be32 *) data)[0]);
+ if (!do_rc_ack(qp, aeth, psn, opcode, 0))
goto ack_done;
- }
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/*
@@ -1197,6 +1215,7 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
* have to be careful to copy the data to the right
* location.
*/
+ wqe = get_swqe_ptr(qp, qp->s_last);
qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
wqe, psn, pmtu);
goto read_last;
@@ -1230,7 +1249,8 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
data += sizeof(__be32);
}
ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
- (void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST));
+ (void) do_rc_ack(qp, aeth, psn,
+ OP(RDMA_READ_RESPONSE_LAST), 0);
goto ack_done;
}
@@ -1344,8 +1364,11 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
e = NULL;
break;
}
- if (ipath_cmp24(psn, e->psn) >= 0)
+ if (ipath_cmp24(psn, e->psn) >= 0) {
+ if (prev == qp->s_tail_ack_queue)
+ old_req = 0;
break;
+ }
}
switch (opcode) {
case OP(RDMA_READ_REQUEST): {
@@ -1460,6 +1483,22 @@ static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
spin_unlock_irqrestore(&qp->s_lock, flags);
}
+static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
+{
+ unsigned long flags;
+ unsigned next;
+
+ next = n + 1;
+ if (next > IPATH_MAX_RDMA_ATOMIC)
+ next = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (n == qp->s_tail_ack_queue) {
+ qp->s_tail_ack_queue = next;
+ qp->s_ack_state = OP(ACKNOWLEDGE);
+ }
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
/**
* ipath_rc_rcv - process an incoming RC packet
* @dev: the device this packet came in on
@@ -1672,6 +1711,9 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_ONLY):
case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE)))
+ goto nack_inv;
/* consume RWQE */
/* RETH comes after BTH */
if (!header_in_data)
@@ -1701,9 +1743,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
qp->r_sge.sge.length = 0;
qp->r_sge.sge.sge_length = 0;
}
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE)))
- goto nack_acc;
if (opcode == OP(RDMA_WRITE_FIRST))
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
@@ -1717,13 +1756,17 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
u32 len;
u8 next;
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto nack_acc;
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_READ)))
+ goto nack_inv;
next = qp->r_head_ack_queue + 1;
if (next > IPATH_MAX_RDMA_ATOMIC)
next = 0;
- if (unlikely(next == qp->s_tail_ack_queue))
- goto nack_inv;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv;
+ ipath_update_ack_queue(qp, next);
+ }
e = &qp->s_ack_queue[qp->r_head_ack_queue];
/* RETH comes after BTH */
if (!header_in_data)
@@ -1758,6 +1801,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
e->rdma_sge.sge.sge_length = 0;
}
e->opcode = opcode;
+ e->sent = 0;
e->psn = psn;
/*
* We need to increment the MSN here instead of when we
@@ -1789,12 +1833,15 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
if (unlikely(!(qp->qp_access_flags &
IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc;
+ goto nack_inv;
next = qp->r_head_ack_queue + 1;
if (next > IPATH_MAX_RDMA_ATOMIC)
next = 0;
- if (unlikely(next == qp->s_tail_ack_queue))
- goto nack_inv;
+ if (unlikely(next == qp->s_tail_ack_queue)) {
+ if (!qp->s_ack_queue[next].sent)
+ goto nack_inv;
+ ipath_update_ack_queue(qp, next);
+ }
if (!header_in_data)
ateth = &ohdr->u.atomic_eth;
else
@@ -1819,6 +1866,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
be64_to_cpu(ateth->compare_data),
sdata);
e->opcode = opcode;
+ e->sent = 0;
e->psn = psn & IPATH_PSN_MASK;
qp->r_msn++;
qp->r_psn++;
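The repeated `| (1 << 22)` in the ipath_rc.c hunks above sets the MigReq (M) bit of the first BTH dword; per the IBA header layout, that word packs OpCode in bits 31:24, SE in bit 23, M in bit 22, PadCnt in 21:20, TVer in 19:16 and P_Key in 15:0. A minimal sketch of the packing send_rc_ack() now performs (helper name hypothetical, not from the driver):

    /* sketch only: compose BTH word 0 as send_rc_ack() does above */
    static inline u32 make_bth0(u8 opcode, u16 pkey)
    {
            return ((u32) opcode << 24) |   /* OpCode, bits 31:24     */
                   (1 << 22) |              /* M (MigReq) bit         */
                   pkey;                    /* P_Key, bits 15:0       */
    }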
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index c182bcd62098..708eba3165d7 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index d9c2a9b15d86..c69c25239443 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -194,6 +194,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
ret = 0;
goto bail;
}
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
wqe = get_rwqe_ptr(rq, tail);
if (++tail >= rq->size)
tail = 0;
@@ -267,7 +269,7 @@ again:
spin_lock_irqsave(&sqp->s_lock, flags);
if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
- qp->s_rnr_timeout) {
+ sqp->s_rnr_timeout) {
spin_unlock_irqrestore(&sqp->s_lock, flags);
goto done;
}
@@ -319,12 +321,22 @@ again:
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ wc.status = IB_WC_REM_INV_REQ_ERR;
+ goto err;
+ }
wc.wc_flags = IB_WC_WITH_IMM;
wc.imm_data = wqe->wr.imm_data;
if (!ipath_get_rwqe(qp, 1))
goto rnr_nak;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE:
+ if (unlikely(!(qp->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE))) {
+ wc.status = IB_WC_REM_INV_REQ_ERR;
+ goto err;
+ }
if (wqe->length == 0)
break;
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
@@ -354,8 +366,10 @@ again:
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
+ IB_ACCESS_REMOTE_READ))) {
+ wc.status = IB_WC_REM_INV_REQ_ERR;
+ goto err;
+ }
if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
@@ -369,8 +383,10 @@ again:
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
+ IB_ACCESS_REMOTE_ATOMIC))) {
+ wc.status = IB_WC_REM_INV_REQ_ERR;
+ goto err;
+ }
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
wqe->wr.wr.atomic.remote_addr,
wqe->wr.wr.atomic.rkey,
@@ -396,6 +412,8 @@ again:
if (len > sge->length)
len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
BUG_ON(len == 0);
ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
sge->vaddr += len;
@@ -489,7 +507,7 @@ static int want_buffer(struct ipath_devdata *dd)
*
* Called when we run out of PIO buffers.
*/
-void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
+static void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
unsigned long flags;
@@ -503,11 +521,9 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
* could be called. If we are still in the tasklet function,
* tasklet_hi_schedule() will not call us until the next time
* tasklet_hi_schedule() is called.
- * We clear the tasklet flag now since we are committing to return
- * from the tasklet function.
+ * We leave the busy flag set so that another post send doesn't
+ * try to put the same QP on the piowait list again.
*/
- clear_bit(IPATH_S_BUSY, &qp->s_busy);
- tasklet_unlock(&qp->s_task);
want_buffer(dev->dd);
dev->n_piowait++;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 03acae66ba81..40c36ec19016 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -80,6 +80,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
wqe->num_sge = wr->num_sge;
for (i = 0; i < wr->num_sge; i++)
wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
wq->head = next;
spin_unlock_irqrestore(&srq->rq.lock, flags);
}
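The smp_wmb() added here pairs with the smp_rmb() added in ipath_ruc.c and ipath_ud.c: the poster must make the queue entry visible before publishing the new head index, and the consumer must read the index before dereferencing the entry. A minimal sketch of that publish/consume ordering, with abbreviated names rather than the driver's exact structures:

    /* producer (post_recv side) */
    wq->entries[head] = *wqe;   /* fill in the entry...                 */
    smp_wmb();                  /* ...before exposing it via the index  */
    wq->head = next;

    /* consumer (get_rwqe side) */
    head = wq->head;            /* observe the published index...       */
    smp_rmb();                  /* ...before reading the entry          */
    wqe = &wq->entries[tail];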
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index d8b5e4cefe25..73ed17d03188 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -55,6 +55,7 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
u64 val64;
unsigned long t0, t1;
u64 ret;
+ unsigned long flags;
t0 = jiffies;
/* If fast increment counters are only 32 bits, snapshot them,
@@ -91,12 +92,18 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
if (creg == dd->ipath_cregs->cr_wordsendcnt) {
if (val != dd->ipath_lastsword) {
dd->ipath_sword += val - dd->ipath_lastsword;
+ spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+ dd->ipath_traffic_wds += val - dd->ipath_lastsword;
+ spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
dd->ipath_lastsword = val;
}
val64 = dd->ipath_sword;
} else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
if (val != dd->ipath_lastrword) {
dd->ipath_rword += val - dd->ipath_lastrword;
+ spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+ dd->ipath_traffic_wds += val - dd->ipath_lastrword;
+ spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
dd->ipath_lastrword = val;
}
val64 = dd->ipath_rword;
@@ -200,6 +207,7 @@ void ipath_get_faststats(unsigned long opaque)
struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
u32 val;
static unsigned cnt;
+ unsigned long flags;
/*
* don't access the chip while running diags, or memory diags can
@@ -210,9 +218,20 @@ void ipath_get_faststats(unsigned long opaque)
/* but re-arm the timer, for diags case; won't hurt other */
goto done;
+ /*
+	 * We now try to maintain an "active timer", based on traffic
+ * exceeding a threshold, so we need to check the word-counts
+ * even if they are 64-bit.
+ */
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
+ ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
+ spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
+ if (dd->ipath_traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
+ atomic_add(5, &dd->ipath_active_time); /* S/B #define */
+ dd->ipath_traffic_wds = 0;
+ spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
+
if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
- ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
}
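Taken together, the two hunks above make ipath_snap_cntr() accumulate send/receive word-count deltas into ipath_traffic_wds under ipath_eep_st_lock, while ipath_get_faststats() consumes the accumulator on each timer tick (now unconditionally, not only for 32-bit counters). In outline, assuming only the fields this series adds (delta_words is a placeholder name):

    /* accumulation (ipath_snap_cntr, on each counter read)  */
    dd->ipath_traffic_wds += delta_words;           /* under ipath_eep_st_lock  */

    /* consumption (ipath_get_faststats, each timer tick)    */
    if (dd->ipath_traffic_wds >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
            atomic_add(5, &dd->ipath_active_time);  /* "S/B #define", per above */
    dd->ipath_traffic_wds = 0;                      /* restart the window       */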
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index 4dc398d5e011..16238cd3a036 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -596,6 +596,43 @@ bail:
return ret;
}
+static ssize_t store_led_override(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int ret;
+ u16 val;
+
+ ret = ipath_parse_ushort(buf, &val);
+ if (ret > 0)
+ ipath_set_led_override(dd, val);
+ else
+ ipath_dev_err(dd, "attempt to set invalid LED override\n");
+ return ret;
+}
+
+static ssize_t show_logged_errs(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipath_devdata *dd = dev_get_drvdata(dev);
+ int idx, count;
+
+ /* force consistency with actual EEPROM */
+ if (ipath_update_eeprom_log(dd) != 0)
+ return -ENXIO;
+
+ count = 0;
+ for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
+ dd->ipath_eep_st_errs[idx],
+ idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
+ }
+
+ return count;
+}
static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
@@ -625,6 +662,8 @@ static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
+static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
+static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static struct attribute *dev_attributes[] = {
&dev_attr_guid.attr,
@@ -641,6 +680,8 @@ static struct attribute *dev_attributes[] = {
&dev_attr_unit.attr,
&dev_attr_enabled.attr,
&dev_attr_rx_pol_inv.attr,
+ &dev_attr_led_override.attr,
+ &dev_attr_logged_errors.attr,
NULL
};
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 1c2b03c2ef5e..8380fbc50d2c 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -58,7 +58,6 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
wc->port_num = 0;
ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
}
- wqe = get_swqe_ptr(qp, qp->s_last);
}
/**
@@ -87,7 +86,7 @@ int ipath_make_uc_req(struct ipath_qp *qp,
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
- bth0 = 0;
+ bth0 = 1 << 22; /* Set M bit */
/* Get the next send request. */
wqe = get_swqe_ptr(qp, qp->s_last);
@@ -97,8 +96,10 @@ int ipath_make_uc_req(struct ipath_qp *qp,
* Signal the completion of the last send
* (if there is one).
*/
- if (qp->s_last != qp->s_tail)
+ if (qp->s_last != qp->s_tail) {
complete_last_send(qp, wqe, &wc);
+ wqe = get_swqe_ptr(qp, qp->s_last);
+ }
/* Check if send work queue is empty. */
if (qp->s_tail == qp->s_head)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index a518f7c8fa83..f9a3338a5fb7 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -176,6 +176,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
dev->n_pkt_drops++;
goto bail_sge;
}
+ /* Make sure entry is read after head index is read. */
+ smp_rmb();
wqe = get_rwqe_ptr(rq, tail);
if (++tail >= rq->size)
tail = 0;
@@ -231,6 +233,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
if (len > length)
len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
BUG_ON(len == 0);
ipath_copy_sge(&rsge, sge->vaddr, len);
sge->vaddr += len;
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 8536aeb96af8..0190edc8044e 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -171,32 +171,6 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
return ret;
}
-/**
- * ipath_get_user_pages_nocopy - lock a single page for I/O and mark shared
- * @start_page: the page to lock
- * @p: the output page structure
- *
- * This is similar to ipath_get_user_pages, but it's always one page, and we
- * mark the page as locked for I/O, and shared. This is used for the user
- * process page that contains the destination address for the rcvhdrq tail
- * update, so we need to have the vma. If we don't do this, the page can be
- * taken away from us on fork, even if the child never touches it, and then
- * the user process never sees the tail register updates.
- */
-int ipath_get_user_pages_nocopy(unsigned long page, struct page **p)
-{
- struct vm_area_struct *vma;
- int ret;
-
- down_write(&current->mm->mmap_sem);
-
- ret = __get_user_pages(page, 1, p, &vma);
-
- up_write(&current->mm->mmap_sem);
-
- return ret;
-}
-
void ipath_release_user_pages(struct page **p, size_t num_pages)
{
down_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index bb70845279b8..16aa61fd8085 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -164,9 +164,11 @@ void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
while (length) {
u32 len = sge->length;
- BUG_ON(len == 0);
if (len > length)
len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
memcpy(sge->vaddr, data, len);
sge->vaddr += len;
sge->length -= len;
@@ -202,9 +204,11 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
while (length) {
u32 len = sge->length;
- BUG_ON(len == 0);
if (len > length)
len = length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ BUG_ON(len == 0);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -323,6 +327,8 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->num_sge = wr->num_sge;
for (i = 0; i < wr->num_sge; i++)
wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
wq->head = next;
spin_unlock_irqrestore(&qp->r_rq.lock, flags);
}
@@ -482,7 +488,7 @@ bail:;
* This is called from ipath_do_rcv_timer() at interrupt level to check for
* QPs which need retransmits and to collect performance numbers.
*/
-void ipath_ib_timer(struct ipath_ibdev *dev)
+static void ipath_ib_timer(struct ipath_ibdev *dev)
{
struct ipath_qp *resend = NULL;
struct list_head *last;
@@ -948,6 +954,7 @@ int ipath_ib_piobufavail(struct ipath_ibdev *dev)
qp = list_entry(dev->piowait.next, struct ipath_qp,
piowait);
list_del_init(&qp->piowait);
+ clear_bit(IPATH_S_BUSY, &qp->s_busy);
tasklet_hi_schedule(&qp->s_task);
}
spin_unlock_irqrestore(&dev->pending_lock, flags);
@@ -981,6 +988,8 @@ static int ipath_query_device(struct ib_device *ibdev,
props->max_ah = ib_ipath_max_ahs;
props->max_cqe = ib_ipath_max_cqes;
props->max_mr = dev->lk_table.max;
+ props->max_fmr = dev->lk_table.max;
+ props->max_map_per_fmr = 32767;
props->max_pd = ib_ipath_max_pds;
props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
props->max_qp_init_rd_atom = 255;
@@ -1051,7 +1060,12 @@ static int ipath_query_port(struct ib_device *ibdev,
props->max_vl_num = 1; /* VLCap = VL0 */
props->init_type_reply = 0;
- props->max_mtu = IB_MTU_4096;
+ /*
+ * Note: the chips support a maximum MTU of 4096, but the driver
+ * hasn't implemented this feature yet, so set the maximum value
+ * to 2048.
+ */
+ props->max_mtu = IB_MTU_2048;
switch (dev->dd->ipath_ibmtu) {
case 4096:
mtu = IB_MTU_4096;
@@ -1361,13 +1375,6 @@ static void __verbs_timer(unsigned long arg)
{
struct ipath_devdata *dd = (struct ipath_devdata *) arg;
- /*
- * If port 0 receive packet interrupts are not available, or
- * can be missed, poll the receive queue
- */
- if (dd->ipath_flags & IPATH_POLL_RX_INTR)
- ipath_kreceive(dd);
-
/* Handle verbs layer timeouts. */
ipath_ib_timer(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 088b837ebea8..1a24c6a4a814 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -42,8 +42,6 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
-#include "ipath_layer.h"
-
#define IPATH_MAX_RDMA_ATOMIC 4
#define QPN_MAX (1 << 24)
@@ -321,6 +319,7 @@ struct ipath_sge_state {
*/
struct ipath_ack_entry {
u8 opcode;
+ u8 sent;
u32 psn;
union {
struct ipath_sge_state rdma_sge;
@@ -781,8 +780,6 @@ void ipath_update_mmap_info(struct ipath_ibdev *dev,
int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
-
void ipath_insert_rnr_queue(struct ipath_qp *qp);
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
@@ -806,8 +803,6 @@ void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
int ipath_ib_piobufavail(struct ipath_ibdev *);
-void ipath_ib_timer(struct ipath_ibdev *);
-
unsigned ipath_get_npkeys(struct ipath_devdata *);
u32 ipath_get_cr_errpkey(struct ipath_devdata *);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index dd691cfa5079..9e5abf9c309d 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 0095bb70f34e..1d7bd82a1fb1 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 04696e62da87..3428acb0868c 100644
--- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
+++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 QLogic, Inc. All rights reserved.
+ * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -63,12 +63,29 @@ int ipath_enable_wc(struct ipath_devdata *dd)
* of 2 address matching the length (which has to be a power of 2).
* For rev1, that means the base address, for rev2, it will be just
* the PIO buffers themselves.
+ * For chips with two sets of buffers, the calculations are
+ * somewhat more complicated; we need to sum, and the piobufbase
+ * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
+ * The buffers are still packed, so a single range covers both.
*/
- pioaddr = addr + dd->ipath_piobufbase;
- piolen = (dd->ipath_piobcnt2k +
- dd->ipath_piobcnt4k) *
- ALIGN(dd->ipath_piobcnt2k +
- dd->ipath_piobcnt4k, dd->ipath_palign);
+ if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
+ unsigned long pio2kbase, pio4kbase;
+ pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
+ pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
+ if (pio2kbase < pio4kbase) { /* all, for now */
+ pioaddr = addr + pio2kbase;
+ piolen = pio4kbase - pio2kbase +
+ dd->ipath_piobcnt4k * dd->ipath_4kalign;
+ } else {
+ pioaddr = addr + pio4kbase;
+ piolen = pio2kbase - pio4kbase +
+ dd->ipath_piobcnt2k * dd->ipath_palign;
+ }
+ } else { /* single buffer size (2K, currently) */
+ pioaddr = addr + dd->ipath_piobufbase;
+ piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
+ dd->ipath_piobcnt4k * dd->ipath_4kalign;
+ }
for (bits = 0; !(piolen & (1ULL << bits)); bits++)
/* do nothing */ ;
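The loop above finds the lowest set bit of piolen because a write-combining (MTRR-style) range must be a power-of-2 length at a base aligned to that length; when the computed window is not already such a range, it has to be widened. A hypothetical helper illustrating the rounding that implies, not code from the driver:

    /* round a WC window length up to the next power of 2; the base must
     * then be aligned to this size for the mapping to be accepted        */
    static unsigned long wc_pow2_len(unsigned long len)
    {
            unsigned long size = 1;

            while (size < len)
                    size <<= 1;
            return size;
    }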
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index b8912cdb9663..4175a4bd0c78 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,5 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
- depends on INFINIBAND
select MLX4_CORE
---help---
This driver provides low-level InfiniBand support for
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c591616dccde..dde8fe9af47e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -169,7 +169,7 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
- props->max_msg_sz = 0x80000000;
+ props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
@@ -523,11 +523,13 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
ibdev->ib_dev.query_device = mlx4_ib_query_device;
@@ -546,10 +548,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
+ ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
+ ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
ibdev->ib_dev.post_send = mlx4_ib_post_send;
ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 24ccadd6e4f8..705ff2fa237e 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -35,6 +35,7 @@
#include <linux/compiler.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
@@ -255,6 +256,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
@@ -266,6 +268,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
+int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 28a08bdd1800..f6315dfb213e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -415,9 +415,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
return 0;
err_wrid:
- if (pd->uobject && !init_attr->srq)
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
- else {
+ if (pd->uobject) {
+ if (!init_attr->srq)
+ mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
+ &qp->db);
+ } else {
kfree(qp->sq.wrid);
kfree(qp->rq.wrid);
}
@@ -742,7 +744,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
printk(KERN_ERR "path MTU (%u) is invalid\n",
attr->path_mtu);
- return -EINVAL;
+ goto out;
}
context->mtu_msgmax = (attr->path_mtu << 5) | 31;
}
@@ -781,10 +783,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_AV) {
if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
- attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
- err = -EINVAL;
+ attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
goto out;
- }
optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
MLX4_QP_OPTPAR_SCHED_QUEUE);
@@ -798,15 +798,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_ALT_PATH) {
if (attr->alt_port_num == 0 ||
attr->alt_port_num > dev->dev->caps.num_ports)
- return -EINVAL;
+ goto out;
if (attr->alt_pkey_index >=
dev->dev->caps.pkey_table_len[attr->alt_port_num])
- return -EINVAL;
+ goto out;
if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
attr->alt_port_num))
- return -EINVAL;
+ goto out;
context->alt_path.pkey_index = attr->alt_pkey_index;
context->alt_path.ackto = attr->alt_timeout << 3;
@@ -1183,6 +1183,43 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq
return cur + nreq >= wq->max_post;
}
+static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+{
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ } else {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare = 0;
+ }
+
+}
+
+static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
+ struct ib_send_wr *wr)
+{
+ memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
+ dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+
+}
+
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
+ struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -1238,26 +1275,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- ((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.atomic.remote_addr);
- ((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.atomic.rkey);
- ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
-
+ set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+ wr->wr.atomic.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
- cpu_to_be64(wr->wr.atomic.swap);
- ((struct mlx4_wqe_atomic_seg *) wqe)->compare =
- cpu_to_be64(wr->wr.atomic.compare_add);
- } else {
- ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
- cpu_to_be64(wr->wr.atomic.compare_add);
- ((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
- }
-
+ set_atomic_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_atomic_seg);
+
size += (sizeof (struct mlx4_wqe_raddr_seg) +
sizeof (struct mlx4_wqe_atomic_seg)) / 16;
@@ -1266,15 +1290,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.rdma.remote_addr);
- ((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.rdma.rkey);
- ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
-
+ set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
wqe += sizeof (struct mlx4_wqe_raddr_seg);
size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
-
break;
default:
@@ -1284,13 +1303,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case IB_QPT_UD:
- memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
- &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
- ((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
- cpu_to_be32(wr->wr.ud.remote_qpn);
- ((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
- cpu_to_be32(wr->wr.ud.remote_qkey);
-
+ set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
break;
@@ -1313,12 +1326,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mlx4_wqe_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mlx4_wqe_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mlx4_wqe_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
+ set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mlx4_wqe_data_seg);
size += sizeof (struct mlx4_wqe_data_seg) / 16;
@@ -1455,3 +1463,151 @@ out:
return err;
}
+
+static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
+{
+ switch (mlx4_state) {
+ case MLX4_QP_STATE_RST: return IB_QPS_RESET;
+ case MLX4_QP_STATE_INIT: return IB_QPS_INIT;
+ case MLX4_QP_STATE_RTR: return IB_QPS_RTR;
+ case MLX4_QP_STATE_RTS: return IB_QPS_RTS;
+ case MLX4_QP_STATE_SQ_DRAINING:
+ case MLX4_QP_STATE_SQD: return IB_QPS_SQD;
+ case MLX4_QP_STATE_SQER: return IB_QPS_SQE;
+ case MLX4_QP_STATE_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
+{
+ switch (mlx4_mig_state) {
+ case MLX4_QP_PM_ARMED: return IB_MIG_ARMED;
+ case MLX4_QP_PM_REARM: return IB_MIG_REARM;
+ case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
+ default: return -1;
+ }
+}
+
+static int to_ib_qp_access_flags(int mlx4_flags)
+{
+ int ib_flags = 0;
+
+ if (mlx4_flags & MLX4_QP_BIT_RRE)
+ ib_flags |= IB_ACCESS_REMOTE_READ;
+ if (mlx4_flags & MLX4_QP_BIT_RWE)
+ ib_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (mlx4_flags & MLX4_QP_BIT_RAE)
+ ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
+ struct mlx4_qp_path *path)
+{
+ memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+ ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
+
+ if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+ return;
+
+ ib_ah_attr->dlid = be16_to_cpu(path->rlid);
+ ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+ ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
+ ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
+ ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+ if (ib_ah_attr->ah_flags) {
+ ib_ah_attr->grh.sgid_index = path->mgid_index;
+ ib_ah_attr->grh.hop_limit = path->hop_limit;
+ ib_ah_attr->grh.traffic_class =
+ (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
+ ib_ah_attr->grh.flow_label =
+ be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
+ memcpy(ib_ah_attr->grh.dgid.raw,
+ path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+ }
+}
+
+int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct mlx4_qp_context context;
+ int mlx4_state;
+ int err;
+
+ if (qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ goto done;
+ }
+
+ err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
+ if (err)
+ return -EINVAL;
+
+ mlx4_state = be32_to_cpu(context.flags) >> 28;
+
+ qp_attr->qp_state = to_ib_qp_state(mlx4_state);
+ qp_attr->path_mtu = context.mtu_msgmax >> 5;
+ qp_attr->path_mig_state =
+ to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
+ qp_attr->qkey = be32_to_cpu(context.qkey);
+ qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
+ qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff;
+ qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff;
+ qp_attr->qp_access_flags =
+ to_ib_qp_access_flags(be32_to_cpu(context.params2));
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
+ to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
+ qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
+ qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ }
+
+ qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
+ if (qp_attr->qp_state == IB_QPS_INIT)
+ qp_attr->port_num = qp->port;
+ else
+ qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;
+
+ /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+ qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;
+
+ qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);
+
+ qp_attr->max_dest_rd_atomic =
+ 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
+ qp_attr->min_rnr_timer =
+ (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
+ qp_attr->timeout = context.pri_path.ackto >> 3;
+ qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7;
+ qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7;
+ qp_attr->alt_timeout = context.alt_path.ackto >> 3;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ /*
+ * We don't support inline sends for kernel QPs (yet), and we
+ * don't know what userspace's value should be.
+ */
+ qp_attr->cap.max_inline_data = 0;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ return 0;
+}
+
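With query_qp and query_srq wired into the mlx4 device ops above, consumers get the standard verbs entry points for reading back negotiated attributes. A usage sketch, assuming a valid ibqp obtained elsewhere:

    struct ib_qp_attr attr;
    struct ib_qp_init_attr init;
    int err;

    err = ib_query_qp(ibqp, &attr, IB_QP_STATE | IB_QP_PATH_MTU, &init);
    if (!err)
            printk(KERN_INFO "qp %u: state %d, mtu %d\n",
                   ibqp->qp_num, attr.qp_state, attr.path_mtu);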
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 12fac1c8989d..408748fb5285 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -240,6 +240,24 @@ int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return 0;
}
+int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx4_ib_srq *srq = to_msrq(ibsrq);
+ int ret;
+ int limit_watermark;
+
+ ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
+ if (ret)
+ return ret;
+
+ srq_attr->srq_limit = be16_to_cpu(limit_watermark);
+ srq_attr->max_wr = srq->msrq.max - 1;
+ srq_attr->max_sge = srq->msrq.max_gs;
+
+ return 0;
+}
+
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
struct mlx4_ib_dev *dev = to_mdev(srq->device);
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index 9aa5a4468a75..03efc074967e 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_MTHCA
tristate "Mellanox HCA support"
- depends on PCI && INFINIBAND
+ depends on PCI
---help---
This is a low-level driver for Mellanox InfiniHost host
channel adapters (HCAs), including the MT23108 PCI-X HCA
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index f930e55b58fc..a76306709618 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -255,7 +255,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
dma_list[i] = t;
pci_unmap_addr_set(&buf->page_list[i], mapping, t);
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ clear_page(buf->page_list[i].buf);
}
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index f40558d76475..acc95892713a 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -357,8 +357,6 @@ void mthca_cmd_event(struct mthca_dev *dev,
context->status = status;
context->out_param = out_param;
- context->token += dev->cmd.token_mask + 1;
-
complete(&context->done);
}
@@ -380,6 +378,7 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
spin_lock(&dev->cmd.context_lock);
BUG_ON(dev->cmd.free_head < 0);
context = &dev->cmd.context[dev->cmd.free_head];
+ context->token += dev->cmd.token_mask + 1;
dev->cmd.free_head = context->next;
spin_unlock(&dev->cmd.context_lock);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 8ec9fa1ff9ea..8592b26dc4e1 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -522,7 +522,7 @@ static int mthca_create_eq(struct mthca_dev *dev,
dma_list[i] = t;
pci_unmap_addr_set(&eq->page_list[i], mapping, t);
- memset(eq->page_list[i].buf, 0, PAGE_SIZE);
+ clear_page(eq->page_list[i].buf);
}
for (i = 0; i < eq->nent; ++i)
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index aa563e61de65..76fed7545c53 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
static int msi = 0;
module_param(msi, int, 0444);
-MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
+MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
#else /* CONFIG_PCI_MSI */
@@ -1117,9 +1117,21 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
if (msi_x && !mthca_enable_msi_x(mdev))
mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
- if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
- !pci_enable_msi(pdev))
- mdev->mthca_flags |= MTHCA_FLAG_MSI;
+ else if (msi) {
+ static int warned;
+
+ if (!warned) {
+ printk(KERN_WARNING PFX "WARNING: MSI support will be "
+ "removed from the ib_mthca driver in January 2008.\n");
+ printk(KERN_WARNING " If you are using MSI and cannot "
+ "switch to MSI-X, please tell "
+ "<general@lists.openfabrics.org>.\n");
+ ++warned;
+ }
+
+ if (!pci_enable_msi(pdev))
+ mdev->mthca_flags |= MTHCA_FLAG_MSI;
+ }
if (mthca_cmd_init(mdev)) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
@@ -1135,7 +1147,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_cmd;
if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
- mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n",
+ mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
(int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
(int) (mdev->fw_ver & 0xffff),
(int) (mthca_hca_table[hca_type].latest_fw >> 32),
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index eef415b12b2e..df01b2026a64 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1578,6 +1578,45 @@ static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
return cur + nreq >= wq->max;
}
+static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
+ struct ib_send_wr *wr)
+{
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
+ aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
+ } else {
+ aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+ aseg->compare = 0;
+ }
+
+}
+
+static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
+ struct ib_send_wr *wr)
+{
+ useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
+ useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
+ useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+
+}
+
+static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
+ struct ib_send_wr *wr)
+{
+ memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
+ useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+ useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+}
+
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -1590,8 +1629,15 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int i;
int size;
- int size0 = 0;
- u32 f0;
+ /*
+ * f0 and size0 are only used if nreq != 0, and they will
+ * always be initialized the first time through the main loop
+ * before nreq is incremented. So nreq cannot become non-zero
+ * without initializing f0 and size0, and they are in fact
+ * never used uninitialized.
+ */
+ int uninitialized_var(size0);
+ u32 uninitialized_var(f0);
int ind;
u8 op0 = 0;
@@ -1636,25 +1682,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- ((struct mthca_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.atomic.remote_addr);
- ((struct mthca_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.atomic.rkey);
- ((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+ set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+ wr->wr.atomic.rkey);
wqe += sizeof (struct mthca_raddr_seg);
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- ((struct mthca_atomic_seg *) wqe)->swap_add =
- cpu_to_be64(wr->wr.atomic.swap);
- ((struct mthca_atomic_seg *) wqe)->compare =
- cpu_to_be64(wr->wr.atomic.compare_add);
- } else {
- ((struct mthca_atomic_seg *) wqe)->swap_add =
- cpu_to_be64(wr->wr.atomic.compare_add);
- ((struct mthca_atomic_seg *) wqe)->compare = 0;
- }
-
+ set_atomic_seg(wqe, wr);
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
@@ -1663,12 +1695,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
case IB_WR_RDMA_READ:
- ((struct mthca_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.rdma.remote_addr);
- ((struct mthca_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.rdma.rkey);
- ((struct mthca_raddr_seg *) wqe)->reserved = 0;
- wqe += sizeof (struct mthca_raddr_seg);
+ set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
+ wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -1683,12 +1712,9 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ((struct mthca_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.rdma.remote_addr);
- ((struct mthca_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.rdma.rkey);
- ((struct mthca_raddr_seg *) wqe)->reserved = 0;
- wqe += sizeof (struct mthca_raddr_seg);
+ set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
+ wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -1700,16 +1726,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UD:
- ((struct mthca_tavor_ud_seg *) wqe)->lkey =
- cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
- ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
- cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
- ((struct mthca_tavor_ud_seg *) wqe)->dqpn =
- cpu_to_be32(wr->wr.ud.remote_qpn);
- ((struct mthca_tavor_ud_seg *) wqe)->qkey =
- cpu_to_be32(wr->wr.ud.remote_qkey);
-
- wqe += sizeof (struct mthca_tavor_ud_seg);
+ set_tavor_ud_seg(wqe, wr);
+ wqe += sizeof (struct mthca_tavor_ud_seg);
size += sizeof (struct mthca_tavor_ud_seg) / 16;
break;
@@ -1734,13 +1752,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mthca_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mthca_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mthca_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
- wqe += sizeof (struct mthca_data_seg);
+ mthca_set_data_seg(wqe, wr->sg_list + i);
+ wqe += sizeof (struct mthca_data_seg);
size += sizeof (struct mthca_data_seg) / 16;
}
@@ -1768,11 +1781,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
mthca_opcode[wr->opcode]);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
- cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+ cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
((wr->send_flags & IB_SEND_FENCE) ?
MTHCA_NEXT_FENCE : 0));
- if (!size0) {
+ if (!nreq) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
f0 = wr->send_flags & IB_SEND_FENCE ?
@@ -1822,7 +1835,14 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int nreq;
int i;
int size;
- int size0 = 0;
+ /*
+ * size0 is only used if nreq != 0, and it will always be
+ * initialized the first time through the main loop before
+ * nreq is incremented. So nreq cannot become non-zero
+ * without initializing size0, and it is in fact never used
+ * uninitialized.
+ */
+ int uninitialized_var(size0);
int ind;
void *wqe;
void *prev_wqe;
@@ -1863,13 +1883,8 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mthca_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mthca_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mthca_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
- wqe += sizeof (struct mthca_data_seg);
+ mthca_set_data_seg(wqe, wr->sg_list + i);
+ wqe += sizeof (struct mthca_data_seg);
size += sizeof (struct mthca_data_seg) / 16;
}
@@ -1881,7 +1896,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD | size);
- if (!size0)
+ if (!nreq)
size0 = size;
++ind;
@@ -1903,7 +1918,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->rq.next_ind = ind;
qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
- size0 = 0;
}
}
@@ -1945,8 +1959,15 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int nreq;
int i;
int size;
- int size0 = 0;
- u32 f0;
+ /*
+ * f0 and size0 are only used if nreq != 0, and they will
+ * always be initialized the first time through the main loop
+ * before nreq is incremented. So nreq cannot become non-zero
+ * without initializing f0 and size0, and they are in fact
+ * never used uninitialized.
+ */
+ int uninitialized_var(size0);
+ u32 uninitialized_var(f0);
int ind;
u8 op0 = 0;
@@ -1966,7 +1987,6 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
- size0 = 0;
/*
* Make sure that descriptors are written before
@@ -2017,26 +2037,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
- ((struct mthca_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.atomic.remote_addr);
- ((struct mthca_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.atomic.rkey);
- ((struct mthca_raddr_seg *) wqe)->reserved = 0;
-
+ set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
+ wr->wr.atomic.rkey);
wqe += sizeof (struct mthca_raddr_seg);
- if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- ((struct mthca_atomic_seg *) wqe)->swap_add =
- cpu_to_be64(wr->wr.atomic.swap);
- ((struct mthca_atomic_seg *) wqe)->compare =
- cpu_to_be64(wr->wr.atomic.compare_add);
- } else {
- ((struct mthca_atomic_seg *) wqe)->swap_add =
- cpu_to_be64(wr->wr.atomic.compare_add);
- ((struct mthca_atomic_seg *) wqe)->compare = 0;
- }
-
- wqe += sizeof (struct mthca_atomic_seg);
+ set_atomic_seg(wqe, wr);
+ wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
break;
@@ -2044,12 +2050,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ((struct mthca_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.rdma.remote_addr);
- ((struct mthca_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.rdma.rkey);
- ((struct mthca_raddr_seg *) wqe)->reserved = 0;
- wqe += sizeof (struct mthca_raddr_seg);
+ set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
+ wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -2064,12 +2067,9 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
- ((struct mthca_raddr_seg *) wqe)->raddr =
- cpu_to_be64(wr->wr.rdma.remote_addr);
- ((struct mthca_raddr_seg *) wqe)->rkey =
- cpu_to_be32(wr->wr.rdma.rkey);
- ((struct mthca_raddr_seg *) wqe)->reserved = 0;
- wqe += sizeof (struct mthca_raddr_seg);
+ set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
+ wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -2081,14 +2081,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
case UD:
- memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
- to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
- ((struct mthca_arbel_ud_seg *) wqe)->dqpn =
- cpu_to_be32(wr->wr.ud.remote_qpn);
- ((struct mthca_arbel_ud_seg *) wqe)->qkey =
- cpu_to_be32(wr->wr.ud.remote_qkey);
-
- wqe += sizeof (struct mthca_arbel_ud_seg);
+ set_arbel_ud_seg(wqe, wr);
+ wqe += sizeof (struct mthca_arbel_ud_seg);
size += sizeof (struct mthca_arbel_ud_seg) / 16;
break;
@@ -2113,13 +2107,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mthca_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mthca_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mthca_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
- wqe += sizeof (struct mthca_data_seg);
+ mthca_set_data_seg(wqe, wr->sg_list + i);
+ wqe += sizeof (struct mthca_data_seg);
size += sizeof (struct mthca_data_seg) / 16;
}
@@ -2151,7 +2140,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
((wr->send_flags & IB_SEND_FENCE) ?
MTHCA_NEXT_FENCE : 0));
- if (!size0) {
+ if (!nreq) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
f0 = wr->send_flags & IB_SEND_FENCE ?
@@ -2241,20 +2230,12 @@ int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mthca_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mthca_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mthca_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
+ mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
}
- if (i < qp->rq.max_gs) {
- ((struct mthca_data_seg *) wqe)->byte_count = 0;
- ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
- ((struct mthca_data_seg *) wqe)->addr = 0;
- }
+ if (i < qp->rq.max_gs)
+ mthca_set_data_seg_inval(wqe);
qp->wrid[ind] = wr->wr_id;
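The uninitialized_var() annotations introduced above only silence gcc's false-positive "may be used uninitialized" warning; the comments next to them carry the actual argument that size0 and f0 are written before use. A minimal sketch of the idea, assuming the usual self-assignment definition from the kernel's compiler headers (illustrative, not the authoritative macro):

/* Illustration: self-assignment gives gcc a definition to see, and the
 * optimizer discards the no-op, so no extra code is generated. */
#define uninitialized_var(x)    x = x

int post_example(int count)
{
        int nreq;
        int uninitialized_var(size0);   /* expands to: int size0 = size0; */

        for (nreq = 0; nreq < count; ++nreq) {
                int size = 16;          /* stand-in for the per-WQE size */

                if (!nreq)              /* first pass always writes size0 */
                        size0 = size;
        }

        return count ? size0 : 0;       /* size0 is only read when work was posted */
}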
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b8f05a526673..88d219e730ad 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -543,20 +543,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mthca_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mthca_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mthca_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
+ mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
}
- if (i < srq->max_gs) {
- ((struct mthca_data_seg *) wqe)->byte_count = 0;
- ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
- ((struct mthca_data_seg *) wqe)->addr = 0;
- }
+ if (i < srq->max_gs)
+ mthca_set_data_seg_inval(wqe);
((struct mthca_next_seg *) prev_wqe)->nda_op =
cpu_to_be32((ind << srq->wqe_shift) | 1);
@@ -662,20 +654,12 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}
for (i = 0; i < wr->num_sge; ++i) {
- ((struct mthca_data_seg *) wqe)->byte_count =
- cpu_to_be32(wr->sg_list[i].length);
- ((struct mthca_data_seg *) wqe)->lkey =
- cpu_to_be32(wr->sg_list[i].lkey);
- ((struct mthca_data_seg *) wqe)->addr =
- cpu_to_be64(wr->sg_list[i].addr);
+ mthca_set_data_seg(wqe, wr->sg_list + i);
wqe += sizeof (struct mthca_data_seg);
}
- if (i < srq->max_gs) {
- ((struct mthca_data_seg *) wqe)->byte_count = 0;
- ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
- ((struct mthca_data_seg *) wqe)->addr = 0;
- }
+ if (i < srq->max_gs)
+ mthca_set_data_seg_inval(wqe);
srq->wrid[ind] = wr->wr_id;
srq->first_free = next_ind;
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index e7d2c1e86199..f6a66fe78e48 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -113,4 +113,19 @@ struct mthca_mlx_seg {
__be16 vcrc;
};
+static __always_inline void mthca_set_data_seg(struct mthca_data_seg *dseg,
+ struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static __always_inline void mthca_set_data_seg_inval(struct mthca_data_seg *dseg)
+{
+ dseg->byte_count = 0;
+ dseg->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ dseg->addr = 0;
+}
+
#endif /* MTHCA_WQE_H */
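With mthca_set_data_seg() and mthca_set_data_seg_inval() available from mthca_wqe.h, every mthca post_send/post_recv path now builds its gather/scatter entries the same way. A condensed sketch of the loop the converted call sites share (names mirror the driver; the surrounding WQE bookkeeping is omitted):

/* Sketch: fill one WQE's data segments from an ib_sge array. */
static void fill_data_segs(void *wqe, struct ib_sge *sg_list,
                           int num_sge, int max_gs)
{
        int i;

        for (i = 0; i < num_sge; ++i) {
                mthca_set_data_seg(wqe, sg_list + i);
                wqe += sizeof (struct mthca_data_seg);
        }

        /* receive paths terminate a short list with an invalid-lkey entry */
        if (i < max_gs)
                mthca_set_data_seg_inval(wqe);
}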
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index af78ccc4ce71..1f76bad020f3 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_IPOIB
tristate "IP-over-InfiniBand"
- depends on INFINIBAND && NETDEVICES && INET && (IPV6 || IPV6=n)
+ depends on NETDEVICES && INET && (IPV6 || IPV6=n)
---help---
Support for the IP-over-InfiniBand protocol (IPoIB). This
transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index ea74d1eaf004..08b4676a3820 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -281,7 +281,6 @@ static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
rep.private_data_len = sizeof data;
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
- rep.target_ack_delay = 20; /* FIXME */
rep.srq = 1;
rep.qp_num = qp->qp_num;
rep.starting_psn = psn;
@@ -1148,7 +1147,6 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task);
- struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned mtu = priv->mcast_mtu;
@@ -1162,7 +1160,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
dev_kfree_skb_any(skb);
spin_lock_irq(&priv->tx_lock);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 8404f05b2b6e..10944888cffd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -197,6 +197,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
}
/*
+ * Drop packets that this interface sent, ie multicast packets
+ * that the HCA has replicated.
+ */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
+ goto repost;
+
+ /*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
@@ -213,24 +220,18 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
- if (wc->slid != priv->local_lid ||
- wc->src_qp != priv->qp->qp_num) {
- skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_ENCAP_LEN);
- dev->last_rx = jiffies;
- ++priv->stats.rx_packets;
- priv->stats.rx_bytes += skb->len;
+ dev->last_rx = jiffies;
+ ++priv->stats.rx_packets;
+ priv->stats.rx_bytes += skb->len;
- skb->dev = dev;
- /* XXX get correct PACKET_ type here */
- skb->pkt_type = PACKET_HOST;
- netif_receive_skb(skb);
- } else {
- ipoib_dbg_data(priv, "dropping loopback packet\n");
- dev_kfree_skb_any(skb);
- }
+ skb->dev = dev;
+ /* XXX get correct PACKET_ type here */
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
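The ipoib_ib_handle_rx_wc() rework above rests on a single observation: a completion whose source LID and source QP match the receiving interface is the HCA's own replicated multicast copy, so it can be dropped before any buffer allocation or skb work. Condensed control flow of the new path (a sketch, with the buffer swapping elided):

        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;            /* our own multicast echo: drop it early */

        /* ... swap in a fresh RX buffer, build the skb ... */
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);
        netif_receive_skb(skb);

repost:
        /* a receive buffer is always handed back to the QP */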
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index aecbb9083f0c..fe604c8d2996 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_ISER
tristate "iSCSI Extensions for RDMA (iSER)"
- depends on INFINIBAND && SCSI && INET
+ depends on SCSI && INET
select SCSI_ISCSI_ATTRS
---help---
Support for the iSCSI Extensions for RDMA (iSER) Protocol
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index dd221eda3ea6..5db314380271 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -134,19 +134,9 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
{
struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
- struct scsi_cmnd *sc = ctask->sc;
iser_ctask->command_sent = 0;
iser_ctask->iser_conn = iser_conn;
-
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- BUG_ON(ctask->total_length == 0);
-
- debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
- ctask->itt, ctask->total_length, ctask->imm_count,
- ctask->unsol_count);
- }
-
iser_ctask_rdma_init(iser_ctask);
}
@@ -219,6 +209,14 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
int error = 0;
+ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+ ctask->itt, scsi_bufflen(ctask->sc),
+ ctask->imm_count, ctask->unsol_count);
+ }
+
debug_scsi("ctask deq [cid %d itt 0x%x]\n",
conn->id, ctask->itt);
@@ -375,7 +373,8 @@ static struct iscsi_transport iscsi_iser_transport;
static struct iscsi_cls_session *
iscsi_iser_session_create(struct iscsi_transport *iscsit,
struct scsi_transport_template *scsit,
- uint32_t initial_cmdsn, uint32_t *hostno)
+ uint16_t cmds_max, uint16_t qdepth,
+ uint32_t initial_cmdsn, uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -386,7 +385,13 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
struct iscsi_iser_cmd_task *iser_ctask;
struct iser_desc *desc;
+ /*
+ * we do not support setting can_queue or cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+ ISCSI_MAX_CMD_PER_LUN,
sizeof(struct iscsi_iser_cmd_task),
sizeof(struct iser_desc),
initial_cmdsn, &hn);
@@ -545,7 +550,7 @@ iscsi_iser_ep_disconnect(__u64 ep_handle)
static struct scsi_host_template iscsi_iser_sht = {
.name = "iSCSI Initiator over iSER, v." DRV_VER,
.queuecommand = iscsi_queuecommand,
- .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
.max_sectors = 1024,
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
@@ -574,8 +579,12 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME |
- ISCSI_TPGT,
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
.host_template = &iscsi_iser_sht,
.conndata_size = sizeof(struct iscsi_conn),
.max_lun = ISCSI_ISER_MAX_LUN,
@@ -592,6 +601,9 @@ static struct iscsi_transport iscsi_iser_transport = {
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_iser_conn_get_stats,
@@ -625,7 +637,7 @@ static int __init iser_init(void)
ig.desc_cache = kmem_cache_create("iser_descriptors",
sizeof (struct iser_desc),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (ig.desc_cache == NULL)
return -ENOMEM;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 8960196ffb0f..1ee867b1b341 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -98,7 +98,7 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
ISER_MAX_RX_MISC_PDUS + \
ISER_MAX_TX_MISC_PDUS)
@@ -110,7 +110,7 @@
#define ISER_INFLIGHT_DATAOUTS 8
-#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
@@ -310,8 +310,6 @@ int iser_conn_init(struct iser_conn **ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
-void iser_conn_release(struct iser_conn *ib_conn);
-
void iser_rcv_completion(struct iser_desc *desc,
unsigned long dto_xfer_len);
@@ -329,9 +327,6 @@ void iser_reg_single(struct iser_device *device,
struct iser_regd_buf *regd_buf,
enum dma_data_direction direction);
-int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
- enum iser_data_dir cmd_dir);
-
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
enum iser_data_dir cmd_dir);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 3651072f6c1f..9ea5b9aaba7c 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -351,18 +351,12 @@ int iser_send_command(struct iscsi_conn *conn,
else
data_buf = &iser_ctask->data[ISER_DIR_OUT];
- if (sc->use_sg) { /* using a scatter list */
- data_buf->buf = sc->request_buffer;
- data_buf->size = sc->use_sg;
- } else if (sc->request_bufflen) {
- /* using a single buffer - convert it into one entry SG */
- sg_init_one(&data_buf->sg_single,
- sc->request_buffer, sc->request_bufflen);
- data_buf->buf = &data_buf->sg_single;
- data_buf->size = 1;
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+ data_buf->size = scsi_sg_count(sc);
}
- data_buf->data_len = sc->request_bufflen;
+ data_buf->data_len = scsi_bufflen(sc);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index fc9f1fd0ae54..36cdf77ae92a 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -103,8 +103,8 @@ void iser_reg_single(struct iser_device *device,
/**
* iser_start_rdma_unaligned_sg
*/
-int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
- enum iser_data_dir cmd_dir)
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
{
int dma_nents;
struct ib_device *dev;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 3702e2375553..d42ec0156eec 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -155,8 +155,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
- params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+ params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
params.cache = 0;
params.flush_function = NULL;
params.access = (IB_ACCESS_LOCAL_WRITE |
@@ -311,6 +311,29 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
}
/**
+ * Frees all conn objects and deallocs conn descriptor
+ */
+static void iser_conn_release(struct iser_conn *ib_conn)
+{
+ struct iser_device *device = ib_conn->device;
+
+ BUG_ON(ib_conn->state != ISER_CONN_DOWN);
+
+ mutex_lock(&ig.connlist_mutex);
+ list_del(&ib_conn->conn_list);
+ mutex_unlock(&ig.connlist_mutex);
+
+ iser_free_ib_conn_res(ib_conn);
+ ib_conn->device = NULL;
+ /* on EVENT_ADDR_ERROR there's no device yet for this conn */
+ if (device != NULL)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+ kfree(ib_conn);
+}
+
+/**
* triggers start of the disconnect procedures and wait for them to be done
*/
void iser_conn_terminate(struct iser_conn *ib_conn)
@@ -550,30 +573,6 @@ connect_failure:
}
/**
- * Frees all conn objects and deallocs conn descriptor
- */
-void iser_conn_release(struct iser_conn *ib_conn)
-{
- struct iser_device *device = ib_conn->device;
-
- BUG_ON(ib_conn->state != ISER_CONN_DOWN);
-
- mutex_lock(&ig.connlist_mutex);
- list_del(&ib_conn->conn_list);
- mutex_unlock(&ig.connlist_mutex);
-
- iser_free_ib_conn_res(ib_conn);
- ib_conn->device = NULL;
- /* on EVENT_ADDR_ERROR there's no device yet for this conn */
- if (device != NULL)
- iser_device_try_release(device);
- if (ib_conn->iser_conn)
- ib_conn->iser_conn->ib_conn = NULL;
- kfree(ib_conn);
-}
-
-
-/**
* iser_reg_page_vec - Register physical memory
*
* returns: 0 on success, errno code on failure
diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
index 8fe3be4e9910..3432dce29520 100644
--- a/drivers/infiniband/ulp/srp/Kconfig
+++ b/drivers/infiniband/ulp/srp/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_SRP
tristate "InfiniBand SCSI RDMA Protocol"
- depends on INFINIBAND && SCSI
+ depends on SCSI
---help---
Support for the SCSI RDMA Protocol over InfiniBand. This
allows you to access storage devices that speak SRP over
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 39bf057fbc43..f01ca182f226 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -455,10 +455,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
struct srp_target_port *target,
struct srp_request *req)
{
- struct scatterlist *scat;
- int nents;
-
- if (!scmnd->request_buffer ||
+ if (!scsi_sglist(scmnd) ||
(scmnd->sc_data_direction != DMA_TO_DEVICE &&
scmnd->sc_data_direction != DMA_FROM_DEVICE))
return;
@@ -468,20 +465,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
req->fmr = NULL;
}
- /*
- * This handling of non-SG commands can be killed when the
- * SCSI midlayer no longer generates non-SG commands.
- */
- if (likely(scmnd->use_sg)) {
- nents = scmnd->use_sg;
- scat = scmnd->request_buffer;
- } else {
- nents = 1;
- scat = &req->fake_sg;
- }
-
- ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents,
- scmnd->sc_data_direction);
+ ib_dma_unmap_sg(target->srp_host->dev->dev, scsi_sglist(scmnd),
+ scsi_sg_count(scmnd), scmnd->sc_data_direction);
}
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
@@ -595,6 +580,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
int ret;
struct srp_device *dev = target->srp_host->dev;
struct ib_device *ibdev = dev->dev;
+ struct scatterlist *sg;
if (!dev->fmr_pool)
return -ENODEV;
@@ -604,16 +590,16 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -EINVAL;
len = page_cnt = 0;
- for (i = 0; i < sg_cnt; ++i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+ scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
- if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) {
+ if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
if (i > 0)
return -EINVAL;
else
++page_cnt;
}
- if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) &
+ if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
~dev->fmr_page_mask) {
if (i < sg_cnt - 1)
return -EINVAL;
@@ -633,12 +619,12 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
return -ENOMEM;
page_cnt = 0;
- for (i = 0; i < sg_cnt; ++i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+ scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
for (j = 0; j < dma_len; j += dev->fmr_page_size)
dma_pages[page_cnt++] =
- (ib_sg_dma_address(ibdev, &scat[i]) &
+ (ib_sg_dma_address(ibdev, sg) &
dev->fmr_page_mask) + j;
}
@@ -673,7 +659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
struct srp_device *dev;
struct ib_device *ibdev;
- if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
+ if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
return sizeof (struct srp_cmd);
if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
@@ -683,18 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
return -EINVAL;
}
- /*
- * This handling of non-SG commands can be killed when the
- * SCSI midlayer no longer generates non-SG commands.
- */
- if (likely(scmnd->use_sg)) {
- nents = scmnd->use_sg;
- scat = scmnd->request_buffer;
- } else {
- nents = 1;
- scat = &req->fake_sg;
- sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
- }
+ nents = scsi_sg_count(scmnd);
+ scat = scsi_sglist(scmnd);
dev = target->srp_host->dev;
ibdev = dev->dev;
@@ -724,6 +700,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
* descriptor.
*/
struct srp_indirect_buf *buf = (void *) cmd->add_data;
+ struct scatterlist *sg;
u32 datalen = 0;
int i;
@@ -732,11 +709,11 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
sizeof (struct srp_indirect_buf) +
count * sizeof (struct srp_direct_buf);
- for (i = 0; i < count; ++i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]);
+ scsi_for_each_sg(scmnd, sg, count, i) {
+ unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
buf->desc_list[i].va =
- cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i]));
+ cpu_to_be64(ib_sg_dma_address(ibdev, sg));
buf->desc_list[i].key =
cpu_to_be32(dev->mr->rkey);
buf->desc_list[i].len = cpu_to_be32(dma_len);
@@ -802,9 +779,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
}
if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
- scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
- scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
if (!req->tsk_mgmt) {
scmnd->host_scribble = (void *) -1L;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 1d53c7bc368f..e3573e7038c4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -106,11 +106,6 @@ struct srp_request {
struct srp_iu *cmd;
struct srp_iu *tsk_mgmt;
struct ib_pool_fmr *fmr;
- /*
- * Fake scatterlist used when scmnd->use_sg==0. Can be killed
- * when the SCSI midlayer no longer generates non-SG commands.
- */
- struct scatterlist fake_sg;
struct completion done;
short index;
u8 cmd_done;
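The SRP and iSER hunks above replace direct use of scmnd->use_sg, request_buffer and request_bufflen with the scsi_sglist()/scsi_sg_count()/scsi_bufflen() accessors; since the midlayer now always provides a scatterlist, the fake single-entry scatterlist in struct srp_request can go. A minimal sketch of the resulting mapping pattern (error handling omitted, names are illustrative):

/* Sketch: map/unmap a command's data buffer via the SCSI accessors. */
static int example_map(struct ib_device *ibdev, struct scsi_cmnd *scmnd)
{
        if (!scsi_sg_count(scmnd))              /* no data phase */
                return 0;

        return ib_dma_map_sg(ibdev, scsi_sglist(scmnd),
                             scsi_sg_count(scmnd),
                             scmnd->sc_data_direction);
}

static void example_unmap(struct ib_device *ibdev, struct scsi_cmnd *scmnd)
{
        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd),
                        scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}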
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index bd686a2a517d..20896d5e5f0e 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -445,6 +445,7 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
static int gameport_thread(void *nothing)
{
+ set_freezable();
do {
gameport_handle_event();
wait_event_interruptible(gameport_wait,
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75b4d2a83dd9..5fe755586623 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -471,37 +471,16 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
return 0;
}
-static struct list_head *list_get_nth_element(struct list_head *list, loff_t *pos)
-{
- struct list_head *node;
- loff_t i = 0;
-
- list_for_each(node, list)
- if (i++ == *pos)
- return node;
-
- return NULL;
-}
-
-static struct list_head *list_get_next_element(struct list_head *list, struct list_head *element, loff_t *pos)
-{
- if (element->next == list)
- return NULL;
-
- ++(*pos);
- return element->next;
-}
-
static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
/* acquire lock here ... Yes, we do need locking, I know, I know... */
- return list_get_nth_element(&input_dev_list, pos);
+ return seq_list_start(&input_dev_list, *pos);
}
static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- return list_get_next_element(&input_dev_list, v, pos);
+ return seq_list_next(v, &input_dev_list, pos);
}
static void input_devices_seq_stop(struct seq_file *seq, void *v)
@@ -592,13 +571,13 @@ static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
/* acquire lock here ... Yes, we do need locking, I know, I know... */
seq->private = (void *)(unsigned long)*pos;
- return list_get_nth_element(&input_handler_list, pos);
+ return seq_list_start(&input_handler_list, *pos);
}
static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
seq->private = (void *)(unsigned long)(*pos + 1);
- return list_get_next_element(&input_handler_list, v, pos);
+ return seq_list_next(v, &input_handler_list, pos);
}
static void input_handlers_seq_stop(struct seq_file *seq, void *v)
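The input.c conversion drops the hand-rolled list walkers in favour of the generic seq_list_start()/seq_list_next() helpers. A minimal sketch of the standard seq_file-over-a-list pattern they enable, assuming a hypothetical my_list and show callback:

/* Sketch: expose a kernel list through seq_file using the list helpers. */
static LIST_HEAD(my_list);

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
        /* take whatever lock protects my_list here */
        return seq_list_start(&my_list, *pos);  /* NULL once *pos passes the end */
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &my_list, pos); /* advances *pos as a side effect */
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
        /* release the lock taken in start() */
}

static int my_seq_show(struct seq_file *seq, void *v)
{
        /* v is the list_head of the current element; container_of() to use it */
        return 0;
}

static const struct seq_operations my_seq_ops = {
        .start = my_seq_start,
        .next  = my_seq_next,
        .stop  = my_seq_stop,
        .show  = my_seq_show,
};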
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 12db72d83ea0..e2abe18e575d 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -275,4 +275,11 @@ config JOYSTICK_XPAD_FF
---help---
Say Y here if you want to take advantage of xbox 360 rumble features.
+config JOYSTICK_XPAD_LEDS
+ bool "LED Support for Xbox360 controller 'BigX' LED"
+ depends on LEDS_CLASS && JOYSTICK_XPAD
+ ---help---
+ This option enables support for the LED which surrounds the Big X on
+ the Xbox 360 controller.
+
endif
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 244089c52650..28080395899c 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -191,13 +191,18 @@ struct usb_xpad {
unsigned char *idata; /* input data */
dma_addr_t idata_dma;
-#ifdef CONFIG_JOYSTICK_XPAD_FF
+#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct urb *irq_out; /* urb for interrupt out report */
unsigned char *odata; /* output data */
dma_addr_t odata_dma;
+ struct mutex odata_mutex;
+#endif
+
+#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
+ struct xpad_led *led;
#endif
- char phys[65]; /* physical device path */
+ char phys[64]; /* physical device path */
int dpad_mapping; /* map d-pad to buttons or to axes */
int xtype; /* type of xbox device */
@@ -349,7 +354,7 @@ exit:
__FUNCTION__, retval);
}
-#ifdef CONFIG_JOYSTICK_XPAD_FF
+#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
static void xpad_irq_out(struct urb *urb)
{
int retval;
@@ -376,29 +381,7 @@ exit:
__FUNCTION__, retval);
}
-static int xpad_play_effect(struct input_dev *dev, void *data,
- struct ff_effect *effect)
-{
- struct usb_xpad *xpad = input_get_drvdata(dev);
-
- if (effect->type == FF_RUMBLE) {
- __u16 strong = effect->u.rumble.strong_magnitude;
- __u16 weak = effect->u.rumble.weak_magnitude;
- xpad->odata[0] = 0x00;
- xpad->odata[1] = 0x08;
- xpad->odata[2] = 0x00;
- xpad->odata[3] = strong / 256;
- xpad->odata[4] = weak / 256;
- xpad->odata[5] = 0x00;
- xpad->odata[6] = 0x00;
- xpad->odata[7] = 0x00;
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
- }
-
- return 0;
-}
-
-static int xpad_init_ff(struct usb_interface *intf, struct usb_xpad *xpad)
+static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
{
struct usb_endpoint_descriptor *ep_irq_out;
int error = -ENOMEM;
@@ -411,6 +394,8 @@ static int xpad_init_ff(struct usb_interface *intf, struct usb_xpad *xpad)
if (!xpad->odata)
goto fail1;
+ mutex_init(&xpad->odata_mutex);
+
xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
if (!xpad->irq_out)
goto fail2;
@@ -423,25 +408,19 @@ static int xpad_init_ff(struct usb_interface *intf, struct usb_xpad *xpad)
xpad->irq_out->transfer_dma = xpad->odata_dma;
xpad->irq_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
-
- error = input_ff_create_memless(xpad->dev, NULL, xpad_play_effect);
- if (error)
- goto fail2;
-
return 0;
fail2: usb_buffer_free(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
fail1: return error;
}
-static void xpad_stop_ff(struct usb_xpad *xpad)
+static void xpad_stop_output(struct usb_xpad *xpad)
{
if (xpad->xtype == XTYPE_XBOX360)
usb_kill_urb(xpad->irq_out);
}
-static void xpad_deinit_ff(struct usb_xpad *xpad)
+static void xpad_deinit_output(struct usb_xpad *xpad)
{
if (xpad->xtype == XTYPE_XBOX360) {
usb_free_urb(xpad->irq_out);
@@ -449,13 +428,130 @@ static void xpad_deinit_ff(struct usb_xpad *xpad)
xpad->odata, xpad->odata_dma);
}
}
+#else
+static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; }
+static void xpad_deinit_output(struct usb_xpad *xpad) {}
+static void xpad_stop_output(struct usb_xpad *xpad) {}
+#endif
+
+#ifdef CONFIG_JOYSTICK_XPAD_FF
+static int xpad_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct usb_xpad *xpad = input_get_drvdata(dev);
+ if (effect->type == FF_RUMBLE) {
+ __u16 strong = effect->u.rumble.strong_magnitude;
+ __u16 weak = effect->u.rumble.weak_magnitude;
+ xpad->odata[0] = 0x00;
+ xpad->odata[1] = 0x08;
+ xpad->odata[2] = 0x00;
+ xpad->odata[3] = strong / 256;
+ xpad->odata[4] = weak / 256;
+ xpad->odata[5] = 0x00;
+ xpad->odata[6] = 0x00;
+ xpad->odata[7] = 0x00;
+ usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ }
+
+ return 0;
+}
+
+static int xpad_init_ff(struct usb_xpad *xpad)
+{
+ input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
+
+ return input_ff_create_memless(xpad->dev, NULL, xpad_play_effect);
+}
+
+#else
+static int xpad_init_ff(struct usb_xpad *xpad) { return 0; }
+#endif
+
+#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
+#include <linux/leds.h>
+
+struct xpad_led {
+ char name[16];
+ struct led_classdev led_cdev;
+ struct usb_xpad *xpad;
+};
+
+static void xpad_send_led_command(struct usb_xpad *xpad, int command)
+{
+ if (command >= 0 && command < 14) {
+ mutex_lock(&xpad->odata_mutex);
+ xpad->odata[0] = 0x01;
+ xpad->odata[1] = 0x03;
+ xpad->odata[2] = command;
+ usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ mutex_unlock(&xpad->odata_mutex);
+ }
+}
+
+static void xpad_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct xpad_led *xpad_led = container_of(led_cdev,
+ struct xpad_led, led_cdev);
+
+ xpad_send_led_command(xpad_led->xpad, value);
+}
+
+static int xpad_led_probe(struct usb_xpad *xpad)
+{
+ static atomic_t led_seq = ATOMIC_INIT(0);
+ long led_no;
+ struct xpad_led *led;
+ struct led_classdev *led_cdev;
+ int error;
+
+ if (xpad->xtype != XTYPE_XBOX360)
+ return 0;
+
+ xpad->led = led = kzalloc(sizeof(struct xpad_led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led_no = (long)atomic_inc_return(&led_seq) - 1;
+
+ snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
+ led->xpad = xpad;
+
+ led_cdev = &led->led_cdev;
+ led_cdev->name = led->name;
+ led_cdev->brightness_set = xpad_led_set;
+
+ error = led_classdev_register(&xpad->udev->dev, led_cdev);
+ if (error) {
+ kfree(led);
+ xpad->led = NULL;
+ return error;
+ }
+
+ /*
+ * Light up the segment corresponding to the controller number
+ */
+ xpad_send_led_command(xpad, (led_no % 4) + 2);
+
+ return 0;
+}
+
+static void xpad_led_disconnect(struct usb_xpad *xpad)
+{
+ struct xpad_led *xpad_led = xpad->led;
+
+ if (xpad_led) {
+ led_classdev_unregister(&xpad_led->led_cdev);
+ kfree(xpad_led);
+ }
+}
#else
-static int xpad_init_ff(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; }
-static void xpad_stop_ff(struct usb_xpad *xpad) { }
-static void xpad_deinit_ff(struct usb_xpad *xpad) { }
+static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
+static void xpad_led_disconnect(struct usb_xpad *xpad) { }
#endif
+
static int xpad_open(struct input_dev *dev)
{
struct usb_xpad *xpad = input_get_drvdata(dev);
@@ -472,7 +568,7 @@ static void xpad_close(struct input_dev *dev)
struct usb_xpad *xpad = input_get_drvdata(dev);
usb_kill_urb(xpad->irq_in);
- xpad_stop_ff(xpad);
+ xpad_stop_output(xpad);
}
static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
@@ -564,10 +660,18 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
for (i = 0; xpad_abs_pad[i] >= 0; i++)
xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
- error = xpad_init_ff(intf, xpad);
+ error = xpad_init_output(intf, xpad);
if (error)
goto fail2;
+ error = xpad_init_ff(xpad);
+ if (error)
+ goto fail3;
+
+ error = xpad_led_probe(xpad);
+ if (error)
+ goto fail3;
+
ep_irq_in = &intf->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(xpad->irq_in, udev,
usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
@@ -578,12 +682,13 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
error = input_register_device(xpad->dev);
if (error)
- goto fail3;
+ goto fail4;
usb_set_intfdata(intf, xpad);
return 0;
- fail3: usb_free_urb(xpad->irq_in);
+ fail4: usb_free_urb(xpad->irq_in);
+ fail3: xpad_deinit_output(xpad);
fail2: usb_buffer_free(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
fail1: input_free_device(input_dev);
kfree(xpad);
@@ -597,8 +702,9 @@ static void xpad_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (xpad) {
+ xpad_led_disconnect(xpad);
input_unregister_device(xpad->dev);
- xpad_deinit_ff(xpad);
+ xpad_deinit_output(xpad);
usb_free_urb(xpad->irq_in);
usb_buffer_free(xpad->udev, XPAD_PKT_LEN,
xpad->idata, xpad->idata_dma);
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 31989dcd922c..906bf5e8de89 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -24,7 +24,12 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("PC Speaker beeper driver");
MODULE_LICENSE("GPL");
-static DEFINE_SPINLOCK(i8253_beep_lock);
+#ifdef CONFIG_X86
+/* Use the global PIT lock ! */
+#include <asm/i8253.h>
+#else
+static DEFINE_SPINLOCK(i8253_lock);
+#endif
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
@@ -43,7 +48,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
if (value > 20 && value < 32767)
count = PIT_TICK_RATE / value;
- spin_lock_irqsave(&i8253_beep_lock, flags);
+ spin_lock_irqsave(&i8253_lock, flags);
if (count) {
/* enable counter 2 */
@@ -58,7 +63,7 @@ static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int c
outb(inb_p(0x61) & 0xFC, 0x61);
}
- spin_unlock_irqrestore(&i8253_beep_lock, flags);
+ spin_unlock_irqrestore(&i8253_lock, flags);
return 0;
}
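The pcspkr change is purely about locking: on x86 the beeper now serializes against the kernel's other PIT users through the shared i8253_lock instead of a private spinlock. The divisor math is untouched; as a worked example of what pcspkr_event() computes, PIT_TICK_RATE is 1193182 Hz, so a 440 Hz beep loads counter 2 with:

        unsigned int value = 440;                       /* requested tone, Hz */
        unsigned int count = PIT_TICK_RATE / value;     /* 1193182 / 440 = 2711 */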
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index e3215267db11..2bea1b2c631c 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -155,6 +155,8 @@ struct atp {
int xy_acc[ATP_XSENSORS + ATP_YSENSORS];
int overflowwarn; /* overflow warning printed? */
int datalen; /* size of an USB urb transfer */
+ int idlecount; /* number of empty packets */
+ struct work_struct work;
};
#define dbg_dump(msg, tab) \
@@ -208,6 +210,55 @@ static inline int atp_is_geyser_3(struct atp *dev)
(productId == GEYSER4_JIS_PRODUCT_ID);
}
+/*
+ * By default Geyser 3 device sends standard USB HID mouse
+ * packets (Report ID 2). This code changes device mode, so it
+ * sends raw sensor reports (Report ID 5).
+ */
+static int atp_geyser3_init(struct usb_device *udev)
+{
+ char data[8];
+ int size;
+
+ size = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ ATP_GEYSER3_MODE_READ_REQUEST_ID,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ATP_GEYSER3_MODE_REQUEST_VALUE,
+ ATP_GEYSER3_MODE_REQUEST_INDEX, &data, 8, 5000);
+
+ if (size != 8) {
+ err("Could not do mode read request from device"
+ " (Geyser 3 mode)");
+ return -EIO;
+ }
+
+ /* Apply the mode switch */
+ data[0] = ATP_GEYSER3_MODE_VENDOR_VALUE;
+
+ size = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ ATP_GEYSER3_MODE_WRITE_REQUEST_ID,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ ATP_GEYSER3_MODE_REQUEST_VALUE,
+ ATP_GEYSER3_MODE_REQUEST_INDEX, &data, 8, 5000);
+
+ if (size != 8) {
+ err("Could not do mode write request to device"
+ " (Geyser 3 mode)");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Reinitialise the device if it's a Geyser 3 */
+static void atp_reinit(struct work_struct *work)
+{
+ struct atp *dev = container_of(work, struct atp, work);
+ struct usb_device *udev = dev->udev;
+
+ dev->idlecount = 0;
+ atp_geyser3_init(udev);
+}
+
static int atp_calculate_abs(int *xy_sensors, int nb_sensors, int fact,
int *z, int *fingers)
{
@@ -439,8 +490,8 @@ static void atp_complete(struct urb* urb)
}
dev->x_old = x;
dev->y_old = y;
- }
- else if (!x && !y) {
+
+ } else if (!x && !y) {
dev->x_old = dev->y_old = -1;
input_report_key(dev->input, BTN_TOUCH, 0);
@@ -449,11 +500,21 @@ static void atp_complete(struct urb* urb)
/* reset the accumulator on release */
memset(dev->xy_acc, 0, sizeof(dev->xy_acc));
- }
- input_report_key(dev->input, BTN_LEFT,
- !!dev->data[dev->datalen - 1]);
+ /* Geyser 3 will keep sending packets continually after
+ the first touch unless reinitialised. Do so if it's been
+ idle for a while in order to avoid waking the kernel up
+ several hundred times a second */
+ if (atp_is_geyser_3(dev)) {
+ dev->idlecount++;
+ if (dev->idlecount == 10) {
+ dev->valid = 0;
+ schedule_work(&dev->work);
+ }
+ }
+ }
+ input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1);
input_sync(dev->input);
exit:
@@ -480,6 +541,7 @@ static void atp_close(struct input_dev *input)
struct atp *dev = input_get_drvdata(input);
usb_kill_urb(dev->urb);
+ cancel_work_sync(&dev->work);
dev->open = 0;
}
@@ -528,40 +590,10 @@ static int atp_probe(struct usb_interface *iface, const struct usb_device_id *id
dev->datalen = 81;
if (atp_is_geyser_3(dev)) {
- /*
- * By default Geyser 3 device sends standard USB HID mouse
- * packets (Report ID 2). This code changes device mode, so it
- * sends raw sensor reports (Report ID 5).
- */
- char data[8];
- int size;
-
- size = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- ATP_GEYSER3_MODE_READ_REQUEST_ID,
- USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ATP_GEYSER3_MODE_REQUEST_VALUE,
- ATP_GEYSER3_MODE_REQUEST_INDEX, &data, 8, 5000);
-
- if (size != 8) {
- err("Could not do mode read request from device"
- " (Geyser 3 mode)");
+ /* switch to raw sensor mode */
+ if (atp_geyser3_init(udev))
goto err_free_devs;
- }
-
- /* Apply the mode switch */
- data[0] = ATP_GEYSER3_MODE_VENDOR_VALUE;
-
- size = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- ATP_GEYSER3_MODE_WRITE_REQUEST_ID,
- USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- ATP_GEYSER3_MODE_REQUEST_VALUE,
- ATP_GEYSER3_MODE_REQUEST_INDEX, &data, 8, 5000);
- if (size != 8) {
- err("Could not do mode write request to device"
- " (Geyser 3 mode)");
- goto err_free_devs;
- }
printk("appletouch Geyser 3 inited.\n");
}
@@ -636,6 +668,8 @@ static int atp_probe(struct usb_interface *iface, const struct usb_device_id *id
/* save our data pointer in this interface device */
usb_set_intfdata(iface, dev);
+ INIT_WORK(&dev->work, atp_reinit);
+
return 0;
err_free_buffer:
@@ -669,14 +703,17 @@ static void atp_disconnect(struct usb_interface *iface)
static int atp_suspend(struct usb_interface *iface, pm_message_t message)
{
struct atp *dev = usb_get_intfdata(iface);
+
usb_kill_urb(dev->urb);
dev->valid = 0;
+
return 0;
}
static int atp_resume(struct usb_interface *iface)
{
struct atp *dev = usb_get_intfdata(iface);
+
if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC))
return -EIO;
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 1740cadd9594..91109b49fde1 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -109,7 +109,7 @@ static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse)
{
struct lifebook_data *priv = psmouse->private;
struct input_dev *dev1 = psmouse->dev;
- struct input_dev *dev2 = priv->dev2;
+ struct input_dev *dev2 = priv ? priv->dev2 : NULL;
unsigned char *packet = psmouse->packet;
int relative_packet = packet[0] & 0x08;
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 27a68835b5ba..1317bdd8cc7c 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -119,7 +119,6 @@ static struct psmouse_attribute psmouse_attr_##_name = { \
.attr = { \
.name = __stringify(_name), \
.mode = _mode, \
- .owner = THIS_MODULE, \
}, \
.show = psmouse_attr_show_helper, \
.store = psmouse_attr_set_helper, \
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 5a7b49c35539..b10ffae7c39b 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -117,15 +117,13 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
if (ret)
return ret;
- kmi = kmalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
- io = kmalloc(sizeof(struct serio), GFP_KERNEL);
+ kmi = kzalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
+ io = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!kmi || !io) {
ret = -ENOMEM;
goto out;
}
- memset(kmi, 0, sizeof(struct amba_kmi_port));
- memset(io, 0, sizeof(struct serio));
io->id.type = SERIO_8042;
io->write = amba_kmi_write;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 4fca1e7f2678..702a526cf45b 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -366,6 +366,7 @@ static void i8042_pnp_exit(void)
static int __init i8042_pnp_init(void)
{
char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
+ int pnp_data_busted = 0;
int err;
if (i8042_nopnp) {
@@ -413,27 +414,48 @@ static int __init i8042_pnp_init(void)
#endif
if (((i8042_pnp_data_reg & ~0xf) == (i8042_data_reg & ~0xf) &&
- i8042_pnp_data_reg != i8042_data_reg) || !i8042_pnp_data_reg) {
- printk(KERN_WARNING "PNP: PS/2 controller has invalid data port %#x; using default %#x\n",
+ i8042_pnp_data_reg != i8042_data_reg) ||
+ !i8042_pnp_data_reg) {
+ printk(KERN_WARNING
+ "PNP: PS/2 controller has invalid data port %#x; "
+ "using default %#x\n",
i8042_pnp_data_reg, i8042_data_reg);
i8042_pnp_data_reg = i8042_data_reg;
+ pnp_data_busted = 1;
}
if (((i8042_pnp_command_reg & ~0xf) == (i8042_command_reg & ~0xf) &&
- i8042_pnp_command_reg != i8042_command_reg) || !i8042_pnp_command_reg) {
- printk(KERN_WARNING "PNP: PS/2 controller has invalid command port %#x; using default %#x\n",
+ i8042_pnp_command_reg != i8042_command_reg) ||
+ !i8042_pnp_command_reg) {
+ printk(KERN_WARNING
+ "PNP: PS/2 controller has invalid command port %#x; "
+ "using default %#x\n",
i8042_pnp_command_reg, i8042_command_reg);
i8042_pnp_command_reg = i8042_command_reg;
+ pnp_data_busted = 1;
}
if (!i8042_nokbd && !i8042_pnp_kbd_irq) {
- printk(KERN_WARNING "PNP: PS/2 controller doesn't have KBD irq; using default %d\n", i8042_kbd_irq);
+ printk(KERN_WARNING
+ "PNP: PS/2 controller doesn't have KBD irq; "
+ "using default %d\n", i8042_kbd_irq);
i8042_pnp_kbd_irq = i8042_kbd_irq;
+ pnp_data_busted = 1;
}
if (!i8042_noaux && !i8042_pnp_aux_irq) {
- printk(KERN_WARNING "PNP: PS/2 controller doesn't have AUX irq; using default %d\n", i8042_aux_irq);
- i8042_pnp_aux_irq = i8042_aux_irq;
+ if (!pnp_data_busted && i8042_pnp_kbd_irq) {
+ printk(KERN_WARNING
+ "PNP: PS/2 appears to have AUX port disabled, "
+ "if this is incorrect please boot with "
+ "i8042.nopnp\n");
+ i8042_noaux = 1;
+ } else {
+ printk(KERN_WARNING
+ "PNP: PS/2 controller doesn't have AUX irq; "
+ "using default %d\n", i8042_aux_irq);
+ i8042_pnp_aux_irq = i8042_aux_irq;
+ }
}
i8042_data_reg = i8042_pnp_data_reg;
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index ea5e3c6ddb62..1b404f9e3bff 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -140,15 +140,13 @@ static int __devinit pcips2_probe(struct pci_dev *dev, const struct pci_device_i
if (ret)
goto disable;
- ps2if = kmalloc(sizeof(struct pcips2_data), GFP_KERNEL);
- serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+ ps2if = kzalloc(sizeof(struct pcips2_data), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ps2if || !serio) {
ret = -ENOMEM;
goto release;
}
- memset(ps2if, 0, sizeof(struct pcips2_data));
- memset(serio, 0, sizeof(struct serio));
serio->id.type = SERIO_8042;
serio->write = pcips2_write;
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index d31ece8f68e9..2ad88780a170 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -234,15 +234,13 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
struct serio *serio;
int ret;
- ps2if = kmalloc(sizeof(struct ps2if), GFP_KERNEL);
- serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
+ ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ps2if || !serio) {
ret = -ENOMEM;
goto free;
}
- memset(ps2if, 0, sizeof(struct ps2if));
- memset(serio, 0, sizeof(struct serio));
serio->id.type = SERIO_8042;
serio->write = ps2_write;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a8f3bc1dff22..372ca4931194 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -384,6 +384,7 @@ static struct serio *serio_get_pending_child(struct serio *parent)
static int serio_thread(void *nothing)
{
+ set_freezable();
do {
serio_handle_event();
wait_event_interruptible(serio_wait,
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 69371779806a..f929fcdbae2e 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -54,6 +54,19 @@ config TOUCHSCREEN_CORGI
To compile this driver as a module, choose M here: the
module will be called corgi_ts.
+config TOUCHSCREEN_FUJITSU
+ tristate "Fujitsu serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have the Fujitsu touchscreen (such as the one
+ installed in Lifebook P series laptops) connected to your
+ system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fujitsu-ts.
+
config TOUCHSCREEN_GUNZE
tristate "Gunze AHL-51S touchscreen"
select SERIO
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 2f86d6ad06d3..5de8933c4993 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_CORGI) += corgi_ts.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o
obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 1c9069cd3bae..96581d08774f 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -95,7 +95,7 @@ struct ads7846 {
u16 dummy; /* for the pwrdown read */
struct ts_event tc;
- struct spi_transfer xfer[10];
+ struct spi_transfer xfer[18];
struct spi_message msg[5];
struct spi_message *last_msg;
int msg_idx;
@@ -107,6 +107,8 @@ struct ads7846 {
u16 debounce_tol;
u16 debounce_rep;
+ u16 penirq_recheck_delay_usecs;
+
spinlock_t lock;
struct hrtimer timer;
unsigned pendown:1; /* P: lock */
@@ -553,6 +555,15 @@ static void ads7846_rx(void *ads)
return;
}
+ /* Maybe check the pendown state before reporting. This discards
+ * false readings when the pen is lifted.
+ */
+ if (ts->penirq_recheck_delay_usecs) {
+ udelay(ts->penirq_recheck_delay_usecs);
+ if (!ts->get_pendown_state())
+ Rt = 0;
+ }
+
/* NOTE: We can't rely on the pressure to determine the pen down
* state, even though this controller has a pressure sensor. The pressure
* value can fluctuate for quite a while after lifting the pen and
@@ -896,6 +907,10 @@ static int __devinit ads7846_probe(struct spi_device *spi)
ts->filter = ads7846_no_filter;
ts->get_pendown_state = pdata->get_pendown_state;
+ if (pdata->penirq_recheck_delay_usecs)
+ ts->penirq_recheck_delay_usecs =
+ pdata->penirq_recheck_delay_usecs;
+
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", spi->dev.bus_id);
input_dev->name = "ADS784x Touchscreen";
@@ -936,6 +951,24 @@ static int __devinit ads7846_probe(struct spi_device *spi)
x->len = 2;
spi_message_add_tail(x, m);
+ /* the first sample after switching drivers can be low quality;
+ * optionally discard it, using a second one after the signals
+ * have had enough time to stabilize.
+ */
+ if (pdata->settle_delay_usecs) {
+ x->delay_usecs = pdata->settle_delay_usecs;
+
+ x++;
+ x->tx_buf = &ts->read_y;
+ x->len = 1;
+ spi_message_add_tail(x, m);
+
+ x++;
+ x->rx_buf = &ts->tc.y;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+ }
+
m->complete = ads7846_rx_val;
m->context = ts;
@@ -954,6 +987,21 @@ static int __devinit ads7846_probe(struct spi_device *spi)
x->len = 2;
spi_message_add_tail(x, m);
+ /* ... maybe discard first sample ... */
+ if (pdata->settle_delay_usecs) {
+ x->delay_usecs = pdata->settle_delay_usecs;
+
+ x++;
+ x->tx_buf = &ts->read_x;
+ x->len = 1;
+ spi_message_add_tail(x, m);
+
+ x++;
+ x->rx_buf = &ts->tc.x;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+ }
+
m->complete = ads7846_rx_val;
m->context = ts;
@@ -973,6 +1021,21 @@ static int __devinit ads7846_probe(struct spi_device *spi)
x->len = 2;
spi_message_add_tail(x, m);
+ /* ... maybe discard first sample ... */
+ if (pdata->settle_delay_usecs) {
+ x->delay_usecs = pdata->settle_delay_usecs;
+
+ x++;
+ x->tx_buf = &ts->read_z1;
+ x->len = 1;
+ spi_message_add_tail(x, m);
+
+ x++;
+ x->rx_buf = &ts->tc.z1;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+ }
+
m->complete = ads7846_rx_val;
m->context = ts;
@@ -990,6 +1053,21 @@ static int __devinit ads7846_probe(struct spi_device *spi)
x->len = 2;
spi_message_add_tail(x, m);
+ /* ... maybe discard first sample ... */
+ if (pdata->settle_delay_usecs) {
+ x->delay_usecs = pdata->settle_delay_usecs;
+
+ x++;
+ x->tx_buf = &ts->read_z2;
+ x->len = 1;
+ spi_message_add_tail(x, m);
+
+ x++;
+ x->rx_buf = &ts->tc.z2;
+ x->len = 2;
+ spi_message_add_tail(x, m);
+ }
+
m->complete = ads7846_rx_val;
m->context = ts;
}
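Both ads7846 additions are opt-in via platform data: settle_delay_usecs schedules a delayed second conversion so the first sample taken after the drive signals switch is discarded, and penirq_recheck_delay_usecs re-samples the pen-down state just before a point is reported. A board-file sketch, assuming struct ads7846_platform_data and a board-specific pendown helper (values are illustrative):

/* Sketch only: illustrative values, tune per board. */
static int example_get_pendown_state(void)
{
        return 1;       /* board-specific: read the PENIRQ GPIO here */
}

static struct ads7846_platform_data example_ts_info = {
        .model                       = 7846,
        .x_max                       = 0x0fff,
        .y_max                       = 0x0fff,
        .settle_delay_usecs          = 150,     /* resample after the mux settles */
        .penirq_recheck_delay_usecs  = 100,     /* re-check pen-down before reporting */
        .get_pendown_state           = example_get_pendown_state,
};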
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c
new file mode 100644
index 000000000000..daf7a4afc935
--- /dev/null
+++ b/drivers/input/touchscreen/fujitsu_ts.c
@@ -0,0 +1,189 @@
+/*
+ * Fujitsu serial touchscreen driver
+ *
+ * Copyright (c) Dmitry Torokhov <dtor@mail.ru>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/init.h>
+
+#define DRIVER_DESC "Fujitsu serial touchscreen driver"
+
+MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+#define FUJITSU_LENGTH 5
+
+/*
+ * Per-touchscreen data.
+ */
+struct fujitsu {
+ struct input_dev *dev;
+ struct serio *serio;
+ int idx;
+ unsigned char data[FUJITSU_LENGTH];
+ char phys[32];
+};
+
+/*
+ * Decode serial data (5 bytes per packet)
+ * First byte
+ * 1 C 0 0 R S S S
+ * Where C is 1 while in calibration mode (which we don't use)
+ * R is 1 when no coordinate correction was done.
+ * S are the button state bits
+ */
+static irqreturn_t fujitsu_interrupt(struct serio *serio,
+ unsigned char data, unsigned int flags)
+{
+ struct fujitsu *fujitsu = serio_get_drvdata(serio);
+ struct input_dev *dev = fujitsu->dev;
+
+ if (fujitsu->idx == 0) {
+ /* resync skip until start of frame */
+ if ((data & 0xf0) != 0x80)
+ return IRQ_HANDLED;
+ } else {
+ /* resync skip garbage */
+ if (data & 0x80) {
+ fujitsu->idx = 0;
+ return IRQ_HANDLED;
+ }
+ }
+
+ fujitsu->data[fujitsu->idx++] = data;
+ if (fujitsu->idx == FUJITSU_LENGTH) {
+ input_report_abs(dev, ABS_X,
+ (fujitsu->data[2] << 7) | fujitsu->data[1]);
+ input_report_abs(dev, ABS_Y,
+ (fujitsu->data[4] << 7) | fujitsu->data[3]);
+ input_report_key(dev, BTN_TOUCH,
+ (fujitsu->data[0] & 0x03) != 2);
+ input_sync(dev);
+ fujitsu->idx = 0;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * fujitsu_disconnect() is the opposite of fujitsu_connect()
+ */
+static void fujitsu_disconnect(struct serio *serio)
+{
+ struct fujitsu *fujitsu = serio_get_drvdata(serio);
+
+ input_get_device(fujitsu->dev);
+ input_unregister_device(fujitsu->dev);
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ input_put_device(fujitsu->dev);
+ kfree(fujitsu);
+}
+
+/*
+ * fujitsu_connect() is the routine that is called when someone adds a
+ * new serio device that supports the Fujitsu protocol and registers it
+ * as input device.
+ */
+static int fujitsu_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct fujitsu *fujitsu;
+ struct input_dev *input_dev;
+ int err;
+
+ fujitsu = kzalloc(sizeof(struct fujitsu), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!fujitsu || !input_dev) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ fujitsu->serio = serio;
+ fujitsu->dev = input_dev;
+ snprintf(fujitsu->phys, sizeof(fujitsu->phys),
+ "%s/input0", serio->phys);
+
+ input_dev->name = "Fujitsu Serial Touchscreen";
+ input_dev->phys = fujitsu->phys;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = SERIO_FUJITSU;
+ input_dev->id.product = 0;
+ input_dev->id.version = 0x0100;
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+ input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
+
+ input_set_abs_params(input_dev, ABS_X, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 4096, 0, 0);
+ serio_set_drvdata(serio, fujitsu);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto fail2;
+
+ err = input_register_device(fujitsu->dev);
+ if (err)
+ goto fail3;
+
+ return 0;
+
+ fail3:
+ serio_close(serio);
+ fail2:
+ serio_set_drvdata(serio, NULL);
+ fail1:
+ input_free_device(input_dev);
+ kfree(fujitsu);
+ return err;
+}
+
+/*
+ * The serio driver structure.
+ */
+static struct serio_device_id fujitsu_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_FUJITSU,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, fujitsu_serio_ids);
+
+static struct serio_driver fujitsu_drv = {
+ .driver = {
+ .name = "fujitsu_ts",
+ },
+ .description = DRIVER_DESC,
+ .id_table = fujitsu_serio_ids,
+ .interrupt = fujitsu_interrupt,
+ .connect = fujitsu_connect,
+ .disconnect = fujitsu_disconnect,
+};
+
+static int __init fujitsu_init(void)
+{
+ return serio_register_driver(&fujitsu_drv);
+}
+
+static void __exit fujitsu_exit(void)
+{
+ serio_unregister_driver(&fujitsu_drv);
+}
+
+module_init(fujitsu_init);
+module_exit(fujitsu_exit);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f0cbcdb008ed..36f944019158 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -292,6 +292,7 @@ static int ucb1400_ts_thread(void *_ucb)
sched_setscheduler(tsk, SCHED_FIFO, &param);
+ set_freezable();
while (!kthread_should_stop()) {
unsigned int x, y, p;
long timeout;
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 3e088c42b222..66f946aa30b3 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -2,12 +2,10 @@
# ISDN device configuration
#
-menu "ISDN subsystem"
- depends on !S390
-
-config ISDN
+menuconfig ISDN
tristate "ISDN support"
depends on NET
+ depends on !S390
---help---
ISDN ("Integrated Services Digital Networks", called RNIS in France)
is a special type of fully digital telephone service; it's mostly
@@ -21,11 +19,9 @@ config ISDN
Select this option if you want your kernel to support ISDN.
+if ISDN
-menu "Old ISDN4Linux"
- depends on NET && ISDN
-
-config ISDN_I4L
+menuconfig ISDN_I4L
tristate "Old ISDN4Linux (deprecated)"
---help---
This driver allows you to use an ISDN adapter for networking
@@ -47,23 +43,20 @@ if ISDN_I4L
source "drivers/isdn/i4l/Kconfig"
endif
-endmenu
-
-comment "CAPI subsystem"
- depends on NET && ISDN
-
-config ISDN_CAPI
- tristate "CAPI2.0 support"
- depends on ISDN
+menuconfig ISDN_CAPI
+ tristate "CAPI 2.0 subsystem"
help
This provides the CAPI (Common ISDN Application Programming
Interface, a standard making it easy for programs to access ISDN
hardware, see <http://www.capi.org/>. This is needed for AVM's set
of active ISDN controllers like B1, T1, M1.
+if ISDN_CAPI
+
source "drivers/isdn/capi/Kconfig"
source "drivers/isdn/hardware/Kconfig"
-endmenu
+endif # ISDN_CAPI
+endif # ISDN
diff --git a/drivers/isdn/act2000/Kconfig b/drivers/isdn/act2000/Kconfig
index 78e6ad8d57c5..3fc1a5434ef7 100644
--- a/drivers/isdn/act2000/Kconfig
+++ b/drivers/isdn/act2000/Kconfig
@@ -3,7 +3,7 @@
#
config ISDN_DRV_ACT2000
tristate "IBM Active 2000 support"
- depends on ISDN_I4L && ISA
+ depends on ISA
help
Say Y here if you have an IBM Active 2000 ISDN card. In order to use
this card, additional firmware is necessary, which has to be loaded
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c92f9d764fce..e1afd60924fb 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -3,7 +3,6 @@
#
config ISDN_DRV_AVMB1_VERBOSE_REASON
bool "Verbose reason code reporting"
- depends on ISDN_CAPI
default y
help
If you say Y here, the CAPI drivers will give verbose reasons for
@@ -12,7 +11,6 @@ config ISDN_DRV_AVMB1_VERBOSE_REASON
config CAPI_TRACE
bool "CAPI trace support"
- depends on ISDN_CAPI
default y
help
If you say Y here, the kernelcapi driver can make verbose traces
@@ -23,7 +21,7 @@ config CAPI_TRACE
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
- depends on ISDN_CAPI && EXPERIMENTAL
+ depends on EXPERIMENTAL
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
@@ -33,7 +31,6 @@ config ISDN_CAPI_MIDDLEWARE
config ISDN_CAPI_CAPI20
tristate "CAPI2.0 /dev/capi support"
- depends on ISDN_CAPI
help
This option will provide the CAPI 2.0 interface to userspace
applications via /dev/capi20. Applications should use the
@@ -56,7 +53,7 @@ config ISDN_CAPI_CAPIFS
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
- depends on ISDN_CAPI && ISDN_I4L
+ depends on ISDN_I4L
help
This option provides the glue code to hook up CAPI driven cards to
the legacy isdn4linux link layer. If you have a card which is
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 81661b8bd3a8..f449daef3eed 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -549,7 +549,7 @@ static int handle_minor_send(struct capiminor *mp)
capimsg_setu8 (skb->data, 5, CAPI_REQ);
capimsg_setu16(skb->data, 6, mp->msgid++);
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
- capimsg_setu32(skb->data, 12, (u32) skb->data); /* Data32 */
+	capimsg_setu32(skb->data, 12, (u32)(long)skb->data); /* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
capimsg_setu16(skb->data, 18, datahandle);
capimsg_setu16(skb->data, 20, 0); /* Flags */
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 3ed34f7a1c4f..9f73bc2727c2 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -258,7 +258,7 @@ static void recv_handler(struct work_struct *work)
if ((!ap) || (ap->release_in_progress))
return;
- down(&ap->recv_sem);
+ mutex_lock(&ap->recv_mtx);
while ((skb = skb_dequeue(&ap->recv_queue))) {
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
ap->nrecvdatapkt++;
@@ -267,7 +267,7 @@ static void recv_handler(struct work_struct *work)
ap->recv_message(ap, skb);
}
- up(&ap->recv_sem);
+ mutex_unlock(&ap->recv_mtx);
}
void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
@@ -547,7 +547,7 @@ u16 capi20_register(struct capi20_appl *ap)
ap->nsentctlpkt = 0;
ap->nsentdatapkt = 0;
ap->callback = NULL;
- init_MUTEX(&ap->recv_sem);
+ mutex_init(&ap->recv_mtx);
skb_queue_head_init(&ap->recv_queue);
INIT_WORK(&ap->recv_work, recv_handler);
ap->release_in_progress = 0;
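The recv_sem/recv_mtx hunks above replace a semaphore that was only ever used as a mutex with a real struct mutex. A minimal sketch of the same conversion on a hypothetical structure (in the patch the field lives in struct capi20_appl):

#include <linux/mutex.h>

struct example_appl {
	struct mutex recv_mtx;			/* was: struct semaphore recv_sem */
};

static void example_init(struct example_appl *ap)
{
	mutex_init(&ap->recv_mtx);		/* was: init_MUTEX(&ap->recv_sem) */
}

static void example_drain(struct example_appl *ap)
{
	mutex_lock(&ap->recv_mtx);		/* was: down(&ap->recv_sem) */
	/* ... dequeue and deliver queued messages ... */
	mutex_unlock(&ap->recv_mtx);		/* was: up(&ap->recv_sem) */
}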
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 31f4fd8b8b0a..845a797b0030 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -243,36 +243,15 @@ create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
// ---------------------------------------------------------------------------
-
-static __inline__ struct capi_driver *capi_driver_get_idx(loff_t pos)
-{
- struct capi_driver *drv = NULL;
- struct list_head *l;
- loff_t i;
-
- i = 0;
- list_for_each(l, &capi_drivers) {
- drv = list_entry(l, struct capi_driver, list);
- if (i++ == pos)
- return drv;
- }
- return NULL;
-}
-
static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
{
- struct capi_driver *drv;
read_lock(&capi_drivers_list_lock);
- drv = capi_driver_get_idx(*pos);
- return drv;
+ return seq_list_start(&capi_drivers, *pos);
}
static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct capi_driver *drv = (struct capi_driver *)v;
- ++*pos;
- if (drv->list.next == &capi_drivers) return NULL;
- return list_entry(drv->list.next, struct capi_driver, list);
+ return seq_list_next(v, &capi_drivers, pos);
}
static void capi_driver_stop(struct seq_file *seq, void *v)
@@ -282,7 +261,8 @@ static void capi_driver_stop(struct seq_file *seq, void *v)
static int capi_driver_show(struct seq_file *seq, void *v)
{
- struct capi_driver *drv = (struct capi_driver *)v;
+ struct capi_driver *drv = list_entry(v, struct capi_driver, list);
+
seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
return 0;
}
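The conversion above drops the open-coded index walk in favour of the generic seq_list_start()/seq_list_next() helpers, which iterate a plain list_head on behalf of a seq_file. A minimal sketch of that pattern for a hypothetical list (struct item, item_list and item_lock are invented for illustration):

#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	const char *name;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

static void *item_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&item_lock);			/* held across the whole walk */
	return seq_list_start(&item_list, *pos);
}

static void *item_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &item_list, pos);
}

static void item_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock(&item_lock);
}

static int item_seq_show(struct seq_file *seq, void *v)
{
	struct item *it = list_entry(v, struct item, list);

	seq_printf(seq, "%s\n", it->name);
	return 0;
}

static const struct seq_operations item_seq_ops = {
	.start	= item_seq_start,
	.next	= item_seq_next,
	.stop	= item_seq_stop,
	.show	= item_seq_show,
};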
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index bcbb6502a773..0017e50c6948 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,9 +1,5 @@
-menu "Siemens Gigaset"
- depends on ISDN_I4L
-
-config ISDN_DRV_GIGASET
+menuconfig ISDN_DRV_GIGASET
tristate "Siemens Gigaset support (isdn)"
- depends on ISDN_I4L
select CRC_CCITT
select BITREVERSE
help
@@ -55,6 +51,4 @@ config GIGASET_UNDOCREQ
features like configuration mode of M105, say yes. If you
care about your device, say no.
-endif
-
-endmenu
+endif # ISDN_DRV_GIGASET != n
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 139f19797713..30d028d24955 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -2,7 +2,6 @@
# ISDN hardware drivers
#
comment "CAPI hardware drivers"
- depends on NET && ISDN && ISDN_CAPI
source "drivers/isdn/hardware/avm/Kconfig"
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 29a32a8830c0..5dbcbe3a54a6 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -2,23 +2,22 @@
# ISDN AVM drivers
#
-menu "Active AVM cards"
- depends on NET && ISDN && ISDN_CAPI!=n
-
-config CAPI_AVM
- bool "Support AVM cards"
+menuconfig CAPI_AVM
+ bool "Active AVM cards"
help
Enable support for AVM active ISDN cards.
+if CAPI_AVM
+
config ISDN_DRV_AVMB1_B1ISA
tristate "AVM B1 ISA support"
- depends on CAPI_AVM && ISDN_CAPI && ISA
+ depends on ISA
help
Enable support for the ISA version of the AVM B1 card.
config ISDN_DRV_AVMB1_B1PCI
tristate "AVM B1 PCI support"
- depends on CAPI_AVM && ISDN_CAPI && PCI
+ depends on PCI
help
Enable support for the PCI version of the AVM B1 card.
@@ -30,14 +29,13 @@ config ISDN_DRV_AVMB1_B1PCIV4
config ISDN_DRV_AVMB1_T1ISA
tristate "AVM T1/T1-B ISA support"
- depends on CAPI_AVM && ISDN_CAPI && ISA
+ depends on ISA
help
Enable support for the AVM T1 T1B card.
Note: This is a PRI card and handle 30 B-channels.
config ISDN_DRV_AVMB1_B1PCMCIA
tristate "AVM B1/M1/M2 PCMCIA support"
- depends on CAPI_AVM && ISDN_CAPI
help
Enable support for the PCMCIA version of the AVM B1 card.
@@ -50,17 +48,16 @@ config ISDN_DRV_AVMB1_AVM_CS
config ISDN_DRV_AVMB1_T1PCI
tristate "AVM T1/T1-B PCI support"
- depends on CAPI_AVM && ISDN_CAPI && PCI
+ depends on PCI
help
Enable support for the AVM T1 T1B card.
Note: This is a PRI card and handle 30 B-channels.
config ISDN_DRV_AVMB1_C4
tristate "AVM C4/C2 support"
- depends on CAPI_AVM && ISDN_CAPI && PCI
+ depends on PCI
help
Enable support for the AVM C4/C2 PCI cards.
These cards handle 4/2 BRI ISDN lines (8/4 channels).
-endmenu
-
+endif # CAPI_AVM
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
index 01d4afd9d843..6082b6a5ced3 100644
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ b/drivers/isdn/hardware/eicon/Kconfig
@@ -2,52 +2,50 @@
# ISDN DIVAS Eicon driver
#
-menu "Active Eicon DIVA Server cards"
- depends on NET && ISDN && ISDN_CAPI!=n
-
-config CAPI_EICON
- bool "Support Eicon cards"
+menuconfig CAPI_EICON
+ bool "Active Eicon DIVA Server cards"
help
Enable support for Eicon Networks active ISDN cards.
+if CAPI_EICON
+
config ISDN_DIVAS
tristate "Support Eicon DIVA Server cards"
- depends on CAPI_EICON && PROC_FS && PCI
+ depends on PROC_FS && PCI
help
Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
In order to use this card, additional firmware is necessary, which
has to be downloaded into the card using the divactrl utility.
+if ISDN_DIVAS
+
config ISDN_DIVAS_BRIPCI
bool "DIVA Server BRI/PCI support"
- depends on ISDN_DIVAS
help
Enable support for DIVA Server BRI-PCI.
config ISDN_DIVAS_PRIPCI
bool "DIVA Server PRI/PCI support"
- depends on ISDN_DIVAS
help
Enable support for DIVA Server PRI-PCI.
config ISDN_DIVAS_DIVACAPI
tristate "DIVA CAPI2.0 interface support"
- depends on ISDN_DIVAS && ISDN_CAPI
help
You need this to provide the CAPI interface
for DIVA Server cards.
config ISDN_DIVAS_USERIDI
tristate "DIVA User-IDI interface support"
- depends on ISDN_DIVAS
help
Enable support for user-mode IDI interface.
config ISDN_DIVAS_MAINT
tristate "DIVA Maint driver support"
- depends on ISDN_DIVAS && m
+ depends on m
help
Enable Divas Maintenance driver.
-endmenu
+endif # ISDN_DIVAS
+endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index 4cbc68cf4dba..db87d5105422 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -106,6 +106,7 @@ static void um_new_card(DESCRIPTOR * d)
} else {
DBG_ERR(("could not create user mode idi card %d",
adapter_nr));
+ diva_os_free(0, card);
}
}
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index 12d91fb9f8cb..a3b945ac3256 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -1,6 +1,5 @@
menu "Passive cards"
- depends on ISDN_I4L
config ISDN_DRV_HISAX
tristate "HiSax SiemensChipSet driver support"
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index 871310d56a6e..3d1bdc8431ad 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -255,54 +255,38 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return (0);
}
-static struct pci_dev *dev_a4t __devinitdata = NULL;
+static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
+ struct IsdnCardState *cs,
+ u_int *found,
+ u_int *pci_memaddr)
+{
+ u16 sub_sys;
+ u16 sub_vendor;
+
+ sub_vendor = dev_a4t->subsystem_vendor;
+ sub_sys = dev_a4t->subsystem_device;
+ if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
+ if (pci_enable_device(dev_a4t))
+ return (0); /* end loop & function */
+ *found = 1;
+ *pci_memaddr = pci_resource_start(dev_a4t, 0);
+ cs->irq = dev_a4t->irq;
+ return (1); /* end loop */
+ }
-int __devinit
-setup_bkm_a4t(struct IsdnCard *card)
+ return (-1); /* continue looping */
+}
+
+static int __devinit a4t_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs,
+ u_int pci_memaddr)
{
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
- u_int pci_memaddr = 0, found = 0;
I20_REGISTER_FILE *pI20_Regs;
-#ifdef CONFIG_PCI
-#endif
-
- strcpy(tmp, bkm_a4t_revision);
- printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ == ISDN_CTYPE_BKM_A4T) {
- cs->subtyp = BKM_A4T;
- } else
- return (0);
-#ifdef CONFIG_PCI
- while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
- PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
- u16 sub_sys;
- u16 sub_vendor;
-
- sub_vendor = dev_a4t->subsystem_vendor;
- sub_sys = dev_a4t->subsystem_device;
- if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
- if (pci_enable_device(dev_a4t))
- return(0);
- found = 1;
- pci_memaddr = pci_resource_start(dev_a4t, 0);
- cs->irq = dev_a4t->irq;
- break;
- }
- }
- if (!found) {
- printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
- return (0);
- }
if (!cs->irq) { /* IRQ range check ?? */
printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]);
return (0);
}
- if (!pci_memaddr) {
- printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
- return (0);
- }
cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096);
/* Check suspecious address */
pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
@@ -317,11 +301,7 @@ setup_bkm_a4t(struct IsdnCard *card)
cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET;
cs->hw.ax.isac_ale = GCS_1;
cs->hw.ax.jade_ale = GCS_3;
-#else
- printk(KERN_WARNING "HiSax: %s: NO_PCI_BIOS\n", CardType[card->typ]);
- printk(KERN_WARNING "HiSax: %s: unable to configure\n", CardType[card->typ]);
- return (0);
-#endif /* CONFIG_PCI */
+
printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n",
CardType[card->typ], cs->hw.ax.base, cs->irq);
@@ -339,5 +319,43 @@ setup_bkm_a4t(struct IsdnCard *card)
ISACVersion(cs, "Telekom A4T:");
/* Jade version */
JadeVersion(cs, "Telekom A4T:");
+
return (1);
}
+
+static struct pci_dev *dev_a4t __devinitdata = NULL;
+
+int __devinit
+setup_bkm_a4t(struct IsdnCard *card)
+{
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+ u_int pci_memaddr = 0, found = 0;
+ int ret;
+
+ strcpy(tmp, bkm_a4t_revision);
+ printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ == ISDN_CTYPE_BKM_A4T) {
+ cs->subtyp = BKM_A4T;
+ } else
+ return (0);
+
+ while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
+ PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
+ ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
+ if (!ret)
+ return (0);
+ if (ret > 0)
+ break;
+ }
+ if (!found) {
+ printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
+ return (0);
+ }
+ if (!pci_memaddr) {
+ printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
+ return (0);
+ }
+
+ return a4t_cs_init(card, cs, pci_memaddr);
+}
diff --git a/drivers/isdn/hisax/bkm_a8.c b/drivers/isdn/hisax/bkm_a8.c
index 340310645346..6339bb443f62 100644
--- a/drivers/isdn/hisax/bkm_a8.c
+++ b/drivers/isdn/hisax/bkm_a8.c
@@ -287,7 +287,6 @@ setup_sct_quadro(struct IsdnCard *card)
#ifdef CONFIG_PCI
struct IsdnCardState *cs = card->cs;
char tmp[64];
- u_char pci_rev_id;
u_int found = 0;
u_int pci_ioaddr1, pci_ioaddr2, pci_ioaddr3, pci_ioaddr4, pci_ioaddr5;
@@ -335,8 +334,7 @@ setup_sct_quadro(struct IsdnCard *card)
}
#ifdef ATTEMPT_PCI_REMAPPING
/* HACK: PLX revision 1 bug: PLX address bit 7 must not be set */
- pci_read_config_byte(dev_a8, PCI_REVISION_ID, &pci_rev_id);
- if ((pci_ioaddr1 & 0x80) && (pci_rev_id == 1)) {
+ if ((pci_ioaddr1 & 0x80) && (dev_a8->revision == 1)) {
printk(KERN_WARNING "HiSax: %s (%s): PLX rev 1, remapping required!\n",
CardType[card->typ],
sct_quadro_subtypes[cs->subtyp]);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 8d53a7fd2671..97097ef3491e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -361,11 +361,11 @@ module_param_array(io1, int, NULL, 0);
int nrcards;
-extern char *l1_revision;
-extern char *l2_revision;
-extern char *l3_revision;
-extern char *lli_revision;
-extern char *tei_revision;
+extern const char *l1_revision;
+extern const char *l2_revision;
+extern const char *l3_revision;
+extern const char *lli_revision;
+extern const char *tei_revision;
char *HiSax_getrev(const char *revision)
{
@@ -847,95 +847,10 @@ static int init_card(struct IsdnCardState *cs)
return 3;
}
-static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
+static int hisax_cs_setup_card(struct IsdnCard *card)
{
- int ret = 0;
- struct IsdnCard *card = cards + cardnr;
- struct IsdnCardState *cs;
-
- cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
- if (!cs) {
- printk(KERN_WARNING
- "HiSax: No memory for IsdnCardState(card %d)\n",
- cardnr + 1);
- goto out;
- }
- card->cs = cs;
- spin_lock_init(&cs->statlock);
- spin_lock_init(&cs->lock);
- cs->chanlimit = 2; /* maximum B-channel number */
- cs->logecho = 0; /* No echo logging */
- cs->cardnr = cardnr;
- cs->debug = L1_DEB_WARN;
- cs->HW_Flags = 0;
- cs->busy_flag = busy_flag;
- cs->irq_flags = I4L_IRQ_FLAG;
-#if TEI_PER_CARD
- if (card->protocol == ISDN_PTYPE_NI1)
- test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#else
- test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
-#endif
- cs->protocol = card->protocol;
-
- if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
- printk(KERN_WARNING
- "HiSax: Card Type %d out of range\n", card->typ);
- goto outf_cs;
- }
- if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
- goto outf_cs;
- }
- if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
- printk(KERN_WARNING
- "HiSax: No memory for status_buf(card %d)\n",
- cardnr + 1);
- goto outf_dlog;
- }
- cs->stlist = NULL;
- cs->status_read = cs->status_buf;
- cs->status_write = cs->status_buf;
- cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
- cs->typ = card->typ;
-#ifdef MODULE
- cs->iif.owner = lockowner;
-#endif
- strcpy(cs->iif.id, id);
- cs->iif.channels = 2;
- cs->iif.maxbufsize = MAX_DATA_SIZE;
- cs->iif.hl_hdrlen = MAX_HEADER_LEN;
- cs->iif.features =
- ISDN_FEATURE_L2_X75I |
- ISDN_FEATURE_L2_HDLC |
- ISDN_FEATURE_L2_HDLC_56K |
- ISDN_FEATURE_L2_TRANS |
- ISDN_FEATURE_L3_TRANS |
-#ifdef CONFIG_HISAX_1TR6
- ISDN_FEATURE_P_1TR6 |
-#endif
-#ifdef CONFIG_HISAX_EURO
- ISDN_FEATURE_P_EURO |
-#endif
-#ifdef CONFIG_HISAX_NI1
- ISDN_FEATURE_P_NI1 |
-#endif
- 0;
+ int ret;
- cs->iif.command = HiSax_command;
- cs->iif.writecmd = NULL;
- cs->iif.writebuf_skb = HiSax_writebuf_skb;
- cs->iif.readstat = HiSax_readstatus;
- register_isdn(&cs->iif);
- cs->myid = cs->iif.channels;
- printk(KERN_INFO
- "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
- (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
- (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
- (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
- (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
- "NONE", cs->iif.id, cs->myid);
switch (card->typ) {
#if CARD_TELES0
case ISDN_CTYPE_16_0:
@@ -1094,13 +1009,115 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
printk(KERN_WARNING
"HiSax: Support for %s Card not selected\n",
CardType[card->typ]);
- ll_unload(cs);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
+ struct IsdnCardState **cs_out, int *busy_flag,
+ struct module *lockowner)
+{
+ struct IsdnCardState *cs;
+
+ *cs_out = NULL;
+
+ cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
+ if (!cs) {
+ printk(KERN_WARNING
+ "HiSax: No memory for IsdnCardState(card %d)\n",
+ cardnr + 1);
+ goto out;
+ }
+ card->cs = cs;
+ spin_lock_init(&cs->statlock);
+ spin_lock_init(&cs->lock);
+ cs->chanlimit = 2; /* maximum B-channel number */
+ cs->logecho = 0; /* No echo logging */
+ cs->cardnr = cardnr;
+ cs->debug = L1_DEB_WARN;
+ cs->HW_Flags = 0;
+ cs->busy_flag = busy_flag;
+ cs->irq_flags = I4L_IRQ_FLAG;
+#if TEI_PER_CARD
+ if (card->protocol == ISDN_PTYPE_NI1)
+ test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
+#else
+ test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
+#endif
+ cs->protocol = card->protocol;
+
+ if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
+ printk(KERN_WARNING
+ "HiSax: Card Type %d out of range\n", card->typ);
goto outf_cs;
}
- if (!ret) {
- ll_unload(cs);
+ if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
+ printk(KERN_WARNING
+ "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
goto outf_cs;
}
+ if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
+ printk(KERN_WARNING
+ "HiSax: No memory for status_buf(card %d)\n",
+ cardnr + 1);
+ goto outf_dlog;
+ }
+ cs->stlist = NULL;
+ cs->status_read = cs->status_buf;
+ cs->status_write = cs->status_buf;
+ cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
+ cs->typ = card->typ;
+#ifdef MODULE
+ cs->iif.owner = lockowner;
+#endif
+ strcpy(cs->iif.id, id);
+ cs->iif.channels = 2;
+ cs->iif.maxbufsize = MAX_DATA_SIZE;
+ cs->iif.hl_hdrlen = MAX_HEADER_LEN;
+ cs->iif.features =
+ ISDN_FEATURE_L2_X75I |
+ ISDN_FEATURE_L2_HDLC |
+ ISDN_FEATURE_L2_HDLC_56K |
+ ISDN_FEATURE_L2_TRANS |
+ ISDN_FEATURE_L3_TRANS |
+#ifdef CONFIG_HISAX_1TR6
+ ISDN_FEATURE_P_1TR6 |
+#endif
+#ifdef CONFIG_HISAX_EURO
+ ISDN_FEATURE_P_EURO |
+#endif
+#ifdef CONFIG_HISAX_NI1
+ ISDN_FEATURE_P_NI1 |
+#endif
+ 0;
+
+ cs->iif.command = HiSax_command;
+ cs->iif.writecmd = NULL;
+ cs->iif.writebuf_skb = HiSax_writebuf_skb;
+ cs->iif.readstat = HiSax_readstatus;
+ register_isdn(&cs->iif);
+ cs->myid = cs->iif.channels;
+
+ *cs_out = cs;
+ return 1; /* success */
+
+outf_dlog:
+ kfree(cs->dlog);
+outf_cs:
+ kfree(cs);
+ card->cs = NULL;
+out:
+ return 0; /* error */
+}
+
+static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ int ret;
+
if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
ll_unload(cs);
@@ -1129,25 +1146,53 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
}
if (ret) {
closecard(cardnr);
- ret = 0;
goto outf_cs;
}
init_tei(cs, cs->protocol);
ret = CallcNewChan(cs);
if (ret) {
closecard(cardnr);
- ret = 0;
goto outf_cs;
}
/* ISAR needs firmware download first */
if (!test_bit(HW_ISAR, &cs->HW_Flags))
ll_run(cs, 0);
- ret = 1;
+ return 1;
+
+outf_cs:
+ kfree(cs);
+ card->cs = NULL;
+ return 0;
+}
+
+static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
+{
+ int ret;
+ struct IsdnCard *card = cards + cardnr;
+ struct IsdnCardState *cs;
+
+ ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner);
+ if (!ret)
+ return 0;
+
+ printk(KERN_INFO
+ "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
+ (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
+ (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
+ (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
+ (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
+ "NONE", cs->iif.id, cs->myid);
+
+ ret = hisax_cs_setup_card(card);
+ if (!ret) {
+ ll_unload(cs);
+ goto outf_cs;
+ }
+
+ ret = hisax_cs_setup(cardnr, card, cs);
goto out;
- outf_dlog:
- kfree(cs->dlog);
outf_cs:
kfree(cs);
card->cs = NULL;
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b45de9d408d1..b73027ff50e8 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,98 +300,72 @@ enpci_interrupt(int intno, void *dev_id)
return IRQ_HANDLED;
}
-
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-/* called by config.c */
-int __devinit
-setup_enternow_pci(struct IsdnCard *card)
+static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
+ struct IsdnCardState *cs)
{
- int bytecnt;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-
-#ifdef CONFIG_PCI
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
- strcpy(tmp, enternow_pci_rev);
- printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_ENTERNOW)
+ if (pci_enable_device(dev_netjet))
return(0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
- for ( ;; )
- {
- if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- if (pci_enable_device(dev_netjet))
- return(0);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
- return(0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
- return(0);
- }
- /* checks Sub-Vendor ID because system crashes with Traverse-Card */
- if ((dev_netjet->subsystem_vendor != 0x55) ||
- (dev_netjet->subsystem_device != 0x02)) {
- printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
- printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
- return(0);
- }
- } else {
- printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
- return(0);
- }
-
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
-
- /* Reset an */
- cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- /* 20 ms Pause */
- mdelay(20);
+ cs->irq = dev_netjet->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
+ return(0);
+ }
+ cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+ if (!cs->hw.njet.base) {
+ printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
+ return(0);
+ }
+	/* check the Sub-Vendor ID because the system crashes with a Traverse card */
+ if ((dev_netjet->subsystem_vendor != 0x55) ||
+ (dev_netjet->subsystem_device != 0x02)) {
+ printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
+ printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
+ return(0);
+ }
- cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
- outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
- mdelay(10);
+ return(1);
+}
- cs->hw.njet.auxd = 0x00; // war 0xc0
- cs->hw.njet.dmactrl = 0;
+static void __devinit en_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+ cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
- outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
- outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
- outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
+	/* Reset on */
+	cs->hw.njet.ctrl_reg = 0x07; // changed from 0xff
+	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
+	/* 20 ms pause */
+ mdelay(20);
- break;
- }
-#else
+ cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
+ outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
+ mdelay(10);
- printk(KERN_WARNING "enter:now PCI: NO_PCI_BIOS\n");
- printk(KERN_WARNING "enter:now PCI: unable to config Formula-n enter:now ISDN PCI ab\n");
- return (0);
+	cs->hw.njet.auxd = 0x00; // was 0xc0
+ cs->hw.njet.dmactrl = 0;
-#endif /* CONFIG_PCI */
+ outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
+ outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
+ outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
+}
- bytecnt = 256;
+static int __devinit en_cs_init_rest(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ const int bytecnt = 256;
printk(KERN_INFO
"enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
cs->hw.njet.base, cs->irq);
if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
printk(KERN_WARNING
- "HiSax: %s config port %lx-%lx already in use\n",
- CardType[card->typ],
- cs->hw.njet.base,
- cs->hw.njet.base + bytecnt);
+ "HiSax: enter:now config port %lx-%lx already in use\n",
+ cs->hw.njet.base,
+ cs->hw.njet.base + bytecnt);
return (0);
}
+
setup_Amd7930(cs);
cs->hw.njet.last_is0 = 0;
/* macro rByteAMD */
@@ -407,5 +381,44 @@ setup_enternow_pci(struct IsdnCard *card)
cs->irq_func = &enpci_interrupt;
cs->irq_flags |= IRQF_SHARED;
- return (1);
+ return (1);
+}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+/* called by config.c */
+int __devinit
+setup_enternow_pci(struct IsdnCard *card)
+{
+ int ret;
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+
+ strcpy(tmp, enternow_pci_rev);
+ printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ != ISDN_CTYPE_ENTERNOW)
+ return(0);
+ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+ for ( ;; )
+ {
+ if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+ PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
+ ret = en_pci_probe(dev_netjet, cs);
+ if (!ret)
+ return(0);
+ } else {
+ printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
+ return(0);
+ }
+
+ en_cs_init(card, cs);
+ break;
+ }
+
+ return en_cs_init_rest(card, cs);
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8a48a3ce0a55..077080aca799 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -6,7 +6,7 @@
* based on existing driver for CCD hfc ISA cards
* Copyright by Werner Cornelius <werner@isdn4linux.de>
* by Karsten Keil <keil@isdn4linux.de>
- *
+ *
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
@@ -67,8 +67,6 @@ static const PCI_ENTRY id_list[] =
};
-#ifdef CONFIG_PCI
-
/******************************************/
/* free hardware resources used by driver */
/******************************************/
@@ -237,7 +235,7 @@ static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
if (fifo_state)
cs->hw.hfcpci.fifo_en |= fifo_state;
Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}
+}
/***************************************/
/* clear the desired B-channel tx fifo */
@@ -263,7 +261,7 @@ static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
if (fifo_state)
cs->hw.hfcpci.fifo_en |= fifo_state;
Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
-}
+}
/*********************************************/
/* read a complete B-frame out of the buffer */
@@ -511,7 +509,6 @@ main_rec_hfcpci(struct BCState *bcs)
test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
if (count && receive)
goto Begin;
- return;
}
/**************************/
@@ -582,7 +579,6 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
dev_kfree_skb_any(cs->tx_skb);
cs->tx_skb = NULL;
- return;
}
/**************************/
@@ -729,7 +725,6 @@ hfcpci_fill_fifo(struct BCState *bcs)
dev_kfree_skb_any(bcs->tx_skb);
bcs->tx_skb = NULL;
test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
- return;
}
/**********************************************/
@@ -924,7 +919,6 @@ receive_emsg(struct IsdnCardState *cs)
test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
if (count && receive)
goto Begin;
- return;
} /* receive_emsg */
/*********************/
@@ -1350,13 +1344,13 @@ mode_hfcpci(struct BCState *bcs, int mode, int bc)
cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
}
if (fifo2) {
- cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
+ cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
cs->hw.hfcpci.ctmt &= ~2;
cs->hw.hfcpci.conn &= ~0x18;
} else {
- cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
+ cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
cs->hw.hfcpci.ctmt &= ~1;
@@ -1642,8 +1636,6 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
/* this variable is used as card index when more than one cards are present */
static struct pci_dev *dev_hfcpci __devinitdata = NULL;
-#endif /* CONFIG_PCI */
-
int __devinit
setup_hfcpci(struct IsdnCard *card)
{
@@ -1656,96 +1648,99 @@ setup_hfcpci(struct IsdnCard *card)
#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
+
strcpy(tmp, hfcpci_revision);
printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
-#ifdef CONFIG_PCI
+
cs->hw.hfcpci.int_s1 = 0;
cs->dc.hfcpci.ph_state = 0;
cs->hw.hfcpci.fifo = 255;
- if (cs->typ == ISDN_CTYPE_HFC_PCI) {
- i = 0;
- while (id_list[i].vendor_id) {
- tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
- id_list[i].device_id,
- dev_hfcpci);
- i++;
- if (tmp_hfcpci) {
- if (pci_enable_device(tmp_hfcpci))
- continue;
- pci_set_master(tmp_hfcpci);
- if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
- continue;
- else
- break;
- }
- }
-
+ if (cs->typ != ISDN_CTYPE_HFC_PCI)
+ return(0);
+
+ i = 0;
+ while (id_list[i].vendor_id) {
+ tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
+ id_list[i].device_id,
+ dev_hfcpci);
+ i++;
if (tmp_hfcpci) {
- i--;
- dev_hfcpci = tmp_hfcpci; /* old device */
- cs->hw.hfcpci.dev = dev_hfcpci;
- cs->irq = dev_hfcpci->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return (0);
- }
- cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
- printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
- } else {
- printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
- return (0);
- }
- if (!cs->hw.hfcpci.pci_io) {
- printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return (0);
- }
- /* Allocate memory for FIFOS */
- /* Because the HFC-PCI needs a 32K physical alignment, we */
- /* need to allocate the double mem and align the address */
- if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
- printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
- return 0;
+ if (pci_enable_device(tmp_hfcpci))
+ continue;
+ pci_set_master(tmp_hfcpci);
+ if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
+ continue;
+ else
+ break;
}
- cs->hw.hfcpci.fifos = (void *)
- (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
- pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
- cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
- printk(KERN_INFO
- "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
- cs->hw.hfcpci.pci_io,
- cs->hw.hfcpci.fifos,
- (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
- cs->irq, HZ);
- spin_lock_irqsave(&cs->lock, flags);
- pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
- cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */
- cs->hw.hfcpci.int_m1 = 0;
- Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
- Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
- /* At this point the needed PCI config is done */
- /* fifos are still not enabled */
- INIT_WORK(&cs->tqueue, hfcpci_bh);
- cs->setstack_d = setstack_hfcpci;
- cs->BC_Send_Data = &hfcpci_send_data;
- cs->readisac = NULL;
- cs->writeisac = NULL;
- cs->readisacfifo = NULL;
- cs->writeisacfifo = NULL;
- cs->BC_Read_Reg = NULL;
- cs->BC_Write_Reg = NULL;
- cs->irq_func = &hfcpci_interrupt;
- cs->irq_flags |= IRQF_SHARED;
- cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
- cs->hw.hfcpci.timer.data = (long) cs;
- init_timer(&cs->hw.hfcpci.timer);
- cs->cardmsg = &hfcpci_card_msg;
- cs->auxcmd = &hfcpci_auxcmd;
- spin_unlock_irqrestore(&cs->lock, flags);
- return (1);
- } else
- return (0); /* no valid card type */
-#else
- printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n");
- return (0);
-#endif /* CONFIG_PCI */
+ }
+
+ if (!tmp_hfcpci) {
+ printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
+ return (0);
+ }
+
+ i--;
+ dev_hfcpci = tmp_hfcpci; /* old device */
+ cs->hw.hfcpci.dev = dev_hfcpci;
+ cs->irq = dev_hfcpci->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
+ return (0);
+ }
+ cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
+ printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
+
+ if (!cs->hw.hfcpci.pci_io) {
+ printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
+ return (0);
+ }
+ /* Allocate memory for FIFOS */
+	/* Because the HFC-PCI needs a 32K physical alignment, we */
+	/* need to allocate double the memory and align the address */
+ if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
+ printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
+ return 0;
+ }
+ cs->hw.hfcpci.fifos = (void *)
+ (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
+ pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
+ cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
+ printk(KERN_INFO
+ "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
+ cs->hw.hfcpci.pci_io,
+ cs->hw.hfcpci.fifos,
+ (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
+ cs->irq, HZ);
+
+ spin_lock_irqsave(&cs->lock, flags);
+
+ pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
+	cs->hw.hfcpci.int_m2 = 0;	/* disable all interrupts */
+ cs->hw.hfcpci.int_m1 = 0;
+ Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
+ Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
+ /* At this point the needed PCI config is done */
+ /* fifos are still not enabled */
+
+ INIT_WORK(&cs->tqueue, hfcpci_bh);
+ cs->setstack_d = setstack_hfcpci;
+ cs->BC_Send_Data = &hfcpci_send_data;
+ cs->readisac = NULL;
+ cs->writeisac = NULL;
+ cs->readisacfifo = NULL;
+ cs->writeisacfifo = NULL;
+ cs->BC_Read_Reg = NULL;
+ cs->BC_Write_Reg = NULL;
+ cs->irq_func = &hfcpci_interrupt;
+ cs->irq_flags |= IRQF_SHARED;
+ cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
+ cs->hw.hfcpci.timer.data = (long) cs;
+ init_timer(&cs->hw.hfcpci.timer);
+ cs->cardmsg = &hfcpci_card_msg;
+ cs->auxcmd = &hfcpci_auxcmd;
+
+ spin_unlock_irqrestore(&cs->lock, flags);
+
+ return (1);
}
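The FIFO setup kept above relies on an allocate-double-then-align trick: the HFC-PCI needs its 32 KB FIFO area on a 32 KB physical boundary, so the driver kmallocs 64 KB and rounds the address up to the next 32 KB boundary, which always leaves a full aligned 32 KB window inside the allocation. A standalone userspace sketch of the same arithmetic, for illustration only:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define ALIGN_32K 0x8000UL

int main(void)
{
	void *raw = malloc(2 * ALIGN_32K);	/* kmalloc(65536, ...) in the driver */
	uintptr_t aligned;

	if (!raw)
		return 1;
	/* round down to a 32K boundary, then step up one boundary:
	 * the same (& ~0x7FFF) + 0x8000 computation as in setup_hfcpci() */
	aligned = (((uintptr_t)raw) & ~(ALIGN_32K - 1)) + ALIGN_32K;

	printf("raw=%p aligned=%p\n", raw, (void *)aligned);
	free(raw);
	return 0;
}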
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index c09ffb135330..fa2db87667c8 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,107 +148,87 @@ NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return(0);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-int __devinit
-setup_netjet_s(struct IsdnCard *card)
+static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
+ struct IsdnCardState *cs)
{
- int bytecnt,cfg;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
+ int cfg;
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
- strcpy(tmp, NETjet_S_revision);
- printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NETJET_S)
+ if (pci_enable_device(dev_netjet))
return(0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+ pci_set_master(dev_netjet);
+ cs->irq = dev_netjet->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
+ return(0);
+ }
+ cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+ if (!cs->hw.njet.base) {
+ printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
+ return(0);
+ }
+	/* the TJ300 and TJ320 must be detected, the IRQ handling is different;
+	 * unfortunately the chips use the same device ID, but the TJ320 has
+	 * bit 20 set in the PCI status config register
+ */
+ pci_read_config_dword(dev_netjet, 0x04, &cfg);
+ if (cfg & 0x00100000)
+ cs->subtyp = 1; /* TJ320 */
+ else
+ cs->subtyp = 0; /* TJ300 */
+ /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
+ if ((dev_netjet->subsystem_vendor == 0x55) &&
+ (dev_netjet->subsystem_device == 0x02)) {
+ printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
+ printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
+ return(0);
+ }
+ /* end new code */
-#ifdef CONFIG_PCI
+ return(1);
+}
- for ( ;; )
- {
- if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- if (pci_enable_device(dev_netjet))
- return(0);
- pci_set_master(dev_netjet);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
- return(0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
- return(0);
- }
- /* the TJ300 and TJ320 must be detected, the IRQ handling is different
- * unfortunatly the chips use the same device ID, but the TJ320 has
- * the bit20 in status PCI cfg register set
- */
- pci_read_config_dword(dev_netjet, 0x04, &cfg);
- if (cfg & 0x00100000)
- cs->subtyp = 1; /* TJ320 */
- else
- cs->subtyp = 0; /* TJ300 */
- /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
- if ((dev_netjet->subsystem_vendor == 0x55) &&
- (dev_netjet->subsystem_device == 0x02)) {
- printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
- printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
- return(0);
- }
- /* end new code */
- } else {
- printk(KERN_WARNING "NETjet-S: No PCI card found\n");
- return(0);
- }
+static int __devinit njs_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
+ cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+ cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
+ cs->hw.njet.auxd = 0xC0;
+ cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
+ byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
+ byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
+ byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
- {
- case 0 :
- break;
+ switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
+ {
+ case 0 :
+ return 1; /* end loop */
- case 3 :
- printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
- continue;
+ case 3 :
+ printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
+ return -1; /* continue looping */
- default :
- printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
- return 0;
- }
- break;
+ default :
+ printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
+ return 0; /* end loop & function */
}
-#else
-
- printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
- printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
- return (0);
-
-#endif /* CONFIG_PCI */
+ return 1; /* end loop */
+}
- bytecnt = 256;
+static int __devinit njs_cs_init_rest(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ const int bytecnt = 256;
printk(KERN_INFO
"NETjet-S: %s card configured at %#lx IRQ %d\n",
@@ -273,5 +253,47 @@ setup_netjet_s(struct IsdnCard *card)
cs->irq_func = &netjet_s_interrupt;
cs->irq_flags |= IRQF_SHARED;
ISACVersion(cs, "NETjet-S:");
+
return (1);
}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+int __devinit
+setup_netjet_s(struct IsdnCard *card)
+{
+ int ret;
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+ strcpy(tmp, NETjet_S_revision);
+ printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ != ISDN_CTYPE_NETJET_S)
+ return(0);
+ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+ for ( ;; )
+ {
+ if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+ PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
+ ret = njs_pci_probe(dev_netjet, cs);
+ if (!ret)
+ return(0);
+ } else {
+ printk(KERN_WARNING "NETjet-S: No PCI card found\n");
+ return(0);
+ }
+
+ ret = njs_cs_init(card, cs);
+ if (!ret)
+ return(0);
+ if (ret > 0)
+ break;
+ /* otherwise, ret < 0, continue looping */
+ }
+
+ return njs_cs_init_rest(card, cs);
+}
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 8202cf34ecae..f017d3816b1d 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,93 +128,69 @@ NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return(0);
}
-static struct pci_dev *dev_netjet __devinitdata = NULL;
-
-int __devinit
-setup_netjet_u(struct IsdnCard *card)
+static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
+ struct IsdnCardState *cs)
{
- int bytecnt;
- struct IsdnCardState *cs = card->cs;
- char tmp[64];
-#ifdef CONFIG_PCI
-#endif
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
- strcpy(tmp, NETjet_U_revision);
- printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
- if (cs->typ != ISDN_CTYPE_NETJET_U)
+ if (pci_enable_device(dev_netjet))
return(0);
- test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
-
-#ifdef CONFIG_PCI
+ pci_set_master(dev_netjet);
+ cs->irq = dev_netjet->irq;
+ if (!cs->irq) {
+ printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
+ return(0);
+ }
+ cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
+ if (!cs->hw.njet.base) {
+ printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
+ return(0);
+ }
- for ( ;; )
- {
- if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
- PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
- if (pci_enable_device(dev_netjet))
- return(0);
- pci_set_master(dev_netjet);
- cs->irq = dev_netjet->irq;
- if (!cs->irq) {
- printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
- return(0);
- }
- cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
- if (!cs->hw.njet.base) {
- printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
- return(0);
- }
- } else {
- printk(KERN_WARNING "NETspider-U: No PCI card found\n");
- return(0);
- }
+ return (1);
+}
- cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
- cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
- mdelay(10);
+static int __devinit nju_cs_init(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
+ cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
+ mdelay(10);
- cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
- byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
- mdelay(10);
+ cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
+ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
+ mdelay(10);
- cs->hw.njet.auxd = 0xC0;
- cs->hw.njet.dmactrl = 0;
+ cs->hw.njet.auxd = 0xC0;
+ cs->hw.njet.dmactrl = 0;
- byteout(cs->hw.njet.auxa, 0);
- byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
- byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
- byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
+ byteout(cs->hw.njet.auxa, 0);
+ byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
+ byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
+ byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
- switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
- {
- case 3 :
- break;
+ switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
+ {
+ case 3 :
+ return 1; /* end loop */
- case 0 :
- printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
- continue;
+ case 0 :
+ printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
+ return -1; /* continue looping */
- default :
- printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
- return 0;
- }
- break;
+ default :
+ printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
+ return 0; /* end loop & function */
}
-#else
-
- printk(KERN_WARNING "NETspider-U: NO_PCI_BIOS\n");
- printk(KERN_WARNING "NETspider-U: unable to config NETspider-U PCI\n");
- return (0);
-
-#endif /* CONFIG_PCI */
+ return 1; /* end loop */
+}
- bytecnt = 256;
+static int __devinit nju_cs_init_rest(struct IsdnCard *card,
+ struct IsdnCardState *cs)
+{
+ const int bytecnt = 256;
printk(KERN_INFO
"NETspider-U: PCI card configured at %#lx IRQ %d\n",
@@ -239,5 +215,48 @@ setup_netjet_u(struct IsdnCard *card)
cs->irq_func = &netjet_u_interrupt;
cs->irq_flags |= IRQF_SHARED;
ICCVersion(cs, "NETspider-U:");
+
return (1);
}
+
+static struct pci_dev *dev_netjet __devinitdata = NULL;
+
+int __devinit
+setup_netjet_u(struct IsdnCard *card)
+{
+ int ret;
+ struct IsdnCardState *cs = card->cs;
+ char tmp[64];
+
+#ifdef __BIG_ENDIAN
+#error "not running on big endian machines now"
+#endif
+
+ strcpy(tmp, NETjet_U_revision);
+ printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
+ if (cs->typ != ISDN_CTYPE_NETJET_U)
+ return(0);
+ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
+
+ for ( ;; )
+ {
+ if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
+ PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
+ ret = nju_pci_probe(dev_netjet, cs);
+ if (!ret)
+ return(0);
+ } else {
+ printk(KERN_WARNING "NETspider-U: No PCI card found\n");
+ return(0);
+ }
+
+ ret = nju_cs_init(card, cs);
+ if (!ret)
+ return (0);
+ if (ret > 0)
+ break;
+ /* ret < 0 == continue looping */
+ }
+
+ return nju_cs_init_rest(card, cs);
+}
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 030d1625c5c6..ad06f3cc60fb 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -451,6 +451,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
spin_unlock_irqrestore(&cs->lock, flags);
return(0);
case CARD_RELEASE:
+ if (cs->hw.sedl.bus == SEDL_BUS_PCI)
+ /* disable all IRQ */
+ byteout(cs->hw.sedl.cfg_reg+ 5, 0);
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
spin_lock_irqsave(&cs->lock, flags);
writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
@@ -468,6 +471,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
return(0);
case CARD_INIT:
spin_lock_irqsave(&cs->lock, flags);
+ if (cs->hw.sedl.bus == SEDL_BUS_PCI)
+ /* enable all IRQ */
+ byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
reset_sedlbauer(cs);
if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
clear_pending_isac_ints(cs);
@@ -667,7 +673,7 @@ setup_sedlbauer(struct IsdnCard *card)
byteout(cs->hw.sedl.cfg_reg, 0xff);
byteout(cs->hw.sedl.cfg_reg, 0x00);
byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
- byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
+ byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
mdelay(2);
byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 3ef567b99c74..36778b270c30 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -86,7 +86,6 @@ config ISDN_X25
menu "ISDN feature submodules"
- depends on ISDN
config ISDN_DRV_LOOP
tristate "isdnloop support"
@@ -100,7 +99,6 @@ config ISDN_DRV_LOOP
config ISDN_DIVERSION
tristate "Support isdn diversion services"
- depends on ISDN && ISDN_I4L
help
This option allows you to use some supplementary diversion
services in conjunction with the HiSax driver on an EURO/DSS1
@@ -120,13 +118,11 @@ config ISDN_DIVERSION
endmenu
comment "ISDN4Linux hardware drivers"
- depends on NET && ISDN && ISDN_I4L
source "drivers/isdn/hisax/Kconfig"
menu "Active cards"
- depends on NET && ISDN && ISDN_I4L!=n
source "drivers/isdn/icn/Kconfig"
diff --git a/drivers/isdn/icn/Kconfig b/drivers/isdn/icn/Kconfig
index fcb99f5f0b26..89d15eed765e 100644
--- a/drivers/isdn/icn/Kconfig
+++ b/drivers/isdn/icn/Kconfig
@@ -3,7 +3,7 @@
#
config ISDN_DRV_ICN
tristate "ICN 2B and 4B support"
- depends on ISDN_I4L && ISA
+ depends on ISA
help
This enables support for two kinds of ISDN-cards made by a German
company called ICN. 2B is the standard version for a single ISDN
diff --git a/drivers/isdn/pcbit/Kconfig b/drivers/isdn/pcbit/Kconfig
index 0933881ab0c2..ffba6eca1244 100644
--- a/drivers/isdn/pcbit/Kconfig
+++ b/drivers/isdn/pcbit/Kconfig
@@ -3,7 +3,7 @@
#
config ISDN_DRV_PCBIT
tristate "PCBIT-D support"
- depends on ISDN_I4L && ISA && (BROKEN || X86)
+ depends on ISA && (BROKEN || X86)
help
This enables support for the PCBIT ISDN-card. This card is
manufactured in Portugal by Octal. For running this card,
diff --git a/drivers/isdn/sc/Kconfig b/drivers/isdn/sc/Kconfig
index 5346e33d816c..e6510ca7bf43 100644
--- a/drivers/isdn/sc/Kconfig
+++ b/drivers/isdn/sc/Kconfig
@@ -3,7 +3,7 @@
#
config ISDN_DRV_SC
tristate "Spellcaster support"
- depends on ISDN_I4L && ISA
+ depends on ISA
help
This enables support for the Spellcaster BRI ISDN boards. This
driver currently builds only in a modularized version.
diff --git a/drivers/isdn/sc/card.h b/drivers/isdn/sc/card.h
index 4fbfa825c3a2..5992f63c383e 100644
--- a/drivers/isdn/sc/card.h
+++ b/drivers/isdn/sc/card.h
@@ -125,7 +125,7 @@ int sendmessage(int card, unsigned int procid, unsigned int type,
int receivemessage(int card, RspMessage *rspmsg);
int sc_ioctl(int card, scs_ioctl *data);
int setup_buffers(int card, int c);
-void check_reset(unsigned long data);
+void sc_check_reset(unsigned long data);
void check_phystat(unsigned long data);
#endif /* CARD_H */
diff --git a/drivers/isdn/sc/command.c b/drivers/isdn/sc/command.c
index b7bb7cbcf503..0e4969c2ef95 100644
--- a/drivers/isdn/sc/command.c
+++ b/drivers/isdn/sc/command.c
@@ -344,7 +344,7 @@ int reset(int card)
spin_lock_irqsave(&sc_adapter[card]->lock, flags);
init_timer(&sc_adapter[card]->reset_timer);
- sc_adapter[card]->reset_timer.function = check_reset;
+ sc_adapter[card]->reset_timer.function = sc_check_reset;
sc_adapter[card]->reset_timer.data = card;
sc_adapter[card]->reset_timer.expires = jiffies + CHECKRESET_TIME;
add_timer(&sc_adapter[card]->reset_timer);
diff --git a/drivers/isdn/sc/timer.c b/drivers/isdn/sc/timer.c
index cc1b8861be2a..91fbe0dc28ec 100644
--- a/drivers/isdn/sc/timer.c
+++ b/drivers/isdn/sc/timer.c
@@ -43,7 +43,7 @@ static void setup_ports(int card)
* Then, check to see if the signate has been set. Next, set the
* signature to a known value and issue a startproc if needed.
*/
-void check_reset(unsigned long data)
+void sc_check_reset(unsigned long data)
{
unsigned long flags;
unsigned long sig;
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index e8e37d826478..6cecc396e040 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,12 +1,17 @@
#
# KVM configuration
#
-menu "Virtualization"
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
depends on X86
+ default y
+
+if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on X86 && EXPERIMENTAL
+ select ANON_INODES
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -35,4 +40,4 @@ config KVM_AMD
Provides support for KVM on AMD processors equipped with the AMD-V
(SVM) extensions.
-endmenu
+endif # VIRTUALIZATION
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c1fafa..3ac9cbce3369 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -10,6 +10,8 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/signal.h>
@@ -18,6 +20,7 @@
#include <linux/kvm_para.h>
#define CR0_PE_MASK (1ULL << 0)
+#define CR0_MP_MASK (1ULL << 1)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
@@ -42,7 +45,8 @@
(CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
| CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
- (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
+ (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
+ | CR0_MP_MASK)
#define KVM_GUEST_CR4_MASK \
(CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -51,10 +55,10 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
-#define KVM_MAX_VCPUS 1
+#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 4
-#define KVM_NUM_MMU_PAGES 256
+#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
@@ -80,6 +84,11 @@
#define KVM_PIO_PAGE_OFFSET 1
/*
+ * vcpu->requests bit members
+ */
+#define KVM_TLB_FLUSH 0
+
+/*
* Address types:
*
* gva - guest virtual address
@@ -112,7 +121,7 @@ struct kvm_pte_chain {
* bits 4:7 - page table level for this shadow (1-4)
* bits 8:9 - page table quadrant for 2-level guests
* bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
- * bits 17:18 - "access" - the user and writable bits of a huge page pde
+ * bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
*/
union kvm_mmu_page_role {
unsigned word;
@@ -122,7 +131,7 @@ union kvm_mmu_page_role {
unsigned quadrant : 2;
unsigned pad_for_nice_hex_output : 6;
unsigned metaphysical : 1;
- unsigned hugepage_access : 2;
+ unsigned hugepage_access : 3;
};
};
@@ -137,7 +146,7 @@ struct kvm_mmu_page {
gfn_t gfn;
union kvm_mmu_page_role role;
- hpa_t page_hpa;
+ u64 *spt;
unsigned long slot_bitmap; /* One bit set per slot which has memory
* in this shadow page.
*/
@@ -232,6 +241,7 @@ struct kvm_pio_request {
struct page *guest_pages[2];
unsigned guest_page_offset;
int in;
+ int port;
int size;
int string;
int down;
@@ -252,8 +262,70 @@ struct kvm_stat {
u32 halt_exits;
u32 request_irq_exits;
u32 irq_exits;
+ u32 light_exits;
+ u32 efer_reload;
+};
+
+struct kvm_io_device {
+ void (*read)(struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ void *val);
+ void (*write)(struct kvm_io_device *this,
+ gpa_t addr,
+ int len,
+ const void *val);
+ int (*in_range)(struct kvm_io_device *this, gpa_t addr);
+ void (*destructor)(struct kvm_io_device *this);
+
+ void *private;
+};
+
+static inline void kvm_iodevice_read(struct kvm_io_device *dev,
+ gpa_t addr,
+ int len,
+ void *val)
+{
+ dev->read(dev, addr, len, val);
+}
+
+static inline void kvm_iodevice_write(struct kvm_io_device *dev,
+ gpa_t addr,
+ int len,
+ const void *val)
+{
+ dev->write(dev, addr, len, val);
+}
+
+static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
+{
+ return dev->in_range(dev, addr);
+}
+
+static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
+{
+ if (dev->destructor)
+ dev->destructor(dev);
+}
+
+/*
+ * It would be nice to use something smarter than a linear search, TBD...
+ * Thankfully we don't expect many devices to register (famous last words :),
+ * so until then it will suffice. At least it's abstracted so we can change
+ * it in one place.
+ */
+struct kvm_io_bus {
+ int dev_count;
+#define NR_IOBUS_DEVS 6
+ struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
+void kvm_io_bus_init(struct kvm_io_bus *bus);
+void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
+ struct kvm_io_device *dev);
+
struct kvm_vcpu {
struct kvm *kvm;
union {
@@ -266,6 +338,8 @@ struct kvm_vcpu {
u64 host_tsc;
struct kvm_run *run;
int interrupt_window_open;
+ int guest_mode;
+ unsigned long requests;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
unsigned long irq_pending[NR_IRQ_WORDS];
@@ -285,15 +359,20 @@ struct kvm_vcpu {
u64 apic_base;
u64 ia32_misc_enable_msr;
int nmsrs;
+ int save_nmsrs;
+ int msr_offset_efer;
+#ifdef CONFIG_X86_64
+ int msr_offset_kernel_gs_base;
+#endif
struct vmx_msr_entry *guest_msrs;
struct vmx_msr_entry *host_msrs;
- struct list_head free_pages;
- struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
struct kvm_mmu mmu;
struct kvm_mmu_memory_cache mmu_pte_chain_cache;
struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+ struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memory_cache mmu_page_header_cache;
gfn_t last_pt_write_gfn;
int last_pt_write_count;
@@ -305,6 +384,11 @@ struct kvm_vcpu {
char *guest_fx_image;
int fpu_active;
int guest_fpu_loaded;
+ struct vmx_host_state {
+ int loaded;
+ u16 fs_sel, gs_sel, ldt_sel;
+ int fs_gs_ldt_reload_needed;
+ } vmx_host_state;
int mmio_needed;
int mmio_read_completed;
@@ -331,6 +415,7 @@ struct kvm_vcpu {
u32 ar;
} tr, es, ds, fs, gs;
} rmode;
+ int halt_request; /* real mode on Intel only */
int cpuid_nent;
struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
@@ -362,12 +447,15 @@ struct kvm {
struct list_head active_mmu_pages;
int n_free_mmu_pages;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ int nvcpus;
struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
int memory_config_version;
int busy;
unsigned long rmap_overflow;
struct list_head vm_list;
struct file *filp;
+ struct kvm_io_bus mmio_bus;
+ struct kvm_io_bus pio_bus;
};
struct descriptor_table {
@@ -447,8 +535,8 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+void kvm_mmu_zap_all(struct kvm *kvm);
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
@@ -481,6 +569,8 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
unsigned long *rflags);
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
@@ -488,6 +578,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned long count, int string, int down,
gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
@@ -511,6 +602,7 @@ void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_flush_remote_tlbs(struct kvm *kvm);
int kvm_read_guest(struct kvm_vcpu *vcpu,
gva_t addr,
@@ -524,10 +616,12 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
unsigned long segment_base(u16 selector);
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *old, const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -539,6 +633,14 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
return vcpu->mmu.page_fault(vcpu, gva, error_code);
}
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+ if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+ return 0;
+
+ return kvm_mmu_load(vcpu);
+}
+
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
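The kvm.h hunks above introduce struct kvm_io_device and struct kvm_io_bus as the interface for in-kernel MMIO/PIO device models. Below is a hedged sketch of how a device might implement and attach itself, using only the callbacks and helpers declared above; the dummy_* names and the address window are invented for the example, and the snippet assumes the kvm.h declarations plus <linux/string.h> for memset().

/* A do-nothing device: reads return all-ones, writes are dropped. */
#include <linux/string.h>

static void dummy_read(struct kvm_io_device *this, gpa_t addr, int len, void *val)
{
	memset(val, 0xff, len);
}

static void dummy_write(struct kvm_io_device *this, gpa_t addr, int len,
			const void *val)
{
	/* silently discard guest writes */
}

static int dummy_in_range(struct kvm_io_device *this, gpa_t addr)
{
	/* claim an arbitrary 4K window of guest physical address space */
	return addr >= 0xfee01000 && addr < 0xfee02000;
}

static struct kvm_io_device dummy_dev = {
	.read       = dummy_read,
	.write      = dummy_write,
	.in_range   = dummy_in_range,
	.destructor = NULL,	/* nothing to free */
};

/* Attaching it to a VM's MMIO bus would then be a single call:
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &dummy_dev);
 */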
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f1f07adb04e..bcbe6835beb4 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -16,34 +16,33 @@
*/
#include "kvm.h"
+#include "x86_emulate.h"
+#include "segment_descriptor.h"
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
-#include <linux/magic.h>
-#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
-#include <asm/msr.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
-#include <asm/uaccess.h>
#include <linux/reboot.h>
-#include <asm/io.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
-#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/anon_inodes.h>
-#include "x86_emulate.h"
-#include "segment_descriptor.h"
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -51,8 +50,12 @@ MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);
+static cpumask_t cpus_hardware_enabled;
+
struct kvm_arch_ops *kvm_arch_ops;
+static void hardware_disable(void *ignored);
+
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
static struct kvm_stats_debugfs_item {
@@ -72,13 +75,13 @@ static struct kvm_stats_debugfs_item {
{ "halt_exits", STAT_OFFSET(halt_exits) },
{ "request_irq", STAT_OFFSET(request_irq_exits) },
{ "irq_exits", STAT_OFFSET(irq_exits) },
+ { "light_exits", STAT_OFFSET(light_exits) },
+ { "efer_reload", STAT_OFFSET(efer_reload) },
{ NULL }
};
static struct dentry *debugfs_dir;
-struct vfsmount *kvmfs_mnt;
-
#define MAX_IO_MSRS 256
#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
@@ -100,55 +103,6 @@ struct segment_descriptor_64 {
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
-static struct inode *kvmfs_inode(struct file_operations *fops)
-{
- int error = -ENOMEM;
- struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
-
- if (!inode)
- goto eexit_1;
-
- inode->i_fop = fops;
-
- /*
- * Mark the inode dirty from the very beginning,
- * that way it will never be moved to the dirty
- * list because mark_inode_dirty() will think
- * that it already _is_ on the dirty list.
- */
- inode->i_state = I_DIRTY;
- inode->i_mode = S_IRUSR | S_IWUSR;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- return inode;
-
-eexit_1:
- return ERR_PTR(error);
-}
-
-static struct file *kvmfs_file(struct inode *inode, void *private_data)
-{
- struct file *file = get_empty_filp();
-
- if (!file)
- return ERR_PTR(-ENFILE);
-
- file->f_path.mnt = mntget(kvmfs_mnt);
- file->f_path.dentry = d_alloc_anon(inode);
- if (!file->f_path.dentry)
- return ERR_PTR(-ENOMEM);
- file->f_mapping = inode->i_mapping;
-
- file->f_pos = 0;
- file->f_flags = O_RDWR;
- file->f_op = inode->i_fop;
- file->f_mode = FMODE_READ | FMODE_WRITE;
- file->f_version = 0;
- file->private_data = private_data;
- return file;
-}
-
unsigned long segment_base(u16 selector)
{
struct descriptor_table gdt;
@@ -284,27 +238,52 @@ static void vcpu_load(struct kvm_vcpu *vcpu)
kvm_arch_ops->vcpu_load(vcpu);
}
-/*
- * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
- * if the slot is not populated.
- */
-static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
+static void vcpu_put(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_ops->vcpu_put(vcpu);
+ mutex_unlock(&vcpu->mutex);
+}
+
+static void ack_flush(void *_completed)
{
- struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
+ atomic_t *completed = _completed;
- mutex_lock(&vcpu->mutex);
- if (!vcpu->vmcs) {
- mutex_unlock(&vcpu->mutex);
- return NULL;
- }
- kvm_arch_ops->vcpu_load(vcpu);
- return vcpu;
+ atomic_inc(completed);
}
-static void vcpu_put(struct kvm_vcpu *vcpu)
+void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- kvm_arch_ops->vcpu_put(vcpu);
- mutex_unlock(&vcpu->mutex);
+ int i, cpu, needed;
+ cpumask_t cpus;
+ struct kvm_vcpu *vcpu;
+ atomic_t completed;
+
+ atomic_set(&completed, 0);
+ cpus_clear(cpus);
+ needed = 0;
+ for (i = 0; i < kvm->nvcpus; ++i) {
+ vcpu = &kvm->vcpus[i];
+ if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ continue;
+ cpu = vcpu->cpu;
+ if (cpu != -1 && cpu != raw_smp_processor_id())
+ if (!cpu_isset(cpu, cpus)) {
+ cpu_set(cpu, cpus);
+ ++needed;
+ }
+ }
+
+ /*
+ * We really want smp_call_function_mask() here. But that's not
+ * available, so ipi all cpus in parallel and wait for them
+ * to complete.
+ */
+ for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
+ smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
+ while (atomic_read(&completed) != needed) {
+ cpu_relax();
+ barrier();
+ }
}
static struct kvm *kvm_create_vm(void)
@@ -315,8 +294,13 @@ static struct kvm *kvm_create_vm(void)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ kvm_io_bus_init(&kvm->pio_bus);
spin_lock_init(&kvm->lock);
INIT_LIST_HEAD(&kvm->active_mmu_pages);
+ spin_lock(&kvm_lock);
+ list_add(&kvm->vm_list, &vm_list);
+ spin_unlock(&kvm_lock);
+ kvm_io_bus_init(&kvm->mmio_bus);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
struct kvm_vcpu *vcpu = &kvm->vcpus[i];
@@ -324,10 +308,6 @@ static struct kvm *kvm_create_vm(void)
vcpu->cpu = -1;
vcpu->kvm = kvm;
vcpu->mmu.root_hpa = INVALID_PAGE;
- INIT_LIST_HEAD(&vcpu->free_pages);
- spin_lock(&kvm_lock);
- list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
}
return kvm;
}
@@ -380,6 +360,16 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
}
}
+static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->vmcs)
+ return;
+
+ vcpu_load(vcpu);
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+}
+
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
if (!vcpu->vmcs)
@@ -400,6 +390,11 @@ static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ /*
+ * Unpin any mmu pages first.
+ */
+ for (i = 0; i < KVM_MAX_VCPUS; ++i)
+ kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
for (i = 0; i < KVM_MAX_VCPUS; ++i)
kvm_free_vcpu(&kvm->vcpus[i]);
}
@@ -414,6 +409,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
+ kvm_io_bus_destroy(&kvm->pio_bus);
+ kvm_io_bus_destroy(&kvm->mmio_bus);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
kfree(kvm);
@@ -649,13 +646,6 @@ void fx_init(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(fx_init);
-static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
-{
- spin_lock(&vcpu->kvm->lock);
- kvm_mmu_slot_remove_write_access(vcpu, slot);
- spin_unlock(&vcpu->kvm->lock);
-}
-
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -778,19 +768,10 @@ raced:
*memslot = new;
++kvm->memory_config_version;
- spin_unlock(&kvm->lock);
-
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- struct kvm_vcpu *vcpu;
+ kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+ kvm_flush_remote_tlbs(kvm);
- vcpu = vcpu_load_slot(kvm, i);
- if (!vcpu)
- continue;
- if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
- do_remove_write_access(vcpu, mem->slot);
- kvm_mmu_reset_context(vcpu);
- vcpu_put(vcpu);
- }
+ spin_unlock(&kvm->lock);
kvm_free_physmem_slot(&old, &new);
return 0;
@@ -812,7 +793,6 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int r, i;
int n;
- int cleared;
unsigned long any = 0;
spin_lock(&kvm->lock);
@@ -841,23 +821,11 @@ static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
goto out;
- if (any) {
- cleared = 0;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- struct kvm_vcpu *vcpu;
-
- vcpu = vcpu_load_slot(kvm, i);
- if (!vcpu)
- continue;
- if (!cleared) {
- do_remove_write_access(vcpu, log->slot);
- memset(memslot->dirty_bitmap, 0, n);
- cleared = 1;
- }
- kvm_arch_ops->tlb_flush(vcpu);
- vcpu_put(vcpu);
- }
- }
+ spin_lock(&kvm->lock);
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ kvm_flush_remote_tlbs(kvm);
+ memset(memslot->dirty_bitmap, 0, n);
+ spin_unlock(&kvm->lock);
r = 0;
@@ -906,13 +874,9 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
break;
kvm->naliases = n;
- spin_unlock(&kvm->lock);
+ kvm_mmu_zap_all(kvm);
- vcpu_load(&kvm->vcpus[0]);
- spin_lock(&kvm->lock);
- kvm_mmu_zap_all(&kvm->vcpus[0]);
spin_unlock(&kvm->lock);
- vcpu_put(&kvm->vcpus[0]);
return 0;
@@ -969,7 +933,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
int i;
- struct kvm_memory_slot *memslot = NULL;
+ struct kvm_memory_slot *memslot;
unsigned long rel_gfn;
for (i = 0; i < kvm->nmemslots; ++i) {
@@ -978,7 +942,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages) {
- if (!memslot || !memslot->dirty_bitmap)
+ if (!memslot->dirty_bitmap)
return;
rel_gfn = gfn - memslot->base_gfn;
@@ -1037,12 +1001,31 @@ static int emulator_write_std(unsigned long addr,
return X86EMUL_UNHANDLEABLE;
}
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ /*
+ * Note that it's important to have this wrapper function because
+ * in the very near future we will be checking for MMIOs against
+ * the LAPIC as well as the general MMIO bus
+ */
+ return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+}
+
+static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
+ gpa_t addr)
+{
+ return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
+}
+
static int emulator_read_emulated(unsigned long addr,
void *val,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+ struct kvm_io_device *mmio_dev;
+ gpa_t gpa;
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
@@ -1051,18 +1034,26 @@ static int emulator_read_emulated(unsigned long addr,
} else if (emulator_read_std(addr, val, bytes, ctxt)
== X86EMUL_CONTINUE)
return X86EMUL_CONTINUE;
- else {
- gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
- if (gpa == UNMAPPED_GVA)
- return X86EMUL_PROPAGATE_FAULT;
- vcpu->mmio_needed = 1;
- vcpu->mmio_phys_addr = gpa;
- vcpu->mmio_size = bytes;
- vcpu->mmio_is_write = 0;
+ gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+ if (gpa == UNMAPPED_GVA)
+ return X86EMUL_PROPAGATE_FAULT;
- return X86EMUL_UNHANDLEABLE;
+ /*
+ * Is this MMIO handled locally?
+ */
+ mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+ if (mmio_dev) {
+ kvm_iodevice_read(mmio_dev, gpa, bytes, val);
+ return X86EMUL_CONTINUE;
}
+
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_phys_addr = gpa;
+ vcpu->mmio_size = bytes;
+ vcpu->mmio_is_write = 0;
+
+ return X86EMUL_UNHANDLEABLE;
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1070,18 +1061,20 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
{
struct page *page;
void *virt;
+ unsigned offset = offset_in_page(gpa);
if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
return 0;
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
if (!page)
return 0;
- kvm_mmu_pre_write(vcpu, gpa, bytes);
mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
- memcpy(virt + offset_in_page(gpa), val, bytes);
+ if (memcmp(virt + offset_in_page(gpa), val, bytes)) {
+ kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
+ memcpy(virt + offset_in_page(gpa), val, bytes);
+ }
kunmap_atomic(virt, KM_USER0);
- kvm_mmu_post_write(vcpu, gpa, bytes);
return 1;
}
@@ -1090,8 +1083,9 @@ static int emulator_write_emulated(unsigned long addr,
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
- struct kvm_vcpu *vcpu = ctxt->vcpu;
- gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
+ struct kvm_vcpu *vcpu = ctxt->vcpu;
+ struct kvm_io_device *mmio_dev;
+ gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
if (gpa == UNMAPPED_GVA) {
kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
@@ -1101,6 +1095,15 @@ static int emulator_write_emulated(unsigned long addr,
if (emulator_write_phys(vcpu, gpa, val, bytes))
return X86EMUL_CONTINUE;
+ /*
+ * Is this MMIO handled locally?
+ */
+ mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+ if (mmio_dev) {
+ kvm_iodevice_write(mmio_dev, gpa, bytes, val);
+ return X86EMUL_CONTINUE;
+ }
+
vcpu->mmio_needed = 1;
vcpu->mmio_phys_addr = gpa;
vcpu->mmio_size = bytes;
@@ -1269,6 +1272,17 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(emulate_instruction);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->irq_summary)
+ return 1;
+
+ vcpu->run->exit_reason = KVM_EXIT_HLT;
+ ++vcpu->stat.halt_exits;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
@@ -1469,6 +1483,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MC0_MISC+16:
case MSR_IA32_UCODE_REV:
case MSR_IA32_PERF_STATUS:
+ case MSR_IA32_EBL_CR_POWERON:
/* MTRR registers */
case 0xfe:
case 0x200 ... 0x2ff:
@@ -1502,7 +1517,7 @@ EXPORT_SYMBOL_GPL(kvm_get_msr_common);
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}
@@ -1580,7 +1595,7 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-static int set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}
@@ -1727,6 +1742,20 @@ static int complete_pio(struct kvm_vcpu *vcpu)
return 0;
}
+void kernel_pio(struct kvm_io_device *pio_dev, struct kvm_vcpu *vcpu)
+{
+ /* TODO: String I/O for in kernel device */
+
+ if (vcpu->pio.in)
+ kvm_iodevice_read(pio_dev, vcpu->pio.port,
+ vcpu->pio.size,
+ vcpu->pio_data);
+ else
+ kvm_iodevice_write(pio_dev, vcpu->pio.port,
+ vcpu->pio.size,
+ vcpu->pio_data);
+}
+
int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int size, unsigned long count, int string, int down,
gva_t address, int rep, unsigned port)
@@ -1735,6 +1764,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
int i;
int nr_pages = 1;
struct page *page;
+ struct kvm_io_device *pio_dev;
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -1746,17 +1776,27 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
vcpu->pio.cur_count = count;
vcpu->pio.size = size;
vcpu->pio.in = in;
+ vcpu->pio.port = port;
vcpu->pio.string = string;
vcpu->pio.down = down;
vcpu->pio.guest_page_offset = offset_in_page(address);
vcpu->pio.rep = rep;
+ pio_dev = vcpu_find_pio_dev(vcpu, port);
if (!string) {
kvm_arch_ops->cache_regs(vcpu);
memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
kvm_arch_ops->decache_regs(vcpu);
+ if (pio_dev) {
+ kernel_pio(pio_dev, vcpu);
+ complete_pio(vcpu);
+ return 1;
+ }
return 0;
}
+ /* TODO: String I/O for in kernel device */
+ if (pio_dev)
+ printk(KERN_ERR "kvm_setup_pio: no string io support\n");
if (!count) {
kvm_arch_ops->skip_emulated_instruction(vcpu);
@@ -2093,7 +2133,7 @@ static __init void kvm_init_msr_list(void)
*/
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
- return set_msr(vcpu, index, *data);
+ return kvm_set_msr(vcpu, index, *data);
}
/*
@@ -2273,34 +2313,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
struct inode *inode;
struct file *file;
+ r = anon_inode_getfd(&fd, &inode, &file,
+ "kvm-vcpu", &kvm_vcpu_fops, vcpu);
+ if (r)
+ return r;
atomic_inc(&vcpu->kvm->filp->f_count);
- inode = kvmfs_inode(&kvm_vcpu_fops);
- if (IS_ERR(inode)) {
- r = PTR_ERR(inode);
- goto out1;
- }
-
- file = kvmfs_file(inode, vcpu);
- if (IS_ERR(file)) {
- r = PTR_ERR(file);
- goto out2;
- }
-
- r = get_unused_fd();
- if (r < 0)
- goto out3;
- fd = r;
- fd_install(fd, file);
-
return fd;
-
-out3:
- fput(file);
-out2:
- iput(inode);
-out1:
- fput(vcpu->kvm->filp);
- return r;
}
/*
@@ -2363,6 +2381,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
if (r < 0)
goto out_free_vcpus;
+ spin_lock(&kvm_lock);
+ if (n >= kvm->nvcpus)
+ kvm->nvcpus = n + 1;
+ spin_unlock(&kvm_lock);
+
return r;
out_free_vcpus:
@@ -2376,6 +2399,27 @@ out:
return r;
}
+static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+{
+ u64 efer;
+ int i;
+ struct kvm_cpuid_entry *e, *entry;
+
+ rdmsrl(MSR_EFER, efer);
+ entry = NULL;
+ for (i = 0; i < vcpu->cpuid_nent; ++i) {
+ e = &vcpu->cpuid_entries[i];
+ if (e->function == 0x80000001) {
+ entry = e;
+ break;
+ }
+ }
+ if (entry && (entry->edx & EFER_NX) && !(efer & EFER_NX)) {
+ entry->edx &= ~(1 << 20);
+ printk(KERN_INFO ": guest NX capability removed\n");
+ }
+}
+
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
struct kvm_cpuid_entry __user *entries)
@@ -2390,6 +2434,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
cpuid->nent * sizeof(struct kvm_cpuid_entry)))
goto out;
vcpu->cpuid_nent = cpuid->nent;
+ cpuid_fix_nx_cap(vcpu);
return 0;
out:
@@ -2572,7 +2617,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
break;
}
case KVM_GET_MSRS:
- r = msr_io(vcpu, argp, get_msr, 1);
+ r = msr_io(vcpu, argp, kvm_get_msr, 1);
break;
case KVM_SET_MSRS:
r = msr_io(vcpu, argp, do_set_msr, 0);
@@ -2738,41 +2783,18 @@ static int kvm_dev_ioctl_create_vm(void)
struct file *file;
struct kvm *kvm;
- inode = kvmfs_inode(&kvm_vm_fops);
- if (IS_ERR(inode)) {
- r = PTR_ERR(inode);
- goto out1;
- }
-
kvm = kvm_create_vm();
- if (IS_ERR(kvm)) {
- r = PTR_ERR(kvm);
- goto out2;
+ if (IS_ERR(kvm))
+ return PTR_ERR(kvm);
+ r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
+ if (r) {
+ kvm_destroy_vm(kvm);
+ return r;
}
- file = kvmfs_file(inode, kvm);
- if (IS_ERR(file)) {
- r = PTR_ERR(file);
- goto out3;
- }
kvm->filp = file;
- r = get_unused_fd();
- if (r < 0)
- goto out4;
- fd = r;
- fd_install(fd, file);
-
return fd;
-
-out4:
- fput(file);
-out3:
- kvm_destroy_vm(kvm);
-out2:
- iput(inode);
-out1:
- return r;
}
static long kvm_dev_ioctl(struct file *filp,
@@ -2862,7 +2884,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
* in vmx root mode.
*/
printk(KERN_INFO "kvm: exiting hardware virtualization\n");
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ on_each_cpu(hardware_disable, NULL, 0, 1);
}
return NOTIFY_OK;
}
@@ -2905,33 +2927,88 @@ static void decache_vcpus_on_cpu(int cpu)
spin_unlock(&kvm_lock);
}
+static void hardware_enable(void *junk)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (cpu_isset(cpu, cpus_hardware_enabled))
+ return;
+ cpu_set(cpu, cpus_hardware_enabled);
+ kvm_arch_ops->hardware_enable(NULL);
+}
+
+static void hardware_disable(void *junk)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (!cpu_isset(cpu, cpus_hardware_enabled))
+ return;
+ cpu_clear(cpu, cpus_hardware_enabled);
+ decache_vcpus_on_cpu(cpu);
+ kvm_arch_ops->hardware_disable(NULL);
+}
+
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
int cpu = (long)v;
switch (val) {
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
- decache_vcpus_on_cpu(cpu);
- smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
- NULL, 0, 1);
+ smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
- smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
- NULL, 0, 1);
+ smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
break;
}
return NOTIFY_OK;
}
+void kvm_io_bus_init(struct kvm_io_bus *bus)
+{
+ memset(bus, 0, sizeof(*bus));
+}
+
+void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < bus->dev_count; i++) {
+ struct kvm_io_device *pos = bus->devs[i];
+
+ kvm_iodevice_destructor(pos);
+ }
+}
+
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+{
+ int i;
+
+ for (i = 0; i < bus->dev_count; i++) {
+ struct kvm_io_device *pos = bus->devs[i];
+
+ if (pos->in_range(pos, addr))
+ return pos;
+ }
+
+ return NULL;
+}
+
+void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
+{
+ BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
+
+ bus->devs[bus->dev_count++] = dev;
+}
+
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
.priority = 20, /* must be > scheduler priority */
@@ -2983,14 +3060,13 @@ static void kvm_exit_debug(void)
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
- decache_vcpus_on_cpu(raw_smp_processor_id());
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ hardware_disable(NULL);
return 0;
}
static int kvm_resume(struct sys_device *dev)
{
- on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+ hardware_enable(NULL);
return 0;
}
@@ -3007,18 +3083,6 @@ static struct sys_device kvm_sysdev = {
hpa_t bad_page_address;
-static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
-{
- return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
-}
-
-static struct file_system_type kvm_fs_type = {
- .name = "kvmfs",
- .get_sb = kvmfs_get_sb,
- .kill_sb = kill_anon_super,
-};
-
int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
int r;
@@ -3043,7 +3107,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
if (r < 0)
goto out;
- on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+ on_each_cpu(hardware_enable, NULL, 0, 1);
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
goto out_free_1;
@@ -3075,7 +3139,7 @@ out_free_2:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_ops->hardware_unsetup();
out:
kvm_arch_ops = NULL;
@@ -3089,7 +3153,7 @@ void kvm_exit_arch(void)
sysdev_class_unregister(&kvm_sysdev_class);
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
- on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
+ on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_ops->hardware_unsetup();
kvm_arch_ops = NULL;
}
@@ -3103,14 +3167,6 @@ static __init int kvm_init(void)
if (r)
goto out4;
- r = register_filesystem(&kvm_fs_type);
- if (r)
- goto out3;
-
- kvmfs_mnt = kern_mount(&kvm_fs_type);
- r = PTR_ERR(kvmfs_mnt);
- if (IS_ERR(kvmfs_mnt))
- goto out2;
kvm_init_debug();
kvm_init_msr_list();
@@ -3127,10 +3183,6 @@ static __init int kvm_init(void)
out:
kvm_exit_debug();
- mntput(kvmfs_mnt);
-out2:
- unregister_filesystem(&kvm_fs_type);
-out3:
kvm_mmu_module_exit();
out4:
return r;
@@ -3140,8 +3192,6 @@ static __exit void kvm_exit(void)
{
kvm_exit_debug();
__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
- mntput(kvmfs_mnt);
- unregister_filesystem(&kvm_fs_type);
kvm_mmu_module_exit();
}
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e8e228118de9..1a87ba9d5156 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -16,15 +16,18 @@
* the COPYING file in the top-level directory.
*
*/
+
+#include "vmx.h"
+#include "kvm.h"
+
#include <linux/types.h>
#include <linux/string.h>
-#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
-#include "vmx.h"
-#include "kvm.h"
+#include <asm/page.h>
+#include <asm/cmpxchg.h>
#undef MMU_DEBUG
@@ -90,25 +93,11 @@ static int dbg = 1;
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
-#define PT32_PTE_COPY_MASK \
- (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
-
-#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
-
#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
-#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
-#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
-
-#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
-#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
-
-#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
-
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define PT64_LEVEL_BITS 9
@@ -165,6 +154,7 @@ struct kvm_rmap_desc {
static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
+static struct kmem_cache *mmu_page_header_cache;
static int is_write_protection(struct kvm_vcpu *vcpu)
{
@@ -202,6 +192,15 @@ static int is_rmap_pte(u64 pte)
== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}
+static void set_shadow_pte(u64 *sptep, u64 spte)
+{
+#ifdef CONFIG_X86_64
+ set_64bit((unsigned long *)sptep, spte);
+#else
+ set_64bit((unsigned long long *)sptep, spte);
+#endif
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
struct kmem_cache *base_cache, int min,
gfp_t gfp_flags)
@@ -225,6 +224,29 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
kfree(mc->objects[--mc->nobjs]);
}
+static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
+ int min, gfp_t gfp_flags)
+{
+ struct page *page;
+
+ if (cache->nobjs >= min)
+ return 0;
+ while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
+ page = alloc_page(gfp_flags);
+ if (!page)
+ return -ENOMEM;
+ set_page_private(page, 0);
+ cache->objects[cache->nobjs++] = page_address(page);
+ }
+ return 0;
+}
+
+static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
+{
+ while (mc->nobjs)
+ free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
{
int r;
@@ -235,6 +257,13 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
goto out;
r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
rmap_desc_cache, 1, gfp_flags);
+ if (r)
+ goto out;
+ r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
+ if (r)
+ goto out;
+ r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+ mmu_page_header_cache, 4, gfp_flags);
out:
return r;
}
@@ -258,6 +287,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
+ mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
+ mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -271,24 +302,15 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
return p;
}
-static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
-{
- if (mc->nobjs < KVM_NR_MEM_OBJS)
- mc->objects[mc->nobjs++] = obj;
- else
- kfree(obj);
-}
-
static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
sizeof(struct kvm_pte_chain));
}
-static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
- struct kvm_pte_chain *pc)
+static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
- mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
+ kfree(pc);
}
static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
@@ -297,10 +319,9 @@ static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
sizeof(struct kvm_rmap_desc));
}
-static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
- struct kvm_rmap_desc *rd)
+static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
- mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
+ kfree(rd);
}
/*
@@ -345,8 +366,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
}
}
-static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
- struct page *page,
+static void rmap_desc_remove_entry(struct page *page,
struct kvm_rmap_desc *desc,
int i,
struct kvm_rmap_desc *prev_desc)
@@ -366,10 +386,10 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
prev_desc->more = desc->more;
else
set_page_private(page,(unsigned long)desc->more | 1);
- mmu_free_rmap_desc(vcpu, desc);
+ mmu_free_rmap_desc(desc);
}
-static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
+static void rmap_remove(u64 *spte)
{
struct page *page;
struct kvm_rmap_desc *desc;
@@ -397,7 +417,7 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
while (desc) {
for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
if (desc->shadow_ptes[i] == spte) {
- rmap_desc_remove_entry(vcpu, page,
+ rmap_desc_remove_entry(page,
desc, i,
prev_desc);
return;
@@ -432,20 +452,19 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
BUG_ON(!(*spte & PT_PRESENT_MASK));
BUG_ON(!(*spte & PT_WRITABLE_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
- rmap_remove(vcpu, spte);
- kvm_arch_ops->tlb_flush(vcpu);
- *spte &= ~(u64)PT_WRITABLE_MASK;
+ rmap_remove(spte);
+ set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
}
#ifdef MMU_DEBUG
-static int is_empty_shadow_page(hpa_t page_hpa)
+static int is_empty_shadow_page(u64 *spt)
{
u64 *pos;
u64 *end;
- for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
- pos != end; pos++)
+ for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
if (*pos != 0) {
printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
pos, *pos);
@@ -455,14 +474,14 @@ static int is_empty_shadow_page(hpa_t page_hpa)
}
#endif
-static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
+static void kvm_mmu_free_page(struct kvm *kvm,
+ struct kvm_mmu_page *page_head)
{
- struct kvm_mmu_page *page_head = page_header(page_hpa);
-
- ASSERT(is_empty_shadow_page(page_hpa));
- page_head->page_hpa = page_hpa;
- list_move(&page_head->link, &vcpu->free_pages);
- ++vcpu->kvm->n_free_mmu_pages;
+ ASSERT(is_empty_shadow_page(page_head->spt));
+ list_del(&page_head->link);
+ __free_page(virt_to_page(page_head->spt));
+ kfree(page_head);
+ ++kvm->n_free_mmu_pages;
}
static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -475,12 +494,15 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
{
struct kvm_mmu_page *page;
- if (list_empty(&vcpu->free_pages))
+ if (!vcpu->kvm->n_free_mmu_pages)
return NULL;
- page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
- list_move(&page->link, &vcpu->kvm->active_mmu_pages);
- ASSERT(is_empty_shadow_page(page->page_hpa));
+ page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
+ sizeof *page);
+ page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+ set_page_private(virt_to_page(page->spt), (unsigned long)page);
+ list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+ ASSERT(is_empty_shadow_page(page->spt));
page->slot_bitmap = 0;
page->multimapped = 0;
page->parent_pte = parent_pte;
@@ -525,8 +547,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
pte_chain->parent_ptes[0] = parent_pte;
}
-static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
u64 *parent_pte)
{
struct kvm_pte_chain *pte_chain;
@@ -553,7 +574,7 @@ static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
pte_chain->parent_ptes[i] = NULL;
if (i == 0) {
hlist_del(&pte_chain->link);
- mmu_free_pte_chain(vcpu, pte_chain);
+ mmu_free_pte_chain(pte_chain);
if (hlist_empty(&page->parent_ptes)) {
page->multimapped = 0;
page->parent_pte = NULL;
@@ -631,22 +652,22 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
return page;
}
-static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
+static void kvm_mmu_page_unlink_children(struct kvm *kvm,
struct kvm_mmu_page *page)
{
unsigned i;
u64 *pt;
u64 ent;
- pt = __va(page->page_hpa);
+ pt = page->spt;
if (page->role.level == PT_PAGE_TABLE_LEVEL) {
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
if (pt[i] & PT_PRESENT_MASK)
- rmap_remove(vcpu, &pt[i]);
+ rmap_remove(&pt[i]);
pt[i] = 0;
}
- kvm_arch_ops->tlb_flush(vcpu);
+ kvm_flush_remote_tlbs(kvm);
return;
}
@@ -657,18 +678,18 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
if (!(ent & PT_PRESENT_MASK))
continue;
ent &= PT64_BASE_ADDR_MASK;
- mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
+ mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
}
+ kvm_flush_remote_tlbs(kvm);
}
-static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *page,
+static void kvm_mmu_put_page(struct kvm_mmu_page *page,
u64 *parent_pte)
{
- mmu_page_remove_parent_pte(vcpu, page, parent_pte);
+ mmu_page_remove_parent_pte(page, parent_pte);
}
-static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
+static void kvm_mmu_zap_page(struct kvm *kvm,
struct kvm_mmu_page *page)
{
u64 *parent_pte;
@@ -684,15 +705,15 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
parent_pte = chain->parent_ptes[0];
}
BUG_ON(!parent_pte);
- kvm_mmu_put_page(vcpu, page, parent_pte);
- *parent_pte = 0;
+ kvm_mmu_put_page(page, parent_pte);
+ set_shadow_pte(parent_pte, 0);
}
- kvm_mmu_page_unlink_children(vcpu, page);
+ kvm_mmu_page_unlink_children(kvm, page);
if (!page->root_count) {
hlist_del(&page->hash_link);
- kvm_mmu_free_page(vcpu, page->page_hpa);
+ kvm_mmu_free_page(kvm, page);
} else
- list_move(&page->link, &vcpu->kvm->active_mmu_pages);
+ list_move(&page->link, &kvm->active_mmu_pages);
}
static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -711,12 +732,23 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
if (page->gfn == gfn && !page->role.metaphysical) {
pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
page->role.word);
- kvm_mmu_zap_page(vcpu, page);
+ kvm_mmu_zap_page(vcpu->kvm, page);
r = 1;
}
return r;
}
+static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ struct kvm_mmu_page *page;
+
+ while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
+ pgprintk("%s: zap %lx %x\n",
+ __FUNCTION__, gfn, page->role.word);
+ kvm_mmu_zap_page(vcpu->kvm, page);
+ }
+}
+
static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
@@ -805,7 +837,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
return -ENOMEM;
}
- table[index] = new_table->page_hpa | PT_PRESENT_MASK
+ table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
}
table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -817,11 +849,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
int i;
struct kvm_mmu_page *page;
+ if (!VALID_PAGE(vcpu->mmu.root_hpa))
+ return;
#ifdef CONFIG_X86_64
if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->mmu.root_hpa;
- ASSERT(VALID_PAGE(root));
page = page_header(root);
--page->root_count;
vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -832,7 +865,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
hpa_t root = vcpu->mmu.pae_root[i];
if (root) {
- ASSERT(VALID_PAGE(root));
root &= PT64_BASE_ADDR_MASK;
page = page_header(root);
--page->root_count;
@@ -857,7 +889,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
ASSERT(!VALID_PAGE(root));
page = kvm_mmu_get_page(vcpu, root_gfn, 0,
PT64_ROOT_LEVEL, 0, 0, NULL);
- root = page->page_hpa;
+ root = __pa(page->spt);
++page->root_count;
vcpu->mmu.root_hpa = root;
return;
@@ -878,7 +910,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, !is_paging(vcpu),
0, NULL);
- root = page->page_hpa;
+ root = __pa(page->spt);
++page->root_count;
vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
}
@@ -928,9 +960,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
context->free = nonpaging_free;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
+ context->root_hpa = INVALID_PAGE;
return 0;
}
@@ -944,59 +974,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
mmu_free_roots(vcpu);
- if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
- kvm_mmu_free_some_pages(vcpu);
- mmu_alloc_roots(vcpu);
- kvm_mmu_flush_tlb(vcpu);
- kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
-}
-
-static inline void set_pte_common(struct kvm_vcpu *vcpu,
- u64 *shadow_pte,
- gpa_t gaddr,
- int dirty,
- u64 access_bits,
- gfn_t gfn)
-{
- hpa_t paddr;
-
- *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
- if (!dirty)
- access_bits &= ~PT_WRITABLE_MASK;
-
- paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
-
- *shadow_pte |= access_bits;
-
- if (is_error_hpa(paddr)) {
- *shadow_pte |= gaddr;
- *shadow_pte |= PT_SHADOW_IO_MARK;
- *shadow_pte &= ~PT_PRESENT_MASK;
- return;
- }
-
- *shadow_pte |= paddr;
-
- if (access_bits & PT_WRITABLE_MASK) {
- struct kvm_mmu_page *shadow;
-
- shadow = kvm_mmu_lookup_page(vcpu, gfn);
- if (shadow) {
- pgprintk("%s: found shadow page for %lx, marking ro\n",
- __FUNCTION__, gfn);
- access_bits &= ~PT_WRITABLE_MASK;
- if (is_writeble_pte(*shadow_pte)) {
- *shadow_pte &= ~PT_WRITABLE_MASK;
- kvm_arch_ops->tlb_flush(vcpu);
- }
- }
- }
-
- if (access_bits & PT_WRITABLE_MASK)
- mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
-
- page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
- rmap_add(vcpu, shadow_pte);
}
static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -1006,23 +983,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}
-static inline int fix_read_pf(u64 *shadow_ent)
-{
- if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
- !(*shadow_ent & PT_USER_MASK)) {
- /*
- * If supervisor write protect is disabled, we shadow kernel
- * pages as user pages so we can trap the write access.
- */
- *shadow_ent |= PT_USER_MASK;
- *shadow_ent &= ~PT_WRITABLE_MASK;
-
- return 1;
-
- }
- return 0;
-}
-
static void paging_free(struct kvm_vcpu *vcpu)
{
nonpaging_free(vcpu);
@@ -1047,10 +1007,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
context->free = paging_free;
context->root_level = level;
context->shadow_root_level = level;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
- (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+ context->root_hpa = INVALID_PAGE;
return 0;
}
@@ -1069,10 +1026,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
context->free = paging_free;
context->root_level = PT32_ROOT_LEVEL;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
- (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+ context->root_hpa = INVALID_PAGE;
return 0;
}
@@ -1107,18 +1061,33 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
+ destroy_kvm_mmu(vcpu);
+ return init_kvm_mmu(vcpu);
+}
+
+int kvm_mmu_load(struct kvm_vcpu *vcpu)
+{
int r;
- destroy_kvm_mmu(vcpu);
- r = init_kvm_mmu(vcpu);
- if (r < 0)
- goto out;
+ spin_lock(&vcpu->kvm->lock);
r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ goto out;
+ mmu_alloc_roots(vcpu);
+ kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+ kvm_mmu_flush_tlb(vcpu);
out:
+ spin_unlock(&vcpu->kvm->lock);
return r;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_load);
+
+void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+{
+ mmu_free_roots(vcpu);
+}
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page,
u64 *spte)
{
@@ -1128,16 +1097,32 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
pte = *spte;
if (is_present_pte(pte)) {
if (page->role.level == PT_PAGE_TABLE_LEVEL)
- rmap_remove(vcpu, spte);
+ rmap_remove(spte);
else {
child = page_header(pte & PT64_BASE_ADDR_MASK);
- mmu_page_remove_parent_pte(vcpu, child, spte);
+ mmu_page_remove_parent_pte(child, spte);
}
}
*spte = 0;
+ kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
+static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page,
+ u64 *spte,
+ const void *new, int bytes)
+{
+ if (page->role.level != PT_PAGE_TABLE_LEVEL)
+ return;
+
+ if (page->role.glevels == PT32_ROOT_LEVEL)
+ paging32_update_pte(vcpu, page, spte, new, bytes);
+ else
+ paging64_update_pte(vcpu, page, spte, new, bytes);
}
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *old, const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *page;
@@ -1149,6 +1134,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
unsigned pte_size;
unsigned page_offset;
unsigned misaligned;
+ unsigned quadrant;
int level;
int flooded = 0;
int npte;
@@ -1169,6 +1155,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
continue;
pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+ misaligned |= bytes < 4;
if (misaligned || flooded) {
/*
* Misaligned accesses are too much trouble to fix
@@ -1182,7 +1169,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
*/
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, page->role.word);
- kvm_mmu_zap_page(vcpu, page);
+ kvm_mmu_zap_page(vcpu->kvm, page);
continue;
}
page_offset = offset;
@@ -1200,21 +1187,20 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
page_offset <<= 1;
npte = 2;
}
+ quadrant = page_offset >> PAGE_SHIFT;
page_offset &= ~PAGE_MASK;
+ if (quadrant != page->role.quadrant)
+ continue;
}
- spte = __va(page->page_hpa);
- spte += page_offset / sizeof(*spte);
+ spte = &page->spt[page_offset / sizeof(*spte)];
while (npte--) {
- mmu_pre_write_zap_pte(vcpu, page, spte);
+ mmu_pte_write_zap_pte(vcpu, page, spte);
+ mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
++spte;
}
}
}
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1229,7 +1215,7 @@ void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
page = container_of(vcpu->kvm->active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(vcpu, page);
+ kvm_mmu_zap_page(vcpu->kvm, page);
}
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
@@ -1241,14 +1227,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
page = container_of(vcpu->kvm->active_mmu_pages.next,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(vcpu, page);
- }
- while (!list_empty(&vcpu->free_pages)) {
- page = list_entry(vcpu->free_pages.next,
- struct kvm_mmu_page, link);
- list_del(&page->link);
- __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
- page->page_hpa = INVALID_PAGE;
+ kvm_mmu_zap_page(vcpu->kvm, page);
}
free_page((unsigned long)vcpu->mmu.pae_root);
}
@@ -1260,18 +1239,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
ASSERT(vcpu);
- for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
- struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
-
- INIT_LIST_HEAD(&page_header->link);
- if ((page = alloc_page(GFP_KERNEL)) == NULL)
- goto error_1;
- set_page_private(page, (unsigned long)page_header);
- page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
- memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
- list_add(&page_header->link, &vcpu->free_pages);
- ++vcpu->kvm->n_free_mmu_pages;
- }
+ vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
/*
* When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1296,7 +1264,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- ASSERT(list_empty(&vcpu->free_pages));
return alloc_mmu_pages(vcpu);
}
@@ -1305,7 +1272,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- ASSERT(!list_empty(&vcpu->free_pages));
return init_kvm_mmu(vcpu);
}
@@ -1319,9 +1285,8 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu);
}
-void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
- struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_page *page;
list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -1331,31 +1296,24 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
if (!test_bit(slot, &page->slot_bitmap))
continue;
- pt = __va(page->page_hpa);
+ pt = page->spt;
for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
/* avoid RMW */
if (pt[i] & PT_WRITABLE_MASK) {
- rmap_remove(vcpu, &pt[i]);
+ rmap_remove(&pt[i]);
pt[i] &= ~PT_WRITABLE_MASK;
}
}
}
-void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+void kvm_mmu_zap_all(struct kvm *kvm)
{
- destroy_kvm_mmu(vcpu);
+ struct kvm_mmu_page *page, *node;
- while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
- struct kvm_mmu_page *page;
+ list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
+ kvm_mmu_zap_page(kvm, page);
- page = container_of(vcpu->kvm->active_mmu_pages.next,
- struct kvm_mmu_page, link);
- kvm_mmu_zap_page(vcpu, page);
- }
-
- mmu_free_memory_caches(vcpu);
- kvm_arch_ops->tlb_flush(vcpu);
- init_kvm_mmu(vcpu);
+ kvm_flush_remote_tlbs(kvm);
}
void kvm_mmu_module_exit(void)
@@ -1364,21 +1322,29 @@ void kvm_mmu_module_exit(void)
kmem_cache_destroy(pte_chain_cache);
if (rmap_desc_cache)
kmem_cache_destroy(rmap_desc_cache);
+ if (mmu_page_header_cache)
+ kmem_cache_destroy(mmu_page_header_cache);
}
int kvm_mmu_module_init(void)
{
pte_chain_cache = kmem_cache_create("kvm_pte_chain",
sizeof(struct kvm_pte_chain),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!pte_chain_cache)
goto nomem;
rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
sizeof(struct kvm_rmap_desc),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!rmap_desc_cache)
goto nomem;
+ mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
+ sizeof(struct kvm_mmu_page),
+ 0, 0, NULL);
+ if (!mmu_page_header_cache)
+ goto nomem;
+
return 0;
nomem:
@@ -1482,7 +1448,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
int i;
list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
- u64 *pt = __va(page->page_hpa);
+ u64 *pt = page->spt;
if (page->role.level != PT_PAGE_TABLE_LEVEL)
continue;
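The mmu_*_memory_cache helpers reworked above exist so that the fault path, which runs under kvm->lock, can pull pre-allocated objects out of small per-vcpu caches instead of calling the allocator at an awkward time; the caches are topped up ahead of use, with a gfp_flags argument so the caller can choose GFP_KERNEL or GFP_ATOMIC as its locking context allows. A minimal sketch of the top-up-then-consume idea follows, with invented names and a fixed GFP_KERNEL top-up; it is not the driver's exact code.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define SKETCH_NR_OBJS 4

struct sketch_cache {
	int nobjs;
	void *objects[SKETCH_NR_OBJS];
};

/* May sleep: call with no spinlocks held. */
static int sketch_topup(struct sketch_cache *c, size_t size)
{
	while (c->nobjs < SKETCH_NR_OBJS) {
		void *obj = kzalloc(size, GFP_KERNEL);

		if (!obj)
			return -ENOMEM;
		c->objects[c->nobjs++] = obj;
	}
	return 0;
}

/* Never sleeps and never fails once topped up: safe under a spinlock. */
static void *sketch_alloc(struct sketch_cache *c)
{
	BUG_ON(!c->nobjs);
	return c->objects[--c->nobjs];
}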
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 73ffbffb1097..4b5391c717f8 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -31,7 +31,6 @@
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
- #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#else
@@ -46,7 +45,6 @@
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
- #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
#define PT_MAX_FULL_LEVELS 2
#else
#error Invalid PTTYPE value
@@ -192,40 +190,143 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}
-static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
- u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
+ u64 *shadow_pte,
+ gpa_t gaddr,
+ pt_element_t *gpte,
+ u64 access_bits,
+ int user_fault,
+ int write_fault,
+ int *ptwrite,
+ struct guest_walker *walker,
+ gfn_t gfn)
{
- ASSERT(*shadow_pte == 0);
- access_bits &= guest_pte;
- *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
- set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
- guest_pte & PT_DIRTY_MASK, access_bits, gfn);
+ hpa_t paddr;
+ int dirty = *gpte & PT_DIRTY_MASK;
+ u64 spte = *shadow_pte;
+ int was_rmapped = is_rmap_pte(spte);
+
+ pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
+ " user_fault %d gfn %lx\n",
+ __FUNCTION__, spte, (u64)*gpte, access_bits,
+ write_fault, user_fault, gfn);
+
+ if (write_fault && !dirty) {
+ *gpte |= PT_DIRTY_MASK;
+ dirty = 1;
+ FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
+ }
+
+ spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
+ spte |= *gpte & PT64_NX_MASK;
+ if (!dirty)
+ access_bits &= ~PT_WRITABLE_MASK;
+
+ paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
+
+ spte |= PT_PRESENT_MASK;
+ if (access_bits & PT_USER_MASK)
+ spte |= PT_USER_MASK;
+
+ if (is_error_hpa(paddr)) {
+ spte |= gaddr;
+ spte |= PT_SHADOW_IO_MARK;
+ spte &= ~PT_PRESENT_MASK;
+ set_shadow_pte(shadow_pte, spte);
+ return;
+ }
+
+ spte |= paddr;
+
+ if ((access_bits & PT_WRITABLE_MASK)
+ || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
+ struct kvm_mmu_page *shadow;
+
+ spte |= PT_WRITABLE_MASK;
+ if (user_fault) {
+ mmu_unshadow(vcpu, gfn);
+ goto unshadowed;
+ }
+
+ shadow = kvm_mmu_lookup_page(vcpu, gfn);
+ if (shadow) {
+ pgprintk("%s: found shadow page for %lx, marking ro\n",
+ __FUNCTION__, gfn);
+ access_bits &= ~PT_WRITABLE_MASK;
+ if (is_writeble_pte(spte)) {
+ spte &= ~PT_WRITABLE_MASK;
+ kvm_arch_ops->tlb_flush(vcpu);
+ }
+ if (write_fault)
+ *ptwrite = 1;
+ }
+ }
+
+unshadowed:
+
+ if (access_bits & PT_WRITABLE_MASK)
+ mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
+
+ set_shadow_pte(shadow_pte, spte);
+ page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
+ if (!was_rmapped)
+ rmap_add(vcpu, shadow_pte);
}
-static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
- u64 *shadow_pte, u64 access_bits, gfn_t gfn)
+static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
+ u64 *shadow_pte, u64 access_bits,
+ int user_fault, int write_fault, int *ptwrite,
+ struct guest_walker *walker, gfn_t gfn)
+{
+ access_bits &= *gpte;
+ FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
+ gpte, access_bits, user_fault, write_fault,
+ ptwrite, walker, gfn);
+}
+
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+ u64 *spte, const void *pte, int bytes)
+{
+ pt_element_t gpte;
+
+ if (bytes < sizeof(pt_element_t))
+ return;
+ gpte = *(const pt_element_t *)pte;
+ if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
+ return;
+ pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+ FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
+ 0, NULL, NULL,
+ (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
+}
+
+static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
+ u64 *shadow_pte, u64 access_bits,
+ int user_fault, int write_fault, int *ptwrite,
+ struct guest_walker *walker, gfn_t gfn)
{
gpa_t gaddr;
- ASSERT(*shadow_pte == 0);
- access_bits &= guest_pde;
+ access_bits &= *gpde;
gaddr = (gpa_t)gfn << PAGE_SHIFT;
if (PTTYPE == 32 && is_cpuid_PSE36())
- gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
+ gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
(32 - PT32_DIR_PSE36_SHIFT);
- *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
- set_pte_common(vcpu, shadow_pte, gaddr,
- guest_pde & PT_DIRTY_MASK, access_bits, gfn);
+ FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
+ gpde, access_bits, user_fault, write_fault,
+ ptwrite, walker, gfn);
}
/*
* Fetch a shadow pte for a specific level in the paging hierarchy.
*/
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
- struct guest_walker *walker)
+ struct guest_walker *walker,
+ int user_fault, int write_fault, int *ptwrite)
{
hpa_t shadow_addr;
int level;
+ u64 *shadow_ent;
u64 *prev_shadow_ent = NULL;
pt_element_t *guest_ent = walker->ptep;
@@ -242,43 +343,31 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
for (; ; level--) {
u32 index = SHADOW_PT_INDEX(addr, level);
- u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
struct kvm_mmu_page *shadow_page;
u64 shadow_pte;
int metaphysical;
gfn_t table_gfn;
unsigned hugepage_access = 0;
+ shadow_ent = ((u64 *)__va(shadow_addr)) + index;
if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
if (level == PT_PAGE_TABLE_LEVEL)
- return shadow_ent;
+ break;
shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
prev_shadow_ent = shadow_ent;
continue;
}
- if (level == PT_PAGE_TABLE_LEVEL) {
-
- if (walker->level == PT_DIRECTORY_LEVEL) {
- if (prev_shadow_ent)
- *prev_shadow_ent |= PT_SHADOW_PS_MARK;
- FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
- walker->inherited_ar,
- walker->gfn);
- } else {
- ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
- FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
- walker->inherited_ar,
- walker->gfn);
- }
- return shadow_ent;
- }
+ if (level == PT_PAGE_TABLE_LEVEL)
+ break;
if (level - 1 == PT_PAGE_TABLE_LEVEL
&& walker->level == PT_DIRECTORY_LEVEL) {
metaphysical = 1;
hugepage_access = *guest_ent;
hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+ if (*guest_ent & PT64_NX_MASK)
+ hugepage_access |= (1 << 2);
hugepage_access >>= PT_WRITABLE_SHIFT;
table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
>> PAGE_SHIFT;
@@ -289,90 +378,24 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
metaphysical, hugepage_access,
shadow_ent);
- shadow_addr = shadow_page->page_hpa;
+ shadow_addr = __pa(shadow_page->spt);
shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
*shadow_ent = shadow_pte;
prev_shadow_ent = shadow_ent;
}
-}
-/*
- * The guest faulted for write. We need to
- *
- * - check write permissions
- * - update the guest pte dirty bit
- * - update our own dirty page tracking structures
- */
-static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
- u64 *shadow_ent,
- struct guest_walker *walker,
- gva_t addr,
- int user,
- int *write_pt)
-{
- pt_element_t *guest_ent;
- int writable_shadow;
- gfn_t gfn;
- struct kvm_mmu_page *page;
-
- if (is_writeble_pte(*shadow_ent))
- return !user || (*shadow_ent & PT_USER_MASK);
-
- writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
- if (user) {
- /*
- * User mode access. Fail if it's a kernel page or a read-only
- * page.
- */
- if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
- return 0;
- ASSERT(*shadow_ent & PT_USER_MASK);
- } else
- /*
- * Kernel mode access. Fail if it's a read-only page and
- * supervisor write protection is enabled.
- */
- if (!writable_shadow) {
- if (is_write_protection(vcpu))
- return 0;
- *shadow_ent &= ~PT_USER_MASK;
- }
-
- guest_ent = walker->ptep;
-
- if (!is_present_pte(*guest_ent)) {
- *shadow_ent = 0;
- return 0;
+ if (walker->level == PT_DIRECTORY_LEVEL) {
+ FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
+ walker->inherited_ar, user_fault, write_fault,
+ ptwrite, walker, walker->gfn);
+ } else {
+ ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
+ FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
+ walker->inherited_ar, user_fault, write_fault,
+ ptwrite, walker, walker->gfn);
}
-
- gfn = walker->gfn;
-
- if (user) {
- /*
- * Usermode page faults won't be for page table updates.
- */
- while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
- pgprintk("%s: zap %lx %x\n",
- __FUNCTION__, gfn, page->role.word);
- kvm_mmu_zap_page(vcpu, page);
- }
- } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
- pgprintk("%s: found shadow page for %lx, marking ro\n",
- __FUNCTION__, gfn);
- mark_page_dirty(vcpu->kvm, gfn);
- FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
- *guest_ent |= PT_DIRTY_MASK;
- *write_pt = 1;
- return 0;
- }
- mark_page_dirty(vcpu->kvm, gfn);
- *shadow_ent |= PT_WRITABLE_MASK;
- FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
- *guest_ent |= PT_DIRTY_MASK;
- rmap_add(vcpu, shadow_ent);
-
- return 1;
+ return shadow_ent;
}
/*
@@ -397,7 +420,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
int fetch_fault = error_code & PFERR_FETCH_MASK;
struct guest_walker walker;
u64 *shadow_pte;
- int fixed;
int write_pt = 0;
int r;
@@ -421,27 +443,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
pgprintk("%s: guest page fault\n", __FUNCTION__);
inject_page_fault(vcpu, addr, walker.error_code);
FNAME(release_walker)(&walker);
+ vcpu->last_pt_write_count = 0; /* reset fork detector */
return 0;
}
- shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
- pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
- shadow_pte, *shadow_pte);
-
- /*
- * Update the shadow pte.
- */
- if (write_fault)
- fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
- user_fault, &write_pt);
- else
- fixed = fix_read_pf(shadow_pte);
-
- pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
- shadow_pte, *shadow_pte);
+ shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+ &write_pt);
+ pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+ shadow_pte, *shadow_pte, write_pt);
FNAME(release_walker)(&walker);
+ if (!write_pt)
+ vcpu->last_pt_write_count = 0; /* reset fork detector */
+
/*
* mmio: emulate if accessible, otherwise its a guest fault.
*/
@@ -478,7 +493,5 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
-#undef PT_PTE_COPY_MASK
-#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa17d6d4f0cb..bc818cc126e3 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -14,16 +14,17 @@
*
*/
+#include "kvm_svm.h"
+#include "x86_emulate.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
-#include <asm/desc.h>
-#include "kvm_svm.h"
-#include "x86_emulate.h"
+#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -378,7 +379,7 @@ static __init int svm_hardware_setup(void)
int cpu;
struct page *iopm_pages;
struct page *msrpm_pages;
- void *msrpm_va;
+ void *iopm_va, *msrpm_va;
int r;
kvm_emulator_want_group7_invlpg();
@@ -387,8 +388,10 @@ static __init int svm_hardware_setup(void)
if (!iopm_pages)
return -ENOMEM;
- memset(page_address(iopm_pages), 0xff,
- PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+
+ iopm_va = page_address(iopm_pages);
+ memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+ clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
@@ -579,7 +582,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
goto out2;
vcpu->svm->vmcb = page_address(page);
- memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+ clear_page(vcpu->svm->vmcb);
vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
vcpu->svm->asid_generation = 0;
memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
@@ -587,9 +590,9 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
fx_init(vcpu);
vcpu->fpu_active = 1;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vcpu == &vcpu->kvm->vcpus[0])
+ vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
return 0;
@@ -955,7 +958,7 @@ static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
* VMCB is undefined after a SHUTDOWN intercept
* so reinitialize it.
*/
- memset(vcpu->svm->vmcb, 0, PAGE_SIZE);
+ clear_page(vcpu->svm->vmcb);
init_vmcb(vcpu->svm->vmcb);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
@@ -1113,12 +1116,7 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1473,6 +1471,11 @@ static void load_db_regs(unsigned long *db_regs)
asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
+static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ force_new_asid(vcpu);
+}
+
static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u16 fs_selector;
@@ -1481,11 +1484,20 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int r;
again:
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ return r;
+
if (!vcpu->mmio_read_completed)
do_interrupt_requests(vcpu, kvm_run);
clgi();
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ svm_flush_tlb(vcpu);
+
pre_svm_run(vcpu);
save_host_msrs(vcpu);
@@ -1617,6 +1629,8 @@ again:
#endif
: "cc", "memory" );
+ vcpu->guest_mode = 0;
+
if (vcpu->fpu_active) {
fx_save(vcpu->guest_fx_image);
fx_restore(vcpu->host_fx_image);
@@ -1681,11 +1695,6 @@ again:
return r;
}
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
-{
- force_new_asid(vcpu);
-}
-
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
vcpu->svm->vmcb->save.cr3 = root;
@@ -1727,6 +1736,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
static int is_disabled(void)
{
+ u64 vm_cr;
+
+ rdmsrl(MSR_VM_CR, vm_cr);
+ if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
+ return 1;
+
return 0;
}
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 5e93814400ce..3b1b0f35b6cb 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -175,8 +175,11 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_CPUID_FUNC 0x8000000a
#define MSR_EFER_SVME_MASK (1ULL << 12)
+#define MSR_VM_CR 0xc0010114
#define MSR_VM_HSAVE_PA 0xc0010117ULL
+#define SVM_VM_CR_SVM_DISABLE 4
+
#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index c1ac106ace8c..80628f69916d 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -17,28 +17,35 @@
#include "kvm.h"
#include "vmx.h"
+#include "segment_descriptor.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>
+
#include <asm/io.h>
#include <asm/desc.h>
-#include "segment_descriptor.h"
-
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static int init_rmode_tss(struct kvm *kvm);
+
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+static struct page *vmx_io_bitmap_a;
+static struct page *vmx_io_bitmap_b;
+
#ifdef CONFIG_X86_64
#define HOST_IS_64 1
#else
#define HOST_IS_64 0
#endif
+#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
static struct vmcs_descriptor {
int size;
@@ -82,18 +89,17 @@ static const u32 vmx_msr_index[] = {
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
+static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
+{
+ return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
+}
+
+static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+{
+ int efer_offset = vcpu->msr_offset_efer;
+ return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
+ msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
+}
static inline int is_page_fault(u32 intr_info)
{
@@ -115,13 +121,23 @@ static inline int is_external_interrupt(u32 intr_info)
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
{
int i;
for (i = 0; i < vcpu->nmsrs; ++i)
if (vcpu->guest_msrs[i].index == msr)
- return &vcpu->guest_msrs[i];
+ return i;
+ return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+ int i;
+
+ i = __find_msr_index(vcpu, msr);
+ if (i >= 0)
+ return &vcpu->guest_msrs[i];
return NULL;
}
@@ -147,6 +163,7 @@ static void __vcpu_clear(void *arg)
vmcs_clear(vcpu->vmcs);
if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
+ rdtscll(vcpu->host_tsc);
}
static void vcpu_clear(struct kvm_vcpu *vcpu)
@@ -234,6 +251,127 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
vmcs_writel(field, vmcs_readl(field) | mask);
}
+static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+ u32 eb;
+
+ eb = 1u << PF_VECTOR;
+ if (!vcpu->fpu_active)
+ eb |= 1u << NM_VECTOR;
+ if (vcpu->guest_debug.enabled)
+ eb |= 1u << 1;
+ if (vcpu->rmode.active)
+ eb = ~0;
+ vmcs_write32(EXCEPTION_BITMAP, eb);
+}
+
+static void reload_tss(void)
+{
+#ifndef CONFIG_X86_64
+
+ /*
+ * VT restores TR but not its size. Useless.
+ */
+ struct descriptor_table gdt;
+ struct segment_descriptor *descs;
+
+ get_gdt(&gdt);
+ descs = (void *)gdt.base;
+ descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
+ load_TR_desc();
+#endif
+}
+
+static void load_transition_efer(struct kvm_vcpu *vcpu)
+{
+ u64 trans_efer;
+ int efer_offset = vcpu->msr_offset_efer;
+
+ trans_efer = vcpu->host_msrs[efer_offset].data;
+ trans_efer &= ~EFER_SAVE_RESTORE_BITS;
+ trans_efer |= msr_efer_save_restore_bits(
+ vcpu->guest_msrs[efer_offset]);
+ wrmsrl(MSR_EFER, trans_efer);
+ vcpu->stat.efer_reload++;
+}
+
+static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmx_host_state *hs = &vcpu->vmx_host_state;
+
+ if (hs->loaded)
+ return;
+
+ hs->loaded = 1;
+ /*
+ * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
+ * allow segment selectors with cpl > 0 or ti == 1.
+ */
+ hs->ldt_sel = read_ldt();
+ hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
+ hs->fs_sel = read_fs();
+ if (!(hs->fs_sel & 7))
+ vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
+ else {
+ vmcs_write16(HOST_FS_SELECTOR, 0);
+ hs->fs_gs_ldt_reload_needed = 1;
+ }
+ hs->gs_sel = read_gs();
+ if (!(hs->gs_sel & 7))
+ vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
+ else {
+ vmcs_write16(HOST_GS_SELECTOR, 0);
+ hs->fs_gs_ldt_reload_needed = 1;
+ }
+
+#ifdef CONFIG_X86_64
+ vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
+ vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
+#else
+ vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
+ vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
+#endif
+
+#ifdef CONFIG_X86_64
+ if (is_long_mode(vcpu)) {
+ save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
+ }
+#endif
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_transition_efer(vcpu);
+}
+
+static void vmx_load_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmx_host_state *hs = &vcpu->vmx_host_state;
+
+ if (!hs->loaded)
+ return;
+
+ hs->loaded = 0;
+ if (hs->fs_gs_ldt_reload_needed) {
+ load_ldt(hs->ldt_sel);
+ load_fs(hs->fs_sel);
+ /*
+ * If we have to reload gs, we must take care to
+ * preserve our gs base.
+ */
+ local_irq_disable();
+ load_gs(hs->gs_sel);
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+#endif
+ local_irq_enable();
+
+ reload_tss();
+ }
+ save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
+ load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
+ if (msr_efer_need_save_restore(vcpu))
+ load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
+}
+
/*
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
@@ -242,6 +380,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
u64 phys_addr = __pa(vcpu->vmcs);
int cpu;
+ u64 tsc_this, delta;
cpu = get_cpu();
@@ -275,15 +414,43 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+ /*
+ * Make sure the time stamp counter is monotonic.
+ */
+ rdtscll(tsc_this);
+ delta = vcpu->host_tsc - tsc_this;
+ vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
}
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
+ vmx_load_host_state(vcpu);
kvm_put_guest_fpu(vcpu);
put_cpu();
}
+static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 1;
+ vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ if (vcpu->cr0 & CR0_TS_MASK)
+ vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+ update_exception_bitmap(vcpu);
+}
+
+static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->fpu_active)
+ return;
+ vcpu->fpu_active = 0;
+ vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
+ update_exception_bitmap(vcpu);
+}
+
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
vcpu_clear(vcpu);
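The TSC_OFFSET adjustment added to vmx_vcpu_load() above keeps the guest-visible TSC monotonic when a vcpu migrates to a CPU whose TSC reads lower than the one it left. A minimal sketch of the arithmetic, assuming the usual VMX relationship guest_tsc = host_tsc + TSC_OFFSET (the helper below is illustrative only, not part of the patch):

/*
 * Illustration: on the new CPU the guest reads
 *   new_host_tsc + old_offset + (old_host_tsc - new_host_tsc)
 *     = old_host_tsc + old_offset,
 * i.e. exactly the value it saw before migrating, so the guest TSC
 * never appears to go backwards.
 */
static u64 adjust_tsc_offset(u64 old_offset, u64 old_host_tsc, u64 new_host_tsc)
{
	return old_offset + (old_host_tsc - new_host_tsc);
}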
@@ -332,41 +499,61 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
}
/*
+ * Swap MSR entry in host/guest MSR entry array.
+ */
+void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+{
+ struct vmx_msr_entry tmp;
+ tmp = vcpu->guest_msrs[to];
+ vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
+ vcpu->guest_msrs[from] = tmp;
+ tmp = vcpu->host_msrs[to];
+ vcpu->host_msrs[to] = vcpu->host_msrs[from];
+ vcpu->host_msrs[from] = tmp;
+}
+
+/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
* mode, as fiddling with msrs is very expensive.
*/
static void setup_msrs(struct kvm_vcpu *vcpu)
{
- int nr_skip, nr_good_msrs;
-
- if (is_long_mode(vcpu))
- nr_skip = NR_BAD_MSRS;
- else
- nr_skip = NR_64BIT_MSRS;
- nr_good_msrs = vcpu->nmsrs - nr_skip;
+ int save_nmsrs;
- /*
- * MSR_K6_STAR is only needed on long mode guests, and only
- * if efer.sce is enabled.
- */
- if (find_msr_entry(vcpu, MSR_K6_STAR)) {
- --nr_good_msrs;
+ save_nmsrs = 0;
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
- ++nr_good_msrs;
-#endif
+ if (is_long_mode(vcpu)) {
+ int index;
+
+ index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_LSTAR);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_CSTAR);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ if (index >= 0)
+ move_msr_up(vcpu, index, save_nmsrs++);
+ /*
+ * MSR_K6_STAR is only needed on long mode guests, and only
+ * if efer.sce is enabled.
+ */
+ index = __find_msr_index(vcpu, MSR_K6_STAR);
+ if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
+ move_msr_up(vcpu, index, save_nmsrs++);
}
+#endif
+ vcpu->save_nmsrs = save_nmsrs;
- vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
- virt_to_phys(vcpu->guest_msrs + nr_skip));
- vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
- virt_to_phys(vcpu->host_msrs + nr_skip));
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+#ifdef CONFIG_X86_64
+ vcpu->msr_offset_kernel_gs_base =
+ __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+#endif
+ vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}
/*
@@ -394,23 +581,6 @@ static void guest_write_tsc(u64 guest_tsc)
vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}
-static void reload_tss(void)
-{
-#ifndef CONFIG_X86_64
-
- /*
- * VT restores TR but not its size. Useless.
- */
- struct descriptor_table gdt;
- struct segment_descriptor *descs;
-
- get_gdt(&gdt);
- descs = (void *)gdt.base;
- descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
- load_TR_desc();
-#endif
-}
-
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
@@ -470,10 +640,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
struct vmx_msr_entry *msr;
+ int ret = 0;
+
switch (msr_index) {
#ifdef CONFIG_X86_64
case MSR_EFER:
- return kvm_set_msr_common(vcpu, msr_index, data);
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
+ if (vcpu->vmx_host_state.loaded)
+ load_transition_efer(vcpu);
+ break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
break;
@@ -497,14 +672,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
msr = find_msr_entry(vcpu, msr_index);
if (msr) {
msr->data = data;
+ if (vcpu->vmx_host_state.loaded)
+ load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
break;
}
- return kvm_set_msr_common(vcpu, msr_index, data);
- msr->data = data;
- break;
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
}
- return 0;
+ return ret;
}
/*
@@ -530,10 +705,8 @@ static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
unsigned long dr7 = 0x400;
- u32 exception_bitmap;
int old_singlestep;
- exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
old_singlestep = vcpu->guest_debug.singlestep;
vcpu->guest_debug.enabled = dbg->enabled;
@@ -549,13 +722,9 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
dr7 |= 0 << (i*4+16); /* execution breakpoint */
}
- exception_bitmap |= (1u << 1); /* Trap debug exceptions */
-
vcpu->guest_debug.singlestep = dbg->singlestep;
- } else {
- exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
+ } else
vcpu->guest_debug.singlestep = 0;
- }
if (old_singlestep && !vcpu->guest_debug.singlestep) {
unsigned long flags;
@@ -565,7 +734,7 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
vmcs_writel(GUEST_RFLAGS, flags);
}
- vmcs_write32(EXCEPTION_BITMAP, exception_bitmap);
+ update_exception_bitmap(vcpu);
vmcs_writel(GUEST_DR7, dr7);
return 0;
@@ -679,14 +848,6 @@ static __exit void hardware_unsetup(void)
free_kvm_area();
}
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
-{
- if (vcpu->rmode.active)
- vmcs_write32(EXCEPTION_BITMAP, ~0);
- else
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
-}
-
static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -793,6 +954,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+
+ init_rmode_tss(vcpu->kvm);
}
#ifdef CONFIG_X86_64
@@ -837,6 +1000,8 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
+ vmx_fpu_deactivate(vcpu);
+
if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
enter_pmode(vcpu);
@@ -852,26 +1017,20 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
#endif
- if (!(cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
- }
-
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0,
(cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
vcpu->cr0 = cr0;
+
+ if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
+ vmx_fpu_activate(vcpu);
}
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
vmcs_writel(GUEST_CR3, cr3);
-
- if (!(vcpu->cr0 & CR0_TS_MASK)) {
- vcpu->fpu_active = 0;
- vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
- vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- }
+ if (vcpu->cr0 & CR0_PE_MASK)
+ vmx_fpu_deactivate(vcpu);
}
static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -937,23 +1096,11 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
var->unusable = (ar >> 16) & 1;
}
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
+static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
- struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
u32 ar;
- vmcs_writel(sf->base, var->base);
- vmcs_write32(sf->limit, var->limit);
- vmcs_write16(sf->selector, var->selector);
- if (vcpu->rmode.active && var->s) {
- /*
- * Hack real-mode segments into vm86 compatibility.
- */
- if (var->base == 0xffff0000 && var->selector == 0xf000)
- vmcs_writel(sf->base, 0xf0000);
- ar = 0xf3;
- } else if (var->unusable)
+ if (var->unusable)
ar = 1 << 16;
else {
ar = var->type & 15;
@@ -967,6 +1114,35 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
}
if (ar == 0) /* a 0 value means unusable */
ar = AR_UNUSABLE_MASK;
+
+ return ar;
+}
+
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg)
+{
+ struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ u32 ar;
+
+ if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
+ vcpu->rmode.tr.selector = var->selector;
+ vcpu->rmode.tr.base = var->base;
+ vcpu->rmode.tr.limit = var->limit;
+ vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
+ return;
+ }
+ vmcs_writel(sf->base, var->base);
+ vmcs_write32(sf->limit, var->limit);
+ vmcs_write16(sf->selector, var->selector);
+ if (vcpu->rmode.active && var->s) {
+ /*
+ * Hack real-mode segments into vm86 compatibility.
+ */
+ if (var->base == 0xffff0000 && var->selector == 0xf000)
+ vmcs_writel(sf->base, 0xf0000);
+ ar = 0xf3;
+ } else
+ ar = vmx_segment_access_rights(var);
vmcs_write32(sf->ar_bytes, ar);
}
@@ -1018,16 +1194,16 @@ static int init_rmode_tss(struct kvm* kvm)
}
page = kmap_atomic(p1, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p2, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
kunmap_atomic(page, KM_USER0);
page = kmap_atomic(p3, KM_USER0);
- memset(page, 0, PAGE_SIZE);
+ clear_page(page);
*(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
kunmap_atomic(page, KM_USER0);
@@ -1066,7 +1242,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
struct descriptor_table dt;
int i;
int ret = 0;
- extern asmlinkage void kvm_vmx_return(void);
+ unsigned long kvm_vmx_return;
if (!init_rmode_tss(vcpu->kvm)) {
ret = -ENOMEM;
@@ -1076,9 +1252,9 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
memset(vcpu->regs, 0, sizeof(vcpu->regs));
vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 |
- /*for vcpu 0*/ MSR_IA32_APICBASE_BSP |
- MSR_IA32_APICBASE_ENABLE;
+ vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vcpu == &vcpu->kvm->vcpus[0])
+ vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
fx_init(vcpu);
@@ -1129,8 +1305,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
/* I/O */
- vmcs_write64(IO_BITMAP_A, 0);
- vmcs_write64(IO_BITMAP_B, 0);
+ vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
+ vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
guest_write_tsc(0);
@@ -1150,12 +1326,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
CPU_BASED_HLT_EXITING /* 20.6.2 */
| CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
| CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
- | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */
+ | CPU_BASED_ACTIVATE_IO_BITMAP /* 20.6.2 */
| CPU_BASED_MOV_DR_EXITING
| CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
);
- vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
@@ -1185,8 +1360,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
get_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
-
- vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */
+ asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
+ vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -1210,10 +1388,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->host_msrs[j].reserved = 0;
vcpu->host_msrs[j].data = data;
vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
- if (index == MSR_KERNEL_GS_BASE)
- msr_offset_kernel_gs_base = j;
-#endif
++vcpu->nmsrs;
}
@@ -1241,6 +1415,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
vmx_set_efer(vcpu, 0);
#endif
+ vmx_fpu_activate(vcpu);
+ update_exception_bitmap(vcpu);
return 0;
@@ -1365,7 +1541,11 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (!vcpu->rmode.active)
return 0;
- if (vec == GP_VECTOR && err_code == 0)
+ /*
+ * Instructions with the address size override prefix (opcode 0x67)
+ * cause the #SS fault with error code 0 in VM86 mode.
+ */
+ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
return 1;
return 0;
@@ -1400,10 +1580,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
if (is_no_device(intr_info)) {
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- if (!(vcpu->cr0 & CR0_TS_MASK))
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_activate(vcpu);
return 1;
}
@@ -1445,8 +1622,13 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->rmode.active &&
handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
- error_code))
+ error_code)) {
+ if (vcpu->halt_request) {
+ vcpu->halt_request = 0;
+ return kvm_emulate_halt(vcpu);
+ }
return 1;
+ }
if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -1595,11 +1777,10 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
break;
case 2: /* clts */
vcpu_load_rsp_rip(vcpu);
- vcpu->fpu_active = 1;
- vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
- vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
+ vmx_fpu_deactivate(vcpu);
vcpu->cr0 &= ~CR0_TS_MASK;
vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
+ vmx_fpu_activate(vcpu);
skip_emulated_instruction(vcpu);
return 1;
case 1: /*mov from cr*/
@@ -1734,12 +1915,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
skip_emulated_instruction(vcpu);
- if (vcpu->irq_summary)
- return 1;
-
- kvm_run->exit_reason = KVM_EXIT_HLT;
- ++vcpu->stat.halt_exits;
- return 0;
+ return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1770,7 +1946,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
};
static const int kvm_vmx_max_exit_handlers =
- sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers);
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
* The guest has exited. See if we can fix it or if we need userspace
@@ -1810,61 +1986,44 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+}
+
static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
u8 fail;
- u16 fs_sel, gs_sel, ldt_sel;
- int fs_gs_ldt_reload_needed;
int r;
-again:
- /*
- * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
- * allow segment selectors with cpl > 0 or ti == 1.
- */
- fs_sel = read_fs();
- gs_sel = read_gs();
- ldt_sel = read_ldt();
- fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
- if (!fs_gs_ldt_reload_needed) {
- vmcs_write16(HOST_FS_SELECTOR, fs_sel);
- vmcs_write16(HOST_GS_SELECTOR, gs_sel);
- } else {
- vmcs_write16(HOST_FS_SELECTOR, 0);
- vmcs_write16(HOST_GS_SELECTOR, 0);
- }
-
-#ifdef CONFIG_X86_64
- vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
- vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
-#else
- vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
- vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
-#endif
+preempted:
+ if (vcpu->guest_debug.enabled)
+ kvm_guest_debug_pre(vcpu);
+again:
if (!vcpu->mmio_read_completed)
do_interrupt_requests(vcpu, kvm_run);
- if (vcpu->guest_debug.enabled)
- kvm_guest_debug_pre(vcpu);
-
+ vmx_save_host_state(vcpu);
kvm_load_guest_fpu(vcpu);
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r))
+ goto out;
+
/*
* Loading guest fpu may have cleared host cr0.ts
*/
vmcs_writel(HOST_CR0, read_cr0());
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
- load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- }
-#endif
+ local_irq_disable();
+
+ vcpu->guest_mode = 1;
+ if (vcpu->requests)
+ if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
+ vmx_flush_tlb(vcpu);
asm (
/* Store host registers */
- "pushf \n\t"
#ifdef CONFIG_X86_64
"push %%rax; push %%rbx; push %%rdx;"
"push %%rsi; push %%rdi; push %%rbp;"
@@ -1909,12 +2068,11 @@ again:
"mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
#endif
/* Enter guest mode */
- "jne launched \n\t"
+ "jne .Llaunched \n\t"
ASM_VMX_VMLAUNCH "\n\t"
- "jmp kvm_vmx_return \n\t"
- "launched: " ASM_VMX_VMRESUME "\n\t"
- ".globl kvm_vmx_return \n\t"
- "kvm_vmx_return: "
+ "jmp .Lkvm_vmx_return \n\t"
+ ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+ ".Lkvm_vmx_return: "
/* Save guest registers, load host registers, keep flags */
#ifdef CONFIG_X86_64
"xchg %3, (%%rsp) \n\t"
@@ -1957,7 +2115,6 @@ again:
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- "popf \n\t"
: "=q" (fail)
: "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
@@ -1981,84 +2138,61 @@ again:
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
- /*
- * Reload segment selectors ASAP. (it's needed for a functional
- * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
- * relies on having 0 in %gs for the CPU PDA to work.)
- */
- if (fs_gs_ldt_reload_needed) {
- load_ldt(ldt_sel);
- load_fs(fs_sel);
- /*
- * If we have to reload gs, we must take care to
- * preserve our gs base.
- */
- local_irq_disable();
- load_gs(gs_sel);
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
-#endif
- local_irq_enable();
+ vcpu->guest_mode = 0;
+ local_irq_enable();
- reload_tss();
- }
++vcpu->stat.exits;
-#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
- save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
- load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
- }
-#endif
-
vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
- if (fail) {
+ if (unlikely(fail)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= vmcs_read32(VM_INSTRUCTION_ERROR);
r = 0;
- } else {
- /*
- * Profile KVM exit RIPs:
- */
- if (unlikely(prof_on == KVM_PROFILING))
- profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
- vcpu->launched = 1;
- r = kvm_handle_exit(kvm_run, vcpu);
- if (r > 0) {
- /* Give scheduler a chance to reschedule. */
- if (signal_pending(current)) {
- ++vcpu->stat.signal_exits;
- post_kvm_run_save(vcpu, kvm_run);
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
- ++vcpu->stat.request_irq_exits;
- post_kvm_run_save(vcpu, kvm_run);
- kvm_run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
- kvm_resched(vcpu);
+ goto out;
+ }
+ /*
+ * Profile KVM exit RIPs:
+ */
+ if (unlikely(prof_on == KVM_PROFILING))
+ profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+ vcpu->launched = 1;
+ r = kvm_handle_exit(kvm_run, vcpu);
+ if (r > 0) {
+ /* Give scheduler a chance to reschedule. */
+ if (signal_pending(current)) {
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.signal_exits;
+ goto out;
+ }
+
+ if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+ r = -EINTR;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.request_irq_exits;
+ goto out;
+ }
+ if (!need_resched()) {
+ ++vcpu->stat.light_exits;
goto again;
}
}
+out:
+ if (r > 0) {
+ kvm_resched(vcpu);
+ goto preempted;
+ }
+
post_kvm_run_save(vcpu, kvm_run);
return r;
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
- vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
-}
-
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
unsigned long addr,
u32 err_code)
@@ -2122,7 +2256,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmcs_clear(vmcs);
vcpu->vmcs = vmcs;
vcpu->launched = 0;
- vcpu->fpu_active = 1;
return 0;
@@ -2188,11 +2321,50 @@ static struct kvm_arch_ops vmx_arch_ops = {
static int __init vmx_init(void)
{
- return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ void *iova;
+ int r;
+
+ vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_io_bitmap_a)
+ return -ENOMEM;
+
+ vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!vmx_io_bitmap_b) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Allow direct access to the PC debug port (it is often used for I/O
+ * delays, but the vmexits simply slow things down).
+ */
+ iova = kmap(vmx_io_bitmap_a);
+ memset(iova, 0xff, PAGE_SIZE);
+ clear_bit(0x80, iova);
+ kunmap(vmx_io_bitmap_a);
+
+ iova = kmap(vmx_io_bitmap_b);
+ memset(iova, 0xff, PAGE_SIZE);
+ kunmap(vmx_io_bitmap_b);
+
+ r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ if (r)
+ goto out1;
+
+ return 0;
+
+out1:
+ __free_page(vmx_io_bitmap_b);
+out:
+ __free_page(vmx_io_bitmap_a);
+ return r;
}
static void __exit vmx_exit(void)
{
+ __free_page(vmx_io_bitmap_b);
+ __free_page(vmx_io_bitmap_a);
+
kvm_exit_arch();
}
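vmx_init() above fills both I/O bitmaps with 0xff and clears only the bit for port 0x80, so every port except the PC debug port still traps. A sketch of the permission check the hardware effectively applies, assuming the standard VMX split of bitmap A for ports 0x0000-0x7fff and bitmap B for ports 0x8000-0xffff; the helper itself is illustrative only and uses the kernel's test_bit():

/*
 * A set bit forces a VM exit for that port; a clear bit lets the access
 * through, which is why clear_bit(0x80, ...) on bitmap A passes the PC
 * debug port directly to the hardware.
 */
static int port_intercepted(const unsigned long *bitmap_a,
			    const unsigned long *bitmap_b, u16 port)
{
	return port < 0x8000 ? test_bit(port, bitmap_a)
			     : test_bit(port - 0x8000, bitmap_b);
}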
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7ade09086aa5..1b800fc00342 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -98,8 +98,11 @@ static u8 opcode_table[256] = {
0, 0, 0, 0,
/* 0x40 - 0x4F */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x50 - 0x5F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x50 - 0x57 */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x58 - 0x5F */
+ ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
+ ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
/* 0x60 - 0x6F */
0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -128,9 +131,9 @@ static u8 opcode_table[256] = {
/* 0xB0 - 0xBF */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xC0 - 0xC7 */
- ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0,
- 0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov,
- DstMem | SrcImm | ModRM | Mov,
+ ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+ 0, ImplicitOps, 0, 0,
+ ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
/* 0xC8 - 0xCF */
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xD0 - 0xD7 */
@@ -143,7 +146,8 @@ static u8 opcode_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
- 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+ ImplicitOps, 0,
+ ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
/* 0xF8 - 0xFF */
0, 0, 0, 0,
0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
@@ -152,14 +156,14 @@ static u8 opcode_table[256] = {
static u16 twobyte_table[256] = {
/* 0x00 - 0x0F */
0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
- 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
+ 0, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
/* 0x10 - 0x1F */
0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
/* 0x20 - 0x2F */
ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0x30 - 0x3F */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ImplicitOps, 0, ImplicitOps, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x40 - 0x47 */
DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -481,6 +485,8 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
int mode = ctxt->mode;
unsigned long modrm_ea;
int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
+ int no_wb = 0;
+ u64 msr_data;
/* Shadow copy of register state. Committed on successful emulation. */
unsigned long _regs[NR_VCPU_REGS];
@@ -1047,7 +1053,7 @@ done_prefixes:
_regs[VCPU_REGS_RSP]),
&dst.val, dst.bytes, ctxt)) != 0)
goto done;
- dst.val = dst.orig_val; /* skanky: disable writeback */
+ no_wb = 1;
break;
default:
goto cannot_emulate;
@@ -1056,7 +1062,7 @@ done_prefixes:
}
writeback:
- if ((d & Mov) || (dst.orig_val != dst.val)) {
+ if (!no_wb) {
switch (dst.type) {
case OP_REG:
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
@@ -1149,6 +1155,23 @@ special_insn:
case 0xae ... 0xaf: /* scas */
DPRINTF("Urk! I don't handle SCAS.\n");
goto cannot_emulate;
+ case 0xf4: /* hlt */
+ ctxt->vcpu->halt_request = 1;
+ goto done;
+ case 0xc3: /* ret */
+ dst.ptr = &_eip;
+ goto pop_instruction;
+ case 0x58 ... 0x5f: /* pop reg */
+ dst.ptr = (unsigned long *)&_regs[b & 0x7];
+
+pop_instruction:
+ if ((rc = ops->read_std(register_address(ctxt->ss_base,
+ _regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt)) != 0)
+ goto done;
+
+ register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
+ no_wb = 1; /* Disable writeback. */
+ break;
}
goto writeback;
@@ -1302,8 +1325,10 @@ twobyte_insn:
twobyte_special_insn:
/* Disable writeback. */
- dst.orig_val = dst.val;
+ no_wb = 1;
switch (b) {
+ case 0x09: /* wbinvd */
+ break;
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
break;
@@ -1320,6 +1345,29 @@ twobyte_special_insn:
goto cannot_emulate;
realmode_set_cr(ctxt->vcpu, modrm_reg, modrm_val, &_eflags);
break;
+ case 0x30:
+ /* wrmsr */
+ msr_data = (u32)_regs[VCPU_REGS_RAX]
+ | ((u64)_regs[VCPU_REGS_RDX] << 32);
+ rc = kvm_set_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], msr_data);
+ if (rc) {
+ kvm_arch_ops->inject_gp(ctxt->vcpu, 0);
+ _eip = ctxt->vcpu->rip;
+ }
+ rc = X86EMUL_CONTINUE;
+ break;
+ case 0x32:
+ /* rdmsr */
+ rc = kvm_get_msr(ctxt->vcpu, _regs[VCPU_REGS_RCX], &msr_data);
+ if (rc) {
+ kvm_arch_ops->inject_gp(ctxt->vcpu, 0);
+ _eip = ctxt->vcpu->rip;
+ } else {
+ _regs[VCPU_REGS_RAX] = (u32)msr_data;
+ _regs[VCPU_REGS_RDX] = msr_data >> 32;
+ }
+ rc = X86EMUL_CONTINUE;
+ break;
case 0xc7: /* Grp9 (cmpxchg8b) */
{
u64 old, new;
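The new wrmsr/rdmsr cases follow the architectural register convention for these instructions: ECX selects the MSR and EDX:EAX carry the 64-bit value, which the (u32)/shift packing above implements. A standalone illustration of that packing (not taken from the patch):

static u64 msr_value_from_edx_eax(u32 eax, u32 edx)
{
	return (u64)eax | ((u64)edx << 32);	/* EDX holds the high half */
}

static void msr_value_to_edx_eax(u64 val, u32 *eax, u32 *edx)
{
	*eax = (u32)val;
	*edx = (u32)(val >> 32);
}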
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 87d2046f866c..4468cb3a8d24 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,9 +1,6 @@
-
-menu "LED devices"
- depends on HAS_IOMEM
-
-config NEW_LEDS
+menuconfig NEW_LEDS
bool "LED Support"
+ depends on HAS_IOMEM
help
Say Y to enable Linux LED support. This allows control of supported
LEDs from both userspace and optionally, by kernel events (triggers).
@@ -11,9 +8,10 @@ config NEW_LEDS
This is not related to standard keyboard LEDs which are controlled
via the input system.
+if NEW_LEDS
+
config LEDS_CLASS
tristate "LED Class Support"
- depends on NEW_LEDS
help
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
@@ -95,11 +93,18 @@ config LEDS_COBALT
help
This option enables support for the front LED on Cobalt Server
+config LEDS_GPIO
+ tristate "LED Support for GPIO connected LEDs"
+ depends on LEDS_CLASS && GENERIC_GPIO
+ help
+ This option enables support for LEDs connected to GPIO
+ outputs. To be useful, the particular board must have LEDs
+ connected to its GPIO lines.
+
comment "LED Triggers"
config LEDS_TRIGGERS
bool "LED Trigger support"
- depends on NEW_LEDS
help
This option enables trigger support for the leds class.
These triggers allow kernel events to drive the LEDs and can
@@ -128,5 +133,4 @@ config LEDS_TRIGGER_HEARTBEAT
load average.
If unsure, say Y.
-endmenu
-
+endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aa2c18efa5b2..f8995c9bc2ea 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
obj-$(CONFIG_LEDS_COBALT) += leds-cobalt.o
+obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 3c1711210e38..4211293ce862 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -2,7 +2,7 @@
* LED Class Core
*
* Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
- * Copyright (C) 2005-2006 Richard Purdie <rpurdie@openedhand.com>
+ * Copyright (C) 2005-2007 Richard Purdie <rpurdie@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -24,9 +24,10 @@
static struct class *leds_class;
-static ssize_t led_brightness_show(struct class_device *dev, char *buf)
+static ssize_t led_brightness_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
ssize_t ret = 0;
/* no lock needed for this */
@@ -36,10 +37,10 @@ static ssize_t led_brightness_show(struct class_device *dev, char *buf)
return ret;
}
-static ssize_t led_brightness_store(struct class_device *dev,
- const char *buf, size_t size)
+static ssize_t led_brightness_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
ssize_t ret = -EINVAL;
char *after;
unsigned long state = simple_strtoul(buf, &after, 10);
@@ -56,10 +57,9 @@ static ssize_t led_brightness_store(struct class_device *dev,
return ret;
}
-static CLASS_DEVICE_ATTR(brightness, 0644, led_brightness_show,
- led_brightness_store);
+static DEVICE_ATTR(brightness, 0644, led_brightness_show, led_brightness_store);
#ifdef CONFIG_LEDS_TRIGGERS
-static CLASS_DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
+static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
#endif
/**
@@ -93,16 +93,15 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
int rc;
- led_cdev->class_dev = class_device_create(leds_class, NULL, 0,
- parent, "%s", led_cdev->name);
- if (unlikely(IS_ERR(led_cdev->class_dev)))
- return PTR_ERR(led_cdev->class_dev);
+ led_cdev->dev = device_create(leds_class, parent, 0, "%s",
+ led_cdev->name);
+ if (unlikely(IS_ERR(led_cdev->dev)))
+ return PTR_ERR(led_cdev->dev);
- class_set_devdata(led_cdev->class_dev, led_cdev);
+ dev_set_drvdata(led_cdev->dev, led_cdev);
/* register the attributes */
- rc = class_device_create_file(led_cdev->class_dev,
- &class_device_attr_brightness);
+ rc = device_create_file(led_cdev->dev, &dev_attr_brightness);
if (rc)
goto err_out;
@@ -114,8 +113,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
#ifdef CONFIG_LEDS_TRIGGERS
rwlock_init(&led_cdev->trigger_lock);
- rc = class_device_create_file(led_cdev->class_dev,
- &class_device_attr_trigger);
+ rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
if (rc)
goto err_out_led_list;
@@ -123,18 +121,17 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
#endif
printk(KERN_INFO "Registered led device: %s\n",
- led_cdev->class_dev->class_id);
+ led_cdev->name);
return 0;
#ifdef CONFIG_LEDS_TRIGGERS
err_out_led_list:
- class_device_remove_file(led_cdev->class_dev,
- &class_device_attr_brightness);
+ device_remove_file(led_cdev->dev, &dev_attr_brightness);
list_del(&led_cdev->node);
#endif
err_out:
- class_device_unregister(led_cdev->class_dev);
+ device_unregister(led_cdev->dev);
return rc;
}
EXPORT_SYMBOL_GPL(led_classdev_register);
@@ -147,18 +144,16 @@ EXPORT_SYMBOL_GPL(led_classdev_register);
*/
void led_classdev_unregister(struct led_classdev *led_cdev)
{
- class_device_remove_file(led_cdev->class_dev,
- &class_device_attr_brightness);
+ device_remove_file(led_cdev->dev, &dev_attr_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
- class_device_remove_file(led_cdev->class_dev,
- &class_device_attr_trigger);
+ device_remove_file(led_cdev->dev, &dev_attr_trigger);
write_lock(&led_cdev->trigger_lock);
if (led_cdev->trigger)
led_trigger_set(led_cdev, NULL);
write_unlock(&led_cdev->trigger_lock);
#endif
- class_device_unregister(led_cdev->class_dev);
+ device_unregister(led_cdev->dev);
write_lock(&leds_list_lock);
list_del(&led_cdev->node);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 454fb0901f82..575368c2b100 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -1,7 +1,7 @@
/*
* LED Triggers Core
*
- * Copyright 2005-2006 Openedhand Ltd.
+ * Copyright 2005-2007 Openedhand Ltd.
*
* Author: Richard Purdie <rpurdie@openedhand.com>
*
@@ -28,10 +28,10 @@
static DEFINE_RWLOCK(triggers_list_lock);
static LIST_HEAD(trigger_list);
-ssize_t led_trigger_store(struct class_device *dev, const char *buf,
- size_t count)
+ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
char trigger_name[TRIG_NAME_MAX];
struct led_trigger *trig;
size_t len;
@@ -67,9 +67,10 @@ ssize_t led_trigger_store(struct class_device *dev, const char *buf,
}
-ssize_t led_trigger_show(struct class_device *dev, char *buf)
+ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_trigger *trig;
int len = 0;
@@ -183,13 +184,20 @@ int led_trigger_register(struct led_trigger *trigger)
void led_trigger_register_simple(const char *name, struct led_trigger **tp)
{
struct led_trigger *trigger;
+ int err;
trigger = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (trigger) {
trigger->name = name;
- led_trigger_register(trigger);
- }
+ err = led_trigger_register(trigger);
+ if (err < 0)
+ printk(KERN_WARNING "LED trigger %s failed to register"
+ " (%d)\n", name, err);
+ } else
+ printk(KERN_WARNING "LED trigger %s failed to register"
+ " (no memory)\n", name);
+
*tp = trigger;
}
@@ -215,7 +223,8 @@ void led_trigger_unregister(struct led_trigger *trigger)
void led_trigger_unregister_simple(struct led_trigger *trigger)
{
- led_trigger_unregister(trigger);
+ if (trigger)
+ led_trigger_unregister(trigger);
kfree(trigger);
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
new file mode 100644
index 000000000000..47d90db280ce
--- /dev/null
+++ b/drivers/leds/leds-gpio.c
@@ -0,0 +1,199 @@
+/*
+ * LEDs driver for GPIOs
+ *
+ * Copyright (C) 2007 8D Technologies inc.
+ * Raphael Assenat <raph@8d.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+
+#include <asm/gpio.h>
+
+struct gpio_led_data {
+ struct led_classdev cdev;
+ unsigned gpio;
+ struct work_struct work;
+ u8 new_level;
+ u8 can_sleep;
+ u8 active_low;
+};
+
+static void gpio_led_work(struct work_struct *work)
+{
+ struct gpio_led_data *led_dat =
+ container_of(work, struct gpio_led_data, work);
+
+ gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
+}
+
+static void gpio_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct gpio_led_data *led_dat =
+ container_of(led_cdev, struct gpio_led_data, cdev);
+ int level;
+
+ if (value == LED_OFF)
+ level = 0;
+ else
+ level = 1;
+
+ if (led_dat->active_low)
+ level = !level;
+
+ /* setting GPIOs with I2C/etc requires a preemptible task context */
+ if (led_dat->can_sleep) {
+ if (preempt_count()) {
+ led_dat->new_level = level;
+ schedule_work(&led_dat->work);
+ } else
+ gpio_set_value_cansleep(led_dat->gpio, level);
+ } else
+ gpio_set_value(led_dat->gpio, level);
+}
+
+static int __init gpio_led_probe(struct platform_device *pdev)
+{
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led *cur_led;
+ struct gpio_led_data *leds_data, *led_dat;
+ int i, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ leds_data = kzalloc(sizeof(struct gpio_led_data) * pdata->num_leds,
+ GFP_KERNEL);
+ if (!leds_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ cur_led = &pdata->leds[i];
+ led_dat = &leds_data[i];
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->gpio = cur_led->gpio;
+ led_dat->can_sleep = gpio_cansleep(cur_led->gpio);
+ led_dat->active_low = cur_led->active_low;
+ led_dat->cdev.brightness_set = gpio_led_set;
+ led_dat->cdev.brightness = cur_led->active_low ? LED_FULL : LED_OFF;
+
+ ret = gpio_request(led_dat->gpio, led_dat->cdev.name);
+ if (ret < 0)
+ goto err;
+
+ gpio_direction_output(led_dat->gpio, led_dat->active_low);
+
+ ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
+ if (ret < 0) {
+ gpio_free(led_dat->gpio);
+ goto err;
+ }
+
+ INIT_WORK(&led_dat->work, gpio_led_work);
+ }
+
+ platform_set_drvdata(pdev, leds_data);
+
+ return 0;
+
+err:
+ if (i > 0) {
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&leds_data[i].cdev);
+ gpio_free(leds_data[i].gpio);
+ }
+ }
+
+ flush_scheduled_work();
+ kfree(leds_data);
+
+ return ret;
+}
+
+static int __exit gpio_led_remove(struct platform_device *pdev)
+{
+ int i;
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led_data *leds_data;
+
+ leds_data = platform_get_drvdata(pdev);
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&leds_data[i].cdev);
+ gpio_free(leds_data[i].gpio);
+ }
+
+ kfree(leds_data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int gpio_led_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led_data *leds_data;
+ int i;
+
+ leds_data = platform_get_drvdata(pdev);
+
+ for (i = 0; i < pdata->num_leds; i++)
+ led_classdev_suspend(&leds_data[i].cdev);
+
+ return 0;
+}
+
+static int gpio_led_resume(struct platform_device *pdev)
+{
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_led_data *leds_data;
+ int i;
+
+ leds_data = platform_get_drvdata(pdev);
+
+ for (i = 0; i < pdata->num_leds; i++)
+ led_classdev_resume(&leds_data[i].cdev);
+
+ return 0;
+}
+#else
+#define gpio_led_suspend NULL
+#define gpio_led_resume NULL
+#endif
+
+static struct platform_driver gpio_led_driver = {
+ .remove = __exit_p(gpio_led_remove),
+ .suspend = gpio_led_suspend,
+ .resume = gpio_led_resume,
+ .driver = {
+ .name = "leds-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_led_init(void)
+{
+ return platform_driver_probe(&gpio_led_driver, gpio_led_probe);
+}
+
+static void __exit gpio_led_exit(void)
+{
+ platform_driver_unregister(&gpio_led_driver);
+}
+
+module_init(gpio_led_init);
+module_exit(gpio_led_exit);
+
+MODULE_AUTHOR("Raphael Assenat <raph@8d.com>");
+MODULE_DESCRIPTION("GPIO LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 6f2d449ba983..bfac499f3258 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -19,7 +19,7 @@
static void locomoled_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value, int offset)
{
- struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->class_dev->dev);
+ struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->dev);
unsigned long flags;
local_irq_save(flags);
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index a715c4ed93ff..f2f3884fe063 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -13,6 +13,7 @@
#ifndef __LEDS_H_INCLUDED
#define __LEDS_H_INCLUDED
+#include <linux/device.h>
#include <linux/leds.h>
static inline void led_set_brightness(struct led_classdev *led_cdev,
@@ -37,8 +38,9 @@ void led_trigger_set(struct led_classdev *led_cdev,
#define led_trigger_set(x, y) do {} while(0)
#endif
-ssize_t led_trigger_store(struct class_device *dev, const char *buf,
- size_t count);
-ssize_t led_trigger_show(struct class_device *dev, char *buf);
+ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index d756bdb01c59..ed9ff02c77ea 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -52,9 +52,10 @@ static void led_timer_function(unsigned long data)
mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
}
-static ssize_t led_delay_on_show(struct class_device *dev, char *buf)
+static ssize_t led_delay_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct timer_trig_data *timer_data = led_cdev->trigger_data;
sprintf(buf, "%lu\n", timer_data->delay_on);
@@ -62,10 +63,10 @@ static ssize_t led_delay_on_show(struct class_device *dev, char *buf)
return strlen(buf) + 1;
}
-static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
- size_t size)
+static ssize_t led_delay_on_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct timer_trig_data *timer_data = led_cdev->trigger_data;
int ret = -EINVAL;
char *after;
@@ -84,9 +85,10 @@ static ssize_t led_delay_on_store(struct class_device *dev, const char *buf,
return ret;
}
-static ssize_t led_delay_off_show(struct class_device *dev, char *buf)
+static ssize_t led_delay_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct timer_trig_data *timer_data = led_cdev->trigger_data;
sprintf(buf, "%lu\n", timer_data->delay_off);
@@ -94,10 +96,10 @@ static ssize_t led_delay_off_show(struct class_device *dev, char *buf)
return strlen(buf) + 1;
}
-static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
- size_t size)
+static ssize_t led_delay_off_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
{
- struct led_classdev *led_cdev = class_get_devdata(dev);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct timer_trig_data *timer_data = led_cdev->trigger_data;
int ret = -EINVAL;
char *after;
@@ -116,10 +118,8 @@ static ssize_t led_delay_off_store(struct class_device *dev, const char *buf,
return ret;
}
-static CLASS_DEVICE_ATTR(delay_on, 0644, led_delay_on_show,
- led_delay_on_store);
-static CLASS_DEVICE_ATTR(delay_off, 0644, led_delay_off_show,
- led_delay_off_store);
+static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
+static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
static void timer_trig_activate(struct led_classdev *led_cdev)
{
@@ -136,18 +136,17 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
timer_data->timer.function = led_timer_function;
timer_data->timer.data = (unsigned long) led_cdev;
- rc = class_device_create_file(led_cdev->class_dev,
- &class_device_attr_delay_on);
- if (rc) goto err_out;
- rc = class_device_create_file(led_cdev->class_dev,
- &class_device_attr_delay_off);
- if (rc) goto err_out_delayon;
+ rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
+ if (rc)
+ goto err_out_delayon;
return;
err_out_delayon:
- class_device_remove_file(led_cdev->class_dev,
- &class_device_attr_delay_on);
+ device_remove_file(led_cdev->dev, &dev_attr_delay_on);
err_out:
led_cdev->trigger_data = NULL;
kfree(timer_data);
@@ -158,10 +157,8 @@ static void timer_trig_deactivate(struct led_classdev *led_cdev)
struct timer_trig_data *timer_data = led_cdev->trigger_data;
if (timer_data) {
- class_device_remove_file(led_cdev->class_dev,
- &class_device_attr_delay_on);
- class_device_remove_file(led_cdev->class_dev,
- &class_device_attr_delay_off);
+ device_remove_file(led_cdev->dev, &dev_attr_delay_on);
+ device_remove_file(led_cdev->dev, &dev_attr_delay_off);
del_timer_sync(&timer_data->timer);
kfree(timer_data);
}
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
new file mode 100644
index 000000000000..43d901fdc77f
--- /dev/null
+++ b/drivers/lguest/Kconfig
@@ -0,0 +1,20 @@
+config LGUEST
+ tristate "Linux hypervisor example code"
+ depends on X86 && PARAVIRT && NET && EXPERIMENTAL && !X86_PAE
+ select LGUEST_GUEST
+ select HVC_DRIVER
+ ---help---
+ This is a very simple module which allows you to run
+ multiple instances of the same Linux kernel, using the
+ "lguest" command found in the Documentation/lguest directory.
+ Note that "lguest" is pronounced to rhyme with "fell quest",
+ not "rustyvisor". See Documentation/lguest/lguest.txt.
+
+ If unsure, say N. If curious, say M. If masochistic, say Y.
+
+config LGUEST_GUEST
+ bool
+ help
+ The guest needs code built-in, even if the host has lguest
+ support as a module. The drivers are tiny, so we build them
+ in too.
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
new file mode 100644
index 000000000000..55382c7d799c
--- /dev/null
+++ b/drivers/lguest/Makefile
@@ -0,0 +1,7 @@
+# Guest requires the paravirt_ops replacement and the bus driver.
+obj-$(CONFIG_LGUEST_GUEST) += lguest.o lguest_asm.o lguest_bus.o
+
+# Host requires the other files, which can be a module.
+obj-$(CONFIG_LGUEST) += lg.o
+lg-y := core.o hypercalls.o page_tables.o interrupts_and_traps.o \
+ segments.o io.o lguest_user.o switcher.o
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
new file mode 100644
index 000000000000..ce909ec57499
--- /dev/null
+++ b/drivers/lguest/core.c
@@ -0,0 +1,462 @@
+/* World's simplest hypervisor, to test paravirt_ops and show
+ * unbelievers that virtualization is the future. Plus, it's fun! */
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/stddef.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/cpu.h>
+#include <linux/freezer.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/poll.h>
+#include <asm/highmem.h>
+#include <asm/asm-offsets.h>
+#include <asm/i387.h>
+#include "lg.h"
+
+/* Found in switcher.S */
+extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
+extern unsigned long default_idt_entries[];
+
+/* Every guest maps the core switcher code. */
+#define SHARED_SWITCHER_PAGES \
+ DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
+/* Pages for switcher itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
+
+/* We map at -4M for ease of mapping into the guest (one PTE page). */
+#define SWITCHER_ADDR 0xFFC00000
+
+static struct vm_struct *switcher_vma;
+static struct page **switcher_page;
+
+static int cpu_had_pge;
+static struct {
+ unsigned long offset;
+ unsigned short segment;
+} lguest_entry;
+
+/* This One Big lock protects all inter-guest data structures. */
+DEFINE_MUTEX(lguest_lock);
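+/* The last guest run on each cpu: copy_in_guest_info() compares against this
+ * to know when a different guest's state must be reloaded wholesale. */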
+static DEFINE_PER_CPU(struct lguest *, last_guest);
+
+/* FIXME: Make dynamic. */
+#define MAX_LGUEST_GUESTS 16
+struct lguest lguests[MAX_LGUEST_GUESTS];
+
+/* Offset from where switcher.S was compiled to where we've copied it */
+static unsigned long switcher_offset(void)
+{
+ return SWITCHER_ADDR - (unsigned long)start_switcher_text;
+}
+
+/* This cpu's struct lguest_pages. */
+static struct lguest_pages *lguest_pages(unsigned int cpu)
+{
+ return &(((struct lguest_pages *)
+ (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
+}
+
+static __init int map_switcher(void)
+{
+ int i, err;
+ struct page **pagep;
+
+ switcher_page = kmalloc(sizeof(switcher_page[0])*TOTAL_SWITCHER_PAGES,
+ GFP_KERNEL);
+ if (!switcher_page) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ if (!addr) {
+ err = -ENOMEM;
+ goto free_some_pages;
+ }
+ switcher_page[i] = virt_to_page(addr);
+ }
+
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+ VM_ALLOC, SWITCHER_ADDR, VMALLOC_END);
+ if (!switcher_vma) {
+ err = -ENOMEM;
+ printk("lguest: could not map switcher pages high\n");
+ goto free_pages;
+ }
+
+ pagep = switcher_page;
+ err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep);
+ if (err) {
+ printk("lguest: map_vm_area failed: %i\n", err);
+ goto free_vma;
+ }
+ memcpy(switcher_vma->addr, start_switcher_text,
+ end_switcher_text - start_switcher_text);
+
+ /* Fix up IDT entries to point into copied text. */
+ for (i = 0; i < IDT_ENTRIES; i++)
+ default_idt_entries[i] += switcher_offset();
+
+ for_each_possible_cpu(i) {
+ struct lguest_pages *pages = lguest_pages(i);
+ struct lguest_ro_state *state = &pages->state;
+
+ /* These fields are static: the rest is done in copy_in_guest_info() */
+ state->host_gdt_desc.size = GDT_SIZE-1;
+ state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
+ store_idt(&state->host_idt_desc);
+ state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
+ state->guest_idt_desc.address = (long)&state->guest_idt;
+ state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
+ state->guest_gdt_desc.address = (long)&state->guest_gdt;
+ state->guest_tss.esp0 = (long)(&pages->regs + 1);
+ state->guest_tss.ss0 = LGUEST_DS;
+ /* No I/O for you! */
+ state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
+ setup_default_gdt_entries(state);
+ setup_default_idt_entries(state, default_idt_entries);
+
+ /* Setup LGUEST segments on all cpus */
+ get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+ get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+ }
+
+ /* Initialize entry point into switcher. */
+ lguest_entry.offset = (long)switch_to_guest + switcher_offset();
+ lguest_entry.segment = LGUEST_CS;
+
+ printk(KERN_INFO "lguest: mapped switcher at %p\n",
+ switcher_vma->addr);
+ return 0;
+
+free_vma:
+ vunmap(switcher_vma->addr);
+free_pages:
+ i = TOTAL_SWITCHER_PAGES;
+free_some_pages:
+ for (--i; i >= 0; i--)
+ __free_pages(switcher_page[i], 0);
+ kfree(switcher_page);
+out:
+ return err;
+}
+
+static void unmap_switcher(void)
+{
+ unsigned int i;
+
+ vunmap(switcher_vma->addr);
+ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
+ __free_pages(switcher_page[i], 0);
+}
+
+/* IN/OUT insns: enough to get us past boot-time probing. */
+static int emulate_insn(struct lguest *lg)
+{
+ u8 insn;
+ unsigned int insnlen = 0, in = 0, shift = 0;
+ unsigned long physaddr = guest_pa(lg, lg->regs->eip);
+
+ /* This only works for addresses in linear mapping... */
+ if (lg->regs->eip < lg->page_offset)
+ return 0;
+ lgread(lg, &insn, physaddr, 1);
+
+ /* Operand size prefix means it's actually for ax. */
+ if (insn == 0x66) {
+ shift = 16;
+ insnlen = 1;
+ lgread(lg, &insn, physaddr + insnlen, 1);
+ }
+
+ switch (insn & 0xFE) {
+ case 0xE4: /* in <next byte>,%al */
+ insnlen += 2;
+ in = 1;
+ break;
+ case 0xEC: /* in (%dx),%al */
+ insnlen += 1;
+ in = 1;
+ break;
+ case 0xE6: /* out %al,<next byte> */
+ insnlen += 2;
+ break;
+ case 0xEE: /* out %al,(%dx) */
+ insnlen += 1;
+ break;
+ default:
+ return 0;
+ }
+
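+ /* An "in" expects a result: there's no real device behind it, so hand
+ * back all ones, the traditional "nothing here" value. */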
+ if (in) {
+ /* Lower bit tells us whether it's a 16- or 32-bit access */
+ if (insn & 0x1)
+ lg->regs->eax = 0xFFFFFFFF;
+ else
+ lg->regs->eax |= (0xFFFF << shift);
+ }
+ lg->regs->eip += insnlen;
+ return 1;
+}
+
+int lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len)
+{
+ return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
+}
+
+/* Just like get_user, but don't let guest access lguest binary. */
+u32 lgread_u32(struct lguest *lg, unsigned long addr)
+{
+ u32 val = 0;
+
+ /* Don't let them access lguest binary */
+ if (!lguest_address_ok(lg, addr, sizeof(val))
+ || get_user(val, (u32 __user *)addr) != 0)
+ kill_guest(lg, "bad read address %#lx", addr);
+ return val;
+}
+
+void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val)
+{
+ if (!lguest_address_ok(lg, addr, sizeof(val))
+ || put_user(val, (u32 __user *)addr) != 0)
+ kill_guest(lg, "bad write address %#lx", addr);
+}
+
+void lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
+{
+ if (!lguest_address_ok(lg, addr, bytes)
+ || copy_from_user(b, (void __user *)addr, bytes) != 0) {
+ /* copy_from_user should do this, but as we rely on it... */
+ memset(b, 0, bytes);
+ kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
+ }
+}
+
+void lgwrite(struct lguest *lg, unsigned long addr, const void *b,
+ unsigned bytes)
+{
+ if (!lguest_address_ok(lg, addr, bytes)
+ || copy_to_user((void __user *)addr, b, bytes) != 0)
+ kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
+}
+
+static void set_ts(void)
+{
+ u32 cr0;
+
+ cr0 = read_cr0();
+ if (!(cr0 & 8))
+ write_cr0(cr0|8);
+}
+
+static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
+{
+ if (__get_cpu_var(last_guest) != lg || lg->last_pages != pages) {
+ __get_cpu_var(last_guest) = lg;
+ lg->last_pages = pages;
+ lg->changed = CHANGED_ALL;
+ }
+
+ /* These are pretty cheap, so we do them unconditionally. */
+ pages->state.host_cr3 = __pa(current->mm->pgd);
+ map_switcher_in_guest(lg, pages);
+ pages->state.guest_tss.esp1 = lg->esp1;
+ pages->state.guest_tss.ss1 = lg->ss1;
+
+ /* Copy direct trap entries. */
+ if (lg->changed & CHANGED_IDT)
+ copy_traps(lg, pages->state.guest_idt, default_idt_entries);
+
+ /* Copy all GDT entries but the TSS. */
+ if (lg->changed & CHANGED_GDT)
+ copy_gdt(lg, pages->state.guest_gdt);
+ /* If only the TLS entries have changed, copy them. */
+ else if (lg->changed & CHANGED_GDT_TLS)
+ copy_gdt_tls(lg, pages->state.guest_gdt);
+
+ lg->changed = 0;
+}
+
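+/* Switch to the Guest: refresh this cpu's shared pages, then far-call into
+ * the Switcher text mapped at SWITCHER_ADDR; we come back here once the
+ * Guest traps out again. */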
+static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
+{
+ unsigned int clobber;
+
+ copy_in_guest_info(lg, pages);
+
+ /* Put eflags on stack, lcall does rest: suitable for iret return. */
+ asm volatile("pushf; lcall *lguest_entry"
+ : "=a"(clobber), "=b"(clobber)
+ : "0"(pages), "1"(__pa(lg->pgdirs[lg->pgdidx].pgdir))
+ : "memory", "%edx", "%ecx", "%edi", "%esi");
+}
+
+int run_guest(struct lguest *lg, unsigned long __user *user)
+{
+ while (!lg->dead) {
+ unsigned int cr2 = 0; /* Damn gcc */
+
+ /* Hypercalls first: we might have been out to userspace */
+ do_hypercalls(lg);
+ if (lg->dma_is_pending) {
+ if (put_user(lg->pending_dma, user) ||
+ put_user(lg->pending_key, user+1))
+ return -EFAULT;
+ return sizeof(unsigned long)*2;
+ }
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ /* If Waker set break_out, return to Launcher. */
+ if (lg->break_out)
+ return -EAGAIN;
+
+ maybe_do_interrupt(lg);
+
+ try_to_freeze();
+
+ if (lg->dead)
+ break;
+
+ if (lg->halted) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ continue;
+ }
+
+ local_irq_disable();
+
+ /* Even if *we* don't want an FPU trap, the guest might... */
+ if (lg->ts)
+ set_ts();
+
+ /* Don't let Guest do SYSENTER: we can't handle it. */
+ if (boot_cpu_has(X86_FEATURE_SEP))
+ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+
+ run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
+
+ /* Save cr2 now if we page-faulted. */
+ if (lg->regs->trapnum == 14)
+ cr2 = read_cr2();
+ else if (lg->regs->trapnum == 7)
+ math_state_restore();
+
+ if (boot_cpu_has(X86_FEATURE_SEP))
+ wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+ local_irq_enable();
+
+ switch (lg->regs->trapnum) {
+ case 13: /* We've intercepted a GPF. */
+ if (lg->regs->errcode == 0) {
+ if (emulate_insn(lg))
+ continue;
+ }
+ break;
+ case 14: /* We've intercepted a page fault. */
+ if (demand_page(lg, cr2, lg->regs->errcode))
+ continue;
+
+ /* If lguest_data is NULL, this won't hurt. */
+ if (put_user(cr2, &lg->lguest_data->cr2))
+ kill_guest(lg, "Writing cr2");
+ break;
+ case 7: /* We've intercepted a Device Not Available fault. */
+ /* If they don't want to know, just absorb it. */
+ if (!lg->ts)
+ continue;
+ break;
+ case 32 ... 255: /* Real interrupt, fall thru */
+ cond_resched();
+ case LGUEST_TRAP_ENTRY: /* Handled at top of loop */
+ continue;
+ }
+
+ if (deliver_trap(lg, lg->regs->trapnum))
+ continue;
+
+ kill_guest(lg, "unhandled trap %li at %#lx (%#lx)",
+ lg->regs->trapnum, lg->regs->eip,
+ lg->regs->trapnum == 14 ? cr2 : lg->regs->errcode);
+ }
+ return -ENOENT;
+}
+
+int find_free_guest(void)
+{
+ unsigned int i;
+ for (i = 0; i < MAX_LGUEST_GUESTS; i++)
+ if (!lguests[i].tsk)
+ return i;
+ return -1;
+}
+
+static void adjust_pge(void *on)
+{
+ if (on)
+ write_cr4(read_cr4() | X86_CR4_PGE);
+ else
+ write_cr4(read_cr4() & ~X86_CR4_PGE);
+}
+
+static int __init init(void)
+{
+ int err;
+
+ if (paravirt_enabled()) {
+ printk("lguest is afraid of %s\n", paravirt_ops.name);
+ return -EPERM;
+ }
+
+ err = map_switcher();
+ if (err)
+ return err;
+
+ err = init_pagetables(switcher_page, SHARED_SWITCHER_PAGES);
+ if (err) {
+ unmap_switcher();
+ return err;
+ }
+ lguest_io_init();
+
+ err = lguest_device_init();
+ if (err) {
+ free_pagetables();
+ unmap_switcher();
+ return err;
+ }
+ lock_cpu_hotplug();
+ if (cpu_has_pge) { /* We have a broader idea of "global". */
+ cpu_had_pge = 1;
+ on_each_cpu(adjust_pge, (void *)0, 0, 1);
+ clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+ }
+ unlock_cpu_hotplug();
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ lguest_device_remove();
+ free_pagetables();
+ unmap_switcher();
+ lock_cpu_hotplug();
+ if (cpu_had_pge) {
+ set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
+ on_each_cpu(adjust_pge, (void *)1, 0, 1);
+ }
+ unlock_cpu_hotplug();
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
new file mode 100644
index 000000000000..ea52ca451f74
--- /dev/null
+++ b/drivers/lguest/hypercalls.c
@@ -0,0 +1,192 @@
+/* Actual hypercalls, which allow guests to actually do something.
+ Copyright (C) 2006 Rusty Russell IBM Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <irq_vectors.h>
+#include "lg.h"
+
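+/* Dispatch a single hypercall: the Guest puts the call number in %eax and up
+ * to three arguments in %edx, %ebx and %ecx of its saved register set. */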
+static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
+{
+ switch (regs->eax) {
+ case LHCALL_FLUSH_ASYNC:
+ break;
+ case LHCALL_LGUEST_INIT:
+ kill_guest(lg, "already have lguest_data");
+ break;
+ case LHCALL_CRASH: {
+ char msg[128];
+ lgread(lg, msg, regs->edx, sizeof(msg));
+ msg[sizeof(msg)-1] = '\0';
+ kill_guest(lg, "CRASH: %s", msg);
+ break;
+ }
+ case LHCALL_FLUSH_TLB:
+ if (regs->edx)
+ guest_pagetable_clear_all(lg);
+ else
+ guest_pagetable_flush_user(lg);
+ break;
+ case LHCALL_GET_WALLCLOCK: {
+ struct timespec ts;
+ ktime_get_real_ts(&ts);
+ regs->eax = ts.tv_sec;
+ break;
+ }
+ case LHCALL_BIND_DMA:
+ regs->eax = bind_dma(lg, regs->edx, regs->ebx,
+ regs->ecx >> 8, regs->ecx & 0xFF);
+ break;
+ case LHCALL_SEND_DMA:
+ send_dma(lg, regs->edx, regs->ebx);
+ break;
+ case LHCALL_LOAD_GDT:
+ load_guest_gdt(lg, regs->edx, regs->ebx);
+ break;
+ case LHCALL_LOAD_IDT_ENTRY:
+ load_guest_idt_entry(lg, regs->edx, regs->ebx, regs->ecx);
+ break;
+ case LHCALL_NEW_PGTABLE:
+ guest_new_pagetable(lg, regs->edx);
+ break;
+ case LHCALL_SET_STACK:
+ guest_set_stack(lg, regs->edx, regs->ebx, regs->ecx);
+ break;
+ case LHCALL_SET_PTE:
+ guest_set_pte(lg, regs->edx, regs->ebx, mkgpte(regs->ecx));
+ break;
+ case LHCALL_SET_PMD:
+ guest_set_pmd(lg, regs->edx, regs->ebx);
+ break;
+ case LHCALL_LOAD_TLS:
+ guest_load_tls(lg, regs->edx);
+ break;
+ case LHCALL_SET_CLOCKEVENT:
+ guest_set_clockevent(lg, regs->edx);
+ break;
+ case LHCALL_TS:
+ lg->ts = regs->edx;
+ break;
+ case LHCALL_HALT:
+ lg->halted = 1;
+ break;
+ default:
+ kill_guest(lg, "Bad hypercall %li\n", regs->eax);
+ }
+}
+
+/* We always do queued calls before actual hypercall. */
+static void do_async_hcalls(struct lguest *lg)
+{
+ unsigned int i;
+ u8 st[LHCALL_RING_SIZE];
+
+ if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(st); i++) {
+ struct lguest_regs regs;
+ unsigned int n = lg->next_hcall;
+
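+ /* 0xFF marks an empty slot: the Guest hasn't queued a call here (we
+ * write 0xFF back below once a queued call has been consumed). */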
+ if (st[n] == 0xFF)
+ break;
+
+ if (++lg->next_hcall == LHCALL_RING_SIZE)
+ lg->next_hcall = 0;
+
+ if (get_user(regs.eax, &lg->lguest_data->hcalls[n].eax)
+ || get_user(regs.edx, &lg->lguest_data->hcalls[n].edx)
+ || get_user(regs.ecx, &lg->lguest_data->hcalls[n].ecx)
+ || get_user(regs.ebx, &lg->lguest_data->hcalls[n].ebx)) {
+ kill_guest(lg, "Fetching async hypercalls");
+ break;
+ }
+
+ do_hcall(lg, &regs);
+ if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
+ kill_guest(lg, "Writing result for async hypercall");
+ break;
+ }
+
+ if (lg->dma_is_pending)
+ break;
+ }
+}
+
+static void initialize(struct lguest *lg)
+{
+ u32 tsc_speed;
+
+ if (lg->regs->eax != LHCALL_LGUEST_INIT) {
+ kill_guest(lg, "hypercall %li before LGUEST_INIT",
+ lg->regs->eax);
+ return;
+ }
+
+ /* We only tell the guest to use the TSC if it's reliable. */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
+ tsc_speed = tsc_khz;
+ else
+ tsc_speed = 0;
+
+ lg->lguest_data = (struct lguest_data __user *)lg->regs->edx;
+ /* We check here so we can simply copy_to_user/from_user */
+ if (!lguest_address_ok(lg, lg->regs->edx, sizeof(*lg->lguest_data))) {
+ kill_guest(lg, "bad guest page %p", lg->lguest_data);
+ return;
+ }
+ if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
+ || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)
+ /* We reserve the top pgd entry. */
+ || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
+ || put_user(tsc_speed, &lg->lguest_data->tsc_khz)
+ || put_user(lg->guestid, &lg->lguest_data->guestid))
+ kill_guest(lg, "bad guest page %p", lg->lguest_data);
+
+ /* This is the one case where the above accesses might have
+ * been the first write to a Guest page. This may have caused
+ * a copy-on-write fault, but the Guest might be referring to
+ * the old (read-only) page. */
+ guest_pagetable_clear_all(lg);
+}
+
+/* Even if we go out to userspace and come back, we don't want to do
+ * the hypercall again. */
+static void clear_hcall(struct lguest *lg)
+{
+ lg->regs->trapnum = 255;
+}
+
+void do_hypercalls(struct lguest *lg)
+{
+ if (unlikely(!lg->lguest_data)) {
+ if (lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
+ initialize(lg);
+ clear_hcall(lg);
+ }
+ return;
+ }
+
+ do_async_hcalls(lg);
+ if (!lg->dma_is_pending && lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
+ do_hcall(lg, lg->regs);
+ clear_hcall(lg);
+ }
+}
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
new file mode 100644
index 000000000000..bee029bb2c7b
--- /dev/null
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -0,0 +1,268 @@
+#include <linux/uaccess.h>
+#include "lg.h"
+
+static unsigned long idt_address(u32 lo, u32 hi)
+{
+ return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
+}
+
+static int idt_type(u32 lo, u32 hi)
+{
+ return (hi >> 8) & 0xF;
+}
+
+static int idt_present(u32 lo, u32 hi)
+{
+ return (hi & 0x8000);
+}
+
+static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
+{
+ *gstack -= 4;
+ lgwrite_u32(lg, *gstack, val);
+}
+
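+/* Deliver a trap or interrupt to the Guest: build the frame the CPU would
+ * have built (switching to the Guest's ring-1 stack if needed), push eflags,
+ * cs, eip and any error code, then aim the saved registers at the handler
+ * taken from the Guest's IDT entry (lo/hi). */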
+static void set_guest_interrupt(struct lguest *lg, u32 lo, u32 hi, int has_err)
+{
+ unsigned long gstack;
+ u32 eflags, ss, irq_enable;
+
+ /* If they want a ring change, we use the new stack and push the old ss/esp */
+ if ((lg->regs->ss&0x3) != GUEST_PL) {
+ gstack = guest_pa(lg, lg->esp1);
+ ss = lg->ss1;
+ push_guest_stack(lg, &gstack, lg->regs->ss);
+ push_guest_stack(lg, &gstack, lg->regs->esp);
+ } else {
+ gstack = guest_pa(lg, lg->regs->esp);
+ ss = lg->regs->ss;
+ }
+
+ /* We use the IF bit in eflags to indicate whether irqs were enabled
+ (it's always 1, since irqs are enabled when the guest is running). */
+ eflags = lg->regs->eflags;
+ if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
+ && !(irq_enable & X86_EFLAGS_IF))
+ eflags &= ~X86_EFLAGS_IF;
+
+ push_guest_stack(lg, &gstack, eflags);
+ push_guest_stack(lg, &gstack, lg->regs->cs);
+ push_guest_stack(lg, &gstack, lg->regs->eip);
+
+ if (has_err)
+ push_guest_stack(lg, &gstack, lg->regs->errcode);
+
+ /* Change the real stack so switcher returns to trap handler */
+ lg->regs->ss = ss;
+ lg->regs->esp = gstack + lg->page_offset;
+ lg->regs->cs = (__KERNEL_CS|GUEST_PL);
+ lg->regs->eip = idt_address(lo, hi);
+
+ /* Disable interrupts for an interrupt gate. */
+ if (idt_type(lo, hi) == 0xE)
+ if (put_user(0, &lg->lguest_data->irq_enabled))
+ kill_guest(lg, "Disabling interrupts");
+}
+
+void maybe_do_interrupt(struct lguest *lg)
+{
+ unsigned int irq;
+ DECLARE_BITMAP(blk, LGUEST_IRQS);
+ struct desc_struct *idt;
+
+ if (!lg->lguest_data)
+ return;
+
+ /* Mask out any interrupts they have blocked. */
+ if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
+ sizeof(blk)))
+ return;
+
+ bitmap_andnot(blk, lg->irqs_pending, blk, LGUEST_IRQS);
+
+ irq = find_first_bit(blk, LGUEST_IRQS);
+ if (irq >= LGUEST_IRQS)
+ return;
+
+ if (lg->regs->eip >= lg->noirq_start && lg->regs->eip < lg->noirq_end)
+ return;
+
+ /* If they're halted, we re-enable interrupts. */
+ if (lg->halted) {
+ /* Re-enable interrupts. */
+ if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
+ kill_guest(lg, "Re-enabling interrupts");
+ lg->halted = 0;
+ } else {
+ /* Maybe they have interrupts disabled? */
+ u32 irq_enabled;
+ if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
+ irq_enabled = 0;
+ if (!irq_enabled)
+ return;
+ }
+
+ idt = &lg->idt[FIRST_EXTERNAL_VECTOR+irq];
+ if (idt_present(idt->a, idt->b)) {
+ clear_bit(irq, lg->irqs_pending);
+ set_guest_interrupt(lg, idt->a, idt->b, 0);
+ }
+}
+
+static int has_err(unsigned int trap)
+{
+ return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
+}
+
+int deliver_trap(struct lguest *lg, unsigned int num)
+{
+ u32 lo = lg->idt[num].a, hi = lg->idt[num].b;
+
+ if (!idt_present(lo, hi))
+ return 0;
+ set_guest_interrupt(lg, lo, hi, has_err(num));
+ return 1;
+}
+
+static int direct_trap(const struct lguest *lg,
+ const struct desc_struct *trap,
+ unsigned int num)
+{
+ /* Hardware interrupts don't go to guest (except syscall). */
+ if (num >= FIRST_EXTERNAL_VECTOR && num != SYSCALL_VECTOR)
+ return 0;
+
+ /* We intercept page faults (demand shadow paging & cr2 saving),
+ general protection faults (in/out emulation), device not
+ available (TS handling) and the hypercall trap. */
+ if (num == 14 || num == 13 || num == 7 || num == LGUEST_TRAP_ENTRY)
+ return 0;
+
+ /* Interrupt gates (0xE) or not present (0x0) can't go direct. */
+ return idt_type(trap->a, trap->b) == 0xF;
+}
+
+void pin_stack_pages(struct lguest *lg)
+{
+ unsigned int i;
+
+ for (i = 0; i < lg->stack_pages; i++)
+ pin_page(lg, lg->esp1 - i * PAGE_SIZE);
+}
+
+void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages)
+{
+ /* You cannot have a stack segment with priv level 0. */
+ if ((seg & 0x3) != GUEST_PL)
+ kill_guest(lg, "bad stack segment %i", seg);
+ if (pages > 2)
+ kill_guest(lg, "bad stack pages %u", pages);
+ lg->ss1 = seg;
+ lg->esp1 = esp;
+ lg->stack_pages = pages;
+ pin_stack_pages(lg);
+}
+
+/* Set up trap in IDT. */
+static void set_trap(struct lguest *lg, struct desc_struct *trap,
+ unsigned int num, u32 lo, u32 hi)
+{
+ u8 type = idt_type(lo, hi);
+
+ if (!idt_present(lo, hi)) {
+ trap->a = trap->b = 0;
+ return;
+ }
+
+ if (type != 0xE && type != 0xF)
+ kill_guest(lg, "bad IDT type %i", type);
+
+ trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
+ trap->b = (hi&0xFFFFEF00);
+}
+
+void load_guest_idt_entry(struct lguest *lg, unsigned int num, u32 lo, u32 hi)
+{
+ /* Guest never handles: NMI, doublefault, hypercall, spurious irq. */
+ if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
+ return;
+
+ lg->changed |= CHANGED_IDT;
+ if (num < ARRAY_SIZE(lg->idt))
+ set_trap(lg, &lg->idt[num], num, lo, hi);
+ else if (num == SYSCALL_VECTOR)
+ set_trap(lg, &lg->syscall_idt, num, lo, hi);
+}
+
+static void default_idt_entry(struct desc_struct *idt,
+ int trap,
+ const unsigned long handler)
+{
+ u32 flags = 0x8e00;
+
+ /* They can't "int" into any of them except hypercall. */
+ if (trap == LGUEST_TRAP_ENTRY)
+ flags |= (GUEST_PL << 13);
+
+ idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
+ idt->b = (handler&0xFFFF0000) | flags;
+}
+
+void setup_default_idt_entries(struct lguest_ro_state *state,
+ const unsigned long *def)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
+ default_idt_entry(&state->guest_idt[i], i, def[i]);
+}
+
+void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+ const unsigned long *def)
+{
+ unsigned int i;
+
+ /* All hardware interrupts are the same whatever the guest: only the
+ * traps might be different. */
+ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) {
+ if (direct_trap(lg, &lg->idt[i], i))
+ idt[i] = lg->idt[i];
+ else
+ default_idt_entry(&idt[i], i, def[i]);
+ }
+ i = SYSCALL_VECTOR;
+ if (direct_trap(lg, &lg->syscall_idt, i))
+ idt[i] = lg->syscall_idt;
+ else
+ default_idt_entry(&idt[i], i, def[i]);
+}
+
+void guest_set_clockevent(struct lguest *lg, unsigned long delta)
+{
+ ktime_t expires;
+
+ if (unlikely(delta == 0)) {
+ /* Clock event device is shutting down. */
+ hrtimer_cancel(&lg->hrt);
+ return;
+ }
+
+ expires = ktime_add_ns(ktime_get_real(), delta);
+ hrtimer_start(&lg->hrt, expires, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
+{
+ struct lguest *lg = container_of(timer, struct lguest, hrt);
+
+ set_bit(0, lg->irqs_pending);
+ if (lg->halted)
+ wake_up_process(lg->tsk);
+ return HRTIMER_NORESTART;
+}
+
+void init_clockdev(struct lguest *lg)
+{
+ hrtimer_init(&lg->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ lg->hrt.function = clockdev_fn;
+}
diff --git a/drivers/lguest/io.c b/drivers/lguest/io.c
new file mode 100644
index 000000000000..c8eb79266991
--- /dev/null
+++ b/drivers/lguest/io.c
@@ -0,0 +1,399 @@
+/* Simple I/O model for guests, based on shared memory.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/types.h>
+#include <linux/futex.h>
+#include <linux/jhash.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/uaccess.h>
+#include "lg.h"
+
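+/* Registered DMA buffers, hashed by futex key: send_dma() walks a bucket to
+ * find a matching receiver in another Guest (private mappings go to the
+ * Launcher instead). */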
+static struct list_head dma_hash[61];
+
+void lguest_io_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dma_hash); i++)
+ INIT_LIST_HEAD(&dma_hash[i]);
+}
+
+/* FIXME: allow multi-page lengths. */
+static int check_dma_list(struct lguest *lg, const struct lguest_dma *dma)
+{
+ unsigned int i;
+
+ for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
+ if (!dma->len[i])
+ return 1;
+ if (!lguest_address_ok(lg, dma->addr[i], dma->len[i]))
+ goto kill;
+ if (dma->len[i] > PAGE_SIZE)
+ goto kill;
+ /* We could do over a page, but is it worth it? */
+ if ((dma->addr[i] % PAGE_SIZE) + dma->len[i] > PAGE_SIZE)
+ goto kill;
+ }
+ return 1;
+
+kill:
+ kill_guest(lg, "bad DMA entry: %u@%#lx", dma->len[i], dma->addr[i]);
+ return 0;
+}
+
+static unsigned int hash(const union futex_key *key)
+{
+ return jhash2((u32*)&key->both.word,
+ (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+ key->both.offset)
+ % ARRAY_SIZE(dma_hash);
+}
+
+static inline int key_eq(const union futex_key *a, const union futex_key *b)
+{
+ return (a->both.word == b->both.word
+ && a->both.ptr == b->both.ptr
+ && a->both.offset == b->both.offset);
+}
+
+/* Must hold read lock on dmainfo owner's current->mm->mmap_sem */
+static void unlink_dma(struct lguest_dma_info *dmainfo)
+{
+ BUG_ON(!mutex_is_locked(&lguest_lock));
+ dmainfo->interrupt = 0;
+ list_del(&dmainfo->list);
+ drop_futex_key_refs(&dmainfo->key);
+}
+
+static int unbind_dma(struct lguest *lg,
+ const union futex_key *key,
+ unsigned long dmas)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < LGUEST_MAX_DMA; i++) {
+ if (key_eq(key, &lg->dma[i].key) && dmas == lg->dma[i].dmas) {
+ unlink_dma(&lg->dma[i]);
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+int bind_dma(struct lguest *lg,
+ unsigned long ukey, unsigned long dmas, u16 numdmas, u8 interrupt)
+{
+ unsigned int i;
+ int ret = 0;
+ union futex_key key;
+ struct rw_semaphore *fshared = &current->mm->mmap_sem;
+
+ if (interrupt >= LGUEST_IRQS)
+ return 0;
+
+ mutex_lock(&lguest_lock);
+ down_read(fshared);
+ if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
+ kill_guest(lg, "bad dma key %#lx", ukey);
+ goto unlock;
+ }
+ get_futex_key_refs(&key);
+
+ if (interrupt == 0)
+ ret = unbind_dma(lg, &key, dmas);
+ else {
+ for (i = 0; i < LGUEST_MAX_DMA; i++) {
+ if (lg->dma[i].interrupt)
+ continue;
+
+ lg->dma[i].dmas = dmas;
+ lg->dma[i].num_dmas = numdmas;
+ lg->dma[i].next_dma = 0;
+ lg->dma[i].key = key;
+ lg->dma[i].guestid = lg->guestid;
+ lg->dma[i].interrupt = interrupt;
+ list_add(&lg->dma[i].list, &dma_hash[hash(&key)]);
+ ret = 1;
+ goto unlock;
+ }
+ }
+ drop_futex_key_refs(&key);
+unlock:
+ up_read(fshared);
+ mutex_unlock(&lguest_lock);
+ return ret;
+}
+
+/* lgread from another guest */
+static int lgread_other(struct lguest *lg,
+ void *buf, u32 addr, unsigned bytes)
+{
+ if (!lguest_address_ok(lg, addr, bytes)
+ || access_process_vm(lg->tsk, addr, buf, bytes, 0) != bytes) {
+ memset(buf, 0, bytes);
+ kill_guest(lg, "bad address in registered DMA struct");
+ return 0;
+ }
+ return 1;
+}
+
+/* lgwrite to another guest */
+static int lgwrite_other(struct lguest *lg, u32 addr,
+ const void *buf, unsigned bytes)
+{
+ if (!lguest_address_ok(lg, addr, bytes)
+ || (access_process_vm(lg->tsk, addr, (void *)buf, bytes, 1)
+ != bytes)) {
+ kill_guest(lg, "bad address writing to registered DMA");
+ return 0;
+ }
+ return 1;
+}
+
+static u32 copy_data(struct lguest *srclg,
+ const struct lguest_dma *src,
+ const struct lguest_dma *dst,
+ struct page *pages[])
+{
+ unsigned int totlen, si, di, srcoff, dstoff;
+ void *maddr = NULL;
+
+ totlen = 0;
+ si = di = 0;
+ srcoff = dstoff = 0;
+ while (si < LGUEST_MAX_DMA_SECTIONS && src->len[si]
+ && di < LGUEST_MAX_DMA_SECTIONS && dst->len[di]) {
+ u32 len = min(src->len[si] - srcoff, dst->len[di] - dstoff);
+
+ if (!maddr)
+ maddr = kmap(pages[di]);
+
+ /* FIXME: This is not completely portable, since
+ archs do different things for copy_to_user_page. */
+ if (copy_from_user(maddr + (dst->addr[di] + dstoff)%PAGE_SIZE,
+ (void __user *)src->addr[si], len) != 0) {
+ kill_guest(srclg, "bad address in sending DMA");
+ totlen = 0;
+ break;
+ }
+
+ totlen += len;
+ srcoff += len;
+ dstoff += len;
+ if (srcoff == src->len[si]) {
+ si++;
+ srcoff = 0;
+ }
+ if (dstoff == dst->len[di]) {
+ kunmap(pages[di]);
+ maddr = NULL;
+ di++;
+ dstoff = 0;
+ }
+ }
+
+ if (maddr)
+ kunmap(pages[di]);
+
+ return totlen;
+}
+
+/* Src is us, ie. current. */
+static u32 do_dma(struct lguest *srclg, const struct lguest_dma *src,
+ struct lguest *dstlg, const struct lguest_dma *dst)
+{
+ int i;
+ u32 ret;
+ struct page *pages[LGUEST_MAX_DMA_SECTIONS];
+
+ if (!check_dma_list(dstlg, dst) || !check_dma_list(srclg, src))
+ return 0;
+
+ /* First get the destination pages */
+ for (i = 0; i < LGUEST_MAX_DMA_SECTIONS; i++) {
+ if (dst->len[i] == 0)
+ break;
+ if (get_user_pages(dstlg->tsk, dstlg->mm,
+ dst->addr[i], 1, 1, 1, pages+i, NULL)
+ != 1) {
+ kill_guest(dstlg, "Error mapping DMA pages");
+ ret = 0;
+ goto drop_pages;
+ }
+ }
+
+ /* Now copy until we run out of src or dst. */
+ ret = copy_data(srclg, src, dst, pages);
+
+drop_pages:
+ while (--i >= 0)
+ put_page(pages[i]);
+ return ret;
+}
+
+static int dma_transfer(struct lguest *srclg,
+ unsigned long udma,
+ struct lguest_dma_info *dst)
+{
+ struct lguest_dma dst_dma, src_dma;
+ struct lguest *dstlg;
+ u32 i, dma = 0;
+
+ dstlg = &lguests[dst->guestid];
+ /* Get our dma list. */
+ lgread(srclg, &src_dma, udma, sizeof(src_dma));
+
+ /* We can't deadlock against them dmaing to us, because this
+ * is all under the lguest_lock. */
+ down_read(&dstlg->mm->mmap_sem);
+
+ for (i = 0; i < dst->num_dmas; i++) {
+ dma = (dst->next_dma + i) % dst->num_dmas;
+ if (!lgread_other(dstlg, &dst_dma,
+ dst->dmas + dma * sizeof(struct lguest_dma),
+ sizeof(dst_dma))) {
+ goto fail;
+ }
+ if (!dst_dma.used_len)
+ break;
+ }
+ if (i != dst->num_dmas) {
+ unsigned long used_lenp;
+ unsigned int ret;
+
+ ret = do_dma(srclg, &src_dma, dstlg, &dst_dma);
+ /* Put used length in src. */
+ lgwrite_u32(srclg,
+ udma+offsetof(struct lguest_dma, used_len), ret);
+ if (ret == 0 && src_dma.len[0] != 0)
+ goto fail;
+
+ /* Make sure destination sees contents before length. */
+ wmb();
+ used_lenp = dst->dmas
+ + dma * sizeof(struct lguest_dma)
+ + offsetof(struct lguest_dma, used_len);
+ lgwrite_other(dstlg, used_lenp, &ret, sizeof(ret));
+ dst->next_dma++;
+ }
+ up_read(&dstlg->mm->mmap_sem);
+
+ /* Do this last so dst doesn't simply sleep on lock. */
+ set_bit(dst->interrupt, dstlg->irqs_pending);
+ wake_up_process(dstlg->tsk);
+ return i == dst->num_dmas;
+
+fail:
+ up_read(&dstlg->mm->mmap_sem);
+ return 0;
+}
+
+void send_dma(struct lguest *lg, unsigned long ukey, unsigned long udma)
+{
+ union futex_key key;
+ int empty = 0;
+ struct rw_semaphore *fshared = &current->mm->mmap_sem;
+
+again:
+ mutex_lock(&lguest_lock);
+ down_read(fshared);
+ if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
+ kill_guest(lg, "bad sending DMA key");
+ goto unlock;
+ }
+ /* Shared mapping? Look for other guests... */
+ if (key.shared.offset & 1) {
+ struct lguest_dma_info *i;
+ list_for_each_entry(i, &dma_hash[hash(&key)], list) {
+ if (i->guestid == lg->guestid)
+ continue;
+ if (!key_eq(&key, &i->key))
+ continue;
+
+ empty += dma_transfer(lg, udma, i);
+ break;
+ }
+ if (empty == 1) {
+ /* Give any recipients one chance to restock. */
+ up_read(&current->mm->mmap_sem);
+ mutex_unlock(&lguest_lock);
+ empty++;
+ goto again;
+ }
+ } else {
+ /* Private mapping: tell our userspace. */
+ lg->dma_is_pending = 1;
+ lg->pending_dma = udma;
+ lg->pending_key = ukey;
+ }
+unlock:
+ up_read(fshared);
+ mutex_unlock(&lguest_lock);
+}
+
+void release_all_dma(struct lguest *lg)
+{
+ unsigned int i;
+
+ BUG_ON(!mutex_is_locked(&lguest_lock));
+
+ down_read(&lg->mm->mmap_sem);
+ for (i = 0; i < LGUEST_MAX_DMA; i++) {
+ if (lg->dma[i].interrupt)
+ unlink_dma(&lg->dma[i]);
+ }
+ up_read(&lg->mm->mmap_sem);
+}
+
+/* Userspace wants a dma buffer from this guest. */
+unsigned long get_dma_buffer(struct lguest *lg,
+ unsigned long ukey, unsigned long *interrupt)
+{
+ unsigned long ret = 0;
+ union futex_key key;
+ struct lguest_dma_info *i;
+ struct rw_semaphore *fshared = &current->mm->mmap_sem;
+
+ mutex_lock(&lguest_lock);
+ down_read(fshared);
+ if (get_futex_key((u32 __user *)ukey, fshared, &key) != 0) {
+ kill_guest(lg, "bad registered DMA buffer");
+ goto unlock;
+ }
+ list_for_each_entry(i, &dma_hash[hash(&key)], list) {
+ if (key_eq(&key, &i->key) && i->guestid == lg->guestid) {
+ unsigned int j;
+ for (j = 0; j < i->num_dmas; j++) {
+ struct lguest_dma dma;
+
+ ret = i->dmas + j * sizeof(struct lguest_dma);
+ lgread(lg, &dma, ret, sizeof(dma));
+ if (dma.used_len == 0)
+ break;
+ }
+ *interrupt = i->interrupt;
+ break;
+ }
+ }
+unlock:
+ up_read(fshared);
+ mutex_unlock(&lguest_lock);
+ return ret;
+}
+
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
new file mode 100644
index 000000000000..3e2ddfbc816e
--- /dev/null
+++ b/drivers/lguest/lg.h
@@ -0,0 +1,261 @@
+#ifndef _LGUEST_H
+#define _LGUEST_H
+
+#include <asm/desc.h>
+
+#define GDT_ENTRY_LGUEST_CS 10
+#define GDT_ENTRY_LGUEST_DS 11
+#define LGUEST_CS (GDT_ENTRY_LGUEST_CS * 8)
+#define LGUEST_DS (GDT_ENTRY_LGUEST_DS * 8)
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/stringify.h>
+#include <linux/binfmts.h>
+#include <linux/futex.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <asm/semaphore.h>
+#include "irq_vectors.h"
+
+#define GUEST_PL 1
+
+struct lguest_regs
+{
+ /* Manually saved part. */
+ unsigned long ebx, ecx, edx;
+ unsigned long esi, edi, ebp;
+ unsigned long gs;
+ unsigned long eax;
+ unsigned long fs, ds, es;
+ unsigned long trapnum, errcode;
+ /* Trap pushed part */
+ unsigned long eip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long esp;
+ unsigned long ss;
+};
+
+void free_pagetables(void);
+int init_pagetables(struct page **switcher_page, unsigned int pages);
+
+/* Full 4G segment descriptors, suitable for CS and DS. */
+#define FULL_EXEC_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9b00})
+#define FULL_SEGMENT ((struct desc_struct){0x0000ffff, 0x00cf9300})
+
+struct lguest_dma_info
+{
+ struct list_head list;
+ union futex_key key;
+ unsigned long dmas;
+ u16 next_dma;
+ u16 num_dmas;
+ u16 guestid;
+ u8 interrupt; /* 0 when not registered */
+};
+
+/* We have separate types for the guest's ptes & pgds and the shadow ptes &
+ * pgds. Since this host might use three-level pagetables and the guest and
+ * shadow pagetables don't, we can't use the normal pte_t/pgd_t. */
+typedef union {
+ struct { unsigned flags:12, pfn:20; };
+ struct { unsigned long val; } raw;
+} spgd_t;
+typedef union {
+ struct { unsigned flags:12, pfn:20; };
+ struct { unsigned long val; } raw;
+} spte_t;
+typedef union {
+ struct { unsigned flags:12, pfn:20; };
+ struct { unsigned long val; } raw;
+} gpgd_t;
+typedef union {
+ struct { unsigned flags:12, pfn:20; };
+ struct { unsigned long val; } raw;
+} gpte_t;
+#define mkgpte(_val) ((gpte_t){.raw.val = _val})
+#define mkgpgd(_val) ((gpgd_t){.raw.val = _val})
+
+struct pgdir
+{
+ unsigned long cr3;
+ spgd_t *pgdir;
+};
+
+/* This is a guest-specific page (mapped ro) into the guest. */
+struct lguest_ro_state
+{
+ /* Host information we need to restore when we switch back. */
+ u32 host_cr3;
+ struct Xgt_desc_struct host_idt_desc;
+ struct Xgt_desc_struct host_gdt_desc;
+ u32 host_sp;
+
+ /* Fields which are used when guest is running. */
+ struct Xgt_desc_struct guest_idt_desc;
+ struct Xgt_desc_struct guest_gdt_desc;
+ struct i386_hw_tss guest_tss;
+ struct desc_struct guest_idt[IDT_ENTRIES];
+ struct desc_struct guest_gdt[GDT_ENTRIES];
+};
+
+/* We have two pages shared with guests, per cpu. */
+struct lguest_pages
+{
+ /* This is the stack page mapped rw in guest */
+ char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
+ struct lguest_regs regs;
+
+ /* This is the host state & guest descriptor page, ro in guest */
+ struct lguest_ro_state state;
+} __attribute__((aligned(PAGE_SIZE)));
+
+#define CHANGED_IDT 1
+#define CHANGED_GDT 2
+#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */
+#define CHANGED_ALL 3
+
+/* The private info the thread maintains about the guest. */
+struct lguest
+{
+ /* At end of a page shared mapped over lguest_pages in guest. */
+ unsigned long regs_page;
+ struct lguest_regs *regs;
+ struct lguest_data __user *lguest_data;
+ struct task_struct *tsk;
+ struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
+ u16 guestid;
+ u32 pfn_limit;
+ u32 page_offset;
+ u32 cr2;
+ int halted;
+ int ts;
+ u32 next_hcall;
+ u32 esp1;
+ u8 ss1;
+
+ /* Do we need to stop what we're doing and return to userspace? */
+ int break_out;
+ wait_queue_head_t break_wq;
+
+ /* Bitmap of what has changed: see CHANGED_* above. */
+ int changed;
+ struct lguest_pages *last_pages;
+
+ /* We keep a small number of these. */
+ u32 pgdidx;
+ struct pgdir pgdirs[4];
+
+ /* Cached wakeup: we hold a reference to this task. */
+ struct task_struct *wake;
+
+ unsigned long noirq_start, noirq_end;
+ int dma_is_pending;
+ unsigned long pending_dma; /* struct lguest_dma */
+ unsigned long pending_key; /* address they're sending to */
+
+ unsigned int stack_pages;
+ u32 tsc_khz;
+
+ struct lguest_dma_info dma[LGUEST_MAX_DMA];
+
+ /* Dead? */
+ const char *dead;
+
+ /* The GDT entries copied into lguest_ro_state when running. */
+ struct desc_struct gdt[GDT_ENTRIES];
+
+ /* The IDT entries: some copied into lguest_ro_state when running. */
+ struct desc_struct idt[FIRST_EXTERNAL_VECTOR+LGUEST_IRQS];
+ struct desc_struct syscall_idt;
+
+ /* Virtual clock device */
+ struct hrtimer hrt;
+
+ /* Pending virtual interrupts */
+ DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
+};
+
+extern struct lguest lguests[];
+extern struct mutex lguest_lock;
+
+/* core.c: */
+u32 lgread_u32(struct lguest *lg, unsigned long addr);
+void lgwrite_u32(struct lguest *lg, unsigned long addr, u32 val);
+void lgread(struct lguest *lg, void *buf, unsigned long addr, unsigned len);
+void lgwrite(struct lguest *lg, unsigned long, const void *buf, unsigned len);
+int find_free_guest(void);
+int lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len);
+int run_guest(struct lguest *lg, unsigned long __user *user);
+
+
+/* interrupts_and_traps.c: */
+void maybe_do_interrupt(struct lguest *lg);
+int deliver_trap(struct lguest *lg, unsigned int num);
+void load_guest_idt_entry(struct lguest *lg, unsigned int i, u32 low, u32 hi);
+void guest_set_stack(struct lguest *lg, u32 seg, u32 esp, unsigned int pages);
+void pin_stack_pages(struct lguest *lg);
+void setup_default_idt_entries(struct lguest_ro_state *state,
+ const unsigned long *def);
+void copy_traps(const struct lguest *lg, struct desc_struct *idt,
+ const unsigned long *def);
+void guest_set_clockevent(struct lguest *lg, unsigned long delta);
+void init_clockdev(struct lguest *lg);
+
+/* segments.c: */
+void setup_default_gdt_entries(struct lguest_ro_state *state);
+void setup_guest_gdt(struct lguest *lg);
+void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num);
+void guest_load_tls(struct lguest *lg, unsigned long tls_array);
+void copy_gdt(const struct lguest *lg, struct desc_struct *gdt);
+void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt);
+
+/* page_tables.c: */
+int init_guest_pagetable(struct lguest *lg, unsigned long pgtable);
+void free_guest_pagetable(struct lguest *lg);
+void guest_new_pagetable(struct lguest *lg, unsigned long pgtable);
+void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 i);
+void guest_pagetable_clear_all(struct lguest *lg);
+void guest_pagetable_flush_user(struct lguest *lg);
+void guest_set_pte(struct lguest *lg, unsigned long cr3,
+ unsigned long vaddr, gpte_t val);
+void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages);
+int demand_page(struct lguest *info, unsigned long cr2, int errcode);
+void pin_page(struct lguest *lg, unsigned long vaddr);
+
+/* lguest_user.c: */
+int lguest_device_init(void);
+void lguest_device_remove(void);
+
+/* io.c: */
+void lguest_io_init(void);
+int bind_dma(struct lguest *lg,
+ unsigned long key, unsigned long udma, u16 numdmas, u8 interrupt);
+void send_dma(struct lguest *info, unsigned long key, unsigned long udma);
+void release_all_dma(struct lguest *lg);
+unsigned long get_dma_buffer(struct lguest *lg, unsigned long key,
+ unsigned long *interrupt);
+
+/* hypercalls.c: */
+void do_hypercalls(struct lguest *lg);
+
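+/* Mark the Guest dead, recording a formatted explanation in lg->dead.  Only
+ * the first message sticks; run_guest()'s while (!lg->dead) loop then exits. */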
+#define kill_guest(lg, fmt...) \
+do { \
+ if (!(lg)->dead) { \
+ (lg)->dead = kasprintf(GFP_ATOMIC, fmt); \
+ if (!(lg)->dead) \
+ (lg)->dead = ERR_PTR(-ENOMEM); \
+ } \
+} while(0)
+
+static inline unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
+{
+ return vaddr - lg->page_offset;
+}
+#endif /* __ASSEMBLY__ */
+#endif /* _LGUEST_H */
diff --git a/drivers/lguest/lguest.c b/drivers/lguest/lguest.c
new file mode 100644
index 000000000000..18dade06d4a9
--- /dev/null
+++ b/drivers/lguest/lguest.c
@@ -0,0 +1,630 @@
+/*
+ * Lguest specific paravirt-ops implementation
+ *
+ * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+#include <linux/lguest_bus.h>
+#include <asm/paravirt.h>
+#include <asm/param.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/e820.h>
+#include <asm/mce.h>
+#include <asm/io.h>
+
+/* Declarations for definitions in lguest_guest.S */
+extern char lguest_noirq_start[], lguest_noirq_end[];
+extern const char lgstart_cli[], lgend_cli[];
+extern const char lgstart_sti[], lgend_sti[];
+extern const char lgstart_popf[], lgend_popf[];
+extern const char lgstart_pushf[], lgend_pushf[];
+extern const char lgstart_iret[], lgend_iret[];
+extern void lguest_iret(void);
+
+struct lguest_data lguest_data = {
+ .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
+ .noirq_start = (u32)lguest_noirq_start,
+ .noirq_end = (u32)lguest_noirq_end,
+ .blocked_interrupts = { 1 }, /* Block timer interrupts */
+};
+struct lguest_device_desc *lguest_devices;
+static cycle_t clock_base;
+
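+/* While the kernel is in a lazy paravirt mode, hypercalls are queued with
+ * async_hcall() and flushed in one batch when the mode ends, rather than
+ * trapping to the Host for every single update. */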
+static enum paravirt_lazy_mode lazy_mode;
+static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
+{
+ if (mode == PARAVIRT_LAZY_FLUSH) {
+ if (unlikely(lazy_mode != PARAVIRT_LAZY_NONE))
+ hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+ } else {
+ lazy_mode = mode;
+ if (mode == PARAVIRT_LAZY_NONE)
+ hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
+ }
+}
+
+static void lazy_hcall(unsigned long call,
+ unsigned long arg1,
+ unsigned long arg2,
+ unsigned long arg3)
+{
+ if (lazy_mode == PARAVIRT_LAZY_NONE)
+ hcall(call, arg1, arg2, arg3);
+ else
+ async_hcall(call, arg1, arg2, arg3);
+}
+
+void async_hcall(unsigned long call,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+ /* Note: This code assumes we're uniprocessor. */
+ static unsigned int next_call;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (lguest_data.hcall_status[next_call] != 0xFF) {
+ /* Table full, so do normal hcall which will flush table. */
+ hcall(call, arg1, arg2, arg3);
+ } else {
+ lguest_data.hcalls[next_call].eax = call;
+ lguest_data.hcalls[next_call].edx = arg1;
+ lguest_data.hcalls[next_call].ebx = arg2;
+ lguest_data.hcalls[next_call].ecx = arg3;
+ /* Make sure host sees arguments before "valid" flag. */
+ wmb();
+ lguest_data.hcall_status[next_call] = 0;
+ if (++next_call == LHCALL_RING_SIZE)
+ next_call = 0;
+ }
+ local_irq_restore(flags);
+}
+
+void lguest_send_dma(unsigned long key, struct lguest_dma *dma)
+{
+ dma->used_len = 0;
+ hcall(LHCALL_SEND_DMA, key, __pa(dma), 0);
+}
+
+int lguest_bind_dma(unsigned long key, struct lguest_dma *dmas,
+ unsigned int num, u8 irq)
+{
+ if (!hcall(LHCALL_BIND_DMA, key, __pa(dmas), (num << 8) | irq))
+ return -ENOMEM;
+ return 0;
+}
+
+void lguest_unbind_dma(unsigned long key, struct lguest_dma *dmas)
+{
+ hcall(LHCALL_BIND_DMA, key, __pa(dmas), 0);
+}
+
+/* For guests, device memory can be used as normal memory, so we cast away the
+ * __iomem to quieten sparse. */
+void *lguest_map(unsigned long phys_addr, unsigned long pages)
+{
+ return (__force void *)ioremap(phys_addr, PAGE_SIZE*pages);
+}
+
+void lguest_unmap(void *addr)
+{
+ iounmap((__force void __iomem *)addr);
+}
+
+static unsigned long save_fl(void)
+{
+ return lguest_data.irq_enabled;
+}
+
+static void restore_fl(unsigned long flags)
+{
+ /* FIXME: Check if interrupt pending... */
+ lguest_data.irq_enabled = flags;
+}
+
+static void irq_disable(void)
+{
+ lguest_data.irq_enabled = 0;
+}
+
+static void irq_enable(void)
+{
+ /* FIXME: Check if interrupt pending... */
+ lguest_data.irq_enabled = X86_EFLAGS_IF;
+}
+
+static void lguest_write_idt_entry(struct desc_struct *dt,
+ int entrynum, u32 low, u32 high)
+{
+ write_dt_entry(dt, entrynum, low, high);
+ hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, low, high);
+}
+
+static void lguest_load_idt(const struct Xgt_desc_struct *desc)
+{
+ unsigned int i;
+ struct desc_struct *idt = (void *)desc->address;
+
+ for (i = 0; i < (desc->size+1)/8; i++)
+ hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
+}
+
+static void lguest_load_gdt(const struct Xgt_desc_struct *desc)
+{
+ BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
+ hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
+}
+
+static void lguest_write_gdt_entry(struct desc_struct *dt,
+ int entrynum, u32 low, u32 high)
+{
+ write_dt_entry(dt, entrynum, low, high);
+ hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
+}
+
+static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+ lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
+}
+
+static void lguest_set_ldt(const void *addr, unsigned entries)
+{
+}
+
+static void lguest_load_tr_desc(void)
+{
+}
+
+static void lguest_cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ int function = *eax;
+
+ native_cpuid(eax, ebx, ecx, edx);
+ switch (function) {
+ case 1: /* Basic feature request. */
+ /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
+ *ecx &= 0x00002201;
+ /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, FPU. */
+ *edx &= 0x07808101;
+ /* Host wants to know when we flush kernel pages: set PGE. */
+ *edx |= 0x00002000;
+ break;
+ case 0x80000000:
+ /* Futureproof this a little: if they ask how much extended
+ * processor information there is, limit it to known fields. */
+ if (*eax > 0x80000008)
+ *eax = 0x80000008;
+ break;
+ }
+}
+
+static unsigned long current_cr0, current_cr3;
+static void lguest_write_cr0(unsigned long val)
+{
+ lazy_hcall(LHCALL_TS, val & 8, 0, 0);
+ current_cr0 = val;
+}
+
+static unsigned long lguest_read_cr0(void)
+{
+ return current_cr0;
+}
+
+static void lguest_clts(void)
+{
+ lazy_hcall(LHCALL_TS, 0, 0, 0);
+ current_cr0 &= ~8U;
+}
+
+static unsigned long lguest_read_cr2(void)
+{
+ return lguest_data.cr2;
+}
+
+static void lguest_write_cr3(unsigned long cr3)
+{
+ lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
+ current_cr3 = cr3;
+}
+
+static unsigned long lguest_read_cr3(void)
+{
+ return current_cr3;
+}
+
+/* Used to enable/disable PGE, but we don't care. */
+static unsigned long lguest_read_cr4(void)
+{
+ return 0;
+}
+
+static void lguest_write_cr4(unsigned long val)
+{
+}
+
+static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+ lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
+}
+
+/* We only support two-level pagetables at the moment. */
+static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+ lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
+ (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
+}
+
+/* FIXME: Eliminate all callers of this. */
+static void lguest_set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+ /* Don't bother with hypercall before initial setup. */
+ if (current_cr3)
+ lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
+}
+
+static void lguest_flush_tlb_single(unsigned long addr)
+{
+ /* Simply set it to zero, and it will fault back in. */
+ lazy_hcall(LHCALL_SET_PTE, current_cr3, addr, 0);
+}
+
+static void lguest_flush_tlb_user(void)
+{
+ lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
+}
+
+static void lguest_flush_tlb_kernel(void)
+{
+ lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
+}
+
+static void disable_lguest_irq(unsigned int irq)
+{
+ set_bit(irq, lguest_data.blocked_interrupts);
+}
+
+static void enable_lguest_irq(unsigned int irq)
+{
+ clear_bit(irq, lguest_data.blocked_interrupts);
+ /* FIXME: If it's pending? */
+}
+
+static struct irq_chip lguest_irq_controller = {
+ .name = "lguest",
+ .mask = disable_lguest_irq,
+ .mask_ack = disable_lguest_irq,
+ .unmask = enable_lguest_irq,
+};
+
+static void __init lguest_init_IRQ(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < LGUEST_IRQS; i++) {
+ int vector = FIRST_EXTERNAL_VECTOR + i;
+ if (vector != SYSCALL_VECTOR) {
+ set_intr_gate(vector, interrupt[i]);
+ set_irq_chip_and_handler(i, &lguest_irq_controller,
+ handle_level_irq);
+ }
+ }
+ irq_ctx_init(smp_processor_id());
+}
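+/* Reading aid (an assumption about the vector layout, not new behaviour):
+ * guest irq i lands on CPU vector FIRST_EXTERNAL_VECTOR + i (0x20 + i on
+ * i386); the slot that would collide with SYSCALL_VECTOR (0x80) is skipped so
+ * the Guest keeps int $0x80 for its own system calls. */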
+
+static unsigned long lguest_get_wallclock(void)
+{
+ return hcall(LHCALL_GET_WALLCLOCK, 0, 0, 0);
+}
+
+static cycle_t lguest_clock_read(void)
+{
+ if (lguest_data.tsc_khz)
+ return native_read_tsc();
+ else
+ return jiffies;
+}
+
+/* This is what we tell the kernel is our clocksource. */
+static struct clocksource lguest_clock = {
+ .name = "lguest",
+ .rating = 400,
+ .read = lguest_clock_read,
+};
+
+static unsigned long long lguest_sched_clock(void)
+{
+ return cyc2ns(&lguest_clock, lguest_clock_read() - clock_base);
+}
+
+/* We also need a "struct clock_event_device": Linux asks us to set it to go
+ * off some time in the future. Actually, James Morris figured all this out;
+ * I just applied the patch. */
+static int lguest_clockevent_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ if (delta < LG_CLOCK_MIN_DELTA) {
+ if (printk_ratelimit())
+ printk(KERN_DEBUG "%s: small delta %lu ns\n",
+ __FUNCTION__, delta);
+ return -ETIME;
+ }
+ hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
+ return 0;
+}
+
+static void lguest_clockevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ /* A 0 argument shuts the clock down. */
+ hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* This is what we expect. */
+ break;
+ case CLOCK_EVT_MODE_PERIODIC:
+ BUG();
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
+}
+
+/* This describes our primitive timer chip. */
+static struct clock_event_device lguest_clockevent = {
+ .name = "lguest",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = lguest_clockevent_set_next_event,
+ .set_mode = lguest_clockevent_set_mode,
+ .rating = INT_MAX,
+ .mult = 1,
+ .shift = 0,
+ .min_delta_ns = LG_CLOCK_MIN_DELTA,
+ .max_delta_ns = LG_CLOCK_MAX_DELTA,
+};
+
+/* This is the Guest timer interrupt handler (hardware interrupt 0). We just
+ * call the clockevent infrastructure and it does whatever needs doing. */
+static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long flags;
+
+ /* Don't interrupt us while this is running. */
+ local_irq_save(flags);
+ lguest_clockevent.event_handler(&lguest_clockevent);
+ local_irq_restore(flags);
+}
+
+static void lguest_time_init(void)
+{
+ set_irq_handler(0, lguest_time_irq);
+
+ /* We use the TSC if the Host tells us we can, otherwise a dumb
+ * jiffies-based clock. */
+ if (lguest_data.tsc_khz) {
+ lguest_clock.shift = 22;
+ lguest_clock.mult = clocksource_khz2mult(lguest_data.tsc_khz,
+ lguest_clock.shift);
+ lguest_clock.mask = CLOCKSOURCE_MASK(64);
+ lguest_clock.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ } else {
+ /* To understand this, start at kernel/time/jiffies.c... */
+ lguest_clock.shift = 8;
+ lguest_clock.mult = (((u64)NSEC_PER_SEC<<8)/ACTHZ) << 8;
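+ /* Arithmetic sketch: clocksources convert a counter delta to
+ * nanoseconds as (delta * mult) >> shift. One tick of this clock
+ * is a jiffy, and ACTHZ is the actual HZ shifted left 8 bits, so
+ * with shift = 8 the mult above works out to roughly
+ * (NSEC_PER_SEC / HZ) << 8, i.e. one jiffy's worth of ns. */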
+ lguest_clock.mask = CLOCKSOURCE_MASK(32);
+ }
+ clock_base = lguest_clock_read();
+ clocksource_register(&lguest_clock);
+
+ /* We can't set cpumask in the initializer: damn C limitations! */
+ lguest_clockevent.cpumask = cpumask_of_cpu(0);
+ clockevents_register_device(&lguest_clockevent);
+
+ enable_lguest_irq(0);
+}
+
+static void lguest_load_esp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
+ THREAD_SIZE/PAGE_SIZE);
+}
+
+static void lguest_set_debugreg(int regno, unsigned long value)
+{
+ /* FIXME: Implement */
+}
+
+static void lguest_wbinvd(void)
+{
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static void lguest_apic_write(unsigned long reg, unsigned long v)
+{
+}
+
+static unsigned long lguest_apic_read(unsigned long reg)
+{
+ return 0;
+}
+#endif
+
+static void lguest_safe_halt(void)
+{
+ hcall(LHCALL_HALT, 0, 0, 0);
+}
+
+static void lguest_power_off(void)
+{
+ hcall(LHCALL_CRASH, __pa("Power down"), 0, 0);
+}
+
+static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
+{
+ hcall(LHCALL_CRASH, __pa(p), 0, 0);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block paniced = {
+ .notifier_call = lguest_panic
+};
+
+static __init char *lguest_memory_setup(void)
+{
+ /* We do this here because lockcheck barfs if we do this before start_kernel */
+ atomic_notifier_chain_register(&panic_notifier_list, &paniced);
+
+ add_memory_region(E820_MAP->addr, E820_MAP->size, E820_MAP->type);
+ return "LGUEST";
+}
+
+static const struct lguest_insns
+{
+ const char *start, *end;
+} lguest_insns[] = {
+ [PARAVIRT_PATCH(irq_disable)] = { lgstart_cli, lgend_cli },
+ [PARAVIRT_PATCH(irq_enable)] = { lgstart_sti, lgend_sti },
+ [PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf },
+ [PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf },
+};
+static unsigned lguest_patch(u8 type, u16 clobber, void *insns, unsigned len)
+{
+ unsigned int insn_len;
+
+ /* Don't touch it if we don't have a replacement */
+ if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
+ return paravirt_patch_default(type, clobber, insns, len);
+
+ insn_len = lguest_insns[type].end - lguest_insns[type].start;
+
+ /* Similarly if we can't fit replacement. */
+ if (len < insn_len)
+ return paravirt_patch_default(type, clobber, insns, len);
+
+ memcpy(insns, lguest_insns[type].start, insn_len);
+ return insn_len;
+}
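+/* Worked example (assumed call-site shape, for illustration only): a paravirt
+ * call site for irq_disable is normally an indirect call through
+ * paravirt_ops.irq_disable padded with nops; lguest_patch() copies the
+ * lgstart_cli..lgend_cli template over it, leaving just
+ *	movl $0, lguest_data+LGUEST_DATA_irq_enabled
+ * so disabling interrupts never has to leave the Guest. */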
+
+__init void lguest_init(void *boot)
+{
+ /* Copy boot parameters first. */
+ memcpy(&boot_params, boot, PARAM_SIZE);
+ memcpy(boot_command_line, __va(boot_params.hdr.cmd_line_ptr),
+ COMMAND_LINE_SIZE);
+
+ paravirt_ops.name = "lguest";
+ paravirt_ops.paravirt_enabled = 1;
+ paravirt_ops.kernel_rpl = 1;
+
+ paravirt_ops.save_fl = save_fl;
+ paravirt_ops.restore_fl = restore_fl;
+ paravirt_ops.irq_disable = irq_disable;
+ paravirt_ops.irq_enable = irq_enable;
+ paravirt_ops.load_gdt = lguest_load_gdt;
+ paravirt_ops.memory_setup = lguest_memory_setup;
+ paravirt_ops.cpuid = lguest_cpuid;
+ paravirt_ops.write_cr3 = lguest_write_cr3;
+ paravirt_ops.flush_tlb_user = lguest_flush_tlb_user;
+ paravirt_ops.flush_tlb_single = lguest_flush_tlb_single;
+ paravirt_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
+ paravirt_ops.set_pte = lguest_set_pte;
+ paravirt_ops.set_pte_at = lguest_set_pte_at;
+ paravirt_ops.set_pmd = lguest_set_pmd;
+#ifdef CONFIG_X86_LOCAL_APIC
+ paravirt_ops.apic_write = lguest_apic_write;
+ paravirt_ops.apic_write_atomic = lguest_apic_write;
+ paravirt_ops.apic_read = lguest_apic_read;
+#endif
+ paravirt_ops.load_idt = lguest_load_idt;
+ paravirt_ops.iret = lguest_iret;
+ paravirt_ops.load_esp0 = lguest_load_esp0;
+ paravirt_ops.load_tr_desc = lguest_load_tr_desc;
+ paravirt_ops.set_ldt = lguest_set_ldt;
+ paravirt_ops.load_tls = lguest_load_tls;
+ paravirt_ops.set_debugreg = lguest_set_debugreg;
+ paravirt_ops.clts = lguest_clts;
+ paravirt_ops.read_cr0 = lguest_read_cr0;
+ paravirt_ops.write_cr0 = lguest_write_cr0;
+ paravirt_ops.init_IRQ = lguest_init_IRQ;
+ paravirt_ops.read_cr2 = lguest_read_cr2;
+ paravirt_ops.read_cr3 = lguest_read_cr3;
+ paravirt_ops.read_cr4 = lguest_read_cr4;
+ paravirt_ops.write_cr4 = lguest_write_cr4;
+ paravirt_ops.write_gdt_entry = lguest_write_gdt_entry;
+ paravirt_ops.write_idt_entry = lguest_write_idt_entry;
+ paravirt_ops.patch = lguest_patch;
+ paravirt_ops.safe_halt = lguest_safe_halt;
+ paravirt_ops.get_wallclock = lguest_get_wallclock;
+ paravirt_ops.time_init = lguest_time_init;
+ paravirt_ops.set_lazy_mode = lguest_lazy_mode;
+ paravirt_ops.wbinvd = lguest_wbinvd;
+ paravirt_ops.sched_clock = lguest_sched_clock;
+
+ hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
+
+ /* We use top of mem for initial pagetables. */
+ init_pg_tables_end = __pa(pg0);
+
+ asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
+
+ reserve_top_address(lguest_data.reserve_mem);
+
+ lockdep_init();
+
+ paravirt_disable_iospace();
+
+ cpu_detect(&new_cpu_data);
+ /* head.S usually sets up the first capability word, so do it here. */
+ new_cpu_data.x86_capability[0] = cpuid_edx(1);
+
+ /* Math is always hard! */
+ new_cpu_data.hard_math = 1;
+
+#ifdef CONFIG_X86_MCE
+ mce_disabled = 1;
+#endif
+
+#ifdef CONFIG_ACPI
+ acpi_disabled = 1;
+ acpi_ht = 0;
+#endif
+
+ add_preferred_console("hvc", 0, NULL);
+
+ pm_power_off = lguest_power_off;
+ start_kernel();
+}
diff --git a/drivers/lguest/lguest_asm.S b/drivers/lguest/lguest_asm.S
new file mode 100644
index 000000000000..a3dbf22ee365
--- /dev/null
+++ b/drivers/lguest/lguest_asm.S
@@ -0,0 +1,54 @@
+#include <linux/linkage.h>
+#include <linux/lguest.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/processor-flags.h>
+
+/*
+ * This is where we begin: we have a magic signature which the launcher looks
+ * for. The plan is that the Linux boot protocol will be extended with a
+ * "platform type" field which will guide us here from the normal entry point,
+ * but for the moment this suffices. We pass the virtual address of the boot
+ * info to lguest_init().
+ *
+ * We put it in .init.text so it will be discarded after boot.
+ */
+.section .init.text, "ax", @progbits
+.ascii "GenuineLguest"
+ /* Set up initial stack. */
+ movl $(init_thread_union+THREAD_SIZE),%esp
+ movl %esi, %eax
+ addl $__PAGE_OFFSET, %eax
+ jmp lguest_init
+
+/* The templates for inline patching. */
+#define LGUEST_PATCH(name, insns...) \
+ lgstart_##name: insns; lgend_##name:; \
+ .globl lgstart_##name; .globl lgend_##name
+
+LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
+LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled)
+LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
+LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
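+/* These four templates fill the PARAVIRT_PATCH slots listed in lguest_insns[]
+ * in lguest.c (irq_disable, irq_enable, restore_fl, save_fl): each is a single
+ * mov to or from lguest_data.irq_enabled, so interrupt-flag changes never trap
+ * out to the Host. */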
+
+.text
+/* These demarcate the EIP range where the host should never deliver interrupts. */
+.global lguest_noirq_start
+.global lguest_noirq_end
+
+/*
+ * We move eflags word to lguest_data.irq_enabled to restore interrupt state.
+ * For page faults, gpfs and virtual interrupts, the hypervisor has saved
+ * eflags manually, otherwise it was delivered directly and so eflags reflects
+ * the real machine IF state, ie. interrupts on. Since the kernel always dies
+ * if it takes such a trap with interrupts disabled anyway, turning interrupts
+ * back on unconditionally here is OK.
+ */
+ENTRY(lguest_iret)
+ pushl %eax
+ movl 12(%esp), %eax
+lguest_noirq_start:
+ movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
+ popl %eax
+ iret
+lguest_noirq_end:
diff --git a/drivers/lguest/lguest_bus.c b/drivers/lguest/lguest_bus.c
new file mode 100644
index 000000000000..18d6ab21a43b
--- /dev/null
+++ b/drivers/lguest/lguest_bus.c
@@ -0,0 +1,148 @@
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/lguest_bus.h>
+#include <asm/io.h>
+
+static ssize_t type_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ return sprintf(buf, "%hu", lguest_devices[dev->index].type);
+}
+static ssize_t features_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ return sprintf(buf, "%hx", lguest_devices[dev->index].features);
+}
+static ssize_t pfn_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ return sprintf(buf, "%u", lguest_devices[dev->index].pfn);
+}
+static ssize_t status_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ return sprintf(buf, "%hx", lguest_devices[dev->index].status);
+}
+static ssize_t status_store(struct device *_dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ if (sscanf(buf, "%hi", &lguest_devices[dev->index].status) != 1)
+ return -EINVAL;
+ return count;
+}
+static struct device_attribute lguest_dev_attrs[] = {
+ __ATTR_RO(type),
+ __ATTR_RO(features),
+ __ATTR_RO(pfn),
+ __ATTR(status, 0644, status_show, status_store),
+ __ATTR_NULL
+};
+
+static int lguest_dev_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ struct lguest_driver *drv = container_of(_drv,struct lguest_driver,drv);
+
+ return (drv->device_type == lguest_devices[dev->index].type);
+}
+
+struct lguest_bus {
+ struct bus_type bus;
+ struct device dev;
+};
+
+static struct lguest_bus lguest_bus = {
+ .bus = {
+ .name = "lguest",
+ .match = lguest_dev_match,
+ .dev_attrs = lguest_dev_attrs,
+ },
+ .dev = {
+ .parent = NULL,
+ .bus_id = "lguest",
+ }
+};
+
+static int lguest_dev_probe(struct device *_dev)
+{
+ int ret;
+ struct lguest_device *dev = container_of(_dev,struct lguest_device,dev);
+ struct lguest_driver *drv = container_of(dev->dev.driver,
+ struct lguest_driver, drv);
+
+ lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER;
+ ret = drv->probe(dev);
+ if (ret == 0)
+ lguest_devices[dev->index].status |= LGUEST_DEVICE_S_DRIVER_OK;
+ return ret;
+}
+
+int register_lguest_driver(struct lguest_driver *drv)
+{
+ if (!lguest_devices)
+ return 0;
+
+ drv->drv.bus = &lguest_bus.bus;
+ drv->drv.name = drv->name;
+ drv->drv.owner = drv->owner;
+ drv->drv.probe = lguest_dev_probe;
+
+ return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(register_lguest_driver);
+
+static void add_lguest_device(unsigned int index)
+{
+ struct lguest_device *new;
+
+ lguest_devices[index].status |= LGUEST_DEVICE_S_ACKNOWLEDGE;
+ new = kmalloc(sizeof(struct lguest_device), GFP_KERNEL);
+ if (!new) {
+ printk(KERN_EMERG "Cannot allocate lguest device %u\n", index);
+ lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
+ return;
+ }
+
+ new->index = index;
+ new->private = NULL;
+ memset(&new->dev, 0, sizeof(new->dev));
+ new->dev.parent = &lguest_bus.dev;
+ new->dev.bus = &lguest_bus.bus;
+ sprintf(new->dev.bus_id, "%u", index);
+ if (device_register(&new->dev) != 0) {
+ printk(KERN_EMERG "Cannot register lguest device %u\n", index);
+ lguest_devices[index].status |= LGUEST_DEVICE_S_FAILED;
+ kfree(new);
+ }
+}
+
+static void scan_devices(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < LGUEST_MAX_DEVICES; i++)
+ if (lguest_devices[i].type)
+ add_lguest_device(i);
+}
+
+static int __init lguest_bus_init(void)
+{
+ if (strcmp(paravirt_ops.name, "lguest") != 0)
+ return 0;
+
+ /* Devices are in page above top of "normal" mem. */
+ lguest_devices = lguest_map(max_pfn<<PAGE_SHIFT, 1);
+
+ if (bus_register(&lguest_bus.bus) != 0
+ || device_register(&lguest_bus.dev) != 0)
+ panic("lguest bus registration failed");
+
+ scan_devices();
+ return 0;
+}
+postcore_initcall(lguest_bus_init);
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
new file mode 100644
index 000000000000..e90d7a783daf
--- /dev/null
+++ b/drivers/lguest/lguest_user.c
@@ -0,0 +1,236 @@
+/* Userspace control of the guest, via /dev/lguest. */
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include "lg.h"
+
+static void setup_regs(struct lguest_regs *regs, unsigned long start)
+{
+ /* Write out stack in format lguest expects, so we can switch to it. */
+ regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
+ regs->cs = __KERNEL_CS|GUEST_PL;
+ regs->eflags = 0x202; /* Interrupts enabled. */
+ regs->eip = start;
+ /* esi points to our boot information (physical address 0) */
+}
+
+/* + addr */
+static long user_get_dma(struct lguest *lg, const u32 __user *input)
+{
+ unsigned long key, udma, irq;
+
+ if (get_user(key, input) != 0)
+ return -EFAULT;
+ udma = get_dma_buffer(lg, key, &irq);
+ if (!udma)
+ return -ENOENT;
+
+ /* We put irq number in udma->used_len. */
+ lgwrite_u32(lg, udma + offsetof(struct lguest_dma, used_len), irq);
+ return udma;
+}
+
+/* To force the Guest to stop running and return to the Launcher, the
+ * Waker writes LHREQ_BREAK and the value "1" to /dev/lguest. The
+ * Launcher then writes LHREQ_BREAK and "0" to release the Waker. */
+static int break_guest_out(struct lguest *lg, const u32 __user *input)
+{
+ unsigned long on;
+
+ /* Fetch whether they're turning break on or off.. */
+ if (get_user(on, input) != 0)
+ return -EFAULT;
+
+ if (on) {
+ lg->break_out = 1;
+ /* Pop it out (may be running on different CPU) */
+ wake_up_process(lg->tsk);
+ /* Wait for them to reset it */
+ return wait_event_interruptible(lg->break_wq, !lg->break_out);
+ } else {
+ lg->break_out = 0;
+ wake_up(&lg->break_wq);
+ return 0;
+ }
+}
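+/* Hypothetical Launcher-side sketch (names and the fd are illustrative only):
+ *	u32 args[2] = { LHREQ_BREAK, 1 };
+ *	write(lguest_fd, args, sizeof(args));	-- Waker: kick the Guest out
+ *	...
+ *	args[1] = 0;
+ *	write(lguest_fd, args, sizeof(args));	-- Launcher: release the Waker
+ */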
+
+/* + irq */
+static int user_send_irq(struct lguest *lg, const u32 __user *input)
+{
+ u32 irq;
+
+ if (get_user(irq, input) != 0)
+ return -EFAULT;
+ if (irq >= LGUEST_IRQS)
+ return -EINVAL;
+ set_bit(irq, lg->irqs_pending);
+ return 0;
+}
+
+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
+{
+ struct lguest *lg = file->private_data;
+
+ if (!lg)
+ return -EINVAL;
+
+ /* If you're not the task which owns the guest, go away. */
+ if (current != lg->tsk)
+ return -EPERM;
+
+ if (lg->dead) {
+ size_t len;
+
+ if (IS_ERR(lg->dead))
+ return PTR_ERR(lg->dead);
+
+ len = min(size, strlen(lg->dead)+1);
+ if (copy_to_user(user, lg->dead, len) != 0)
+ return -EFAULT;
+ return len;
+ }
+
+ if (lg->dma_is_pending)
+ lg->dma_is_pending = 0;
+
+ return run_guest(lg, (unsigned long __user *)user);
+}
+
+/* Take: pfnlimit, pgdir, start, pageoffset. */
+static int initialize(struct file *file, const u32 __user *input)
+{
+ struct lguest *lg;
+ int err, i;
+ u32 args[4];
+
+ /* We grab the Big Lguest lock, which protects the global array
+ * "lguests" and multiple simultaneous initializations. */
+ mutex_lock(&lguest_lock);
+
+ if (file->private_data) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (copy_from_user(args, input, sizeof(args)) != 0) {
+ err = -EFAULT;
+ goto unlock;
+ }
+
+ i = find_free_guest();
+ if (i < 0) {
+ err = -ENOSPC;
+ goto unlock;
+ }
+ lg = &lguests[i];
+ lg->guestid = i;
+ lg->pfn_limit = args[0];
+ lg->page_offset = args[3];
+ lg->regs_page = get_zeroed_page(GFP_KERNEL);
+ if (!lg->regs_page) {
+ err = -ENOMEM;
+ goto release_guest;
+ }
+ lg->regs = (void *)lg->regs_page + PAGE_SIZE - sizeof(*lg->regs);
+
+ err = init_guest_pagetable(lg, args[1]);
+ if (err)
+ goto free_regs;
+
+ setup_regs(lg->regs, args[2]);
+ setup_guest_gdt(lg);
+ init_clockdev(lg);
+ lg->tsk = current;
+ lg->mm = get_task_mm(lg->tsk);
+ init_waitqueue_head(&lg->break_wq);
+ lg->last_pages = NULL;
+ file->private_data = lg;
+
+ mutex_unlock(&lguest_lock);
+
+ return sizeof(args);
+
+free_regs:
+ free_page(lg->regs_page);
+release_guest:
+ memset(lg, 0, sizeof(*lg));
+unlock:
+ mutex_unlock(&lguest_lock);
+ return err;
+}
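+/* Hypothetical Launcher usage (an illustration, not an API guarantee): it
+ * opens /dev/lguest and writes five u32s -- LHREQ_INITIALIZE, pfnlimit,
+ * pgdir, start, pageoffset -- then repeatedly read()s the same fd to run
+ * the Guest. */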
+
+static ssize_t write(struct file *file, const char __user *input,
+ size_t size, loff_t *off)
+{
+ struct lguest *lg = file->private_data;
+ u32 req;
+
+ if (get_user(req, input) != 0)
+ return -EFAULT;
+ input += sizeof(req);
+
+ if (req != LHREQ_INITIALIZE && !lg)
+ return -EINVAL;
+ if (lg && lg->dead)
+ return -ENOENT;
+
+ /* If you're not the task which owns the Guest, you can only break */
+ if (lg && current != lg->tsk && req != LHREQ_BREAK)
+ return -EPERM;
+
+ switch (req) {
+ case LHREQ_INITIALIZE:
+ return initialize(file, (const u32 __user *)input);
+ case LHREQ_GETDMA:
+ return user_get_dma(lg, (const u32 __user *)input);
+ case LHREQ_IRQ:
+ return user_send_irq(lg, (const u32 __user *)input);
+ case LHREQ_BREAK:
+ return break_guest_out(lg, (const u32 __user *)input);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int close(struct inode *inode, struct file *file)
+{
+ struct lguest *lg = file->private_data;
+
+ if (!lg)
+ return 0;
+
+ mutex_lock(&lguest_lock);
+ /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
+ hrtimer_cancel(&lg->hrt);
+ release_all_dma(lg);
+ free_guest_pagetable(lg);
+ mmput(lg->mm);
+ if (!IS_ERR(lg->dead))
+ kfree(lg->dead);
+ free_page(lg->regs_page);
+ memset(lg, 0, sizeof(*lg));
+ mutex_unlock(&lguest_lock);
+ return 0;
+}
+
+static struct file_operations lguest_fops = {
+ .owner = THIS_MODULE,
+ .release = close,
+ .write = write,
+ .read = read,
+};
+static struct miscdevice lguest_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "lguest",
+ .fops = &lguest_fops,
+};
+
+int __init lguest_device_init(void)
+{
+ return misc_register(&lguest_dev);
+}
+
+void __exit lguest_device_remove(void)
+{
+ misc_deregister(&lguest_dev);
+}
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
new file mode 100644
index 000000000000..1b0ba09b1269
--- /dev/null
+++ b/drivers/lguest/page_tables.c
@@ -0,0 +1,411 @@
+/* Shadow page table operations.
+ * Copyright (C) Rusty Russell IBM Corporation 2006.
+ * GPL v2 and any later version */
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/percpu.h>
+#include <asm/tlbflush.h>
+#include "lg.h"
+
+#define PTES_PER_PAGE_SHIFT 10
+#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)
+#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)
+
+static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
+#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
+
+static unsigned vaddr_to_pgd_index(unsigned long vaddr)
+{
+ return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
+}
+
+/* These access the shadow versions (ie. the ones used by the CPU). */
+static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
+{
+ unsigned int index = vaddr_to_pgd_index(vaddr);
+
+ if (index >= SWITCHER_PGD_INDEX) {
+ kill_guest(lg, "attempt to access switcher pages");
+ index = 0;
+ }
+ return &lg->pgdirs[i].pgdir[index];
+}
+
+static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
+{
+ spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
+ BUG_ON(!(spgd.flags & _PAGE_PRESENT));
+ return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
+}
+
+/* These access the guest versions. */
+static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
+{
+ unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
+ return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t);
+}
+
+static unsigned long gpte_addr(struct lguest *lg,
+ gpgd_t gpgd, unsigned long vaddr)
+{
+ unsigned long gpage = gpgd.pfn << PAGE_SHIFT;
+ BUG_ON(!(gpgd.flags & _PAGE_PRESENT));
+ return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
+}
+
+/* Do a virtual -> physical mapping on a user page. */
+static unsigned long get_pfn(unsigned long virtpfn, int write)
+{
+ struct page *page;
+ unsigned long ret = -1UL;
+
+ down_read(&current->mm->mmap_sem);
+ if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
+ 1, write, 1, &page, NULL) == 1)
+ ret = page_to_pfn(page);
+ up_read(&current->mm->mmap_sem);
+ return ret;
+}
+
+static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
+{
+ spte_t spte;
+ unsigned long pfn;
+
+ /* We ignore the global flag. */
+ spte.flags = (gpte.flags & ~_PAGE_GLOBAL);
+ pfn = get_pfn(gpte.pfn, write);
+ if (pfn == -1UL) {
+ kill_guest(lg, "failed to get page %u", gpte.pfn);
+ /* Must not put_page() bogus page on cleanup. */
+ spte.flags = 0;
+ }
+ spte.pfn = pfn;
+ return spte;
+}
+
+static void release_pte(spte_t pte)
+{
+ if (pte.flags & _PAGE_PRESENT)
+ put_page(pfn_to_page(pte.pfn));
+}
+
+static void check_gpte(struct lguest *lg, gpte_t gpte)
+{
+ if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit)
+ kill_guest(lg, "bad page table entry");
+}
+
+static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
+{
+ if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit)
+ kill_guest(lg, "bad page directory entry");
+}
+
+/* FIXME: We hold a reference to pages, which prevents them from being
+ swapped. It'd be nice to have a callback when Linux wants to swap out. */
+
+/* We fault pages in, which allows us to update accessed/dirty bits.
+ * Return true if we got the page. */
+int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
+{
+ gpgd_t gpgd;
+ spgd_t *spgd;
+ unsigned long gpte_ptr;
+ gpte_t gpte;
+ spte_t *spte;
+
+ gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
+ if (!(gpgd.flags & _PAGE_PRESENT))
+ return 0;
+
+ spgd = spgd_addr(lg, lg->pgdidx, vaddr);
+ if (!(spgd->flags & _PAGE_PRESENT)) {
+ /* Get a page of PTEs for them. */
+ unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+ /* FIXME: Steal from self in this case? */
+ if (!ptepage) {
+ kill_guest(lg, "out of memory allocating pte page");
+ return 0;
+ }
+ check_gpgd(lg, gpgd);
+ spgd->raw.val = (__pa(ptepage) | gpgd.flags);
+ }
+
+ gpte_ptr = gpte_addr(lg, gpgd, vaddr);
+ gpte = mkgpte(lgread_u32(lg, gpte_ptr));
+
+ /* No page? */
+ if (!(gpte.flags & _PAGE_PRESENT))
+ return 0;
+
+ /* Write to read-only page? */
+ if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
+ return 0;
+
+ /* User access to a non-user page? */
+ if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
+ return 0;
+
+ check_gpte(lg, gpte);
+ gpte.flags |= _PAGE_ACCESSED;
+ if (errcode & 2)
+ gpte.flags |= _PAGE_DIRTY;
+
+ /* We're done with the old pte. */
+ spte = spte_addr(lg, *spgd, vaddr);
+ release_pte(*spte);
+
+ /* We don't make it writable if this isn't a write: a later
+ * write will fault so we can set the dirty bit in the guest. */
+ if (gpte.flags & _PAGE_DIRTY)
+ *spte = gpte_to_spte(lg, gpte, 1);
+ else {
+ gpte_t ro_gpte = gpte;
+ ro_gpte.flags &= ~_PAGE_RW;
+ *spte = gpte_to_spte(lg, ro_gpte, 0);
+ }
+
+ /* Now we update dirty/accessed on guest. */
+ lgwrite_u32(lg, gpte_ptr, gpte.raw.val);
+ return 1;
+}
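+/* Summary of the path above (descriptive only): mirror the guest's top-level
+ * entry into the shadow pgdir (allocating a shadow pte page if needed), fetch
+ * and sanity-check the guest pte, pin the backing host page via get_pfn(), and
+ * install a shadow pte that stays read-only until the guest pte is dirty. */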
+
+/* This is much faster than the full demand_page logic. */
+static int page_writable(struct lguest *lg, unsigned long vaddr)
+{
+ spgd_t *spgd;
+ unsigned long flags;
+
+ spgd = spgd_addr(lg, lg->pgdidx, vaddr);
+ if (!(spgd->flags & _PAGE_PRESENT))
+ return 0;
+
+ flags = spte_addr(lg, *spgd, vaddr)->flags;
+ return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
+}
+
+void pin_page(struct lguest *lg, unsigned long vaddr)
+{
+ if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
+ kill_guest(lg, "bad stack page %#lx", vaddr);
+}
+
+static void release_pgd(struct lguest *lg, spgd_t *spgd)
+{
+ if (spgd->flags & _PAGE_PRESENT) {
+ unsigned int i;
+ spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
+ for (i = 0; i < PTES_PER_PAGE; i++)
+ release_pte(ptepage[i]);
+ free_page((long)ptepage);
+ spgd->raw.val = 0;
+ }
+}
+
+static void flush_user_mappings(struct lguest *lg, int idx)
+{
+ unsigned int i;
+ for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
+ release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+}
+
+void guest_pagetable_flush_user(struct lguest *lg)
+{
+ flush_user_mappings(lg, lg->pgdidx);
+}
+
+static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
+{
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+ if (lg->pgdirs[i].cr3 == pgtable)
+ break;
+ return i;
+}
+
+static unsigned int new_pgdir(struct lguest *lg,
+ unsigned long cr3,
+ int *blank_pgdir)
+{
+ unsigned int next;
+
+ next = random32() % ARRAY_SIZE(lg->pgdirs);
+ if (!lg->pgdirs[next].pgdir) {
+ lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
+ if (!lg->pgdirs[next].pgdir)
+ next = lg->pgdidx;
+ else
+ /* There are no mappings: you'll need to re-pin */
+ *blank_pgdir = 1;
+ }
+ lg->pgdirs[next].cr3 = cr3;
+ /* Release all the non-kernel mappings. */
+ flush_user_mappings(lg, next);
+
+ return next;
+}
+
+void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
+{
+ int newpgdir, repin = 0;
+
+ newpgdir = find_pgdir(lg, pgtable);
+ if (newpgdir == ARRAY_SIZE(lg->pgdirs))
+ newpgdir = new_pgdir(lg, pgtable, &repin);
+ lg->pgdidx = newpgdir;
+ if (repin)
+ pin_stack_pages(lg);
+}
+
+static void release_all_pagetables(struct lguest *lg)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+ if (lg->pgdirs[i].pgdir)
+ for (j = 0; j < SWITCHER_PGD_INDEX; j++)
+ release_pgd(lg, lg->pgdirs[i].pgdir + j);
+}
+
+void guest_pagetable_clear_all(struct lguest *lg)
+{
+ release_all_pagetables(lg);
+ pin_stack_pages(lg);
+}
+
+static void do_set_pte(struct lguest *lg, int idx,
+ unsigned long vaddr, gpte_t gpte)
+{
+ spgd_t *spgd = spgd_addr(lg, idx, vaddr);
+ if (spgd->flags & _PAGE_PRESENT) {
+ spte_t *spte = spte_addr(lg, *spgd, vaddr);
+ release_pte(*spte);
+ if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
+ check_gpte(lg, gpte);
+ *spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
+ } else
+ spte->raw.val = 0;
+ }
+}
+
+void guest_set_pte(struct lguest *lg,
+ unsigned long cr3, unsigned long vaddr, gpte_t gpte)
+{
+ /* Kernel mappings must be changed on all top levels. */
+ if (vaddr >= lg->page_offset) {
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+ if (lg->pgdirs[i].pgdir)
+ do_set_pte(lg, i, vaddr, gpte);
+ } else {
+ int pgdir = find_pgdir(lg, cr3);
+ if (pgdir != ARRAY_SIZE(lg->pgdirs))
+ do_set_pte(lg, pgdir, vaddr, gpte);
+ }
+}
+
+void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
+{
+ int pgdir;
+
+ if (idx >= SWITCHER_PGD_INDEX)
+ return;
+
+ pgdir = find_pgdir(lg, cr3);
+ if (pgdir < ARRAY_SIZE(lg->pgdirs))
+ release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+}
+
+int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
+{
+ /* We assume this in flush_user_mappings, so check now */
+ if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
+ return -EINVAL;
+ lg->pgdidx = 0;
+ lg->pgdirs[lg->pgdidx].cr3 = pgtable;
+ lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL);
+ if (!lg->pgdirs[lg->pgdidx].pgdir)
+ return -ENOMEM;
+ return 0;
+}
+
+void free_guest_pagetable(struct lguest *lg)
+{
+ unsigned int i;
+
+ release_all_pagetables(lg);
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+ free_page((long)lg->pgdirs[i].pgdir);
+}
+
+/* Caller must be preempt-safe */
+void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
+{
+ spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+ spgd_t switcher_pgd;
+ spte_t regs_pte;
+
+ /* Since the switcher is less than 4MB, we simply mug the top pte page. */
+ switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
+ switcher_pgd.flags = _PAGE_KERNEL;
+ lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
+
+ /* Map our regs page over stack page. */
+ regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
+ regs_pte.flags = _PAGE_KERNEL;
+ switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
+ = regs_pte;
+}
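+/* Reading aid for the index arithmetic above: "pages" is a virtual address in
+ * the top-of-memory switcher mapping, so (addr / PAGE_SIZE) % PTES_PER_PAGE is
+ * the slot of its first page within the switcher's single pte page; that slot
+ * is pointed at this Guest's register page instead of the generic stack page. */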
+
+static void free_switcher_pte_pages(void)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i)
+ free_page((long)switcher_pte_page(i));
+}
+
+static __init void populate_switcher_pte_page(unsigned int cpu,
+ struct page *switcher_page[],
+ unsigned int pages)
+{
+ unsigned int i;
+ spte_t *pte = switcher_pte_page(cpu);
+
+ for (i = 0; i < pages; i++) {
+ pte[i].pfn = page_to_pfn(switcher_page[i]);
+ pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
+ }
+
+ /* We only map this CPU's pages, so guest can't see others. */
+ i = pages + cpu*2;
+
+ /* First page (regs) is rw, second (state) is ro. */
+ pte[i].pfn = page_to_pfn(switcher_page[i]);
+ pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
+ pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
+ pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
+}
+
+__init int init_pagetables(struct page **switcher_page, unsigned int pages)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i) {
+ switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL);
+ if (!switcher_pte_page(i)) {
+ free_switcher_pte_pages();
+ return -ENOMEM;
+ }
+ populate_switcher_pte_page(i, switcher_page, pages);
+ }
+ return 0;
+}
+
+void free_pagetables(void)
+{
+ free_switcher_pte_pages();
+}
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
new file mode 100644
index 000000000000..1b2cfe89dcd5
--- /dev/null
+++ b/drivers/lguest/segments.c
@@ -0,0 +1,125 @@
+#include "lg.h"
+
+static int desc_ok(const struct desc_struct *gdt)
+{
+ /* MBZ=0, P=1, DT=1 */
+ return ((gdt->b & 0x00209000) == 0x00009000);
+}
+
+static int segment_present(const struct desc_struct *gdt)
+{
+ return gdt->b & 0x8000;
+}
+
+static int ignored_gdt(unsigned int num)
+{
+ return (num == GDT_ENTRY_TSS
+ || num == GDT_ENTRY_LGUEST_CS
+ || num == GDT_ENTRY_LGUEST_DS
+ || num == GDT_ENTRY_DOUBLEFAULT_TSS);
+}
+
+/* We don't allow removal of CS, DS or SS; it doesn't make sense. */
+static void check_segment_use(struct lguest *lg, unsigned int desc)
+{
+ if (lg->regs->gs / 8 == desc)
+ lg->regs->gs = 0;
+ if (lg->regs->fs / 8 == desc)
+ lg->regs->fs = 0;
+ if (lg->regs->es / 8 == desc)
+ lg->regs->es = 0;
+ if (lg->regs->ds / 8 == desc
+ || lg->regs->cs / 8 == desc
+ || lg->regs->ss / 8 == desc)
+ kill_guest(lg, "Removed live GDT entry %u", desc);
+}
+
+static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++) {
+ /* We never copy these ones to real gdt */
+ if (ignored_gdt(i))
+ continue;
+
+ /* We could fault in switch_to_guest if they are using
+ * a removed segment. */
+ if (!segment_present(&lg->gdt[i])) {
+ check_segment_use(lg, i);
+ continue;
+ }
+
+ if (!desc_ok(&lg->gdt[i]))
+ kill_guest(lg, "Bad GDT descriptor %i", i);
+
+ /* DPL 0 presumably means "for use by guest". */
+ if ((lg->gdt[i].b & 0x00006000) == 0)
+ lg->gdt[i].b |= (GUEST_PL << 13);
+
+ /* Set accessed bit, since gdt isn't writable. */
+ lg->gdt[i].b |= 0x00000100;
+ }
+}
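+/* For reference (standard x86 descriptor layout, nothing lguest-specific): in
+ * the high word 'b', bits 13-14 are the DPL (hence GUEST_PL << 13), bit 15 is
+ * Present, and bit 8 (0x00000100) is the Accessed bit, set here so the CPU
+ * never tries to write it back into the read-only GDT copy at switch time. */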
+
+void setup_default_gdt_entries(struct lguest_ro_state *state)
+{
+ struct desc_struct *gdt = state->guest_gdt;
+ unsigned long tss = (unsigned long)&state->guest_tss;
+
+ /* Hypervisor segments. */
+ gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+ gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+
+ /* This is the one which we *cannot* copy from the guest, since the tss
+ depends on this lguest_ro_state, ie. this cpu. */
+ gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
+ gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
+ | ((tss >> 16) & 0x000000FF);
+}
+
+void setup_guest_gdt(struct lguest *lg)
+{
+ lg->gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+ lg->gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+ lg->gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
+ lg->gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+}
+
+/* This is a fast version for the common case where only the three TLS entries
+ * have changed. */
+void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
+{
+ unsigned int i;
+
+ for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
+ gdt[i] = lg->gdt[i];
+}
+
+void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
+{
+ unsigned int i;
+
+ for (i = 0; i < GDT_ENTRIES; i++)
+ if (!ignored_gdt(i))
+ gdt[i] = lg->gdt[i];
+}
+
+void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
+{
+ if (num > ARRAY_SIZE(lg->gdt))
+ kill_guest(lg, "too many gdt entries %i", num);
+
+ lgread(lg, lg->gdt, table, num * sizeof(lg->gdt[0]));
+ fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->gdt));
+ lg->changed |= CHANGED_GDT;
+}
+
+void guest_load_tls(struct lguest *lg, unsigned long gtls)
+{
+ struct desc_struct *tls = &lg->gdt[GDT_ENTRY_TLS_MIN];
+
+ lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
+ fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+ lg->changed |= CHANGED_GDT_TLS;
+}
diff --git a/drivers/lguest/switcher.S b/drivers/lguest/switcher.S
new file mode 100644
index 000000000000..eadd4cc299d2
--- /dev/null
+++ b/drivers/lguest/switcher.S
@@ -0,0 +1,159 @@
+/* This code sits at 0xFFC00000 to do the low-level guest<->host switch.
+
+ There are two pages above us for this CPU (struct lguest_pages).
+ The second page (struct lguest_ro_state) becomes read-only after the
+ context switch. The first page (the stack for traps) remains writable,
+ but while we're in here, the guest cannot be running.
+*/
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include "lg.h"
+
+.text
+ENTRY(start_switcher_text)
+
+/* %eax points to lguest pages for this CPU. %ebx contains cr3 value.
+ All normal registers can be clobbered! */
+ENTRY(switch_to_guest)
+ /* Save host segments on host stack. */
+ pushl %es
+ pushl %ds
+ pushl %gs
+ pushl %fs
+ /* With CONFIG_FRAME_POINTER, gcc doesn't let us clobber this! */
+ pushl %ebp
+ /* Save host stack. */
+ movl %esp, LGUEST_PAGES_host_sp(%eax)
+ /* Switch to guest stack: if we get NMI we expect to be there. */
+ movl %eax, %edx
+ addl $LGUEST_PAGES_regs, %edx
+ movl %edx, %esp
+ /* Switch to guest's GDT, IDT. */
+ lgdt LGUEST_PAGES_guest_gdt_desc(%eax)
+ lidt LGUEST_PAGES_guest_idt_desc(%eax)
+ /* Switch to guest's TSS while GDT still writable. */
+ movl $(GDT_ENTRY_TSS*8), %edx
+ ltr %dx
+ /* Set host's TSS GDT entry to available (clear byte 5 bit 2). */
+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
+ /* Switch to guest page tables: lguest_pages->state now read-only. */
+ movl %ebx, %cr3
+ /* Restore guest regs */
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %gs
+ popl %eax
+ popl %fs
+ popl %ds
+ popl %es
+ /* Skip error code and trap number */
+ addl $8, %esp
+ iret
+
+#define SWITCH_TO_HOST \
+ /* Save guest state */ \
+ pushl %es; \
+ pushl %ds; \
+ pushl %fs; \
+ pushl %eax; \
+ pushl %gs; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ /* Load lguest ds segment for convenience. */ \
+ movl $(LGUEST_DS), %eax; \
+ movl %eax, %ds; \
+ /* Figure out where we are, based on stack (at top of regs). */ \
+ movl %esp, %eax; \
+ subl $LGUEST_PAGES_regs, %eax; \
+ /* Put trap number in %ebx before we switch cr3 and lose it. */ \
+ movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \
+ /* Switch to host page tables (host GDT, IDT and stack are in host \
+ mem, so need this first) */ \
+ movl LGUEST_PAGES_host_cr3(%eax), %edx; \
+ movl %edx, %cr3; \
+ /* Set guest's TSS to available (clear byte 5 bit 2). */ \
+ andb $0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \
+ /* Switch to host's GDT & IDT. */ \
+ lgdt LGUEST_PAGES_host_gdt_desc(%eax); \
+ lidt LGUEST_PAGES_host_idt_desc(%eax); \
+ /* Switch to host's stack. */ \
+ movl LGUEST_PAGES_host_sp(%eax), %esp; \
+ /* Switch to host's TSS */ \
+ movl $(GDT_ENTRY_TSS*8), %edx; \
+ ltr %dx; \
+ popl %ebp; \
+ popl %fs; \
+ popl %gs; \
+ popl %ds; \
+ popl %es
+
+/* Return to run_guest_once. */
+return_to_host:
+ SWITCH_TO_HOST
+ iret
+
+deliver_to_host:
+ SWITCH_TO_HOST
+ /* Decode IDT and jump to the host's irq handler. When that does iret, it
+ * will return to run_guest_once. This is a feature. */
+ movl (LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
+ leal (%edx,%ebx,8), %eax
+ movzwl (%eax),%edx
+ movl 4(%eax), %eax
+ xorw %ax, %ax
+ orl %eax, %edx
+ jmp *%edx
+
+/* Real hardware interrupts are delivered straight to the host. Others
+ cause us to return to run_guest_once so it can decide what to do. Note
+ that some of these are overridden by the guest to deliver directly, and
+ never enter here (see load_guest_idt_entry). */
+.macro IRQ_STUB N TARGET
+ .data; .long 1f; .text; 1:
+ /* Make an error number for most traps, which don't have one. */
+ .if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
+ pushl $0
+ .endif
+ pushl $\N
+ jmp \TARGET
+ ALIGN
+.endm
+
+.macro IRQ_STUBS FIRST LAST TARGET
+ irq=\FIRST
+ .rept \LAST-\FIRST+1
+ IRQ_STUB irq \TARGET
+ irq=irq+1
+ .endr
+.endm
+
+/* We intercept every interrupt, because we may need to switch back to
+ * host. Unfortunately we can't tell them apart except by entry
+ * point, so we need 256 entry points.
+ */
+.data
+.global default_idt_entries
+default_idt_entries:
+.text
+ IRQ_STUBS 0 1 return_to_host /* First two traps */
+ IRQ_STUB 2 handle_nmi /* NMI */
+ IRQ_STUBS 3 31 return_to_host /* Rest of traps */
+ IRQ_STUBS 32 127 deliver_to_host /* Real interrupts */
+ IRQ_STUB 128 return_to_host /* System call (overridden) */
+ IRQ_STUBS 129 255 deliver_to_host /* Other real interrupts */
+
+/* We ignore NMI and return. */
+handle_nmi:
+ addl $8, %esp
+ iret
+
+ENTRY(end_switcher_text)
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index adfea3c7c62a..bc77c5e2ca9f 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -248,21 +248,15 @@ static int adb_scan_bus(void)
static int
adb_probe_task(void *x)
{
- sigset_t blocked;
-
strcpy(current->comm, "kadbprobe");
- sigfillset(&blocked);
- sigprocmask(SIG_BLOCK, &blocked, NULL);
- flush_signals(current);
-
printk(KERN_INFO "adb: starting probe task...\n");
do_adb_reset_bus();
printk(KERN_INFO "adb: finished probe task...\n");
-
+
adb_probe_task_pid = 0;
up(&adb_probe_mutex);
-
+
return 0;
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index c96b7fe882a4..ec9e5f32f0ae 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -365,10 +365,9 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
if (np == NULL)
return NULL;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
- memset(dev, 0, sizeof(*dev));
dev->bus = &chip->lbus;
dev->media_bay = in_bay;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 4177ff004753..2c21d4f25cc8 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -30,7 +30,6 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
-#include <asm/dbdma.h>
#include <asm/macio.h>
#include <asm/keylargo.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index f8e1a135bf9d..d409f6759482 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1053,10 +1053,9 @@ static int smu_open(struct inode *inode, struct file *file)
struct smu_private *pp;
unsigned long flags;
- pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
+ pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
if (pp == 0)
return -ENOMEM;
- memset(pp, 0, sizeof(struct smu_private));
spin_lock_init(&pp->lock);
pp->mode = smu_file_commands;
init_waitqueue_head(&pp->wait);
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index bd55e6ab99fc..f25685b9b7cf 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -335,6 +335,7 @@ static int monitor_task(void *arg)
{
struct thermostat* th = arg;
+ set_freezable();
while(!kthread_should_stop()) {
try_to_freeze();
msleep_interruptible(2000);
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index dbb22403979f..e43554e754a4 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -318,10 +318,9 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
if (adap == NULL)
return NULL;
- clt = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ clt = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (clt == NULL)
return NULL;
- memset(clt, 0, sizeof(struct i2c_client));
clt->addr = (id >> 1) & 0x7f;
clt->adapter = adap;
@@ -1770,7 +1769,8 @@ static int call_critical_overtemp(void)
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL };
- return call_usermodehelper(critical_overtemp_path, argv, envp, 0);
+ return call_usermodehelper(critical_overtemp_path,
+ argv, envp, UMH_WAIT_EXEC);
}
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 3d0354e96a97..5452da1bb1a5 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -431,9 +431,8 @@ do_probe( struct i2c_adapter *adapter, int addr, int kind )
| I2C_FUNC_SMBUS_WRITE_BYTE) )
return 0;
- if( !(cl=kmalloc(sizeof(*cl), GFP_KERNEL)) )
+ if( !(cl=kzalloc(sizeof(*cl), GFP_KERNEL)) )
return -ENOMEM;
- memset( cl, 0, sizeof(struct i2c_client) );
cl->addr = addr;
cl->adapter = adapter;
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 11ced17f438a..516d943227e2 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -80,7 +80,8 @@ int wf_critical_overtemp(void)
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL };
- return call_usermodehelper(critical_overtemp_path, argv, envp, 0);
+ return call_usermodehelper(critical_overtemp_path,
+ argv, envp, UMH_WAIT_EXEC);
}
EXPORT_SYMBOL_GPL(wf_critical_overtemp);
@@ -92,6 +93,7 @@ static int wf_thread_func(void *data)
DBG("wf: thread started\n");
+ set_freezable();
while(!kthread_should_stop()) {
if (time_after_eq(jiffies, next)) {
wf_notify(WF_EVENT_TICK, NULL);
@@ -212,7 +214,6 @@ int wf_register_control(struct wf_control *new_ct)
list_add(&new_ct->link, &wf_controls);
new_ct->attr.attr.name = new_ct->name;
- new_ct->attr.attr.owner = THIS_MODULE;
new_ct->attr.attr.mode = 0644;
new_ct->attr.show = wf_show_control;
new_ct->attr.store = wf_store_control;
@@ -325,7 +326,6 @@ int wf_register_sensor(struct wf_sensor *new_sr)
list_add(&new_sr->link, &wf_sensors);
new_sr->attr.attr.name = new_sr->name;
- new_sr->attr.attr.owner = THIS_MODULE;
new_sr->attr.attr.mode = 0444;
new_sr->attr.show = wf_show_sensor;
new_sr->attr.store = NULL;
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index a0fabf3c2008..7e10c3ab4d50 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -117,10 +117,9 @@ static struct wf_lm75_sensor *wf_lm75_create(struct i2c_adapter *adapter,
DBG("wf_lm75: creating %s device at address 0x%02x\n",
ds1775 ? "ds1775" : "lm75", addr);
- lm = kmalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
+ lm = kzalloc(sizeof(struct wf_lm75_sensor), GFP_KERNEL);
if (lm == NULL)
return NULL;
- memset(lm, 0, sizeof(struct wf_lm75_sensor));
/* Usual rant about sensor names not beeing very consistent in
* the device-tree, oh well ...
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 1043b39aa123..351982bcec1b 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -67,26 +67,6 @@ static struct i2c_driver wf_sat_driver = {
.detach_client = wf_sat_detach,
};
-/*
- * XXX i2c_smbus_read_i2c_block_data doesn't pass the requested
- * length down to the low-level driver, so we use this, which
- * works well enough with the SMU i2c driver code...
- */
-static int sat_read_block(struct i2c_client *client, u8 command,
- u8 *values, int len)
-{
- union i2c_smbus_data data;
- int err;
-
- data.block[0] = len;
- err = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- I2C_SMBUS_READ, command, I2C_SMBUS_I2C_BLOCK_DATA,
- &data);
- if (!err)
- memcpy(values, data.block, len);
- return err;
-}
-
struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
unsigned int *size)
{
@@ -124,8 +104,8 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
return NULL;
for (i = 0; i < len; i += 4) {
- err = sat_read_block(&sat->i2c, 0xa, data, 4);
- if (err) {
+ err = i2c_smbus_read_i2c_block_data(&sat->i2c, 0xa, 4, data);
+ if (err < 0) {
printk(KERN_ERR "smu_sat_get_sdb_part rd err %d\n",
err);
goto fail;
@@ -157,8 +137,8 @@ static int wf_sat_read_cache(struct wf_sat *sat)
{
int err;
- err = sat_read_block(&sat->i2c, 0x3f, sat->cache, 16);
- if (err)
+ err = i2c_smbus_read_i2c_block_data(&sat->i2c, 0x3f, 16, sat->cache);
+ if (err < 0)
return err;
sat->last_read = jiffies;
#ifdef LOTSA_DEBUG
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 7df934d69134..531d4d17d011 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -2,19 +2,17 @@
# Block device driver configuration
#
-if BLOCK
-
-menu "Multi-device support (RAID and LVM)"
-
-config MD
+menuconfig MD
bool "Multiple devices driver support (RAID and LVM)"
+ depends on BLOCK
help
Support multiple physical spindles through a single logical device.
Required for RAID and logical volume management.
+if MD
+
config BLK_DEV_MD
tristate "RAID support"
- depends on MD
---help---
This driver lets you combine several hard disk partitions into one
logical block device. This can be used to simply append one
@@ -109,6 +107,8 @@ config MD_RAID10
config MD_RAID456
tristate "RAID-4/RAID-5/RAID-6 mode"
depends on BLK_DEV_MD
+ select ASYNC_MEMCPY
+ select ASYNC_XOR
---help---
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
@@ -189,7 +189,6 @@ config MD_FAULTY
config BLK_DEV_DM
tristate "Device mapper support"
- depends on MD
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
@@ -262,6 +261,12 @@ config DM_MULTIPATH_EMC
---help---
Multipath support for EMC CX/AX series hardware.
+config DM_MULTIPATH_RDAC
+ tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
+ depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+ ---help---
+ Multipath support for LSI/Engenio RDAC.
+
config DM_DELAY
tristate "I/O delaying target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
@@ -271,6 +276,4 @@ config DM_DELAY
If unsure, say N.
-endmenu
-
-endif
+endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 38754084eac7..c49366cdc05d 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -7,6 +7,7 @@ dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-multipath-objs := dm-hw-handler.o dm-path-selector.o dm-mpath.o
dm-snapshot-objs := dm-snap.o dm-exception-store.o
dm-mirror-objs := dm-log.o dm-raid1.o
+dm-rdac-objs := dm-mpath-rdac.o
md-mod-objs := md.o bitmap.o
raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
raid6int1.o raid6int2.o raid6int4.o \
@@ -17,7 +18,7 @@ raid456-objs := raid5.o raid6algos.o raid6recov.o raid6tables.o \
hostprogs-y := mktables
# Note: link order is important. All raid personalities
-# and xor.o must come before md.o, as they each initialise
+# must come before md.o, as they each initialise
# themselves, and md.o may use the personalities when it
# auto-initialised.
@@ -25,7 +26,7 @@ obj-$(CONFIG_MD_LINEAR) += linear.o
obj-$(CONFIG_MD_RAID0) += raid0.o
obj-$(CONFIG_MD_RAID1) += raid1.o
obj-$(CONFIG_MD_RAID10) += raid10.o
-obj-$(CONFIG_MD_RAID456) += raid456.o xor.o
+obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
+obj-$(CONFIG_DM_MULTIPATH_RDAC) += dm-rdac.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9620d452d030..927cb34c4805 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -268,6 +268,31 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
bdev_hardsect_size(rdev->bdev));
+ /* Just make sure we aren't corrupting data or
+ * metadata
+ */
+ if (bitmap->offset < 0) {
+ /* DATA BITMAP METADATA */
+ if (bitmap->offset
+ + page->index * (PAGE_SIZE/512)
+ + size/512 > 0)
+ /* bitmap runs in to metadata */
+ return -EINVAL;
+ if (rdev->data_offset + mddev->size*2
+ > rdev->sb_offset*2 + bitmap->offset)
+ /* data runs in to bitmap */
+ return -EINVAL;
+ } else if (rdev->sb_offset*2 < rdev->data_offset) {
+ /* METADATA BITMAP DATA */
+ if (rdev->sb_offset*2
+ + bitmap->offset
+ + page->index*(PAGE_SIZE/512) + size/512
+ > rdev->data_offset)
+ /* bitmap runs in to data */
+ return -EINVAL;
+ } else {
+ /* DATA METADATA BITMAP - no problems */
+ }
md_super_write(mddev, rdev,
(rdev->sb_offset<<1) + bitmap->offset
+ page->index * (PAGE_SIZE/512),
@@ -280,32 +305,38 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
return 0;
}
+static void bitmap_file_kick(struct bitmap *bitmap);
/*
* write out a page to a file
*/
-static int write_page(struct bitmap *bitmap, struct page *page, int wait)
+static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
struct buffer_head *bh;
- if (bitmap->file == NULL)
- return write_sb_page(bitmap, page, wait);
+ if (bitmap->file == NULL) {
+ switch (write_sb_page(bitmap, page, wait)) {
+ case -EINVAL:
+ bitmap->flags |= BITMAP_WRITE_ERROR;
+ }
+ } else {
- bh = page_buffers(page);
+ bh = page_buffers(page);
- while (bh && bh->b_blocknr) {
- atomic_inc(&bitmap->pending_writes);
- set_buffer_locked(bh);
- set_buffer_mapped(bh);
- submit_bh(WRITE, bh);
- bh = bh->b_this_page;
- }
+ while (bh && bh->b_blocknr) {
+ atomic_inc(&bitmap->pending_writes);
+ set_buffer_locked(bh);
+ set_buffer_mapped(bh);
+ submit_bh(WRITE, bh);
+ bh = bh->b_this_page;
+ }
- if (wait) {
- wait_event(bitmap->write_wait,
- atomic_read(&bitmap->pending_writes)==0);
- return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
+ if (wait) {
+ wait_event(bitmap->write_wait,
+ atomic_read(&bitmap->pending_writes)==0);
+ }
}
- return 0;
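+ /* Both branches above record failures in BITMAP_WRITE_ERROR (the
+ * file path does so from end_bitmap_write); once it is set, kick
+ * the bitmap out of the array rather than keep writing to it. */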
+ if (bitmap->flags & BITMAP_WRITE_ERROR)
+ bitmap_file_kick(bitmap);
}
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -425,17 +456,17 @@ out:
*/
/* update the event counter and sync the superblock to disk */
-int bitmap_update_sb(struct bitmap *bitmap)
+void bitmap_update_sb(struct bitmap *bitmap)
{
bitmap_super_t *sb;
unsigned long flags;
if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
- return 0;
+ return;
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->sb_page) { /* no superblock */
spin_unlock_irqrestore(&bitmap->lock, flags);
- return 0;
+ return;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -443,7 +474,7 @@ int bitmap_update_sb(struct bitmap *bitmap)
if (!bitmap->mddev->degraded)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
kunmap_atomic(sb, KM_USER0);
- return write_page(bitmap, bitmap->sb_page, 1);
+ write_page(bitmap, bitmap->sb_page, 1);
}
/* print out the bitmap file superblock */
@@ -572,20 +603,22 @@ enum bitmap_mask_op {
MASK_UNSET
};
-/* record the state of the bitmap in the superblock */
-static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
- enum bitmap_mask_op op)
+/* record the state of the bitmap in the superblock. Return the old value */
+static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
+ enum bitmap_mask_op op)
{
bitmap_super_t *sb;
unsigned long flags;
+ int old;
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->sb_page) { /* can't set the state */
spin_unlock_irqrestore(&bitmap->lock, flags);
- return;
+ return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
+ old = le32_to_cpu(sb->state) & bits;
switch (op) {
case MASK_SET: sb->state |= cpu_to_le32(bits);
break;
@@ -594,6 +627,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
default: BUG();
}
kunmap_atomic(sb, KM_USER0);
+ return old;
}
/*
@@ -687,18 +721,23 @@ static void bitmap_file_kick(struct bitmap *bitmap)
{
char *path, *ptr = NULL;
- bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
- bitmap_update_sb(bitmap);
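+ /* bitmap_mask_state() now reports the previous state, so the
+ * superblock write and log message below only happen the first
+ * time BITMAP_STALE is set. */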
+ if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
+ bitmap_update_sb(bitmap);
- if (bitmap->file) {
- path = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (path)
- ptr = file_path(bitmap->file, path, PAGE_SIZE);
+ if (bitmap->file) {
+ path = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (path)
+ ptr = file_path(bitmap->file, path, PAGE_SIZE);
- printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
- bmname(bitmap), ptr ? ptr : "");
+ printk(KERN_ALERT
+ "%s: kicking failed bitmap file %s from array!\n",
+ bmname(bitmap), ptr ? ptr : "");
- kfree(path);
+ kfree(path);
+ } else
+ printk(KERN_ALERT
+ "%s: disabling internal bitmap due to errors\n",
+ bmname(bitmap));
}
bitmap_file_put(bitmap);
@@ -769,16 +808,15 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
-int bitmap_unplug(struct bitmap *bitmap)
+void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i, flags;
int dirty, need_write;
struct page *page;
int wait = 0;
- int err;
if (!bitmap)
- return 0;
+ return;
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
@@ -786,7 +824,7 @@ int bitmap_unplug(struct bitmap *bitmap)
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->filemap) {
spin_unlock_irqrestore(&bitmap->lock, flags);
- return 0;
+ return;
}
page = bitmap->filemap[i];
dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -798,7 +836,7 @@ int bitmap_unplug(struct bitmap *bitmap)
spin_unlock_irqrestore(&bitmap->lock, flags);
if (dirty | need_write)
- err = write_page(bitmap, page, 0);
+ write_page(bitmap, page, 0);
}
if (wait) { /* if any writes were performed, we need to wait on them */
if (bitmap->file)
@@ -809,7 +847,6 @@ int bitmap_unplug(struct bitmap *bitmap)
}
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
- return 0;
}
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
@@ -858,21 +895,21 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bmname(bitmap),
(unsigned long) i_size_read(file->f_mapping->host),
bytes + sizeof(bitmap_super_t));
- goto out;
+ goto err;
}
ret = -ENOMEM;
bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
if (!bitmap->filemap)
- goto out;
+ goto err;
/* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
bitmap->filemap_attr = kzalloc(
roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
GFP_KERNEL);
if (!bitmap->filemap_attr)
- goto out;
+ goto err;
oldindex = ~0L;
@@ -905,7 +942,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
}
if (IS_ERR(page)) { /* read error */
ret = PTR_ERR(page);
- goto out;
+ goto err;
}
oldindex = index;
@@ -920,11 +957,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
memset(paddr + offset, 0xff,
PAGE_SIZE - offset);
kunmap_atomic(paddr, KM_USER0);
- ret = write_page(bitmap, page, 1);
- if (ret) {
+ write_page(bitmap, page, 1);
+
+ ret = -EIO;
+ if (bitmap->flags & BITMAP_WRITE_ERROR) {
/* release, page not in filemap yet */
put_page(page);
- goto out;
+ goto err;
}
}
@@ -956,11 +995,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
md_wakeup_thread(bitmap->mddev->thread);
}
-out:
printk(KERN_INFO "%s: bitmap initialized from disk: "
- "read %lu/%lu pages, set %lu bits, status: %d\n",
- bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);
+ "read %lu/%lu pages, set %lu bits\n",
+ bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
+
+ return 0;
+ err:
+ printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
+ bmname(bitmap), ret);
return ret;
}
@@ -997,19 +1040,18 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
* out to disk
*/
-int bitmap_daemon_work(struct bitmap *bitmap)
+void bitmap_daemon_work(struct bitmap *bitmap)
{
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
- int err = 0;
int blocks;
void *paddr;
if (bitmap == NULL)
- return 0;
+ return;
if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
- return 0;
+ return;
bitmap->daemon_lastrun = jiffies;
for (j = 0; j < bitmap->chunks; j++) {
@@ -1032,14 +1074,8 @@ int bitmap_daemon_work(struct bitmap *bitmap)
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (need_write) {
- switch (write_page(bitmap, page, 0)) {
- case 0:
- break;
- default:
- bitmap_file_kick(bitmap);
- }
- }
+ if (need_write)
+ write_page(bitmap, page, 0);
continue;
}
@@ -1048,13 +1084,11 @@ int bitmap_daemon_work(struct bitmap *bitmap)
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- err = write_page(bitmap, lastpage, 0);
+ write_page(bitmap, lastpage, 0);
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
- if (err)
- bitmap_file_kick(bitmap);
} else
spin_unlock_irqrestore(&bitmap->lock, flags);
lastpage = page;
@@ -1097,14 +1131,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- err = write_page(bitmap, lastpage, 0);
+ write_page(bitmap, lastpage, 0);
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
}
- return err;
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1517,7 +1550,9 @@ int bitmap_create(mddev_t *mddev)
mddev->thread->timeout = bitmap->daemon_sleep * HZ;
- return bitmap_update_sb(bitmap);
+ bitmap_update_sb(bitmap);
+
+ return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
error:
bitmap_free(bitmap);
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index c6be88826fae..16ee3b018b3a 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,7 +8,6 @@
#define DM_BIO_LIST_H
#include <linux/bio.h>
-#include <linux/prefetch.h>
struct bio_list {
struct bio *head;
@@ -31,8 +30,7 @@ static inline void bio_list_init(struct bio_list *bl)
}
#define bio_list_for_each(bio, bl) \
- for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
- bio = bio->bi_next)
+ for (bio = (bl)->head; bio; bio = bio->bi_next)
static inline unsigned bio_list_size(const struct bio_list *bl)
{
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7b0fcfc9eaa5..bdc52d6922b7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -30,7 +30,7 @@
/*
* per bio private data
*/
-struct crypt_io {
+struct dm_crypt_io {
struct dm_target *target;
struct bio *base_bio;
struct work_struct work;
@@ -106,7 +106,7 @@ struct crypt_config {
static struct kmem_cache *_crypt_io_pool;
-static void clone_init(struct crypt_io *, struct bio *);
+static void clone_init(struct dm_crypt_io *, struct bio *);
/*
* Different IV generation algorithms:
@@ -382,7 +382,7 @@ static int crypt_convert(struct crypt_config *cc,
static void dm_crypt_bio_destructor(struct bio *bio)
{
- struct crypt_io *io = bio->bi_private;
+ struct dm_crypt_io *io = bio->bi_private;
struct crypt_config *cc = io->target->private;
bio_free(bio, cc->bs);
@@ -393,7 +393,7 @@ static int crypt_convert(struct crypt_config *cc,
* This should never violate the device limitations
* May return a smaller bio when running out of pages
*/
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
@@ -479,7 +479,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc,
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
*/
-static void dec_pending(struct crypt_io *io, int error)
+static void dec_pending(struct dm_crypt_io *io, int error)
{
struct crypt_config *cc = (struct crypt_config *) io->target->private;
@@ -503,7 +503,7 @@ static void dec_pending(struct crypt_io *io, int error)
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);
-static void kcryptd_queue_io(struct crypt_io *io)
+static void kcryptd_queue_io(struct dm_crypt_io *io)
{
INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
@@ -511,7 +511,7 @@ static void kcryptd_queue_io(struct crypt_io *io)
static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
- struct crypt_io *io = clone->bi_private;
+ struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
@@ -545,7 +545,7 @@ out:
return error;
}
-static void clone_init(struct crypt_io *io, struct bio *clone)
+static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
struct crypt_config *cc = io->target->private;
@@ -556,7 +556,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
clone->bi_destructor = dm_crypt_bio_destructor;
}
-static void process_read(struct crypt_io *io)
+static void process_read(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
@@ -587,7 +587,7 @@ static void process_read(struct crypt_io *io)
generic_make_request(clone);
}
-static void process_write(struct crypt_io *io)
+static void process_write(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
@@ -644,7 +644,7 @@ static void process_write(struct crypt_io *io)
}
}
-static void process_read_endio(struct crypt_io *io)
+static void process_read_endio(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct convert_context ctx;
@@ -657,7 +657,7 @@ static void process_read_endio(struct crypt_io *io)
static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = container_of(work, struct crypt_io, work);
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
if (io->post_process)
process_read_endio(io);
@@ -920,6 +920,8 @@ static void crypt_dtr(struct dm_target *ti)
{
struct crypt_config *cc = (struct crypt_config *) ti->private;
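+ /* Wait for any kcryptd work still queued for this target to finish
+ * before its mempools and bioset are destroyed below. */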
+ flush_workqueue(_kcryptd_workqueue);
+
bioset_free(cc->bs);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->io_pool);
@@ -939,10 +941,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct crypt_config *cc = ti->private;
- struct crypt_io *io;
-
- if (bio_barrier(bio))
- return -EOPNOTSUPP;
+ struct dm_crypt_io *io;
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->target = ti;
@@ -1062,9 +1061,7 @@ static int __init dm_crypt_init(void)
{
int r;
- _crypt_io_pool = kmem_cache_create("dm-crypt_io",
- sizeof(struct crypt_io),
- 0, 0, NULL, NULL);
+ _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
if (!_crypt_io_pool)
return -ENOMEM;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 52c7cf9e5803..6928c136d3c5 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,7 +20,7 @@
struct delay_c {
struct timer_list delay_timer;
- struct semaphore timer_lock;
+ struct mutex timer_lock;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
atomic_t may_delay;
@@ -37,7 +37,7 @@ struct delay_c {
unsigned writes;
};
-struct delay_info {
+struct dm_delay_info {
struct delay_c *context;
struct list_head list;
struct bio *bio;
@@ -58,12 +58,12 @@ static void handle_delayed_timer(unsigned long data)
static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
- down(&dc->timer_lock);
+ mutex_lock(&dc->timer_lock);
if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
mod_timer(&dc->delay_timer, expires);
- up(&dc->timer_lock);
+ mutex_unlock(&dc->timer_lock);
}
static void flush_bios(struct bio *bio)
@@ -80,7 +80,7 @@ static void flush_bios(struct bio *bio)
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
- struct delay_info *delayed, *next;
+ struct dm_delay_info *delayed, *next;
unsigned long next_expires = 0;
int start_timer = 0;
BIO_LIST(flush_bios);
@@ -193,13 +193,11 @@ out:
goto bad;
}
- init_timer(&dc->delay_timer);
- dc->delay_timer.function = handle_delayed_timer;
- dc->delay_timer.data = (unsigned long)dc;
+ setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
INIT_LIST_HEAD(&dc->delayed_bios);
- init_MUTEX(&dc->timer_lock);
+ mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
ti->private = dc;
@@ -227,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{
- struct delay_info *delayed;
+ struct dm_delay_info *delayed;
unsigned long expires = 0;
if (!delay || !atomic_read(&dc->may_delay))
@@ -338,10 +336,7 @@ static int __init dm_delay_init(void)
goto bad_queue;
}
- delayed_cache = kmem_cache_create("dm-delay",
- sizeof(struct delay_info),
- __alignof__(struct delay_info),
- 0, NULL, NULL);
+ delayed_cache = KMEM_CACHE(dm_delay_info, 0);
if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache.");
goto bad_memcache;
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 07e0a0c84f6e..8fe81e1807e0 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -125,9 +125,11 @@ struct pstore {
uint32_t callback_count;
struct commit_callback *callbacks;
struct dm_io_client *io_client;
+
+ struct workqueue_struct *metadata_wq;
};
-static inline unsigned int sectors_to_pages(unsigned int sectors)
+static unsigned sectors_to_pages(unsigned sectors)
{
return sectors / (PAGE_SIZE >> 9);
}
@@ -156,10 +158,24 @@ static void free_area(struct pstore *ps)
ps->area = NULL;
}
+struct mdata_req {
+ struct io_region *where;
+ struct dm_io_request *io_req;
+ struct work_struct work;
+ int result;
+};
+
+static void do_metadata(struct work_struct *work)
+{
+ struct mdata_req *req = container_of(work, struct mdata_req, work);
+
+ req->result = dm_io(req->io_req, 1, req->where, NULL);
+}
+
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
+static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
{
struct io_region where = {
.bdev = ps->snap->cow->bdev,
@@ -173,8 +189,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
.client = ps->io_client,
.notify.fn = NULL,
};
+ struct mdata_req req;
+
+ if (!metadata)
+ return dm_io(&io_req, 1, &where, NULL);
+
+ req.where = &where;
+ req.io_req = &io_req;
- return dm_io(&io_req, 1, &where, NULL);
+ /*
+ * Issue the synchronous I/O from a different thread
+ * to avoid generic_make_request recursion.
+ */
+ INIT_WORK(&req.work, do_metadata);
+ queue_work(ps->metadata_wq, &req.work);
+ flush_workqueue(ps->metadata_wq);
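+ /* flush_workqueue() does not return until do_metadata() has run,
+ * so req.result is valid by the time it is read below. */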
+
+ return req.result;
}
/*
@@ -189,7 +220,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
/* convert a metadata area index to a chunk index */
chunk = 1 + ((ps->exceptions_per_area + 1) * area);
- r = chunk_io(ps, chunk, rw);
+ r = chunk_io(ps, chunk, rw, 0);
if (r)
return r;
@@ -230,7 +261,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (r)
return r;
- r = chunk_io(ps, 0, READ);
+ r = chunk_io(ps, 0, READ, 1);
if (r)
goto bad;
@@ -292,7 +323,7 @@ static int write_header(struct pstore *ps)
dh->version = cpu_to_le32(ps->version);
dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
- return chunk_io(ps, 0, WRITE);
+ return chunk_io(ps, 0, WRITE, 1);
}
/*
@@ -393,7 +424,7 @@ static int read_exceptions(struct pstore *ps)
return 0;
}
-static inline struct pstore *get_info(struct exception_store *store)
+static struct pstore *get_info(struct exception_store *store)
{
return (struct pstore *) store->context;
}
@@ -409,6 +440,7 @@ static void persistent_destroy(struct exception_store *store)
{
struct pstore *ps = get_info(store);
+ destroy_workqueue(ps->metadata_wq);
dm_io_client_destroy(ps->io_client);
vfree(ps->callbacks);
free_area(ps);
@@ -457,11 +489,6 @@ static int persistent_read_metadata(struct exception_store *store)
/*
* Sanity checks.
*/
- if (!ps->valid) {
- DMWARN("snapshot is marked invalid");
- return -EINVAL;
- }
-
if (ps->version != SNAPSHOT_DISK_VERSION) {
DMWARN("unable to handle snapshot disk version %d",
ps->version);
@@ -469,6 +496,12 @@ static int persistent_read_metadata(struct exception_store *store)
}
/*
+ * The metadata is valid, but the snapshot has been invalidated
+ */
+ if (!ps->valid)
+ return 1;
+
+ /*
* Read the metadata.
*/
r = read_exceptions(ps);
@@ -480,7 +513,7 @@ static int persistent_read_metadata(struct exception_store *store)
}
static int persistent_prepare(struct exception_store *store,
- struct exception *e)
+ struct dm_snap_exception *e)
{
struct pstore *ps = get_info(store);
uint32_t stride;
@@ -505,7 +538,7 @@ static int persistent_prepare(struct exception_store *store,
}
static void persistent_commit(struct exception_store *store,
- struct exception *e,
+ struct dm_snap_exception *e,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -588,6 +621,13 @@ int dm_create_persistent(struct exception_store *store)
atomic_set(&ps->pending_count, 0);
ps->callbacks = NULL;
+ ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
+ if (!ps->metadata_wq) {
+ kfree(ps);
+ DMERR("couldn't start header metadata update thread");
+ return -ENOMEM;
+ }
+
store->destroy = persistent_destroy;
store->read_metadata = persistent_read_metadata;
store->prepare_exception = persistent_prepare;
@@ -616,7 +656,8 @@ static int transient_read_metadata(struct exception_store *store)
return 0;
}
-static int transient_prepare(struct exception_store *store, struct exception *e)
+static int transient_prepare(struct exception_store *store,
+ struct dm_snap_exception *e)
{
struct transient_c *tc = (struct transient_c *) store->context;
sector_t size = get_dev_size(store->snap->cow->bdev);
@@ -631,9 +672,9 @@ static int transient_prepare(struct exception_store *store, struct exception *e)
}
static void transient_commit(struct exception_store *store,
- struct exception *e,
- void (*callback) (void *, int success),
- void *callback_context)
+ struct dm_snap_exception *e,
+ void (*callback) (void *, int success),
+ void *callback_context)
{
/* Just succeed */
callback(callback_context, 1);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 352c6fbeac53..f3a772486437 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -293,7 +293,10 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
* bvec for bio_get/set_region() and decrement bi_max_vecs
* to hide it from bio_add_page().
*/
- num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
+ num_bvecs = dm_sector_div_up(remaining,
+ (PAGE_SIZE >> SECTOR_SHIFT));
+ num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
+ num_bvecs);
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
diff --git a/drivers/md/dm-mpath-rdac.c b/drivers/md/dm-mpath-rdac.c
new file mode 100644
index 000000000000..8b776b8cb7f7
--- /dev/null
+++ b/drivers/md/dm-mpath-rdac.c
@@ -0,0 +1,700 @@
+/*
+ * Engenio/LSI RDAC DM HW handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+
+#define DM_MSG_PREFIX "multipath rdac"
+
+#include "dm.h"
+#include "dm-hw-handler.h"
+
+#define RDAC_DM_HWH_NAME "rdac"
+#define RDAC_DM_HWH_VER "0.4"
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_ALL_LUNS 0x01
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
+
+struct rdac_mode_6_hdr {
+ u8 data_len;
+ u8 medium_type;
+ u8 device_params;
+ u8 block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+ u16 data_len;
+ u8 medium_type;
+ u8 device_params;
+ u16 reserved;
+ u16 block_desc_len;
+};
+
+struct rdac_mode_common {
+ u8 controller_serial[16];
+ u8 alt_controller_serial[16];
+ u8 rdac_mode[2];
+ u8 alt_rdac_mode[2];
+ u8 quiescence_timeout;
+ u8 rdac_options;
+};
+
+struct rdac_pg_legacy {
+ struct rdac_mode_6_hdr hdr;
+ u8 page_code;
+ u8 page_len;
+ struct rdac_mode_common common;
+#define MODE6_MAX_LUN 32
+ u8 lun_table[MODE6_MAX_LUN];
+ u8 reserved2[32];
+ u8 reserved3;
+ u8 reserved4;
+};
+
+struct rdac_pg_expanded {
+ struct rdac_mode_10_hdr hdr;
+ u8 page_code;
+ u8 subpage_code;
+ u8 page_len[2];
+ struct rdac_mode_common common;
+ u8 lun_table[256];
+ u8 reserved3;
+ u8 reserved4;
+};
+
+struct c9_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC9 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "vace" */
+ u8 avte_cvp;
+ u8 path_prio;
+ u8 reserved2[38];
+};
+
+#define SUBSYS_ID_LEN 16
+#define SLOT_ID_LEN 2
+
+struct c4_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC4 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "subs" */
+ u8 subsys_id[SUBSYS_ID_LEN];
+ u8 revision[4];
+ u8 slot_id[SLOT_ID_LEN];
+ u8 reserved[2];
+};
+
+struct rdac_controller {
+ u8 subsys_id[SUBSYS_ID_LEN];
+ u8 slot_id[SLOT_ID_LEN];
+ int use_10_ms;
+ struct kref kref;
+ struct list_head node; /* list of all controllers */
+ spinlock_t lock;
+ int submitted;
+ struct list_head cmd_list; /* list of commands to be submitted */
+ union {
+ struct rdac_pg_legacy legacy;
+ struct rdac_pg_expanded expanded;
+ } mode_select;
+};
+
+struct c8_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC8 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "edid" */
+ u8 reserved2[3];
+ u8 vol_uniq_id_len;
+ u8 vol_uniq_id[16];
+ u8 vol_user_label_len;
+ u8 vol_user_label[60];
+ u8 array_uniq_id_len;
+ u8 array_unique_id[16];
+ u8 array_user_label_len;
+ u8 array_user_label[60];
+ u8 lun[8];
+};
+
+struct c2_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC2 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "swr4" */
+ u8 sw_version[3];
+ u8 sw_date[3];
+ u8 features_enabled;
+ u8 max_lun_supported;
+ u8 partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_handler {
+ struct list_head entry; /* list waiting to submit MODE SELECT */
+ unsigned timeout;
+ struct rdac_controller *ctlr;
+#define UNINITIALIZED_LUN (1 << 8)
+ unsigned lun;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ struct dm_path *path;
+ struct work_struct work;
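+ /*
+ * pg_init is driven as a small state machine from the request
+ * completion handlers: a C8 inquiry discovers the LUN, C9 reports
+ * AVT mode and current ownership, C4 identifies the controller,
+ * C2 chooses between MODE SELECT(6) and MODE SELECT(10), and the
+ * final MODE SELECT transfers the LUN to this controller.
+ */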
+#define SEND_C2_INQUIRY 1
+#define SEND_C4_INQUIRY 2
+#define SEND_C8_INQUIRY 3
+#define SEND_C9_INQUIRY 4
+#define SEND_MODE_SELECT 5
+ int cmd_to_send;
+ union {
+ struct c2_inquiry c2;
+ struct c4_inquiry c4;
+ struct c8_inquiry c8;
+ struct c9_inquiry c9;
+ } inq;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+static struct workqueue_struct *rdac_wkqd;
+
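+/* Any block layer error, transport failure (host byte) or incomplete
+ * message byte is treated as a failed command. */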
+static inline int had_failures(struct request *req, int error)
+{
+ return (error || host_byte(req->errors) != DID_OK ||
+ msg_byte(req->errors) != COMMAND_COMPLETE);
+}
+
+static void rdac_resubmit_all(struct rdac_handler *h)
+{
+ struct rdac_controller *ctlr = h->ctlr;
+ struct rdac_handler *tmp, *h1;
+
+ spin_lock(&ctlr->lock);
+ list_for_each_entry_safe(h1, tmp, &ctlr->cmd_list, entry) {
+ h1->cmd_to_send = SEND_C9_INQUIRY;
+ queue_work(rdac_wkqd, &h1->work);
+ list_del(&h1->entry);
+ }
+ ctlr->submitted = 0;
+ spin_unlock(&ctlr->lock);
+}
+
+static void mode_select_endio(struct request *req, int error)
+{
+ struct rdac_handler *h = req->end_io_data;
+ struct scsi_sense_hdr sense_hdr;
+ int sense = 0, fail = 0;
+
+ if (had_failures(req, error)) {
+ fail = 1;
+ goto failed;
+ }
+
+ if (status_byte(req->errors) == CHECK_CONDITION) {
+ scsi_normalize_sense(req->sense, SCSI_SENSE_BUFFERSIZE,
+ &sense_hdr);
+ sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
+ sense_hdr.ascq;
+ /* If it is retryable failure, submit the c9 inquiry again */
+ if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
+ sense == 0x62900) {
+ /* 0x59136 - Command lock contention
+ * 0x[6b]8b02 - Quiescence in progress or achieved
+ * 0x62900 - Power On, Reset, or Bus Device Reset
+ */
+ h->cmd_to_send = SEND_C9_INQUIRY;
+ queue_work(rdac_wkqd, &h->work);
+ goto done;
+ }
+ if (sense)
+ DMINFO("MODE_SELECT failed on %s with sense 0x%x",
+ h->path->dev->name, sense);
+ }
+failed:
+ if (fail || sense)
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+ else
+ dm_pg_init_complete(h->path, 0);
+
+done:
+ rdac_resubmit_all(h);
+ __blk_put_request(req->q, req);
+}
+
+static struct request *get_rdac_req(struct rdac_handler *h,
+ void *buffer, unsigned buflen, int rw)
+{
+ struct request *rq;
+ struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
+
+ rq = blk_get_request(q, rw, GFP_KERNEL);
+
+ if (!rq) {
+ DMINFO("get_rdac_req: blk_get_request failed");
+ return NULL;
+ }
+
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+ blk_put_request(rq);
+ DMINFO("get_rdac_req: blk_rq_map_kern failed");
+ return NULL;
+ }
+
+ memset(&rq->cmd, 0, BLK_MAX_CDB);
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
+ rq->end_io_data = h;
+ rq->timeout = h->timeout;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->cmd_flags = REQ_FAILFAST | REQ_NOMERGE;
+ return rq;
+}
+
+static struct request *rdac_failover_get(struct rdac_handler *h)
+{
+ struct request *rq;
+ struct rdac_mode_common *common;
+ unsigned data_size;
+
+ if (h->ctlr->use_10_ms) {
+ struct rdac_pg_expanded *rdac_pg;
+
+ data_size = sizeof(struct rdac_pg_expanded);
+ rdac_pg = &h->ctlr->mode_select.expanded;
+ memset(rdac_pg, 0, data_size);
+ common = &rdac_pg->common;
+ rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+ rdac_pg->subpage_code = 0x1;
+ rdac_pg->page_len[0] = 0x01;
+ rdac_pg->page_len[1] = 0x28;
+ rdac_pg->lun_table[h->lun] = 0x81;
+ } else {
+ struct rdac_pg_legacy *rdac_pg;
+
+ data_size = sizeof(struct rdac_pg_legacy);
+ rdac_pg = &h->ctlr->mode_select.legacy;
+ memset(rdac_pg, 0, data_size);
+ common = &rdac_pg->common;
+ rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+ rdac_pg->page_len = 0x68;
+ rdac_pg->lun_table[h->lun] = 0x81;
+ }
+ common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+ common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+ common->rdac_options = RDAC_FORCED_QUIESENCE;
+
+ /* get request for block layer packet command */
+ rq = get_rdac_req(h, &h->ctlr->mode_select, data_size, WRITE);
+ if (!rq) {
+ DMERR("rdac_failover_get: no rq");
+ return NULL;
+ }
+
+ /* Prepare the command. */
+ if (h->ctlr->use_10_ms) {
+ rq->cmd[0] = MODE_SELECT_10;
+ rq->cmd[7] = data_size >> 8;
+ rq->cmd[8] = data_size & 0xff;
+ } else {
+ rq->cmd[0] = MODE_SELECT;
+ rq->cmd[4] = data_size;
+ }
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+ return rq;
+}
+
+/* Acquires h->ctlr->lock */
+static void submit_mode_select(struct rdac_handler *h)
+{
+ struct request *rq;
+ struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
+
+ spin_lock(&h->ctlr->lock);
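+ /*
+ * Only one MODE SELECT is outstanding per controller; any other
+ * paths that need one are parked on cmd_list and resubmitted by
+ * rdac_resubmit_all() once the current command completes.
+ */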
+ if (h->ctlr->submitted) {
+ list_add(&h->entry, &h->ctlr->cmd_list);
+ goto drop_lock;
+ }
+
+ if (!q) {
+ DMINFO("submit_mode_select: no queue");
+ goto fail_path;
+ }
+
+ rq = rdac_failover_get(h);
+ if (!rq) {
+ DMERR("submit_mode_select: no rq");
+ goto fail_path;
+ }
+
+ DMINFO("queueing MODE_SELECT command on %s", h->path->dev->name);
+
+ blk_execute_rq_nowait(q, NULL, rq, 1, mode_select_endio);
+ h->ctlr->submitted = 1;
+ goto drop_lock;
+fail_path:
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+drop_lock:
+ spin_unlock(&h->ctlr->lock);
+}
+
+static void release_ctlr(struct kref *kref)
+{
+ struct rdac_controller *ctlr;
+ ctlr = container_of(kref, struct rdac_controller, kref);
+
+ spin_lock(&list_lock);
+ list_del(&ctlr->node);
+ spin_unlock(&list_lock);
+ kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+{
+ struct rdac_controller *ctlr, *tmp;
+
+ spin_lock(&list_lock);
+
+ list_for_each_entry(tmp, &ctlr_list, node) {
+ if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
+ (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+ kref_get(&tmp->kref);
+ spin_unlock(&list_lock);
+ return tmp;
+ }
+ }
+ ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+ if (!ctlr)
+ goto done;
+
+ /* initialize fields of controller */
+ memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
+ memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+ kref_init(&ctlr->kref);
+ spin_lock_init(&ctlr->lock);
+ ctlr->submitted = 0;
+ ctlr->use_10_ms = -1;
+ INIT_LIST_HEAD(&ctlr->cmd_list);
+ list_add(&ctlr->node, &ctlr_list);
+done:
+ spin_unlock(&list_lock);
+ return ctlr;
+}
+
+static void c4_endio(struct request *req, int error)
+{
+ struct rdac_handler *h = req->end_io_data;
+ struct c4_inquiry *sp;
+
+ if (had_failures(req, error)) {
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+ goto done;
+ }
+
+ sp = &h->inq.c4;
+
+ h->ctlr = get_controller(sp->subsys_id, sp->slot_id);
+
+ if (h->ctlr) {
+ h->cmd_to_send = SEND_C9_INQUIRY;
+ queue_work(rdac_wkqd, &h->work);
+ } else
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+done:
+ __blk_put_request(req->q, req);
+}
+
+static void c2_endio(struct request *req, int error)
+{
+ struct rdac_handler *h = req->end_io_data;
+ struct c2_inquiry *sp;
+
+ if (had_failures(req, error)) {
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+ goto done;
+ }
+
+ sp = &h->inq.c2;
+
+ /* If more than MODE6_MAX_LUN luns are supported, use mode select 10 */
+ if (sp->max_lun_supported >= MODE6_MAX_LUN)
+ h->ctlr->use_10_ms = 1;
+ else
+ h->ctlr->use_10_ms = 0;
+
+ h->cmd_to_send = SEND_MODE_SELECT;
+ queue_work(rdac_wkqd, &h->work);
+done:
+ __blk_put_request(req->q, req);
+}
+
+static void c9_endio(struct request *req, int error)
+{
+ struct rdac_handler *h = req->end_io_data;
+ struct c9_inquiry *sp;
+
+ if (had_failures(req, error)) {
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+ goto done;
+ }
+
+ /* We need to look at the sense keys here to take clear action.
+ * For now simple logic: If the host is in AVT mode or if controller
+ * owns the lun, return dm_pg_init_complete(), otherwise submit
+ * MODE SELECT.
+ */
+ sp = &h->inq.c9;
+
+ /* If in AVT mode, return success */
+ if ((sp->avte_cvp >> 7) == 0x1) {
+ dm_pg_init_complete(h->path, 0);
+ goto done;
+ }
+
+ /* If the controller on this path owns the LUN, return success */
+ if (sp->avte_cvp & 0x1) {
+ dm_pg_init_complete(h->path, 0);
+ goto done;
+ }
+
+ if (h->ctlr) {
+ if (h->ctlr->use_10_ms == -1)
+ h->cmd_to_send = SEND_C2_INQUIRY;
+ else
+ h->cmd_to_send = SEND_MODE_SELECT;
+ } else
+ h->cmd_to_send = SEND_C4_INQUIRY;
+ queue_work(rdac_wkqd, &h->work);
+done:
+ __blk_put_request(req->q, req);
+}
+
+static void c8_endio(struct request *req, int error)
+{
+ struct rdac_handler *h = req->end_io_data;
+ struct c8_inquiry *sp;
+
+ if (had_failures(req, error)) {
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+ goto done;
+ }
+
+ /* We need to look at the sense keys here to take clear action.
+ * For now simple logic: Get the lun from the inquiry page.
+ */
+ sp = &h->inq.c8;
+ h->lun = sp->lun[7]; /* currently it uses only one byte */
+ h->cmd_to_send = SEND_C9_INQUIRY;
+ queue_work(rdac_wkqd, &h->work);
+done:
+ __blk_put_request(req->q, req);
+}
+
+static void submit_inquiry(struct rdac_handler *h, int page_code,
+ unsigned int len, rq_end_io_fn endio)
+{
+ struct request *rq;
+ struct request_queue *q = bdev_get_queue(h->path->dev->bdev);
+
+ if (!q)
+ goto fail_path;
+
+ rq = get_rdac_req(h, &h->inq, len, READ);
+ if (!rq)
+ goto fail_path;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 1;
+ rq->cmd[2] = page_code;
+ rq->cmd[4] = len;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+ blk_execute_rq_nowait(q, NULL, rq, 1, endio);
+ return;
+
+fail_path:
+ dm_pg_init_complete(h->path, MP_FAIL_PATH);
+}
+
+static void service_wkq(struct work_struct *work)
+{
+ struct rdac_handler *h = container_of(work, struct rdac_handler, work);
+
+ switch (h->cmd_to_send) {
+ case SEND_C2_INQUIRY:
+ submit_inquiry(h, 0xC2, sizeof(struct c2_inquiry), c2_endio);
+ break;
+ case SEND_C4_INQUIRY:
+ submit_inquiry(h, 0xC4, sizeof(struct c4_inquiry), c4_endio);
+ break;
+ case SEND_C8_INQUIRY:
+ submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
+ break;
+ case SEND_C9_INQUIRY:
+ submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
+ break;
+ case SEND_MODE_SELECT:
+ submit_mode_select(h);
+ break;
+ default:
+ BUG();
+ }
+}
+/*
+ * Only support subpage 0x2c until we confirm that this is just a matter
+ * of updating firmware or not, and RDAC (basic AVT works already) for now,
+ * but we can add these in when we get time and testers.
+ */
+static int rdac_create(struct hw_handler *hwh, unsigned argc, char **argv)
+{
+ struct rdac_handler *h;
+ unsigned timeout;
+
+ if (argc == 0) {
+ /* No arguments: use defaults */
+ timeout = RDAC_FAILOVER_TIMEOUT;
+ } else if (argc != 1) {
+ DMWARN("incorrect number of arguments");
+ return -EINVAL;
+ } else {
+ if (sscanf(argv[0], "%u", &timeout) != 1) {
+ DMWARN("invalid timeout value");
+ return -EINVAL;
+ }
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ hwh->context = h;
+ h->timeout = timeout;
+ h->lun = UNINITIALIZED_LUN;
+ INIT_WORK(&h->work, service_wkq);
+ DMWARN("using RDAC command with timeout %u", h->timeout);
+
+ return 0;
+}
+
+static void rdac_destroy(struct hw_handler *hwh)
+{
+ struct rdac_handler *h = hwh->context;
+
+ if (h->ctlr)
+ kref_put(&h->ctlr->kref, release_ctlr);
+ kfree(h);
+ hwh->context = NULL;
+}
+
+static unsigned rdac_error(struct hw_handler *hwh, struct bio *bio)
+{
+ /* Try default handler */
+ return dm_scsi_err_handler(hwh, bio);
+}
+
+static void rdac_pg_init(struct hw_handler *hwh, unsigned bypassed,
+ struct dm_path *path)
+{
+ struct rdac_handler *h = hwh->context;
+
+ h->path = path;
+ switch (h->lun) {
+ case UNINITIALIZED_LUN:
+ submit_inquiry(h, 0xC8, sizeof(struct c8_inquiry), c8_endio);
+ break;
+ default:
+ submit_inquiry(h, 0xC9, sizeof(struct c9_inquiry), c9_endio);
+ }
+}
+
+static struct hw_handler_type rdac_handler = {
+ .name = RDAC_DM_HWH_NAME,
+ .module = THIS_MODULE,
+ .create = rdac_create,
+ .destroy = rdac_destroy,
+ .pg_init = rdac_pg_init,
+ .error = rdac_error,
+};
+
+static int __init rdac_init(void)
+{
+ int r = dm_register_hw_handler(&rdac_handler);
+
+ if (r < 0) {
+ DMERR("%s: register failed %d", RDAC_DM_HWH_NAME, r);
+ return r;
+ }
+
+ rdac_wkqd = create_singlethread_workqueue("rdac_wkqd");
+ if (!rdac_wkqd) {
+ DMERR("Failed to create workqueue rdac_wkqd.");
+ dm_unregister_hw_handler(&rdac_handler);
+ return -ENOMEM;
+ }
+
+ DMINFO("%s: version %s loaded", RDAC_DM_HWH_NAME, RDAC_DM_HWH_VER);
+ return 0;
+}
+
+static void __exit rdac_exit(void)
+{
+ int r = dm_unregister_hw_handler(&rdac_handler);
+
+ destroy_workqueue(rdac_wkqd);
+ if (r < 0)
+ DMERR("%s: unregister failed %d", RDAC_DM_HWH_NAME, r);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("DM Multipath LSI/Engenio RDAC support");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RDAC_DM_HWH_VER);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index de54b39e6ffe..d6ca9d0a6fd1 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -83,7 +83,7 @@ struct multipath {
struct work_struct trigger_event;
/*
- * We must use a mempool of mpath_io structs so that we
+ * We must use a mempool of dm_mpath_io structs so that we
* can resubmit bios on error.
*/
mempool_t *mpio_pool;
@@ -92,7 +92,7 @@ struct multipath {
/*
* Context information attached to each bio we process.
*/
-struct mpath_io {
+struct dm_mpath_io {
struct pgpath *pgpath;
struct dm_bio_details details;
};
@@ -122,7 +122,7 @@ static struct pgpath *alloc_pgpath(void)
return pgpath;
}
-static inline void free_pgpath(struct pgpath *pgpath)
+static void free_pgpath(struct pgpath *pgpath)
{
kfree(pgpath);
}
@@ -299,8 +299,8 @@ static int __must_push_back(struct multipath *m)
dm_noflush_suspending(m->ti));
}
-static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
- unsigned was_queued)
+static int map_io(struct multipath *m, struct bio *bio,
+ struct dm_mpath_io *mpio, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
unsigned long flags;
@@ -374,7 +374,7 @@ static void dispatch_queued_ios(struct multipath *m)
int r;
unsigned long flags;
struct bio *bio = NULL, *next;
- struct mpath_io *mpio;
+ struct dm_mpath_io *mpio;
union map_info *info;
spin_lock_irqsave(&m->lock, flags);
@@ -795,12 +795,9 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
int r;
- struct mpath_io *mpio;
+ struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- if (bio_barrier(bio))
- return -EOPNOTSUPP;
-
mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
dm_bio_record(&mpio->details, bio);
@@ -1014,7 +1011,7 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
* end_io handling
*/
static int do_end_io(struct multipath *m, struct bio *bio,
- int error, struct mpath_io *mpio)
+ int error, struct dm_mpath_io *mpio)
{
struct hw_handler *hwh = &m->hw_handler;
unsigned err_flags = MP_FAIL_PATH; /* Default behavior */
@@ -1075,8 +1072,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
static int multipath_end_io(struct dm_target *ti, struct bio *bio,
int error, union map_info *map_context)
{
- struct multipath *m = (struct multipath *) ti->private;
- struct mpath_io *mpio = (struct mpath_io *) map_context->ptr;
+ struct multipath *m = ti->private;
+ struct dm_mpath_io *mpio = map_context->ptr;
struct pgpath *pgpath = mpio->pgpath;
struct path_selector *ps;
int r;
@@ -1346,22 +1343,20 @@ static int __init dm_multipath_init(void)
int r;
/* allocate a slab for the dm_ios */
- _mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io),
- 0, 0, NULL, NULL);
+ _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
if (!_mpio_cache)
return -ENOMEM;
r = dm_register_target(&multipath_target);
if (r < 0) {
- DMERR("%s: register failed %d", multipath_target.name, r);
+ DMERR("register failed %d", r);
kmem_cache_destroy(_mpio_cache);
return -EINVAL;
}
kmultipathd = create_workqueue("kmpathd");
if (!kmultipathd) {
- DMERR("%s: failed to create workqueue kmpathd",
- multipath_target.name);
+ DMERR("failed to create workqueue kmpathd");
dm_unregister_target(&multipath_target);
kmem_cache_destroy(_mpio_cache);
return -ENOMEM;
@@ -1382,8 +1377,7 @@ static void __exit dm_multipath_exit(void)
r = dm_unregister_target(&multipath_target);
if (r < 0)
- DMERR("%s: target unregister failed %d",
- multipath_target.name, r);
+ DMERR("target unregister failed %d", r);
kmem_cache_destroy(_mpio_cache);
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ef124b71ccc8..144071e70a93 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -24,6 +24,7 @@
#define DM_IO_PAGES 64
#define DM_RAID1_HANDLE_ERRORS 0x01
+#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
@@ -85,6 +86,7 @@ struct region_hash {
struct list_head clean_regions;
struct list_head quiesced_regions;
struct list_head recovered_regions;
+ struct list_head failed_recovered_regions;
};
enum {
@@ -132,6 +134,7 @@ struct mirror_set {
/* recovery */
region_t nr_regions;
int in_sync;
+ int log_failure;
struct mirror *default_mirror; /* Default mirror */
@@ -204,6 +207,7 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
INIT_LIST_HEAD(&rh->clean_regions);
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
sizeof(struct region));
@@ -368,6 +372,7 @@ static void rh_update_states(struct region_hash *rh)
LIST_HEAD(clean);
LIST_HEAD(recovered);
+ LIST_HEAD(failed_recovered);
/*
* Quickly grab the lists.
@@ -378,10 +383,8 @@ static void rh_update_states(struct region_hash *rh)
list_splice(&rh->clean_regions, &clean);
INIT_LIST_HEAD(&rh->clean_regions);
- list_for_each_entry (reg, &clean, list) {
- rh->log->type->clear_region(rh->log, reg->key);
+ list_for_each_entry(reg, &clean, list)
list_del(&reg->hash_list);
- }
}
if (!list_empty(&rh->recovered_regions)) {
@@ -391,6 +394,15 @@ static void rh_update_states(struct region_hash *rh)
list_for_each_entry (reg, &recovered, list)
list_del(&reg->hash_list);
}
+
+ if (!list_empty(&rh->failed_recovered_regions)) {
+ list_splice(&rh->failed_recovered_regions, &failed_recovered);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+ list_for_each_entry(reg, &failed_recovered, list)
+ list_del(&reg->hash_list);
+ }
+
spin_unlock(&rh->region_lock);
write_unlock_irq(&rh->hash_lock);
@@ -405,10 +417,17 @@ static void rh_update_states(struct region_hash *rh)
mempool_free(reg, rh->region_pool);
}
- rh->log->type->flush(rh->log);
+ list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+ complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
+ mempool_free(reg, rh->region_pool);
+ }
- list_for_each_entry_safe (reg, next, &clean, list)
+ list_for_each_entry_safe(reg, next, &clean, list) {
+ rh->log->type->clear_region(rh->log, reg->key);
mempool_free(reg, rh->region_pool);
+ }
+
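+ /* A single flush after all of the clear_region() calls pushes the
+ * updated region state out to the dirty log in one pass. */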
+ rh->log->type->flush(rh->log);
}
static void rh_inc(struct region_hash *rh, region_t region)
@@ -555,21 +574,25 @@ static struct region *rh_recovery_start(struct region_hash *rh)
return reg;
}
-/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
struct region_hash *rh = reg->rh;
spin_lock_irq(&rh->region_lock);
- list_add(&reg->list, &reg->rh->recovered_regions);
+ if (success)
+ list_add(&reg->list, &reg->rh->recovered_regions);
+ else {
+ reg->state = RH_NOSYNC;
+ list_add(&reg->list, &reg->rh->failed_recovered_regions);
+ }
spin_unlock_irq(&rh->region_lock);
wake(rh->ms);
}
-static void rh_flush(struct region_hash *rh)
+static int rh_flush(struct region_hash *rh)
{
- rh->log->type->flush(rh->log);
+ return rh->log->type->flush(rh->log);
}
static void rh_delay(struct region_hash *rh, struct bio *bio)
@@ -633,7 +656,14 @@ static void recovery_complete(int read_err, unsigned int write_err,
{
struct region *reg = (struct region *) context;
- /* FIXME: better error handling */
+ if (read_err)
+ /* Read error means the failure of the default mirror. */
+ DMERR_LIMIT("Unable to read primary mirror during recovery");
+
+ if (write_err)
+ DMERR_LIMIT("Write error during recovery (error = 0x%x)",
+ write_err);
+
rh_recovery_end(reg, !(read_err || write_err));
}
@@ -863,12 +893,15 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
rh_inc_pending(&ms->rh, &sync);
rh_inc_pending(&ms->rh, &nosync);
- rh_flush(&ms->rh);
+ ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;
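+ /* If the dirty log could not be flushed, the on-disk region state
+ * cannot be trusted, so the queued writes to in-sync regions are
+ * failed with -EIO below instead of being dispatched. */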
/*
* Dispatch io.
*/
- while ((bio = bio_list_pop(&sync)))
+ if (unlikely(ms->log_failure))
+ while ((bio = bio_list_pop(&sync)))
+ bio_endio(bio, bio->bi_size, -EIO);
+ else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
while ((bio = bio_list_pop(&recover)))
@@ -918,13 +951,12 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
- ms = kmalloc(len, GFP_KERNEL);
+ ms = kzalloc(len, GFP_KERNEL);
if (!ms) {
ti->error = "Cannot allocate mirror context";
return NULL;
}
- memset(ms, 0, len);
spin_lock_init(&ms->lock);
ms->ti = ti;
@@ -1145,6 +1177,15 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv += args_used;
argc -= args_used;
+ /*
+ * Any read-balancing addition depends on the
+ * DM_RAID1_HANDLE_ERRORS flag being present.
+ * This is because the decision to balance depends
+ * on the sync state of a region. If the above
+ * flag is not present, we ignore errors; and
+ * the sync state may be inaccurate.
+ */
+
if (argc) {
ti->error = "Too many mirror arguments";
free_context(ms, ti, ms->nr_mirrors);
@@ -1288,12 +1329,12 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
for (m = 0; m < ms->nr_mirrors; m++)
DMEMIT("%s ", ms->mirror[m].dev->name);
- DMEMIT("%llu/%llu",
+ DMEMIT("%llu/%llu 0 ",
(unsigned long long)ms->rh.log->type->
get_sync_count(ms->rh.log),
(unsigned long long)ms->nr_regions);
- sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
+ sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
break;
@@ -1335,8 +1376,7 @@ static int __init dm_mirror_init(void)
r = dm_register_target(&mirror_target);
if (r < 0) {
- DMERR("%s: Failed to register mirror target",
- mirror_target.name);
+ DMERR("Failed to register mirror target");
dm_dirty_log_exit();
}
@@ -1349,7 +1389,7 @@ static void __exit dm_mirror_exit(void)
r = dm_unregister_target(&mirror_target);
if (r < 0)
- DMERR("%s: unregister failed %d", mirror_target.name, r);
+ DMERR("unregister failed %d", r);
dm_dirty_log_exit();
}
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index a348a97b65af..391dfa2ad434 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -205,7 +205,7 @@ static void __exit dm_rr_exit(void)
int r = dm_unregister_path_selector(&rr_ps);
if (r < 0)
- DMERR("round-robin: unregister failed %d", r);
+ DMERR("unregister failed %d", r);
}
module_init(dm_rr_init);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 0821a2b68a73..83ddbfe6b8a4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -42,8 +42,8 @@
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
-struct pending_exception {
- struct exception e;
+struct dm_snap_pending_exception {
+ struct dm_snap_exception e;
/*
* Origin buffers waiting for this to complete are held
@@ -63,7 +63,7 @@ struct pending_exception {
* group of pending_exceptions. It is always last to get freed.
* These fields get set up when writing to the origin.
*/
- struct pending_exception *primary_pe;
+ struct dm_snap_pending_exception *primary_pe;
/*
* Number of pending_exceptions processing this chunk.
@@ -137,7 +137,7 @@ static void exit_origin_hash(void)
kfree(_origins);
}
-static inline unsigned int origin_hash(struct block_device *bdev)
+static unsigned origin_hash(struct block_device *bdev)
{
return bdev->bd_dev & ORIGIN_MASK;
}
@@ -231,7 +231,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
struct list_head *slot;
- struct exception *ex, *next;
+ struct dm_snap_exception *ex, *next;
int i, size;
size = et->hash_mask + 1;
@@ -245,18 +245,19 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
vfree(et->table);
}
-static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
return chunk & et->hash_mask;
}
-static void insert_exception(struct exception_table *eh, struct exception *e)
+static void insert_exception(struct exception_table *eh,
+ struct dm_snap_exception *e)
{
struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
list_add(&e->hash_list, l);
}
-static inline void remove_exception(struct exception *e)
+static void remove_exception(struct dm_snap_exception *e)
{
list_del(&e->hash_list);
}
@@ -265,11 +266,11 @@ static inline void remove_exception(struct exception *e)
* Return the exception data for a sector, or NULL if not
* remapped.
*/
-static struct exception *lookup_exception(struct exception_table *et,
- chunk_t chunk)
+static struct dm_snap_exception *lookup_exception(struct exception_table *et,
+ chunk_t chunk)
{
struct list_head *slot;
- struct exception *e;
+ struct dm_snap_exception *e;
slot = &et->table[exception_hash(et, chunk)];
list_for_each_entry (e, slot, hash_list)
@@ -279,9 +280,9 @@ static struct exception *lookup_exception(struct exception_table *et,
return NULL;
}
-static inline struct exception *alloc_exception(void)
+static struct dm_snap_exception *alloc_exception(void)
{
- struct exception *e;
+ struct dm_snap_exception *e;
e = kmem_cache_alloc(exception_cache, GFP_NOIO);
if (!e)
@@ -290,24 +291,24 @@ static inline struct exception *alloc_exception(void)
return e;
}
-static inline void free_exception(struct exception *e)
+static void free_exception(struct dm_snap_exception *e)
{
kmem_cache_free(exception_cache, e);
}
-static inline struct pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
return mempool_alloc(pending_pool, GFP_NOIO);
}
-static inline void free_pending_exception(struct pending_exception *pe)
+static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
mempool_free(pe, pending_pool);
}
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
- struct exception *e;
+ struct dm_snap_exception *e;
e = alloc_exception();
if (!e)
@@ -334,7 +335,7 @@ static int calc_max_buckets(void)
/*
* Rounds a number down to a power of 2.
*/
-static inline uint32_t round_down(uint32_t n)
+static uint32_t round_down(uint32_t n)
{
while (n & (n - 1))
n &= (n - 1);
@@ -384,7 +385,7 @@ static int init_hash_tables(struct dm_snapshot *s)
* Round a number up to the nearest 'size' boundary. size must
* be a power of 2.
*/
-static inline ulong round_up(ulong n, ulong size)
+static ulong round_up(ulong n, ulong size)
{
size--;
return (n + size) & ~size;
@@ -522,9 +523,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Metadata must only be loaded into one table at once */
r = s->store.read_metadata(&s->store);
- if (r) {
+ if (r < 0) {
ti->error = "Failed to read snapshot metadata";
goto bad6;
+ } else if (r > 0) {
+ s->valid = 0;
+ DMWARN("Snapshot is marked invalid.");
}
bio_list_init(&s->queued_bios);
@@ -577,7 +581,7 @@ static void __free_exceptions(struct dm_snapshot *s)
static void snapshot_dtr(struct dm_target *ti)
{
- struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+ struct dm_snapshot *s = ti->private;
flush_workqueue(ksnapd);
@@ -655,14 +659,14 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
dm_table_event(s->table);
}
-static void get_pending_exception(struct pending_exception *pe)
+static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
atomic_inc(&pe->ref_count);
}
-static struct bio *put_pending_exception(struct pending_exception *pe)
+static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
- struct pending_exception *primary_pe;
+ struct dm_snap_pending_exception *primary_pe;
struct bio *origin_bios = NULL;
primary_pe = pe->primary_pe;
@@ -692,9 +696,9 @@ static struct bio *put_pending_exception(struct pending_exception *pe)
return origin_bios;
}
-static void pending_complete(struct pending_exception *pe, int success)
+static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
- struct exception *e;
+ struct dm_snap_exception *e;
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
@@ -748,7 +752,8 @@ static void pending_complete(struct pending_exception *pe, int success)
static void commit_callback(void *context, int success)
{
- struct pending_exception *pe = (struct pending_exception *) context;
+ struct dm_snap_pending_exception *pe = context;
+
pending_complete(pe, success);
}
@@ -758,7 +763,7 @@ static void commit_callback(void *context, int success)
*/
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
- struct pending_exception *pe = (struct pending_exception *) context;
+ struct dm_snap_pending_exception *pe = context;
struct dm_snapshot *s = pe->snap;
if (read_err || write_err)
@@ -773,7 +778,7 @@ static void copy_callback(int read_err, unsigned int write_err, void *context)
/*
* Dispatches the copy operation to kcopyd.
*/
-static void start_copy(struct pending_exception *pe)
+static void start_copy(struct dm_snap_pending_exception *pe)
{
struct dm_snapshot *s = pe->snap;
struct io_region src, dest;
@@ -803,11 +808,11 @@ static void start_copy(struct pending_exception *pe)
* NOTE: a write lock must be held on snap->lock before calling
* this.
*/
-static struct pending_exception *
+static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
- struct exception *e;
- struct pending_exception *pe;
+ struct dm_snap_exception *e;
+ struct dm_snap_pending_exception *pe;
chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
/*
@@ -816,7 +821,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
e = lookup_exception(&s->pending, chunk);
if (e) {
/* cast the exception to a pending exception */
- pe = container_of(e, struct pending_exception, e);
+ pe = container_of(e, struct dm_snap_pending_exception, e);
goto out;
}
@@ -836,7 +841,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
e = lookup_exception(&s->pending, chunk);
if (e) {
free_pending_exception(pe);
- pe = container_of(e, struct pending_exception, e);
+ pe = container_of(e, struct dm_snap_pending_exception, e);
goto out;
}
@@ -860,8 +865,8 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
return pe;
}
-static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
- struct bio *bio)
+static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+ struct bio *bio)
{
bio->bi_bdev = s->cow->bdev;
bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
@@ -871,11 +876,11 @@ static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
static int snapshot_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct exception *e;
- struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+ struct dm_snap_exception *e;
+ struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
- struct pending_exception *pe = NULL;
+ struct dm_snap_pending_exception *pe = NULL;
chunk = sector_to_chunk(s, bio->bi_sector);
@@ -884,9 +889,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
if (!s->valid)
return -EIO;
- if (unlikely(bio_barrier(bio)))
- return -EOPNOTSUPP;
-
/* FIXME: should only take write lock if we need
* to copy an exception */
down_write(&s->lock);
@@ -945,7 +947,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
static void snapshot_resume(struct dm_target *ti)
{
- struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+ struct dm_snapshot *s = ti->private;
down_write(&s->lock);
s->active = 1;
@@ -955,7 +957,7 @@ static void snapshot_resume(struct dm_target *ti)
static int snapshot_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
- struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
+ struct dm_snapshot *snap = ti->private;
switch (type) {
case STATUSTYPE_INFO:
@@ -999,8 +1001,8 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
int r = DM_MAPIO_REMAPPED, first = 0;
struct dm_snapshot *snap;
- struct exception *e;
- struct pending_exception *pe, *next_pe, *primary_pe = NULL;
+ struct dm_snap_exception *e;
+ struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
chunk_t chunk;
LIST_HEAD(pe_queue);
@@ -1147,19 +1149,16 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
static void origin_dtr(struct dm_target *ti)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
dm_put_device(ti, dev);
}
static int origin_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
- if (unlikely(bio_barrier(bio)))
- return -EOPNOTSUPP;
-
/* Only tell snapshots if this is a write */
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
@@ -1172,7 +1171,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
*/
static void origin_resume(struct dm_target *ti)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
struct dm_snapshot *snap;
struct origin *o;
chunk_t chunk_size = 0;
@@ -1190,7 +1189,7 @@ static void origin_resume(struct dm_target *ti)
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
unsigned int maxlen)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
switch (type) {
case STATUSTYPE_INFO:
@@ -1249,21 +1248,14 @@ static int __init dm_snapshot_init(void)
goto bad2;
}
- exception_cache = kmem_cache_create("dm-snapshot-ex",
- sizeof(struct exception),
- __alignof__(struct exception),
- 0, NULL, NULL);
+ exception_cache = KMEM_CACHE(dm_snap_exception, 0);
if (!exception_cache) {
DMERR("Couldn't create exception cache.");
r = -ENOMEM;
goto bad3;
}
- pending_cache =
- kmem_cache_create("dm-snapshot-in",
- sizeof(struct pending_exception),
- __alignof__(struct pending_exception),
- 0, NULL, NULL);
+ pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
if (!pending_cache) {
DMERR("Couldn't create pending cache.");
r = -ENOMEM;
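For readers unfamiliar with the helper being switched to: KMEM_CACHE() derives the cache name, object size and alignment from the struct type itself. A minimal sketch of what the calls above boil down to at this point in the tree (the exact macro lives in <linux/slab.h>; treat this as an approximation):

/* Approximate expansion of KMEM_CACHE(dm_snap_exception, 0): the macro
 * stringifies the struct tag for the cache name and takes size and
 * alignment from the type, so the hand-written arguments go away.
 */
exception_cache = kmem_cache_create("dm_snap_exception",
				    sizeof(struct dm_snap_exception),
				    __alignof__(struct dm_snap_exception),
				    0, NULL);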
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 15fa2ae6cdc2..650e0f1f51d8 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -30,7 +30,7 @@ typedef sector_t chunk_t;
* An exception is used where an old chunk of data has been
* replaced by a new one.
*/
-struct exception {
+struct dm_snap_exception {
struct list_head hash_list;
chunk_t old_chunk;
@@ -58,13 +58,13 @@ struct exception_store {
* Find somewhere to store the next exception.
*/
int (*prepare_exception) (struct exception_store *store,
- struct exception *e);
+ struct dm_snap_exception *e);
/*
* Update the metadata with this exception.
*/
void (*commit_exception) (struct exception_store *store,
- struct exception *e,
+ struct dm_snap_exception *e,
void (*callback) (void *, int success),
void *callback_context);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2717a355dc5b..846614e676c6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -45,7 +45,7 @@ struct dm_io {
* One of these is allocated per target within a bio. Hopefully
* this will be simplified out one day.
*/
-struct target_io {
+struct dm_target_io {
struct dm_io *io;
struct dm_target *ti;
union map_info info;
@@ -54,7 +54,7 @@ struct target_io {
union map_info *dm_get_mapinfo(struct bio *bio)
{
if (bio && bio->bi_private)
- return &((struct target_io *)bio->bi_private)->info;
+ return &((struct dm_target_io *)bio->bi_private)->info;
return NULL;
}
@@ -132,14 +132,12 @@ static int __init local_init(void)
int r;
/* allocate a slab for the dm_ios */
- _io_cache = kmem_cache_create("dm_io",
- sizeof(struct dm_io), 0, 0, NULL, NULL);
+ _io_cache = KMEM_CACHE(dm_io, 0);
if (!_io_cache)
return -ENOMEM;
/* allocate a slab for the target ios */
- _tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
- 0, 0, NULL, NULL);
+ _tio_cache = KMEM_CACHE(dm_target_io, 0);
if (!_tio_cache) {
kmem_cache_destroy(_io_cache);
return -ENOMEM;
@@ -163,9 +161,7 @@ static void local_exit(void)
{
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
-
- if (unregister_blkdev(_major, _name) < 0)
- DMERR("unregister_blkdev failed");
+ unregister_blkdev(_major, _name);
_major = 0;
@@ -325,22 +321,22 @@ out:
return r;
}
-static inline struct dm_io *alloc_io(struct mapped_device *md)
+static struct dm_io *alloc_io(struct mapped_device *md)
{
return mempool_alloc(md->io_pool, GFP_NOIO);
}
-static inline void free_io(struct mapped_device *md, struct dm_io *io)
+static void free_io(struct mapped_device *md, struct dm_io *io)
{
mempool_free(io, md->io_pool);
}
-static inline struct target_io *alloc_tio(struct mapped_device *md)
+static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
return mempool_alloc(md->tio_pool, GFP_NOIO);
}
-static inline void free_tio(struct mapped_device *md, struct target_io *tio)
+static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
mempool_free(tio, md->tio_pool);
}
@@ -498,7 +494,7 @@ static void dec_pending(struct dm_io *io, int error)
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
int r = 0;
- struct target_io *tio = bio->bi_private;
+ struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
@@ -558,7 +554,7 @@ static sector_t max_io_len(struct mapped_device *md,
}
static void __map_bio(struct dm_target *ti, struct bio *clone,
- struct target_io *tio)
+ struct dm_target_io *tio)
{
int r;
sector_t sector;
@@ -672,7 +668,7 @@ static void __clone_and_map(struct clone_info *ci)
struct bio *clone, *bio = ci->bio;
struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
- struct target_io *tio;
+ struct dm_target_io *tio;
/*
* Allocate a target io object.
@@ -802,6 +798,15 @@ static int dm_request(request_queue_t *q, struct bio *bio)
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ /*
+ * There is no use in forwarding any barrier request since we can't
+ * guarantee it is (or can be) handled by the targets correctly.
+ */
+ if (unlikely(bio_barrier(bio))) {
+ bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
+ return 0;
+ }
+
down_read(&md->io_lock);
disk_stat_inc(dm_disk(md), ios[rw]);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 2f796b1436b2..462ee652a890 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -18,13 +18,45 @@
#define DM_NAME "device-mapper"
-#define DMERR(f, arg...) printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMWARN(f, arg...) printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMINFO(f, arg...) printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMERR(f, arg...) \
+ printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMERR_LIMIT(f, arg...) \
+ do { \
+ if (printk_ratelimit()) \
+ printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
+ f "\n", ## arg); \
+ } while (0)
+
+#define DMWARN(f, arg...) \
+ printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMWARN_LIMIT(f, arg...) \
+ do { \
+ if (printk_ratelimit()) \
+ printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
+ f "\n", ## arg); \
+ } while (0)
+
+#define DMINFO(f, arg...) \
+ printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMINFO_LIMIT(f, arg...) \
+ do { \
+ if (printk_ratelimit()) \
+ printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
+ "\n", ## arg); \
+ } while (0)
+
#ifdef CONFIG_DM_DEBUG
-# define DMDEBUG(f, arg...) printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
+# define DMDEBUG(f, arg...) \
+ printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
+# define DMDEBUG_LIMIT(f, arg...) \
+ do { \
+ if (printk_ratelimit()) \
+ printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
+ "\n", ## arg); \
+ } while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
+# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
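A quick usage sketch for the new rate-limited logging macros; the call sites and variables below are hypothetical, only the macro names come from the patch:

/* Hypothetical call sites: the _LIMIT variants behave like their plain
 * counterparts but drop messages once printk_ratelimit() trips, so a
 * failure path that runs per-bio cannot flood the kernel log.
 */
DMERR_LIMIT("I/O error on sector %llu", (unsigned long long)sector);
DMWARN_LIMIT("retrying metadata update");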
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index dbc234e3c69f..7e052378c47e 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -29,7 +29,7 @@
static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;
-static inline void wake(void)
+static void wake(void)
{
queue_work(_kcopyd_wq, &_kcopyd_work);
}
@@ -226,10 +226,7 @@ static LIST_HEAD(_pages_jobs);
static int jobs_init(void)
{
- _job_cache = kmem_cache_create("kcopyd-jobs",
- sizeof(struct kcopyd_job),
- __alignof__(struct kcopyd_job),
- 0, NULL, NULL);
+ _job_cache = KMEM_CACHE(kcopyd_job, 0);
if (!_job_cache)
return -ENOMEM;
@@ -258,7 +255,7 @@ static void jobs_exit(void)
* Functions to push and pop a job onto the head of a given job
* list.
*/
-static inline struct kcopyd_job *pop(struct list_head *jobs)
+static struct kcopyd_job *pop(struct list_head *jobs)
{
struct kcopyd_job *job = NULL;
unsigned long flags;
@@ -274,7 +271,7 @@ static inline struct kcopyd_job *pop(struct list_head *jobs)
return job;
}
-static inline void push(struct list_head *jobs, struct kcopyd_job *job)
+static void push(struct list_head *jobs, struct kcopyd_job *job)
{
unsigned long flags;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1c54f3c1cca7..65ddc887dfd7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1640,7 +1640,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
static void md_update_sb(mddev_t * mddev, int force_change)
{
- int err;
struct list_head *tmp;
mdk_rdev_t *rdev;
int sync_req;
@@ -1727,7 +1726,7 @@ repeat:
"md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev),mddev->in_sync);
- err = bitmap_update_sb(mddev->bitmap);
+ bitmap_update_sb(mddev->bitmap);
ITERATE_RDEV(mddev,rdev,tmp) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
@@ -2073,9 +2072,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
err = super_types[super_format].
load_super(rdev, NULL, super_minor);
if (err == -EINVAL) {
- printk(KERN_WARNING
- "md: %s has invalid sb, not importing!\n",
- bdevname(rdev->bdev,b));
+ printk(KERN_WARNING
+ "md: %s does not have a valid v%d.%d "
+ "superblock, not importing!\n",
+ bdevname(rdev->bdev,b),
+ super_format, super_minor);
goto abort_free;
}
if (err < 0) {
@@ -3174,13 +3175,33 @@ static int do_md_run(mddev_t * mddev)
* Drop all container device buffers, from now on
* the only valid external interface is through the md
* device.
- * Also find largest hardsector size
*/
ITERATE_RDEV(mddev,rdev,tmp) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
invalidate_bdev(rdev->bdev);
+
+ /* perform some consistency tests on the device.
+ * We don't want the data to overlap the metadata.
+ * Internal Bitmap issues are handled elsewhere.
+ */
+ if (rdev->data_offset < rdev->sb_offset) {
+ if (mddev->size &&
+ rdev->data_offset + mddev->size*2
+ > rdev->sb_offset*2) {
+ printk("md: %s: data overlaps metadata\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ } else {
+ if (rdev->sb_offset*2 + rdev->sb_size/512
+ > rdev->data_offset) {
+ printk("md: %s: metadata overlaps data\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ }
}
md_probe(mddev->unit, NULL, NULL);
@@ -4642,7 +4663,6 @@ static int md_thread(void * arg)
* many dirty RAID5 blocks.
*/
- current->flags |= PF_NOFREEZE;
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
@@ -5090,7 +5110,7 @@ static int is_mddev_idle(mddev_t *mddev)
mdk_rdev_t * rdev;
struct list_head *tmp;
int idle;
- unsigned long curr_events;
+ long curr_events;
idle = 1;
ITERATE_RDEV(mddev,rdev,tmp) {
@@ -5098,20 +5118,29 @@ static int is_mddev_idle(mddev_t *mddev)
curr_events = disk_stat_read(disk, sectors[0]) +
disk_stat_read(disk, sectors[1]) -
atomic_read(&disk->sync_io);
- /* The difference between curr_events and last_events
- * will be affected by any new non-sync IO (making
- * curr_events bigger) and any difference in the amount of
- * in-flight syncio (making current_events bigger or smaller)
- * The amount in-flight is currently limited to
- * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
- * which is at most 4096 sectors.
- * These numbers are fairly fragile and should be made
- * more robust, probably by enforcing the
- * 'window size' that md_do_sync sort-of uses.
+ /* sync IO will cause sync_io to increase before the disk_stats
+ * as sync_io is counted when a request starts, and
+ * disk_stats is counted when it completes.
+ * So resync activity will cause curr_events to be smaller than
+ * when there was no such activity.
+ * non-sync IO will cause disk_stats to increase without
+ * increasing sync_io so curr_events will (eventually)
+ * be larger than it was before. Once it becomes
+ * substantially larger, the test below will cause
+ * the array to appear non-idle, and resync will slow
+ * down.
+ * If there is a lot of outstanding resync activity when
+ * we set last_events to curr_events, then all that activity
+ * completing might cause the array to appear non-idle
+ * and resync will be slowed down even though there might
+ * not have been non-resync activity. This will only
+ * happen once though. 'last_events' will soon reflect
+ * the state where there is little or no outstanding
+ * resync requests, and further resync activity will
+ * always make curr_events less than last_events.
*
- * Note: the following is an unsigned comparison.
*/
- if ((long)curr_events - (long)rdev->last_events > 4096) {
+ if (curr_events - rdev->last_events > 4096) {
rdev->last_events = curr_events;
idle = 0;
}
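A worked illustration of the accounting described in the comment above; the numbers are invented:

/* Invented numbers: suppose 1200 sectors of resync have been issued
 * (sync_io += 1200) but only 1000 of them have completed
 * (disk_stats += 1000) since the last check.  curr_events then moves by
 * 1000 - 1200 = -200, i.e. downward, so in-flight resync never trips the
 * "> 4096" test.  5000 sectors of ordinary reads, by contrast, move
 * curr_events by +5000 and correctly mark the array as non-idle.
 */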
@@ -5772,7 +5801,7 @@ static void autostart_arrays(int part)
for (i = 0; i < dev_cnt; i++) {
dev_t dev = detected_devices[i];
- rdev = md_import_device(dev,0, 0);
+ rdev = md_import_device(dev,0, 90);
if (IS_ERR(rdev))
continue;
@@ -5814,7 +5843,7 @@ static __exit void md_exit(void)
}
}
-module_init(md_init)
+subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46677d7d9980..00c78b77b13d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1526,8 +1526,7 @@ static void raid1d(mddev_t *mddev)
blk_remove_plug(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- if (bitmap_unplug(mddev->bitmap) != 0)
- printk("%s: bitmap file write failed!\n", mdname(mddev));
+ bitmap_unplug(mddev->bitmap);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9eb66c1b523b..a95ada1cfac4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1510,8 +1510,7 @@ static void raid10d(mddev_t *mddev)
blk_remove_plug(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
/* flush any pending bitmap writes to disk before proceeding w/ I/O */
- if (bitmap_unplug(mddev->bitmap) != 0)
- printk("%s: bitmap file write failed!\n", mdname(mddev));
+ bitmap_unplug(mddev->bitmap);
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 061375ee6592..d90ee145effe 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -52,6 +52,7 @@
#include "raid6.h"
#include <linux/raid/bitmap.h>
+#include <linux/async_tx.h>
/*
* Stripe cache
@@ -80,7 +81,6 @@
/*
* The following can be used to debug the driver
*/
-#define RAID5_DEBUG 0
#define RAID5_PARANOIA 1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
@@ -88,8 +88,7 @@
# define CHECK_DEVLOCK()
#endif
-#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
-#if RAID5_DEBUG
+#ifdef DEBUG
#define inline
#define __inline__
#endif
@@ -104,6 +103,23 @@ static inline int raid6_next_disk(int disk, int raid_disks)
disk++;
return (disk < raid_disks) ? disk : 0;
}
+
+static void return_io(struct bio *return_bi)
+{
+ struct bio *bi = return_bi;
+ while (bi) {
+ int bytes = bi->bi_size;
+
+ return_bi = bi->bi_next;
+ bi->bi_next = NULL;
+ bi->bi_size = 0;
+ bi->bi_end_io(bi, bytes,
+ test_bit(BIO_UPTODATE, &bi->bi_flags)
+ ? 0 : -EIO);
+ bi = return_bi;
+ }
+}
+
static void print_raid5_conf (raid5_conf_t *conf);
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
@@ -125,6 +141,7 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
}
md_wakeup_thread(conf->mddev->thread);
} else {
+ BUG_ON(sh->ops.pending);
if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
atomic_dec(&conf->preread_active_stripes);
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -152,7 +169,8 @@ static void release_stripe(struct stripe_head *sh)
static inline void remove_hash(struct stripe_head *sh)
{
- PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
+ pr_debug("remove_hash(), stripe %llu\n",
+ (unsigned long long)sh->sector);
hlist_del_init(&sh->hash);
}
@@ -161,7 +179,8 @@ static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
struct hlist_head *hp = stripe_hash(conf, sh->sector);
- PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
+ pr_debug("insert_hash(), stripe %llu\n",
+ (unsigned long long)sh->sector);
CHECK_DEVLOCK();
hlist_add_head(&sh->hash, hp);
@@ -224,9 +243,10 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
-
+ BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete);
+
CHECK_DEVLOCK();
- PRINTK("init_stripe called, stripe %llu\n",
+ pr_debug("init_stripe called, stripe %llu\n",
(unsigned long long)sh->sector);
remove_hash(sh);
@@ -240,11 +260,11 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->toread || dev->towrite || dev->written ||
+ if (dev->toread || dev->read || dev->towrite || dev->written ||
test_bit(R5_LOCKED, &dev->flags)) {
- printk("sector=%llx i=%d %p %p %p %d\n",
+ printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
(unsigned long long)sh->sector, i, dev->toread,
- dev->towrite, dev->written,
+ dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
BUG();
}
@@ -260,11 +280,11 @@ static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, in
struct hlist_node *hn;
CHECK_DEVLOCK();
- PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
+ pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
if (sh->sector == sector && sh->disks == disks)
return sh;
- PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
+ pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
return NULL;
}
@@ -276,7 +296,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
{
struct stripe_head *sh;
- PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
+ pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(&conf->device_lock);
@@ -324,6 +344,579 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
return sh;
}
+/* test_and_ack_op() ensures that we only dequeue an operation once */
+#define test_and_ack_op(op, pend) \
+do { \
+ if (test_bit(op, &sh->ops.pending) && \
+ !test_bit(op, &sh->ops.complete)) { \
+ if (test_and_set_bit(op, &sh->ops.ack)) \
+ clear_bit(op, &pend); \
+ else \
+ ack++; \
+ } else \
+ clear_bit(op, &pend); \
+} while (0)
+
+/* find new work to run; do not resubmit work that is already
+ * in flight
+ */
+static unsigned long get_stripe_work(struct stripe_head *sh)
+{
+ unsigned long pending;
+ int ack = 0;
+
+ pending = sh->ops.pending;
+
+ test_and_ack_op(STRIPE_OP_BIOFILL, pending);
+ test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending);
+ test_and_ack_op(STRIPE_OP_PREXOR, pending);
+ test_and_ack_op(STRIPE_OP_BIODRAIN, pending);
+ test_and_ack_op(STRIPE_OP_POSTXOR, pending);
+ test_and_ack_op(STRIPE_OP_CHECK, pending);
+ if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending))
+ ack++;
+
+ sh->ops.count -= ack;
+ BUG_ON(sh->ops.count < 0);
+
+ return pending;
+}
+
+static int
+raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error);
+static int
+raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error);
+
+static void ops_run_io(struct stripe_head *sh)
+{
+ raid5_conf_t *conf = sh->raid_conf;
+ int i, disks = sh->disks;
+
+ might_sleep();
+
+ for (i = disks; i--; ) {
+ int rw;
+ struct bio *bi;
+ mdk_rdev_t *rdev;
+ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
+ rw = WRITE;
+ else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
+ rw = READ;
+ else
+ continue;
+
+ bi = &sh->dev[i].req;
+
+ bi->bi_rw = rw;
+ if (rw == WRITE)
+ bi->bi_end_io = raid5_end_write_request;
+ else
+ bi->bi_end_io = raid5_end_read_request;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
+ if (rdev)
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+
+ if (rdev) {
+ if (test_bit(STRIPE_SYNCING, &sh->state) ||
+ test_bit(STRIPE_EXPAND_SOURCE, &sh->state) ||
+ test_bit(STRIPE_EXPAND_READY, &sh->state))
+ md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+
+ bi->bi_bdev = rdev->bdev;
+ pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+ __FUNCTION__, (unsigned long long)sh->sector,
+ bi->bi_rw, i);
+ atomic_inc(&sh->count);
+ bi->bi_sector = sh->sector + rdev->data_offset;
+ bi->bi_flags = 1 << BIO_UPTODATE;
+ bi->bi_vcnt = 1;
+ bi->bi_max_vecs = 1;
+ bi->bi_idx = 0;
+ bi->bi_io_vec = &sh->dev[i].vec;
+ bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ bi->bi_io_vec[0].bv_offset = 0;
+ bi->bi_size = STRIPE_SIZE;
+ bi->bi_next = NULL;
+ if (rw == WRITE &&
+ test_bit(R5_ReWrite, &sh->dev[i].flags))
+ atomic_add(STRIPE_SECTORS,
+ &rdev->corrected_errors);
+ generic_make_request(bi);
+ } else {
+ if (rw == WRITE)
+ set_bit(STRIPE_DEGRADED, &sh->state);
+ pr_debug("skip op %ld on disc %d for sector %llu\n",
+ bi->bi_rw, i, (unsigned long long)sh->sector);
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ }
+}
+
+static struct dma_async_tx_descriptor *
+async_copy_data(int frombio, struct bio *bio, struct page *page,
+ sector_t sector, struct dma_async_tx_descriptor *tx)
+{
+ struct bio_vec *bvl;
+ struct page *bio_page;
+ int i;
+ int page_offset;
+
+ if (bio->bi_sector >= sector)
+ page_offset = (signed)(bio->bi_sector - sector) * 512;
+ else
+ page_offset = (signed)(sector - bio->bi_sector) * -512;
+ bio_for_each_segment(bvl, bio, i) {
+ int len = bio_iovec_idx(bio, i)->bv_len;
+ int clen;
+ int b_offset = 0;
+
+ if (page_offset < 0) {
+ b_offset = -page_offset;
+ page_offset += b_offset;
+ len -= b_offset;
+ }
+
+ if (len > 0 && page_offset + len > STRIPE_SIZE)
+ clen = STRIPE_SIZE - page_offset;
+ else
+ clen = len;
+
+ if (clen > 0) {
+ b_offset += bio_iovec_idx(bio, i)->bv_offset;
+ bio_page = bio_iovec_idx(bio, i)->bv_page;
+ if (frombio)
+ tx = async_memcpy(page, bio_page, page_offset,
+ b_offset, clen,
+ ASYNC_TX_DEP_ACK,
+ tx, NULL, NULL);
+ else
+ tx = async_memcpy(bio_page, page, b_offset,
+ page_offset, clen,
+ ASYNC_TX_DEP_ACK,
+ tx, NULL, NULL);
+ }
+ if (clen < len) /* hit end of page */
+ break;
+ page_offset += len;
+ }
+
+ return tx;
+}
+
+static void ops_complete_biofill(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+ struct bio *return_bi = NULL;
+ raid5_conf_t *conf = sh->raid_conf;
+ int i, more_to_read = 0;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ /* clear completed biofills */
+ for (i = sh->disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ /* check if this stripe has new incoming reads */
+ if (dev->toread)
+ more_to_read++;
+
+ /* acknowledge completion of a biofill operation and
+ * check if we need to reply to a read request
+ */
+ if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+ struct bio *rbi, *rbi2;
+ clear_bit(R5_Wantfill, &dev->flags);
+
+ /* The access to dev->read is outside of the
+ * spin_lock_irq(&conf->device_lock), but is protected
+ * by the STRIPE_OP_BIOFILL pending bit
+ */
+ BUG_ON(!dev->read);
+ rbi = dev->read;
+ dev->read = NULL;
+ while (rbi && rbi->bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ rbi2 = r5_next_bio(rbi, dev->sector);
+ spin_lock_irq(&conf->device_lock);
+ if (--rbi->bi_phys_segments == 0) {
+ rbi->bi_next = return_bi;
+ return_bi = rbi;
+ }
+ spin_unlock_irq(&conf->device_lock);
+ rbi = rbi2;
+ }
+ }
+ }
+ clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack);
+ clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending);
+
+ return_io(return_bi);
+
+ if (more_to_read)
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+}
+
+static void ops_run_biofill(struct stripe_head *sh)
+{
+ struct dma_async_tx_descriptor *tx = NULL;
+ raid5_conf_t *conf = sh->raid_conf;
+ int i;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ for (i = sh->disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (test_bit(R5_Wantfill, &dev->flags)) {
+ struct bio *rbi;
+ spin_lock_irq(&conf->device_lock);
+ dev->read = rbi = dev->toread;
+ dev->toread = NULL;
+ spin_unlock_irq(&conf->device_lock);
+ while (rbi && rbi->bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ tx = async_copy_data(0, rbi, dev->page,
+ dev->sector, tx);
+ rbi = r5_next_bio(rbi, dev->sector);
+ }
+ }
+ }
+
+ atomic_inc(&sh->count);
+ async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
+ ops_complete_biofill, sh);
+}
+
+static void ops_complete_compute5(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+ int target = sh->ops.target;
+ struct r5dev *tgt = &sh->dev[target];
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ set_bit(R5_UPTODATE, &tgt->flags);
+ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+ clear_bit(R5_Wantcompute, &tgt->flags);
+ set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_compute5(struct stripe_head *sh, unsigned long pending)
+{
+ /* kernel stack size limits the total number of disks */
+ int disks = sh->disks;
+ struct page *xor_srcs[disks];
+ int target = sh->ops.target;
+ struct r5dev *tgt = &sh->dev[target];
+ struct page *xor_dest = tgt->page;
+ int count = 0;
+ struct dma_async_tx_descriptor *tx;
+ int i;
+
+ pr_debug("%s: stripe %llu block: %d\n",
+ __FUNCTION__, (unsigned long long)sh->sector, target);
+ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
+
+ for (i = disks; i--; )
+ if (i != target)
+ xor_srcs[count++] = sh->dev[i].page;
+
+ atomic_inc(&sh->count);
+
+ if (unlikely(count == 1))
+ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
+ 0, NULL, ops_complete_compute5, sh);
+ else
+ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+ ASYNC_TX_XOR_ZERO_DST, NULL,
+ ops_complete_compute5, sh);
+
+ /* ack now if postxor is not set to be run */
+ if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending))
+ async_tx_ack(tx);
+
+ return tx;
+}
+
+static void ops_complete_prexor(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+{
+ /* kernel stack size limits the total number of disks */
+ int disks = sh->disks;
+ struct page *xor_srcs[disks];
+ int count = 0, pd_idx = sh->pd_idx, i;
+
+ /* existing parity data subtracted */
+ struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ /* Only process blocks that are known to be uptodate */
+ if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags))
+ xor_srcs[count++] = dev->page;
+ }
+
+ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+ ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
+ ops_complete_prexor, sh);
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+{
+ int disks = sh->disks;
+ int pd_idx = sh->pd_idx, i;
+
+ /* check if prexor is active, which means we only process blocks
+ * that are part of a read-modify-write (Wantprexor)
+ */
+ int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ struct bio *chosen;
+ int towrite;
+
+ towrite = 0;
+ if (prexor) { /* rmw */
+ if (dev->towrite &&
+ test_bit(R5_Wantprexor, &dev->flags))
+ towrite = 1;
+ } else { /* rcw */
+ if (i != pd_idx && dev->towrite &&
+ test_bit(R5_LOCKED, &dev->flags))
+ towrite = 1;
+ }
+
+ if (towrite) {
+ struct bio *wbi;
+
+ spin_lock(&sh->lock);
+ chosen = dev->towrite;
+ dev->towrite = NULL;
+ BUG_ON(dev->written);
+ wbi = dev->written = chosen;
+ spin_unlock(&sh->lock);
+
+ while (wbi && wbi->bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ tx = async_copy_data(1, wbi, dev->page,
+ dev->sector, tx);
+ wbi = r5_next_bio(wbi, dev->sector);
+ }
+ }
+ }
+
+ return tx;
+}
+
+static void ops_complete_postxor(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+}
+
+static void ops_complete_write(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+ int disks = sh->disks, i, pd_idx = sh->pd_idx;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (dev->written || i == pd_idx)
+ set_bit(R5_UPTODATE, &dev->flags);
+ }
+
+ set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
+ set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+}
+
+static void
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+{
+ /* kernel stack size limits the total number of disks */
+ int disks = sh->disks;
+ struct page *xor_srcs[disks];
+
+ int count = 0, pd_idx = sh->pd_idx, i;
+ struct page *xor_dest;
+ int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ unsigned long flags;
+ dma_async_tx_callback callback;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ /* check if prexor is active, which means we only process blocks
+ * that are part of a read-modify-write (written)
+ */
+ if (prexor) {
+ xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (dev->written)
+ xor_srcs[count++] = dev->page;
+ }
+ } else {
+ xor_dest = sh->dev[pd_idx].page;
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (i != pd_idx)
+ xor_srcs[count++] = dev->page;
+ }
+ }
+
+ /* check whether this postxor is part of a write */
+ callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ?
+ ops_complete_write : ops_complete_postxor;
+
+ /* 1/ if we prexor'd then the dest is reused as a source
+ * 2/ if we did not prexor then we are redoing the parity
+ * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
+ * for the synchronous xor case
+ */
+ flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
+ (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
+
+ atomic_inc(&sh->count);
+
+ if (unlikely(count == 1)) {
+ flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
+ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
+ flags, tx, callback, sh);
+ } else
+ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+ flags, tx, callback, sh);
+}
+
+static void ops_complete_check(void *stripe_head_ref)
+{
+ struct stripe_head *sh = stripe_head_ref;
+ int pd_idx = sh->pd_idx;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
+ sh->ops.zero_sum_result == 0)
+ set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+
+ set_bit(STRIPE_OP_CHECK, &sh->ops.complete);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+}
+
+static void ops_run_check(struct stripe_head *sh)
+{
+ /* kernel stack size limits the total number of disks */
+ int disks = sh->disks;
+ struct page *xor_srcs[disks];
+ struct dma_async_tx_descriptor *tx;
+
+ int count = 0, pd_idx = sh->pd_idx, i;
+ struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+
+ pr_debug("%s: stripe %llu\n", __FUNCTION__,
+ (unsigned long long)sh->sector);
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (i != pd_idx)
+ xor_srcs[count++] = dev->page;
+ }
+
+ tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
+ &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
+
+ if (tx)
+ set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
+ else
+ clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending);
+
+ atomic_inc(&sh->count);
+ tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
+ ops_complete_check, sh);
+}
+
+static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
+{
+ int overlap_clear = 0, i, disks = sh->disks;
+ struct dma_async_tx_descriptor *tx = NULL;
+
+ if (test_bit(STRIPE_OP_BIOFILL, &pending)) {
+ ops_run_biofill(sh);
+ overlap_clear++;
+ }
+
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending))
+ tx = ops_run_compute5(sh, pending);
+
+ if (test_bit(STRIPE_OP_PREXOR, &pending))
+ tx = ops_run_prexor(sh, tx);
+
+ if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
+ tx = ops_run_biodrain(sh, tx);
+ overlap_clear++;
+ }
+
+ if (test_bit(STRIPE_OP_POSTXOR, &pending))
+ ops_run_postxor(sh, tx);
+
+ if (test_bit(STRIPE_OP_CHECK, &pending))
+ ops_run_check(sh);
+
+ if (test_bit(STRIPE_OP_IO, &pending))
+ ops_run_io(sh);
+
+ if (overlap_clear)
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&sh->raid_conf->wait_for_overlap);
+ }
+}
+
static int grow_one_stripe(raid5_conf_t *conf)
{
struct stripe_head *sh;
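To make the new pending/ack/complete handshake easier to follow, here is a sketch of how a caller is expected to drive get_stripe_work() and raid5_run_ops(); the real caller is handle_stripe5() elsewhere in this series, so the helper below is illustrative only:

/* Illustrative only -- not part of the patch.  Handlers mark work by
 * setting STRIPE_OP_* bits in sh->ops.pending (and bumping sh->ops.count)
 * under sh->lock; get_stripe_work() then acks each bit exactly once so
 * that operations already in flight are not resubmitted.
 */
static void drive_stripe_ops(struct stripe_head *sh)
{
	unsigned long pending = 0;

	spin_lock(&sh->lock);
	if (sh->ops.count)
		pending = get_stripe_work(sh);
	spin_unlock(&sh->lock);

	if (pending)
		raid5_run_ops(sh, pending);	/* kicks off async_tx chains and I/O */
}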
@@ -358,7 +951,7 @@ static int grow_stripes(raid5_conf_t *conf, int num)
conf->active_name = 0;
sc = kmem_cache_create(conf->cache_name[conf->active_name],
sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return 1;
conf->slab_cache = sc;
@@ -410,7 +1003,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!sc)
return -ENOMEM;
@@ -537,8 +1130,8 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
if (bi == &sh->dev[i].req)
break;
- PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
- (unsigned long long)sh->sector, i, atomic_read(&sh->count),
+ pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+ (unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
if (i == disks) {
BUG();
@@ -613,7 +1206,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
if (bi == &sh->dev[i].req)
break;
- PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
+ pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
if (i == disks) {
@@ -658,7 +1251,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
- PRINTK("raid5: error called\n");
+ pr_debug("raid5: error called\n");
if (!test_bit(Faulty, &rdev->flags)) {
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -916,137 +1509,13 @@ static void copy_data(int frombio, struct bio *bio,
}
}
-#define check_xor() do { \
- if (count == MAX_XOR_BLOCKS) { \
- xor_block(count, STRIPE_SIZE, ptr); \
- count = 1; \
- } \
+#define check_xor() do { \
+ if (count == MAX_XOR_BLOCKS) { \
+ xor_blocks(count, STRIPE_SIZE, dest, ptr);\
+ count = 0; \
+ } \
} while(0)
-
-static void compute_block(struct stripe_head *sh, int dd_idx)
-{
- int i, count, disks = sh->disks;
- void *ptr[MAX_XOR_BLOCKS], *p;
-
- PRINTK("compute_block, stripe %llu, idx %d\n",
- (unsigned long long)sh->sector, dd_idx);
-
- ptr[0] = page_address(sh->dev[dd_idx].page);
- memset(ptr[0], 0, STRIPE_SIZE);
- count = 1;
- for (i = disks ; i--; ) {
- if (i == dd_idx)
- continue;
- p = page_address(sh->dev[i].page);
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
- ptr[count++] = p;
- else
- printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
- " not present\n", dd_idx,
- (unsigned long long)sh->sector, i);
-
- check_xor();
- }
- if (count != 1)
- xor_block(count, STRIPE_SIZE, ptr);
- set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
-}
-
-static void compute_parity5(struct stripe_head *sh, int method)
-{
- raid5_conf_t *conf = sh->raid_conf;
- int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
- void *ptr[MAX_XOR_BLOCKS];
- struct bio *chosen;
-
- PRINTK("compute_parity5, stripe %llu, method %d\n",
- (unsigned long long)sh->sector, method);
-
- count = 1;
- ptr[0] = page_address(sh->dev[pd_idx].page);
- switch(method) {
- case READ_MODIFY_WRITE:
- BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
- for (i=disks ; i-- ;) {
- if (i==pd_idx)
- continue;
- if (sh->dev[i].towrite &&
- test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
- ptr[count++] = page_address(sh->dev[i].page);
- chosen = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- BUG_ON(sh->dev[i].written);
- sh->dev[i].written = chosen;
- check_xor();
- }
- }
- break;
- case RECONSTRUCT_WRITE:
- memset(ptr[0], 0, STRIPE_SIZE);
- for (i= disks; i-- ;)
- if (i!=pd_idx && sh->dev[i].towrite) {
- chosen = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- BUG_ON(sh->dev[i].written);
- sh->dev[i].written = chosen;
- }
- break;
- case CHECK_PARITY:
- break;
- }
- if (count>1) {
- xor_block(count, STRIPE_SIZE, ptr);
- count = 1;
- }
-
- for (i = disks; i--;)
- if (sh->dev[i].written) {
- sector_t sector = sh->dev[i].sector;
- struct bio *wbi = sh->dev[i].written;
- while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
- copy_data(1, wbi, sh->dev[i].page, sector);
- wbi = r5_next_bio(wbi, sector);
- }
-
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(R5_UPTODATE, &sh->dev[i].flags);
- }
-
- switch(method) {
- case RECONSTRUCT_WRITE:
- case CHECK_PARITY:
- for (i=disks; i--;)
- if (i != pd_idx) {
- ptr[count++] = page_address(sh->dev[i].page);
- check_xor();
- }
- break;
- case READ_MODIFY_WRITE:
- for (i = disks; i--;)
- if (sh->dev[i].written) {
- ptr[count++] = page_address(sh->dev[i].page);
- check_xor();
- }
- }
- if (count != 1)
- xor_block(count, STRIPE_SIZE, ptr);
-
- if (method != CHECK_PARITY) {
- set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
- set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
- } else
- clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
-}
-
static void compute_parity6(struct stripe_head *sh, int method)
{
raid6_conf_t *conf = sh->raid_conf;
@@ -1058,7 +1527,7 @@ static void compute_parity6(struct stripe_head *sh, int method)
qd_idx = raid6_next_disk(pd_idx, disks);
d0_idx = raid6_next_disk(qd_idx, disks);
- PRINTK("compute_parity, stripe %llu, method %d\n",
+ pr_debug("compute_parity, stripe %llu, method %d\n",
(unsigned long long)sh->sector, method);
switch(method) {
@@ -1132,20 +1601,20 @@ static void compute_parity6(struct stripe_head *sh, int method)
static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
int i, count, disks = sh->disks;
- void *ptr[MAX_XOR_BLOCKS], *p;
+ void *ptr[MAX_XOR_BLOCKS], *dest, *p;
int pd_idx = sh->pd_idx;
int qd_idx = raid6_next_disk(pd_idx, disks);
- PRINTK("compute_block_1, stripe %llu, idx %d\n",
+ pr_debug("compute_block_1, stripe %llu, idx %d\n",
(unsigned long long)sh->sector, dd_idx);
if ( dd_idx == qd_idx ) {
/* We're actually computing the Q drive */
compute_parity6(sh, UPDATE_PARITY);
} else {
- ptr[0] = page_address(sh->dev[dd_idx].page);
- if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
- count = 1;
+ dest = page_address(sh->dev[dd_idx].page);
+ if (!nozero) memset(dest, 0, STRIPE_SIZE);
+ count = 0;
for (i = disks ; i--; ) {
if (i == dd_idx || i == qd_idx)
continue;
@@ -1159,8 +1628,8 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
check_xor();
}
- if (count != 1)
- xor_block(count, STRIPE_SIZE, ptr);
+ if (count)
+ xor_blocks(count, STRIPE_SIZE, dest, ptr);
if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
@@ -1183,7 +1652,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
BUG_ON(faila == failb);
if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
- PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
+ pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
(unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
if ( failb == disks-1 ) {
@@ -1229,7 +1698,79 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
}
}
+static int
+handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
+{
+ int i, pd_idx = sh->pd_idx, disks = sh->disks;
+ int locked = 0;
+ if (rcw) {
+ /* if we are not expanding this is a proper write request, and
+ * there will be bios with new data to be drained into the
+ * stripe cache
+ */
+ if (!expand) {
+ set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
+ sh->ops.count++;
+ }
+
+ set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+ sh->ops.count++;
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+
+ if (dev->towrite) {
+ set_bit(R5_LOCKED, &dev->flags);
+ if (!expand)
+ clear_bit(R5_UPTODATE, &dev->flags);
+ locked++;
+ }
+ }
+ } else {
+ BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
+ test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
+
+ set_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
+ set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+
+ sh->ops.count += 3;
+
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (i == pd_idx)
+ continue;
+
+ /* For a read-modify-write there may be blocks that are
+ * locked for reading while others are ready to be
+ * written, so we distinguish these blocks by the
+ * R5_Wantprexor bit
+ */
+ if (dev->towrite &&
+ (test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ set_bit(R5_Wantprexor, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ clear_bit(R5_UPTODATE, &dev->flags);
+ locked++;
+ }
+ }
+ }
+
+ /* keep the parity disk locked while asynchronous operations
+ * are in flight
+ */
+ set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
+ clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
+ locked++;
+
+ pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
+ __FUNCTION__, (unsigned long long)sh->sector,
+ locked, sh->ops.pending);
+
+ return locked;
+}
/*
* Each stripe/dev can have one or more bion attached.
@@ -1242,7 +1783,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
raid5_conf_t *conf = sh->raid_conf;
int firstwrite=0;
- PRINTK("adding bh b#%llu to stripe s#%llu\n",
+ pr_debug("adding bh b#%llu to stripe s#%llu\n",
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector);
@@ -1271,7 +1812,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
spin_unlock_irq(&conf->device_lock);
spin_unlock(&sh->lock);
- PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
+ pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector, dd_idx);
@@ -1326,6 +1867,729 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
return pd_idx;
}
+static void
+handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks,
+ struct bio **return_bi)
+{
+ int i;
+ for (i = disks; i--; ) {
+ struct bio *bi;
+ int bitmap_end = 0;
+
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ mdk_rdev_t *rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(In_sync, &rdev->flags))
+ /* multiple read failures in one stripe */
+ md_error(conf->mddev, rdev);
+ rcu_read_unlock();
+ }
+ spin_lock_irq(&conf->device_lock);
+ /* fail all writes first */
+ bi = sh->dev[i].towrite;
+ sh->dev[i].towrite = NULL;
+ if (bi) {
+ s->to_write--;
+ bitmap_end = 1;
+ }
+
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wake_up(&conf->wait_for_overlap);
+
+ while (bi && bi->bi_sector <
+ sh->dev[i].sector + STRIPE_SECTORS) {
+ struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
+ clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ if (--bi->bi_phys_segments == 0) {
+ md_write_end(conf->mddev);
+ bi->bi_next = *return_bi;
+ *return_bi = bi;
+ }
+ bi = nextbi;
+ }
+ /* and fail all 'written' */
+ bi = sh->dev[i].written;
+ sh->dev[i].written = NULL;
+ if (bi) bitmap_end = 1;
+ while (bi && bi->bi_sector <
+ sh->dev[i].sector + STRIPE_SECTORS) {
+ struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
+ clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ if (--bi->bi_phys_segments == 0) {
+ md_write_end(conf->mddev);
+ bi->bi_next = *return_bi;
+ *return_bi = bi;
+ }
+ bi = bi2;
+ }
+
+ /* fail any reads if this device is non-operational and
+ * the data has not reached the cache yet.
+ */
+ if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
+ (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+ test_bit(R5_ReadError, &sh->dev[i].flags))) {
+ bi = sh->dev[i].toread;
+ sh->dev[i].toread = NULL;
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wake_up(&conf->wait_for_overlap);
+ if (bi) s->to_read--;
+ while (bi && bi->bi_sector <
+ sh->dev[i].sector + STRIPE_SECTORS) {
+ struct bio *nextbi =
+ r5_next_bio(bi, sh->dev[i].sector);
+ clear_bit(BIO_UPTODATE, &bi->bi_flags);
+ if (--bi->bi_phys_segments == 0) {
+ bi->bi_next = *return_bi;
+ *return_bi = bi;
+ }
+ bi = nextbi;
+ }
+ }
+ spin_unlock_irq(&conf->device_lock);
+ if (bitmap_end)
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0, 0);
+ }
+
+}
+
+/* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
+ * to process
+ */
+static int __handle_issuing_new_read_requests5(struct stripe_head *sh,
+ struct stripe_head_state *s, int disk_idx, int disks)
+{
+ struct r5dev *dev = &sh->dev[disk_idx];
+ struct r5dev *failed_dev = &sh->dev[s->failed_num];
+
+ /* don't schedule compute operations or reads on the parity block while
+ * a check is in flight
+ */
+ if ((disk_idx == sh->pd_idx) &&
+ test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
+ return ~0;
+
+ /* is the data in this block needed, and can we get it? */
+ if (!test_bit(R5_LOCKED, &dev->flags) &&
+ !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread ||
+ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
+ s->syncing || s->expanding || (s->failed &&
+ (failed_dev->toread || (failed_dev->towrite &&
+ !test_bit(R5_OVERWRITE, &failed_dev->flags)
+ ))))) {
+ /* 1/ We would like to get this block, possibly by computing it,
+ * but we might not be able to.
+ *
+ * 2/ Since parity check operations potentially make the parity
+ * block !uptodate, it will need to be refreshed before any
+ * compute operations on data disks are scheduled.
+ *
+ * 3/ We hold off parity block re-reads until check operations
+ * have quiesced.
+ */
+ if ((s->uptodate == disks - 1) &&
+ !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+ set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ set_bit(R5_Wantcompute, &dev->flags);
+ sh->ops.target = disk_idx;
+ s->req_compute = 1;
+ sh->ops.count++;
+ /* Careful: from this point on 'uptodate' is in the eye
+ * of raid5_run_ops which services 'compute' operations
+ * before writes. R5_Wantcompute flags a block that will
+ * be R5_UPTODATE by the time it is needed for a
+ * subsequent operation.
+ */
+ s->uptodate++;
+ return 0; /* uptodate + compute == disks */
+ } else if ((s->uptodate < disks - 1) &&
+ test_bit(R5_Insync, &dev->flags)) {
+ /* Note: we hold off compute operations while checks are
+ * in flight, but we still prefer 'compute' over 'read'
+ * hence we only read if (uptodate < disks-1)
+ */
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+ s->locked++;
+ pr_debug("Reading block %d (sync=%d)\n", disk_idx,
+ s->syncing);
+ }
+ }
+
+ return ~0;
+}
+
+static void handle_issuing_new_read_requests5(struct stripe_head *sh,
+ struct stripe_head_state *s, int disks)
+{
+ int i;
+
+ /* Clear completed compute operations. Parity recovery
+ * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled
+ * later on in this routine
+ */
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+ !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ }
+
+ /* look for blocks to read/compute, skip this if a compute
+ * is already in flight, or if the stripe contents are in the
+ * midst of changing due to a write
+ */
+ if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ for (i = disks; i--; )
+ if (__handle_issuing_new_read_requests5(
+ sh, s, i, disks) == 0)
+ break;
+ }
+ set_bit(STRIPE_HANDLE, &sh->state);
+}
+
+static void handle_issuing_new_read_requests6(struct stripe_head *sh,
+ struct stripe_head_state *s, struct r6_state *r6s,
+ int disks)
+{
+ int i;
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (!test_bit(R5_LOCKED, &dev->flags) &&
+ !test_bit(R5_UPTODATE, &dev->flags) &&
+ (dev->toread || (dev->towrite &&
+ !test_bit(R5_OVERWRITE, &dev->flags)) ||
+ s->syncing || s->expanding ||
+ (s->failed >= 1 &&
+ (sh->dev[r6s->failed_num[0]].toread ||
+ s->to_write)) ||
+ (s->failed >= 2 &&
+ (sh->dev[r6s->failed_num[1]].toread ||
+ s->to_write)))) {
+ /* we would like to get this block, possibly
+ * by computing it, but we might not be able to
+ */
+ if (s->uptodate == disks-1) {
+ pr_debug("Computing stripe %llu block %d\n",
+ (unsigned long long)sh->sector, i);
+ compute_block_1(sh, i, 0);
+ s->uptodate++;
+ } else if ( s->uptodate == disks-2 && s->failed >= 2 ) {
+ /* Computing 2-failure is *very* expensive; only
+ * do it if failed >= 2
+ */
+ int other;
+ for (other = disks; other--; ) {
+ if (other == i)
+ continue;
+ if (!test_bit(R5_UPTODATE,
+ &sh->dev[other].flags))
+ break;
+ }
+ BUG_ON(other < 0);
+ pr_debug("Computing stripe %llu blocks %d,%d\n",
+ (unsigned long long)sh->sector,
+ i, other);
+ compute_block_2(sh, i, other);
+ s->uptodate += 2;
+ } else if (test_bit(R5_Insync, &dev->flags)) {
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ s->locked++;
+ pr_debug("Reading block %d (sync=%d)\n",
+ i, s->syncing);
+ }
+ }
+ }
+ set_bit(STRIPE_HANDLE, &sh->state);
+}
+
+
+/* handle_completed_write_requests
+ * any written block on an uptodate or failed drive can be returned.
+ * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
+ * never LOCKED, so we don't need to test 'failed' directly.
+ */
+static void handle_completed_write_requests(raid5_conf_t *conf,
+ struct stripe_head *sh, int disks, struct bio **return_bi)
+{
+ int i;
+ struct r5dev *dev;
+
+ for (i = disks; i--; )
+ if (sh->dev[i].written) {
+ dev = &sh->dev[i];
+ if (!test_bit(R5_LOCKED, &dev->flags) &&
+ test_bit(R5_UPTODATE, &dev->flags)) {
+ /* We can return any write requests */
+ struct bio *wbi, *wbi2;
+ int bitmap_end = 0;
+ pr_debug("Return write for disc %d\n", i);
+ spin_lock_irq(&conf->device_lock);
+ wbi = dev->written;
+ dev->written = NULL;
+ while (wbi && wbi->bi_sector <
+ dev->sector + STRIPE_SECTORS) {
+ wbi2 = r5_next_bio(wbi, dev->sector);
+ if (--wbi->bi_phys_segments == 0) {
+ md_write_end(conf->mddev);
+ wbi->bi_next = *return_bi;
+ *return_bi = wbi;
+ }
+ wbi = wbi2;
+ }
+ if (dev->towrite == NULL)
+ bitmap_end = 1;
+ spin_unlock_irq(&conf->device_lock);
+ if (bitmap_end)
+ bitmap_endwrite(conf->mddev->bitmap,
+ sh->sector,
+ STRIPE_SECTORS,
+ !test_bit(STRIPE_DEGRADED, &sh->state),
+ 0);
+ }
+ }
+}
+
+static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
+ struct stripe_head *sh, struct stripe_head_state *s, int disks)
+{
+ int rmw = 0, rcw = 0, i;
+ for (i = disks; i--; ) {
+ /* would I have to read this buffer for read_modify_write */
+ struct r5dev *dev = &sh->dev[i];
+ if ((dev->towrite || i == sh->pd_idx) &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ if (test_bit(R5_Insync, &dev->flags))
+ rmw++;
+ else
+ rmw += 2*disks; /* cannot read it */
+ }
+ /* Would I have to read this buffer for reconstruct_write */
+ if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ if (test_bit(R5_Insync, &dev->flags)) rcw++;
+ else
+ rcw += 2*disks;
+ }
+ }
+ pr_debug("for sector %llu, rmw=%d rcw=%d\n",
+ (unsigned long long)sh->sector, rmw, rcw);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ if (rmw < rcw && rmw > 0)
+ /* prefer read-modify-write, but need to get some data */
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if ((dev->towrite || i == sh->pd_idx) &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
+ test_bit(R5_Insync, &dev->flags)) {
+ if (
+ test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ pr_debug("Read_old block "
+ "%d for r-m-w\n", i);
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(
+ STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+ s->locked++;
+ } else {
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ }
+ }
+ if (rcw <= rmw && rcw > 0)
+ /* want reconstruct write, but need to get some data */
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
+ i != sh->pd_idx &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ !(test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Wantcompute, &dev->flags)) &&
+ test_bit(R5_Insync, &dev->flags)) {
+ if (
+ test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ pr_debug("Read_old block "
+ "%d for Reconstruct\n", i);
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(
+ STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+ s->locked++;
+ } else {
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ }
+ }
+ /* now if nothing is locked, and if we have enough data,
+ * we can start a write request
+ */
+ /* since handle_stripe can be called at any time we need to handle the
+ * case where a compute block operation has been submitted and then a
+ * subsequent call wants to start a write request. raid5_run_ops only
+ * handles the case where compute block and postxor are requested
+ * simultaneously. If this is not the case then new writes need to be
+ * held off until the compute completes.
+ */
+ if ((s->req_compute ||
+ !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) &&
+ (s->locked == 0 && (rcw == 0 || rmw == 0) &&
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+ s->locked += handle_write_operations5(sh, rcw == 0, 0);
+}
+
+static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
+ struct stripe_head *sh, struct stripe_head_state *s,
+ struct r6_state *r6s, int disks)
+{
+ int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i;
+ int qd_idx = r6s->qd_idx;
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ /* Would I have to read this buffer for reconstruct_write */
+ if (!test_bit(R5_OVERWRITE, &dev->flags)
+ && i != pd_idx && i != qd_idx
+ && (!test_bit(R5_LOCKED, &dev->flags)
+ ) &&
+ !test_bit(R5_UPTODATE, &dev->flags)) {
+ if (test_bit(R5_Insync, &dev->flags)) rcw++;
+ else {
+ pr_debug("raid6: must_compute: "
+ "disk %d flags=%#lx\n", i, dev->flags);
+ must_compute++;
+ }
+ }
+ }
+ pr_debug("for sector %llu, rcw=%d, must_compute=%d\n",
+ (unsigned long long)sh->sector, rcw, must_compute);
+ set_bit(STRIPE_HANDLE, &sh->state);
+
+ if (rcw > 0)
+ /* want reconstruct write, but need to get some data */
+ for (i = disks; i--; ) {
+ struct r5dev *dev = &sh->dev[i];
+ if (!test_bit(R5_OVERWRITE, &dev->flags)
+ && !(s->failed == 0 && (i == pd_idx || i == qd_idx))
+ && !test_bit(R5_LOCKED, &dev->flags) &&
+ !test_bit(R5_UPTODATE, &dev->flags) &&
+ test_bit(R5_Insync, &dev->flags)) {
+ if (
+ test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ pr_debug("Read_old stripe %llu "
+ "block %d for Reconstruct\n",
+ (unsigned long long)sh->sector, i);
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantread, &dev->flags);
+ s->locked++;
+ } else {
+ pr_debug("Request delayed stripe %llu "
+ "block %d for Reconstruct\n",
+ (unsigned long long)sh->sector, i);
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ }
+ }
+ }
+ /* now if nothing is locked, and if we have enough data, we can start a
+ * write request
+ */
+ if (s->locked == 0 && rcw == 0 &&
+ !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
+ if (must_compute > 0) {
+ /* We have failed blocks and need to compute them */
+ switch (s->failed) {
+ case 0:
+ BUG();
+ case 1:
+ compute_block_1(sh, r6s->failed_num[0], 0);
+ break;
+ case 2:
+ compute_block_2(sh, r6s->failed_num[0],
+ r6s->failed_num[1]);
+ break;
+ default: /* This request should have been failed? */
+ BUG();
+ }
+ }
+
+ pr_debug("Computing parity for stripe %llu\n",
+ (unsigned long long)sh->sector);
+ compute_parity6(sh, RECONSTRUCT_WRITE);
+ /* now every locked buffer is ready to be written */
+ for (i = disks; i--; )
+ if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
+ pr_debug("Writing stripe %llu block %d\n",
+ (unsigned long long)sh->sector, i);
+ s->locked++;
+ set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ }
+ /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
+ set_bit(STRIPE_INSYNC, &sh->state);
+
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+ }
+}
+
+static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
+ struct stripe_head_state *s, int disks)
+{
+ set_bit(STRIPE_HANDLE, &sh->state);
+ /* Take one of the following actions:
+ * 1/ start a check parity operation if (uptodate == disks)
+ * 2/ finish a check parity operation and act on the result
+ * 3/ skip to the writeback section if we previously
+ * initiated a recovery operation
+ */
+ if (s->failed == 0 &&
+ !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+ if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) {
+ BUG_ON(s->uptodate != disks);
+ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+ sh->ops.count++;
+ s->uptodate--;
+ } else if (
+ test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) {
+ clear_bit(STRIPE_OP_CHECK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_CHECK, &sh->ops.pending);
+
+ if (sh->ops.zero_sum_result == 0)
+ /* parity is correct (on disc,
+ * not in buffer any more)
+ */
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ conf->mddev->resync_mismatches +=
+ STRIPE_SECTORS;
+ if (test_bit(
+ MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ set_bit(STRIPE_OP_COMPUTE_BLK,
+ &sh->ops.pending);
+ set_bit(STRIPE_OP_MOD_REPAIR_PD,
+ &sh->ops.pending);
+ set_bit(R5_Wantcompute,
+ &sh->dev[sh->pd_idx].flags);
+ sh->ops.target = sh->pd_idx;
+ sh->ops.count++;
+ s->uptodate++;
+ }
+ }
+ }
+ }
+
+ /* check if we can clear a parity disk reconstruct */
+ if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) &&
+ test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) {
+
+ clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack);
+ clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending);
+ }
+
+ /* Wait for check parity and compute block operations to complete
+ * before write-back
+ */
+ if (!test_bit(STRIPE_INSYNC, &sh->state) &&
+ !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) {
+ struct r5dev *dev;
+ /* either failed parity check, or recovery is happening */
+ if (s->failed == 0)
+ s->failed_num = sh->pd_idx;
+ dev = &sh->dev[s->failed_num];
+ BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
+ BUG_ON(s->uptodate != disks);
+
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+
+ clear_bit(STRIPE_DEGRADED, &sh->state);
+ s->locked++;
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+}
+
+
+static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
+ struct stripe_head_state *s,
+ struct r6_state *r6s, struct page *tmp_page,
+ int disks)
+{
+ int update_p = 0, update_q = 0;
+ struct r5dev *dev;
+ int pd_idx = sh->pd_idx;
+ int qd_idx = r6s->qd_idx;
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+
+ BUG_ON(s->failed > 2);
+ BUG_ON(s->uptodate < disks);
+ /* Want to check and possibly repair P and Q.
+ * However there could be one 'failed' device, in which
+ * case we can only check one of them, possibly using the
+ * other to generate missing data
+ */
+
+ /* If !tmp_page, we cannot do the calculations,
+ * but as we have set STRIPE_HANDLE, we will soon be called
+ * by handle_stripe with a tmp_page - just wait until then.
+ */
+ if (tmp_page) {
+ if (s->failed == r6s->q_failed) {
+ /* The only possible failed device holds 'Q', so it
+ * makes sense to check P (If anything else were failed,
+ * we would have used P to recreate it).
+ */
+ compute_block_1(sh, pd_idx, 1);
+ if (!page_is_zero(sh->dev[pd_idx].page)) {
+ compute_block_1(sh, pd_idx, 0);
+ update_p = 1;
+ }
+ }
+ if (!r6s->q_failed && s->failed < 2) {
+ /* q is not failed, and we didn't use it to generate
+ * anything, so it makes sense to check it
+ */
+ memcpy(page_address(tmp_page),
+ page_address(sh->dev[qd_idx].page),
+ STRIPE_SIZE);
+ compute_parity6(sh, UPDATE_PARITY);
+ if (memcmp(page_address(tmp_page),
+ page_address(sh->dev[qd_idx].page),
+ STRIPE_SIZE) != 0) {
+ clear_bit(STRIPE_INSYNC, &sh->state);
+ update_q = 1;
+ }
+ }
+ if (update_p || update_q) {
+ conf->mddev->resync_mismatches += STRIPE_SECTORS;
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ update_p = update_q = 0;
+ }
+
+ /* now write out any block on a failed drive,
+ * or P or Q if they need it
+ */
+
+ if (s->failed == 2) {
+ dev = &sh->dev[r6s->failed_num[1]];
+ s->locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ if (s->failed >= 1) {
+ dev = &sh->dev[r6s->failed_num[0]];
+ s->locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+
+ if (update_p) {
+ dev = &sh->dev[pd_idx];
+ s->locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ if (update_q) {
+ dev = &sh->dev[qd_idx];
+ s->locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ clear_bit(STRIPE_DEGRADED, &sh->state);
+
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+}
+
+static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
+ struct r6_state *r6s)
+{
+ int i;
+
+ /* We have read all the blocks in this stripe and now we need to
+ * copy some of them into a target stripe for expand.
+ */
+ struct dma_async_tx_descriptor *tx = NULL;
+ clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ for (i = 0; i < sh->disks; i++)
+ if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) {
+ int dd_idx, pd_idx, j;
+ struct stripe_head *sh2;
+
+ sector_t bn = compute_blocknr(sh, i);
+ sector_t s = raid5_compute_sector(bn, conf->raid_disks,
+ conf->raid_disks -
+ conf->max_degraded, &dd_idx,
+ &pd_idx, conf);
+ sh2 = get_active_stripe(conf, s, conf->raid_disks,
+ pd_idx, 1);
+ if (sh2 == NULL)
+ /* so far only the early blocks of this stripe
+ * have been requested. When later blocks
+ * get requested, we will try again
+ */
+ continue;
+ if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
+ test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
+ /* must have already done this block */
+ release_stripe(sh2);
+ continue;
+ }
+
+ /* place all the copies on one channel */
+ tx = async_memcpy(sh2->dev[dd_idx].page,
+ sh->dev[i].page, 0, 0, STRIPE_SIZE,
+ ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+
+ set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
+ set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
+ for (j = 0; j < conf->raid_disks; j++)
+ if (j != sh2->pd_idx &&
+ (r6s && j != r6s->qd_idx) &&
+ !test_bit(R5_Expanded, &sh2->dev[j].flags))
+ break;
+ if (j == conf->raid_disks) {
+ set_bit(STRIPE_EXPAND_READY, &sh2->state);
+ set_bit(STRIPE_HANDLE, &sh2->state);
+ }
+ release_stripe(sh2);
+
+ /* done submitting copies, wait for them to complete */
+ if (i + 1 >= sh->disks) {
+ async_tx_ack(tx);
+ dma_wait_for_async_tx(tx);
+ }
+ }
+}
/*
* handle_stripe - do things to a stripe.
@@ -1339,81 +2603,70 @@ static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
* schedule a write of some buffers
* return confirmation of parity correctness
*
- * Parity calculations are done inside the stripe lock
* buffers are taken off read_list or write_list, and bh_cache buffers
* get BH_Lock set before the stripe lock is released.
*
*/
-
+
static void handle_stripe5(struct stripe_head *sh)
{
raid5_conf_t *conf = sh->raid_conf;
- int disks = sh->disks;
- struct bio *return_bi= NULL;
- struct bio *bi;
- int i;
- int syncing, expanding, expanded;
- int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
- int non_overwrite = 0;
- int failed_num=0;
+ int disks = sh->disks, i;
+ struct bio *return_bi = NULL;
+ struct stripe_head_state s;
struct r5dev *dev;
+ unsigned long pending = 0;
- PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
- (unsigned long long)sh->sector, atomic_read(&sh->count),
- sh->pd_idx);
+ memset(&s, 0, sizeof(s));
+ pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
+ "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state,
+ atomic_read(&sh->count), sh->pd_idx,
+ sh->ops.pending, sh->ops.ack, sh->ops.complete);
spin_lock(&sh->lock);
clear_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
- syncing = test_bit(STRIPE_SYNCING, &sh->state);
- expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
/* Now to look around and see what can be done */
rcu_read_lock();
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
- dev = &sh->dev[i];
+ struct r5dev *dev = &sh->dev[i];
clear_bit(R5_Insync, &dev->flags);
- PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
- /* maybe we can reply to a read */
- if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
- struct bio *rbi, *rbi2;
- PRINTK("Return read for disc %d\n", i);
- spin_lock_irq(&conf->device_lock);
- rbi = dev->toread;
- dev->toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &dev->flags))
- wake_up(&conf->wait_for_overlap);
- spin_unlock_irq(&conf->device_lock);
- while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- copy_data(0, rbi, dev->page, dev->sector);
- rbi2 = r5_next_bio(rbi, dev->sector);
- spin_lock_irq(&conf->device_lock);
- if (--rbi->bi_phys_segments == 0) {
- rbi->bi_next = return_bi;
- return_bi = rbi;
- }
- spin_unlock_irq(&conf->device_lock);
- rbi = rbi2;
- }
- }
+ pr_debug("check %d: state 0x%lx toread %p read %p write %p "
+ "written %p\n", i, dev->flags, dev->toread, dev->read,
+ dev->towrite, dev->written);
- /* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
+ /* maybe we can request a biofill operation
+ *
+ * new wantfill requests are only permitted while
+ * STRIPE_OP_BIOFILL is clear
+ */
+ if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
+ !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+ set_bit(R5_Wantfill, &dev->flags);
-
- if (dev->toread) to_read++;
+ /* now count some things */
+ if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
+ if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
+ if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
+
+ if (test_bit(R5_Wantfill, &dev->flags))
+ s.to_fill++;
+ else if (dev->toread)
+ s.to_read++;
if (dev->towrite) {
- to_write++;
+ s.to_write++;
if (!test_bit(R5_OVERWRITE, &dev->flags))
- non_overwrite++;
+ s.non_overwrite++;
}
- if (dev->written) written++;
+ if (dev->written)
+ s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -1422,306 +2675,131 @@ static void handle_stripe5(struct stripe_head *sh)
}
if (!rdev || !test_bit(In_sync, &rdev->flags)
|| test_bit(R5_ReadError, &dev->flags)) {
- failed++;
- failed_num = i;
+ s.failed++;
+ s.failed_num = i;
} else
set_bit(R5_Insync, &dev->flags);
}
rcu_read_unlock();
- PRINTK("locked=%d uptodate=%d to_read=%d"
+
+ if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
+ sh->ops.count++;
+
+ pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d\n",
- locked, uptodate, to_read, to_write, failed, failed_num);
+ s.locked, s.uptodate, s.to_read, s.to_write,
+ s.failed, s.failed_num);
/* check if the array has lost two devices and, if so, some requests might
* need to be failed
*/
- if (failed > 1 && to_read+to_write+written) {
- for (i=disks; i--; ) {
- int bitmap_end = 0;
-
- if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- mdk_rdev_t *rdev;
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags))
- /* multiple read failures in one stripe */
- md_error(conf->mddev, rdev);
- rcu_read_unlock();
- }
-
- spin_lock_irq(&conf->device_lock);
- /* fail all writes first */
- bi = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
- if (bi) { to_write--; bitmap_end = 1; }
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = nextbi;
- }
- /* and fail all 'written' */
- bi = sh->dev[i].written;
- sh->dev[i].written = NULL;
- if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
- struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = bi2;
- }
-
- /* fail any reads if this device is non-operational */
- if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
- test_bit(R5_ReadError, &sh->dev[i].flags)) {
- bi = sh->dev[i].toread;
- sh->dev[i].toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
- if (bi) to_read--;
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = nextbi;
- }
- }
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
- }
- }
- if (failed > 1 && syncing) {
+ if (s.failed > 1 && s.to_read+s.to_write+s.written)
+ handle_requests_to_failed_array(conf, sh, &s, disks,
+ &return_bi);
+ if (s.failed > 1 && s.syncing) {
md_done_sync(conf->mddev, STRIPE_SECTORS,0);
clear_bit(STRIPE_SYNCING, &sh->state);
- syncing = 0;
+ s.syncing = 0;
}
/* might be able to return some write requests if the parity block
* is safe, or on a failed drive
*/
dev = &sh->dev[sh->pd_idx];
- if ( written &&
- ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags))
- || (failed == 1 && failed_num == sh->pd_idx))
- ) {
- /* any written block on an uptodate or failed drive can be returned.
- * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
- * never LOCKED, so we don't need to test 'failed' directly.
- */
- for (i=disks; i--; )
- if (sh->dev[i].written) {
- dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags) ) {
- /* We can return any write requests */
- struct bio *wbi, *wbi2;
- int bitmap_end = 0;
- PRINTK("Return write for disc %d\n", i);
- spin_lock_irq(&conf->device_lock);
- wbi = dev->written;
- dev->written = NULL;
- while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- wbi2 = r5_next_bio(wbi, dev->sector);
- if (--wbi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- wbi->bi_next = return_bi;
- return_bi = wbi;
- }
- wbi = wbi2;
- }
- if (dev->towrite == NULL)
- bitmap_end = 1;
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
- !test_bit(STRIPE_DEGRADED, &sh->state), 0);
- }
- }
- }
+ if ( s.written &&
+ ((test_bit(R5_Insync, &dev->flags) &&
+ !test_bit(R5_LOCKED, &dev->flags) &&
+ test_bit(R5_UPTODATE, &dev->flags)) ||
+ (s.failed == 1 && s.failed_num == sh->pd_idx)))
+ handle_completed_write_requests(conf, sh, disks, &return_bi);
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- syncing ||
- expanding ||
- (failed && (sh->dev[failed_num].toread ||
- (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
- )
- ) {
- /* we would like to get this block, possibly
- * by computing it, but we might not be able to
- */
- if (uptodate == disks-1) {
- PRINTK("Computing block %d\n", i);
- compute_block(sh, i);
- uptodate++;
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- locked++;
- PRINTK("Reading block %d (sync=%d)\n",
- i, syncing);
- }
- }
- }
- set_bit(STRIPE_HANDLE, &sh->state);
+ if (s.to_read || s.non_overwrite ||
+ (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding ||
+ test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
+ handle_issuing_new_read_requests5(sh, &s, disks);
+
+ /* Now we check to see if any write operations have recently
+ * completed
+ */
+
+ /* leave prexor set until postxor is done, allows us to distinguish
+ * a rmw from a rcw during biodrain
+ */
+ if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) &&
+ test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
+
+ clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
+ clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack);
+ clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+
+ for (i = disks; i--; )
+ clear_bit(R5_Wantprexor, &sh->dev[i].flags);
}
- /* now to consider writing and what else, if anything should be read */
- if (to_write) {
- int rmw=0, rcw=0;
- for (i=disks ; i--;) {
- /* would I have to read this buffer for read_modify_write */
+ /* if only POSTXOR is set then this is an 'expand' postxor */
+ if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) &&
+ test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) {
+
+ clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete);
+ clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack);
+ clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending);
+
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+
+ /* All the 'written' buffers and the parity block are ready to
+ * be written back to disk
+ */
+ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
+ for (i = disks; i--; ) {
dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
- (!test_bit(R5_LOCKED, &dev->flags)
- ) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
- if (test_bit(R5_Insync, &dev->flags)
-/* && !(!mddev->insync && i == sh->pd_idx) */
- )
- rmw++;
- else rmw += 2*disks; /* cannot read it */
- }
- /* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
- (!test_bit(R5_LOCKED, &dev->flags)
- ) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
- if (test_bit(R5_Insync, &dev->flags)) rcw++;
- else rcw += 2*disks;
+ if (test_bit(R5_LOCKED, &dev->flags) &&
+ (i == sh->pd_idx || dev->written)) {
+ pr_debug("Writing block %d\n", i);
+ set_bit(R5_Wantwrite, &dev->flags);
+ if (!test_and_set_bit(
+ STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
+ if (!test_bit(R5_Insync, &dev->flags) ||
+ (i == sh->pd_idx && s.failed == 0))
+ set_bit(STRIPE_INSYNC, &sh->state);
}
}
- PRINTK("for sector %llu, rmw=%d rcw=%d\n",
- (unsigned long long)sh->sector, rmw, rcw);
- set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0)
- /* prefer read-modify-write, but need to get some data */
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
- !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- test_bit(R5_Insync, &dev->flags)) {
- if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- {
- PRINTK("Read_old block %d for r-m-w\n", i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- locked++;
- } else {
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
- if (rcw <= rmw && rcw > 0)
- /* want reconstruct write, but need to get some data */
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
- !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- test_bit(R5_Insync, &dev->flags)) {
- if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- {
- PRINTK("Read_old block %d for Reconstruct\n", i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- locked++;
- } else {
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
- /* now if nothing is locked, and if we have enough data, we can start a write request */
- if (locked == 0 && (rcw == 0 ||rmw == 0) &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
- PRINTK("Computing parity...\n");
- compute_parity5(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
- /* now every locked buffer is ready to be written */
- for (i=disks; i--;)
- if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
- PRINTK("Writing block %d\n", i);
- locked++;
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- if (!test_bit(R5_Insync, &sh->dev[i].flags)
- || (i==sh->pd_idx && failed == 0))
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
}
}
- /* maybe we need to check and possibly fix the parity for this stripe
- * Any reads will already have been scheduled, so we just see if enough data
- * is available
+ /* Now to consider new write requests and what else, if anything
+ * should be read. We do not handle new writes when:
+ * 1/ A 'write' operation (copy+xor) is already in flight.
+ * 2/ A 'check' operation is in flight, as it may clobber the parity
+ * block.
*/
- if (syncing && locked == 0 &&
- !test_bit(STRIPE_INSYNC, &sh->state)) {
- set_bit(STRIPE_HANDLE, &sh->state);
- if (failed == 0) {
- BUG_ON(uptodate != disks);
- compute_parity5(sh, CHECK_PARITY);
- uptodate--;
- if (page_is_zero(sh->dev[sh->pd_idx].page)) {
- /* parity is correct (on disc, not in buffer any more) */
- set_bit(STRIPE_INSYNC, &sh->state);
- } else {
- conf->mddev->resync_mismatches += STRIPE_SECTORS;
- if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
- /* don't try to repair!! */
- set_bit(STRIPE_INSYNC, &sh->state);
- else {
- compute_block(sh, sh->pd_idx);
- uptodate++;
- }
- }
- }
- if (!test_bit(STRIPE_INSYNC, &sh->state)) {
- /* either failed parity check, or recovery is happening */
- if (failed==0)
- failed_num = sh->pd_idx;
- dev = &sh->dev[failed_num];
- BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
- BUG_ON(uptodate != disks);
+ if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) &&
+ !test_bit(STRIPE_OP_CHECK, &sh->ops.pending))
+ handle_issuing_new_write_requests5(conf, sh, &s, disks);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- clear_bit(STRIPE_DEGRADED, &sh->state);
- locked++;
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- }
- if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+ /* maybe we need to check and possibly fix the parity for this stripe
+ * Any reads will already have been scheduled, so we just see if enough
+ * data is available. The parity check is held off while parity
+ * dependent operations are in flight.
+ */
+ if ((s.syncing && s.locked == 0 &&
+ !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) &&
+ !test_bit(STRIPE_INSYNC, &sh->state)) ||
+ test_bit(STRIPE_OP_CHECK, &sh->ops.pending) ||
+ test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending))
+ handle_parity_checks5(conf, sh, &s, disks);
+
+ if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS,1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
@@ -1729,186 +2807,102 @@ static void handle_stripe5(struct stripe_head *sh)
/* If the failed drive is just a ReadError, then we might need to progress
* the repair/check process
*/
- if (failed == 1 && ! conf->mddev->ro &&
- test_bit(R5_ReadError, &sh->dev[failed_num].flags)
- && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
- && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
+ if (s.failed == 1 && !conf->mddev->ro &&
+ test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
+ && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
+ && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
) {
- dev = &sh->dev[failed_num];
+ dev = &sh->dev[s.failed_num];
if (!test_bit(R5_ReWrite, &dev->flags)) {
set_bit(R5_Wantwrite, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
set_bit(R5_ReWrite, &dev->flags);
set_bit(R5_LOCKED, &dev->flags);
- locked++;
+ s.locked++;
} else {
/* let's read it back */
set_bit(R5_Wantread, &dev->flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
set_bit(R5_LOCKED, &dev->flags);
- locked++;
+ s.locked++;
}
}
- if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
- /* Need to write out all blocks after computing parity */
- sh->disks = conf->raid_disks;
- sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
- compute_parity5(sh, RECONSTRUCT_WRITE);
- for (i= conf->raid_disks; i--;) {
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- locked++;
+ /* Finish postxor operations initiated by the expansion
+ * process
+ */
+ if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) &&
+ !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) {
+
+ clear_bit(STRIPE_EXPANDING, &sh->state);
+
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending);
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack);
+ clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
+
+ for (i = conf->raid_disks; i--; ) {
set_bit(R5_Wantwrite, &sh->dev[i].flags);
+ if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending))
+ sh->ops.count++;
}
- clear_bit(STRIPE_EXPANDING, &sh->state);
- } else if (expanded) {
+ }
+
+ if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
+ !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
+ /* Need to write out all blocks after computing parity */
+ sh->disks = conf->raid_disks;
+ sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
+ conf->raid_disks);
+ s.locked += handle_write_operations5(sh, 0, 1);
+ } else if (s.expanded &&
+ !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
}
- if (expanding && locked == 0) {
- /* We have read all the blocks in this stripe and now we need to
- * copy some of them into a target stripe for expand.
- */
- clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- for (i=0; i< sh->disks; i++)
- if (i != sh->pd_idx) {
- int dd_idx, pd_idx, j;
- struct stripe_head *sh2;
-
- sector_t bn = compute_blocknr(sh, i);
- sector_t s = raid5_compute_sector(bn, conf->raid_disks,
- conf->raid_disks-1,
- &dd_idx, &pd_idx, conf);
- sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
- if (sh2 == NULL)
- /* so far only the early blocks of this stripe
- * have been requested. When later blocks
- * get requested, we will try again
- */
- continue;
- if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
- test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
- /* must have already done this block */
- release_stripe(sh2);
- continue;
- }
- memcpy(page_address(sh2->dev[dd_idx].page),
- page_address(sh->dev[i].page),
- STRIPE_SIZE);
- set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
- set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
- for (j=0; j<conf->raid_disks; j++)
- if (j != sh2->pd_idx &&
- !test_bit(R5_Expanded, &sh2->dev[j].flags))
- break;
- if (j == conf->raid_disks) {
- set_bit(STRIPE_EXPAND_READY, &sh2->state);
- set_bit(STRIPE_HANDLE, &sh2->state);
- }
- release_stripe(sh2);
- }
- }
+ if (s.expanding && s.locked == 0)
+ handle_stripe_expansion(conf, sh, NULL);
+
+ if (sh->ops.count)
+ pending = get_stripe_work(sh);
spin_unlock(&sh->lock);
- while ((bi=return_bi)) {
- int bytes = bi->bi_size;
+ if (pending)
+ raid5_run_ops(sh, pending);
- return_bi = bi->bi_next;
- bi->bi_next = NULL;
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
- test_bit(BIO_UPTODATE, &bi->bi_flags)
- ? 0 : -EIO);
- }
- for (i=disks; i-- ;) {
- int rw;
- struct bio *bi;
- mdk_rdev_t *rdev;
- if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
- rw = WRITE;
- else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = READ;
- else
- continue;
-
- bi = &sh->dev[i].req;
-
- bi->bi_rw = rw;
- if (rw == WRITE)
- bi->bi_end_io = raid5_end_write_request;
- else
- bi->bi_end_io = raid5_end_read_request;
-
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(Faulty, &rdev->flags))
- rdev = NULL;
- if (rdev)
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- if (rdev) {
- if (syncing || expanding || expanded)
- md_sync_acct(rdev->bdev, STRIPE_SECTORS);
+ return_io(return_bi);
- bi->bi_bdev = rdev->bdev;
- PRINTK("for %llu schedule op %ld on disc %d\n",
- (unsigned long long)sh->sector, bi->bi_rw, i);
- atomic_inc(&sh->count);
- bi->bi_sector = sh->sector + rdev->data_offset;
- bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
- bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
- bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
- bi->bi_io_vec[0].bv_offset = 0;
- bi->bi_size = STRIPE_SIZE;
- bi->bi_next = NULL;
- if (rw == WRITE &&
- test_bit(R5_ReWrite, &sh->dev[i].flags))
- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
- generic_make_request(bi);
- } else {
- if (rw == WRITE)
- set_bit(STRIPE_DEGRADED, &sh->state);
- PRINTK("skip op %ld on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
}
static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
{
raid6_conf_t *conf = sh->raid_conf;
int disks = sh->disks;
- struct bio *return_bi= NULL;
- struct bio *bi;
- int i;
- int syncing, expanding, expanded;
- int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
- int non_overwrite = 0;
- int failed_num[2] = {0, 0};
+ struct bio *return_bi = NULL;
+ int i, pd_idx = sh->pd_idx;
+ struct stripe_head_state s;
+ struct r6_state r6s;
struct r5dev *dev, *pdev, *qdev;
- int pd_idx = sh->pd_idx;
- int qd_idx = raid6_next_disk(pd_idx, disks);
- int p_failed, q_failed;
- PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
- (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
- pd_idx, qd_idx);
+ r6s.qd_idx = raid6_next_disk(pd_idx, disks);
+ pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
+ "pd_idx=%d, qd_idx=%d\n",
+ (unsigned long long)sh->sector, sh->state,
+ atomic_read(&sh->count), pd_idx, r6s.qd_idx);
+ memset(&s, 0, sizeof(s));
spin_lock(&sh->lock);
clear_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
- syncing = test_bit(STRIPE_SYNCING, &sh->state);
- expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
/* Now to look around and see what can be done */
rcu_read_lock();
@@ -1917,12 +2911,12 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
dev = &sh->dev[i];
clear_bit(R5_Insync, &dev->flags);
- PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
+ pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
i, dev->flags, dev->toread, dev->towrite, dev->written);
/* maybe we can reply to a read */
if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
struct bio *rbi, *rbi2;
- PRINTK("Return read for disc %d\n", i);
+ pr_debug("Return read for disc %d\n", i);
spin_lock_irq(&conf->device_lock);
rbi = dev->toread;
dev->toread = NULL;
@@ -1943,17 +2937,19 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
}
/* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
+ if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
+ if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
- if (dev->toread) to_read++;
+ if (dev->toread)
+ s.to_read++;
if (dev->towrite) {
- to_write++;
+ s.to_write++;
if (!test_bit(R5_OVERWRITE, &dev->flags))
- non_overwrite++;
+ s.non_overwrite++;
}
- if (dev->written) written++;
+ if (dev->written)
+ s.written++;
rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
/* The ReadError flag will just be confusing now */
@@ -1962,96 +2958,27 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
}
if (!rdev || !test_bit(In_sync, &rdev->flags)
|| test_bit(R5_ReadError, &dev->flags)) {
- if ( failed < 2 )
- failed_num[failed] = i;
- failed++;
+ if (s.failed < 2)
+ r6s.failed_num[s.failed] = i;
+ s.failed++;
} else
set_bit(R5_Insync, &dev->flags);
}
rcu_read_unlock();
- PRINTK("locked=%d uptodate=%d to_read=%d"
+ pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
- locked, uptodate, to_read, to_write, failed,
- failed_num[0], failed_num[1]);
- /* check if the array has lost >2 devices and, if so, some requests might
- * need to be failed
+ s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
+ r6s.failed_num[0], r6s.failed_num[1]);
+ /* check if the array has lost >2 devices and, if so, some requests
+ * might need to be failed
*/
- if (failed > 2 && to_read+to_write+written) {
- for (i=disks; i--; ) {
- int bitmap_end = 0;
-
- if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- mdk_rdev_t *rdev;
- rcu_read_lock();
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags))
- /* multiple read failures in one stripe */
- md_error(conf->mddev, rdev);
- rcu_read_unlock();
- }
-
- spin_lock_irq(&conf->device_lock);
- /* fail all writes first */
- bi = sh->dev[i].towrite;
- sh->dev[i].towrite = NULL;
- if (bi) { to_write--; bitmap_end = 1; }
-
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
-
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = nextbi;
- }
- /* and fail all 'written' */
- bi = sh->dev[i].written;
- sh->dev[i].written = NULL;
- if (bi) bitmap_end = 1;
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
- struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = bi2;
- }
-
- /* fail any reads if this device is non-operational */
- if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
- test_bit(R5_ReadError, &sh->dev[i].flags)) {
- bi = sh->dev[i].toread;
- sh->dev[i].toread = NULL;
- if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
- wake_up(&conf->wait_for_overlap);
- if (bi) to_read--;
- while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
- struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (--bi->bi_phys_segments == 0) {
- bi->bi_next = return_bi;
- return_bi = bi;
- }
- bi = nextbi;
- }
- }
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0, 0);
- }
- }
- if (failed > 2 && syncing) {
+ if (s.failed > 2 && s.to_read+s.to_write+s.written)
+ handle_requests_to_failed_array(conf, sh, &s, disks,
+ &return_bi);
+ if (s.failed > 2 && s.syncing) {
md_done_sync(conf->mddev, STRIPE_SECTORS,0);
clear_bit(STRIPE_SYNCING, &sh->state);
- syncing = 0;
+ s.syncing = 0;
}
/*
@@ -2059,279 +2986,41 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
* are safe, or on a failed drive
*/
pdev = &sh->dev[pd_idx];
- p_failed = (failed >= 1 && failed_num[0] == pd_idx)
- || (failed >= 2 && failed_num[1] == pd_idx);
- qdev = &sh->dev[qd_idx];
- q_failed = (failed >= 1 && failed_num[0] == qd_idx)
- || (failed >= 2 && failed_num[1] == qd_idx);
-
- if ( written &&
- ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
+ || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
+ qdev = &sh->dev[r6s.qd_idx];
+ r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx)
+ || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx);
+
+ if ( s.written &&
+ ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
&& !test_bit(R5_LOCKED, &pdev->flags)
- && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
- ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ && test_bit(R5_UPTODATE, &pdev->flags)))) &&
+ ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
&& !test_bit(R5_LOCKED, &qdev->flags)
- && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
- /* any written block on an uptodate or failed drive can be
- * returned. Note that if we 'wrote' to a failed drive,
- * it will be UPTODATE, but never LOCKED, so we don't need
- * to test 'failed' directly.
- */
- for (i=disks; i--; )
- if (sh->dev[i].written) {
- dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags) ) {
- /* We can return any write requests */
- int bitmap_end = 0;
- struct bio *wbi, *wbi2;
- PRINTK("Return write for stripe %llu disc %d\n",
- (unsigned long long)sh->sector, i);
- spin_lock_irq(&conf->device_lock);
- wbi = dev->written;
- dev->written = NULL;
- while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
- wbi2 = r5_next_bio(wbi, dev->sector);
- if (--wbi->bi_phys_segments == 0) {
- md_write_end(conf->mddev);
- wbi->bi_next = return_bi;
- return_bi = wbi;
- }
- wbi = wbi2;
- }
- if (dev->towrite == NULL)
- bitmap_end = 1;
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS,
- !test_bit(STRIPE_DEGRADED, &sh->state), 0);
- }
- }
- }
+ && test_bit(R5_UPTODATE, &qdev->flags)))))
+ handle_completed_write_requests(conf, sh, disks, &return_bi);
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (to_read || non_overwrite || (to_write && failed) ||
- (syncing && (uptodate < disks)) || expanding) {
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- syncing ||
- expanding ||
- (failed >= 1 && (sh->dev[failed_num[0]].toread || to_write)) ||
- (failed >= 2 && (sh->dev[failed_num[1]].toread || to_write))
- )
- ) {
- /* we would like to get this block, possibly
- * by computing it, but we might not be able to
- */
- if (uptodate == disks-1) {
- PRINTK("Computing stripe %llu block %d\n",
- (unsigned long long)sh->sector, i);
- compute_block_1(sh, i, 0);
- uptodate++;
- } else if ( uptodate == disks-2 && failed >= 2 ) {
- /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
- int other;
- for (other=disks; other--;) {
- if ( other == i )
- continue;
- if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
- break;
- }
- BUG_ON(other < 0);
- PRINTK("Computing stripe %llu blocks %d,%d\n",
- (unsigned long long)sh->sector, i, other);
- compute_block_2(sh, i, other);
- uptodate += 2;
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- locked++;
- PRINTK("Reading block %d (sync=%d)\n",
- i, syncing);
- }
- }
- }
- set_bit(STRIPE_HANDLE, &sh->state);
- }
+ if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
+ (s.syncing && (s.uptodate < disks)) || s.expanding)
+ handle_issuing_new_read_requests6(sh, &s, &r6s, disks);
/* now to consider writing and what else, if anything should be read */
- if (to_write) {
- int rcw=0, must_compute=0;
- for (i=disks ; i--;) {
- dev = &sh->dev[i];
- /* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags)
- && i != pd_idx && i != qd_idx
- && (!test_bit(R5_LOCKED, &dev->flags)
- ) &&
- !test_bit(R5_UPTODATE, &dev->flags)) {
- if (test_bit(R5_Insync, &dev->flags)) rcw++;
- else {
- PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
- must_compute++;
- }
- }
- }
- PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
- (unsigned long long)sh->sector, rcw, must_compute);
- set_bit(STRIPE_HANDLE, &sh->state);
-
- if (rcw > 0)
- /* want reconstruct write, but need to get some data */
- for (i=disks; i--;) {
- dev = &sh->dev[i];
- if (!test_bit(R5_OVERWRITE, &dev->flags)
- && !(failed == 0 && (i == pd_idx || i == qd_idx))
- && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
- test_bit(R5_Insync, &dev->flags)) {
- if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- {
- PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- locked++;
- } else {
- PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
- /* now if nothing is locked, and if we have enough data, we can start a write request */
- if (locked == 0 && rcw == 0 &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
- if ( must_compute > 0 ) {
- /* We have failed blocks and need to compute them */
- switch ( failed ) {
- case 0: BUG();
- case 1: compute_block_1(sh, failed_num[0], 0); break;
- case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
- default: BUG(); /* This request should have been failed? */
- }
- }
-
- PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
- compute_parity6(sh, RECONSTRUCT_WRITE);
- /* now every locked buffer is ready to be written */
- for (i=disks; i--;)
- if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
- PRINTK("Writing stripe %llu block %d\n",
- (unsigned long long)sh->sector, i);
- locked++;
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- }
- /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
- set_bit(STRIPE_INSYNC, &sh->state);
-
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
- }
- }
+ if (s.to_write)
+ handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
- * Any reads will already have been scheduled, so we just see if enough data
- * is available
+ * Any reads will already have been scheduled, so we just see if enough
+ * data is available
*/
- if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
- int update_p = 0, update_q = 0;
- struct r5dev *dev;
-
- set_bit(STRIPE_HANDLE, &sh->state);
-
- BUG_ON(failed>2);
- BUG_ON(uptodate < disks);
- /* Want to check and possibly repair P and Q.
- * However there could be one 'failed' device, in which
- * case we can only check one of them, possibly using the
- * other to generate missing data
- */
-
- /* If !tmp_page, we cannot do the calculations,
- * but as we have set STRIPE_HANDLE, we will soon be called
- * by stripe_handle with a tmp_page - just wait until then.
- */
- if (tmp_page) {
- if (failed == q_failed) {
- /* The only possible failed device holds 'Q', so it makes
- * sense to check P (If anything else were failed, we would
- * have used P to recreate it).
- */
- compute_block_1(sh, pd_idx, 1);
- if (!page_is_zero(sh->dev[pd_idx].page)) {
- compute_block_1(sh,pd_idx,0);
- update_p = 1;
- }
- }
- if (!q_failed && failed < 2) {
- /* q is not failed, and we didn't use it to generate
- * anything, so it makes sense to check it
- */
- memcpy(page_address(tmp_page),
- page_address(sh->dev[qd_idx].page),
- STRIPE_SIZE);
- compute_parity6(sh, UPDATE_PARITY);
- if (memcmp(page_address(tmp_page),
- page_address(sh->dev[qd_idx].page),
- STRIPE_SIZE)!= 0) {
- clear_bit(STRIPE_INSYNC, &sh->state);
- update_q = 1;
- }
- }
- if (update_p || update_q) {
- conf->mddev->resync_mismatches += STRIPE_SECTORS;
- if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
- /* don't try to repair!! */
- update_p = update_q = 0;
- }
-
- /* now write out any block on a failed drive,
- * or P or Q if they need it
- */
+ if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state))
+ handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks);
- if (failed == 2) {
- dev = &sh->dev[failed_num[1]];
- locked++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
- if (failed >= 1) {
- dev = &sh->dev[failed_num[0]];
- locked++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
-
- if (update_p) {
- dev = &sh->dev[pd_idx];
- locked ++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
- if (update_q) {
- dev = &sh->dev[qd_idx];
- locked++;
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantwrite, &dev->flags);
- }
- clear_bit(STRIPE_DEGRADED, &sh->state);
-
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- }
-
- if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+ if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS,1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
@@ -2339,9 +3028,9 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
/* If the failed drives are just a ReadError, then we might need
* to progress the repair/check process
*/
- if (failed <= 2 && ! conf->mddev->ro)
- for (i=0; i<failed;i++) {
- dev = &sh->dev[failed_num[i]];
+ if (s.failed <= 2 && !conf->mddev->ro)
+ for (i = 0; i < s.failed; i++) {
+ dev = &sh->dev[r6s.failed_num[i]];
if (test_bit(R5_ReadError, &dev->flags)
&& !test_bit(R5_LOCKED, &dev->flags)
&& test_bit(R5_UPTODATE, &dev->flags)
@@ -2358,7 +3047,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
}
}
- if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
+ if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
/* Need to write out all blocks after computing P&Q */
sh->disks = conf->raid_disks;
sh->pd_idx = stripe_to_pdidx(sh->sector, conf,
@@ -2366,82 +3055,24 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
compute_parity6(sh, RECONSTRUCT_WRITE);
for (i = conf->raid_disks ; i-- ; ) {
set_bit(R5_LOCKED, &sh->dev[i].flags);
- locked++;
+ s.locked++;
set_bit(R5_Wantwrite, &sh->dev[i].flags);
}
clear_bit(STRIPE_EXPANDING, &sh->state);
- } else if (expanded) {
+ } else if (s.expanded) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
}
- if (expanding && locked == 0) {
- /* We have read all the blocks in this stripe and now we need to
- * copy some of them into a target stripe for expand.
- */
- clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- for (i = 0; i < sh->disks ; i++)
- if (i != pd_idx && i != qd_idx) {
- int dd_idx2, pd_idx2, j;
- struct stripe_head *sh2;
-
- sector_t bn = compute_blocknr(sh, i);
- sector_t s = raid5_compute_sector(
- bn, conf->raid_disks,
- conf->raid_disks - conf->max_degraded,
- &dd_idx2, &pd_idx2, conf);
- sh2 = get_active_stripe(conf, s,
- conf->raid_disks,
- pd_idx2, 1);
- if (sh2 == NULL)
- /* so far only the early blocks of
- * this stripe have been requested.
- * When later blocks get requested, we
- * will try again
- */
- continue;
- if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
- test_bit(R5_Expanded,
- &sh2->dev[dd_idx2].flags)) {
- /* must have already done this block */
- release_stripe(sh2);
- continue;
- }
- memcpy(page_address(sh2->dev[dd_idx2].page),
- page_address(sh->dev[i].page),
- STRIPE_SIZE);
- set_bit(R5_Expanded, &sh2->dev[dd_idx2].flags);
- set_bit(R5_UPTODATE, &sh2->dev[dd_idx2].flags);
- for (j = 0 ; j < conf->raid_disks ; j++)
- if (j != sh2->pd_idx &&
- j != raid6_next_disk(sh2->pd_idx,
- sh2->disks) &&
- !test_bit(R5_Expanded,
- &sh2->dev[j].flags))
- break;
- if (j == conf->raid_disks) {
- set_bit(STRIPE_EXPAND_READY,
- &sh2->state);
- set_bit(STRIPE_HANDLE, &sh2->state);
- }
- release_stripe(sh2);
- }
- }
+ if (s.expanding && s.locked == 0)
+ handle_stripe_expansion(conf, sh, &r6s);
spin_unlock(&sh->lock);
- while ((bi=return_bi)) {
- int bytes = bi->bi_size;
+ return_io(return_bi);
- return_bi = bi->bi_next;
- bi->bi_next = NULL;
- bi->bi_size = 0;
- bi->bi_end_io(bi, bytes,
- test_bit(BIO_UPTODATE, &bi->bi_flags)
- ? 0 : -EIO);
- }
for (i=disks; i-- ;) {
int rw;
struct bio *bi;
@@ -2470,11 +3101,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
rcu_read_unlock();
if (rdev) {
- if (syncing || expanding || expanded)
+ if (s.syncing || s.expanding || s.expanded)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev;
- PRINTK("for %llu schedule op %ld on disc %d\n",
+ pr_debug("for %llu schedule op %ld on disc %d\n",
(unsigned long long)sh->sector, bi->bi_rw, i);
atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset;
@@ -2494,7 +3125,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
} else {
if (rw == WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
- PRINTK("skip op %ld on disc %d for sector %llu\n",
+ pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -2738,7 +3369,7 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
}
- PRINTK("raid5_align_endio : io error...handing IO for a retry\n");
+ pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
return 0;
@@ -2776,7 +3407,7 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
mdk_rdev_t *rdev;
if (!in_chunk_boundary(mddev, raid_bio)) {
- PRINTK("chunk_aligned_read : non aligned\n");
+ pr_debug("chunk_aligned_read : non aligned\n");
return 0;
}
/*
@@ -2900,7 +3531,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
new_sector = raid5_compute_sector(logical_sector, disks, data_disks,
&dd_idx, &pd_idx, conf);
- PRINTK("raid5: make_request, sector %llu logical %llu\n",
+ pr_debug("raid5: make_request, sector %llu logical %llu\n",
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
@@ -3273,7 +3904,7 @@ static void raid5d (mddev_t *mddev)
raid5_conf_t *conf = mddev_to_conf(mddev);
int handled;
- PRINTK("+++ raid5d active\n");
+ pr_debug("+++ raid5d active\n");
md_check_recovery(mddev);
@@ -3308,8 +3939,10 @@ static void raid5d (mddev_t *mddev)
handled++;
}
- if (list_empty(&conf->handle_list))
+ if (list_empty(&conf->handle_list)) {
+ async_tx_issue_pending_all();
break;
+ }
first = conf->handle_list.next;
sh = list_entry(first, struct stripe_head, lru);
@@ -3325,13 +3958,13 @@ static void raid5d (mddev_t *mddev)
spin_lock_irq(&conf->device_lock);
}
- PRINTK("%d stripes handled\n", handled);
+ pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
unplug_slaves(mddev);
- PRINTK("--- raid5d inactive\n");
+ pr_debug("--- raid5d inactive\n");
}
static ssize_t
@@ -3507,7 +4140,7 @@ static int run(mddev_t *mddev)
atomic_set(&conf->preread_active_stripes, 0);
atomic_set(&conf->active_aligned_reads, 0);
- PRINTK("raid5: run(%s) called.\n", mdname(mddev));
+ pr_debug("raid5: run(%s) called.\n", mdname(mddev));
ITERATE_RDEV(mddev,rdev,tmp) {
raid_disk = rdev->raid_disk;
@@ -3690,7 +4323,7 @@ static int stop(mddev_t *mddev)
return 0;
}
-#if RAID5_DEBUG
+#ifdef DEBUG
static void print_sh (struct seq_file *seq, struct stripe_head *sh)
{
int i;
@@ -3737,7 +4370,7 @@ static void status (struct seq_file *seq, mddev_t *mddev)
conf->disks[i].rdev &&
test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
-#if RAID5_DEBUG
+#ifdef DEBUG
seq_printf (seq, "\n");
printall(seq, conf);
#endif
diff --git a/drivers/md/xor.c b/drivers/md/xor.c
deleted file mode 100644
index 324897c4be4e..000000000000
--- a/drivers/md/xor.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * xor.c : Multiple Devices driver for Linux
- *
- * Copyright (C) 1996, 1997, 1998, 1999, 2000,
- * Ingo Molnar, Matti Aarnio, Jakub Jelinek, Richard Henderson.
- *
- * Dispatch optimized RAID-5 checksumming functions.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define BH_TRACE 0
-#include <linux/module.h>
-#include <linux/raid/md.h>
-#include <linux/raid/xor.h>
-#include <asm/xor.h>
-
-/* The xor routines to use. */
-static struct xor_block_template *active_template;
-
-void
-xor_block(unsigned int count, unsigned int bytes, void **ptr)
-{
- unsigned long *p0, *p1, *p2, *p3, *p4;
-
- p0 = (unsigned long *) ptr[0];
- p1 = (unsigned long *) ptr[1];
- if (count == 2) {
- active_template->do_2(bytes, p0, p1);
- return;
- }
-
- p2 = (unsigned long *) ptr[2];
- if (count == 3) {
- active_template->do_3(bytes, p0, p1, p2);
- return;
- }
-
- p3 = (unsigned long *) ptr[3];
- if (count == 4) {
- active_template->do_4(bytes, p0, p1, p2, p3);
- return;
- }
-
- p4 = (unsigned long *) ptr[4];
- active_template->do_5(bytes, p0, p1, p2, p3, p4);
-}
-
-/* Set of all registered templates. */
-static struct xor_block_template *template_list;
-
-#define BENCH_SIZE (PAGE_SIZE)
-
-static void
-do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
-{
- int speed;
- unsigned long now;
- int i, count, max;
-
- tmpl->next = template_list;
- template_list = tmpl;
-
- /*
- * Count the number of XORs done during a whole jiffy, and use
- * this to calculate the speed of checksumming. We use a 2-page
- * allocation to have guaranteed color L1-cache layout.
- */
- max = 0;
- for (i = 0; i < 5; i++) {
- now = jiffies;
- count = 0;
- while (jiffies == now) {
- mb();
- tmpl->do_2(BENCH_SIZE, b1, b2);
- mb();
- count++;
- mb();
- }
- if (count > max)
- max = count;
- }
-
- speed = max * (HZ * BENCH_SIZE / 1024);
- tmpl->speed = speed;
-
- printk(" %-10s: %5d.%03d MB/sec\n", tmpl->name,
- speed / 1000, speed % 1000);
-}
-
-static int
-calibrate_xor_block(void)
-{
- void *b1, *b2;
- struct xor_block_template *f, *fastest;
-
- b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
- if (! b1) {
- printk("raid5: Yikes! No memory available.\n");
- return -ENOMEM;
- }
- b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
-
- /*
- * If this arch/cpu has a short-circuited selection, don't loop through all
- * the possible functions, just test the best one
- */
-
- fastest = NULL;
-
-#ifdef XOR_SELECT_TEMPLATE
- fastest = XOR_SELECT_TEMPLATE(fastest);
-#endif
-
-#define xor_speed(templ) do_xor_speed((templ), b1, b2)
-
- if (fastest) {
- printk(KERN_INFO "raid5: automatically using best checksumming function: %s\n",
- fastest->name);
- xor_speed(fastest);
- } else {
- printk(KERN_INFO "raid5: measuring checksumming speed\n");
- XOR_TRY_TEMPLATES;
- fastest = template_list;
- for (f = fastest; f; f = f->next)
- if (f->speed > fastest->speed)
- fastest = f;
- }
-
- printk("raid5: using function: %s (%d.%03d MB/sec)\n",
- fastest->name, fastest->speed / 1000, fastest->speed % 1000);
-
-#undef xor_speed
-
- free_pages((unsigned long)b1, 2);
-
- active_template = fastest;
- return 0;
-}
-
-static __exit void xor_exit(void) { }
-
-EXPORT_SYMBOL(xor_block);
-MODULE_LICENSE("GPL");
-
-module_init(calibrate_xor_block);
-module_exit(xor_exit);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 624b21cef5b3..d9d033e07e19 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,8 +80,12 @@ config VIDEO_BUF_DVB
config VIDEO_BTCX
tristate
+config VIDEO_IR_I2C
+ tristate
+
config VIDEO_IR
tristate
+ select VIDEO_IR_I2C if I2C
config VIDEO_TVEEPROM
tristate
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/common/ir-functions.c
index fcb194135627..a3292e955aaa 100644
--- a/drivers/media/common/ir-functions.c
+++ b/drivers/media/common/ir-functions.c
@@ -107,21 +107,20 @@ void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir,
}
/* -------------------------------------------------------------------------- */
-
+/* extract mask bits out of data and pack them into the result */
u32 ir_extract_bits(u32 data, u32 mask)
{
- int mbit, vbit;
- u32 value;
+ u32 vbit = 1, value = 0;
+
+ do {
+ if (mask&1) {
+ if (data&1)
+ value |= vbit;
+ vbit<<=1;
+ }
+ data>>=1;
+ } while (mask>>=1);
- value = 0;
- vbit = 0;
- for (mbit = 0; mbit < 32; mbit++) {
- if (!(mask & ((u32)1 << mbit)))
- continue;
- if (data & ((u32)1 << mbit))
- value |= (1 << vbit);
- vbit++;
- }
return value;
}
@@ -346,8 +345,8 @@ void ir_rc5_timer_end(unsigned long data)
}
/* Set/reset key-up timer */
- timeout = current_jiffies + (500 + ir->rc5_key_timeout
- * HZ) / 1000;
+ timeout = current_jiffies +
+ msecs_to_jiffies(ir->rc5_key_timeout);
mod_timer(&ir->timer_keyup, timeout);
/* Save code for repeat test */
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index ef3e54cd9407..ba6701e97671 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -27,7 +27,7 @@ static int saa7146_num;
unsigned int saa7146_debug;
-module_param(saa7146_debug, int, 0644);
+module_param(saa7146_debug, uint, 0644);
MODULE_PARM_DESC(saa7146_debug, "debug level (default: 0)");
#if 0
@@ -130,10 +130,10 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
/********************************************************************************/
/* common page table functions */
-char *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
+void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt)
{
int pages = (length+PAGE_SIZE-1)/PAGE_SIZE;
- char *mem = vmalloc_32(length);
+ void *mem = vmalloc_32(length);
int slen = 0;
if (NULL == mem)
@@ -168,7 +168,7 @@ err_null:
return NULL;
}
-void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, char *mem, struct saa7146_pgtable *pt)
+void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt)
{
pci_unmap_sg(pci, pt->slist, pt->nents, PCI_DMA_FROMDEVICE);
saa7146_pgtable_free(pci, pt);
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index e3d04a4cef4d..664280c78ff2 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -889,9 +889,9 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
DEB_EE(("VIDIOC_QUERYCAP\n"));
- strcpy(cap->driver, "saa7146 v4l2");
- strlcpy(cap->card, dev->ext->name, sizeof(cap->card));
- sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci));
+ strcpy((char *)cap->driver, "saa7146 v4l2");
+ strlcpy((char *)cap->card, dev->ext->name, sizeof(cap->card));
+ sprintf((char *)cap->bus_info,"PCI:%s", pci_name(dev->pci));
cap->version = SAA7146_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
@@ -968,7 +968,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
}
memset(f,0,sizeof(*f));
f->index = index;
- strlcpy(f->description,formats[index].name,sizeof(f->description));
+ strlcpy((char *)f->description,formats[index].name,sizeof(f->description));
f->pixelformat = formats[index].pixelformat;
break;
}
diff --git a/drivers/media/dvb/b2c2/Kconfig b/drivers/media/dvb/b2c2/Kconfig
index a0dcd59da76e..3197aeb61d1f 100644
--- a/drivers/media/dvb/b2c2/Kconfig
+++ b/drivers/media/dvb/b2c2/Kconfig
@@ -1,7 +1,7 @@
config DVB_B2C2_FLEXCOP
tristate "Technisat/B2C2 FlexCopII(b) and FlexCopIII adapters"
depends on DVB_CORE && I2C
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_STV0299 if !DVB_FE_CUSTOMISE
select DVB_MT352 if !DVB_FE_CUSTOMISE
select DVB_MT312 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/b2c2/Makefile b/drivers/media/dvb/b2c2/Makefile
index bff00b58bf65..e97ff60a1eff 100644
--- a/drivers/media/dvb/b2c2/Makefile
+++ b/drivers/media/dvb/b2c2/Makefile
@@ -12,4 +12,4 @@ obj-$(CONFIG_DVB_B2C2_FLEXCOP_PCI) += b2c2-flexcop-pci.o
b2c2-flexcop-usb-objs = flexcop-usb.o
obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index b02c2fd65baa..0378fd646591 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -500,13 +500,13 @@ int flexcop_frontend_init(struct flexcop_device *fc)
/* try the air atsc 2nd generation (nxt2002) */
if ((fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, &fc->i2c_adap)) != NULL) {
fc->dev_type = FC_AIR_ATSC2;
- dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, &dvb_pll_samsung_tbmv);
+ dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV);
info("found the nxt2002 at i2c address: 0x%02x",samsung_tbmv_config.demod_address);
} else
/* try the air atsc 3nd generation (lgdt3303) */
if ((fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, &fc->i2c_adap)) != NULL) {
fc->dev_type = FC_AIR_ATSC3;
- dvb_attach(dvb_pll_attach, fc->fe, 0x61, &fc->i2c_adap, &dvb_pll_lg_tdvs_h06xf);
+ dvb_attach(dvb_pll_attach, fc->fe, 0x61, &fc->i2c_adap, DVB_PLL_LG_TDVS_H06XF);
info("found the lgdt3303 at i2c address: 0x%02x",air2pc_atsc_hd5000_config.demod_address);
} else
/* try the air atsc 1nd generation (bcm3510)/panasonic ct10s */
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index cfd6fb729a61..ea666174e988 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -7,7 +7,7 @@ config DVB_BT8XX
select DVB_CX24110 if !DVB_FE_CUSTOMISE
select DVB_OR51211 if !DVB_FE_CUSTOMISE
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_ZL10353 if !DVB_FE_CUSTOMISE
select FW_LOADER
help
diff --git a/drivers/media/dvb/bt8xx/Makefile b/drivers/media/dvb/bt8xx/Makefile
index 9d197efb481d..84cf70504d17 100644
--- a/drivers/media/dvb/bt8xx/Makefile
+++ b/drivers/media/dvb/bt8xx/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/video/bt8xx -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/video/bt8xx -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index e908e3cf1e50..b7a17e69ca4d 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -1652,7 +1652,7 @@ static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_paramet
static int dst_tune_frontend(struct dvb_frontend* fe,
struct dvb_frontend_parameters* p,
unsigned int mode_flags,
- int *delay,
+ unsigned int *delay,
fe_status_t *status)
{
struct dst_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 4f1c09bee538..67613eb6fa3d 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -611,7 +611,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
card->fe = dvb_attach(lgdt330x_attach, &tdvs_tua6034_config, card->i2c_adapter);
if (card->fe != NULL) {
dvb_attach(dvb_pll_attach, card->fe, 0x61,
- card->i2c_adapter, &dvb_pll_lg_tdvs_h06xf);
+ card->i2c_adapter, DVB_PLL_LG_TDVS_H06XF);
dprintk ("dvb_bt8xx: lgdt330x detected\n");
}
break;
@@ -692,6 +692,9 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
case BTTV_BOARD_PC_HDTV:
card->fe = dvb_attach(or51211_attach, &or51211_config, card->i2c_adapter);
+ if (card->fe != NULL)
+ dvb_attach(dvb_pll_attach, card->fe, 0x61,
+ card->i2c_adapter, DVB_PLL_FCV1236D);
break;
}
diff --git a/drivers/media/dvb/cinergyT2/Makefile b/drivers/media/dvb/cinergyT2/Makefile
index c51aece20f9f..d762d8cb0cf1 100644
--- a/drivers/media/dvb/cinergyT2/Makefile
+++ b/drivers/media/dvb/cinergyT2/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_CINERGYT2) += cinergyT2.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index b40af48a2edb..28929b618e20 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -829,7 +829,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
input_dev->id.vendor = cinergyt2->udev->descriptor.idVendor;
input_dev->id.product = cinergyt2->udev->descriptor.idProduct;
input_dev->id.version = 1;
- input_dev->cdev.dev = &cinergyt2->udev->dev;
+ input_dev->dev.parent = &cinergyt2->udev->dev;
err = input_register_device(input_dev);
if (err) {
@@ -905,12 +905,11 @@ static int cinergyt2_probe (struct usb_interface *intf,
struct cinergyt2 *cinergyt2;
int err;
- if (!(cinergyt2 = kmalloc (sizeof(struct cinergyt2), GFP_KERNEL))) {
+ if (!(cinergyt2 = kzalloc (sizeof(struct cinergyt2), GFP_KERNEL))) {
dprintk(1, "out of memory?!?\n");
return -ENOMEM;
}
- memset (cinergyt2, 0, sizeof (struct cinergyt2));
usb_set_intfdata (intf, (void *) cinergyt2);
mutex_init(&cinergyt2->sem);
@@ -1000,18 +999,15 @@ static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
if (cinergyt2->disconnect_pending || mutex_lock_interruptible(&cinergyt2->wq_sem))
return -ERESTARTSYS;
- if (1) {
- cinergyt2_suspend_rc(cinergyt2);
- cancel_rearming_delayed_work(&cinergyt2->query_work);
+ cinergyt2_suspend_rc(cinergyt2);
+ cancel_rearming_delayed_work(&cinergyt2->query_work);
- mutex_lock(&cinergyt2->sem);
- if (cinergyt2->streaming)
- cinergyt2_stop_stream_xfer(cinergyt2);
- cinergyt2_sleep(cinergyt2, 1);
- mutex_unlock(&cinergyt2->sem);
- }
+ mutex_lock(&cinergyt2->sem);
+ if (cinergyt2->streaming)
+ cinergyt2_stop_stream_xfer(cinergyt2);
+ cinergyt2_sleep(cinergyt2, 1);
+ mutex_unlock(&cinergyt2->sem);
- mutex_unlock(&cinergyt2->wq_sem);
return 0;
}
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 275df65fde99..5394de2e4ce0 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -97,7 +97,7 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src,
if (avail > todo)
avail = todo;
- ret = dvb_ringbuffer_read(src, buf, avail, 1);
+ ret = dvb_ringbuffer_read(src, (u8 *)buf, avail, 1);
if (ret < 0)
break;
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 2a03bf53cb29..4fadddb264d6 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -175,7 +175,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * e
* @param nlen Number of bytes in needle.
* @return Pointer into haystack needle was found at, or NULL if not found.
*/
-static u8 *findstr(u8 * haystack, int hlen, u8 * needle, int nlen)
+static char *findstr(char * haystack, int hlen, char * needle, int nlen)
{
int i;
@@ -482,7 +482,7 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
}
/* check it contains the correct DVB string */
- dvb_str = findstr(tuple, tupleLength, "DVB_CI_V", 8);
+ dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8);
if (dvb_str == NULL)
return -EINVAL;
if (tupleLength < ((dvb_str - (char *) tuple) + 12))
@@ -513,8 +513,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
ca->slot_info[slot].config_option = tuple[0] & 0x3f;
/* OK, check it contains the correct strings */
- if ((findstr(tuple, tupleLength, "DVB_HOST", 8) == NULL) ||
- (findstr(tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL))
+ if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8) == NULL) ||
+ (findstr((char *)tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL))
break;
got_cftableentry = 1;
@@ -1300,7 +1300,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
struct dvb_ca_private *ca = dvbdev->priv;
u8 slot, connection_id;
int status;
- char fragbuf[HOST_LINK_BUF_SIZE];
+ u8 fragbuf[HOST_LINK_BUF_SIZE];
int fragpos = 0;
int fraglen;
unsigned long timeout;
@@ -1486,7 +1486,7 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf,
}
if ((status = dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 2,
- buf + pktlen, fraglen, 1)) < 0) {
+ (u8 *)buf + pktlen, fraglen, 1)) < 0) {
goto exit;
}
pktlen += fraglen;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 6d8d1c3df863..cb6987fce26c 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -1068,7 +1068,7 @@ static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count)
if (mutex_lock_interruptible(&dvbdemux->mutex))
return -ERESTARTSYS;
- dvb_dmx_swfilter(dvbdemux, buf, count);
+ dvb_dmx_swfilter(dvbdemux, (u8 *)buf, count);
mutex_unlock(&dvbdemux->mutex);
if (signal_pending(current))
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index f4e4ca2dcade..b6c7f6610ec5 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -523,6 +523,7 @@ static int dvb_frontend_thread(void *data)
dvb_frontend_init(fe);
+ set_freezable();
while (1) {
up(&fepriv->sem); /* is locked when we enter the thread... */
restart:
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index f233d78bc364..a770a87b9a93 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -103,7 +103,7 @@ struct dvb_frontend_ops {
int (*tune)(struct dvb_frontend* fe,
struct dvb_frontend_parameters* params,
unsigned int mode_flags,
- int *delay,
+ unsigned int *delay,
fe_status_t *status);
/* get frontend tuning algorithm from the module */
int (*get_frontend_algo)(struct dvb_frontend *fe);
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 4ebf33a5ffa2..acf026342ec5 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -347,7 +347,8 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
{
struct dvb_net_priv *priv = dev->priv;
unsigned long skipped = 0L;
- u8 *ts, *ts_end, *from_where = NULL, ts_remain = 0, how_much = 0, new_ts = 1;
+ const u8 *ts, *ts_end, *from_where = NULL;
+ u8 ts_remain = 0, how_much = 0, new_ts = 1;
struct ethhdr *ethh = NULL;
#ifdef ULE_DEBUG
@@ -364,7 +365,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
/* For all TS cells in current buffer.
* Appearently, we are called for every single TS cell.
*/
- for (ts = (char *)buf, ts_end = (char *)buf + buf_len; ts < ts_end; /* no default incr. */ ) {
+ for (ts = buf, ts_end = buf + buf_len; ts < ts_end; /* no default incr. */ ) {
if (new_ts) {
/* We are about to process a new TS cell. */
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a9fa3337dd81..9ef0c00605ee 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -208,7 +208,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if ((id = dvbdev_get_free_id (adap, type)) < 0){
mutex_unlock(&dvbdev_register_lock);
*pdvbdev = NULL;
- printk ("%s: could get find free device id...\n", __FUNCTION__);
+ printk(KERN_ERR "%s: couldn't find free device id\n", __FUNCTION__);
return -ENFILE;
}
@@ -252,7 +252,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
return PTR_ERR(clsdev);
}
- dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
+ dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
adap->num, dnames[type], id, nums2minor(adap->num, type, id),
nums2minor(adap->num, type, id));
@@ -311,7 +311,7 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name, struct modu
memset (adap, 0, sizeof(struct dvb_adapter));
INIT_LIST_HEAD (&adap->device_list);
- printk ("DVB: registering new adapter (%s).\n", name);
+ printk(KERN_INFO "DVB: registering new adapter (%s)\n", name);
adap->num = num;
adap->name = name;
@@ -407,13 +407,13 @@ static int __init init_dvbdev(void)
dev_t dev = MKDEV(DVB_MAJOR, 0);
if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
- printk("dvb-core: unable to get major %d\n", DVB_MAJOR);
+ printk(KERN_ERR "dvb-core: unable to get major %d\n", DVB_MAJOR);
return retval;
}
cdev_init(&dvb_device_cdev, &dvb_device_fops);
if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
- printk("dvb-core: unable to get major %d\n", DVB_MAJOR);
+		printk(KERN_ERR "dvb-core: unable to register character device\n");
goto error;
}
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 54488737a08f..40e41f2f5afe 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -2,7 +2,6 @@ config DVB_USB
tristate "Support for various USB DVB devices"
depends on DVB_CORE && USB && I2C
select FW_LOADER
- select DVB_PLL
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
@@ -27,13 +26,14 @@ config DVB_USB_A800
depends on DVB_USB
select DVB_DIB3000MC
select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+ select DVB_PLL if !DVB_FE_CUSTOMISE
help
Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver.
config DVB_USB_DIBUSB_MB
tristate "DiBcom USB DVB-T devices (based on the DiB3000M-B) (see help for device list)"
depends on DVB_USB
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_DIB3000MB
select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
help
@@ -89,7 +89,7 @@ config DVB_USB_DIB0700
config DVB_USB_UMT_010
tristate "HanfTek UMT-010 DVB-T USB2.0 support"
depends on DVB_USB
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_DIB3000MC
select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
help
@@ -98,7 +98,7 @@ config DVB_USB_UMT_010
config DVB_USB_CXUSB
tristate "Conexant USB2.0 hybrid reference design support"
depends on DVB_USB
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_CX22702 if !DVB_FE_CUSTOMISE
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
select DVB_MT352 if !DVB_FE_CUSTOMISE
@@ -142,7 +142,7 @@ config DVB_USB_AU6610
config DVB_USB_DIGITV
tristate "Nebula Electronics uDigiTV DVB-T USB2.0 support"
depends on DVB_USB
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_NXT6000 if !DVB_FE_CUSTOMISE
select DVB_MT352 if !DVB_FE_CUSTOMISE
help
@@ -188,6 +188,7 @@ config DVB_USB_NOVA_T_USB2
depends on DVB_USB
select DVB_DIB3000MC
select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+ select DVB_PLL if !DVB_FE_CUSTOMISE
help
Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
@@ -216,5 +217,23 @@ config DVB_USB_OPERA1
tristate "Opera1 DVB-S USB2.0 receiver"
depends on DVB_USB
select DVB_STV0299 if !DVB_FE_CUSTOMISE
+ select DVB_PLL if !DVB_FE_CUSTOMISE
help
Say Y here to support the Opera DVB-S USB2.0 receiver.
+
+config DVB_USB_AF9005
+ tristate "Afatech AF9005 DVB-T USB1.1 support"
+ depends on DVB_USB && EXPERIMENTAL
+ select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+ select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE
+ help
+ Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver
+ and the TerraTec Cinergy T USB XE (Rev.1)
+
+config DVB_USB_AF9005_REMOTE
+ tristate "Afatech AF9005 default remote control support"
+ depends on DVB_USB_AF9005
+ help
+ Say Y here to support the default remote control decoding for the
+ Afatech AF9005 based receiver.
+
diff --git a/drivers/media/dvb/dvb-usb/Makefile b/drivers/media/dvb/dvb-usb/Makefile
index 976f840cc904..73ac0a93fdeb 100644
--- a/drivers/media/dvb/dvb-usb/Makefile
+++ b/drivers/media/dvb/dvb-usb/Makefile
@@ -55,4 +55,10 @@ dvb-usb-opera-objs = opera1.o
obj-$(CONFIG_DVB_USB_OPERA1) += dvb-usb-opera.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+dvb-usb-af9005-objs = af9005.o af9005-fe.o
+obj-$(CONFIG_DVB_USB_AF9005) += dvb-usb-af9005.o
+
+dvb-usb-af9005-remote-objs = af9005-remote.o
+obj-$(CONFIG_DVB_USB_AF9005_REMOTE) += dvb-usb-af9005-remote.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/dvb-usb/af9005-fe.c b/drivers/media/dvb/dvb-usb/af9005-fe.c
new file mode 100644
index 000000000000..7195c9461524
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005-fe.c
@@ -0,0 +1,1503 @@
+/* Frontend part of the Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#include "af9005.h"
+#include "af9005-script.h"
+#include "mt2060.h"
+#include "qt1010.h"
+#include <asm/div64.h>
+
+struct af9005_fe_state {
+ struct dvb_usb_device *d;
+ struct dvb_frontend *tuner;
+
+ fe_status_t stat;
+
+ /* retraining parameters */
+ u32 original_fcw;
+ u16 original_rf_top;
+ u16 original_if_top;
+ u16 original_if_min;
+ u16 original_aci0_if_top;
+ u16 original_aci1_if_top;
+ u16 original_aci0_if_min;
+ u8 original_if_unplug_th;
+ u8 original_rf_unplug_th;
+ u8 original_dtop_if_unplug_th;
+ u8 original_dtop_rf_unplug_th;
+
+ /* statistics */
+ u32 pre_vit_error_count;
+ u32 pre_vit_bit_count;
+ u32 ber;
+ u32 post_vit_error_count;
+ u32 post_vit_bit_count;
+ u32 unc;
+ u16 abort_count;
+
+ int opened;
+ int strong;
+ unsigned long next_status_check;
+ struct dvb_frontend frontend;
+};
+
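+/* The AGC registers hold 10-bit values split across two bytes: the low
+ * 8 bits live in reglo, the top 2 bits sit at bit offset "pos" of reghi.
+ */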
+static int af9005_write_word_agc(struct dvb_usb_device *d, u16 reghi,
+ u16 reglo, u8 pos, u8 len, u16 value)
+{
+	int ret;
+
+	if ((ret = af9005_write_ofdm_register(d, reglo, (u8) (value & 0xff))))
+		return ret;
+	return af9005_write_register_bits(d, reghi, pos, len,
+					  (u8) ((value & 0x0300) >> 8));
+}
+
+static int af9005_read_word_agc(struct dvb_usb_device *d, u16 reghi,
+ u16 reglo, u8 pos, u8 len, u16 * value)
+{
+ int ret;
+ u8 temp0, temp1;
+
+ if ((ret = af9005_read_ofdm_register(d, reglo, &temp0)))
+ return ret;
+ if ((ret = af9005_read_ofdm_register(d, reghi, &temp1)))
+ return ret;
+ switch (pos) {
+ case 0:
+ *value = ((u16) (temp1 & 0x03) << 8) + (u16) temp0;
+ break;
+ case 2:
+ *value = ((u16) (temp1 & 0x0C) << 6) + (u16) temp0;
+ break;
+ case 4:
+ *value = ((u16) (temp1 & 0x30) << 4) + (u16) temp0;
+ break;
+ case 6:
+ *value = ((u16) (temp1 & 0xC0) << 2) + (u16) temp0;
+ break;
+ default:
+ err("invalid pos in read word agc");
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+static int af9005_is_fecmon_available(struct dvb_frontend *fe, int *available)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 temp;
+
+ *available = false;
+
+ ret = af9005_read_register_bits(state->d, xd_p_fec_vtb_rsd_mon_en,
+ fec_vtb_rsd_mon_en_pos,
+ fec_vtb_rsd_mon_en_len, &temp);
+ if (ret)
+ return ret;
+ if (temp & 1) {
+ ret =
+ af9005_read_register_bits(state->d,
+ xd_p_reg_ofsm_read_rbc_en,
+ reg_ofsm_read_rbc_en_pos,
+ reg_ofsm_read_rbc_en_len, &temp);
+ if (ret)
+ return ret;
+ if ((temp & 1) == 0)
+ *available = true;
+
+ }
+ return 0;
+}
+
+static int af9005_get_post_vit_err_cw_count(struct dvb_frontend *fe,
+ u32 * post_err_count,
+ u32 * post_cw_count,
+ u16 * abort_count)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u32 err_count;
+ u32 cw_count;
+ u8 temp, temp0, temp1, temp2;
+ u16 loc_abort_count;
+
+ *post_err_count = 0;
+ *post_cw_count = 0;
+
+ /* check if error bit count is ready */
+ ret =
+ af9005_read_register_bits(state->d, xd_r_fec_rsd_ber_rdy,
+ fec_rsd_ber_rdy_pos, fec_rsd_ber_rdy_len,
+ &temp);
+ if (ret)
+ return ret;
+ if (!temp) {
+ deb_info("rsd counter not ready\n");
+ return 100;
+ }
+ /* get abort count */
+ ret =
+ af9005_read_ofdm_register(state->d,
+ xd_r_fec_rsd_abort_packet_cnt_7_0,
+ &temp0);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d,
+ xd_r_fec_rsd_abort_packet_cnt_15_8,
+ &temp1);
+ if (ret)
+ return ret;
+ loc_abort_count = ((u16) temp1 << 8) + temp0;
+
+ /* get error count */
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_fec_rsd_bit_err_cnt_7_0,
+ &temp0);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_fec_rsd_bit_err_cnt_15_8,
+ &temp1);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_fec_rsd_bit_err_cnt_23_16,
+ &temp2);
+ if (ret)
+ return ret;
+ err_count = ((u32) temp2 << 16) + ((u32) temp1 << 8) + temp0;
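+	/* exclude aborted packets: each one seems to be counted as
+	   8 bytes x 8 bits of errors (assumption based on the scaling below) */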
+	*post_err_count = err_count - (u32) loc_abort_count * 8 * 8;
+
+ /* get RSD packet number */
+ ret =
+ af9005_read_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_7_0,
+ &temp0);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_15_8,
+ &temp1);
+ if (ret)
+ return ret;
+ cw_count = ((u32) temp1 << 8) + temp0;
+ if (cw_count == 0) {
+ err("wrong RSD packet count");
+ return -EIO;
+ }
+ deb_info("POST abort count %d err count %d rsd packets %d\n",
+ loc_abort_count, err_count, cw_count);
+ *post_cw_count = cw_count - (u32) loc_abort_count;
+ *abort_count = loc_abort_count;
+ return 0;
+
+}
+
+static int af9005_get_post_vit_ber(struct dvb_frontend *fe,
+ u32 * post_err_count, u32 * post_cw_count,
+ u16 * abort_count)
+{
+ u32 loc_cw_count = 0, loc_err_count;
+ u16 loc_abort_count;
+ int ret;
+
+ ret =
+ af9005_get_post_vit_err_cw_count(fe, &loc_err_count, &loc_cw_count,
+ &loc_abort_count);
+ if (ret)
+ return ret;
+ *post_err_count = loc_err_count;
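+	/* a Reed-Solomon codeword is 204 bytes, so convert codewords to bits */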
+ *post_cw_count = loc_cw_count * 204 * 8;
+ *abort_count = loc_abort_count;
+
+ return 0;
+}
+
+static int af9005_get_pre_vit_err_bit_count(struct dvb_frontend *fe,
+ u32 * pre_err_count,
+ u32 * pre_bit_count)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ u8 temp, temp0, temp1, temp2;
+ u32 super_frame_count, x, bits;
+ int ret;
+
+ ret =
+ af9005_read_register_bits(state->d, xd_r_fec_vtb_ber_rdy,
+ fec_vtb_ber_rdy_pos, fec_vtb_ber_rdy_len,
+ &temp);
+ if (ret)
+ return ret;
+ if (!temp) {
+ deb_info("viterbi counter not ready\n");
+ return 101; /* ERR_APO_VTB_COUNTER_NOT_READY; */
+ }
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_fec_vtb_err_bit_cnt_7_0,
+ &temp0);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_fec_vtb_err_bit_cnt_15_8,
+ &temp1);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_fec_vtb_err_bit_cnt_23_16,
+ &temp2);
+ if (ret)
+ return ret;
+ *pre_err_count = ((u32) temp2 << 16) + ((u32) temp1 << 8) + temp0;
+
+ ret =
+ af9005_read_ofdm_register(state->d, xd_p_fec_super_frm_unit_7_0,
+ &temp0);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_p_fec_super_frm_unit_15_8,
+ &temp1);
+ if (ret)
+ return ret;
+ super_frame_count = ((u32) temp1 << 8) + temp0;
+ if (super_frame_count == 0) {
+ deb_info("super frame count 0\n");
+ return 102;
+ }
+
+ /* read fft mode */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_txmod,
+ reg_tpsd_txmod_pos, reg_tpsd_txmod_len,
+ &temp);
+ if (ret)
+ return ret;
+ if (temp == 0) {
+ /* 2K */
+ x = 1512;
+ } else if (temp == 1) {
+ /* 8k */
+ x = 6048;
+ } else {
+ err("Invalid fft mode");
+ return -EINVAL;
+ }
+
+ /* read constellation mode */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_const,
+ reg_tpsd_const_pos, reg_tpsd_const_len,
+ &temp);
+ if (ret)
+ return ret;
+ switch (temp) {
+ case 0: /* QPSK */
+ bits = 2;
+ break;
+ case 1: /* QAM_16 */
+ bits = 4;
+ break;
+ case 2: /* QAM_64 */
+ bits = 6;
+ break;
+ default:
+ err("invalid constellation mode");
+ return -EINVAL;
+ }
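+	/* bits per super frame: 4 OFDM frames x 68 symbols x "x" data carriers
+	   x bits per carrier */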
+ *pre_bit_count = super_frame_count * 68 * 4 * x * bits;
+ deb_info("PRE err count %d frame count %d bit count %d\n",
+ *pre_err_count, super_frame_count, *pre_bit_count);
+ return 0;
+}
+
+static int af9005_reset_pre_viterbi(struct dvb_frontend *fe)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+
+ /* set super frame count to 1 */
+ ret =
+ af9005_write_ofdm_register(state->d, xd_p_fec_super_frm_unit_7_0,
+ 1 & 0xff);
+ if (ret)
+ return ret;
+	ret = af9005_write_ofdm_register(state->d, xd_p_fec_super_frm_unit_15_8,
+					 1 >> 8);
+ if (ret)
+ return ret;
+ /* reset pre viterbi error count */
+ ret =
+ af9005_write_register_bits(state->d, xd_p_fec_vtb_ber_rst,
+ fec_vtb_ber_rst_pos, fec_vtb_ber_rst_len,
+ 1);
+
+ return ret;
+}
+
+static int af9005_reset_post_viterbi(struct dvb_frontend *fe)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+
+ /* set packet unit */
+ ret =
+ af9005_write_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_7_0,
+ 10000 & 0xff);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_ofdm_register(state->d, xd_p_fec_rsd_packet_unit_15_8,
+ 10000 >> 8);
+ if (ret)
+ return ret;
+ /* reset post viterbi error count */
+ ret =
+ af9005_write_register_bits(state->d, xd_p_fec_rsd_ber_rst,
+ fec_rsd_ber_rst_pos, fec_rsd_ber_rst_len,
+ 1);
+
+ return ret;
+}
+
+static int af9005_get_statistic(struct dvb_frontend *fe)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret, fecavailable;
+ u64 numerator, denominator;
+
+ deb_info("GET STATISTIC\n");
+ ret = af9005_is_fecmon_available(fe, &fecavailable);
+ if (ret)
+ return ret;
+ if (!fecavailable) {
+ deb_info("fecmon not available\n");
+ return 0;
+ }
+
+ ret = af9005_get_pre_vit_err_bit_count(fe, &state->pre_vit_error_count,
+ &state->pre_vit_bit_count);
+ if (ret == 0) {
+ af9005_reset_pre_viterbi(fe);
+ if (state->pre_vit_bit_count > 0) {
+			/* according to v 0.0.4 of the dvb api the ber is
+			   expressed in multiples of 10^-9, so scale the
+			   error count by 10^9 = 1000000000 before dividing */
+			numerator =
+			    (u64) state->pre_vit_error_count * (u64) 1000000000;
+			denominator = (u64) state->pre_vit_bit_count;
+			/* do_div() leaves the quotient in numerator */
+			do_div(numerator, denominator);
+			state->ber = (u32) numerator;
+ } else {
+ state->ber = 0xffffffff;
+ }
+ }
+
+ ret = af9005_get_post_vit_ber(fe, &state->post_vit_error_count,
+ &state->post_vit_bit_count,
+ &state->abort_count);
+ if (ret == 0) {
+ ret = af9005_reset_post_viterbi(fe);
+ state->unc += state->abort_count;
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int af9005_fe_refresh_state(struct dvb_frontend *fe)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ if (time_after(jiffies, state->next_status_check)) {
+ deb_info("REFRESH STATE\n");
+
+ /* statistics */
+ if (af9005_get_statistic(fe))
+ err("get_statistic_failed");
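+		/* rate-limit statistics refresh to once every 250 ms */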
+ state->next_status_check = jiffies + 250 * HZ / 1000;
+ }
+ return 0;
+}
+
+static int af9005_fe_read_status(struct dvb_frontend *fe, fe_status_t * stat)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ u8 temp;
+ int ret;
+
+ if (state->tuner == NULL)
+ return -ENODEV;
+
+ *stat = 0;
+ ret = af9005_read_register_bits(state->d, xd_p_agc_lock,
+ agc_lock_pos, agc_lock_len, &temp);
+ if (ret)
+ return ret;
+ if (temp)
+ *stat |= FE_HAS_SIGNAL;
+
+ ret = af9005_read_register_bits(state->d, xd_p_fd_tpsd_lock,
+ fd_tpsd_lock_pos, fd_tpsd_lock_len,
+ &temp);
+ if (ret)
+ return ret;
+ if (temp)
+ *stat |= FE_HAS_CARRIER;
+
+ ret = af9005_read_register_bits(state->d,
+ xd_r_mp2if_sync_byte_locked,
+ mp2if_sync_byte_locked_pos,
+				       mp2if_sync_byte_locked_len, &temp);
+ if (ret)
+ return ret;
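+	/* the MP2IF sync-byte lock doubles as the Viterbi and overall lock indication */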
+ if (temp)
+ *stat |= FE_HAS_SYNC | FE_HAS_VITERBI | FE_HAS_LOCK;
+ if (state->opened)
+ af9005_led_control(state->d, *stat & FE_HAS_LOCK);
+
+ ret =
+ af9005_read_register_bits(state->d, xd_p_reg_strong_sginal_detected,
+ reg_strong_sginal_detected_pos,
+ reg_strong_sginal_detected_len, &temp);
+ if (ret)
+ return ret;
+ if (temp != state->strong) {
+ deb_info("adjust for strong signal %d\n", temp);
+ state->strong = temp;
+ }
+ return 0;
+}
+
+static int af9005_fe_read_ber(struct dvb_frontend *fe, u32 * ber)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ if (state->tuner == NULL)
+ return -ENODEV;
+ af9005_fe_refresh_state(fe);
+ *ber = state->ber;
+ return 0;
+}
+
+static int af9005_fe_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ if (state->tuner == NULL)
+ return -ENODEV;
+ af9005_fe_refresh_state(fe);
+ *unc = state->unc;
+ return 0;
+}
+
+static int af9005_fe_read_signal_strength(struct dvb_frontend *fe,
+ u16 * strength)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 if_gain, rf_gain;
+
+ if (state->tuner == NULL)
+ return -ENODEV;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_reg_aagc_rf_gain,
+ &rf_gain);
+ if (ret)
+ return ret;
+ ret =
+ af9005_read_ofdm_register(state->d, xd_r_reg_aagc_if_gain,
+ &if_gain);
+ if (ret)
+ return ret;
+	/* this value has no real meaning, but I don't have the tables that
+	   relate the rf and if gain to dBm, so I just scale the value */
+ *strength = (512 - rf_gain - if_gain) << 7;
+ return 0;
+}
+
+static int af9005_fe_read_snr(struct dvb_frontend *fe, u16 * snr)
+{
+	/* the snr can be derived from the ber and the constellation,
+	   but I don't think this kind of complex calculation belongs
+	   in the driver. I may be wrong.... */
+ return -ENOSYS;
+}
+
+static int af9005_fe_program_cfoe(struct dvb_usb_device *d, fe_bandwidth_t bw)
+{
+ u8 temp0, temp1, temp2, temp3, buf[4];
+ int ret;
+ u32 NS_coeff1_2048Nu;
+ u32 NS_coeff1_8191Nu;
+ u32 NS_coeff1_8192Nu;
+ u32 NS_coeff1_8193Nu;
+ u32 NS_coeff2_2k;
+ u32 NS_coeff2_8k;
+
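+	/* bandwidth-dependent coefficients, presumably for the carrier
+	   frequency offset estimator (hence "cfoe") */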
+ switch (bw) {
+ case BANDWIDTH_6_MHZ:
+ NS_coeff1_2048Nu = 0x2ADB6DC;
+ NS_coeff1_8191Nu = 0xAB7313;
+ NS_coeff1_8192Nu = 0xAB6DB7;
+ NS_coeff1_8193Nu = 0xAB685C;
+ NS_coeff2_2k = 0x156DB6E;
+ NS_coeff2_8k = 0x55B6DC;
+ break;
+
+ case BANDWIDTH_7_MHZ:
+ NS_coeff1_2048Nu = 0x3200001;
+ NS_coeff1_8191Nu = 0xC80640;
+ NS_coeff1_8192Nu = 0xC80000;
+ NS_coeff1_8193Nu = 0xC7F9C0;
+ NS_coeff2_2k = 0x1900000;
+ NS_coeff2_8k = 0x640000;
+ break;
+
+ case BANDWIDTH_8_MHZ:
+ NS_coeff1_2048Nu = 0x3924926;
+ NS_coeff1_8191Nu = 0xE4996E;
+ NS_coeff1_8192Nu = 0xE49249;
+ NS_coeff1_8193Nu = 0xE48B25;
+ NS_coeff2_2k = 0x1C92493;
+ NS_coeff2_8k = 0x724925;
+ break;
+ default:
+		err("Invalid bandwidth %d.", bw);
+ return -EINVAL;
+ }
+
+ /*
+ * write NS_coeff1_2048Nu
+ */
+
+ temp0 = (u8) (NS_coeff1_2048Nu & 0x000000FF);
+ temp1 = (u8) ((NS_coeff1_2048Nu & 0x0000FF00) >> 8);
+ temp2 = (u8) ((NS_coeff1_2048Nu & 0x00FF0000) >> 16);
+ temp3 = (u8) ((NS_coeff1_2048Nu & 0x03000000) >> 24);
+
+ /* big endian to make 8051 happy */
+ buf[0] = temp3;
+ buf[1] = temp2;
+ buf[2] = temp1;
+ buf[3] = temp0;
+
+ /* cfoe_NS_2k_coeff1_25_24 */
+ ret = af9005_write_ofdm_register(d, 0xAE00, buf[0]);
+ if (ret)
+ return ret;
+
+ /* cfoe_NS_2k_coeff1_23_16 */
+ ret = af9005_write_ofdm_register(d, 0xAE01, buf[1]);
+ if (ret)
+ return ret;
+
+ /* cfoe_NS_2k_coeff1_15_8 */
+ ret = af9005_write_ofdm_register(d, 0xAE02, buf[2]);
+ if (ret)
+ return ret;
+
+ /* cfoe_NS_2k_coeff1_7_0 */
+ ret = af9005_write_ofdm_register(d, 0xAE03, buf[3]);
+ if (ret)
+ return ret;
+
+ /*
+ * write NS_coeff2_2k
+ */
+
+ temp0 = (u8) ((NS_coeff2_2k & 0x0000003F));
+ temp1 = (u8) ((NS_coeff2_2k & 0x00003FC0) >> 6);
+ temp2 = (u8) ((NS_coeff2_2k & 0x003FC000) >> 14);
+ temp3 = (u8) ((NS_coeff2_2k & 0x01C00000) >> 22);
+
+ /* big endian to make 8051 happy */
+ buf[0] = temp3;
+ buf[1] = temp2;
+ buf[2] = temp1;
+ buf[3] = temp0;
+
+ ret = af9005_write_ofdm_register(d, 0xAE04, buf[0]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE05, buf[1]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE06, buf[2]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE07, buf[3]);
+ if (ret)
+ return ret;
+
+ /*
+ * write NS_coeff1_8191Nu
+ */
+
+ temp0 = (u8) ((NS_coeff1_8191Nu & 0x000000FF));
+ temp1 = (u8) ((NS_coeff1_8191Nu & 0x0000FF00) >> 8);
+ temp2 = (u8) ((NS_coeff1_8191Nu & 0x00FFC000) >> 16);
+ temp3 = (u8) ((NS_coeff1_8191Nu & 0x03000000) >> 24);
+
+ /* big endian to make 8051 happy */
+ buf[0] = temp3;
+ buf[1] = temp2;
+ buf[2] = temp1;
+ buf[3] = temp0;
+
+ ret = af9005_write_ofdm_register(d, 0xAE08, buf[0]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE09, buf[1]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE0A, buf[2]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE0B, buf[3]);
+ if (ret)
+ return ret;
+
+ /*
+ * write NS_coeff1_8192Nu
+ */
+
+ temp0 = (u8) (NS_coeff1_8192Nu & 0x000000FF);
+ temp1 = (u8) ((NS_coeff1_8192Nu & 0x0000FF00) >> 8);
+ temp2 = (u8) ((NS_coeff1_8192Nu & 0x00FFC000) >> 16);
+ temp3 = (u8) ((NS_coeff1_8192Nu & 0x03000000) >> 24);
+
+ /* big endian to make 8051 happy */
+ buf[0] = temp3;
+ buf[1] = temp2;
+ buf[2] = temp1;
+ buf[3] = temp0;
+
+ ret = af9005_write_ofdm_register(d, 0xAE0C, buf[0]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE0D, buf[1]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE0E, buf[2]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE0F, buf[3]);
+ if (ret)
+ return ret;
+
+ /*
+ * write NS_coeff1_8193Nu
+ */
+
+ temp0 = (u8) ((NS_coeff1_8193Nu & 0x000000FF));
+ temp1 = (u8) ((NS_coeff1_8193Nu & 0x0000FF00) >> 8);
+ temp2 = (u8) ((NS_coeff1_8193Nu & 0x00FFC000) >> 16);
+ temp3 = (u8) ((NS_coeff1_8193Nu & 0x03000000) >> 24);
+
+ /* big endian to make 8051 happy */
+ buf[0] = temp3;
+ buf[1] = temp2;
+ buf[2] = temp1;
+ buf[3] = temp0;
+
+ ret = af9005_write_ofdm_register(d, 0xAE10, buf[0]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE11, buf[1]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE12, buf[2]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE13, buf[3]);
+ if (ret)
+ return ret;
+
+ /*
+ * write NS_coeff2_8k
+ */
+
+ temp0 = (u8) ((NS_coeff2_8k & 0x0000003F));
+ temp1 = (u8) ((NS_coeff2_8k & 0x00003FC0) >> 6);
+ temp2 = (u8) ((NS_coeff2_8k & 0x003FC000) >> 14);
+ temp3 = (u8) ((NS_coeff2_8k & 0x01C00000) >> 22);
+
+ /* big endian to make 8051 happy */
+ buf[0] = temp3;
+ buf[1] = temp2;
+ buf[2] = temp1;
+ buf[3] = temp0;
+
+ ret = af9005_write_ofdm_register(d, 0xAE14, buf[0]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE15, buf[1]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE16, buf[2]);
+ if (ret)
+ return ret;
+
+ ret = af9005_write_ofdm_register(d, 0xAE17, buf[3]);
+ return ret;
+
+}
+
+static int af9005_fe_select_bw(struct dvb_usb_device *d, fe_bandwidth_t bw)
+{
+ u8 temp;
+ switch (bw) {
+ case BANDWIDTH_6_MHZ:
+ temp = 0;
+ break;
+ case BANDWIDTH_7_MHZ:
+ temp = 1;
+ break;
+ case BANDWIDTH_8_MHZ:
+ temp = 2;
+ break;
+ default:
+		err("Invalid bandwidth %d.", bw);
+ return -EINVAL;
+ }
+ return af9005_write_register_bits(d, xd_g_reg_bw, reg_bw_pos,
+ reg_bw_len, temp);
+}
+
+static int af9005_fe_power(struct dvb_frontend *fe, int on)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ u8 temp = on;
+ int ret;
+ deb_info("power %s tuner\n", on ? "on" : "off");
+ ret = af9005_send_command(state->d, 0x03, &temp, 1, NULL, 0);
+ return ret;
+}
+
+static struct mt2060_config af9005_mt2060_config = {
+ 0xC0
+};
+
+static struct qt1010_config af9005_qt1010_config = {
+ 0xC4
+};
+
+static int af9005_fe_init(struct dvb_frontend *fe)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ int ret, i, scriptlen;
+ u8 temp, temp0 = 0, temp1 = 0, temp2 = 0;
+ u8 buf[2];
+ u16 if1;
+
+ deb_info("in af9005_fe_init\n");
+
+ /* reset */
+ deb_info("reset\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_I2C_reg_ofdm_rst_en,
+ 4, 1, 0x01)))
+ return ret;
+ if ((ret = af9005_write_ofdm_register(state->d, APO_REG_RESET, 0)))
+ return ret;
+ /* clear ofdm reset */
+ deb_info("clear ofdm reset\n");
+ for (i = 0; i < 150; i++) {
+ if ((ret =
+ af9005_read_ofdm_register(state->d,
+ xd_I2C_reg_ofdm_rst, &temp)))
+ return ret;
+ if (temp & (regmask[reg_ofdm_rst_len - 1] << reg_ofdm_rst_pos))
+ break;
+ msleep(10);
+ }
+ if (i == 150)
+ return -ETIMEDOUT;
+
+ /*FIXME in the dump
+ write B200 A9
+ write xd_g_reg_ofsm_clk 7
+ read eepr c6 (2)
+ read eepr c7 (2)
+ misc ctrl 3 -> 1
+ read eepr ca (6)
+ write xd_g_reg_ofsm_clk 0
+ write B200 a1
+ */
+ ret = af9005_write_ofdm_register(state->d, 0xb200, 0xa9);
+ if (ret)
+ return ret;
+ ret = af9005_write_ofdm_register(state->d, xd_g_reg_ofsm_clk, 0x07);
+ if (ret)
+ return ret;
+ temp = 0x01;
+ ret = af9005_send_command(state->d, 0x03, &temp, 1, NULL, 0);
+ if (ret)
+ return ret;
+ ret = af9005_write_ofdm_register(state->d, xd_g_reg_ofsm_clk, 0x00);
+ if (ret)
+ return ret;
+ ret = af9005_write_ofdm_register(state->d, 0xb200, 0xa1);
+ if (ret)
+ return ret;
+
+ temp = regmask[reg_ofdm_rst_len - 1] << reg_ofdm_rst_pos;
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_I2C_reg_ofdm_rst,
+ reg_ofdm_rst_pos, reg_ofdm_rst_len, 1)))
+ return ret;
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_I2C_reg_ofdm_rst,
+ reg_ofdm_rst_pos, reg_ofdm_rst_len, 0)))
+ return ret;
+
+ if (ret)
+ return ret;
+ /* don't know what register aefc is, but this is what the windows driver does */
+ ret = af9005_write_ofdm_register(state->d, 0xaefc, 0);
+ if (ret)
+ return ret;
+
+ /* set stand alone chip */
+ deb_info("set stand alone chip\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_reg_dca_stand_alone,
+ reg_dca_stand_alone_pos,
+ reg_dca_stand_alone_len, 1)))
+ return ret;
+
+ /* set dca upper & lower chip */
+ deb_info("set dca upper & lower chip\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_reg_dca_upper_chip,
+ reg_dca_upper_chip_pos,
+ reg_dca_upper_chip_len, 0)))
+ return ret;
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_reg_dca_lower_chip,
+ reg_dca_lower_chip_pos,
+ reg_dca_lower_chip_len, 0)))
+ return ret;
+
+ /* set 2wire master clock to 0x14 (for 60KHz) */
+ deb_info("set 2wire master clock to 0x14 (for 60KHz)\n");
+ if ((ret =
+ af9005_write_ofdm_register(state->d, xd_I2C_i2c_m_period, 0x14)))
+ return ret;
+
+ /* clear dca enable chip */
+ deb_info("clear dca enable chip\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_reg_dca_en,
+ reg_dca_en_pos, reg_dca_en_len, 0)))
+ return ret;
+ /* FIXME these are register bits, but I don't know which ones */
+ ret = af9005_write_ofdm_register(state->d, 0xa16c, 1);
+ if (ret)
+ return ret;
+ ret = af9005_write_ofdm_register(state->d, 0xa3c1, 0);
+ if (ret)
+ return ret;
+
+	/* init other parameters: program cfoe and select bandwidth */
+ deb_info("program cfoe\n");
+ if ((ret = af9005_fe_program_cfoe(state->d, BANDWIDTH_6_MHZ)))
+ return ret;
+ /* set read-update bit for constellation */
+ deb_info("set read-update bit for constellation\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_reg_feq_read_update,
+ reg_feq_read_update_pos,
+ reg_feq_read_update_len, 1)))
+ return ret;
+
+ /* sample code has a set MPEG TS code here
+ but sniffing reveals that it doesn't do it */
+
+ /* set read-update bit to 1 for DCA constellation */
+ deb_info("set read-update bit 1 for DCA constellation\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_reg_dca_read_update,
+ reg_dca_read_update_pos,
+ reg_dca_read_update_len, 1)))
+ return ret;
+
+ /* enable fec monitor */
+ deb_info("enable fec monitor\n");
+ if ((ret =
+ af9005_write_register_bits(state->d, xd_p_fec_vtb_rsd_mon_en,
+ fec_vtb_rsd_mon_en_pos,
+ fec_vtb_rsd_mon_en_len, 1)))
+ return ret;
+
+ /* FIXME should be register bits, I don't know which ones */
+	ret = af9005_write_ofdm_register(state->d, 0xa16c, 1);
+	if (ret)
+		return ret;
+	ret = af9005_write_ofdm_register(state->d, 0xa3c1, 0);
+	if (ret)
+		return ret;
+
+ /* set api_retrain_never_freeze */
+ deb_info("set api_retrain_never_freeze\n");
+ if ((ret = af9005_write_ofdm_register(state->d, 0xaefb, 0x01)))
+ return ret;
+
+ /* load init script */
+ deb_info("load init script\n");
+ scriptlen = sizeof(script) / sizeof(RegDesc);
+ for (i = 0; i < scriptlen; i++) {
+ if ((ret =
+ af9005_write_register_bits(state->d, script[i].reg,
+ script[i].pos,
+ script[i].len, script[i].val)))
+ return ret;
+ /* save 3 bytes of original fcw */
+ if (script[i].reg == 0xae18)
+ temp2 = script[i].val;
+ if (script[i].reg == 0xae19)
+ temp1 = script[i].val;
+ if (script[i].reg == 0xae1a)
+ temp0 = script[i].val;
+
+ /* save original unplug threshold */
+ if (script[i].reg == xd_p_reg_unplug_th)
+ state->original_if_unplug_th = script[i].val;
+ if (script[i].reg == xd_p_reg_unplug_rf_gain_th)
+ state->original_rf_unplug_th = script[i].val;
+ if (script[i].reg == xd_p_reg_unplug_dtop_if_gain_th)
+ state->original_dtop_if_unplug_th = script[i].val;
+ if (script[i].reg == xd_p_reg_unplug_dtop_rf_gain_th)
+ state->original_dtop_rf_unplug_th = script[i].val;
+
+ }
+ state->original_fcw =
+ ((u32) temp2 << 16) + ((u32) temp1 << 8) + (u32) temp0;
+
+
+ /* save original TOPs */
+ deb_info("save original TOPs\n");
+
+ /* RF TOP */
+ ret =
+ af9005_read_word_agc(state->d,
+ xd_p_reg_aagc_rf_top_numerator_9_8,
+ xd_p_reg_aagc_rf_top_numerator_7_0, 0, 2,
+ &state->original_rf_top);
+ if (ret)
+ return ret;
+
+ /* IF TOP */
+ ret =
+ af9005_read_word_agc(state->d,
+ xd_p_reg_aagc_if_top_numerator_9_8,
+ xd_p_reg_aagc_if_top_numerator_7_0, 0, 2,
+ &state->original_if_top);
+ if (ret)
+ return ret;
+
+ /* ACI 0 IF TOP */
+ ret =
+ af9005_read_word_agc(state->d, 0xA60E, 0xA60A, 4, 2,
+ &state->original_aci0_if_top);
+ if (ret)
+ return ret;
+
+ /* ACI 1 IF TOP */
+ ret =
+ af9005_read_word_agc(state->d, 0xA60E, 0xA60B, 6, 2,
+ &state->original_aci1_if_top);
+ if (ret)
+ return ret;
+
+ /* attach tuner and init */
+ if (state->tuner == NULL) {
+ /* read tuner and board id from eeprom */
+ ret = af9005_read_eeprom(adap->dev, 0xc6, buf, 2);
+ if (ret) {
+			err("Cannot read EEPROM\n");
+ return ret;
+ }
+ deb_info("Tuner id %d, board id %d\n", buf[0], buf[1]);
+ switch (buf[0]) {
+ case 2: /* MT2060 */
+ /* read if1 from eeprom */
+ ret = af9005_read_eeprom(adap->dev, 0xc8, buf, 2);
+ if (ret) {
+				err("Cannot read EEPROM\n");
+ return ret;
+ }
+ if1 = (u16) (buf[0] << 8) + buf[1];
+ state->tuner =
+ dvb_attach(mt2060_attach, fe, &adap->dev->i2c_adap,
+ &af9005_mt2060_config, if1);
+ if (state->tuner == NULL) {
+ deb_info("MT2060 attach failed\n");
+ return -ENODEV;
+ }
+ break;
+ case 3: /* QT1010 */
+ case 9: /* QT1010B */
+ state->tuner =
+ dvb_attach(qt1010_attach, fe, &adap->dev->i2c_adap,
+ &af9005_qt1010_config);
+ if (state->tuner == NULL) {
+ deb_info("QT1010 attach failed\n");
+ return -ENODEV;
+ }
+ break;
+ default:
+ err("Unsupported tuner type %d", buf[0]);
+ return -ENODEV;
+ }
+ ret = state->tuner->ops.tuner_ops.init(state->tuner);
+ if (ret)
+ return ret;
+ }
+
+ deb_info("profit!\n");
+ return 0;
+}
+
+static int af9005_fe_sleep(struct dvb_frontend *fe)
+{
+ return af9005_fe_power(fe, 0);
+}
+
+static int af9005_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+
+ if (acquire) {
+ state->opened++;
+ } else {
+
+ state->opened--;
+ if (!state->opened)
+ af9005_led_control(state->d, 0);
+ }
+ return 0;
+}
+
+static int af9005_fe_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *fep)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 temp, temp0, temp1, temp2;
+
+ deb_info("af9005_fe_set_frontend freq %d bw %d\n", fep->frequency,
+ fep->u.ofdm.bandwidth);
+ if (state->tuner == NULL) {
+ err("Tuner not attached");
+ return -ENODEV;
+ }
+
+ deb_info("turn off led\n");
+ /* not in the log */
+ ret = af9005_led_control(state->d, 0);
+ if (ret)
+ return ret;
+ /* not sure about the bits */
+ ret = af9005_write_register_bits(state->d, XD_MP2IF_MISC, 2, 1, 0);
+ if (ret)
+ return ret;
+
+ /* set FCW to default value */
+ deb_info("set FCW to default value\n");
+ temp0 = (u8) (state->original_fcw & 0x000000ff);
+ temp1 = (u8) ((state->original_fcw & 0x0000ff00) >> 8);
+ temp2 = (u8) ((state->original_fcw & 0x00ff0000) >> 16);
+ ret = af9005_write_ofdm_register(state->d, 0xae1a, temp0);
+ if (ret)
+ return ret;
+ ret = af9005_write_ofdm_register(state->d, 0xae19, temp1);
+ if (ret)
+ return ret;
+ ret = af9005_write_ofdm_register(state->d, 0xae18, temp2);
+ if (ret)
+ return ret;
+
+ /* restore original TOPs */
+ deb_info("restore original TOPs\n");
+ ret =
+ af9005_write_word_agc(state->d,
+ xd_p_reg_aagc_rf_top_numerator_9_8,
+ xd_p_reg_aagc_rf_top_numerator_7_0, 0, 2,
+ state->original_rf_top);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_word_agc(state->d,
+ xd_p_reg_aagc_if_top_numerator_9_8,
+ xd_p_reg_aagc_if_top_numerator_7_0, 0, 2,
+ state->original_if_top);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_word_agc(state->d, 0xA60E, 0xA60A, 4, 2,
+ state->original_aci0_if_top);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_word_agc(state->d, 0xA60E, 0xA60B, 6, 2,
+ state->original_aci1_if_top);
+ if (ret)
+ return ret;
+
+	/* select bandwidth */
+	deb_info("select bandwidth\n");
+ ret = af9005_fe_select_bw(state->d, fep->u.ofdm.bandwidth);
+ if (ret)
+ return ret;
+ ret = af9005_fe_program_cfoe(state->d, fep->u.ofdm.bandwidth);
+ if (ret)
+ return ret;
+
+ /* clear easy mode flag */
+ deb_info("clear easy mode flag\n");
+ ret = af9005_write_ofdm_register(state->d, 0xaefd, 0);
+ if (ret)
+ return ret;
+
+ /* set unplug threshold to original value */
+ deb_info("set unplug threshold to original value\n");
+ ret =
+ af9005_write_ofdm_register(state->d, xd_p_reg_unplug_th,
+ state->original_if_unplug_th);
+ if (ret)
+ return ret;
+ /* set tuner */
+ deb_info("set tuner\n");
+ ret = state->tuner->ops.tuner_ops.set_params(state->tuner, fep);
+ if (ret)
+ return ret;
+
+ /* trigger ofsm */
+ deb_info("trigger ofsm\n");
+ temp = 0;
+ ret = af9005_write_tuner_registers(state->d, 0xffff, &temp, 1);
+ if (ret)
+ return ret;
+
+ /* clear retrain and freeze flag */
+ deb_info("clear retrain and freeze flag\n");
+ ret =
+ af9005_write_register_bits(state->d,
+ xd_p_reg_api_retrain_request,
+ reg_api_retrain_request_pos, 2, 0);
+ if (ret)
+ return ret;
+
+ /* reset pre viterbi and post viterbi registers and statistics */
+ af9005_reset_pre_viterbi(fe);
+ af9005_reset_post_viterbi(fe);
+ state->pre_vit_error_count = 0;
+ state->pre_vit_bit_count = 0;
+ state->ber = 0;
+ state->post_vit_error_count = 0;
+ /* state->unc = 0; commented out since it should be ever increasing */
+ state->abort_count = 0;
+
+ state->next_status_check = jiffies;
+ state->strong = -1;
+
+ return 0;
+}
+
+static int af9005_fe_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *fep)
+{
+ struct af9005_fe_state *state = fe->demodulator_priv;
+ int ret;
+ u8 temp;
+
+ /* mode */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_const,
+ reg_tpsd_const_pos, reg_tpsd_const_len,
+ &temp);
+ if (ret)
+ return ret;
+ deb_info("===== fe_get_frontend ==============\n");
+ deb_info("CONSTELLATION ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.constellation = QPSK;
+ deb_info("QPSK\n");
+ break;
+ case 1:
+ fep->u.ofdm.constellation = QAM_16;
+ deb_info("QAM_16\n");
+ break;
+ case 2:
+ fep->u.ofdm.constellation = QAM_64;
+ deb_info("QAM_64\n");
+ break;
+ }
+
+ /* tps hierarchy and alpha value */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_hier,
+ reg_tpsd_hier_pos, reg_tpsd_hier_len,
+ &temp);
+ if (ret)
+ return ret;
+ deb_info("HIERARCHY ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.hierarchy_information = HIERARCHY_NONE;
+ deb_info("NONE\n");
+ break;
+ case 1:
+ fep->u.ofdm.hierarchy_information = HIERARCHY_1;
+ deb_info("1\n");
+ break;
+ case 2:
+ fep->u.ofdm.hierarchy_information = HIERARCHY_2;
+ deb_info("2\n");
+ break;
+ case 3:
+ fep->u.ofdm.hierarchy_information = HIERARCHY_4;
+ deb_info("4\n");
+ break;
+ }
+
+ /* high/low priority */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_dec_pri,
+ reg_dec_pri_pos, reg_dec_pri_len, &temp);
+ if (ret)
+ return ret;
+	/* if temp is set, priority is high */
+ deb_info("PRIORITY %s\n", temp ? "high" : "low");
+
+ /* high coderate */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_hpcr,
+ reg_tpsd_hpcr_pos, reg_tpsd_hpcr_len,
+ &temp);
+ if (ret)
+ return ret;
+ deb_info("CODERATE HP ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.code_rate_HP = FEC_1_2;
+ deb_info("FEC_1_2\n");
+ break;
+ case 1:
+ fep->u.ofdm.code_rate_HP = FEC_2_3;
+ deb_info("FEC_2_3\n");
+ break;
+ case 2:
+ fep->u.ofdm.code_rate_HP = FEC_3_4;
+ deb_info("FEC_3_4\n");
+ break;
+ case 3:
+ fep->u.ofdm.code_rate_HP = FEC_5_6;
+ deb_info("FEC_5_6\n");
+ break;
+ case 4:
+ fep->u.ofdm.code_rate_HP = FEC_7_8;
+ deb_info("FEC_7_8\n");
+ break;
+ }
+
+ /* low coderate */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_lpcr,
+ reg_tpsd_lpcr_pos, reg_tpsd_lpcr_len,
+ &temp);
+ if (ret)
+ return ret;
+ deb_info("CODERATE LP ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.code_rate_LP = FEC_1_2;
+ deb_info("FEC_1_2\n");
+ break;
+ case 1:
+ fep->u.ofdm.code_rate_LP = FEC_2_3;
+ deb_info("FEC_2_3\n");
+ break;
+ case 2:
+ fep->u.ofdm.code_rate_LP = FEC_3_4;
+ deb_info("FEC_3_4\n");
+ break;
+ case 3:
+ fep->u.ofdm.code_rate_LP = FEC_5_6;
+ deb_info("FEC_5_6\n");
+ break;
+ case 4:
+ fep->u.ofdm.code_rate_LP = FEC_7_8;
+ deb_info("FEC_7_8\n");
+ break;
+ }
+
+ /* guard interval */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_gi,
+ reg_tpsd_gi_pos, reg_tpsd_gi_len, &temp);
+ if (ret)
+ return ret;
+ deb_info("GUARD INTERVAL ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
+ deb_info("1_32\n");
+ break;
+ case 1:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
+ deb_info("1_16\n");
+ break;
+ case 2:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
+ deb_info("1_8\n");
+ break;
+ case 3:
+ fep->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
+ deb_info("1_4\n");
+ break;
+ }
+
+ /* fft */
+ ret =
+ af9005_read_register_bits(state->d, xd_g_reg_tpsd_txmod,
+ reg_tpsd_txmod_pos, reg_tpsd_txmod_len,
+ &temp);
+ if (ret)
+ return ret;
+ deb_info("TRANSMISSION MODE ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
+ deb_info("2K\n");
+ break;
+ case 1:
+ fep->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;
+ deb_info("8K\n");
+ break;
+ }
+
+ /* bandwidth */
+	ret =
+	    af9005_read_register_bits(state->d, xd_g_reg_bw, reg_bw_pos,
+				       reg_bw_len, &temp);
+	if (ret)
+		return ret;
+ deb_info("BANDWIDTH ");
+ switch (temp) {
+ case 0:
+ fep->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
+ deb_info("6\n");
+ break;
+ case 1:
+ fep->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
+ deb_info("7\n");
+ break;
+ case 2:
+ fep->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
+ deb_info("8\n");
+ break;
+ }
+ return 0;
+}
+
+static void af9005_fe_release(struct dvb_frontend *fe)
+{
+ struct af9005_fe_state *state =
+ (struct af9005_fe_state *)fe->demodulator_priv;
+ if (state->tuner != NULL && state->tuner->ops.tuner_ops.release != NULL) {
+ state->tuner->ops.tuner_ops.release(state->tuner);
+#ifdef CONFIG_DVB_CORE_ATTACH
+ symbol_put_addr(state->tuner->ops.tuner_ops.release);
+#endif
+ }
+ kfree(state);
+}
+
+static struct dvb_frontend_ops af9005_fe_ops;
+
+struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
+{
+ struct af9005_fe_state *state = NULL;
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct af9005_fe_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ deb_info("attaching frontend af9005\n");
+
+ state->d = d;
+ state->tuner = NULL;
+ state->opened = 0;
+
+ memcpy(&state->frontend.ops, &af9005_fe_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+
+ return &state->frontend;
+ error:
+ return NULL;
+}
+
+static struct dvb_frontend_ops af9005_fe_ops = {
+ .info = {
+ .name = "AF9005 USB DVB-T",
+ .type = FE_OFDM,
+ .frequency_min = 44250000,
+ .frequency_max = 867250000,
+ .frequency_stepsize = 250000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER |
+ FE_CAN_HIERARCHY_AUTO,
+ },
+
+ .release = af9005_fe_release,
+
+ .init = af9005_fe_init,
+ .sleep = af9005_fe_sleep,
+ .ts_bus_ctrl = af9005_ts_bus_ctrl,
+
+ .set_frontend = af9005_fe_set_frontend,
+ .get_frontend = af9005_fe_get_frontend,
+
+ .read_status = af9005_fe_read_status,
+ .read_ber = af9005_fe_read_ber,
+ .read_signal_strength = af9005_fe_read_signal_strength,
+ .read_snr = af9005_fe_read_snr,
+ .read_ucblocks = af9005_fe_read_unc_blocks,
+};
diff --git a/drivers/media/dvb/dvb-usb/af9005-remote.c b/drivers/media/dvb/dvb-usb/af9005-remote.c
new file mode 100644
index 000000000000..ff00c0e8f4a1
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005-remote.c
@@ -0,0 +1,157 @@
+/* DVB USB compliant Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Standard remote decode function
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#include "af9005.h"
+/* debug */
+int dvb_usb_af9005_remote_debug;
+module_param_named(debug, dvb_usb_af9005_remote_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "enable (1) or disable (0) debug messages."
+ DVB_USB_DEBUG_STATUS);
+
+#define deb_decode(args...) dprintk(dvb_usb_af9005_remote_debug,0x01,args)
+
+struct dvb_usb_rc_key af9005_rc_keys[] = {
+
+ {0x01, 0xb7, KEY_POWER},
+ {0x01, 0xa7, KEY_VOLUMEUP},
+ {0x01, 0x87, KEY_CHANNELUP},
+ {0x01, 0x7f, KEY_MUTE},
+ {0x01, 0xbf, KEY_VOLUMEDOWN},
+ {0x01, 0x3f, KEY_CHANNELDOWN},
+ {0x01, 0xdf, KEY_1},
+ {0x01, 0x5f, KEY_2},
+ {0x01, 0x9f, KEY_3},
+ {0x01, 0x1f, KEY_4},
+ {0x01, 0xef, KEY_5},
+ {0x01, 0x6f, KEY_6},
+ {0x01, 0xaf, KEY_7},
+ {0x01, 0x27, KEY_8},
+ {0x01, 0x07, KEY_9},
+ {0x01, 0xcf, KEY_ZOOM},
+ {0x01, 0x4f, KEY_0},
+ {0x01, 0x8f, KEY_GOTO}, /* marked jump on the remote */
+
+ {0x00, 0xbd, KEY_POWER},
+ {0x00, 0x7d, KEY_VOLUMEUP},
+ {0x00, 0xfd, KEY_CHANNELUP},
+ {0x00, 0x9d, KEY_MUTE},
+ {0x00, 0x5d, KEY_VOLUMEDOWN},
+ {0x00, 0xdd, KEY_CHANNELDOWN},
+ {0x00, 0xad, KEY_1},
+ {0x00, 0x6d, KEY_2},
+ {0x00, 0xed, KEY_3},
+ {0x00, 0x8d, KEY_4},
+ {0x00, 0x4d, KEY_5},
+ {0x00, 0xcd, KEY_6},
+ {0x00, 0xb5, KEY_7},
+ {0x00, 0x75, KEY_8},
+ {0x00, 0xf5, KEY_9},
+ {0x00, 0x95, KEY_ZOOM},
+ {0x00, 0x55, KEY_0},
+ {0x00, 0xd5, KEY_GOTO}, /* marked jump on the remote */
+};
+
+int af9005_rc_keys_size = ARRAY_SIZE(af9005_rc_keys);
+
+static int repeatable_keys[] = {
+ KEY_VOLUMEUP,
+ KEY_VOLUMEDOWN,
+ KEY_CHANNELUP,
+ KEY_CHANNELDOWN
+};
+
+int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len, u32 * event,
+ int *state)
+{
+ u16 mark, space;
+ u32 result;
+ u8 cust, dat, invdat;
+ int i;
+
+ if (len >= 6) {
+ mark = (u16) (data[0] << 8) + data[1];
+ space = (u16) (data[2] << 8) + data[3];
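+		/* a space shorter than a third of the leading mark is treated as a repeat frame */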
+ if (space * 3 < mark) {
+ for (i = 0; i < ARRAY_SIZE(repeatable_keys); i++) {
+ if (d->last_event == repeatable_keys[i]) {
+ *state = REMOTE_KEY_REPEAT;
+ *event = d->last_event;
+ deb_decode("repeat key, event %x\n",
+ *event);
+ return 0;
+ }
+ }
+ deb_decode("repeated key ignored (non repeatable)\n");
+ return 0;
+ } else if (len >= 33 * 4) { /*32 bits + start code */
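+			/* each data bit is a mark/space pair; the relative length of mark and space decides the bit value */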
+ result = 0;
+ for (i = 4; i < 4 + 32 * 4; i += 4) {
+ result <<= 1;
+ mark = (u16) (data[i] << 8) + data[i + 1];
+ mark >>= 1;
+ space = (u16) (data[i + 2] << 8) + data[i + 3];
+ space >>= 1;
+ if (mark * 2 > space)
+ result += 1;
+ }
+ deb_decode("key pressed, raw value %x\n", result);
+ if ((result & 0xff000000) != 0xfe000000) {
+ deb_decode
+ ("doesn't start with 0xfe, ignored\n");
+ return 0;
+ }
+ cust = (result >> 16) & 0xff;
+ dat = (result >> 8) & 0xff;
+ invdat = (~result) & 0xff;
+ if (dat != invdat) {
+ deb_decode("code != inverted code\n");
+ return 0;
+ }
+ for (i = 0; i < af9005_rc_keys_size; i++) {
+ if (af9005_rc_keys[i].custom == cust
+ && af9005_rc_keys[i].data == dat) {
+ *event = af9005_rc_keys[i].event;
+ *state = REMOTE_KEY_PRESSED;
+ deb_decode
+ ("key pressed, event %x\n", *event);
+ return 0;
+ }
+ }
+ deb_decode("not found in table\n");
+ }
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(af9005_rc_keys);
+EXPORT_SYMBOL(af9005_rc_keys_size);
+EXPORT_SYMBOL(af9005_rc_decode);
+
+MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
+MODULE_DESCRIPTION
+ ("Standard remote control decoder for Afatech 9005 DVB-T USB1.1 stick");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9005-script.h b/drivers/media/dvb/dvb-usb/af9005-script.h
new file mode 100644
index 000000000000..6eeaae51b1ca
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005-script.h
@@ -0,0 +1,203 @@
+/*
+File automatically generated by createinit.py using data
+extracted from AF05BDA.sys (windows driver):
+
+dd if=AF05BDA.sys of=initsequence bs=1 skip=88316 count=1110
+python createinit.py > af9005-script.h
+
+*/
+
+typedef struct {
+ u16 reg;
+ u8 pos;
+ u8 len;
+ u8 val;
+} RegDesc;
+
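+/* each entry describes one bit-field write: register address, bit position,
+   field length and value; the table is presumably replayed in order while
+   initialising the demodulator */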
+RegDesc script[] = {
+ {0xa180, 0x0, 0x8, 0xa},
+ {0xa181, 0x0, 0x8, 0xd7},
+ {0xa182, 0x0, 0x8, 0xa3},
+ {0xa0a0, 0x0, 0x8, 0x0},
+ {0xa0a1, 0x0, 0x5, 0x0},
+ {0xa0a1, 0x5, 0x1, 0x1},
+ {0xa0c0, 0x0, 0x4, 0x1},
+ {0xa20e, 0x4, 0x4, 0xa},
+ {0xa20f, 0x0, 0x8, 0x40},
+ {0xa210, 0x0, 0x8, 0x8},
+ {0xa32a, 0x0, 0x4, 0xa},
+ {0xa32c, 0x0, 0x8, 0x20},
+ {0xa32b, 0x0, 0x8, 0x15},
+ {0xa1a0, 0x1, 0x1, 0x1},
+ {0xa000, 0x0, 0x1, 0x1},
+ {0xa000, 0x1, 0x1, 0x0},
+ {0xa001, 0x1, 0x1, 0x1},
+ {0xa001, 0x0, 0x1, 0x0},
+ {0xa001, 0x5, 0x1, 0x0},
+ {0xa00e, 0x0, 0x5, 0x10},
+ {0xa00f, 0x0, 0x3, 0x4},
+ {0xa00f, 0x3, 0x3, 0x5},
+ {0xa010, 0x0, 0x3, 0x4},
+ {0xa010, 0x3, 0x3, 0x5},
+ {0xa016, 0x4, 0x4, 0x3},
+ {0xa01f, 0x0, 0x6, 0xa},
+ {0xa020, 0x0, 0x6, 0xa},
+ {0xa2bc, 0x0, 0x1, 0x1},
+ {0xa2bc, 0x5, 0x1, 0x1},
+ {0xa015, 0x0, 0x8, 0x50},
+ {0xa016, 0x0, 0x1, 0x0},
+ {0xa02a, 0x0, 0x8, 0x50},
+ {0xa029, 0x0, 0x8, 0x4b},
+ {0xa614, 0x0, 0x8, 0x46},
+ {0xa002, 0x0, 0x5, 0x19},
+ {0xa003, 0x0, 0x5, 0x1a},
+ {0xa004, 0x0, 0x5, 0x19},
+ {0xa005, 0x0, 0x5, 0x1a},
+ {0xa008, 0x0, 0x8, 0x69},
+ {0xa009, 0x0, 0x2, 0x2},
+ {0xae1b, 0x0, 0x8, 0x69},
+ {0xae1c, 0x0, 0x8, 0x2},
+ {0xae1d, 0x0, 0x8, 0x2a},
+ {0xa022, 0x0, 0x8, 0xaa},
+ {0xa006, 0x0, 0x8, 0xc8},
+ {0xa007, 0x0, 0x2, 0x0},
+ {0xa00c, 0x0, 0x8, 0xba},
+ {0xa00d, 0x0, 0x2, 0x2},
+ {0xa608, 0x0, 0x8, 0xba},
+ {0xa60e, 0x0, 0x2, 0x2},
+ {0xa609, 0x0, 0x8, 0x80},
+ {0xa60e, 0x2, 0x2, 0x3},
+ {0xa00a, 0x0, 0x8, 0xb6},
+ {0xa00b, 0x0, 0x2, 0x0},
+ {0xa011, 0x0, 0x8, 0xb9},
+ {0xa012, 0x0, 0x2, 0x0},
+ {0xa013, 0x0, 0x8, 0xbd},
+ {0xa014, 0x0, 0x2, 0x2},
+ {0xa366, 0x0, 0x1, 0x1},
+ {0xa2bc, 0x3, 0x1, 0x0},
+ {0xa2bd, 0x0, 0x8, 0xa},
+ {0xa2be, 0x0, 0x8, 0x14},
+ {0xa2bf, 0x0, 0x8, 0x8},
+ {0xa60a, 0x0, 0x8, 0xbd},
+ {0xa60e, 0x4, 0x2, 0x2},
+ {0xa60b, 0x0, 0x8, 0x86},
+ {0xa60e, 0x6, 0x2, 0x3},
+ {0xa001, 0x2, 0x2, 0x1},
+ {0xa1c7, 0x0, 0x8, 0xf5},
+ {0xa03d, 0x0, 0x8, 0xb1},
+ {0xa616, 0x0, 0x8, 0xff},
+ {0xa617, 0x0, 0x8, 0xad},
+ {0xa618, 0x0, 0x8, 0xad},
+ {0xa61e, 0x3, 0x1, 0x1},
+ {0xae1a, 0x0, 0x8, 0x0},
+ {0xae19, 0x0, 0x8, 0xc8},
+ {0xae18, 0x0, 0x8, 0x61},
+ {0xa140, 0x0, 0x8, 0x0},
+ {0xa141, 0x0, 0x8, 0xc8},
+ {0xa142, 0x0, 0x7, 0x61},
+ {0xa023, 0x0, 0x8, 0xff},
+ {0xa021, 0x0, 0x8, 0xad},
+ {0xa026, 0x0, 0x1, 0x0},
+ {0xa024, 0x0, 0x8, 0xff},
+ {0xa025, 0x0, 0x8, 0xff},
+ {0xa1c8, 0x0, 0x8, 0xf},
+ {0xa2bc, 0x1, 0x1, 0x0},
+ {0xa60c, 0x0, 0x4, 0x5},
+ {0xa60c, 0x4, 0x4, 0x6},
+ {0xa60d, 0x0, 0x8, 0xa},
+ {0xa371, 0x0, 0x1, 0x1},
+ {0xa366, 0x1, 0x3, 0x7},
+ {0xa338, 0x0, 0x8, 0x10},
+ {0xa339, 0x0, 0x6, 0x7},
+ {0xa33a, 0x0, 0x6, 0x1f},
+ {0xa33b, 0x0, 0x8, 0xf6},
+ {0xa33c, 0x3, 0x5, 0x4},
+ {0xa33d, 0x4, 0x4, 0x0},
+ {0xa33d, 0x1, 0x1, 0x1},
+ {0xa33d, 0x2, 0x1, 0x1},
+ {0xa33d, 0x3, 0x1, 0x1},
+ {0xa16d, 0x0, 0x4, 0xf},
+ {0xa161, 0x0, 0x5, 0x5},
+ {0xa162, 0x0, 0x4, 0x5},
+ {0xa165, 0x0, 0x8, 0xff},
+ {0xa166, 0x0, 0x8, 0x9c},
+ {0xa2c3, 0x0, 0x4, 0x5},
+ {0xa61a, 0x0, 0x6, 0xf},
+ {0xb200, 0x0, 0x8, 0xa1},
+ {0xb201, 0x0, 0x8, 0x7},
+ {0xa093, 0x0, 0x1, 0x0},
+ {0xa093, 0x1, 0x5, 0xf},
+ {0xa094, 0x0, 0x8, 0xff},
+ {0xa095, 0x0, 0x8, 0xf},
+ {0xa080, 0x2, 0x5, 0x3},
+ {0xa081, 0x0, 0x4, 0x0},
+ {0xa081, 0x4, 0x4, 0x9},
+ {0xa082, 0x0, 0x5, 0x1f},
+ {0xa08d, 0x0, 0x8, 0x1},
+ {0xa083, 0x0, 0x8, 0x32},
+ {0xa084, 0x0, 0x1, 0x0},
+ {0xa08e, 0x0, 0x8, 0x3},
+ {0xa085, 0x0, 0x8, 0x32},
+ {0xa086, 0x0, 0x3, 0x0},
+ {0xa087, 0x0, 0x8, 0x6e},
+ {0xa088, 0x0, 0x5, 0x15},
+ {0xa089, 0x0, 0x8, 0x0},
+ {0xa08a, 0x0, 0x5, 0x19},
+ {0xa08b, 0x0, 0x8, 0x92},
+ {0xa08c, 0x0, 0x5, 0x1c},
+ {0xa120, 0x0, 0x8, 0x0},
+ {0xa121, 0x0, 0x5, 0x10},
+ {0xa122, 0x0, 0x8, 0x0},
+ {0xa123, 0x0, 0x7, 0x40},
+ {0xa123, 0x7, 0x1, 0x0},
+ {0xa124, 0x0, 0x8, 0x13},
+ {0xa125, 0x0, 0x7, 0x10},
+ {0xa1c0, 0x0, 0x8, 0x0},
+ {0xa1c1, 0x0, 0x5, 0x4},
+ {0xa1c2, 0x0, 0x8, 0x0},
+ {0xa1c3, 0x0, 0x5, 0x10},
+ {0xa1c3, 0x5, 0x3, 0x0},
+ {0xa1c4, 0x0, 0x6, 0x0},
+ {0xa1c5, 0x0, 0x7, 0x10},
+ {0xa100, 0x0, 0x8, 0x0},
+ {0xa101, 0x0, 0x5, 0x10},
+ {0xa102, 0x0, 0x8, 0x0},
+ {0xa103, 0x0, 0x7, 0x40},
+ {0xa103, 0x7, 0x1, 0x0},
+ {0xa104, 0x0, 0x8, 0x18},
+ {0xa105, 0x0, 0x7, 0xa},
+ {0xa106, 0x0, 0x8, 0x20},
+ {0xa107, 0x0, 0x8, 0x40},
+ {0xa108, 0x0, 0x4, 0x0},
+ {0xa38c, 0x0, 0x8, 0xfc},
+ {0xa38d, 0x0, 0x8, 0x0},
+ {0xa38e, 0x0, 0x8, 0x7e},
+ {0xa38f, 0x0, 0x8, 0x0},
+ {0xa390, 0x0, 0x8, 0x2f},
+ {0xa60f, 0x5, 0x1, 0x1},
+ {0xa170, 0x0, 0x8, 0xdc},
+ {0xa171, 0x0, 0x2, 0x0},
+ {0xa2ae, 0x0, 0x1, 0x1},
+ {0xa2ae, 0x1, 0x1, 0x1},
+ {0xa392, 0x0, 0x1, 0x1},
+ {0xa391, 0x2, 0x1, 0x0},
+ {0xabc1, 0x0, 0x8, 0xff},
+ {0xabc2, 0x0, 0x8, 0x0},
+ {0xabc8, 0x0, 0x8, 0x8},
+ {0xabca, 0x0, 0x8, 0x10},
+ {0xabcb, 0x0, 0x1, 0x0},
+ {0xabc3, 0x5, 0x3, 0x7},
+ {0xabc0, 0x6, 0x1, 0x0},
+ {0xabc0, 0x4, 0x2, 0x0},
+ {0xa344, 0x4, 0x4, 0x1},
+ {0xabc0, 0x7, 0x1, 0x1},
+ {0xabc0, 0x2, 0x1, 0x1},
+ {0xa345, 0x0, 0x8, 0x66},
+ {0xa346, 0x0, 0x8, 0x66},
+ {0xa347, 0x0, 0x4, 0x0},
+ {0xa343, 0x0, 0x4, 0xa},
+ {0xa347, 0x4, 0x4, 0x2},
+ {0xa348, 0x0, 0x4, 0xc},
+ {0xa348, 0x4, 0x4, 0x7},
+ {0xa349, 0x0, 0x6, 0x2},
+};
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
new file mode 100644
index 000000000000..7db6eee50e39
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -0,0 +1,1141 @@
+/* DVB USB compliant Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#include "af9005.h"
+
+/* debug */
+int dvb_usb_af9005_debug;
+module_param_named(debug, dvb_usb_af9005_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "set debugging level (1=info,xfer=2,rc=4,reg=8,i2c=16,fw=32 (or-able))."
+ DVB_USB_DEBUG_STATUS);
+/* enable obnoxious led */
+int dvb_usb_af9005_led = 1;
+module_param_named(led, dvb_usb_af9005_led, bool, 0644);
+MODULE_PARM_DESC(led, "enable led (default: 1).");
+
+/* eeprom dump */
+int dvb_usb_af9005_dump_eeprom = 0;
+module_param_named(dump_eeprom, dvb_usb_af9005_dump_eeprom, int, 0);
+MODULE_PARM_DESC(dump_eeprom, "dump contents of the eeprom.");
+
+/* remote control decoder */
+int (*rc_decode) (struct dvb_usb_device * d, u8 * data, int len, u32 * event,
+ int *state);
+void *rc_keys;
+int *rc_keys_size;
+
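+/* regmask[n - 1] is a mask of the n least significant bits, used by the
+   bit-field register helpers below */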
+u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
+
+struct af9005_device_state {
+ u8 sequence;
+ int led_state;
+};
+
+int af9005_usb_generic_rw(struct dvb_usb_device *d, u8 * wbuf, u16 wlen,
+ u8 * rbuf, u16 rlen, int delay_ms)
+{
+ int actlen, ret = -ENOMEM;
+
+ if (wbuf == NULL || wlen == 0)
+ return -EINVAL;
+
+ if ((ret = mutex_lock_interruptible(&d->usb_mutex)))
+ return ret;
+
+ deb_xfer(">>> ");
+ debug_dump(wbuf, wlen, deb_xfer);
+
+ ret = usb_bulk_msg(d->udev, usb_sndbulkpipe(d->udev,
+ 2), wbuf, wlen,
+ &actlen, 2000);
+
+ if (ret)
+ err("bulk message failed: %d (%d/%d)", ret, wlen, actlen);
+ else
+ ret = actlen != wlen ? -1 : 0;
+
+ /* an answer is expected, and no error before */
+ if (!ret && rbuf && rlen) {
+ if (delay_ms)
+ msleep(delay_ms);
+
+ ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev,
+ 0x01), rbuf,
+ rlen, &actlen, 2000);
+
+ if (ret)
+ err("recv bulk message failed: %d", ret);
+ else {
+ deb_xfer("<<< ");
+ debug_dump(rbuf, actlen, deb_xfer);
+ }
+ }
+
+ mutex_unlock(&d->usb_mutex);
+ return ret;
+}
+
+int af9005_usb_generic_write(struct dvb_usb_device *d, u8 * buf, u16 len)
+{
+ return af9005_usb_generic_rw(d, buf, len, NULL, 0, 0);
+}
+
+int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg,
+ int readwrite, int type, u8 * values, int len)
+{
+ struct af9005_device_state *st = d->priv;
+ u8 obuf[16] = { 0 };
+ u8 ibuf[17] = { 0 };
+ u8 command;
+ int i;
+ int ret;
+
+ if (len < 1) {
+ err("generic read/write, less than 1 byte. Makes no sense.");
+ return -EINVAL;
+ }
+ if (len > 8) {
+ err("generic read/write, more than 8 bytes. Not supported.");
+ return -EINVAL;
+ }
+
+ obuf[0] = 14; /* rest of buffer length low */
+ obuf[1] = 0; /* rest of buffer length high */
+
+ obuf[2] = AF9005_REGISTER_RW; /* register operation */
+ obuf[3] = 12; /* rest of buffer length */
+
+ obuf[4] = st->sequence++; /* sequence number */
+
+ obuf[5] = (u8) (reg >> 8); /* register address */
+ obuf[6] = (u8) (reg & 0xff);
+
+ if (type == AF9005_OFDM_REG) {
+ command = AF9005_CMD_OFDM_REG;
+ } else {
+ command = AF9005_CMD_TUNER;
+ }
+
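+	/* command byte layout (see the AF9005_CMD_* constants in af9005.h):
+	   bit 7 tuner/ofdm select, bits 5-3 burst length minus one,
+	   bit 2 auto-increment, bit 1 burst, bit 0 read (0) or write (1) */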
+ if (len > 1)
+ command |=
+ AF9005_CMD_BURST | AF9005_CMD_AUTOINC | (len - 1) << 3;
+ command |= readwrite;
+ if (readwrite == AF9005_CMD_WRITE)
+ for (i = 0; i < len; i++)
+ obuf[8 + i] = values[i];
+ else if (type == AF9005_TUNER_REG)
+ /* read command for tuner, the first byte contains the i2c address */
+ obuf[8] = values[0];
+ obuf[7] = command;
+
+ ret = af9005_usb_generic_rw(d, obuf, 16, ibuf, 17, 0);
+ if (ret)
+ return ret;
+
+ /* sanity check */
+ if (ibuf[2] != AF9005_REGISTER_RW_ACK) {
+ err("generic read/write, wrong reply code.");
+ return -EIO;
+ }
+ if (ibuf[3] != 0x0d) {
+ err("generic read/write, wrong length in reply.");
+ return -EIO;
+ }
+ if (ibuf[4] != obuf[4]) {
+ err("generic read/write, wrong sequence in reply.");
+ return -EIO;
+ }
+ /*
+ Windows driver doesn't check these fields, in fact sometimes
+	   the register in the reply is different from what has been sent
+
+ if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) {
+ err("generic read/write, wrong register in reply.");
+ return -EIO;
+ }
+ if (ibuf[7] != command) {
+ err("generic read/write wrong command in reply.");
+ return -EIO;
+ }
+ */
+ if (ibuf[16] != 0x01) {
+ err("generic read/write wrong status code in reply.");
+ return -EIO;
+ }
+ if (readwrite == AF9005_CMD_READ)
+ for (i = 0; i < len; i++)
+ values[i] = ibuf[8 + i];
+
+ return 0;
+
+}
+
+int af9005_read_ofdm_register(struct dvb_usb_device *d, u16 reg, u8 * value)
+{
+ int ret;
+ deb_reg("read register %x ", reg);
+ ret = af9005_generic_read_write(d, reg,
+ AF9005_CMD_READ, AF9005_OFDM_REG,
+ value, 1);
+ if (ret)
+ deb_reg("failed\n");
+ else
+ deb_reg("value %x\n", *value);
+ return ret;
+}
+
+int af9005_read_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+ u8 * values, int len)
+{
+ int ret;
+ deb_reg("read %d registers %x ", len, reg);
+ ret = af9005_generic_read_write(d, reg,
+ AF9005_CMD_READ, AF9005_OFDM_REG,
+ values, len);
+ if (ret)
+ deb_reg("failed\n");
+ else
+ debug_dump(values, len, deb_reg);
+ return ret;
+}
+
+int af9005_write_ofdm_register(struct dvb_usb_device *d, u16 reg, u8 value)
+{
+ int ret;
+ u8 temp = value;
+ deb_reg("write register %x value %x ", reg, value);
+ ret = af9005_generic_read_write(d, reg,
+ AF9005_CMD_WRITE, AF9005_OFDM_REG,
+ &temp, 1);
+ if (ret)
+ deb_reg("failed\n");
+ else
+ deb_reg("ok\n");
+ return ret;
+}
+
+int af9005_write_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+ u8 * values, int len)
+{
+ int ret;
+ deb_reg("write %d registers %x values ", len, reg);
+ debug_dump(values, len, deb_reg);
+
+ ret = af9005_generic_read_write(d, reg,
+ AF9005_CMD_WRITE, AF9005_OFDM_REG,
+ values, len);
+ if (ret)
+ deb_reg("failed\n");
+ else
+ deb_reg("ok\n");
+ return ret;
+}
+
+int af9005_read_register_bits(struct dvb_usb_device *d, u16 reg, u8 pos,
+ u8 len, u8 * value)
+{
+ u8 temp;
+ int ret;
+ deb_reg("read bits %x %x %x", reg, pos, len);
+ ret = af9005_read_ofdm_register(d, reg, &temp);
+ if (ret) {
+ deb_reg(" failed\n");
+ return ret;
+ }
+ *value = (temp >> pos) & regmask[len - 1];
+ deb_reg(" value %x\n", *value);
+ return 0;
+
+}
+
+int af9005_write_register_bits(struct dvb_usb_device *d, u16 reg, u8 pos,
+ u8 len, u8 value)
+{
+ u8 temp, mask;
+ int ret;
+ deb_reg("write bits %x %x %x value %x\n", reg, pos, len, value);
+ if (pos == 0 && len == 8)
+ return af9005_write_ofdm_register(d, reg, value);
+ ret = af9005_read_ofdm_register(d, reg, &temp);
+ if (ret)
+ return ret;
+ mask = regmask[len - 1] << pos;
+ temp = (temp & ~mask) | ((value << pos) & mask);
+ return af9005_write_ofdm_register(d, reg, temp);
+
+}
+
+static int af9005_usb_read_tuner_registers(struct dvb_usb_device *d,
+ u16 reg, u8 * values, int len)
+{
+ return af9005_generic_read_write(d, reg,
+ AF9005_CMD_READ, AF9005_TUNER_REG,
+ values, len);
+}
+
+static int af9005_usb_write_tuner_registers(struct dvb_usb_device *d,
+ u16 reg, u8 * values, int len)
+{
+ return af9005_generic_read_write(d, reg,
+ AF9005_CMD_WRITE,
+ AF9005_TUNER_REG, values, len);
+}
+
+int af9005_write_tuner_registers(struct dvb_usb_device *d, u16 reg,
+ u8 * values, int len)
+{
+ /* don't let the name of this function mislead you: it's just used
+ as an interface from the firmware to the i2c bus. The actual
+ i2c addresses are contained in the data */
+ int ret, i, done = 0, fail = 0;
+ u8 temp;
+ ret = af9005_usb_write_tuner_registers(d, reg, values, len);
+ if (ret)
+ return ret;
+ if (reg != 0xffff) {
+ /* check if write done (0xa40d bit 1) or fail (0xa40d bit 2) */
+ for (i = 0; i < 200; i++) {
+ ret =
+ af9005_read_ofdm_register(d,
+ xd_I2C_i2c_m_status_wdat_done,
+ &temp);
+ if (ret)
+ return ret;
+ done = temp & (regmask[i2c_m_status_wdat_done_len - 1]
+ << i2c_m_status_wdat_done_pos);
+ if (done)
+ break;
+ fail = temp & (regmask[i2c_m_status_wdat_fail_len - 1]
+ << i2c_m_status_wdat_fail_pos);
+ if (fail)
+ break;
+ msleep(50);
+ }
+ if (i == 200)
+ return -ETIMEDOUT;
+ if (fail) {
+ /* clear write fail bit */
+ af9005_write_register_bits(d,
+ xd_I2C_i2c_m_status_wdat_fail,
+ i2c_m_status_wdat_fail_pos,
+ i2c_m_status_wdat_fail_len,
+ 1);
+ return -EIO;
+ }
+ /* clear write done bit */
+ ret =
+ af9005_write_register_bits(d,
+ xd_I2C_i2c_m_status_wdat_fail,
+ i2c_m_status_wdat_done_pos,
+ i2c_m_status_wdat_done_len, 1);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int af9005_read_tuner_registers(struct dvb_usb_device *d, u16 reg, u8 addr,
+ u8 * values, int len)
+{
+ /* don't let the name of this function mislead you: it's just used
+ as an interface from the firmware to the i2c bus. The actual
+ i2c addresses are contained in the data */
+ int ret, i;
+ u8 temp, buf[2];
+
+ buf[0] = addr; /* tuner i2c address */
+ buf[1] = values[0]; /* tuner register */
+
+ values[0] = addr + 0x01; /* i2c read address */
+
+ if (reg == APO_REG_I2C_RW_SILICON_TUNER) {
+		/* write tuner i2c address to tuner, register 0x00c0 is undocumented, found by sniffing */
+ ret = af9005_write_tuner_registers(d, 0x00c0, buf, 2);
+ if (ret)
+ return ret;
+ }
+
+ /* send read command to ofsm */
+ ret = af9005_usb_read_tuner_registers(d, reg, values, 1);
+ if (ret)
+ return ret;
+
+ /* check if read done */
+ for (i = 0; i < 200; i++) {
+ ret = af9005_read_ofdm_register(d, 0xa408, &temp);
+ if (ret)
+ return ret;
+ if (temp & 0x01)
+ break;
+ msleep(50);
+ }
+ if (i == 200)
+ return -ETIMEDOUT;
+
+ /* clear read done bit (by writing 1) */
+ ret = af9005_write_ofdm_register(d, xd_I2C_i2c_m_data8, 1);
+ if (ret)
+ return ret;
+
+ /* get read data (available from 0xa400) */
+ for (i = 0; i < len; i++) {
+ ret = af9005_read_ofdm_register(d, 0xa400 + i, &temp);
+ if (ret)
+ return ret;
+ values[i] = temp;
+ }
+ return 0;
+}
+
+static int af9005_i2c_write(struct dvb_usb_device *d, u8 i2caddr, u8 reg,
+ u8 * data, int len)
+{
+ int ret, i;
+ u8 buf[3];
+ deb_i2c("i2c_write i2caddr %x, reg %x, len %d data ", i2caddr,
+ reg, len);
+ debug_dump(data, len, deb_i2c);
+
+ for (i = 0; i < len; i++) {
+ buf[0] = i2caddr;
+ buf[1] = reg + (u8) i;
+ buf[2] = data[i];
+ ret =
+ af9005_write_tuner_registers(d,
+ APO_REG_I2C_RW_SILICON_TUNER,
+ buf, 3);
+ if (ret) {
+ deb_i2c("i2c_write failed\n");
+ return ret;
+ }
+ }
+ deb_i2c("i2c_write ok\n");
+ return 0;
+}
+
+static int af9005_i2c_read(struct dvb_usb_device *d, u8 i2caddr, u8 reg,
+ u8 * data, int len)
+{
+ int ret, i;
+ u8 temp;
+ deb_i2c("i2c_read i2caddr %x, reg %x, len %d\n ", i2caddr, reg, len);
+ for (i = 0; i < len; i++) {
+ temp = reg + i;
+ ret =
+ af9005_read_tuner_registers(d,
+ APO_REG_I2C_RW_SILICON_TUNER,
+ i2caddr, &temp, 1);
+ if (ret) {
+ deb_i2c("i2c_read failed\n");
+ return ret;
+ }
+ data[i] = temp;
+ }
+ deb_i2c("i2c data read: ");
+ debug_dump(data, len, deb_i2c);
+ return 0;
+}
+
+static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
+{
+ /* only implements what the mt2060 module does, don't know how
+ to make it really generic */
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ int ret;
+ u8 reg, addr;
+ u8 *value;
+
+ if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
+ return -EAGAIN;
+
+ if (num > 2)
+		warn("more than 2 i2c messages at a time are not handled yet. TODO.");
+
+ if (num == 2) {
+ /* reads a single register */
+ reg = *msg[0].buf;
+ addr = msg[0].addr;
+ value = msg[1].buf;
+ ret = af9005_i2c_read(d, addr, reg, value, 1);
+ if (ret == 0)
+ ret = 2;
+ } else {
+ /* write one or more registers */
+ reg = msg[0].buf[0];
+ addr = msg[0].addr;
+ value = &msg[0].buf[1];
+ ret = af9005_i2c_write(d, addr, reg, value, msg[0].len - 1);
+ if (ret == 0)
+ ret = 1;
+ }
+
+ mutex_unlock(&d->i2c_mutex);
+ return ret;
+}
+
+static u32 af9005_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C;
+}
+
+static struct i2c_algorithm af9005_i2c_algo = {
+ .master_xfer = af9005_i2c_xfer,
+ .functionality = af9005_i2c_func,
+};
+
+int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf,
+ int wlen, u8 * rbuf, int rlen)
+{
+ struct af9005_device_state *st = d->priv;
+
+ int ret, i, packet_len;
+ u8 buf[64];
+ u8 ibuf[64];
+
+ if (wlen < 0) {
+ err("send command, wlen less than 0 bytes. Makes no sense.");
+ return -EINVAL;
+ }
+ if (wlen > 54) {
+ err("send command, wlen more than 54 bytes. Not supported.");
+ return -EINVAL;
+ }
+ if (rlen > 54) {
+ err("send command, rlen more than 54 bytes. Not supported.");
+ return -EINVAL;
+ }
+ packet_len = wlen + 5;
+ buf[0] = (u8) (packet_len & 0xff);
+ buf[1] = (u8) ((packet_len & 0xff00) >> 8);
+
+ buf[2] = 0x26; /* packet type */
+ buf[3] = wlen + 3;
+ buf[4] = st->sequence++;
+ buf[5] = command;
+ buf[6] = wlen;
+ for (i = 0; i < wlen; i++)
+ buf[7 + i] = wbuf[i];
+ ret = af9005_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0);
+ if (ret)
+ return ret;
+ if (ibuf[2] != 0x27) {
+ err("send command, wrong reply code.");
+ return -EIO;
+ }
+ if (ibuf[4] != buf[4]) {
+ err("send command, wrong sequence in reply.");
+ return -EIO;
+ }
+ if (ibuf[5] != 0x01) {
+ err("send command, wrong status code in reply.");
+ return -EIO;
+ }
+ if (ibuf[6] != rlen) {
+ err("send command, invalid data length in reply.");
+ return -EIO;
+ }
+ for (i = 0; i < rlen; i++)
+ rbuf[i] = ibuf[i + 7];
+ return 0;
+}
+
+int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values,
+ int len)
+{
+ struct af9005_device_state *st = d->priv;
+ u8 obuf[16], ibuf[14];
+ int ret, i;
+
+ memset(obuf, 0, sizeof(obuf));
+ memset(ibuf, 0, sizeof(ibuf));
+
+ obuf[0] = 14; /* length of rest of packet low */
+	obuf[1] = 0;		/* length of rest of packet high */
+
+ obuf[2] = 0x2a; /* read/write eeprom */
+
+ obuf[3] = 12; /* size */
+
+ obuf[4] = st->sequence++;
+
+ obuf[5] = 0; /* read */
+
+ obuf[6] = len;
+ obuf[7] = address;
+ ret = af9005_usb_generic_rw(d, obuf, 16, ibuf, 14, 0);
+ if (ret)
+ return ret;
+ if (ibuf[2] != 0x2b) {
+ err("Read eeprom, invalid reply code");
+ return -EIO;
+ }
+ if (ibuf[3] != 10) {
+ err("Read eeprom, invalid reply length");
+ return -EIO;
+ }
+ if (ibuf[4] != obuf[4]) {
+ err("Read eeprom, wrong sequence in reply ");
+ return -EIO;
+ }
+ if (ibuf[5] != 1) {
+ err("Read eeprom, wrong status in reply ");
+ return -EIO;
+ }
+ for (i = 0; i < len; i++) {
+ values[i] = ibuf[6 + i];
+ }
+ return 0;
+}
+
+static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply)
+{
+ u8 buf[FW_BULKOUT_SIZE + 2];
+ u16 checksum;
+ int act_len, i, ret;
+ memset(buf, 0, sizeof(buf));
+ buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
+ buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
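+	/* every boot packet starts with a 16-bit little-endian length of the
+	   remaining bytes; the checksum of the payload is stored high byte
+	   first right after the payload */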
+ switch (type) {
+ case FW_CONFIG:
+ buf[2] = 0x11;
+ buf[3] = 0x04;
+ buf[4] = 0x00; /* sequence number, original driver doesn't increment it here */
+ buf[5] = 0x03;
+ checksum = buf[4] + buf[5];
+ buf[6] = (u8) ((checksum >> 8) & 0xff);
+ buf[7] = (u8) (checksum & 0xff);
+ break;
+ case FW_CONFIRM:
+ buf[2] = 0x11;
+ buf[3] = 0x04;
+ buf[4] = 0x00; /* sequence number, original driver doesn't increment it here */
+ buf[5] = 0x01;
+ checksum = buf[4] + buf[5];
+ buf[6] = (u8) ((checksum >> 8) & 0xff);
+ buf[7] = (u8) (checksum & 0xff);
+ break;
+ case FW_BOOT:
+ buf[2] = 0x10;
+ buf[3] = 0x08;
+ buf[4] = 0x00; /* sequence number, original driver doesn't increment it here */
+ buf[5] = 0x97;
+ buf[6] = 0xaa;
+ buf[7] = 0x55;
+ buf[8] = 0xa5;
+ buf[9] = 0x5a;
+ checksum = 0;
+ for (i = 4; i <= 9; i++)
+ checksum += buf[i];
+ buf[10] = (u8) ((checksum >> 8) & 0xff);
+ buf[11] = (u8) (checksum & 0xff);
+ break;
+ default:
+ err("boot packet invalid boot packet type");
+ return -EINVAL;
+ }
+ deb_fw(">>> ");
+ debug_dump(buf, FW_BULKOUT_SIZE + 2, deb_fw);
+
+ ret = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, 0x02),
+ buf, FW_BULKOUT_SIZE + 2, &act_len, 2000);
+ if (ret)
+ err("boot packet bulk message failed: %d (%d/%d)", ret,
+ FW_BULKOUT_SIZE + 2, act_len);
+ else
+ ret = act_len != FW_BULKOUT_SIZE + 2 ? -1 : 0;
+ if (ret)
+ return ret;
+ memset(buf, 0, 9);
+ ret = usb_bulk_msg(udev,
+ usb_rcvbulkpipe(udev, 0x01), buf, 9, &act_len, 2000);
+ if (ret) {
+ err("boot packet recv bulk message failed: %d", ret);
+ return ret;
+ }
+ deb_fw("<<< ");
+ debug_dump(buf, act_len, deb_fw);
+ checksum = 0;
+ switch (type) {
+ case FW_CONFIG:
+ if (buf[2] != 0x11) {
+ err("boot bad config header.");
+ return -EIO;
+ }
+ if (buf[3] != 0x05) {
+ err("boot bad config size.");
+ return -EIO;
+ }
+ if (buf[4] != 0x00) {
+ err("boot bad config sequence.");
+ return -EIO;
+ }
+ if (buf[5] != 0x04) {
+ err("boot bad config subtype.");
+ return -EIO;
+ }
+ for (i = 4; i <= 6; i++)
+ checksum += buf[i];
+ if (buf[7] * 256 + buf[8] != checksum) {
+ err("boot bad config checksum.");
+ return -EIO;
+ }
+ *reply = buf[6];
+ break;
+ case FW_CONFIRM:
+ if (buf[2] != 0x11) {
+ err("boot bad confirm header.");
+ return -EIO;
+ }
+ if (buf[3] != 0x05) {
+ err("boot bad confirm size.");
+ return -EIO;
+ }
+ if (buf[4] != 0x00) {
+ err("boot bad confirm sequence.");
+ return -EIO;
+ }
+ if (buf[5] != 0x02) {
+ err("boot bad confirm subtype.");
+ return -EIO;
+ }
+ for (i = 4; i <= 6; i++)
+ checksum += buf[i];
+ if (buf[7] * 256 + buf[8] != checksum) {
+ err("boot bad confirm checksum.");
+ return -EIO;
+ }
+ *reply = buf[6];
+ break;
+ case FW_BOOT:
+ if (buf[2] != 0x10) {
+ err("boot bad boot header.");
+ return -EIO;
+ }
+ if (buf[3] != 0x05) {
+ err("boot bad boot size.");
+ return -EIO;
+ }
+ if (buf[4] != 0x00) {
+ err("boot bad boot sequence.");
+ return -EIO;
+ }
+ if (buf[5] != 0x01) {
+ err("boot bad boot pattern 01.");
+ return -EIO;
+ }
+ if (buf[6] != 0x10) {
+ err("boot bad boot pattern 10.");
+ return -EIO;
+ }
+ for (i = 4; i <= 6; i++)
+ checksum += buf[i];
+ if (buf[7] * 256 + buf[8] != checksum) {
+ err("boot bad boot checksum.");
+ return -EIO;
+ }
+ break;
+
+ }
+
+ return 0;
+}
+
+int af9005_download_firmware(struct usb_device *udev, const struct firmware *fw)
+{
+ int i, packets, ret, act_len;
+
+ u8 buf[FW_BULKOUT_SIZE + 2];
+ u8 reply;
+
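+	/* download sequence: FW_CONFIG must report 0x01 (no firmware yet),
+	   the image is sent in FW_BULKOUT_SIZE chunks, FW_CONFIRM must echo
+	   the number of chunks, FW_BOOT starts the firmware and a final
+	   FW_CONFIG must report 0x02 */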
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ if (ret)
+ return ret;
+ if (reply != 0x01) {
+ err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply);
+ return -EIO;
+ }
+ packets = fw->size / FW_BULKOUT_SIZE;
+ buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
+ buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff);
+ for (i = 0; i < packets; i++) {
+ memcpy(&buf[2], fw->data + i * FW_BULKOUT_SIZE,
+ FW_BULKOUT_SIZE);
+ deb_fw(">>> ");
+ debug_dump(buf, FW_BULKOUT_SIZE + 2, deb_fw);
+ ret = usb_bulk_msg(udev,
+ usb_sndbulkpipe(udev, 0x02),
+ buf, FW_BULKOUT_SIZE + 2, &act_len, 1000);
+ if (ret) {
+ err("firmware download failed at packet %d with code %d", i, ret);
+ return ret;
+ }
+ }
+ ret = af9005_boot_packet(udev, FW_CONFIRM, &reply);
+ if (ret)
+ return ret;
+ if (reply != (u8) (packets & 0xff)) {
+ err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply);
+ return -EIO;
+ }
+ ret = af9005_boot_packet(udev, FW_BOOT, &reply);
+ if (ret)
+ return ret;
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ if (ret)
+ return ret;
+ if (reply != 0x02) {
+ err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply);
+ return -EIO;
+ }
+
+ return 0;
+
+}
+
+int af9005_led_control(struct dvb_usb_device *d, int onoff)
+{
+ struct af9005_device_state *st = d->priv;
+ int temp, ret;
+
+ if (onoff && dvb_usb_af9005_led)
+ temp = 1;
+ else
+ temp = 0;
+ if (st->led_state != temp) {
+ ret =
+ af9005_write_register_bits(d, xd_p_reg_top_locken1,
+ reg_top_locken1_pos,
+ reg_top_locken1_len, temp);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_register_bits(d, xd_p_reg_top_lock1,
+ reg_top_lock1_pos,
+ reg_top_lock1_len, temp);
+ if (ret)
+ return ret;
+ st->led_state = temp;
+ }
+ return 0;
+}
+
+static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ u8 buf[8];
+ int i;
+
+ /* without these calls the first commands after downloading
+ the firmware fail. I put these calls here to simulate
+	   what is done in dvb-usb-init.c.
+ */
+ struct usb_device *udev = adap->dev->udev;
+ usb_clear_halt(udev, usb_sndbulkpipe(udev, 2));
+ usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));
+ if (dvb_usb_af9005_dump_eeprom) {
+ printk("EEPROM DUMP\n");
+ for (i = 0; i < 255; i += 8) {
+ af9005_read_eeprom(adap->dev, i, buf, 8);
+ printk("ADDR %x ", i);
+ debug_dump(buf, 8, printk);
+ }
+ }
+ adap->fe = af9005_fe_attach(adap->dev);
+ return 0;
+}
+
+static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state)
+{
+ struct af9005_device_state *st = d->priv;
+ int ret, len;
+
+ u8 obuf[5];
+ u8 ibuf[256];
+
+ *state = REMOTE_NO_KEY_PRESSED;
+ if (rc_decode == NULL) {
+		/* it should never get here */
+ return 0;
+ }
+ /* deb_info("rc_query\n"); */
+ obuf[0] = 3; /* rest of packet length low */
+	obuf[1] = 0;		/* rest of packet length high */
+ obuf[2] = 0x40; /* read remote */
+ obuf[3] = 1; /* rest of packet length */
+ obuf[4] = st->sequence++; /* sequence number */
+ ret = af9005_usb_generic_rw(d, obuf, 5, ibuf, 256, 0);
+ if (ret) {
+ err("rc query failed");
+ return ret;
+ }
+ if (ibuf[2] != 0x41) {
+ err("rc query bad header.");
+ return -EIO;
+ }
+ if (ibuf[4] != obuf[4]) {
+ err("rc query bad sequence.");
+ return -EIO;
+ }
+ len = ibuf[5];
+ if (len > 246) {
+ err("rc query invalid length");
+ return -EIO;
+ }
+ if (len > 0) {
+ deb_rc("rc data (%d) ", len);
+ debug_dump((ibuf + 6), len, deb_rc);
+ ret = rc_decode(d, &ibuf[6], len, event, state);
+ if (ret) {
+ err("rc_decode failed");
+ return ret;
+ } else {
+ deb_rc("rc_decode state %x event %x\n", *state, *event);
+ if (*state == REMOTE_KEY_REPEAT)
+ *event = d->last_event;
+ }
+ }
+ return 0;
+}
+
+static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+
+ return 0;
+}
+
+static int af9005_pid_filter_control(struct dvb_usb_adapter *adap, int onoff)
+{
+ int ret;
+ deb_info("pid filter control onoff %d\n", onoff);
+ if (onoff) {
+ ret =
+ af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_register_bits(adap->dev,
+ XD_MP2IF_DMX_CTRL, 1, 1, 1);
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1);
+ } else
+ ret =
+ af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 0);
+ if (ret)
+ return ret;
+ deb_info("pid filter control ok\n");
+ return 0;
+}
+
+static int af9005_pid_filter(struct dvb_usb_adapter *adap, int index,
+ u16 pid, int onoff)
+{
+ u8 cmd = index & 0x1f;
+ int ret;
+ deb_info("set pid filter, index %d, pid %x, onoff %d\n", index,
+ pid, onoff);
+ if (onoff) {
+ /* cannot use it as pid_filter_ctrl since it has to be done
+ before setting the first pid */
+ if (adap->feedcount == 1) {
+ deb_info("first pid set, enable pid table\n");
+ ret = af9005_pid_filter_control(adap, onoff);
+ if (ret)
+ return ret;
+ }
+ ret =
+ af9005_write_ofdm_register(adap->dev,
+ XD_MP2IF_PID_DATA_L,
+ (u8) (pid & 0xff));
+ if (ret)
+ return ret;
+ ret =
+ af9005_write_ofdm_register(adap->dev,
+ XD_MP2IF_PID_DATA_H,
+ (u8) (pid >> 8));
+ if (ret)
+ return ret;
+ cmd |= 0x20 | 0x40;
+ } else {
+ if (adap->feedcount == 0) {
+ deb_info("last pid unset, disable pid table\n");
+ ret = af9005_pid_filter_control(adap, onoff);
+ if (ret)
+ return ret;
+ }
+ }
+ ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_PID_IDX, cmd);
+ if (ret)
+ return ret;
+ deb_info("set pid ok\n");
+ return 0;
+}
+
+static int af9005_identify_state(struct usb_device *udev,
+ struct dvb_usb_device_properties *props,
+ struct dvb_usb_device_description **desc,
+ int *cold)
+{
+ int ret;
+ u8 reply;
+ ret = af9005_boot_packet(udev, FW_CONFIG, &reply);
+ if (ret)
+ return ret;
+ deb_info("result of FW_CONFIG in identify state %d\n", reply);
+ if (reply == 0x01)
+ *cold = 1;
+ else if (reply == 0x02)
+ *cold = 0;
+ else
+ return -EIO;
+ deb_info("Identify state cold = %d\n", *cold);
+ return 0;
+}
+
+static struct dvb_usb_device_properties af9005_properties;
+
+static int af9005_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return dvb_usb_device_init(intf, &af9005_properties, THIS_MODULE, NULL);
+}
+
+static struct usb_device_id af9005_usb_table[] = {
+ {USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9005)},
+ {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_USB_XE)},
+ {0},
+};
+
+MODULE_DEVICE_TABLE(usb, af9005_usb_table);
+
+static struct dvb_usb_device_properties af9005_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "af9005.fw",
+ .download_firmware = af9005_download_firmware,
+ .no_reconnect = 1,
+
+ .size_of_priv = sizeof(struct af9005_device_state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .caps =
+ DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = af9005_pid_filter,
+ /* .pid_filter_ctrl = af9005_pid_filter_control, */
+ .frontend_attach = af9005_frontend_attach,
+ /* .tuner_attach = af9005_tuner_attach, */
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 10,
+ .endpoint = 0x04,
+ .u = {
+ .bulk = {
+ .buffersize = 4096, /* actual size seen is 3948 */
+ }
+ }
+ },
+ }
+ },
+ .power_ctrl = af9005_power_ctrl,
+ .identify_state = af9005_identify_state,
+
+ .i2c_algo = &af9005_i2c_algo,
+
+ .rc_interval = 200,
+ .rc_key_map = NULL,
+ .rc_key_map_size = 0,
+ .rc_query = af9005_rc_query,
+
+ .num_device_descs = 2,
+ .devices = {
+ {.name = "Afatech DVB-T USB1.1 stick",
+ .cold_ids = {&af9005_usb_table[0], NULL},
+ .warm_ids = {NULL},
+ },
+ {.name = "TerraTec Cinergy T USB XE",
+ .cold_ids = {&af9005_usb_table[1], NULL},
+ .warm_ids = {NULL},
+ },
+ {NULL},
+ }
+};
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver af9005_usb_driver = {
+ .name = "dvb_usb_af9005",
+ .probe = af9005_usb_probe,
+ .disconnect = dvb_usb_device_exit,
+ .id_table = af9005_usb_table,
+};
+
+/* module stuff */
+static int __init af9005_usb_module_init(void)
+{
+ int result;
+ if ((result = usb_register(&af9005_usb_driver))) {
+ err("usb_register failed. (%d)", result);
+ return result;
+ }
+ rc_decode = symbol_request(af9005_rc_decode);
+ rc_keys = symbol_request(af9005_rc_keys);
+ rc_keys_size = symbol_request(af9005_rc_keys_size);
+ if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
+ err("af9005_rc_decode function not found, disabling remote");
+ af9005_properties.rc_query = NULL;
+ } else {
+ af9005_properties.rc_key_map = rc_keys;
+ af9005_properties.rc_key_map_size = *rc_keys_size;
+ }
+
+ return 0;
+}
+
+static void __exit af9005_usb_module_exit(void)
+{
+ /* release rc decode symbols */
+ if (rc_decode != NULL)
+ symbol_put(af9005_rc_decode);
+ if (rc_keys != NULL)
+ symbol_put(af9005_rc_keys);
+ if (rc_keys_size != NULL)
+ symbol_put(af9005_rc_keys_size);
+ /* deregister this driver from the USB subsystem */
+ usb_deregister(&af9005_usb_driver);
+}
+
+module_init(af9005_usb_module_init);
+module_exit(af9005_usb_module_exit);
+
+MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>");
+MODULE_DESCRIPTION("Driver for Afatech 9005 DVB-T USB1.1 stick");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
new file mode 100644
index 000000000000..0bc48a012187
--- /dev/null
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -0,0 +1,3496 @@
+/* Common header-file of the Linux driver for the Afatech 9005
+ * USB1.1 DVB-T receiver.
+ *
+ * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org)
+ *
+ * Thanks to Afatech who kindly provided information.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * see Documentation/dvb/README.dvb-usb for more information
+ */
+#ifndef _DVB_USB_AF9005_H_
+#define _DVB_USB_AF9005_H_
+
+#define DVB_USB_LOG_PREFIX "af9005"
+#include "dvb-usb.h"
+
+extern int dvb_usb_af9005_debug;
+#define deb_info(args...) dprintk(dvb_usb_af9005_debug,0x01,args)
+#define deb_xfer(args...) dprintk(dvb_usb_af9005_debug,0x02,args)
+#define deb_rc(args...) dprintk(dvb_usb_af9005_debug,0x04,args)
+#define deb_reg(args...) dprintk(dvb_usb_af9005_debug,0x08,args)
+#define deb_i2c(args...) dprintk(dvb_usb_af9005_debug,0x10,args)
+#define deb_fw(args...) dprintk(dvb_usb_af9005_debug,0x20,args)
+
+extern int dvb_usb_af9005_led;
+
+/* firmware */
+#define FW_BULKOUT_SIZE 250
+enum {
+ FW_CONFIG,
+ FW_CONFIRM,
+ FW_BOOT
+};
+
+/* af9005 commands */
+#define AF9005_OFDM_REG 0
+#define AF9005_TUNER_REG 1
+
+#define AF9005_REGISTER_RW 0x20
+#define AF9005_REGISTER_RW_ACK 0x21
+
+#define AF9005_CMD_OFDM_REG 0x00
+#define AF9005_CMD_TUNER 0x80
+#define AF9005_CMD_BURST 0x02
+#define AF9005_CMD_AUTOINC 0x04
+#define AF9005_CMD_READ 0x00
+#define AF9005_CMD_WRITE 0x01
+
+/* af9005 registers */
+#define APO_REG_RESET 0xAEFF
+
+#define APO_REG_I2C_RW_CAN_TUNER 0xF000
+#define APO_REG_I2C_RW_SILICON_TUNER 0xF001
+#define APO_REG_GPIO_RW_SILICON_TUNER 0xFFFE /* also for OFSM */
+#define APO_REG_TRIGGER_OFSM 0xFFFF /* also for OFSM */
+
+/***********************************************************************
+ * Apollo Registers from VLSI *
+ ***********************************************************************/
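+/* naming convention: for each bit field X, xd_p_reg_X / xd_r_reg_X is the
+   register address, reg_X_pos the bit offset within that register,
+   reg_X_len the field width in bits and reg_X_lsb the weight of the field
+   inside a value split across several registers */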
+#define xd_p_reg_aagc_inverted_agc 0xA000
+#define reg_aagc_inverted_agc_pos 0
+#define reg_aagc_inverted_agc_len 1
+#define reg_aagc_inverted_agc_lsb 0
+#define xd_p_reg_aagc_sign_only 0xA000
+#define reg_aagc_sign_only_pos 1
+#define reg_aagc_sign_only_len 1
+#define reg_aagc_sign_only_lsb 0
+#define xd_p_reg_aagc_slow_adc_en 0xA000
+#define reg_aagc_slow_adc_en_pos 2
+#define reg_aagc_slow_adc_en_len 1
+#define reg_aagc_slow_adc_en_lsb 0
+#define xd_p_reg_aagc_slow_adc_scale 0xA000
+#define reg_aagc_slow_adc_scale_pos 3
+#define reg_aagc_slow_adc_scale_len 5
+#define reg_aagc_slow_adc_scale_lsb 0
+#define xd_p_reg_aagc_check_slow_adc_lock 0xA001
+#define reg_aagc_check_slow_adc_lock_pos 0
+#define reg_aagc_check_slow_adc_lock_len 1
+#define reg_aagc_check_slow_adc_lock_lsb 0
+#define xd_p_reg_aagc_init_control 0xA001
+#define reg_aagc_init_control_pos 1
+#define reg_aagc_init_control_len 1
+#define reg_aagc_init_control_lsb 0
+#define xd_p_reg_aagc_total_gain_sel 0xA001
+#define reg_aagc_total_gain_sel_pos 2
+#define reg_aagc_total_gain_sel_len 2
+#define reg_aagc_total_gain_sel_lsb 0
+#define xd_p_reg_aagc_out_inv 0xA001
+#define reg_aagc_out_inv_pos 5
+#define reg_aagc_out_inv_len 1
+#define reg_aagc_out_inv_lsb 0
+#define xd_p_reg_aagc_int_en 0xA001
+#define reg_aagc_int_en_pos 6
+#define reg_aagc_int_en_len 1
+#define reg_aagc_int_en_lsb 0
+#define xd_p_reg_aagc_lock_change_flag 0xA001
+#define reg_aagc_lock_change_flag_pos 7
+#define reg_aagc_lock_change_flag_len 1
+#define reg_aagc_lock_change_flag_lsb 0
+#define xd_p_reg_aagc_rf_loop_bw_scale_acquire 0xA002
+#define reg_aagc_rf_loop_bw_scale_acquire_pos 0
+#define reg_aagc_rf_loop_bw_scale_acquire_len 5
+#define reg_aagc_rf_loop_bw_scale_acquire_lsb 0
+#define xd_p_reg_aagc_rf_loop_bw_scale_track 0xA003
+#define reg_aagc_rf_loop_bw_scale_track_pos 0
+#define reg_aagc_rf_loop_bw_scale_track_len 5
+#define reg_aagc_rf_loop_bw_scale_track_lsb 0
+#define xd_p_reg_aagc_if_loop_bw_scale_acquire 0xA004
+#define reg_aagc_if_loop_bw_scale_acquire_pos 0
+#define reg_aagc_if_loop_bw_scale_acquire_len 5
+#define reg_aagc_if_loop_bw_scale_acquire_lsb 0
+#define xd_p_reg_aagc_if_loop_bw_scale_track 0xA005
+#define reg_aagc_if_loop_bw_scale_track_pos 0
+#define reg_aagc_if_loop_bw_scale_track_len 5
+#define reg_aagc_if_loop_bw_scale_track_lsb 0
+#define xd_p_reg_aagc_max_rf_agc_7_0 0xA006
+#define reg_aagc_max_rf_agc_7_0_pos 0
+#define reg_aagc_max_rf_agc_7_0_len 8
+#define reg_aagc_max_rf_agc_7_0_lsb 0
+#define xd_p_reg_aagc_max_rf_agc_9_8 0xA007
+#define reg_aagc_max_rf_agc_9_8_pos 0
+#define reg_aagc_max_rf_agc_9_8_len 2
+#define reg_aagc_max_rf_agc_9_8_lsb 8
+#define xd_p_reg_aagc_min_rf_agc_7_0 0xA008
+#define reg_aagc_min_rf_agc_7_0_pos 0
+#define reg_aagc_min_rf_agc_7_0_len 8
+#define reg_aagc_min_rf_agc_7_0_lsb 0
+#define xd_p_reg_aagc_min_rf_agc_9_8 0xA009
+#define reg_aagc_min_rf_agc_9_8_pos 0
+#define reg_aagc_min_rf_agc_9_8_len 2
+#define reg_aagc_min_rf_agc_9_8_lsb 8
+#define xd_p_reg_aagc_max_if_agc_7_0 0xA00A
+#define reg_aagc_max_if_agc_7_0_pos 0
+#define reg_aagc_max_if_agc_7_0_len 8
+#define reg_aagc_max_if_agc_7_0_lsb 0
+#define xd_p_reg_aagc_max_if_agc_9_8 0xA00B
+#define reg_aagc_max_if_agc_9_8_pos 0
+#define reg_aagc_max_if_agc_9_8_len 2
+#define reg_aagc_max_if_agc_9_8_lsb 8
+#define xd_p_reg_aagc_min_if_agc_7_0 0xA00C
+#define reg_aagc_min_if_agc_7_0_pos 0
+#define reg_aagc_min_if_agc_7_0_len 8
+#define reg_aagc_min_if_agc_7_0_lsb 0
+#define xd_p_reg_aagc_min_if_agc_9_8 0xA00D
+#define reg_aagc_min_if_agc_9_8_pos 0
+#define reg_aagc_min_if_agc_9_8_len 2
+#define reg_aagc_min_if_agc_9_8_lsb 8
+#define xd_p_reg_aagc_lock_sample_scale 0xA00E
+#define reg_aagc_lock_sample_scale_pos 0
+#define reg_aagc_lock_sample_scale_len 5
+#define reg_aagc_lock_sample_scale_lsb 0
+#define xd_p_reg_aagc_rf_agc_lock_scale_acquire 0xA00F
+#define reg_aagc_rf_agc_lock_scale_acquire_pos 0
+#define reg_aagc_rf_agc_lock_scale_acquire_len 3
+#define reg_aagc_rf_agc_lock_scale_acquire_lsb 0
+#define xd_p_reg_aagc_rf_agc_lock_scale_track 0xA00F
+#define reg_aagc_rf_agc_lock_scale_track_pos 3
+#define reg_aagc_rf_agc_lock_scale_track_len 3
+#define reg_aagc_rf_agc_lock_scale_track_lsb 0
+#define xd_p_reg_aagc_if_agc_lock_scale_acquire 0xA010
+#define reg_aagc_if_agc_lock_scale_acquire_pos 0
+#define reg_aagc_if_agc_lock_scale_acquire_len 3
+#define reg_aagc_if_agc_lock_scale_acquire_lsb 0
+#define xd_p_reg_aagc_if_agc_lock_scale_track 0xA010
+#define reg_aagc_if_agc_lock_scale_track_pos 3
+#define reg_aagc_if_agc_lock_scale_track_len 3
+#define reg_aagc_if_agc_lock_scale_track_lsb 0
+#define xd_p_reg_aagc_rf_top_numerator_7_0 0xA011
+#define reg_aagc_rf_top_numerator_7_0_pos 0
+#define reg_aagc_rf_top_numerator_7_0_len 8
+#define reg_aagc_rf_top_numerator_7_0_lsb 0
+#define xd_p_reg_aagc_rf_top_numerator_9_8 0xA012
+#define reg_aagc_rf_top_numerator_9_8_pos 0
+#define reg_aagc_rf_top_numerator_9_8_len 2
+#define reg_aagc_rf_top_numerator_9_8_lsb 8
+#define xd_p_reg_aagc_if_top_numerator_7_0 0xA013
+#define reg_aagc_if_top_numerator_7_0_pos 0
+#define reg_aagc_if_top_numerator_7_0_len 8
+#define reg_aagc_if_top_numerator_7_0_lsb 0
+#define xd_p_reg_aagc_if_top_numerator_9_8 0xA014
+#define reg_aagc_if_top_numerator_9_8_pos 0
+#define reg_aagc_if_top_numerator_9_8_len 2
+#define reg_aagc_if_top_numerator_9_8_lsb 8
+#define xd_p_reg_aagc_adc_out_desired_7_0 0xA015
+#define reg_aagc_adc_out_desired_7_0_pos 0
+#define reg_aagc_adc_out_desired_7_0_len 8
+#define reg_aagc_adc_out_desired_7_0_lsb 0
+#define xd_p_reg_aagc_adc_out_desired_8 0xA016
+#define reg_aagc_adc_out_desired_8_pos 0
+#define reg_aagc_adc_out_desired_8_len 1
+#define reg_aagc_adc_out_desired_8_lsb 0
+#define xd_p_reg_aagc_fixed_gain 0xA016
+#define reg_aagc_fixed_gain_pos 3
+#define reg_aagc_fixed_gain_len 1
+#define reg_aagc_fixed_gain_lsb 0
+#define xd_p_reg_aagc_lock_count_th 0xA016
+#define reg_aagc_lock_count_th_pos 4
+#define reg_aagc_lock_count_th_len 4
+#define reg_aagc_lock_count_th_lsb 0
+#define xd_p_reg_aagc_fixed_rf_agc_control_7_0 0xA017
+#define reg_aagc_fixed_rf_agc_control_7_0_pos 0
+#define reg_aagc_fixed_rf_agc_control_7_0_len 8
+#define reg_aagc_fixed_rf_agc_control_7_0_lsb 0
+#define xd_p_reg_aagc_fixed_rf_agc_control_15_8 0xA018
+#define reg_aagc_fixed_rf_agc_control_15_8_pos 0
+#define reg_aagc_fixed_rf_agc_control_15_8_len 8
+#define reg_aagc_fixed_rf_agc_control_15_8_lsb 8
+#define xd_p_reg_aagc_fixed_rf_agc_control_23_16 0xA019
+#define reg_aagc_fixed_rf_agc_control_23_16_pos 0
+#define reg_aagc_fixed_rf_agc_control_23_16_len 8
+#define reg_aagc_fixed_rf_agc_control_23_16_lsb 16
+#define xd_p_reg_aagc_fixed_rf_agc_control_30_24 0xA01A
+#define reg_aagc_fixed_rf_agc_control_30_24_pos 0
+#define reg_aagc_fixed_rf_agc_control_30_24_len 7
+#define reg_aagc_fixed_rf_agc_control_30_24_lsb 24
+#define xd_p_reg_aagc_fixed_if_agc_control_7_0 0xA01B
+#define reg_aagc_fixed_if_agc_control_7_0_pos 0
+#define reg_aagc_fixed_if_agc_control_7_0_len 8
+#define reg_aagc_fixed_if_agc_control_7_0_lsb 0
+#define xd_p_reg_aagc_fixed_if_agc_control_15_8 0xA01C
+#define reg_aagc_fixed_if_agc_control_15_8_pos 0
+#define reg_aagc_fixed_if_agc_control_15_8_len 8
+#define reg_aagc_fixed_if_agc_control_15_8_lsb 8
+#define xd_p_reg_aagc_fixed_if_agc_control_23_16 0xA01D
+#define reg_aagc_fixed_if_agc_control_23_16_pos 0
+#define reg_aagc_fixed_if_agc_control_23_16_len 8
+#define reg_aagc_fixed_if_agc_control_23_16_lsb 16
+#define xd_p_reg_aagc_fixed_if_agc_control_30_24 0xA01E
+#define reg_aagc_fixed_if_agc_control_30_24_pos 0
+#define reg_aagc_fixed_if_agc_control_30_24_len 7
+#define reg_aagc_fixed_if_agc_control_30_24_lsb 24
+#define xd_p_reg_aagc_rf_agc_unlock_numerator 0xA01F
+#define reg_aagc_rf_agc_unlock_numerator_pos 0
+#define reg_aagc_rf_agc_unlock_numerator_len 6
+#define reg_aagc_rf_agc_unlock_numerator_lsb 0
+#define xd_p_reg_aagc_if_agc_unlock_numerator 0xA020
+#define reg_aagc_if_agc_unlock_numerator_pos 0
+#define reg_aagc_if_agc_unlock_numerator_len 6
+#define reg_aagc_if_agc_unlock_numerator_lsb 0
+#define xd_p_reg_unplug_th 0xA021
+#define reg_unplug_th_pos 0
+#define reg_unplug_th_len 8
+#define reg_aagc_rf_x0_lsb 0
+#define xd_p_reg_weak_signal_rfagc_thr 0xA022
+#define reg_weak_signal_rfagc_thr_pos 0
+#define reg_weak_signal_rfagc_thr_len 8
+#define reg_weak_signal_rfagc_thr_lsb 0
+#define xd_p_reg_unplug_rf_gain_th 0xA023
+#define reg_unplug_rf_gain_th_pos 0
+#define reg_unplug_rf_gain_th_len 8
+#define reg_unplug_rf_gain_th_lsb 0
+#define xd_p_reg_unplug_dtop_rf_gain_th 0xA024
+#define reg_unplug_dtop_rf_gain_th_pos 0
+#define reg_unplug_dtop_rf_gain_th_len 8
+#define reg_unplug_dtop_rf_gain_th_lsb 0
+#define xd_p_reg_unplug_dtop_if_gain_th 0xA025
+#define reg_unplug_dtop_if_gain_th_pos 0
+#define reg_unplug_dtop_if_gain_th_len 8
+#define reg_unplug_dtop_if_gain_th_lsb 0
+#define xd_p_reg_top_recover_at_unplug_en 0xA026
+#define reg_top_recover_at_unplug_en_pos 0
+#define reg_top_recover_at_unplug_en_len 1
+#define reg_top_recover_at_unplug_en_lsb 0
+#define xd_p_reg_aagc_rf_x6 0xA027
+#define reg_aagc_rf_x6_pos 0
+#define reg_aagc_rf_x6_len 8
+#define reg_aagc_rf_x6_lsb 0
+#define xd_p_reg_aagc_rf_x7 0xA028
+#define reg_aagc_rf_x7_pos 0
+#define reg_aagc_rf_x7_len 8
+#define reg_aagc_rf_x7_lsb 0
+#define xd_p_reg_aagc_rf_x8 0xA029
+#define reg_aagc_rf_x8_pos 0
+#define reg_aagc_rf_x8_len 8
+#define reg_aagc_rf_x8_lsb 0
+#define xd_p_reg_aagc_rf_x9 0xA02A
+#define reg_aagc_rf_x9_pos 0
+#define reg_aagc_rf_x9_len 8
+#define reg_aagc_rf_x9_lsb 0
+#define xd_p_reg_aagc_rf_x10 0xA02B
+#define reg_aagc_rf_x10_pos 0
+#define reg_aagc_rf_x10_len 8
+#define reg_aagc_rf_x10_lsb 0
+#define xd_p_reg_aagc_rf_x11 0xA02C
+#define reg_aagc_rf_x11_pos 0
+#define reg_aagc_rf_x11_len 8
+#define reg_aagc_rf_x11_lsb 0
+#define xd_p_reg_aagc_rf_x12 0xA02D
+#define reg_aagc_rf_x12_pos 0
+#define reg_aagc_rf_x12_len 8
+#define reg_aagc_rf_x12_lsb 0
+#define xd_p_reg_aagc_rf_x13 0xA02E
+#define reg_aagc_rf_x13_pos 0
+#define reg_aagc_rf_x13_len 8
+#define reg_aagc_rf_x13_lsb 0
+#define xd_p_reg_aagc_if_x0 0xA02F
+#define reg_aagc_if_x0_pos 0
+#define reg_aagc_if_x0_len 8
+#define reg_aagc_if_x0_lsb 0
+#define xd_p_reg_aagc_if_x1 0xA030
+#define reg_aagc_if_x1_pos 0
+#define reg_aagc_if_x1_len 8
+#define reg_aagc_if_x1_lsb 0
+#define xd_p_reg_aagc_if_x2 0xA031
+#define reg_aagc_if_x2_pos 0
+#define reg_aagc_if_x2_len 8
+#define reg_aagc_if_x2_lsb 0
+#define xd_p_reg_aagc_if_x3 0xA032
+#define reg_aagc_if_x3_pos 0
+#define reg_aagc_if_x3_len 8
+#define reg_aagc_if_x3_lsb 0
+#define xd_p_reg_aagc_if_x4 0xA033
+#define reg_aagc_if_x4_pos 0
+#define reg_aagc_if_x4_len 8
+#define reg_aagc_if_x4_lsb 0
+#define xd_p_reg_aagc_if_x5 0xA034
+#define reg_aagc_if_x5_pos 0
+#define reg_aagc_if_x5_len 8
+#define reg_aagc_if_x5_lsb 0
+#define xd_p_reg_aagc_if_x6 0xA035
+#define reg_aagc_if_x6_pos 0
+#define reg_aagc_if_x6_len 8
+#define reg_aagc_if_x6_lsb 0
+#define xd_p_reg_aagc_if_x7 0xA036
+#define reg_aagc_if_x7_pos 0
+#define reg_aagc_if_x7_len 8
+#define reg_aagc_if_x7_lsb 0
+#define xd_p_reg_aagc_if_x8 0xA037
+#define reg_aagc_if_x8_pos 0
+#define reg_aagc_if_x8_len 8
+#define reg_aagc_if_x8_lsb 0
+#define xd_p_reg_aagc_if_x9 0xA038
+#define reg_aagc_if_x9_pos 0
+#define reg_aagc_if_x9_len 8
+#define reg_aagc_if_x9_lsb 0
+#define xd_p_reg_aagc_if_x10 0xA039
+#define reg_aagc_if_x10_pos 0
+#define reg_aagc_if_x10_len 8
+#define reg_aagc_if_x10_lsb 0
+#define xd_p_reg_aagc_if_x11 0xA03A
+#define reg_aagc_if_x11_pos 0
+#define reg_aagc_if_x11_len 8
+#define reg_aagc_if_x11_lsb 0
+#define xd_p_reg_aagc_if_x12 0xA03B
+#define reg_aagc_if_x12_pos 0
+#define reg_aagc_if_x12_len 8
+#define reg_aagc_if_x12_lsb 0
+#define xd_p_reg_aagc_if_x13 0xA03C
+#define reg_aagc_if_x13_pos 0
+#define reg_aagc_if_x13_len 8
+#define reg_aagc_if_x13_lsb 0
+#define xd_p_reg_aagc_min_rf_ctl_8bit_for_dca 0xA03D
+#define reg_aagc_min_rf_ctl_8bit_for_dca_pos 0
+#define reg_aagc_min_rf_ctl_8bit_for_dca_len 8
+#define reg_aagc_min_rf_ctl_8bit_for_dca_lsb 0
+#define xd_p_reg_aagc_min_if_ctl_8bit_for_dca 0xA03E
+#define reg_aagc_min_if_ctl_8bit_for_dca_pos 0
+#define reg_aagc_min_if_ctl_8bit_for_dca_len 8
+#define reg_aagc_min_if_ctl_8bit_for_dca_lsb 0
+#define xd_r_reg_aagc_total_gain_7_0 0xA070
+#define reg_aagc_total_gain_7_0_pos 0
+#define reg_aagc_total_gain_7_0_len 8
+#define reg_aagc_total_gain_7_0_lsb 0
+#define xd_r_reg_aagc_total_gain_15_8 0xA071
+#define reg_aagc_total_gain_15_8_pos 0
+#define reg_aagc_total_gain_15_8_len 8
+#define reg_aagc_total_gain_15_8_lsb 8
+#define xd_p_reg_aagc_in_sat_cnt_7_0 0xA074
+#define reg_aagc_in_sat_cnt_7_0_pos 0
+#define reg_aagc_in_sat_cnt_7_0_len 8
+#define reg_aagc_in_sat_cnt_7_0_lsb 0
+#define xd_p_reg_aagc_in_sat_cnt_15_8 0xA075
+#define reg_aagc_in_sat_cnt_15_8_pos 0
+#define reg_aagc_in_sat_cnt_15_8_len 8
+#define reg_aagc_in_sat_cnt_15_8_lsb 8
+#define xd_p_reg_aagc_in_sat_cnt_23_16 0xA076
+#define reg_aagc_in_sat_cnt_23_16_pos 0
+#define reg_aagc_in_sat_cnt_23_16_len 8
+#define reg_aagc_in_sat_cnt_23_16_lsb 16
+#define xd_p_reg_aagc_in_sat_cnt_31_24 0xA077
+#define reg_aagc_in_sat_cnt_31_24_pos 0
+#define reg_aagc_in_sat_cnt_31_24_len 8
+#define reg_aagc_in_sat_cnt_31_24_lsb 24
+#define xd_r_reg_aagc_digital_rf_volt_7_0 0xA078
+#define reg_aagc_digital_rf_volt_7_0_pos 0
+#define reg_aagc_digital_rf_volt_7_0_len 8
+#define reg_aagc_digital_rf_volt_7_0_lsb 0
+#define xd_r_reg_aagc_digital_rf_volt_9_8 0xA079
+#define reg_aagc_digital_rf_volt_9_8_pos 0
+#define reg_aagc_digital_rf_volt_9_8_len 2
+#define reg_aagc_digital_rf_volt_9_8_lsb 8
+#define xd_r_reg_aagc_digital_if_volt_7_0 0xA07A
+#define reg_aagc_digital_if_volt_7_0_pos 0
+#define reg_aagc_digital_if_volt_7_0_len 8
+#define reg_aagc_digital_if_volt_7_0_lsb 0
+#define xd_r_reg_aagc_digital_if_volt_9_8 0xA07B
+#define reg_aagc_digital_if_volt_9_8_pos 0
+#define reg_aagc_digital_if_volt_9_8_len 2
+#define reg_aagc_digital_if_volt_9_8_lsb 8
+#define xd_r_reg_aagc_rf_gain 0xA07C
+#define reg_aagc_rf_gain_pos 0
+#define reg_aagc_rf_gain_len 8
+#define reg_aagc_rf_gain_lsb 0
+#define xd_r_reg_aagc_if_gain 0xA07D
+#define reg_aagc_if_gain_pos 0
+#define reg_aagc_if_gain_len 8
+#define reg_aagc_if_gain_lsb 0
+#define xd_p_tinr_imp_indicator 0xA080
+#define tinr_imp_indicator_pos 0
+#define tinr_imp_indicator_len 2
+#define tinr_imp_indicator_lsb 0
+#define xd_p_reg_tinr_fifo_size 0xA080
+#define reg_tinr_fifo_size_pos 2
+#define reg_tinr_fifo_size_len 5
+#define reg_tinr_fifo_size_lsb 0
+#define xd_p_reg_tinr_saturation_cnt_th 0xA081
+#define reg_tinr_saturation_cnt_th_pos 0
+#define reg_tinr_saturation_cnt_th_len 4
+#define reg_tinr_saturation_cnt_th_lsb 0
+#define xd_p_reg_tinr_saturation_th_3_0 0xA081
+#define reg_tinr_saturation_th_3_0_pos 4
+#define reg_tinr_saturation_th_3_0_len 4
+#define reg_tinr_saturation_th_3_0_lsb 0
+#define xd_p_reg_tinr_saturation_th_8_4 0xA082
+#define reg_tinr_saturation_th_8_4_pos 0
+#define reg_tinr_saturation_th_8_4_len 5
+#define reg_tinr_saturation_th_8_4_lsb 4
+#define xd_p_reg_tinr_imp_duration_th_2k_7_0 0xA083
+#define reg_tinr_imp_duration_th_2k_7_0_pos 0
+#define reg_tinr_imp_duration_th_2k_7_0_len 8
+#define reg_tinr_imp_duration_th_2k_7_0_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_2k_8 0xA084
+#define reg_tinr_imp_duration_th_2k_8_pos 0
+#define reg_tinr_imp_duration_th_2k_8_len 1
+#define reg_tinr_imp_duration_th_2k_8_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_8k_7_0 0xA085
+#define reg_tinr_imp_duration_th_8k_7_0_pos 0
+#define reg_tinr_imp_duration_th_8k_7_0_len 8
+#define reg_tinr_imp_duration_th_8k_7_0_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_8k_10_8 0xA086
+#define reg_tinr_imp_duration_th_8k_10_8_pos 0
+#define reg_tinr_imp_duration_th_8k_10_8_len 3
+#define reg_tinr_imp_duration_th_8k_10_8_lsb 8
+#define xd_p_reg_tinr_freq_ratio_6m_7_0 0xA087
+#define reg_tinr_freq_ratio_6m_7_0_pos 0
+#define reg_tinr_freq_ratio_6m_7_0_len 8
+#define reg_tinr_freq_ratio_6m_7_0_lsb 0
+#define xd_p_reg_tinr_freq_ratio_6m_12_8 0xA088
+#define reg_tinr_freq_ratio_6m_12_8_pos 0
+#define reg_tinr_freq_ratio_6m_12_8_len 5
+#define reg_tinr_freq_ratio_6m_12_8_lsb 8
+#define xd_p_reg_tinr_freq_ratio_7m_7_0 0xA089
+#define reg_tinr_freq_ratio_7m_7_0_pos 0
+#define reg_tinr_freq_ratio_7m_7_0_len 8
+#define reg_tinr_freq_ratio_7m_7_0_lsb 0
+#define xd_p_reg_tinr_freq_ratio_7m_12_8 0xA08A
+#define reg_tinr_freq_ratio_7m_12_8_pos 0
+#define reg_tinr_freq_ratio_7m_12_8_len 5
+#define reg_tinr_freq_ratio_7m_12_8_lsb 8
+#define xd_p_reg_tinr_freq_ratio_8m_7_0 0xA08B
+#define reg_tinr_freq_ratio_8m_7_0_pos 0
+#define reg_tinr_freq_ratio_8m_7_0_len 8
+#define reg_tinr_freq_ratio_8m_7_0_lsb 0
+#define xd_p_reg_tinr_freq_ratio_8m_12_8 0xA08C
+#define reg_tinr_freq_ratio_8m_12_8_pos 0
+#define reg_tinr_freq_ratio_8m_12_8_len 5
+#define reg_tinr_freq_ratio_8m_12_8_lsb 8
+#define xd_p_reg_tinr_imp_duration_th_low_2k 0xA08D
+#define reg_tinr_imp_duration_th_low_2k_pos 0
+#define reg_tinr_imp_duration_th_low_2k_len 8
+#define reg_tinr_imp_duration_th_low_2k_lsb 0
+#define xd_p_reg_tinr_imp_duration_th_low_8k 0xA08E
+#define reg_tinr_imp_duration_th_low_8k_pos 0
+#define reg_tinr_imp_duration_th_low_8k_len 8
+#define reg_tinr_imp_duration_th_low_8k_lsb 0
+#define xd_r_reg_tinr_counter_7_0 0xA090
+#define reg_tinr_counter_7_0_pos 0
+#define reg_tinr_counter_7_0_len 8
+#define reg_tinr_counter_7_0_lsb 0
+#define xd_r_reg_tinr_counter_15_8 0xA091
+#define reg_tinr_counter_15_8_pos 0
+#define reg_tinr_counter_15_8_len 8
+#define reg_tinr_counter_15_8_lsb 8
+#define xd_p_reg_tinr_adative_tinr_en 0xA093
+#define reg_tinr_adative_tinr_en_pos 0
+#define reg_tinr_adative_tinr_en_len 1
+#define reg_tinr_adative_tinr_en_lsb 0
+#define xd_p_reg_tinr_peak_fifo_size 0xA093
+#define reg_tinr_peak_fifo_size_pos 1
+#define reg_tinr_peak_fifo_size_len 5
+#define reg_tinr_peak_fifo_size_lsb 0
+#define xd_p_reg_tinr_counter_rst 0xA093
+#define reg_tinr_counter_rst_pos 6
+#define reg_tinr_counter_rst_len 1
+#define reg_tinr_counter_rst_lsb 0
+#define xd_p_reg_tinr_search_period_7_0 0xA094
+#define reg_tinr_search_period_7_0_pos 0
+#define reg_tinr_search_period_7_0_len 8
+#define reg_tinr_search_period_7_0_lsb 0
+#define xd_p_reg_tinr_search_period_15_8 0xA095
+#define reg_tinr_search_period_15_8_pos 0
+#define reg_tinr_search_period_15_8_len 8
+#define reg_tinr_search_period_15_8_lsb 8
+#define xd_p_reg_ccifs_fcw_7_0 0xA0A0
+#define reg_ccifs_fcw_7_0_pos 0
+#define reg_ccifs_fcw_7_0_len 8
+#define reg_ccifs_fcw_7_0_lsb 0
+#define xd_p_reg_ccifs_fcw_12_8 0xA0A1
+#define reg_ccifs_fcw_12_8_pos 0
+#define reg_ccifs_fcw_12_8_len 5
+#define reg_ccifs_fcw_12_8_lsb 8
+#define xd_p_reg_ccifs_spec_inv 0xA0A1
+#define reg_ccifs_spec_inv_pos 5
+#define reg_ccifs_spec_inv_len 1
+#define reg_ccifs_spec_inv_lsb 0
+#define xd_p_reg_gp_trigger 0xA0A2
+#define reg_gp_trigger_pos 0
+#define reg_gp_trigger_len 1
+#define reg_gp_trigger_lsb 0
+#define xd_p_reg_trigger_sel 0xA0A2
+#define reg_trigger_sel_pos 1
+#define reg_trigger_sel_len 2
+#define reg_trigger_sel_lsb 0
+#define xd_p_reg_debug_ofdm 0xA0A2
+#define reg_debug_ofdm_pos 3
+#define reg_debug_ofdm_len 2
+#define reg_debug_ofdm_lsb 0
+#define xd_p_reg_trigger_module_sel 0xA0A3
+#define reg_trigger_module_sel_pos 0
+#define reg_trigger_module_sel_len 6
+#define reg_trigger_module_sel_lsb 0
+#define xd_p_reg_trigger_set_sel 0xA0A4
+#define reg_trigger_set_sel_pos 0
+#define reg_trigger_set_sel_len 6
+#define reg_trigger_set_sel_lsb 0
+#define xd_p_reg_fw_int_mask_n 0xA0A4
+#define reg_fw_int_mask_n_pos 6
+#define reg_fw_int_mask_n_len 1
+#define reg_fw_int_mask_n_lsb 0
+#define xd_p_reg_debug_group 0xA0A5
+#define reg_debug_group_pos 0
+#define reg_debug_group_len 4
+#define reg_debug_group_lsb 0
+#define xd_p_reg_odbg_clk_sel 0xA0A5
+#define reg_odbg_clk_sel_pos 4
+#define reg_odbg_clk_sel_len 2
+#define reg_odbg_clk_sel_lsb 0
+#define xd_p_reg_ccif_sc 0xA0C0
+#define reg_ccif_sc_pos 0
+#define reg_ccif_sc_len 4
+#define reg_ccif_sc_lsb 0
+#define xd_r_reg_ccif_saturate 0xA0C1
+#define reg_ccif_saturate_pos 0
+#define reg_ccif_saturate_len 2
+#define reg_ccif_saturate_lsb 0
+#define xd_r_reg_antif_saturate 0xA0C1
+#define reg_antif_saturate_pos 2
+#define reg_antif_saturate_len 4
+#define reg_antif_saturate_lsb 0
+#define xd_r_reg_acif_saturate 0xA0C2
+#define reg_acif_saturate_pos 0
+#define reg_acif_saturate_len 8
+#define reg_acif_saturate_lsb 0
+#define xd_p_reg_tmr_timer0_threshold_7_0 0xA0C8
+#define reg_tmr_timer0_threshold_7_0_pos 0
+#define reg_tmr_timer0_threshold_7_0_len 8
+#define reg_tmr_timer0_threshold_7_0_lsb 0
+#define xd_p_reg_tmr_timer0_threshold_15_8 0xA0C9
+#define reg_tmr_timer0_threshold_15_8_pos 0
+#define reg_tmr_timer0_threshold_15_8_len 8
+#define reg_tmr_timer0_threshold_15_8_lsb 8
+#define xd_p_reg_tmr_timer0_enable 0xA0CA
+#define reg_tmr_timer0_enable_pos 0
+#define reg_tmr_timer0_enable_len 1
+#define reg_tmr_timer0_enable_lsb 0
+#define xd_p_reg_tmr_timer0_clk_sel 0xA0CA
+#define reg_tmr_timer0_clk_sel_pos 1
+#define reg_tmr_timer0_clk_sel_len 1
+#define reg_tmr_timer0_clk_sel_lsb 0
+#define xd_p_reg_tmr_timer0_int 0xA0CA
+#define reg_tmr_timer0_int_pos 2
+#define reg_tmr_timer0_int_len 1
+#define reg_tmr_timer0_int_lsb 0
+#define xd_p_reg_tmr_timer0_rst 0xA0CA
+#define reg_tmr_timer0_rst_pos 3
+#define reg_tmr_timer0_rst_len 1
+#define reg_tmr_timer0_rst_lsb 0
+#define xd_r_reg_tmr_timer0_count_7_0 0xA0CB
+#define reg_tmr_timer0_count_7_0_pos 0
+#define reg_tmr_timer0_count_7_0_len 8
+#define reg_tmr_timer0_count_7_0_lsb 0
+#define xd_r_reg_tmr_timer0_count_15_8 0xA0CC
+#define reg_tmr_timer0_count_15_8_pos 0
+#define reg_tmr_timer0_count_15_8_len 8
+#define reg_tmr_timer0_count_15_8_lsb 8
+#define xd_p_reg_suspend 0xA0CD
+#define reg_suspend_pos 0
+#define reg_suspend_len 1
+#define reg_suspend_lsb 0
+#define xd_p_reg_suspend_rdy 0xA0CD
+#define reg_suspend_rdy_pos 1
+#define reg_suspend_rdy_len 1
+#define reg_suspend_rdy_lsb 0
+#define xd_p_reg_resume 0xA0CD
+#define reg_resume_pos 2
+#define reg_resume_len 1
+#define reg_resume_lsb 0
+#define xd_p_reg_resume_rdy 0xA0CD
+#define reg_resume_rdy_pos 3
+#define reg_resume_rdy_len 1
+#define reg_resume_rdy_lsb 0
+#define xd_p_reg_fmf 0xA0CE
+#define reg_fmf_pos 0
+#define reg_fmf_len 8
+#define reg_fmf_lsb 0
+#define xd_p_ccid_accumulate_num_2k_7_0 0xA100
+#define ccid_accumulate_num_2k_7_0_pos 0
+#define ccid_accumulate_num_2k_7_0_len 8
+#define ccid_accumulate_num_2k_7_0_lsb 0
+#define xd_p_ccid_accumulate_num_2k_12_8 0xA101
+#define ccid_accumulate_num_2k_12_8_pos 0
+#define ccid_accumulate_num_2k_12_8_len 5
+#define ccid_accumulate_num_2k_12_8_lsb 8
+#define xd_p_ccid_accumulate_num_8k_7_0 0xA102
+#define ccid_accumulate_num_8k_7_0_pos 0
+#define ccid_accumulate_num_8k_7_0_len 8
+#define ccid_accumulate_num_8k_7_0_lsb 0
+#define xd_p_ccid_accumulate_num_8k_14_8 0xA103
+#define ccid_accumulate_num_8k_14_8_pos 0
+#define ccid_accumulate_num_8k_14_8_len 7
+#define ccid_accumulate_num_8k_14_8_lsb 8
+#define xd_p_ccid_desired_level_0 0xA103
+#define ccid_desired_level_0_pos 7
+#define ccid_desired_level_0_len 1
+#define ccid_desired_level_0_lsb 0
+#define xd_p_ccid_desired_level_8_1 0xA104
+#define ccid_desired_level_8_1_pos 0
+#define ccid_desired_level_8_1_len 8
+#define ccid_desired_level_8_1_lsb 1
+#define xd_p_ccid_apply_delay 0xA105
+#define ccid_apply_delay_pos 0
+#define ccid_apply_delay_len 7
+#define ccid_apply_delay_lsb 0
+#define xd_p_ccid_CCID_Threshold1 0xA106
+#define ccid_CCID_Threshold1_pos 0
+#define ccid_CCID_Threshold1_len 8
+#define ccid_CCID_Threshold1_lsb 0
+#define xd_p_ccid_CCID_Threshold2 0xA107
+#define ccid_CCID_Threshold2_pos 0
+#define ccid_CCID_Threshold2_len 8
+#define ccid_CCID_Threshold2_lsb 0
+#define xd_p_reg_ccid_gain_scale 0xA108
+#define reg_ccid_gain_scale_pos 0
+#define reg_ccid_gain_scale_len 4
+#define reg_ccid_gain_scale_lsb 0
+#define xd_p_reg_ccid2_passband_gain_set 0xA108
+#define reg_ccid2_passband_gain_set_pos 4
+#define reg_ccid2_passband_gain_set_len 4
+#define reg_ccid2_passband_gain_set_lsb 0
+#define xd_r_ccid_multiplier_7_0 0xA109
+#define ccid_multiplier_7_0_pos 0
+#define ccid_multiplier_7_0_len 8
+#define ccid_multiplier_7_0_lsb 0
+#define xd_r_ccid_multiplier_15_8 0xA10A
+#define ccid_multiplier_15_8_pos 0
+#define ccid_multiplier_15_8_len 8
+#define ccid_multiplier_15_8_lsb 8
+#define xd_r_ccid_right_shift_bits 0xA10B
+#define ccid_right_shift_bits_pos 0
+#define ccid_right_shift_bits_len 4
+#define ccid_right_shift_bits_lsb 0
+#define xd_r_reg_ccid_sx_7_0 0xA10C
+#define reg_ccid_sx_7_0_pos 0
+#define reg_ccid_sx_7_0_len 8
+#define reg_ccid_sx_7_0_lsb 0
+#define xd_r_reg_ccid_sx_15_8 0xA10D
+#define reg_ccid_sx_15_8_pos 0
+#define reg_ccid_sx_15_8_len 8
+#define reg_ccid_sx_15_8_lsb 8
+#define xd_r_reg_ccid_sx_21_16 0xA10E
+#define reg_ccid_sx_21_16_pos 0
+#define reg_ccid_sx_21_16_len 6
+#define reg_ccid_sx_21_16_lsb 16
+#define xd_r_reg_ccid_sy_7_0 0xA110
+#define reg_ccid_sy_7_0_pos 0
+#define reg_ccid_sy_7_0_len 8
+#define reg_ccid_sy_7_0_lsb 0
+#define xd_r_reg_ccid_sy_15_8 0xA111
+#define reg_ccid_sy_15_8_pos 0
+#define reg_ccid_sy_15_8_len 8
+#define reg_ccid_sy_15_8_lsb 8
+#define xd_r_reg_ccid_sy_23_16 0xA112
+#define reg_ccid_sy_23_16_pos 0
+#define reg_ccid_sy_23_16_len 8
+#define reg_ccid_sy_23_16_lsb 16
+#define xd_r_reg_ccid2_sz_7_0 0xA114
+#define reg_ccid2_sz_7_0_pos 0
+#define reg_ccid2_sz_7_0_len 8
+#define reg_ccid2_sz_7_0_lsb 0
+#define xd_r_reg_ccid2_sz_15_8 0xA115
+#define reg_ccid2_sz_15_8_pos 0
+#define reg_ccid2_sz_15_8_len 8
+#define reg_ccid2_sz_15_8_lsb 8
+#define xd_r_reg_ccid2_sz_23_16 0xA116
+#define reg_ccid2_sz_23_16_pos 0
+#define reg_ccid2_sz_23_16_len 8
+#define reg_ccid2_sz_23_16_lsb 16
+#define xd_r_reg_ccid2_sz_25_24 0xA117
+#define reg_ccid2_sz_25_24_pos 0
+#define reg_ccid2_sz_25_24_len 2
+#define reg_ccid2_sz_25_24_lsb 24
+#define xd_r_reg_ccid2_sy_7_0 0xA118
+#define reg_ccid2_sy_7_0_pos 0
+#define reg_ccid2_sy_7_0_len 8
+#define reg_ccid2_sy_7_0_lsb 0
+#define xd_r_reg_ccid2_sy_15_8 0xA119
+#define reg_ccid2_sy_15_8_pos 0
+#define reg_ccid2_sy_15_8_len 8
+#define reg_ccid2_sy_15_8_lsb 8
+#define xd_r_reg_ccid2_sy_23_16 0xA11A
+#define reg_ccid2_sy_23_16_pos 0
+#define reg_ccid2_sy_23_16_len 8
+#define reg_ccid2_sy_23_16_lsb 16
+#define xd_r_reg_ccid2_sy_25_24 0xA11B
+#define reg_ccid2_sy_25_24_pos 0
+#define reg_ccid2_sy_25_24_len 2
+#define reg_ccid2_sy_25_24_lsb 24
+#define xd_p_dagc1_accumulate_num_2k_7_0 0xA120
+#define dagc1_accumulate_num_2k_7_0_pos 0
+#define dagc1_accumulate_num_2k_7_0_len 8
+#define dagc1_accumulate_num_2k_7_0_lsb 0
+#define xd_p_dagc1_accumulate_num_2k_12_8 0xA121
+#define dagc1_accumulate_num_2k_12_8_pos 0
+#define dagc1_accumulate_num_2k_12_8_len 5
+#define dagc1_accumulate_num_2k_12_8_lsb 8
+#define xd_p_dagc1_accumulate_num_8k_7_0 0xA122
+#define dagc1_accumulate_num_8k_7_0_pos 0
+#define dagc1_accumulate_num_8k_7_0_len 8
+#define dagc1_accumulate_num_8k_7_0_lsb 0
+#define xd_p_dagc1_accumulate_num_8k_14_8 0xA123
+#define dagc1_accumulate_num_8k_14_8_pos 0
+#define dagc1_accumulate_num_8k_14_8_len 7
+#define dagc1_accumulate_num_8k_14_8_lsb 8
+#define xd_p_dagc1_desired_level_0 0xA123
+#define dagc1_desired_level_0_pos 7
+#define dagc1_desired_level_0_len 1
+#define dagc1_desired_level_0_lsb 0
+#define xd_p_dagc1_desired_level_8_1 0xA124
+#define dagc1_desired_level_8_1_pos 0
+#define dagc1_desired_level_8_1_len 8
+#define dagc1_desired_level_8_1_lsb 1
+#define xd_p_dagc1_apply_delay 0xA125
+#define dagc1_apply_delay_pos 0
+#define dagc1_apply_delay_len 7
+#define dagc1_apply_delay_lsb 0
+#define xd_p_dagc1_bypass_scale_ctl 0xA126
+#define dagc1_bypass_scale_ctl_pos 0
+#define dagc1_bypass_scale_ctl_len 2
+#define dagc1_bypass_scale_ctl_lsb 0
+#define xd_p_reg_dagc1_in_sat_cnt_7_0 0xA127
+#define reg_dagc1_in_sat_cnt_7_0_pos 0
+#define reg_dagc1_in_sat_cnt_7_0_len 8
+#define reg_dagc1_in_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc1_in_sat_cnt_15_8 0xA128
+#define reg_dagc1_in_sat_cnt_15_8_pos 0
+#define reg_dagc1_in_sat_cnt_15_8_len 8
+#define reg_dagc1_in_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc1_in_sat_cnt_23_16 0xA129
+#define reg_dagc1_in_sat_cnt_23_16_pos 0
+#define reg_dagc1_in_sat_cnt_23_16_len 8
+#define reg_dagc1_in_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc1_in_sat_cnt_31_24 0xA12A
+#define reg_dagc1_in_sat_cnt_31_24_pos 0
+#define reg_dagc1_in_sat_cnt_31_24_len 8
+#define reg_dagc1_in_sat_cnt_31_24_lsb 24
+#define xd_p_reg_dagc1_out_sat_cnt_7_0 0xA12B
+#define reg_dagc1_out_sat_cnt_7_0_pos 0
+#define reg_dagc1_out_sat_cnt_7_0_len 8
+#define reg_dagc1_out_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc1_out_sat_cnt_15_8 0xA12C
+#define reg_dagc1_out_sat_cnt_15_8_pos 0
+#define reg_dagc1_out_sat_cnt_15_8_len 8
+#define reg_dagc1_out_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc1_out_sat_cnt_23_16 0xA12D
+#define reg_dagc1_out_sat_cnt_23_16_pos 0
+#define reg_dagc1_out_sat_cnt_23_16_len 8
+#define reg_dagc1_out_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc1_out_sat_cnt_31_24 0xA12E
+#define reg_dagc1_out_sat_cnt_31_24_pos 0
+#define reg_dagc1_out_sat_cnt_31_24_len 8
+#define reg_dagc1_out_sat_cnt_31_24_lsb 24
+#define xd_r_dagc1_multiplier_7_0 0xA136
+#define dagc1_multiplier_7_0_pos 0
+#define dagc1_multiplier_7_0_len 8
+#define dagc1_multiplier_7_0_lsb 0
+#define xd_r_dagc1_multiplier_15_8 0xA137
+#define dagc1_multiplier_15_8_pos 0
+#define dagc1_multiplier_15_8_len 8
+#define dagc1_multiplier_15_8_lsb 8
+#define xd_r_dagc1_right_shift_bits 0xA138
+#define dagc1_right_shift_bits_pos 0
+#define dagc1_right_shift_bits_len 4
+#define dagc1_right_shift_bits_lsb 0
+#define xd_p_reg_bfs_fcw_7_0 0xA140
+#define reg_bfs_fcw_7_0_pos 0
+#define reg_bfs_fcw_7_0_len 8
+#define reg_bfs_fcw_7_0_lsb 0
+#define xd_p_reg_bfs_fcw_15_8 0xA141
+#define reg_bfs_fcw_15_8_pos 0
+#define reg_bfs_fcw_15_8_len 8
+#define reg_bfs_fcw_15_8_lsb 8
+#define xd_p_reg_bfs_fcw_22_16 0xA142
+#define reg_bfs_fcw_22_16_pos 0
+#define reg_bfs_fcw_22_16_len 7
+#define reg_bfs_fcw_22_16_lsb 16
+#define xd_p_reg_antif_sf_7_0 0xA144
+#define reg_antif_sf_7_0_pos 0
+#define reg_antif_sf_7_0_len 8
+#define reg_antif_sf_7_0_lsb 0
+#define xd_p_reg_antif_sf_11_8 0xA145
+#define reg_antif_sf_11_8_pos 0
+#define reg_antif_sf_11_8_len 4
+#define reg_antif_sf_11_8_lsb 8
+#define xd_r_bfs_fcw_q_7_0 0xA150
+#define bfs_fcw_q_7_0_pos 0
+#define bfs_fcw_q_7_0_len 8
+#define bfs_fcw_q_7_0_lsb 0
+#define xd_r_bfs_fcw_q_15_8 0xA151
+#define bfs_fcw_q_15_8_pos 0
+#define bfs_fcw_q_15_8_len 8
+#define bfs_fcw_q_15_8_lsb 8
+#define xd_r_bfs_fcw_q_22_16 0xA152
+#define bfs_fcw_q_22_16_pos 0
+#define bfs_fcw_q_22_16_len 7
+#define bfs_fcw_q_22_16_lsb 16
+#define xd_p_reg_dca_enu 0xA160
+#define reg_dca_enu_pos 0
+#define reg_dca_enu_len 1
+#define reg_dca_enu_lsb 0
+#define xd_p_reg_dca_enl 0xA160
+#define reg_dca_enl_pos 1
+#define reg_dca_enl_len 1
+#define reg_dca_enl_lsb 0
+#define xd_p_reg_dca_lower_chip 0xA160
+#define reg_dca_lower_chip_pos 2
+#define reg_dca_lower_chip_len 1
+#define reg_dca_lower_chip_lsb 0
+#define xd_p_reg_dca_upper_chip 0xA160
+#define reg_dca_upper_chip_pos 3
+#define reg_dca_upper_chip_len 1
+#define reg_dca_upper_chip_lsb 0
+#define xd_p_reg_dca_platch 0xA160
+#define reg_dca_platch_pos 4
+#define reg_dca_platch_len 1
+#define reg_dca_platch_lsb 0
+#define xd_p_reg_dca_th 0xA161
+#define reg_dca_th_pos 0
+#define reg_dca_th_len 5
+#define reg_dca_th_lsb 0
+#define xd_p_reg_dca_scale 0xA162
+#define reg_dca_scale_pos 0
+#define reg_dca_scale_len 4
+#define reg_dca_scale_lsb 0
+#define xd_p_reg_dca_tone_7_0 0xA163
+#define reg_dca_tone_7_0_pos 0
+#define reg_dca_tone_7_0_len 8
+#define reg_dca_tone_7_0_lsb 0
+#define xd_p_reg_dca_tone_12_8 0xA164
+#define reg_dca_tone_12_8_pos 0
+#define reg_dca_tone_12_8_len 5
+#define reg_dca_tone_12_8_lsb 8
+#define xd_p_reg_dca_time_7_0 0xA165
+#define reg_dca_time_7_0_pos 0
+#define reg_dca_time_7_0_len 8
+#define reg_dca_time_7_0_lsb 0
+#define xd_p_reg_dca_time_15_8 0xA166
+#define reg_dca_time_15_8_pos 0
+#define reg_dca_time_15_8_len 8
+#define reg_dca_time_15_8_lsb 8
+#define xd_r_dcasm 0xA167
+#define dcasm_pos 0
+#define dcasm_len 3
+#define dcasm_lsb 0
+#define xd_p_reg_qnt_valuew_7_0 0xA168
+#define reg_qnt_valuew_7_0_pos 0
+#define reg_qnt_valuew_7_0_len 8
+#define reg_qnt_valuew_7_0_lsb 0
+#define xd_p_reg_qnt_valuew_10_8 0xA169
+#define reg_qnt_valuew_10_8_pos 0
+#define reg_qnt_valuew_10_8_len 3
+#define reg_qnt_valuew_10_8_lsb 8
+#define xd_p_dca_sbx_gain_diff_7_0 0xA16A
+#define dca_sbx_gain_diff_7_0_pos 0
+#define dca_sbx_gain_diff_7_0_len 8
+#define dca_sbx_gain_diff_7_0_lsb 0
+#define xd_p_dca_sbx_gain_diff_9_8 0xA16B
+#define dca_sbx_gain_diff_9_8_pos 0
+#define dca_sbx_gain_diff_9_8_len 2
+#define dca_sbx_gain_diff_9_8_lsb 8
+#define xd_p_reg_dca_stand_alone 0xA16C
+#define reg_dca_stand_alone_pos 0
+#define reg_dca_stand_alone_len 1
+#define reg_dca_stand_alone_lsb 0
+#define xd_p_reg_dca_upper_out_en 0xA16C
+#define reg_dca_upper_out_en_pos 1
+#define reg_dca_upper_out_en_len 1
+#define reg_dca_upper_out_en_lsb 0
+#define xd_p_reg_dca_rc_en 0xA16C
+#define reg_dca_rc_en_pos 2
+#define reg_dca_rc_en_len 1
+#define reg_dca_rc_en_lsb 0
+#define xd_p_reg_dca_retrain_send 0xA16C
+#define reg_dca_retrain_send_pos 3
+#define reg_dca_retrain_send_len 1
+#define reg_dca_retrain_send_lsb 0
+#define xd_p_reg_dca_retrain_rec 0xA16C
+#define reg_dca_retrain_rec_pos 4
+#define reg_dca_retrain_rec_len 1
+#define reg_dca_retrain_rec_lsb 0
+#define xd_p_reg_dca_api_tpsrdy 0xA16C
+#define reg_dca_api_tpsrdy_pos 5
+#define reg_dca_api_tpsrdy_len 1
+#define reg_dca_api_tpsrdy_lsb 0
+#define xd_p_reg_dca_symbol_gap 0xA16D
+#define reg_dca_symbol_gap_pos 0
+#define reg_dca_symbol_gap_len 4
+#define reg_dca_symbol_gap_lsb 0
+#define xd_p_reg_qnt_nfvaluew_7_0 0xA16E
+#define reg_qnt_nfvaluew_7_0_pos 0
+#define reg_qnt_nfvaluew_7_0_len 8
+#define reg_qnt_nfvaluew_7_0_lsb 0
+#define xd_p_reg_qnt_nfvaluew_10_8 0xA16F
+#define reg_qnt_nfvaluew_10_8_pos 0
+#define reg_qnt_nfvaluew_10_8_len 3
+#define reg_qnt_nfvaluew_10_8_lsb 8
+#define xd_p_reg_qnt_flatness_thr_7_0 0xA170
+#define reg_qnt_flatness_thr_7_0_pos 0
+#define reg_qnt_flatness_thr_7_0_len 8
+#define reg_qnt_flatness_thr_7_0_lsb 0
+#define xd_p_reg_qnt_flatness_thr_9_8 0xA171
+#define reg_qnt_flatness_thr_9_8_pos 0
+#define reg_qnt_flatness_thr_9_8_len 2
+#define reg_qnt_flatness_thr_9_8_lsb 8
+#define xd_p_reg_dca_tone_idx_5_0 0xA171
+#define reg_dca_tone_idx_5_0_pos 2
+#define reg_dca_tone_idx_5_0_len 6
+#define reg_dca_tone_idx_5_0_lsb 0
+#define xd_p_reg_dca_tone_idx_12_6 0xA172
+#define reg_dca_tone_idx_12_6_pos 0
+#define reg_dca_tone_idx_12_6_len 7
+#define reg_dca_tone_idx_12_6_lsb 6
+#define xd_p_reg_dca_data_vld 0xA173
+#define reg_dca_data_vld_pos 0
+#define reg_dca_data_vld_len 1
+#define reg_dca_data_vld_lsb 0
+#define xd_p_reg_dca_read_update 0xA173
+#define reg_dca_read_update_pos 1
+#define reg_dca_read_update_len 1
+#define reg_dca_read_update_lsb 0
+#define xd_r_reg_dca_data_re_5_0 0xA173
+#define reg_dca_data_re_5_0_pos 2
+#define reg_dca_data_re_5_0_len 6
+#define reg_dca_data_re_5_0_lsb 0
+#define xd_r_reg_dca_data_re_10_6 0xA174
+#define reg_dca_data_re_10_6_pos 0
+#define reg_dca_data_re_10_6_len 5
+#define reg_dca_data_re_10_6_lsb 6
+#define xd_r_reg_dca_data_im_7_0 0xA175
+#define reg_dca_data_im_7_0_pos 0
+#define reg_dca_data_im_7_0_len 8
+#define reg_dca_data_im_7_0_lsb 0
+#define xd_r_reg_dca_data_im_10_8 0xA176
+#define reg_dca_data_im_10_8_pos 0
+#define reg_dca_data_im_10_8_len 3
+#define reg_dca_data_im_10_8_lsb 8
+#define xd_r_reg_dca_data_h2_7_0 0xA178
+#define reg_dca_data_h2_7_0_pos 0
+#define reg_dca_data_h2_7_0_len 8
+#define reg_dca_data_h2_7_0_lsb 0
+#define xd_r_reg_dca_data_h2_9_8 0xA179
+#define reg_dca_data_h2_9_8_pos 0
+#define reg_dca_data_h2_9_8_len 2
+#define reg_dca_data_h2_9_8_lsb 8
+#define xd_p_reg_f_adc_7_0 0xA180
+#define reg_f_adc_7_0_pos 0
+#define reg_f_adc_7_0_len 8
+#define reg_f_adc_7_0_lsb 0
+#define xd_p_reg_f_adc_15_8 0xA181
+#define reg_f_adc_15_8_pos 0
+#define reg_f_adc_15_8_len 8
+#define reg_f_adc_15_8_lsb 8
+#define xd_p_reg_f_adc_23_16 0xA182
+#define reg_f_adc_23_16_pos 0
+#define reg_f_adc_23_16_len 8
+#define reg_f_adc_23_16_lsb 16
+#define xd_r_intp_mu_7_0 0xA190
+#define intp_mu_7_0_pos 0
+#define intp_mu_7_0_len 8
+#define intp_mu_7_0_lsb 0
+#define xd_r_intp_mu_15_8 0xA191
+#define intp_mu_15_8_pos 0
+#define intp_mu_15_8_len 8
+#define intp_mu_15_8_lsb 8
+#define xd_r_intp_mu_19_16 0xA192
+#define intp_mu_19_16_pos 0
+#define intp_mu_19_16_len 4
+#define intp_mu_19_16_lsb 16
+#define xd_p_reg_agc_rst 0xA1A0
+#define reg_agc_rst_pos 0
+#define reg_agc_rst_len 1
+#define reg_agc_rst_lsb 0
+#define xd_p_rf_agc_en 0xA1A0
+#define rf_agc_en_pos 1
+#define rf_agc_en_len 1
+#define rf_agc_en_lsb 0
+#define xd_p_rf_agc_dis 0xA1A0
+#define rf_agc_dis_pos 2
+#define rf_agc_dis_len 1
+#define rf_agc_dis_lsb 0
+#define xd_p_if_agc_rst 0xA1A0
+#define if_agc_rst_pos 3
+#define if_agc_rst_len 1
+#define if_agc_rst_lsb 0
+#define xd_p_if_agc_en 0xA1A0
+#define if_agc_en_pos 4
+#define if_agc_en_len 1
+#define if_agc_en_lsb 0
+#define xd_p_if_agc_dis 0xA1A0
+#define if_agc_dis_pos 5
+#define if_agc_dis_len 1
+#define if_agc_dis_lsb 0
+#define xd_p_agc_lock 0xA1A0
+#define agc_lock_pos 6
+#define agc_lock_len 1
+#define agc_lock_lsb 0
+#define xd_p_reg_tinr_rst 0xA1A1
+#define reg_tinr_rst_pos 0
+#define reg_tinr_rst_len 1
+#define reg_tinr_rst_lsb 0
+#define xd_p_reg_tinr_en 0xA1A1
+#define reg_tinr_en_pos 1
+#define reg_tinr_en_len 1
+#define reg_tinr_en_lsb 0
+#define xd_p_reg_ccifs_en 0xA1A2
+#define reg_ccifs_en_pos 0
+#define reg_ccifs_en_len 1
+#define reg_ccifs_en_lsb 0
+#define xd_p_reg_ccifs_dis 0xA1A2
+#define reg_ccifs_dis_pos 1
+#define reg_ccifs_dis_len 1
+#define reg_ccifs_dis_lsb 0
+#define xd_p_reg_ccifs_rst 0xA1A2
+#define reg_ccifs_rst_pos 2
+#define reg_ccifs_rst_len 1
+#define reg_ccifs_rst_lsb 0
+#define xd_p_reg_ccifs_byp 0xA1A2
+#define reg_ccifs_byp_pos 3
+#define reg_ccifs_byp_len 1
+#define reg_ccifs_byp_lsb 0
+#define xd_p_reg_ccif_en 0xA1A3
+#define reg_ccif_en_pos 0
+#define reg_ccif_en_len 1
+#define reg_ccif_en_lsb 0
+#define xd_p_reg_ccif_dis 0xA1A3
+#define reg_ccif_dis_pos 1
+#define reg_ccif_dis_len 1
+#define reg_ccif_dis_lsb 0
+#define xd_p_reg_ccif_rst 0xA1A3
+#define reg_ccif_rst_pos 2
+#define reg_ccif_rst_len 1
+#define reg_ccif_rst_lsb 0
+#define xd_p_reg_ccif_byp 0xA1A3
+#define reg_ccif_byp_pos 3
+#define reg_ccif_byp_len 1
+#define reg_ccif_byp_lsb 0
+#define xd_p_dagc1_rst 0xA1A4
+#define dagc1_rst_pos 0
+#define dagc1_rst_len 1
+#define dagc1_rst_lsb 0
+#define xd_p_dagc1_en 0xA1A4
+#define dagc1_en_pos 1
+#define dagc1_en_len 1
+#define dagc1_en_lsb 0
+#define xd_p_dagc1_mode 0xA1A4
+#define dagc1_mode_pos 2
+#define dagc1_mode_len 2
+#define dagc1_mode_lsb 0
+#define xd_p_dagc1_done 0xA1A4
+#define dagc1_done_pos 4
+#define dagc1_done_len 1
+#define dagc1_done_lsb 0
+#define xd_p_ccid_rst 0xA1A5
+#define ccid_rst_pos 0
+#define ccid_rst_len 1
+#define ccid_rst_lsb 0
+#define xd_p_ccid_en 0xA1A5
+#define ccid_en_pos 1
+#define ccid_en_len 1
+#define ccid_en_lsb 0
+#define xd_p_ccid_mode 0xA1A5
+#define ccid_mode_pos 2
+#define ccid_mode_len 2
+#define ccid_mode_lsb 0
+#define xd_p_ccid_done 0xA1A5
+#define ccid_done_pos 4
+#define ccid_done_len 1
+#define ccid_done_lsb 0
+#define xd_r_ccid_deted 0xA1A5
+#define ccid_deted_pos 5
+#define ccid_deted_len 1
+#define ccid_deted_lsb 0
+#define xd_p_ccid2_en 0xA1A5
+#define ccid2_en_pos 6
+#define ccid2_en_len 1
+#define ccid2_en_lsb 0
+#define xd_p_ccid2_done 0xA1A5
+#define ccid2_done_pos 7
+#define ccid2_done_len 1
+#define ccid2_done_lsb 0
+#define xd_p_reg_bfs_en 0xA1A6
+#define reg_bfs_en_pos 0
+#define reg_bfs_en_len 1
+#define reg_bfs_en_lsb 0
+#define xd_p_reg_bfs_dis 0xA1A6
+#define reg_bfs_dis_pos 1
+#define reg_bfs_dis_len 1
+#define reg_bfs_dis_lsb 0
+#define xd_p_reg_bfs_rst 0xA1A6
+#define reg_bfs_rst_pos 2
+#define reg_bfs_rst_len 1
+#define reg_bfs_rst_lsb 0
+#define xd_p_reg_bfs_byp 0xA1A6
+#define reg_bfs_byp_pos 3
+#define reg_bfs_byp_len 1
+#define reg_bfs_byp_lsb 0
+#define xd_p_reg_antif_en 0xA1A7
+#define reg_antif_en_pos 0
+#define reg_antif_en_len 1
+#define reg_antif_en_lsb 0
+#define xd_p_reg_antif_dis 0xA1A7
+#define reg_antif_dis_pos 1
+#define reg_antif_dis_len 1
+#define reg_antif_dis_lsb 0
+#define xd_p_reg_antif_rst 0xA1A7
+#define reg_antif_rst_pos 2
+#define reg_antif_rst_len 1
+#define reg_antif_rst_lsb 0
+#define xd_p_reg_antif_byp 0xA1A7
+#define reg_antif_byp_pos 3
+#define reg_antif_byp_len 1
+#define reg_antif_byp_lsb 0
+#define xd_p_intp_en 0xA1A8
+#define intp_en_pos 0
+#define intp_en_len 1
+#define intp_en_lsb 0
+#define xd_p_intp_dis 0xA1A8
+#define intp_dis_pos 1
+#define intp_dis_len 1
+#define intp_dis_lsb 0
+#define xd_p_intp_rst 0xA1A8
+#define intp_rst_pos 2
+#define intp_rst_len 1
+#define intp_rst_lsb 0
+#define xd_p_intp_byp 0xA1A8
+#define intp_byp_pos 3
+#define intp_byp_len 1
+#define intp_byp_lsb 0
+#define xd_p_reg_acif_en 0xA1A9
+#define reg_acif_en_pos 0
+#define reg_acif_en_len 1
+#define reg_acif_en_lsb 0
+#define xd_p_reg_acif_dis 0xA1A9
+#define reg_acif_dis_pos 1
+#define reg_acif_dis_len 1
+#define reg_acif_dis_lsb 0
+#define xd_p_reg_acif_rst 0xA1A9
+#define reg_acif_rst_pos 2
+#define reg_acif_rst_len 1
+#define reg_acif_rst_lsb 0
+#define xd_p_reg_acif_byp 0xA1A9
+#define reg_acif_byp_pos 3
+#define reg_acif_byp_len 1
+#define reg_acif_byp_lsb 0
+#define xd_p_reg_acif_sync_mode 0xA1A9
+#define reg_acif_sync_mode_pos 4
+#define reg_acif_sync_mode_len 1
+#define reg_acif_sync_mode_lsb 0
+#define xd_p_dagc2_rst 0xA1AA
+#define dagc2_rst_pos 0
+#define dagc2_rst_len 1
+#define dagc2_rst_lsb 0
+#define xd_p_dagc2_en 0xA1AA
+#define dagc2_en_pos 1
+#define dagc2_en_len 1
+#define dagc2_en_lsb 0
+#define xd_p_dagc2_mode 0xA1AA
+#define dagc2_mode_pos 2
+#define dagc2_mode_len 2
+#define dagc2_mode_lsb 0
+#define xd_p_dagc2_done 0xA1AA
+#define dagc2_done_pos 4
+#define dagc2_done_len 1
+#define dagc2_done_lsb 0
+#define xd_p_reg_dca_en 0xA1AB
+#define reg_dca_en_pos 0
+#define reg_dca_en_len 1
+#define reg_dca_en_lsb 0
+#define xd_p_dagc2_accumulate_num_2k_7_0 0xA1C0
+#define dagc2_accumulate_num_2k_7_0_pos 0
+#define dagc2_accumulate_num_2k_7_0_len 8
+#define dagc2_accumulate_num_2k_7_0_lsb 0
+#define xd_p_dagc2_accumulate_num_2k_12_8 0xA1C1
+#define dagc2_accumulate_num_2k_12_8_pos 0
+#define dagc2_accumulate_num_2k_12_8_len 5
+#define dagc2_accumulate_num_2k_12_8_lsb 8
+#define xd_p_dagc2_accumulate_num_8k_7_0 0xA1C2
+#define dagc2_accumulate_num_8k_7_0_pos 0
+#define dagc2_accumulate_num_8k_7_0_len 8
+#define dagc2_accumulate_num_8k_7_0_lsb 0
+#define xd_p_dagc2_accumulate_num_8k_12_8 0xA1C3
+#define dagc2_accumulate_num_8k_12_8_pos 0
+#define dagc2_accumulate_num_8k_12_8_len 5
+#define dagc2_accumulate_num_8k_12_8_lsb 8
+#define xd_p_dagc2_desired_level_2_0 0xA1C3
+#define dagc2_desired_level_2_0_pos 5
+#define dagc2_desired_level_2_0_len 3
+#define dagc2_desired_level_2_0_lsb 0
+#define xd_p_dagc2_desired_level_8_3 0xA1C4
+#define dagc2_desired_level_8_3_pos 0
+#define dagc2_desired_level_8_3_len 6
+#define dagc2_desired_level_8_3_lsb 3
+#define xd_p_dagc2_apply_delay 0xA1C5
+#define dagc2_apply_delay_pos 0
+#define dagc2_apply_delay_len 7
+#define dagc2_apply_delay_lsb 0
+#define xd_p_dagc2_bypass_scale_ctl 0xA1C6
+#define dagc2_bypass_scale_ctl_pos 0
+#define dagc2_bypass_scale_ctl_len 3
+#define dagc2_bypass_scale_ctl_lsb 0
+#define xd_p_dagc2_programmable_shift1 0xA1C7
+#define dagc2_programmable_shift1_pos 0
+#define dagc2_programmable_shift1_len 8
+#define dagc2_programmable_shift1_lsb 0
+#define xd_p_dagc2_programmable_shift2 0xA1C8
+#define dagc2_programmable_shift2_pos 0
+#define dagc2_programmable_shift2_len 8
+#define dagc2_programmable_shift2_lsb 0
+#define xd_p_reg_dagc2_in_sat_cnt_7_0 0xA1C9
+#define reg_dagc2_in_sat_cnt_7_0_pos 0
+#define reg_dagc2_in_sat_cnt_7_0_len 8
+#define reg_dagc2_in_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc2_in_sat_cnt_15_8 0xA1CA
+#define reg_dagc2_in_sat_cnt_15_8_pos 0
+#define reg_dagc2_in_sat_cnt_15_8_len 8
+#define reg_dagc2_in_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc2_in_sat_cnt_23_16 0xA1CB
+#define reg_dagc2_in_sat_cnt_23_16_pos 0
+#define reg_dagc2_in_sat_cnt_23_16_len 8
+#define reg_dagc2_in_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc2_in_sat_cnt_31_24 0xA1CC
+#define reg_dagc2_in_sat_cnt_31_24_pos 0
+#define reg_dagc2_in_sat_cnt_31_24_len 8
+#define reg_dagc2_in_sat_cnt_31_24_lsb 24
+#define xd_p_reg_dagc2_out_sat_cnt_7_0 0xA1CD
+#define reg_dagc2_out_sat_cnt_7_0_pos 0
+#define reg_dagc2_out_sat_cnt_7_0_len 8
+#define reg_dagc2_out_sat_cnt_7_0_lsb 0
+#define xd_p_reg_dagc2_out_sat_cnt_15_8 0xA1CE
+#define reg_dagc2_out_sat_cnt_15_8_pos 0
+#define reg_dagc2_out_sat_cnt_15_8_len 8
+#define reg_dagc2_out_sat_cnt_15_8_lsb 8
+#define xd_p_reg_dagc2_out_sat_cnt_23_16 0xA1CF
+#define reg_dagc2_out_sat_cnt_23_16_pos 0
+#define reg_dagc2_out_sat_cnt_23_16_len 8
+#define reg_dagc2_out_sat_cnt_23_16_lsb 16
+#define xd_p_reg_dagc2_out_sat_cnt_31_24 0xA1D0
+#define reg_dagc2_out_sat_cnt_31_24_pos 0
+#define reg_dagc2_out_sat_cnt_31_24_len 8
+#define reg_dagc2_out_sat_cnt_31_24_lsb 24
+#define xd_r_dagc2_multiplier_7_0 0xA1D6
+#define dagc2_multiplier_7_0_pos 0
+#define dagc2_multiplier_7_0_len 8
+#define dagc2_multiplier_7_0_lsb 0
+#define xd_r_dagc2_multiplier_15_8 0xA1D7
+#define dagc2_multiplier_15_8_pos 0
+#define dagc2_multiplier_15_8_len 8
+#define dagc2_multiplier_15_8_lsb 8
+#define xd_r_dagc2_right_shift_bits 0xA1D8
+#define dagc2_right_shift_bits_pos 0
+#define dagc2_right_shift_bits_len 4
+#define dagc2_right_shift_bits_lsb 0
+#define xd_p_cfoe_NS_coeff1_7_0 0xA200
+#define cfoe_NS_coeff1_7_0_pos 0
+#define cfoe_NS_coeff1_7_0_len 8
+#define cfoe_NS_coeff1_7_0_lsb 0
+#define xd_p_cfoe_NS_coeff1_15_8 0xA201
+#define cfoe_NS_coeff1_15_8_pos 0
+#define cfoe_NS_coeff1_15_8_len 8
+#define cfoe_NS_coeff1_15_8_lsb 8
+#define xd_p_cfoe_NS_coeff1_23_16 0xA202
+#define cfoe_NS_coeff1_23_16_pos 0
+#define cfoe_NS_coeff1_23_16_len 8
+#define cfoe_NS_coeff1_23_16_lsb 16
+#define xd_p_cfoe_NS_coeff1_25_24 0xA203
+#define cfoe_NS_coeff1_25_24_pos 0
+#define cfoe_NS_coeff1_25_24_len 2
+#define cfoe_NS_coeff1_25_24_lsb 24
+#define xd_p_cfoe_NS_coeff2_5_0 0xA203
+#define cfoe_NS_coeff2_5_0_pos 2
+#define cfoe_NS_coeff2_5_0_len 6
+#define cfoe_NS_coeff2_5_0_lsb 0
+#define xd_p_cfoe_NS_coeff2_13_6 0xA204
+#define cfoe_NS_coeff2_13_6_pos 0
+#define cfoe_NS_coeff2_13_6_len 8
+#define cfoe_NS_coeff2_13_6_lsb 6
+#define xd_p_cfoe_NS_coeff2_21_14 0xA205
+#define cfoe_NS_coeff2_21_14_pos 0
+#define cfoe_NS_coeff2_21_14_len 8
+#define cfoe_NS_coeff2_21_14_lsb 14
+#define xd_p_cfoe_NS_coeff2_24_22 0xA206
+#define cfoe_NS_coeff2_24_22_pos 0
+#define cfoe_NS_coeff2_24_22_len 3
+#define cfoe_NS_coeff2_24_22_lsb 22
+#define xd_p_cfoe_lf_c1_4_0 0xA206
+#define cfoe_lf_c1_4_0_pos 3
+#define cfoe_lf_c1_4_0_len 5
+#define cfoe_lf_c1_4_0_lsb 0
+#define xd_p_cfoe_lf_c1_12_5 0xA207
+#define cfoe_lf_c1_12_5_pos 0
+#define cfoe_lf_c1_12_5_len 8
+#define cfoe_lf_c1_12_5_lsb 5
+#define xd_p_cfoe_lf_c1_20_13 0xA208
+#define cfoe_lf_c1_20_13_pos 0
+#define cfoe_lf_c1_20_13_len 8
+#define cfoe_lf_c1_20_13_lsb 13
+#define xd_p_cfoe_lf_c1_25_21 0xA209
+#define cfoe_lf_c1_25_21_pos 0
+#define cfoe_lf_c1_25_21_len 5
+#define cfoe_lf_c1_25_21_lsb 21
+#define xd_p_cfoe_lf_c2_2_0 0xA209
+#define cfoe_lf_c2_2_0_pos 5
+#define cfoe_lf_c2_2_0_len 3
+#define cfoe_lf_c2_2_0_lsb 0
+#define xd_p_cfoe_lf_c2_10_3 0xA20A
+#define cfoe_lf_c2_10_3_pos 0
+#define cfoe_lf_c2_10_3_len 8
+#define cfoe_lf_c2_10_3_lsb 3
+#define xd_p_cfoe_lf_c2_18_11 0xA20B
+#define cfoe_lf_c2_18_11_pos 0
+#define cfoe_lf_c2_18_11_len 8
+#define cfoe_lf_c2_18_11_lsb 11
+#define xd_p_cfoe_lf_c2_25_19 0xA20C
+#define cfoe_lf_c2_25_19_pos 0
+#define cfoe_lf_c2_25_19_len 7
+#define cfoe_lf_c2_25_19_lsb 19
+#define xd_p_cfoe_ifod_7_0 0xA20D
+#define cfoe_ifod_7_0_pos 0
+#define cfoe_ifod_7_0_len 8
+#define cfoe_ifod_7_0_lsb 0
+#define xd_p_cfoe_ifod_10_8 0xA20E
+#define cfoe_ifod_10_8_pos 0
+#define cfoe_ifod_10_8_len 3
+#define cfoe_ifod_10_8_lsb 8
+#define xd_p_cfoe_Divg_ctr_th 0xA20E
+#define cfoe_Divg_ctr_th_pos 4
+#define cfoe_Divg_ctr_th_len 4
+#define cfoe_Divg_ctr_th_lsb 0
+#define xd_p_cfoe_FOT_divg_th 0xA20F
+#define cfoe_FOT_divg_th_pos 0
+#define cfoe_FOT_divg_th_len 8
+#define cfoe_FOT_divg_th_lsb 0
+#define xd_p_cfoe_FOT_cnvg_th 0xA210
+#define cfoe_FOT_cnvg_th_pos 0
+#define cfoe_FOT_cnvg_th_len 8
+#define cfoe_FOT_cnvg_th_lsb 0
+#define xd_p_reg_cfoe_offset_7_0 0xA211
+#define reg_cfoe_offset_7_0_pos 0
+#define reg_cfoe_offset_7_0_len 8
+#define reg_cfoe_offset_7_0_lsb 0
+#define xd_p_reg_cfoe_offset_9_8 0xA212
+#define reg_cfoe_offset_9_8_pos 0
+#define reg_cfoe_offset_9_8_len 2
+#define reg_cfoe_offset_9_8_lsb 8
+#define xd_p_reg_cfoe_ifoe_sign_corr 0xA212
+#define reg_cfoe_ifoe_sign_corr_pos 2
+#define reg_cfoe_ifoe_sign_corr_len 1
+#define reg_cfoe_ifoe_sign_corr_lsb 0
+#define xd_r_cfoe_fot_LF_output_7_0 0xA218
+#define cfoe_fot_LF_output_7_0_pos 0
+#define cfoe_fot_LF_output_7_0_len 8
+#define cfoe_fot_LF_output_7_0_lsb 0
+#define xd_r_cfoe_fot_LF_output_15_8 0xA219
+#define cfoe_fot_LF_output_15_8_pos 0
+#define cfoe_fot_LF_output_15_8_len 8
+#define cfoe_fot_LF_output_15_8_lsb 8
+#define xd_r_cfoe_ifo_metric_7_0 0xA21A
+#define cfoe_ifo_metric_7_0_pos 0
+#define cfoe_ifo_metric_7_0_len 8
+#define cfoe_ifo_metric_7_0_lsb 0
+#define xd_r_cfoe_ifo_metric_15_8 0xA21B
+#define cfoe_ifo_metric_15_8_pos 0
+#define cfoe_ifo_metric_15_8_len 8
+#define cfoe_ifo_metric_15_8_lsb 8
+#define xd_r_cfoe_ifo_metric_23_16 0xA21C
+#define cfoe_ifo_metric_23_16_pos 0
+#define cfoe_ifo_metric_23_16_len 8
+#define cfoe_ifo_metric_23_16_lsb 16
+#define xd_p_ste_Nu 0xA220
+#define ste_Nu_pos 0
+#define ste_Nu_len 2
+#define ste_Nu_lsb 0
+#define xd_p_ste_GI 0xA220
+#define ste_GI_pos 2
+#define ste_GI_len 3
+#define ste_GI_lsb 0
+#define xd_p_ste_symbol_num 0xA221
+#define ste_symbol_num_pos 0
+#define ste_symbol_num_len 2
+#define ste_symbol_num_lsb 0
+#define xd_p_ste_sample_num 0xA221
+#define ste_sample_num_pos 2
+#define ste_sample_num_len 2
+#define ste_sample_num_lsb 0
+#define xd_p_reg_ste_buf_en 0xA221
+#define reg_ste_buf_en_pos 7
+#define reg_ste_buf_en_len 1
+#define reg_ste_buf_en_lsb 0
+#define xd_p_ste_FFT_offset_7_0 0xA222
+#define ste_FFT_offset_7_0_pos 0
+#define ste_FFT_offset_7_0_len 8
+#define ste_FFT_offset_7_0_lsb 0
+#define xd_p_ste_FFT_offset_11_8 0xA223
+#define ste_FFT_offset_11_8_pos 0
+#define ste_FFT_offset_11_8_len 4
+#define ste_FFT_offset_11_8_lsb 8
+#define xd_p_reg_ste_tstmod 0xA223
+#define reg_ste_tstmod_pos 5
+#define reg_ste_tstmod_len 1
+#define reg_ste_tstmod_lsb 0
+#define xd_p_ste_adv_start_7_0 0xA224
+#define ste_adv_start_7_0_pos 0
+#define ste_adv_start_7_0_len 8
+#define ste_adv_start_7_0_lsb 0
+#define xd_p_ste_adv_start_10_8 0xA225
+#define ste_adv_start_10_8_pos 0
+#define ste_adv_start_10_8_len 3
+#define ste_adv_start_10_8_lsb 8
+#define xd_p_ste_adv_stop 0xA226
+#define ste_adv_stop_pos 0
+#define ste_adv_stop_len 8
+#define ste_adv_stop_lsb 0
+#define xd_r_ste_P_value_7_0 0xA228
+#define ste_P_value_7_0_pos 0
+#define ste_P_value_7_0_len 8
+#define ste_P_value_7_0_lsb 0
+#define xd_r_ste_P_value_10_8 0xA229
+#define ste_P_value_10_8_pos 0
+#define ste_P_value_10_8_len 3
+#define ste_P_value_10_8_lsb 8
+#define xd_r_ste_M_value_7_0 0xA22A
+#define ste_M_value_7_0_pos 0
+#define ste_M_value_7_0_len 8
+#define ste_M_value_7_0_lsb 0
+#define xd_r_ste_M_value_10_8 0xA22B
+#define ste_M_value_10_8_pos 0
+#define ste_M_value_10_8_len 3
+#define ste_M_value_10_8_lsb 8
+#define xd_r_ste_H1 0xA22C
+#define ste_H1_pos 0
+#define ste_H1_len 7
+#define ste_H1_lsb 0
+#define xd_r_ste_H2 0xA22D
+#define ste_H2_pos 0
+#define ste_H2_len 7
+#define ste_H2_lsb 0
+#define xd_r_ste_H3 0xA22E
+#define ste_H3_pos 0
+#define ste_H3_len 7
+#define ste_H3_lsb 0
+#define xd_r_ste_H4 0xA22F
+#define ste_H4_pos 0
+#define ste_H4_len 7
+#define ste_H4_lsb 0
+#define xd_r_ste_Corr_value_I_7_0 0xA230
+#define ste_Corr_value_I_7_0_pos 0
+#define ste_Corr_value_I_7_0_len 8
+#define ste_Corr_value_I_7_0_lsb 0
+#define xd_r_ste_Corr_value_I_15_8 0xA231
+#define ste_Corr_value_I_15_8_pos 0
+#define ste_Corr_value_I_15_8_len 8
+#define ste_Corr_value_I_15_8_lsb 8
+#define xd_r_ste_Corr_value_I_23_16 0xA232
+#define ste_Corr_value_I_23_16_pos 0
+#define ste_Corr_value_I_23_16_len 8
+#define ste_Corr_value_I_23_16_lsb 16
+#define xd_r_ste_Corr_value_I_27_24 0xA233
+#define ste_Corr_value_I_27_24_pos 0
+#define ste_Corr_value_I_27_24_len 4
+#define ste_Corr_value_I_27_24_lsb 24
+#define xd_r_ste_Corr_value_Q_7_0 0xA234
+#define ste_Corr_value_Q_7_0_pos 0
+#define ste_Corr_value_Q_7_0_len 8
+#define ste_Corr_value_Q_7_0_lsb 0
+#define xd_r_ste_Corr_value_Q_15_8 0xA235
+#define ste_Corr_value_Q_15_8_pos 0
+#define ste_Corr_value_Q_15_8_len 8
+#define ste_Corr_value_Q_15_8_lsb 8
+#define xd_r_ste_Corr_value_Q_23_16 0xA236
+#define ste_Corr_value_Q_23_16_pos 0
+#define ste_Corr_value_Q_23_16_len 8
+#define ste_Corr_value_Q_23_16_lsb 16
+#define xd_r_ste_Corr_value_Q_27_24 0xA237
+#define ste_Corr_value_Q_27_24_pos 0
+#define ste_Corr_value_Q_27_24_len 4
+#define ste_Corr_value_Q_27_24_lsb 24
+#define xd_r_ste_J_num_7_0 0xA238
+#define ste_J_num_7_0_pos 0
+#define ste_J_num_7_0_len 8
+#define ste_J_num_7_0_lsb 0
+#define xd_r_ste_J_num_15_8 0xA239
+#define ste_J_num_15_8_pos 0
+#define ste_J_num_15_8_len 8
+#define ste_J_num_15_8_lsb 8
+#define xd_r_ste_J_num_23_16 0xA23A
+#define ste_J_num_23_16_pos 0
+#define ste_J_num_23_16_len 8
+#define ste_J_num_23_16_lsb 16
+#define xd_r_ste_J_num_31_24 0xA23B
+#define ste_J_num_31_24_pos 0
+#define ste_J_num_31_24_len 8
+#define ste_J_num_31_24_lsb 24
+#define xd_r_ste_J_den_7_0 0xA23C
+#define ste_J_den_7_0_pos 0
+#define ste_J_den_7_0_len 8
+#define ste_J_den_7_0_lsb 0
+#define xd_r_ste_J_den_15_8 0xA23D
+#define ste_J_den_15_8_pos 0
+#define ste_J_den_15_8_len 8
+#define ste_J_den_15_8_lsb 8
+#define xd_r_ste_J_den_18_16 0xA23E
+#define ste_J_den_18_16_pos 0
+#define ste_J_den_18_16_len 3
+#define ste_J_den_18_16_lsb 16
+#define xd_r_ste_Beacon_Indicator 0xA23E
+#define ste_Beacon_Indicator_pos 4
+#define ste_Beacon_Indicator_len 1
+#define ste_Beacon_Indicator_lsb 0
+#define xd_r_tpsd_Frame_Num 0xA250
+#define tpsd_Frame_Num_pos 0
+#define tpsd_Frame_Num_len 2
+#define tpsd_Frame_Num_lsb 0
+#define xd_r_tpsd_Constel 0xA250
+#define tpsd_Constel_pos 2
+#define tpsd_Constel_len 2
+#define tpsd_Constel_lsb 0
+#define xd_r_tpsd_GI 0xA250
+#define tpsd_GI_pos 4
+#define tpsd_GI_len 2
+#define tpsd_GI_lsb 0
+#define xd_r_tpsd_Mode 0xA250
+#define tpsd_Mode_pos 6
+#define tpsd_Mode_len 2
+#define tpsd_Mode_lsb 0
+#define xd_r_tpsd_CR_HP 0xA251
+#define tpsd_CR_HP_pos 0
+#define tpsd_CR_HP_len 3
+#define tpsd_CR_HP_lsb 0
+#define xd_r_tpsd_CR_LP 0xA251
+#define tpsd_CR_LP_pos 3
+#define tpsd_CR_LP_len 3
+#define tpsd_CR_LP_lsb 0
+#define xd_r_tpsd_Hie 0xA252
+#define tpsd_Hie_pos 0
+#define tpsd_Hie_len 3
+#define tpsd_Hie_lsb 0
+#define xd_r_tpsd_Res_Bits 0xA252
+#define tpsd_Res_Bits_pos 3
+#define tpsd_Res_Bits_len 5
+#define tpsd_Res_Bits_lsb 0
+#define xd_r_tpsd_Res_Bits_0 0xA253
+#define tpsd_Res_Bits_0_pos 0
+#define tpsd_Res_Bits_0_len 1
+#define tpsd_Res_Bits_0_lsb 0
+#define xd_r_tpsd_LengthInd 0xA253
+#define tpsd_LengthInd_pos 1
+#define tpsd_LengthInd_len 6
+#define tpsd_LengthInd_lsb 0
+#define xd_r_tpsd_Cell_Id_7_0 0xA254
+#define tpsd_Cell_Id_7_0_pos 0
+#define tpsd_Cell_Id_7_0_len 8
+#define tpsd_Cell_Id_7_0_lsb 0
+#define xd_r_tpsd_Cell_Id_15_8 0xA255
+#define tpsd_Cell_Id_15_8_pos 0
+#define tpsd_Cell_Id_15_8_len 8
+#define tpsd_Cell_Id_15_8_lsb 0
+#define xd_p_reg_fft_mask_tone0_7_0 0xA260
+#define reg_fft_mask_tone0_7_0_pos 0
+#define reg_fft_mask_tone0_7_0_len 8
+#define reg_fft_mask_tone0_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone0_12_8 0xA261
+#define reg_fft_mask_tone0_12_8_pos 0
+#define reg_fft_mask_tone0_12_8_len 5
+#define reg_fft_mask_tone0_12_8_lsb 8
+#define xd_p_reg_fft_mask_tone1_7_0 0xA262
+#define reg_fft_mask_tone1_7_0_pos 0
+#define reg_fft_mask_tone1_7_0_len 8
+#define reg_fft_mask_tone1_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone1_12_8 0xA263
+#define reg_fft_mask_tone1_12_8_pos 0
+#define reg_fft_mask_tone1_12_8_len 5
+#define reg_fft_mask_tone1_12_8_lsb 8
+#define xd_p_reg_fft_mask_tone2_7_0 0xA264
+#define reg_fft_mask_tone2_7_0_pos 0
+#define reg_fft_mask_tone2_7_0_len 8
+#define reg_fft_mask_tone2_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone2_12_8 0xA265
+#define reg_fft_mask_tone2_12_8_pos 0
+#define reg_fft_mask_tone2_12_8_len 5
+#define reg_fft_mask_tone2_12_8_lsb 8
+#define xd_p_reg_fft_mask_tone3_7_0 0xA266
+#define reg_fft_mask_tone3_7_0_pos 0
+#define reg_fft_mask_tone3_7_0_len 8
+#define reg_fft_mask_tone3_7_0_lsb 0
+#define xd_p_reg_fft_mask_tone3_12_8 0xA267
+#define reg_fft_mask_tone3_12_8_pos 0
+#define reg_fft_mask_tone3_12_8_len 5
+#define reg_fft_mask_tone3_12_8_lsb 8
+#define xd_p_reg_fft_mask_from0_7_0 0xA268
+#define reg_fft_mask_from0_7_0_pos 0
+#define reg_fft_mask_from0_7_0_len 8
+#define reg_fft_mask_from0_7_0_lsb 0
+#define xd_p_reg_fft_mask_from0_12_8 0xA269
+#define reg_fft_mask_from0_12_8_pos 0
+#define reg_fft_mask_from0_12_8_len 5
+#define reg_fft_mask_from0_12_8_lsb 8
+#define xd_p_reg_fft_mask_to0_7_0 0xA26A
+#define reg_fft_mask_to0_7_0_pos 0
+#define reg_fft_mask_to0_7_0_len 8
+#define reg_fft_mask_to0_7_0_lsb 0
+#define xd_p_reg_fft_mask_to0_12_8 0xA26B
+#define reg_fft_mask_to0_12_8_pos 0
+#define reg_fft_mask_to0_12_8_len 5
+#define reg_fft_mask_to0_12_8_lsb 8
+#define xd_p_reg_fft_mask_from1_7_0 0xA26C
+#define reg_fft_mask_from1_7_0_pos 0
+#define reg_fft_mask_from1_7_0_len 8
+#define reg_fft_mask_from1_7_0_lsb 0
+#define xd_p_reg_fft_mask_from1_12_8 0xA26D
+#define reg_fft_mask_from1_12_8_pos 0
+#define reg_fft_mask_from1_12_8_len 5
+#define reg_fft_mask_from1_12_8_lsb 8
+#define xd_p_reg_fft_mask_to1_7_0 0xA26E
+#define reg_fft_mask_to1_7_0_pos 0
+#define reg_fft_mask_to1_7_0_len 8
+#define reg_fft_mask_to1_7_0_lsb 0
+#define xd_p_reg_fft_mask_to1_12_8 0xA26F
+#define reg_fft_mask_to1_12_8_pos 0
+#define reg_fft_mask_to1_12_8_len 5
+#define reg_fft_mask_to1_12_8_lsb 8
+#define xd_p_reg_cge_idx0_7_0 0xA280
+#define reg_cge_idx0_7_0_pos 0
+#define reg_cge_idx0_7_0_len 8
+#define reg_cge_idx0_7_0_lsb 0
+#define xd_p_reg_cge_idx0_12_8 0xA281
+#define reg_cge_idx0_12_8_pos 0
+#define reg_cge_idx0_12_8_len 5
+#define reg_cge_idx0_12_8_lsb 8
+#define xd_p_reg_cge_idx1_7_0 0xA282
+#define reg_cge_idx1_7_0_pos 0
+#define reg_cge_idx1_7_0_len 8
+#define reg_cge_idx1_7_0_lsb 0
+#define xd_p_reg_cge_idx1_12_8 0xA283
+#define reg_cge_idx1_12_8_pos 0
+#define reg_cge_idx1_12_8_len 5
+#define reg_cge_idx1_12_8_lsb 8
+#define xd_p_reg_cge_idx2_7_0 0xA284
+#define reg_cge_idx2_7_0_pos 0
+#define reg_cge_idx2_7_0_len 8
+#define reg_cge_idx2_7_0_lsb 0
+#define xd_p_reg_cge_idx2_12_8 0xA285
+#define reg_cge_idx2_12_8_pos 0
+#define reg_cge_idx2_12_8_len 5
+#define reg_cge_idx2_12_8_lsb 8
+#define xd_p_reg_cge_idx3_7_0 0xA286
+#define reg_cge_idx3_7_0_pos 0
+#define reg_cge_idx3_7_0_len 8
+#define reg_cge_idx3_7_0_lsb 0
+#define xd_p_reg_cge_idx3_12_8 0xA287
+#define reg_cge_idx3_12_8_pos 0
+#define reg_cge_idx3_12_8_len 5
+#define reg_cge_idx3_12_8_lsb 8
+#define xd_p_reg_cge_idx4_7_0 0xA288
+#define reg_cge_idx4_7_0_pos 0
+#define reg_cge_idx4_7_0_len 8
+#define reg_cge_idx4_7_0_lsb 0
+#define xd_p_reg_cge_idx4_12_8 0xA289
+#define reg_cge_idx4_12_8_pos 0
+#define reg_cge_idx4_12_8_len 5
+#define reg_cge_idx4_12_8_lsb 8
+#define xd_p_reg_cge_idx5_7_0 0xA28A
+#define reg_cge_idx5_7_0_pos 0
+#define reg_cge_idx5_7_0_len 8
+#define reg_cge_idx5_7_0_lsb 0
+#define xd_p_reg_cge_idx5_12_8 0xA28B
+#define reg_cge_idx5_12_8_pos 0
+#define reg_cge_idx5_12_8_len 5
+#define reg_cge_idx5_12_8_lsb 8
+#define xd_p_reg_cge_idx6_7_0 0xA28C
+#define reg_cge_idx6_7_0_pos 0
+#define reg_cge_idx6_7_0_len 8
+#define reg_cge_idx6_7_0_lsb 0
+#define xd_p_reg_cge_idx6_12_8 0xA28D
+#define reg_cge_idx6_12_8_pos 0
+#define reg_cge_idx6_12_8_len 5
+#define reg_cge_idx6_12_8_lsb 8
+#define xd_p_reg_cge_idx7_7_0 0xA28E
+#define reg_cge_idx7_7_0_pos 0
+#define reg_cge_idx7_7_0_len 8
+#define reg_cge_idx7_7_0_lsb 0
+#define xd_p_reg_cge_idx7_12_8 0xA28F
+#define reg_cge_idx7_12_8_pos 0
+#define reg_cge_idx7_12_8_len 5
+#define reg_cge_idx7_12_8_lsb 8
+#define xd_p_reg_cge_idx8_7_0 0xA290
+#define reg_cge_idx8_7_0_pos 0
+#define reg_cge_idx8_7_0_len 8
+#define reg_cge_idx8_7_0_lsb 0
+#define xd_p_reg_cge_idx8_12_8 0xA291
+#define reg_cge_idx8_12_8_pos 0
+#define reg_cge_idx8_12_8_len 5
+#define reg_cge_idx8_12_8_lsb 8
+#define xd_p_reg_cge_idx9_7_0 0xA292
+#define reg_cge_idx9_7_0_pos 0
+#define reg_cge_idx9_7_0_len 8
+#define reg_cge_idx9_7_0_lsb 0
+#define xd_p_reg_cge_idx9_12_8 0xA293
+#define reg_cge_idx9_12_8_pos 0
+#define reg_cge_idx9_12_8_len 5
+#define reg_cge_idx9_12_8_lsb 8
+#define xd_p_reg_cge_idx10_7_0 0xA294
+#define reg_cge_idx10_7_0_pos 0
+#define reg_cge_idx10_7_0_len 8
+#define reg_cge_idx10_7_0_lsb 0
+#define xd_p_reg_cge_idx10_12_8 0xA295
+#define reg_cge_idx10_12_8_pos 0
+#define reg_cge_idx10_12_8_len 5
+#define reg_cge_idx10_12_8_lsb 8
+#define xd_p_reg_cge_idx11_7_0 0xA296
+#define reg_cge_idx11_7_0_pos 0
+#define reg_cge_idx11_7_0_len 8
+#define reg_cge_idx11_7_0_lsb 0
+#define xd_p_reg_cge_idx11_12_8 0xA297
+#define reg_cge_idx11_12_8_pos 0
+#define reg_cge_idx11_12_8_len 5
+#define reg_cge_idx11_12_8_lsb 8
+#define xd_p_reg_cge_idx12_7_0 0xA298
+#define reg_cge_idx12_7_0_pos 0
+#define reg_cge_idx12_7_0_len 8
+#define reg_cge_idx12_7_0_lsb 0
+#define xd_p_reg_cge_idx12_12_8 0xA299
+#define reg_cge_idx12_12_8_pos 0
+#define reg_cge_idx12_12_8_len 5
+#define reg_cge_idx12_12_8_lsb 8
+#define xd_p_reg_cge_idx13_7_0 0xA29A
+#define reg_cge_idx13_7_0_pos 0
+#define reg_cge_idx13_7_0_len 8
+#define reg_cge_idx13_7_0_lsb 0
+#define xd_p_reg_cge_idx13_12_8 0xA29B
+#define reg_cge_idx13_12_8_pos 0
+#define reg_cge_idx13_12_8_len 5
+#define reg_cge_idx13_12_8_lsb 8
+#define xd_p_reg_cge_idx14_7_0 0xA29C
+#define reg_cge_idx14_7_0_pos 0
+#define reg_cge_idx14_7_0_len 8
+#define reg_cge_idx14_7_0_lsb 0
+#define xd_p_reg_cge_idx14_12_8 0xA29D
+#define reg_cge_idx14_12_8_pos 0
+#define reg_cge_idx14_12_8_len 5
+#define reg_cge_idx14_12_8_lsb 8
+#define xd_p_reg_cge_idx15_7_0 0xA29E
+#define reg_cge_idx15_7_0_pos 0
+#define reg_cge_idx15_7_0_len 8
+#define reg_cge_idx15_7_0_lsb 0
+#define xd_p_reg_cge_idx15_12_8 0xA29F
+#define reg_cge_idx15_12_8_pos 0
+#define reg_cge_idx15_12_8_len 5
+#define reg_cge_idx15_12_8_lsb 8
+#define xd_r_reg_fft_crc 0xA2A8
+#define reg_fft_crc_pos 0
+#define reg_fft_crc_len 8
+#define reg_fft_crc_lsb 0
+#define xd_p_fd_fft_shift_max 0xA2A9
+#define fd_fft_shift_max_pos 0
+#define fd_fft_shift_max_len 4
+#define fd_fft_shift_max_lsb 0
+#define xd_r_fd_fft_shift 0xA2A9
+#define fd_fft_shift_pos 4
+#define fd_fft_shift_len 4
+#define fd_fft_shift_lsb 0
+#define xd_r_fd_fft_frame_num 0xA2AA
+#define fd_fft_frame_num_pos 0
+#define fd_fft_frame_num_len 2
+#define fd_fft_frame_num_lsb 0
+#define xd_r_fd_fft_symbol_count 0xA2AB
+#define fd_fft_symbol_count_pos 0
+#define fd_fft_symbol_count_len 7
+#define fd_fft_symbol_count_lsb 0
+#define xd_r_reg_fft_idx_max_7_0 0xA2AC
+#define reg_fft_idx_max_7_0_pos 0
+#define reg_fft_idx_max_7_0_len 8
+#define reg_fft_idx_max_7_0_lsb 0
+#define xd_r_reg_fft_idx_max_12_8 0xA2AD
+#define reg_fft_idx_max_12_8_pos 0
+#define reg_fft_idx_max_12_8_len 5
+#define reg_fft_idx_max_12_8_lsb 8
+#define xd_p_reg_cge_program 0xA2AE
+#define reg_cge_program_pos 0
+#define reg_cge_program_len 1
+#define reg_cge_program_lsb 0
+#define xd_p_reg_cge_fixed 0xA2AE
+#define reg_cge_fixed_pos 1
+#define reg_cge_fixed_len 1
+#define reg_cge_fixed_lsb 0
+#define xd_p_reg_fft_rotate_en 0xA2AE
+#define reg_fft_rotate_en_pos 2
+#define reg_fft_rotate_en_len 1
+#define reg_fft_rotate_en_lsb 0
+#define xd_p_reg_fft_rotate_base_4_0 0xA2AE
+#define reg_fft_rotate_base_4_0_pos 3
+#define reg_fft_rotate_base_4_0_len 5
+#define reg_fft_rotate_base_4_0_lsb 0
+#define xd_p_reg_fft_rotate_base_12_5 0xA2AF
+#define reg_fft_rotate_base_12_5_pos 0
+#define reg_fft_rotate_base_12_5_len 8
+#define reg_fft_rotate_base_12_5_lsb 5
+#define xd_p_reg_gp_trigger_fd 0xA2B8
+#define reg_gp_trigger_fd_pos 0
+#define reg_gp_trigger_fd_len 1
+#define reg_gp_trigger_fd_lsb 0
+#define xd_p_reg_trigger_sel_fd 0xA2B8
+#define reg_trigger_sel_fd_pos 1
+#define reg_trigger_sel_fd_len 2
+#define reg_trigger_sel_fd_lsb 0
+#define xd_p_reg_trigger_module_sel_fd 0xA2B9
+#define reg_trigger_module_sel_fd_pos 0
+#define reg_trigger_module_sel_fd_len 6
+#define reg_trigger_module_sel_fd_lsb 0
+#define xd_p_reg_trigger_set_sel_fd 0xA2BA
+#define reg_trigger_set_sel_fd_pos 0
+#define reg_trigger_set_sel_fd_len 6
+#define reg_trigger_set_sel_fd_lsb 0
+#define xd_p_reg_fd_noname_7_0 0xA2BC
+#define reg_fd_noname_7_0_pos 0
+#define reg_fd_noname_7_0_len 8
+#define reg_fd_noname_7_0_lsb 0
+#define xd_p_reg_fd_noname_15_8 0xA2BD
+#define reg_fd_noname_15_8_pos 0
+#define reg_fd_noname_15_8_len 8
+#define reg_fd_noname_15_8_lsb 8
+#define xd_p_reg_fd_noname_23_16 0xA2BE
+#define reg_fd_noname_23_16_pos 0
+#define reg_fd_noname_23_16_len 8
+#define reg_fd_noname_23_16_lsb 16
+#define xd_p_reg_fd_noname_31_24 0xA2BF
+#define reg_fd_noname_31_24_pos 0
+#define reg_fd_noname_31_24_len 8
+#define reg_fd_noname_31_24_lsb 24
+#define xd_r_fd_fpcc_cp_corr_signn 0xA2C0
+#define fd_fpcc_cp_corr_signn_pos 0
+#define fd_fpcc_cp_corr_signn_len 8
+#define fd_fpcc_cp_corr_signn_lsb 0
+#define xd_p_reg_feq_s1 0xA2C1
+#define reg_feq_s1_pos 0
+#define reg_feq_s1_len 5
+#define reg_feq_s1_lsb 0
+#define xd_p_fd_fpcc_cp_corr_tone_th 0xA2C2
+#define fd_fpcc_cp_corr_tone_th_pos 0
+#define fd_fpcc_cp_corr_tone_th_len 6
+#define fd_fpcc_cp_corr_tone_th_lsb 0
+#define xd_p_fd_fpcc_cp_corr_symbol_log_th 0xA2C3
+#define fd_fpcc_cp_corr_symbol_log_th_pos 0
+#define fd_fpcc_cp_corr_symbol_log_th_len 4
+#define fd_fpcc_cp_corr_symbol_log_th_lsb 0
+#define xd_p_fd_fpcc_cp_corr_int 0xA2C4
+#define fd_fpcc_cp_corr_int_pos 0
+#define fd_fpcc_cp_corr_int_len 1
+#define fd_fpcc_cp_corr_int_lsb 0
+#define xd_p_reg_sfoe_ns_7_0 0xA320
+#define reg_sfoe_ns_7_0_pos 0
+#define reg_sfoe_ns_7_0_len 8
+#define reg_sfoe_ns_7_0_lsb 0
+#define xd_p_reg_sfoe_ns_14_8 0xA321
+#define reg_sfoe_ns_14_8_pos 0
+#define reg_sfoe_ns_14_8_len 7
+#define reg_sfoe_ns_14_8_lsb 8
+#define xd_p_reg_sfoe_c1_7_0 0xA322
+#define reg_sfoe_c1_7_0_pos 0
+#define reg_sfoe_c1_7_0_len 8
+#define reg_sfoe_c1_7_0_lsb 0
+#define xd_p_reg_sfoe_c1_15_8 0xA323
+#define reg_sfoe_c1_15_8_pos 0
+#define reg_sfoe_c1_15_8_len 8
+#define reg_sfoe_c1_15_8_lsb 8
+#define xd_p_reg_sfoe_c1_17_16 0xA324
+#define reg_sfoe_c1_17_16_pos 0
+#define reg_sfoe_c1_17_16_len 2
+#define reg_sfoe_c1_17_16_lsb 16
+#define xd_p_reg_sfoe_c2_7_0 0xA325
+#define reg_sfoe_c2_7_0_pos 0
+#define reg_sfoe_c2_7_0_len 8
+#define reg_sfoe_c2_7_0_lsb 0
+#define xd_p_reg_sfoe_c2_15_8 0xA326
+#define reg_sfoe_c2_15_8_pos 0
+#define reg_sfoe_c2_15_8_len 8
+#define reg_sfoe_c2_15_8_lsb 8
+#define xd_p_reg_sfoe_c2_17_16 0xA327
+#define reg_sfoe_c2_17_16_pos 0
+#define reg_sfoe_c2_17_16_len 2
+#define reg_sfoe_c2_17_16_lsb 16
+#define xd_r_reg_sfoe_out_9_2 0xA328
+#define reg_sfoe_out_9_2_pos 0
+#define reg_sfoe_out_9_2_len 8
+#define reg_sfoe_out_9_2_lsb 0
+#define xd_r_reg_sfoe_out_1_0 0xA329
+#define reg_sfoe_out_1_0_pos 0
+#define reg_sfoe_out_1_0_len 2
+#define reg_sfoe_out_1_0_lsb 0
+#define xd_p_reg_sfoe_lm_counter_th 0xA32A
+#define reg_sfoe_lm_counter_th_pos 0
+#define reg_sfoe_lm_counter_th_len 4
+#define reg_sfoe_lm_counter_th_lsb 0
+#define xd_p_reg_sfoe_convg_th 0xA32B
+#define reg_sfoe_convg_th_pos 0
+#define reg_sfoe_convg_th_len 8
+#define reg_sfoe_convg_th_lsb 0
+#define xd_p_reg_sfoe_divg_th 0xA32C
+#define reg_sfoe_divg_th_pos 0
+#define reg_sfoe_divg_th_len 8
+#define reg_sfoe_divg_th_lsb 0
+#define xd_p_fd_tpsd_en 0xA330
+#define fd_tpsd_en_pos 0
+#define fd_tpsd_en_len 1
+#define fd_tpsd_en_lsb 0
+#define xd_p_fd_tpsd_dis 0xA330
+#define fd_tpsd_dis_pos 1
+#define fd_tpsd_dis_len 1
+#define fd_tpsd_dis_lsb 0
+#define xd_p_fd_tpsd_rst 0xA330
+#define fd_tpsd_rst_pos 2
+#define fd_tpsd_rst_len 1
+#define fd_tpsd_rst_lsb 0
+#define xd_p_fd_tpsd_lock 0xA330
+#define fd_tpsd_lock_pos 3
+#define fd_tpsd_lock_len 1
+#define fd_tpsd_lock_lsb 0
+#define xd_r_fd_tpsd_s19 0xA330
+#define fd_tpsd_s19_pos 4
+#define fd_tpsd_s19_len 1
+#define fd_tpsd_s19_lsb 0
+#define xd_r_fd_tpsd_s17 0xA330
+#define fd_tpsd_s17_pos 5
+#define fd_tpsd_s17_len 1
+#define fd_tpsd_s17_lsb 0
+#define xd_p_fd_sfr_ste_en 0xA331
+#define fd_sfr_ste_en_pos 0
+#define fd_sfr_ste_en_len 1
+#define fd_sfr_ste_en_lsb 0
+#define xd_p_fd_sfr_ste_dis 0xA331
+#define fd_sfr_ste_dis_pos 1
+#define fd_sfr_ste_dis_len 1
+#define fd_sfr_ste_dis_lsb 0
+#define xd_p_fd_sfr_ste_rst 0xA331
+#define fd_sfr_ste_rst_pos 2
+#define fd_sfr_ste_rst_len 1
+#define fd_sfr_ste_rst_lsb 0
+#define xd_p_fd_sfr_ste_mode 0xA331
+#define fd_sfr_ste_mode_pos 3
+#define fd_sfr_ste_mode_len 1
+#define fd_sfr_ste_mode_lsb 0
+#define xd_p_fd_sfr_ste_done 0xA331
+#define fd_sfr_ste_done_pos 4
+#define fd_sfr_ste_done_len 1
+#define fd_sfr_ste_done_lsb 0
+#define xd_p_reg_cfoe_ffoe_en 0xA332
+#define reg_cfoe_ffoe_en_pos 0
+#define reg_cfoe_ffoe_en_len 1
+#define reg_cfoe_ffoe_en_lsb 0
+#define xd_p_reg_cfoe_ffoe_dis 0xA332
+#define reg_cfoe_ffoe_dis_pos 1
+#define reg_cfoe_ffoe_dis_len 1
+#define reg_cfoe_ffoe_dis_lsb 0
+#define xd_p_reg_cfoe_ffoe_rst 0xA332
+#define reg_cfoe_ffoe_rst_pos 2
+#define reg_cfoe_ffoe_rst_len 1
+#define reg_cfoe_ffoe_rst_lsb 0
+#define xd_p_reg_cfoe_ifoe_en 0xA332
+#define reg_cfoe_ifoe_en_pos 3
+#define reg_cfoe_ifoe_en_len 1
+#define reg_cfoe_ifoe_en_lsb 0
+#define xd_p_reg_cfoe_ifoe_dis 0xA332
+#define reg_cfoe_ifoe_dis_pos 4
+#define reg_cfoe_ifoe_dis_len 1
+#define reg_cfoe_ifoe_dis_lsb 0
+#define xd_p_reg_cfoe_ifoe_rst 0xA332
+#define reg_cfoe_ifoe_rst_pos 5
+#define reg_cfoe_ifoe_rst_len 1
+#define reg_cfoe_ifoe_rst_lsb 0
+#define xd_p_reg_cfoe_fot_en 0xA332
+#define reg_cfoe_fot_en_pos 6
+#define reg_cfoe_fot_en_len 1
+#define reg_cfoe_fot_en_lsb 0
+#define xd_p_reg_cfoe_fot_lm_en 0xA332
+#define reg_cfoe_fot_lm_en_pos 7
+#define reg_cfoe_fot_lm_en_len 1
+#define reg_cfoe_fot_lm_en_lsb 0
+#define xd_p_reg_cfoe_fot_rst 0xA333
+#define reg_cfoe_fot_rst_pos 0
+#define reg_cfoe_fot_rst_len 1
+#define reg_cfoe_fot_rst_lsb 0
+#define xd_r_fd_cfoe_ffoe_done 0xA333
+#define fd_cfoe_ffoe_done_pos 1
+#define fd_cfoe_ffoe_done_len 1
+#define fd_cfoe_ffoe_done_lsb 0
+#define xd_p_fd_cfoe_metric_vld 0xA333
+#define fd_cfoe_metric_vld_pos 2
+#define fd_cfoe_metric_vld_len 1
+#define fd_cfoe_metric_vld_lsb 0
+#define xd_p_reg_cfoe_ifod_vld 0xA333
+#define reg_cfoe_ifod_vld_pos 3
+#define reg_cfoe_ifod_vld_len 1
+#define reg_cfoe_ifod_vld_lsb 0
+#define xd_r_fd_cfoe_ifoe_done 0xA333
+#define fd_cfoe_ifoe_done_pos 4
+#define fd_cfoe_ifoe_done_len 1
+#define fd_cfoe_ifoe_done_lsb 0
+#define xd_r_fd_cfoe_fot_valid 0xA333
+#define fd_cfoe_fot_valid_pos 5
+#define fd_cfoe_fot_valid_len 1
+#define fd_cfoe_fot_valid_lsb 0
+#define xd_p_reg_cfoe_divg_int 0xA333
+#define reg_cfoe_divg_int_pos 6
+#define reg_cfoe_divg_int_len 1
+#define reg_cfoe_divg_int_lsb 0
+#define xd_r_reg_cfoe_divg_flag 0xA333
+#define reg_cfoe_divg_flag_pos 7
+#define reg_cfoe_divg_flag_len 1
+#define reg_cfoe_divg_flag_lsb 0
+#define xd_p_reg_sfoe_en 0xA334
+#define reg_sfoe_en_pos 0
+#define reg_sfoe_en_len 1
+#define reg_sfoe_en_lsb 0
+#define xd_p_reg_sfoe_dis 0xA334
+#define reg_sfoe_dis_pos 1
+#define reg_sfoe_dis_len 1
+#define reg_sfoe_dis_lsb 0
+#define xd_p_reg_sfoe_rst 0xA334
+#define reg_sfoe_rst_pos 2
+#define reg_sfoe_rst_len 1
+#define reg_sfoe_rst_lsb 0
+#define xd_p_reg_sfoe_vld_int 0xA334
+#define reg_sfoe_vld_int_pos 3
+#define reg_sfoe_vld_int_len 1
+#define reg_sfoe_vld_int_lsb 0
+#define xd_p_reg_sfoe_lm_en 0xA334
+#define reg_sfoe_lm_en_pos 4
+#define reg_sfoe_lm_en_len 1
+#define reg_sfoe_lm_en_lsb 0
+#define xd_p_reg_sfoe_divg_int 0xA334
+#define reg_sfoe_divg_int_pos 5
+#define reg_sfoe_divg_int_len 1
+#define reg_sfoe_divg_int_lsb 0
+#define xd_r_reg_sfoe_divg_flag 0xA334
+#define reg_sfoe_divg_flag_pos 6
+#define reg_sfoe_divg_flag_len 1
+#define reg_sfoe_divg_flag_lsb 0
+#define xd_p_reg_fft_rst 0xA335
+#define reg_fft_rst_pos 0
+#define reg_fft_rst_len 1
+#define reg_fft_rst_lsb 0
+#define xd_p_reg_fft_fast_beacon 0xA335
+#define reg_fft_fast_beacon_pos 1
+#define reg_fft_fast_beacon_len 1
+#define reg_fft_fast_beacon_lsb 0
+#define xd_p_reg_fft_fast_valid 0xA335
+#define reg_fft_fast_valid_pos 2
+#define reg_fft_fast_valid_len 1
+#define reg_fft_fast_valid_lsb 0
+#define xd_p_reg_fft_mask_en 0xA335
+#define reg_fft_mask_en_pos 3
+#define reg_fft_mask_en_len 1
+#define reg_fft_mask_en_lsb 0
+#define xd_p_reg_fft_crc_en 0xA335
+#define reg_fft_crc_en_pos 4
+#define reg_fft_crc_en_len 1
+#define reg_fft_crc_en_lsb 0
+#define xd_p_reg_finr_en 0xA336
+#define reg_finr_en_pos 0
+#define reg_finr_en_len 1
+#define reg_finr_en_lsb 0
+#define xd_p_fd_fste_en 0xA337
+#define fd_fste_en_pos 1
+#define fd_fste_en_len 1
+#define fd_fste_en_lsb 0
+#define xd_p_fd_sqi_tps_level_shift 0xA338
+#define fd_sqi_tps_level_shift_pos 0
+#define fd_sqi_tps_level_shift_len 8
+#define fd_sqi_tps_level_shift_lsb 0
+#define xd_p_fd_pilot_ma_len 0xA339
+#define fd_pilot_ma_len_pos 0
+#define fd_pilot_ma_len_len 6
+#define fd_pilot_ma_len_lsb 0
+#define xd_p_fd_tps_ma_len 0xA33A
+#define fd_tps_ma_len_pos 0
+#define fd_tps_ma_len_len 6
+#define fd_tps_ma_len_lsb 0
+#define xd_p_fd_sqi_s3 0xA33B
+#define fd_sqi_s3_pos 0
+#define fd_sqi_s3_len 8
+#define fd_sqi_s3_lsb 0
+#define xd_p_fd_sqi_dummy_reg_0 0xA33C
+#define fd_sqi_dummy_reg_0_pos 0
+#define fd_sqi_dummy_reg_0_len 1
+#define fd_sqi_dummy_reg_0_lsb 0
+#define xd_p_fd_sqi_debug_sel 0xA33C
+#define fd_sqi_debug_sel_pos 1
+#define fd_sqi_debug_sel_len 2
+#define fd_sqi_debug_sel_lsb 0
+#define xd_p_fd_sqi_s2 0xA33C
+#define fd_sqi_s2_pos 3
+#define fd_sqi_s2_len 5
+#define fd_sqi_s2_lsb 0
+#define xd_p_fd_sqi_dummy_reg_1 0xA33D
+#define fd_sqi_dummy_reg_1_pos 0
+#define fd_sqi_dummy_reg_1_len 1
+#define fd_sqi_dummy_reg_1_lsb 0
+#define xd_p_fd_inr_ignore 0xA33D
+#define fd_inr_ignore_pos 1
+#define fd_inr_ignore_len 1
+#define fd_inr_ignore_lsb 0
+#define xd_p_fd_pilot_ignore 0xA33D
+#define fd_pilot_ignore_pos 2
+#define fd_pilot_ignore_len 1
+#define fd_pilot_ignore_lsb 0
+#define xd_p_fd_etps_ignore 0xA33D
+#define fd_etps_ignore_pos 3
+#define fd_etps_ignore_len 1
+#define fd_etps_ignore_lsb 0
+#define xd_p_fd_sqi_s1 0xA33D
+#define fd_sqi_s1_pos 4
+#define fd_sqi_s1_len 4
+#define fd_sqi_s1_lsb 0
+#define xd_p_reg_fste_ehw_7_0 0xA33E
+#define reg_fste_ehw_7_0_pos 0
+#define reg_fste_ehw_7_0_len 8
+#define reg_fste_ehw_7_0_lsb 0
+#define xd_p_reg_fste_ehw_9_8 0xA33F
+#define reg_fste_ehw_9_8_pos 0
+#define reg_fste_ehw_9_8_len 2
+#define reg_fste_ehw_9_8_lsb 8
+#define xd_p_reg_fste_i_adj_vld 0xA33F
+#define reg_fste_i_adj_vld_pos 2
+#define reg_fste_i_adj_vld_len 1
+#define reg_fste_i_adj_vld_lsb 0
+#define xd_p_reg_fste_phase_ini_7_0 0xA340
+#define reg_fste_phase_ini_7_0_pos 0
+#define reg_fste_phase_ini_7_0_len 8
+#define reg_fste_phase_ini_7_0_lsb 0
+#define xd_p_reg_fste_phase_ini_11_8 0xA341
+#define reg_fste_phase_ini_11_8_pos 0
+#define reg_fste_phase_ini_11_8_len 4
+#define reg_fste_phase_ini_11_8_lsb 8
+#define xd_p_reg_fste_phase_inc_3_0 0xA341
+#define reg_fste_phase_inc_3_0_pos 4
+#define reg_fste_phase_inc_3_0_len 4
+#define reg_fste_phase_inc_3_0_lsb 0
+#define xd_p_reg_fste_phase_inc_11_4 0xA342
+#define reg_fste_phase_inc_11_4_pos 0
+#define reg_fste_phase_inc_11_4_len 8
+#define reg_fste_phase_inc_11_4_lsb 4
+#define xd_p_reg_fste_acum_cost_cnt_max 0xA343
+#define reg_fste_acum_cost_cnt_max_pos 0
+#define reg_fste_acum_cost_cnt_max_len 4
+#define reg_fste_acum_cost_cnt_max_lsb 0
+#define xd_p_reg_fste_step_size_std 0xA343
+#define reg_fste_step_size_std_pos 4
+#define reg_fste_step_size_std_len 4
+#define reg_fste_step_size_std_lsb 0
+#define xd_p_reg_fste_step_size_max 0xA344
+#define reg_fste_step_size_max_pos 0
+#define reg_fste_step_size_max_len 4
+#define reg_fste_step_size_max_lsb 0
+#define xd_p_reg_fste_step_size_min 0xA344
+#define reg_fste_step_size_min_pos 4
+#define reg_fste_step_size_min_len 4
+#define reg_fste_step_size_min_lsb 0
+#define xd_p_reg_fste_frac_step_size_7_0 0xA345
+#define reg_fste_frac_step_size_7_0_pos 0
+#define reg_fste_frac_step_size_7_0_len 8
+#define reg_fste_frac_step_size_7_0_lsb 0
+#define xd_p_reg_fste_frac_step_size_15_8 0xA346
+#define reg_fste_frac_step_size_15_8_pos 0
+#define reg_fste_frac_step_size_15_8_len 8
+#define reg_fste_frac_step_size_15_8_lsb 8
+#define xd_p_reg_fste_frac_step_size_19_16 0xA347
+#define reg_fste_frac_step_size_19_16_pos 0
+#define reg_fste_frac_step_size_19_16_len 4
+#define reg_fste_frac_step_size_19_16_lsb 16
+#define xd_p_reg_fste_rpd_dir_cnt_max 0xA347
+#define reg_fste_rpd_dir_cnt_max_pos 4
+#define reg_fste_rpd_dir_cnt_max_len 4
+#define reg_fste_rpd_dir_cnt_max_lsb 0
+#define xd_p_reg_fste_ehs 0xA348
+#define reg_fste_ehs_pos 0
+#define reg_fste_ehs_len 4
+#define reg_fste_ehs_lsb 0
+#define xd_p_reg_fste_frac_cost_cnt_max_3_0 0xA348
+#define reg_fste_frac_cost_cnt_max_3_0_pos 4
+#define reg_fste_frac_cost_cnt_max_3_0_len 4
+#define reg_fste_frac_cost_cnt_max_3_0_lsb 0
+#define xd_p_reg_fste_frac_cost_cnt_max_9_4 0xA349
+#define reg_fste_frac_cost_cnt_max_9_4_pos 0
+#define reg_fste_frac_cost_cnt_max_9_4_len 6
+#define reg_fste_frac_cost_cnt_max_9_4_lsb 4
+#define xd_p_reg_fste_w0_7_0 0xA34A
+#define reg_fste_w0_7_0_pos 0
+#define reg_fste_w0_7_0_len 8
+#define reg_fste_w0_7_0_lsb 0
+#define xd_p_reg_fste_w0_11_8 0xA34B
+#define reg_fste_w0_11_8_pos 0
+#define reg_fste_w0_11_8_len 4
+#define reg_fste_w0_11_8_lsb 8
+#define xd_p_reg_fste_w1_3_0 0xA34B
+#define reg_fste_w1_3_0_pos 4
+#define reg_fste_w1_3_0_len 4
+#define reg_fste_w1_3_0_lsb 0
+#define xd_p_reg_fste_w1_11_4 0xA34C
+#define reg_fste_w1_11_4_pos 0
+#define reg_fste_w1_11_4_len 8
+#define reg_fste_w1_11_4_lsb 4
+#define xd_p_reg_fste_w2_7_0 0xA34D
+#define reg_fste_w2_7_0_pos 0
+#define reg_fste_w2_7_0_len 8
+#define reg_fste_w2_7_0_lsb 0
+#define xd_p_reg_fste_w2_11_8 0xA34E
+#define reg_fste_w2_11_8_pos 0
+#define reg_fste_w2_11_8_len 4
+#define reg_fste_w2_11_8_lsb 8
+#define xd_p_reg_fste_w3_3_0 0xA34E
+#define reg_fste_w3_3_0_pos 4
+#define reg_fste_w3_3_0_len 4
+#define reg_fste_w3_3_0_lsb 0
+#define xd_p_reg_fste_w3_11_4 0xA34F
+#define reg_fste_w3_11_4_pos 0
+#define reg_fste_w3_11_4_len 8
+#define reg_fste_w3_11_4_lsb 4
+#define xd_p_reg_fste_w4_7_0 0xA350
+#define reg_fste_w4_7_0_pos 0
+#define reg_fste_w4_7_0_len 8
+#define reg_fste_w4_7_0_lsb 0
+#define xd_p_reg_fste_w4_11_8 0xA351
+#define reg_fste_w4_11_8_pos 0
+#define reg_fste_w4_11_8_len 4
+#define reg_fste_w4_11_8_lsb 8
+#define xd_p_reg_fste_w5_3_0 0xA351
+#define reg_fste_w5_3_0_pos 4
+#define reg_fste_w5_3_0_len 4
+#define reg_fste_w5_3_0_lsb 0
+#define xd_p_reg_fste_w5_11_4 0xA352
+#define reg_fste_w5_11_4_pos 0
+#define reg_fste_w5_11_4_len 8
+#define reg_fste_w5_11_4_lsb 4
+#define xd_p_reg_fste_w6_7_0 0xA353
+#define reg_fste_w6_7_0_pos 0
+#define reg_fste_w6_7_0_len 8
+#define reg_fste_w6_7_0_lsb 0
+#define xd_p_reg_fste_w6_11_8 0xA354
+#define reg_fste_w6_11_8_pos 0
+#define reg_fste_w6_11_8_len 4
+#define reg_fste_w6_11_8_lsb 8
+#define xd_p_reg_fste_w7_3_0 0xA354
+#define reg_fste_w7_3_0_pos 4
+#define reg_fste_w7_3_0_len 4
+#define reg_fste_w7_3_0_lsb 0
+#define xd_p_reg_fste_w7_11_4 0xA355
+#define reg_fste_w7_11_4_pos 0
+#define reg_fste_w7_11_4_len 8
+#define reg_fste_w7_11_4_lsb 4
+#define xd_p_reg_fste_w8_7_0 0xA356
+#define reg_fste_w8_7_0_pos 0
+#define reg_fste_w8_7_0_len 8
+#define reg_fste_w8_7_0_lsb 0
+#define xd_p_reg_fste_w8_11_8 0xA357
+#define reg_fste_w8_11_8_pos 0
+#define reg_fste_w8_11_8_len 4
+#define reg_fste_w8_11_8_lsb 8
+#define xd_p_reg_fste_w9_3_0 0xA357
+#define reg_fste_w9_3_0_pos 4
+#define reg_fste_w9_3_0_len 4
+#define reg_fste_w9_3_0_lsb 0
+#define xd_p_reg_fste_w9_11_4 0xA358
+#define reg_fste_w9_11_4_pos 0
+#define reg_fste_w9_11_4_len 8
+#define reg_fste_w9_11_4_lsb 4
+#define xd_p_reg_fste_wa_7_0 0xA359
+#define reg_fste_wa_7_0_pos 0
+#define reg_fste_wa_7_0_len 8
+#define reg_fste_wa_7_0_lsb 0
+#define xd_p_reg_fste_wa_11_8 0xA35A
+#define reg_fste_wa_11_8_pos 0
+#define reg_fste_wa_11_8_len 4
+#define reg_fste_wa_11_8_lsb 8
+#define xd_p_reg_fste_wb_3_0 0xA35A
+#define reg_fste_wb_3_0_pos 4
+#define reg_fste_wb_3_0_len 4
+#define reg_fste_wb_3_0_lsb 0
+#define xd_p_reg_fste_wb_11_4 0xA35B
+#define reg_fste_wb_11_4_pos 0
+#define reg_fste_wb_11_4_len 8
+#define reg_fste_wb_11_4_lsb 4
+#define xd_r_fd_fste_i_adj 0xA35C
+#define fd_fste_i_adj_pos 0
+#define fd_fste_i_adj_len 5
+#define fd_fste_i_adj_lsb 0
+#define xd_r_fd_fste_f_adj_7_0 0xA35D
+#define fd_fste_f_adj_7_0_pos 0
+#define fd_fste_f_adj_7_0_len 8
+#define fd_fste_f_adj_7_0_lsb 0
+#define xd_r_fd_fste_f_adj_15_8 0xA35E
+#define fd_fste_f_adj_15_8_pos 0
+#define fd_fste_f_adj_15_8_len 8
+#define fd_fste_f_adj_15_8_lsb 8
+#define xd_r_fd_fste_f_adj_19_16 0xA35F
+#define fd_fste_f_adj_19_16_pos 0
+#define fd_fste_f_adj_19_16_len 4
+#define fd_fste_f_adj_19_16_lsb 16
+#define xd_p_reg_feq_Leak_Bypass 0xA366
+#define reg_feq_Leak_Bypass_pos 0
+#define reg_feq_Leak_Bypass_len 1
+#define reg_feq_Leak_Bypass_lsb 0
+#define xd_p_reg_feq_Leak_Mneg1 0xA366
+#define reg_feq_Leak_Mneg1_pos 1
+#define reg_feq_Leak_Mneg1_len 3
+#define reg_feq_Leak_Mneg1_lsb 0
+#define xd_p_reg_feq_Leak_B_ShiftQ 0xA366
+#define reg_feq_Leak_B_ShiftQ_pos 4
+#define reg_feq_Leak_B_ShiftQ_len 4
+#define reg_feq_Leak_B_ShiftQ_lsb 0
+#define xd_p_reg_feq_Leak_B_Float0 0xA367
+#define reg_feq_Leak_B_Float0_pos 0
+#define reg_feq_Leak_B_Float0_len 8
+#define reg_feq_Leak_B_Float0_lsb 0
+#define xd_p_reg_feq_Leak_B_Float1 0xA368
+#define reg_feq_Leak_B_Float1_pos 0
+#define reg_feq_Leak_B_Float1_len 8
+#define reg_feq_Leak_B_Float1_lsb 0
+#define xd_p_reg_feq_Leak_B_Float2 0xA369
+#define reg_feq_Leak_B_Float2_pos 0
+#define reg_feq_Leak_B_Float2_len 8
+#define reg_feq_Leak_B_Float2_lsb 0
+#define xd_p_reg_feq_Leak_B_Float3 0xA36A
+#define reg_feq_Leak_B_Float3_pos 0
+#define reg_feq_Leak_B_Float3_len 8
+#define reg_feq_Leak_B_Float3_lsb 0
+#define xd_p_reg_feq_Leak_B_Float4 0xA36B
+#define reg_feq_Leak_B_Float4_pos 0
+#define reg_feq_Leak_B_Float4_len 8
+#define reg_feq_Leak_B_Float4_lsb 0
+#define xd_p_reg_feq_Leak_B_Float5 0xA36C
+#define reg_feq_Leak_B_Float5_pos 0
+#define reg_feq_Leak_B_Float5_len 8
+#define reg_feq_Leak_B_Float5_lsb 0
+#define xd_p_reg_feq_Leak_B_Float6 0xA36D
+#define reg_feq_Leak_B_Float6_pos 0
+#define reg_feq_Leak_B_Float6_len 8
+#define reg_feq_Leak_B_Float6_lsb 0
+#define xd_p_reg_feq_Leak_B_Float7 0xA36E
+#define reg_feq_Leak_B_Float7_pos 0
+#define reg_feq_Leak_B_Float7_len 8
+#define reg_feq_Leak_B_Float7_lsb 0
+#define xd_r_reg_feq_data_h2_7_0 0xA36F
+#define reg_feq_data_h2_7_0_pos 0
+#define reg_feq_data_h2_7_0_len 8
+#define reg_feq_data_h2_7_0_lsb 0
+#define xd_r_reg_feq_data_h2_9_8 0xA370
+#define reg_feq_data_h2_9_8_pos 0
+#define reg_feq_data_h2_9_8_len 2
+#define reg_feq_data_h2_9_8_lsb 8
+#define xd_p_reg_feq_leak_use_slice_tps 0xA371
+#define reg_feq_leak_use_slice_tps_pos 0
+#define reg_feq_leak_use_slice_tps_len 1
+#define reg_feq_leak_use_slice_tps_lsb 0
+#define xd_p_reg_feq_read_update 0xA371
+#define reg_feq_read_update_pos 1
+#define reg_feq_read_update_len 1
+#define reg_feq_read_update_lsb 0
+#define xd_p_reg_feq_data_vld 0xA371
+#define reg_feq_data_vld_pos 2
+#define reg_feq_data_vld_len 1
+#define reg_feq_data_vld_lsb 0
+#define xd_p_reg_feq_tone_idx_4_0 0xA371
+#define reg_feq_tone_idx_4_0_pos 3
+#define reg_feq_tone_idx_4_0_len 5
+#define reg_feq_tone_idx_4_0_lsb 0
+#define xd_p_reg_feq_tone_idx_12_5 0xA372
+#define reg_feq_tone_idx_12_5_pos 0
+#define reg_feq_tone_idx_12_5_len 8
+#define reg_feq_tone_idx_12_5_lsb 5
+#define xd_r_reg_feq_data_re_7_0 0xA373
+#define reg_feq_data_re_7_0_pos 0
+#define reg_feq_data_re_7_0_len 8
+#define reg_feq_data_re_7_0_lsb 0
+#define xd_r_reg_feq_data_re_10_8 0xA374
+#define reg_feq_data_re_10_8_pos 0
+#define reg_feq_data_re_10_8_len 3
+#define reg_feq_data_re_10_8_lsb 8
+#define xd_r_reg_feq_data_im_7_0 0xA375
+#define reg_feq_data_im_7_0_pos 0
+#define reg_feq_data_im_7_0_len 8
+#define reg_feq_data_im_7_0_lsb 0
+#define xd_r_reg_feq_data_im_10_8 0xA376
+#define reg_feq_data_im_10_8_pos 0
+#define reg_feq_data_im_10_8_len 3
+#define reg_feq_data_im_10_8_lsb 8
+#define xd_r_reg_feq_y_re 0xA377
+#define reg_feq_y_re_pos 0
+#define reg_feq_y_re_len 8
+#define reg_feq_y_re_lsb 0
+#define xd_r_reg_feq_y_im 0xA378
+#define reg_feq_y_im_pos 0
+#define reg_feq_y_im_len 8
+#define reg_feq_y_im_lsb 0
+#define xd_r_reg_feq_h_re_7_0 0xA379
+#define reg_feq_h_re_7_0_pos 0
+#define reg_feq_h_re_7_0_len 8
+#define reg_feq_h_re_7_0_lsb 0
+#define xd_r_reg_feq_h_re_8 0xA37A
+#define reg_feq_h_re_8_pos 0
+#define reg_feq_h_re_8_len 1
+#define reg_feq_h_re_8_lsb 0
+#define xd_r_reg_feq_h_im_7_0 0xA37B
+#define reg_feq_h_im_7_0_pos 0
+#define reg_feq_h_im_7_0_len 8
+#define reg_feq_h_im_7_0_lsb 0
+#define xd_r_reg_feq_h_im_8 0xA37C
+#define reg_feq_h_im_8_pos 0
+#define reg_feq_h_im_8_len 1
+#define reg_feq_h_im_8_lsb 0
+#define xd_p_fec_super_frm_unit_7_0 0xA380
+#define fec_super_frm_unit_7_0_pos 0
+#define fec_super_frm_unit_7_0_len 8
+#define fec_super_frm_unit_7_0_lsb 0
+#define xd_p_fec_super_frm_unit_15_8 0xA381
+#define fec_super_frm_unit_15_8_pos 0
+#define fec_super_frm_unit_15_8_len 8
+#define fec_super_frm_unit_15_8_lsb 8
+#define xd_r_fec_vtb_err_bit_cnt_7_0 0xA382
+#define fec_vtb_err_bit_cnt_7_0_pos 0
+#define fec_vtb_err_bit_cnt_7_0_len 8
+#define fec_vtb_err_bit_cnt_7_0_lsb 0
+#define xd_r_fec_vtb_err_bit_cnt_15_8 0xA383
+#define fec_vtb_err_bit_cnt_15_8_pos 0
+#define fec_vtb_err_bit_cnt_15_8_len 8
+#define fec_vtb_err_bit_cnt_15_8_lsb 8
+#define xd_r_fec_vtb_err_bit_cnt_23_16 0xA384
+#define fec_vtb_err_bit_cnt_23_16_pos 0
+#define fec_vtb_err_bit_cnt_23_16_len 8
+#define fec_vtb_err_bit_cnt_23_16_lsb 16
+#define xd_p_fec_rsd_packet_unit_7_0 0xA385
+#define fec_rsd_packet_unit_7_0_pos 0
+#define fec_rsd_packet_unit_7_0_len 8
+#define fec_rsd_packet_unit_7_0_lsb 0
+#define xd_p_fec_rsd_packet_unit_15_8 0xA386
+#define fec_rsd_packet_unit_15_8_pos 0
+#define fec_rsd_packet_unit_15_8_len 8
+#define fec_rsd_packet_unit_15_8_lsb 8
+#define xd_r_fec_rsd_bit_err_cnt_7_0 0xA387
+#define fec_rsd_bit_err_cnt_7_0_pos 0
+#define fec_rsd_bit_err_cnt_7_0_len 8
+#define fec_rsd_bit_err_cnt_7_0_lsb 0
+#define xd_r_fec_rsd_bit_err_cnt_15_8 0xA388
+#define fec_rsd_bit_err_cnt_15_8_pos 0
+#define fec_rsd_bit_err_cnt_15_8_len 8
+#define fec_rsd_bit_err_cnt_15_8_lsb 8
+#define xd_r_fec_rsd_bit_err_cnt_23_16 0xA389
+#define fec_rsd_bit_err_cnt_23_16_pos 0
+#define fec_rsd_bit_err_cnt_23_16_len 8
+#define fec_rsd_bit_err_cnt_23_16_lsb 16
+#define xd_r_fec_rsd_abort_packet_cnt_7_0 0xA38A
+#define fec_rsd_abort_packet_cnt_7_0_pos 0
+#define fec_rsd_abort_packet_cnt_7_0_len 8
+#define fec_rsd_abort_packet_cnt_7_0_lsb 0
+#define xd_r_fec_rsd_abort_packet_cnt_15_8 0xA38B
+#define fec_rsd_abort_packet_cnt_15_8_pos 0
+#define fec_rsd_abort_packet_cnt_15_8_len 8
+#define fec_rsd_abort_packet_cnt_15_8_lsb 8
+#define xd_p_fec_RSD_PKT_NUM_PER_UNIT_7_0 0xA38C
+#define fec_RSD_PKT_NUM_PER_UNIT_7_0_pos 0
+#define fec_RSD_PKT_NUM_PER_UNIT_7_0_len 8
+#define fec_RSD_PKT_NUM_PER_UNIT_7_0_lsb 0
+#define xd_p_fec_RSD_PKT_NUM_PER_UNIT_15_8 0xA38D
+#define fec_RSD_PKT_NUM_PER_UNIT_15_8_pos 0
+#define fec_RSD_PKT_NUM_PER_UNIT_15_8_len 8
+#define fec_RSD_PKT_NUM_PER_UNIT_15_8_lsb 8
+#define xd_p_fec_RS_TH_1_7_0 0xA38E
+#define fec_RS_TH_1_7_0_pos 0
+#define fec_RS_TH_1_7_0_len 8
+#define fec_RS_TH_1_7_0_lsb 0
+#define xd_p_fec_RS_TH_1_15_8 0xA38F
+#define fec_RS_TH_1_15_8_pos 0
+#define fec_RS_TH_1_15_8_len 8
+#define fec_RS_TH_1_15_8_lsb 8
+#define xd_p_fec_RS_TH_2 0xA390
+#define fec_RS_TH_2_pos 0
+#define fec_RS_TH_2_len 8
+#define fec_RS_TH_2_lsb 0
+#define xd_p_fec_mon_en 0xA391
+#define fec_mon_en_pos 0
+#define fec_mon_en_len 1
+#define fec_mon_en_lsb 0
+#define xd_p_reg_b8to47 0xA391
+#define reg_b8to47_pos 1
+#define reg_b8to47_len 1
+#define reg_b8to47_lsb 0
+#define xd_p_reg_rsd_sync_rep 0xA391
+#define reg_rsd_sync_rep_pos 2
+#define reg_rsd_sync_rep_len 1
+#define reg_rsd_sync_rep_lsb 0
+#define xd_p_fec_rsd_retrain_rst 0xA391
+#define fec_rsd_retrain_rst_pos 3
+#define fec_rsd_retrain_rst_len 1
+#define fec_rsd_retrain_rst_lsb 0
+#define xd_r_fec_rsd_ber_rdy 0xA391
+#define fec_rsd_ber_rdy_pos 4
+#define fec_rsd_ber_rdy_len 1
+#define fec_rsd_ber_rdy_lsb 0
+#define xd_p_fec_rsd_ber_rst 0xA391
+#define fec_rsd_ber_rst_pos 5
+#define fec_rsd_ber_rst_len 1
+#define fec_rsd_ber_rst_lsb 0
+#define xd_r_fec_vtb_ber_rdy 0xA391
+#define fec_vtb_ber_rdy_pos 6
+#define fec_vtb_ber_rdy_len 1
+#define fec_vtb_ber_rdy_lsb 0
+#define xd_p_fec_vtb_ber_rst 0xA391
+#define fec_vtb_ber_rst_pos 7
+#define fec_vtb_ber_rst_len 1
+#define fec_vtb_ber_rst_lsb 0
+#define xd_p_reg_vtb_clk40en 0xA392
+#define reg_vtb_clk40en_pos 0
+#define reg_vtb_clk40en_len 1
+#define reg_vtb_clk40en_lsb 0
+#define xd_p_fec_vtb_rsd_mon_en 0xA392
+#define fec_vtb_rsd_mon_en_pos 1
+#define fec_vtb_rsd_mon_en_len 1
+#define fec_vtb_rsd_mon_en_lsb 0
+#define xd_p_reg_fec_data_en 0xA392
+#define reg_fec_data_en_pos 2
+#define reg_fec_data_en_len 1
+#define reg_fec_data_en_lsb 0
+#define xd_p_fec_dummy_reg_2 0xA392
+#define fec_dummy_reg_2_pos 3
+#define fec_dummy_reg_2_len 3
+#define fec_dummy_reg_2_lsb 0
+#define xd_p_reg_sync_chk 0xA392
+#define reg_sync_chk_pos 6
+#define reg_sync_chk_len 1
+#define reg_sync_chk_lsb 0
+#define xd_p_fec_rsd_bypass 0xA392
+#define fec_rsd_bypass_pos 7
+#define fec_rsd_bypass_len 1
+#define fec_rsd_bypass_lsb 0
+#define xd_p_fec_sw_rst 0xA393
+#define fec_sw_rst_pos 0
+#define fec_sw_rst_len 1
+#define fec_sw_rst_lsb 0
+#define xd_r_fec_vtb_pm_crc 0xA394
+#define fec_vtb_pm_crc_pos 0
+#define fec_vtb_pm_crc_len 8
+#define fec_vtb_pm_crc_lsb 0
+#define xd_r_fec_vtb_tb_7_crc 0xA395
+#define fec_vtb_tb_7_crc_pos 0
+#define fec_vtb_tb_7_crc_len 8
+#define fec_vtb_tb_7_crc_lsb 0
+#define xd_r_fec_vtb_tb_6_crc 0xA396
+#define fec_vtb_tb_6_crc_pos 0
+#define fec_vtb_tb_6_crc_len 8
+#define fec_vtb_tb_6_crc_lsb 0
+#define xd_r_fec_vtb_tb_5_crc 0xA397
+#define fec_vtb_tb_5_crc_pos 0
+#define fec_vtb_tb_5_crc_len 8
+#define fec_vtb_tb_5_crc_lsb 0
+#define xd_r_fec_vtb_tb_4_crc 0xA398
+#define fec_vtb_tb_4_crc_pos 0
+#define fec_vtb_tb_4_crc_len 8
+#define fec_vtb_tb_4_crc_lsb 0
+#define xd_r_fec_vtb_tb_3_crc 0xA399
+#define fec_vtb_tb_3_crc_pos 0
+#define fec_vtb_tb_3_crc_len 8
+#define fec_vtb_tb_3_crc_lsb 0
+#define xd_r_fec_vtb_tb_2_crc 0xA39A
+#define fec_vtb_tb_2_crc_pos 0
+#define fec_vtb_tb_2_crc_len 8
+#define fec_vtb_tb_2_crc_lsb 0
+#define xd_r_fec_vtb_tb_1_crc 0xA39B
+#define fec_vtb_tb_1_crc_pos 0
+#define fec_vtb_tb_1_crc_len 8
+#define fec_vtb_tb_1_crc_lsb 0
+#define xd_r_fec_vtb_tb_0_crc 0xA39C
+#define fec_vtb_tb_0_crc_pos 0
+#define fec_vtb_tb_0_crc_len 8
+#define fec_vtb_tb_0_crc_lsb 0
+#define xd_r_fec_rsd_bank0_crc 0xA39D
+#define fec_rsd_bank0_crc_pos 0
+#define fec_rsd_bank0_crc_len 8
+#define fec_rsd_bank0_crc_lsb 0
+#define xd_r_fec_rsd_bank1_crc 0xA39E
+#define fec_rsd_bank1_crc_pos 0
+#define fec_rsd_bank1_crc_len 8
+#define fec_rsd_bank1_crc_lsb 0
+#define xd_r_fec_idi_vtb_crc 0xA39F
+#define fec_idi_vtb_crc_pos 0
+#define fec_idi_vtb_crc_len 8
+#define fec_idi_vtb_crc_lsb 0
+#define xd_g_reg_tpsd_txmod 0xA3C0
+#define reg_tpsd_txmod_pos 0
+#define reg_tpsd_txmod_len 2
+#define reg_tpsd_txmod_lsb 0
+#define xd_g_reg_tpsd_gi 0xA3C0
+#define reg_tpsd_gi_pos 2
+#define reg_tpsd_gi_len 2
+#define reg_tpsd_gi_lsb 0
+#define xd_g_reg_tpsd_hier 0xA3C0
+#define reg_tpsd_hier_pos 4
+#define reg_tpsd_hier_len 3
+#define reg_tpsd_hier_lsb 0
+#define xd_g_reg_bw 0xA3C1
+#define reg_bw_pos 2
+#define reg_bw_len 2
+#define reg_bw_lsb 0
+#define xd_g_reg_dec_pri 0xA3C1
+#define reg_dec_pri_pos 4
+#define reg_dec_pri_len 1
+#define reg_dec_pri_lsb 0
+#define xd_g_reg_tpsd_const 0xA3C1
+#define reg_tpsd_const_pos 6
+#define reg_tpsd_const_len 2
+#define reg_tpsd_const_lsb 0
+#define xd_g_reg_tpsd_hpcr 0xA3C2
+#define reg_tpsd_hpcr_pos 0
+#define reg_tpsd_hpcr_len 3
+#define reg_tpsd_hpcr_lsb 0
+#define xd_g_reg_tpsd_lpcr 0xA3C2
+#define reg_tpsd_lpcr_pos 3
+#define reg_tpsd_lpcr_len 3
+#define reg_tpsd_lpcr_lsb 0
+#define xd_g_reg_ofsm_clk 0xA3D0
+#define reg_ofsm_clk_pos 0
+#define reg_ofsm_clk_len 3
+#define reg_ofsm_clk_lsb 0
+#define xd_g_reg_fclk_cfg 0xA3D1
+#define reg_fclk_cfg_pos 0
+#define reg_fclk_cfg_len 1
+#define reg_fclk_cfg_lsb 0
+#define xd_g_reg_fclk_idi 0xA3D1
+#define reg_fclk_idi_pos 1
+#define reg_fclk_idi_len 1
+#define reg_fclk_idi_lsb 0
+#define xd_g_reg_fclk_odi 0xA3D1
+#define reg_fclk_odi_pos 2
+#define reg_fclk_odi_len 1
+#define reg_fclk_odi_lsb 0
+#define xd_g_reg_fclk_rsd 0xA3D1
+#define reg_fclk_rsd_pos 3
+#define reg_fclk_rsd_len 1
+#define reg_fclk_rsd_lsb 0
+#define xd_g_reg_fclk_vtb 0xA3D1
+#define reg_fclk_vtb_pos 4
+#define reg_fclk_vtb_len 1
+#define reg_fclk_vtb_lsb 0
+#define xd_g_reg_fclk_cste 0xA3D1
+#define reg_fclk_cste_pos 5
+#define reg_fclk_cste_len 1
+#define reg_fclk_cste_lsb 0
+#define xd_g_reg_fclk_mp2if 0xA3D1
+#define reg_fclk_mp2if_pos 6
+#define reg_fclk_mp2if_len 1
+#define reg_fclk_mp2if_lsb 0
+#define xd_I2C_i2c_m_slave_addr 0xA400
+#define i2c_m_slave_addr_pos 0
+#define i2c_m_slave_addr_len 8
+#define i2c_m_slave_addr_lsb 0
+#define xd_I2C_i2c_m_data1 0xA401
+#define i2c_m_data1_pos 0
+#define i2c_m_data1_len 8
+#define i2c_m_data1_lsb 0
+#define xd_I2C_i2c_m_data2 0xA402
+#define i2c_m_data2_pos 0
+#define i2c_m_data2_len 8
+#define i2c_m_data2_lsb 0
+#define xd_I2C_i2c_m_data3 0xA403
+#define i2c_m_data3_pos 0
+#define i2c_m_data3_len 8
+#define i2c_m_data3_lsb 0
+#define xd_I2C_i2c_m_data4 0xA404
+#define i2c_m_data4_pos 0
+#define i2c_m_data4_len 8
+#define i2c_m_data4_lsb 0
+#define xd_I2C_i2c_m_data5 0xA405
+#define i2c_m_data5_pos 0
+#define i2c_m_data5_len 8
+#define i2c_m_data5_lsb 0
+#define xd_I2C_i2c_m_data6 0xA406
+#define i2c_m_data6_pos 0
+#define i2c_m_data6_len 8
+#define i2c_m_data6_lsb 0
+#define xd_I2C_i2c_m_data7 0xA407
+#define i2c_m_data7_pos 0
+#define i2c_m_data7_len 8
+#define i2c_m_data7_lsb 0
+#define xd_I2C_i2c_m_data8 0xA408
+#define i2c_m_data8_pos 0
+#define i2c_m_data8_len 8
+#define i2c_m_data8_lsb 0
+#define xd_I2C_i2c_m_data9 0xA409
+#define i2c_m_data9_pos 0
+#define i2c_m_data9_len 8
+#define i2c_m_data9_lsb 0
+#define xd_I2C_i2c_m_data10 0xA40A
+#define i2c_m_data10_pos 0
+#define i2c_m_data10_len 8
+#define i2c_m_data10_lsb 0
+#define xd_I2C_i2c_m_data11 0xA40B
+#define i2c_m_data11_pos 0
+#define i2c_m_data11_len 8
+#define i2c_m_data11_lsb 0
+#define xd_I2C_i2c_m_cmd_rw 0xA40C
+#define i2c_m_cmd_rw_pos 0
+#define i2c_m_cmd_rw_len 1
+#define i2c_m_cmd_rw_lsb 0
+#define xd_I2C_i2c_m_cmd_rwlen 0xA40C
+#define i2c_m_cmd_rwlen_pos 3
+#define i2c_m_cmd_rwlen_len 4
+#define i2c_m_cmd_rwlen_lsb 0
+#define xd_I2C_i2c_m_status_cmd_exe 0xA40D
+#define i2c_m_status_cmd_exe_pos 0
+#define i2c_m_status_cmd_exe_len 1
+#define i2c_m_status_cmd_exe_lsb 0
+#define xd_I2C_i2c_m_status_wdat_done 0xA40D
+#define i2c_m_status_wdat_done_pos 1
+#define i2c_m_status_wdat_done_len 1
+#define i2c_m_status_wdat_done_lsb 0
+#define xd_I2C_i2c_m_status_wdat_fail 0xA40D
+#define i2c_m_status_wdat_fail_pos 2
+#define i2c_m_status_wdat_fail_len 1
+#define i2c_m_status_wdat_fail_lsb 0
+#define xd_I2C_i2c_m_period 0xA40E
+#define i2c_m_period_pos 0
+#define i2c_m_period_len 8
+#define i2c_m_period_lsb 0
+#define xd_I2C_i2c_m_reg_msb_lsb 0xA40F
+#define i2c_m_reg_msb_lsb_pos 0
+#define i2c_m_reg_msb_lsb_len 1
+#define i2c_m_reg_msb_lsb_lsb 0
+#define xd_I2C_reg_ofdm_rst 0xA40F
+#define reg_ofdm_rst_pos 1
+#define reg_ofdm_rst_len 1
+#define reg_ofdm_rst_lsb 0
+#define xd_I2C_reg_sample_period_on_tuner 0xA40F
+#define reg_sample_period_on_tuner_pos 2
+#define reg_sample_period_on_tuner_len 1
+#define reg_sample_period_on_tuner_lsb 0
+#define xd_I2C_reg_rst_i2c 0xA40F
+#define reg_rst_i2c_pos 3
+#define reg_rst_i2c_len 1
+#define reg_rst_i2c_lsb 0
+#define xd_I2C_reg_ofdm_rst_en 0xA40F
+#define reg_ofdm_rst_en_pos 4
+#define reg_ofdm_rst_en_len 1
+#define reg_ofdm_rst_en_lsb 0
+#define xd_I2C_reg_tuner_sda_sync_on 0xA40F
+#define reg_tuner_sda_sync_on_pos 5
+#define reg_tuner_sda_sync_on_len 1
+#define reg_tuner_sda_sync_on_lsb 0
+#define xd_p_mp2if_data_access_disable_ofsm 0xA500
+#define mp2if_data_access_disable_ofsm_pos 0
+#define mp2if_data_access_disable_ofsm_len 1
+#define mp2if_data_access_disable_ofsm_lsb 0
+#define xd_p_reg_mp2_sw_rst_ofsm 0xA500
+#define reg_mp2_sw_rst_ofsm_pos 1
+#define reg_mp2_sw_rst_ofsm_len 1
+#define reg_mp2_sw_rst_ofsm_lsb 0
+#define xd_p_reg_mp2if_clk_en_ofsm 0xA500
+#define reg_mp2if_clk_en_ofsm_pos 2
+#define reg_mp2if_clk_en_ofsm_len 1
+#define reg_mp2if_clk_en_ofsm_lsb 0
+#define xd_r_mp2if_sync_byte_locked 0xA500
+#define mp2if_sync_byte_locked_pos 3
+#define mp2if_sync_byte_locked_len 1
+#define mp2if_sync_byte_locked_lsb 0
+#define xd_r_mp2if_ts_not_188 0xA500
+#define mp2if_ts_not_188_pos 4
+#define mp2if_ts_not_188_len 1
+#define mp2if_ts_not_188_lsb 0
+#define xd_r_mp2if_psb_empty 0xA500
+#define mp2if_psb_empty_pos 5
+#define mp2if_psb_empty_len 1
+#define mp2if_psb_empty_lsb 0
+#define xd_r_mp2if_psb_overflow 0xA500
+#define mp2if_psb_overflow_pos 6
+#define mp2if_psb_overflow_len 1
+#define mp2if_psb_overflow_lsb 0
+#define xd_p_mp2if_keep_sf_sync_byte_ofsm 0xA500
+#define mp2if_keep_sf_sync_byte_ofsm_pos 7
+#define mp2if_keep_sf_sync_byte_ofsm_len 1
+#define mp2if_keep_sf_sync_byte_ofsm_lsb 0
+#define xd_r_mp2if_psb_mp2if_num_pkt 0xA501
+#define mp2if_psb_mp2if_num_pkt_pos 0
+#define mp2if_psb_mp2if_num_pkt_len 6
+#define mp2if_psb_mp2if_num_pkt_lsb 0
+#define xd_p_reg_mpeg_full_speed_ofsm 0xA501
+#define reg_mpeg_full_speed_ofsm_pos 6
+#define reg_mpeg_full_speed_ofsm_len 1
+#define reg_mpeg_full_speed_ofsm_lsb 0
+#define xd_p_mp2if_mpeg_ser_mode_ofsm 0xA501
+#define mp2if_mpeg_ser_mode_ofsm_pos 7
+#define mp2if_mpeg_ser_mode_ofsm_len 1
+#define mp2if_mpeg_ser_mode_ofsm_lsb 0
+#define xd_p_reg_sw_mon51 0xA600
+#define reg_sw_mon51_pos 0
+#define reg_sw_mon51_len 8
+#define reg_sw_mon51_lsb 0
+#define xd_p_reg_top_pcsel 0xA601
+#define reg_top_pcsel_pos 0
+#define reg_top_pcsel_len 1
+#define reg_top_pcsel_lsb 0
+#define xd_p_reg_top_rs232 0xA601
+#define reg_top_rs232_pos 1
+#define reg_top_rs232_len 1
+#define reg_top_rs232_lsb 0
+#define xd_p_reg_top_pcout 0xA601
+#define reg_top_pcout_pos 2
+#define reg_top_pcout_len 1
+#define reg_top_pcout_lsb 0
+#define xd_p_reg_top_debug 0xA601
+#define reg_top_debug_pos 3
+#define reg_top_debug_len 1
+#define reg_top_debug_lsb 0
+#define xd_p_reg_top_adcdly 0xA601
+#define reg_top_adcdly_pos 4
+#define reg_top_adcdly_len 2
+#define reg_top_adcdly_lsb 0
+#define xd_p_reg_top_pwrdw 0xA601
+#define reg_top_pwrdw_pos 6
+#define reg_top_pwrdw_len 1
+#define reg_top_pwrdw_lsb 0
+#define xd_p_reg_top_pwrdw_inv 0xA601
+#define reg_top_pwrdw_inv_pos 7
+#define reg_top_pwrdw_inv_len 1
+#define reg_top_pwrdw_inv_lsb 0
+#define xd_p_reg_top_int_inv 0xA602
+#define reg_top_int_inv_pos 0
+#define reg_top_int_inv_len 1
+#define reg_top_int_inv_lsb 0
+#define xd_p_reg_top_dio_sel 0xA602
+#define reg_top_dio_sel_pos 1
+#define reg_top_dio_sel_len 1
+#define reg_top_dio_sel_lsb 0
+#define xd_p_reg_top_gpioon0 0xA603
+#define reg_top_gpioon0_pos 0
+#define reg_top_gpioon0_len 1
+#define reg_top_gpioon0_lsb 0
+#define xd_p_reg_top_gpioon1 0xA603
+#define reg_top_gpioon1_pos 1
+#define reg_top_gpioon1_len 1
+#define reg_top_gpioon1_lsb 0
+#define xd_p_reg_top_gpioon2 0xA603
+#define reg_top_gpioon2_pos 2
+#define reg_top_gpioon2_len 1
+#define reg_top_gpioon2_lsb 0
+#define xd_p_reg_top_gpioon3 0xA603
+#define reg_top_gpioon3_pos 3
+#define reg_top_gpioon3_len 1
+#define reg_top_gpioon3_lsb 0
+#define xd_p_reg_top_lockon1 0xA603
+#define reg_top_lockon1_pos 4
+#define reg_top_lockon1_len 1
+#define reg_top_lockon1_lsb 0
+#define xd_p_reg_top_lockon2 0xA603
+#define reg_top_lockon2_pos 5
+#define reg_top_lockon2_len 1
+#define reg_top_lockon2_lsb 0
+#define xd_p_reg_top_gpioo0 0xA604
+#define reg_top_gpioo0_pos 0
+#define reg_top_gpioo0_len 1
+#define reg_top_gpioo0_lsb 0
+#define xd_p_reg_top_gpioo1 0xA604
+#define reg_top_gpioo1_pos 1
+#define reg_top_gpioo1_len 1
+#define reg_top_gpioo1_lsb 0
+#define xd_p_reg_top_gpioo2 0xA604
+#define reg_top_gpioo2_pos 2
+#define reg_top_gpioo2_len 1
+#define reg_top_gpioo2_lsb 0
+#define xd_p_reg_top_gpioo3 0xA604
+#define reg_top_gpioo3_pos 3
+#define reg_top_gpioo3_len 1
+#define reg_top_gpioo3_lsb 0
+#define xd_p_reg_top_lock1 0xA604
+#define reg_top_lock1_pos 4
+#define reg_top_lock1_len 1
+#define reg_top_lock1_lsb 0
+#define xd_p_reg_top_lock2 0xA604
+#define reg_top_lock2_pos 5
+#define reg_top_lock2_len 1
+#define reg_top_lock2_lsb 0
+#define xd_p_reg_top_gpioen0 0xA605
+#define reg_top_gpioen0_pos 0
+#define reg_top_gpioen0_len 1
+#define reg_top_gpioen0_lsb 0
+#define xd_p_reg_top_gpioen1 0xA605
+#define reg_top_gpioen1_pos 1
+#define reg_top_gpioen1_len 1
+#define reg_top_gpioen1_lsb 0
+#define xd_p_reg_top_gpioen2 0xA605
+#define reg_top_gpioen2_pos 2
+#define reg_top_gpioen2_len 1
+#define reg_top_gpioen2_lsb 0
+#define xd_p_reg_top_gpioen3 0xA605
+#define reg_top_gpioen3_pos 3
+#define reg_top_gpioen3_len 1
+#define reg_top_gpioen3_lsb 0
+#define xd_p_reg_top_locken1 0xA605
+#define reg_top_locken1_pos 4
+#define reg_top_locken1_len 1
+#define reg_top_locken1_lsb 0
+#define xd_p_reg_top_locken2 0xA605
+#define reg_top_locken2_pos 5
+#define reg_top_locken2_len 1
+#define reg_top_locken2_lsb 0
+#define xd_r_reg_top_gpioi0 0xA606
+#define reg_top_gpioi0_pos 0
+#define reg_top_gpioi0_len 1
+#define reg_top_gpioi0_lsb 0
+#define xd_r_reg_top_gpioi1 0xA606
+#define reg_top_gpioi1_pos 1
+#define reg_top_gpioi1_len 1
+#define reg_top_gpioi1_lsb 0
+#define xd_r_reg_top_gpioi2 0xA606
+#define reg_top_gpioi2_pos 2
+#define reg_top_gpioi2_len 1
+#define reg_top_gpioi2_lsb 0
+#define xd_r_reg_top_gpioi3 0xA606
+#define reg_top_gpioi3_pos 3
+#define reg_top_gpioi3_len 1
+#define reg_top_gpioi3_lsb 0
+#define xd_r_reg_top_locki1 0xA606
+#define reg_top_locki1_pos 4
+#define reg_top_locki1_len 1
+#define reg_top_locki1_lsb 0
+#define xd_r_reg_top_locki2 0xA606
+#define reg_top_locki2_pos 5
+#define reg_top_locki2_len 1
+#define reg_top_locki2_lsb 0
+#define xd_p_reg_dummy_7_0 0xA608
+#define reg_dummy_7_0_pos 0
+#define reg_dummy_7_0_len 8
+#define reg_dummy_7_0_lsb 0
+#define xd_p_reg_dummy_15_8 0xA609
+#define reg_dummy_15_8_pos 0
+#define reg_dummy_15_8_len 8
+#define reg_dummy_15_8_lsb 8
+#define xd_p_reg_dummy_23_16 0xA60A
+#define reg_dummy_23_16_pos 0
+#define reg_dummy_23_16_len 8
+#define reg_dummy_23_16_lsb 16
+#define xd_p_reg_dummy_31_24 0xA60B
+#define reg_dummy_31_24_pos 0
+#define reg_dummy_31_24_len 8
+#define reg_dummy_31_24_lsb 24
+#define xd_p_reg_dummy_39_32 0xA60C
+#define reg_dummy_39_32_pos 0
+#define reg_dummy_39_32_len 8
+#define reg_dummy_39_32_lsb 32
+#define xd_p_reg_dummy_47_40 0xA60D
+#define reg_dummy_47_40_pos 0
+#define reg_dummy_47_40_len 8
+#define reg_dummy_47_40_lsb 40
+#define xd_p_reg_dummy_55_48 0xA60E
+#define reg_dummy_55_48_pos 0
+#define reg_dummy_55_48_len 8
+#define reg_dummy_55_48_lsb 48
+#define xd_p_reg_dummy_63_56 0xA60F
+#define reg_dummy_63_56_pos 0
+#define reg_dummy_63_56_len 8
+#define reg_dummy_63_56_lsb 56
+#define xd_p_reg_dummy_71_64 0xA610
+#define reg_dummy_71_64_pos 0
+#define reg_dummy_71_64_len 8
+#define reg_dummy_71_64_lsb 64
+#define xd_p_reg_dummy_79_72 0xA611
+#define reg_dummy_79_72_pos 0
+#define reg_dummy_79_72_len 8
+#define reg_dummy_79_72_lsb 72
+#define xd_p_reg_dummy_87_80 0xA612
+#define reg_dummy_87_80_pos 0
+#define reg_dummy_87_80_len 8
+#define reg_dummy_87_80_lsb 80
+#define xd_p_reg_dummy_95_88 0xA613
+#define reg_dummy_95_88_pos 0
+#define reg_dummy_95_88_len 8
+#define reg_dummy_95_88_lsb 88
+#define xd_p_reg_dummy_103_96 0xA614
+#define reg_dummy_103_96_pos 0
+#define reg_dummy_103_96_len 8
+#define reg_dummy_103_96_lsb 96
+
+#define xd_p_reg_unplug_flag 0xA615
+#define reg_unplug_flag_pos 0
+#define reg_unplug_flag_len 1
+#define reg_unplug_flag_lsb 104
+
+#define xd_p_reg_api_dca_stes_request 0xA615
+#define reg_api_dca_stes_request_pos 1
+#define reg_api_dca_stes_request_len 1
+#define reg_api_dca_stes_request_lsb 0
+
+#define xd_p_reg_back_to_dca_flag 0xA615
+#define reg_back_to_dca_flag_pos 2
+#define reg_back_to_dca_flag_len 1
+#define reg_back_to_dca_flag_lsb 106
+
+#define xd_p_reg_api_retrain_request 0xA615
+#define reg_api_retrain_request_pos 3
+#define reg_api_retrain_request_len 1
+#define reg_api_retrain_request_lsb 0
+
+#define xd_p_reg_Dyn_Top_Try_flag 0xA615
+#define reg_Dyn_Top_Try_flag_pos 3
+#define reg_Dyn_Top_Try_flag_len 1
+#define reg_Dyn_Top_Try_flag_lsb 107
+
+#define xd_p_reg_API_retrain_freeze_flag 0xA615
+#define reg_API_retrain_freeze_flag_pos 4
+#define reg_API_retrain_freeze_flag_len 1
+#define reg_API_retrain_freeze_flag_lsb 108
+
+#define xd_p_reg_dummy_111_104 0xA615
+#define reg_dummy_111_104_pos 0
+#define reg_dummy_111_104_len 8
+#define reg_dummy_111_104_lsb 104
+#define xd_p_reg_dummy_119_112 0xA616
+#define reg_dummy_119_112_pos 0
+#define reg_dummy_119_112_len 8
+#define reg_dummy_119_112_lsb 112
+#define xd_p_reg_dummy_127_120 0xA617
+#define reg_dummy_127_120_pos 0
+#define reg_dummy_127_120_len 8
+#define reg_dummy_127_120_lsb 120
+#define xd_p_reg_dummy_135_128 0xA618
+#define reg_dummy_135_128_pos 0
+#define reg_dummy_135_128_len 8
+#define reg_dummy_135_128_lsb 128
+
+#define xd_p_reg_dummy_143_136 0xA619
+#define reg_dummy_143_136_pos 0
+#define reg_dummy_143_136_len 8
+#define reg_dummy_143_136_lsb 136
+
+#define xd_p_reg_CCIR_dis 0xA619
+#define reg_CCIR_dis_pos 0
+#define reg_CCIR_dis_len 1
+#define reg_CCIR_dis_lsb 0
+
+#define xd_p_reg_dummy_151_144 0xA61A
+#define reg_dummy_151_144_pos 0
+#define reg_dummy_151_144_len 8
+#define reg_dummy_151_144_lsb 144
+
+#define xd_p_reg_dummy_159_152 0xA61B
+#define reg_dummy_159_152_pos 0
+#define reg_dummy_159_152_len 8
+#define reg_dummy_159_152_lsb 152
+
+#define xd_p_reg_dummy_167_160 0xA61C
+#define reg_dummy_167_160_pos 0
+#define reg_dummy_167_160_len 8
+#define reg_dummy_167_160_lsb 160
+
+#define xd_p_reg_dummy_175_168 0xA61D
+#define reg_dummy_175_168_pos 0
+#define reg_dummy_175_168_len 8
+#define reg_dummy_175_168_lsb 168
+
+#define xd_p_reg_dummy_183_176 0xA61E
+#define reg_dummy_183_176_pos 0
+#define reg_dummy_183_176_len 8
+#define reg_dummy_183_176_lsb 176
+
+#define xd_p_reg_ofsm_read_rbc_en 0xA61E
+#define reg_ofsm_read_rbc_en_pos 2
+#define reg_ofsm_read_rbc_en_len 1
+#define reg_ofsm_read_rbc_en_lsb 0
+
+#define xd_p_reg_ce_filter_selection_dis 0xA61E
+#define reg_ce_filter_selection_dis_pos 1
+#define reg_ce_filter_selection_dis_len 1
+#define reg_ce_filter_selection_dis_lsb 0
+
+#define xd_p_reg_OFSM_version_control_7_0 0xA611
+#define reg_OFSM_version_control_7_0_pos 0
+#define reg_OFSM_version_control_7_0_len 8
+#define reg_OFSM_version_control_7_0_lsb 0
+
+#define xd_p_reg_OFSM_version_control_15_8 0xA61F
+#define reg_OFSM_version_control_15_8_pos 0
+#define reg_OFSM_version_control_15_8_len 8
+#define reg_OFSM_version_control_15_8_lsb 0
+
+#define xd_p_reg_OFSM_version_control_23_16 0xA620
+#define reg_OFSM_version_control_23_16_pos 0
+#define reg_OFSM_version_control_23_16_len 8
+#define reg_OFSM_version_control_23_16_lsb 0
+
+#define xd_p_reg_dummy_191_184 0xA61F
+#define reg_dummy_191_184_pos 0
+#define reg_dummy_191_184_len 8
+#define reg_dummy_191_184_lsb 184
+
+#define xd_p_reg_dummy_199_192 0xA620
+#define reg_dummy_199_192_pos 0
+#define reg_dummy_199_192_len 8
+#define reg_dummy_199_192_lsb 192
+
+#define xd_p_reg_ce_en 0xABC0
+#define reg_ce_en_pos 0
+#define reg_ce_en_len 1
+#define reg_ce_en_lsb 0
+#define xd_p_reg_ce_fctrl_en 0xABC0
+#define reg_ce_fctrl_en_pos 1
+#define reg_ce_fctrl_en_len 1
+#define reg_ce_fctrl_en_lsb 0
+#define xd_p_reg_ce_fste_tdi 0xABC0
+#define reg_ce_fste_tdi_pos 2
+#define reg_ce_fste_tdi_len 1
+#define reg_ce_fste_tdi_lsb 0
+#define xd_p_reg_ce_dynamic 0xABC0
+#define reg_ce_dynamic_pos 3
+#define reg_ce_dynamic_len 1
+#define reg_ce_dynamic_lsb 0
+#define xd_p_reg_ce_conf 0xABC0
+#define reg_ce_conf_pos 4
+#define reg_ce_conf_len 2
+#define reg_ce_conf_lsb 0
+#define xd_p_reg_ce_dyn12 0xABC0
+#define reg_ce_dyn12_pos 6
+#define reg_ce_dyn12_len 1
+#define reg_ce_dyn12_lsb 0
+#define xd_p_reg_ce_derot_en 0xABC0
+#define reg_ce_derot_en_pos 7
+#define reg_ce_derot_en_len 1
+#define reg_ce_derot_en_lsb 0
+#define xd_p_reg_ce_dynamic_th_7_0 0xABC1
+#define reg_ce_dynamic_th_7_0_pos 0
+#define reg_ce_dynamic_th_7_0_len 8
+#define reg_ce_dynamic_th_7_0_lsb 0
+#define xd_p_reg_ce_dynamic_th_15_8 0xABC2
+#define reg_ce_dynamic_th_15_8_pos 0
+#define reg_ce_dynamic_th_15_8_len 8
+#define reg_ce_dynamic_th_15_8_lsb 8
+#define xd_p_reg_ce_s1 0xABC3
+#define reg_ce_s1_pos 0
+#define reg_ce_s1_len 5
+#define reg_ce_s1_lsb 0
+#define xd_p_reg_ce_var_forced_value 0xABC3
+#define reg_ce_var_forced_value_pos 5
+#define reg_ce_var_forced_value_len 3
+#define reg_ce_var_forced_value_lsb 0
+#define xd_p_reg_ce_data_im_7_0 0xABC4
+#define reg_ce_data_im_7_0_pos 0
+#define reg_ce_data_im_7_0_len 8
+#define reg_ce_data_im_7_0_lsb 0
+#define xd_p_reg_ce_data_im_8 0xABC5
+#define reg_ce_data_im_8_pos 0
+#define reg_ce_data_im_8_len 1
+#define reg_ce_data_im_8_lsb 0
+#define xd_p_reg_ce_data_re_6_0 0xABC5
+#define reg_ce_data_re_6_0_pos 1
+#define reg_ce_data_re_6_0_len 7
+#define reg_ce_data_re_6_0_lsb 0
+#define xd_p_reg_ce_data_re_8_7 0xABC6
+#define reg_ce_data_re_8_7_pos 0
+#define reg_ce_data_re_8_7_len 2
+#define reg_ce_data_re_8_7_lsb 7
+#define xd_p_reg_ce_tone_5_0 0xABC6
+#define reg_ce_tone_5_0_pos 2
+#define reg_ce_tone_5_0_len 6
+#define reg_ce_tone_5_0_lsb 0
+#define xd_p_reg_ce_tone_12_6 0xABC7
+#define reg_ce_tone_12_6_pos 0
+#define reg_ce_tone_12_6_len 7
+#define reg_ce_tone_12_6_lsb 6
+#define xd_p_reg_ce_centroid_drift_th 0xABC8
+#define reg_ce_centroid_drift_th_pos 0
+#define reg_ce_centroid_drift_th_len 8
+#define reg_ce_centroid_drift_th_lsb 0
+#define xd_p_reg_ce_centroid_count_max 0xABC9
+#define reg_ce_centroid_count_max_pos 0
+#define reg_ce_centroid_count_max_len 4
+#define reg_ce_centroid_count_max_lsb 0
+#define xd_p_reg_ce_centroid_bias_inc_7_0 0xABCA
+#define reg_ce_centroid_bias_inc_7_0_pos 0
+#define reg_ce_centroid_bias_inc_7_0_len 8
+#define reg_ce_centroid_bias_inc_7_0_lsb 0
+#define xd_p_reg_ce_centroid_bias_inc_8 0xABCB
+#define reg_ce_centroid_bias_inc_8_pos 0
+#define reg_ce_centroid_bias_inc_8_len 1
+#define reg_ce_centroid_bias_inc_8_lsb 0
+#define xd_p_reg_ce_var_th0_7_0 0xABCC
+#define reg_ce_var_th0_7_0_pos 0
+#define reg_ce_var_th0_7_0_len 8
+#define reg_ce_var_th0_7_0_lsb 0
+#define xd_p_reg_ce_var_th0_15_8 0xABCD
+#define reg_ce_var_th0_15_8_pos 0
+#define reg_ce_var_th0_15_8_len 8
+#define reg_ce_var_th0_15_8_lsb 8
+#define xd_p_reg_ce_var_th1_7_0 0xABCE
+#define reg_ce_var_th1_7_0_pos 0
+#define reg_ce_var_th1_7_0_len 8
+#define reg_ce_var_th1_7_0_lsb 0
+#define xd_p_reg_ce_var_th1_15_8 0xABCF
+#define reg_ce_var_th1_15_8_pos 0
+#define reg_ce_var_th1_15_8_len 8
+#define reg_ce_var_th1_15_8_lsb 8
+#define xd_p_reg_ce_var_th2_7_0 0xABD0
+#define reg_ce_var_th2_7_0_pos 0
+#define reg_ce_var_th2_7_0_len 8
+#define reg_ce_var_th2_7_0_lsb 0
+#define xd_p_reg_ce_var_th2_15_8 0xABD1
+#define reg_ce_var_th2_15_8_pos 0
+#define reg_ce_var_th2_15_8_len 8
+#define reg_ce_var_th2_15_8_lsb 8
+#define xd_p_reg_ce_var_th3_7_0 0xABD2
+#define reg_ce_var_th3_7_0_pos 0
+#define reg_ce_var_th3_7_0_len 8
+#define reg_ce_var_th3_7_0_lsb 0
+#define xd_p_reg_ce_var_th3_15_8 0xABD3
+#define reg_ce_var_th3_15_8_pos 0
+#define reg_ce_var_th3_15_8_len 8
+#define reg_ce_var_th3_15_8_lsb 8
+#define xd_p_reg_ce_var_th4_7_0 0xABD4
+#define reg_ce_var_th4_7_0_pos 0
+#define reg_ce_var_th4_7_0_len 8
+#define reg_ce_var_th4_7_0_lsb 0
+#define xd_p_reg_ce_var_th4_15_8 0xABD5
+#define reg_ce_var_th4_15_8_pos 0
+#define reg_ce_var_th4_15_8_len 8
+#define reg_ce_var_th4_15_8_lsb 8
+#define xd_p_reg_ce_var_th5_7_0 0xABD6
+#define reg_ce_var_th5_7_0_pos 0
+#define reg_ce_var_th5_7_0_len 8
+#define reg_ce_var_th5_7_0_lsb 0
+#define xd_p_reg_ce_var_th5_15_8 0xABD7
+#define reg_ce_var_th5_15_8_pos 0
+#define reg_ce_var_th5_15_8_len 8
+#define reg_ce_var_th5_15_8_lsb 8
+#define xd_p_reg_ce_var_th6_7_0 0xABD8
+#define reg_ce_var_th6_7_0_pos 0
+#define reg_ce_var_th6_7_0_len 8
+#define reg_ce_var_th6_7_0_lsb 0
+#define xd_p_reg_ce_var_th6_15_8 0xABD9
+#define reg_ce_var_th6_15_8_pos 0
+#define reg_ce_var_th6_15_8_len 8
+#define reg_ce_var_th6_15_8_lsb 8
+#define xd_p_reg_ce_fctrl_reset 0xABDA
+#define reg_ce_fctrl_reset_pos 0
+#define reg_ce_fctrl_reset_len 1
+#define reg_ce_fctrl_reset_lsb 0
+#define xd_p_reg_ce_cent_auto_clr_en 0xABDA
+#define reg_ce_cent_auto_clr_en_pos 1
+#define reg_ce_cent_auto_clr_en_len 1
+#define reg_ce_cent_auto_clr_en_lsb 0
+#define xd_p_reg_ce_fctrl_auto_reset_en 0xABDA
+#define reg_ce_fctrl_auto_reset_en_pos 2
+#define reg_ce_fctrl_auto_reset_en_len 1
+#define reg_ce_fctrl_auto_reset_en_lsb 0
+#define xd_p_reg_ce_var_forced_en 0xABDA
+#define reg_ce_var_forced_en_pos 3
+#define reg_ce_var_forced_en_len 1
+#define reg_ce_var_forced_en_lsb 0
+#define xd_p_reg_ce_cent_forced_en 0xABDA
+#define reg_ce_cent_forced_en_pos 4
+#define reg_ce_cent_forced_en_len 1
+#define reg_ce_cent_forced_en_lsb 0
+#define xd_p_reg_ce_var_max 0xABDA
+#define reg_ce_var_max_pos 5
+#define reg_ce_var_max_len 3
+#define reg_ce_var_max_lsb 0
+#define xd_p_reg_ce_cent_forced_value_7_0 0xABDB
+#define reg_ce_cent_forced_value_7_0_pos 0
+#define reg_ce_cent_forced_value_7_0_len 8
+#define reg_ce_cent_forced_value_7_0_lsb 0
+#define xd_p_reg_ce_cent_forced_value_11_8 0xABDC
+#define reg_ce_cent_forced_value_11_8_pos 0
+#define reg_ce_cent_forced_value_11_8_len 4
+#define reg_ce_cent_forced_value_11_8_lsb 8
+#define xd_p_reg_ce_fctrl_rd 0xABDD
+#define reg_ce_fctrl_rd_pos 0
+#define reg_ce_fctrl_rd_len 1
+#define reg_ce_fctrl_rd_lsb 0
+#define xd_p_reg_ce_centroid_max_6_0 0xABDD
+#define reg_ce_centroid_max_6_0_pos 1
+#define reg_ce_centroid_max_6_0_len 7
+#define reg_ce_centroid_max_6_0_lsb 0
+#define xd_p_reg_ce_centroid_max_11_7 0xABDE
+#define reg_ce_centroid_max_11_7_pos 0
+#define reg_ce_centroid_max_11_7_len 5
+#define reg_ce_centroid_max_11_7_lsb 7
+#define xd_p_reg_ce_var 0xABDF
+#define reg_ce_var_pos 0
+#define reg_ce_var_len 3
+#define reg_ce_var_lsb 0
+#define xd_p_reg_ce_fctrl_rdy 0xABDF
+#define reg_ce_fctrl_rdy_pos 3
+#define reg_ce_fctrl_rdy_len 1
+#define reg_ce_fctrl_rdy_lsb 0
+#define xd_p_reg_ce_centroid_out_3_0 0xABDF
+#define reg_ce_centroid_out_3_0_pos 4
+#define reg_ce_centroid_out_3_0_len 4
+#define reg_ce_centroid_out_3_0_lsb 0
+#define xd_p_reg_ce_centroid_out_11_4 0xABE0
+#define reg_ce_centroid_out_11_4_pos 0
+#define reg_ce_centroid_out_11_4_len 8
+#define reg_ce_centroid_out_11_4_lsb 4
+#define xd_p_reg_ce_bias_7_0 0xABE1
+#define reg_ce_bias_7_0_pos 0
+#define reg_ce_bias_7_0_len 8
+#define reg_ce_bias_7_0_lsb 0
+#define xd_p_reg_ce_bias_11_8 0xABE2
+#define reg_ce_bias_11_8_pos 0
+#define reg_ce_bias_11_8_len 4
+#define reg_ce_bias_11_8_lsb 8
+#define xd_p_reg_ce_m1_3_0 0xABE2
+#define reg_ce_m1_3_0_pos 4
+#define reg_ce_m1_3_0_len 4
+#define reg_ce_m1_3_0_lsb 0
+#define xd_p_reg_ce_m1_11_4 0xABE3
+#define reg_ce_m1_11_4_pos 0
+#define reg_ce_m1_11_4_len 8
+#define reg_ce_m1_11_4_lsb 4
+#define xd_p_reg_ce_rh0_7_0 0xABE4
+#define reg_ce_rh0_7_0_pos 0
+#define reg_ce_rh0_7_0_len 8
+#define reg_ce_rh0_7_0_lsb 0
+#define xd_p_reg_ce_rh0_15_8 0xABE5
+#define reg_ce_rh0_15_8_pos 0
+#define reg_ce_rh0_15_8_len 8
+#define reg_ce_rh0_15_8_lsb 8
+#define xd_p_reg_ce_rh0_23_16 0xABE6
+#define reg_ce_rh0_23_16_pos 0
+#define reg_ce_rh0_23_16_len 8
+#define reg_ce_rh0_23_16_lsb 16
+#define xd_p_reg_ce_rh0_31_24 0xABE7
+#define reg_ce_rh0_31_24_pos 0
+#define reg_ce_rh0_31_24_len 8
+#define reg_ce_rh0_31_24_lsb 24
+#define xd_p_reg_ce_rh3_real_7_0 0xABE8
+#define reg_ce_rh3_real_7_0_pos 0
+#define reg_ce_rh3_real_7_0_len 8
+#define reg_ce_rh3_real_7_0_lsb 0
+#define xd_p_reg_ce_rh3_real_15_8 0xABE9
+#define reg_ce_rh3_real_15_8_pos 0
+#define reg_ce_rh3_real_15_8_len 8
+#define reg_ce_rh3_real_15_8_lsb 8
+#define xd_p_reg_ce_rh3_real_23_16 0xABEA
+#define reg_ce_rh3_real_23_16_pos 0
+#define reg_ce_rh3_real_23_16_len 8
+#define reg_ce_rh3_real_23_16_lsb 16
+#define xd_p_reg_ce_rh3_real_31_24 0xABEB
+#define reg_ce_rh3_real_31_24_pos 0
+#define reg_ce_rh3_real_31_24_len 8
+#define reg_ce_rh3_real_31_24_lsb 24
+#define xd_p_reg_ce_rh3_imag_7_0 0xABEC
+#define reg_ce_rh3_imag_7_0_pos 0
+#define reg_ce_rh3_imag_7_0_len 8
+#define reg_ce_rh3_imag_7_0_lsb 0
+#define xd_p_reg_ce_rh3_imag_15_8 0xABED
+#define reg_ce_rh3_imag_15_8_pos 0
+#define reg_ce_rh3_imag_15_8_len 8
+#define reg_ce_rh3_imag_15_8_lsb 8
+#define xd_p_reg_ce_rh3_imag_23_16 0xABEE
+#define reg_ce_rh3_imag_23_16_pos 0
+#define reg_ce_rh3_imag_23_16_len 8
+#define reg_ce_rh3_imag_23_16_lsb 16
+#define xd_p_reg_ce_rh3_imag_31_24 0xABEF
+#define reg_ce_rh3_imag_31_24_pos 0
+#define reg_ce_rh3_imag_31_24_len 8
+#define reg_ce_rh3_imag_31_24_lsb 24
+#define xd_p_reg_feq_fix_eh2_7_0 0xABF0
+#define reg_feq_fix_eh2_7_0_pos 0
+#define reg_feq_fix_eh2_7_0_len 8
+#define reg_feq_fix_eh2_7_0_lsb 0
+#define xd_p_reg_feq_fix_eh2_15_8 0xABF1
+#define reg_feq_fix_eh2_15_8_pos 0
+#define reg_feq_fix_eh2_15_8_len 8
+#define reg_feq_fix_eh2_15_8_lsb 8
+#define xd_p_reg_feq_fix_eh2_23_16 0xABF2
+#define reg_feq_fix_eh2_23_16_pos 0
+#define reg_feq_fix_eh2_23_16_len 8
+#define reg_feq_fix_eh2_23_16_lsb 16
+#define xd_p_reg_feq_fix_eh2_31_24 0xABF3
+#define reg_feq_fix_eh2_31_24_pos 0
+#define reg_feq_fix_eh2_31_24_len 8
+#define reg_feq_fix_eh2_31_24_lsb 24
+#define xd_p_reg_ce_m2_central_7_0 0xABF4
+#define reg_ce_m2_central_7_0_pos 0
+#define reg_ce_m2_central_7_0_len 8
+#define reg_ce_m2_central_7_0_lsb 0
+#define xd_p_reg_ce_m2_central_15_8 0xABF5
+#define reg_ce_m2_central_15_8_pos 0
+#define reg_ce_m2_central_15_8_len 8
+#define reg_ce_m2_central_15_8_lsb 8
+#define xd_p_reg_ce_fftshift 0xABF6
+#define reg_ce_fftshift_pos 0
+#define reg_ce_fftshift_len 4
+#define reg_ce_fftshift_lsb 0
+#define xd_p_reg_ce_fftshift1 0xABF6
+#define reg_ce_fftshift1_pos 4
+#define reg_ce_fftshift1_len 4
+#define reg_ce_fftshift1_lsb 0
+#define xd_p_reg_ce_fftshift2 0xABF7
+#define reg_ce_fftshift2_pos 0
+#define reg_ce_fftshift2_len 4
+#define reg_ce_fftshift2_lsb 0
+#define xd_p_reg_ce_top_mobile 0xABF7
+#define reg_ce_top_mobile_pos 4
+#define reg_ce_top_mobile_len 1
+#define reg_ce_top_mobile_lsb 0
+#define xd_p_reg_strong_sginal_detected 0xA2BC
+#define reg_strong_sginal_detected_pos 2
+#define reg_strong_sginal_detected_len 1
+#define reg_strong_sginal_detected_lsb 0
+
+#define XD_MP2IF_BASE 0xB000
+#define XD_MP2IF_CSR (0x00 + XD_MP2IF_BASE)
+#define XD_MP2IF_DMX_CTRL (0x03 + XD_MP2IF_BASE)
+#define XD_MP2IF_PID_IDX (0x04 + XD_MP2IF_BASE)
+#define XD_MP2IF_PID_DATA_L (0x05 + XD_MP2IF_BASE)
+#define XD_MP2IF_PID_DATA_H (0x06 + XD_MP2IF_BASE)
+#define XD_MP2IF_MISC (0x07 + XD_MP2IF_BASE)
+
+extern struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d);
+extern int af9005_read_ofdm_register(struct dvb_usb_device *d, u16 reg,
+ u8 * value);
+extern int af9005_read_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+ u8 * values, int len);
+extern int af9005_write_ofdm_register(struct dvb_usb_device *d, u16 reg,
+ u8 value);
+extern int af9005_write_ofdm_registers(struct dvb_usb_device *d, u16 reg,
+ u8 * values, int len);
+extern int af9005_read_tuner_registers(struct dvb_usb_device *d, u16 reg,
+ u8 addr, u8 * values, int len);
+extern int af9005_write_tuner_registers(struct dvb_usb_device *d, u16 reg,
+ u8 * values, int len);
+extern int af9005_read_register_bits(struct dvb_usb_device *d, u16 reg,
+ u8 pos, u8 len, u8 * value);
+extern int af9005_write_register_bits(struct dvb_usb_device *d, u16 reg,
+ u8 pos, u8 len, u8 value);
+extern int af9005_send_command(struct dvb_usb_device *d, u8 command,
+ u8 * wbuf, int wlen, u8 * rbuf, int rlen);
+extern int af9005_read_eeprom(struct dvb_usb_device *d, u8 address,
+ u8 * values, int len);
+extern int af9005_tuner_attach(struct dvb_usb_adapter *adap);
+extern int af9005_led_control(struct dvb_usb_device *d, int onoff);
+
+extern u8 regmask[8];
+
+/* remote control decoder */
+extern int af9005_rc_decode(struct dvb_usb_device *d, u8 * data, int len,
+ u32 * event, int *state);
+extern struct dvb_usb_rc_key af9005_rc_keys[];
+extern int af9005_rc_keys_size;
+
+#endif
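
Each register added in this header is described by an address macro plus matching _pos/_len/_lsb companions. Below is a minimal illustrative sketch, not part of the patch, assuming the usual pairing of these macros with the af9005_read/write_register_bits() helpers declared above (the function name is hypothetical):

	/* read-modify-write of the 1-bit FEC monitor enable field at 0xA391 */
	static int example_enable_fec_monitor(struct dvb_usb_device *d)
	{
		u8 val = 0;
		int ret;

		/* fetch the current value of the field at fec_mon_en_pos, width fec_mon_en_len */
		ret = af9005_read_register_bits(d, xd_p_fec_mon_en,
						fec_mon_en_pos, fec_mon_en_len, &val);
		if (ret)
			return ret;
		if (val)
			return 0;	/* already enabled */

		/* write a 1 back into the same bit-field */
		return af9005_write_register_bits(d, xd_p_fec_mon_en,
						  fec_mon_en_pos, fec_mon_en_len, 1);
	}
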
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index bac2ae3b4a1f..04e31cf7d530 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -354,41 +354,35 @@ static struct mt352_config cxusb_mt352_config = {
/* Callbacks for DVB USB */
static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap)
{
- u8 bpll[4] = { 0x0b, 0xdc, 0x9c, 0xa0 };
- adap->pll_addr = 0x61;
- memcpy(adap->pll_init, bpll, 4);
- adap->pll_desc = &dvb_pll_fmd1216me;
-
- adap->fe->ops.tuner_ops.init = dvb_usb_tuner_init_i2c;
- adap->fe->ops.tuner_ops.set_params = dvb_usb_tuner_set_params_i2c;
-
+ dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
+ DVB_PLL_FMD1216ME);
return 0;
}
static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe, 0x61,
- NULL, &dvb_pll_thomson_dtt7579);
+ NULL, DVB_PLL_THOMSON_DTT7579);
return 0;
}
static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap)
{
- dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, &dvb_pll_lg_z201);
+ dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, DVB_PLL_LG_Z201);
return 0;
}
static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe, 0x60,
- NULL, &dvb_pll_thomson_dtt7579);
+ NULL, DVB_PLL_THOMSON_DTT7579);
return 0;
}
static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
- &dvb_pll_lg_tdvs_h06xf);
+ DVB_PLL_LG_TDVS_H06XF);
return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 5143e426d283..9a184da01c47 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -295,7 +295,7 @@ int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe, 1);
if (dvb_attach(mt2060_attach, adap->fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
/* not found - use panasonic pll parameters */
- if (dvb_attach(dvb_pll_attach, adap->fe, 0x60, tun_i2c, &dvb_pll_env57h1xd5) == NULL)
+ if (dvb_attach(dvb_pll_attach, adap->fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
return -ENOMEM;
} else {
st->mt2060_present = 1;
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 7a6ae8f482e0..043cadae0859 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -14,6 +14,14 @@
*/
#include "dibusb.h"
+static int dib3000mb_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dibusb_state *st = adap->priv;
+
+ return st->ops.tuner_pass_ctrl(fe, enable, st->tuner_addr);
+}
+
static int dibusb_dib3000mb_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dib3000_config demod_cfg;
@@ -21,21 +29,34 @@ static int dibusb_dib3000mb_frontend_attach(struct dvb_usb_adapter *adap)
demod_cfg.demod_address = 0x8;
- if ((adap->fe = dib3000mb_attach(&demod_cfg,&adap->dev->i2c_adap,&st->ops)) == NULL)
+ if ((adap->fe = dvb_attach(dib3000mb_attach, &demod_cfg,
+ &adap->dev->i2c_adap, &st->ops)) == NULL)
return -ENODEV;
- adap->fe->ops.tuner_ops.init = dvb_usb_tuner_init_i2c;
- adap->fe->ops.tuner_ops.set_params = dvb_usb_tuner_set_params_i2c;
-
- adap->tuner_pass_ctrl = st->ops.tuner_pass_ctrl;
+ adap->fe->ops.i2c_gate_ctrl = dib3000mb_i2c_gate_ctrl;
return 0;
}
static int dibusb_thomson_tuner_attach(struct dvb_usb_adapter *adap)
{
- adap->pll_addr = 0x61;
- adap->pll_desc = &dvb_pll_tua6010xs;
+ struct dibusb_state *st = adap->priv;
+
+ st->tuner_addr = 0x61;
+
+ dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap,
+ DVB_PLL_TUA6010XS);
+ return 0;
+}
+
+static int dibusb_panasonic_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dibusb_state *st = adap->priv;
+
+ st->tuner_addr = 0x60;
+
+ dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap,
+ DVB_PLL_TDA665X);
return 0;
}
@@ -50,30 +71,28 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap)
{ .flags = 0, .buf = b, .len = 2 },
{ .flags = I2C_M_RD, .buf = b2, .len = 1 },
};
+ struct dibusb_state *st = adap->priv;
/* the Panasonic sits on I2C address 0x60, the Thomson on 0x61 */
- msg[0].addr = msg[1].addr = 0x60;
+ msg[0].addr = msg[1].addr = st->tuner_addr = 0x60;
- if (adap->tuner_pass_ctrl)
- adap->tuner_pass_ctrl(adap->fe,1,msg[0].addr);
+ if (adap->fe->ops.i2c_gate_ctrl)
+ adap->fe->ops.i2c_gate_ctrl(adap->fe,1);
if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) {
err("tuner i2c write failed.");
ret = -EREMOTEIO;
}
- if (adap->tuner_pass_ctrl)
- adap->tuner_pass_ctrl(adap->fe,0,msg[0].addr);
+ if (adap->fe->ops.i2c_gate_ctrl)
+ adap->fe->ops.i2c_gate_ctrl(adap->fe,0);
if (b2[0] == 0xfe) {
info("This device has the Thomson Cable onboard. Which is default.");
- dibusb_thomson_tuner_attach(adap);
+ ret = dibusb_thomson_tuner_attach(adap);
} else {
- u8 bpll[4] = { 0x0b, 0xf5, 0x85, 0xab };
info("This device has the Panasonic ENV77H11D5 onboard.");
- adap->pll_addr = 0x60;
- memcpy(adap->pll_init,bpll,4);
- adap->pll_desc = &dvb_pll_tda665x;
+ ret = dibusb_panasonic_tuner_attach(adap);
}
return ret;
diff --git a/drivers/media/dvb/dvb-usb/dibusb.h b/drivers/media/dvb/dvb-usb/dibusb.h
index b60781032742..8e847aa73ba1 100644
--- a/drivers/media/dvb/dvb-usb/dibusb.h
+++ b/drivers/media/dvb/dvb-usb/dibusb.h
@@ -99,6 +99,7 @@
struct dibusb_state {
struct dib_fe_xfer_ops ops;
int mt2060_present;
+ u8 tuner_addr;
};
struct dibusb_device_state {
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index b5acb11c0bc9..bca1e0905739 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -118,7 +118,8 @@ static int digitv_nxt6000_tuner_set_params(struct dvb_frontend *fe, struct dvb_f
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
u8 b[5];
- dvb_usb_tuner_calc_regs(fe,fep,b, 5);
+
+ fe->ops.tuner_ops.calc_regs(fe, fep, b, sizeof(b));
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
return digitv_ctrl_msg(adap->dev, USB_WRITE_TUNER, 0, &b[1], 4, NULL, 0);
@@ -130,12 +131,14 @@ static struct nxt6000_config digitv_nxt6000_config = {
static int digitv_frontend_attach(struct dvb_usb_adapter *adap)
{
+ struct digitv_state *st = adap->dev->priv;
+
if ((adap->fe = dvb_attach(mt352_attach, &digitv_mt352_config, &adap->dev->i2c_adap)) != NULL) {
- adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
+ st->is_nxt6000 = 0;
return 0;
}
if ((adap->fe = dvb_attach(nxt6000_attach, &digitv_nxt6000_config, &adap->dev->i2c_adap)) != NULL) {
- adap->fe->ops.tuner_ops.set_params = digitv_nxt6000_tuner_set_params;
+ st->is_nxt6000 = 1;
return 0;
}
return -EIO;
@@ -143,8 +146,14 @@ static int digitv_frontend_attach(struct dvb_usb_adapter *adap)
static int digitv_tuner_attach(struct dvb_usb_adapter *adap)
{
- adap->pll_addr = 0x60;
- adap->pll_desc = &dvb_pll_tded4;
+ struct digitv_state *st = adap->dev->priv;
+
+ if (!dvb_attach(dvb_pll_attach, adap->fe, 0x60, NULL, DVB_PLL_TDED4))
+ return -ENODEV;
+
+ if (st->is_nxt6000)
+ adap->fe->ops.tuner_ops.set_params = digitv_nxt6000_tuner_set_params;
+
return 0;
}
@@ -273,6 +282,8 @@ static struct dvb_usb_device_properties digitv_properties = {
.usb_ctrl = CYPRESS_FX2,
.firmware = "dvb-usb-digitv-02.fw",
+ .size_of_priv = sizeof(struct digitv_state),
+
.num_adapters = 1,
.adapter = {
{
diff --git a/drivers/media/dvb/dvb-usb/digitv.h b/drivers/media/dvb/dvb-usb/digitv.h
index 477ee428a70e..8b43e3db8691 100644
--- a/drivers/media/dvb/dvb-usb/digitv.h
+++ b/drivers/media/dvb/dvb-usb/digitv.h
@@ -4,6 +4,10 @@
#define DVB_USB_LOG_PREFIX "digitv"
#include "dvb-usb.h"
+struct digitv_state {
+ int is_nxt6000;
+};
+
extern int dvb_usb_digitv_debug;
#define deb_rc(args...) dprintk(dvb_usb_digitv_debug,0x01,args)
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
index 088b6dee3a7f..23428cd30756 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
@@ -46,82 +46,3 @@ int dvb_usb_i2c_exit(struct dvb_usb_device *d)
d->state &= ~DVB_USB_STATE_I2C;
return 0;
}
-
-int dvb_usb_tuner_init_i2c(struct dvb_frontend *fe)
-{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
- struct i2c_msg msg = { .addr = adap->pll_addr, .flags = 0, .buf = adap->pll_init, .len = 4 };
- int ret = 0;
-
- /* if pll_desc is not used */
- if (adap->pll_desc == NULL)
- return 0;
-
- if (adap->tuner_pass_ctrl)
- adap->tuner_pass_ctrl(fe, 1, adap->pll_addr);
-
- deb_pll("pll init: %x\n",adap->pll_addr);
- deb_pll("pll-buf: %x %x %x %x\n",adap->pll_init[0], adap->pll_init[1],
- adap->pll_init[2], adap->pll_init[3]);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer (&adap->dev->i2c_adap, &msg, 1) != 1) {
- err("tuner i2c write failed for pll_init.");
- ret = -EREMOTEIO;
- }
- msleep(1);
-
- if (adap->tuner_pass_ctrl)
- adap->tuner_pass_ctrl(fe,0,adap->pll_addr);
- return ret;
-}
-EXPORT_SYMBOL(dvb_usb_tuner_init_i2c);
-
-int dvb_usb_tuner_calc_regs(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep, u8 *b, int buf_len)
-{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
-
- if (buf_len != 5)
- return -EINVAL;
- if (adap->pll_desc == NULL)
- return 0;
-
- deb_pll("pll addr: %x, freq: %d %p\n",adap->pll_addr, fep->frequency, adap->pll_desc);
-
- b[0] = adap->pll_addr;
- dvb_pll_configure(adap->pll_desc, &b[1], fep->frequency, fep->u.ofdm.bandwidth);
-
- deb_pll("pll-buf: %x %x %x %x %x\n",b[0],b[1],b[2],b[3],b[4]);
-
- return 5;
-}
-EXPORT_SYMBOL(dvb_usb_tuner_calc_regs);
-
-int dvb_usb_tuner_set_params_i2c(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
-{
- struct dvb_usb_adapter *adap = fe->dvb->priv;
- int ret = 0;
- u8 b[5];
- struct i2c_msg msg = { .addr = adap->pll_addr, .flags = 0, .buf = &b[1], .len = 4 };
-
- dvb_usb_tuner_calc_regs(fe,fep,b,5);
-
- if (adap->tuner_pass_ctrl)
- adap->tuner_pass_ctrl(fe, 1, adap->pll_addr);
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
-
- if (i2c_transfer(&adap->dev->i2c_adap, &msg, 1) != 1) {
- err("tuner i2c write failed for pll_set.");
- ret = -EREMOTEIO;
- }
- msleep(1);
-
- if (adap->tuner_pass_ctrl)
- adap->tuner_pass_ctrl(fe, 0, adap->pll_addr);
-
- return ret;
-}
-EXPORT_SYMBOL(dvb_usb_tuner_set_params_i2c);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 403081689de1..4dfab02a8a0d 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -11,7 +11,9 @@
/* Vendor IDs */
#define USB_VID_ADSTECH 0x06e1
+#define USB_VID_AFATECH 0x15a4
#define USB_VID_ALCOR_MICRO 0x058f
+#define USB_VID_ALINK 0x05e3
#define USB_VID_ANCHOR 0x0547
#define USB_VID_ANUBIS_ELECTRONIC 0x10fd
#define USB_VID_AVERMEDIA 0x07ca
@@ -35,6 +37,7 @@
#define USB_VID_MSI 0x0db0
#define USB_VID_OPERA1 0x695c
#define USB_VID_PINNACLE 0x2304
+#define USB_VID_TERRATEC 0x0ccd
#define USB_VID_VISIONPLUS 0x13d3
#define USB_VID_TWINHAN 0x1822
#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
@@ -44,6 +47,8 @@
/* Product IDs */
#define USB_PID_ADSTECH_USB2_COLD 0xa333
#define USB_PID_ADSTECH_USB2_WARM 0xa334
+#define USB_PID_AFATECH_AF9005 0x9020
+#define USB_VID_ALINK_DTU 0xf170
#define USB_PID_AVERMEDIA_DVBT_USB_COLD 0x0001
#define USB_PID_AVERMEDIA_DVBT_USB_WARM 0x0002
#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800
@@ -69,6 +74,7 @@
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
+#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
#define USB_PID_TWINHAN_VP7041_WARM 0x3202
#define USB_PID_TWINHAN_VP7020_COLD 0x3203
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 9200a30dd1b9..7b9f35bfb4f0 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -110,7 +110,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
input_dev->name = "IR-receiver inside an USB DVB receiver";
input_dev->phys = d->rc_phys;
usb_to_input_id(d->udev, &input_dev->id);
- input_dev->cdev.dev = &d->udev->dev;
+ input_dev->dev.parent = &d->udev->dev;
/* set the bits for the keys */
deb_rc("key map size: %d\n", d->props.rc_key_map_size);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 6f824a569e14..d1b3c7b81fff 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -297,12 +297,6 @@ struct dvb_usb_adapter {
int feedcount;
int pid_filtering;
- /* tuner programming information */
- u8 pll_addr;
- u8 pll_init[4];
- struct dvb_pll_desc *pll_desc;
- int (*tuner_pass_ctrl) (struct dvb_frontend *, int, u8);
-
/* dvb */
struct dvb_adapter dvb_adap;
struct dmxdev dmxdev;
@@ -388,11 +382,6 @@ extern int dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
/* commonly used remote control parsing */
extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
-/* commonly used pll init and set functions */
-extern int dvb_usb_tuner_init_i2c(struct dvb_frontend *);
-extern int dvb_usb_tuner_calc_regs(struct dvb_frontend *, struct dvb_frontend_parameters *, u8 *buf, int buf_len);
-extern int dvb_usb_tuner_set_params_i2c(struct dvb_frontend *, struct dvb_frontend_parameters *);
-
/* commonly used firmware download types and function */
struct hexline {
u8 len;
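
With the pll_addr/pll_init/pll_desc fields and the dvb_usb_tuner_* helpers removed above, a dvb-usb driver now attaches its PLL through dvb_attach() and one of the DVB_PLL_* ids from dvb-pll.h, exactly as the cxusb and dibusb hunks do. A minimal sketch of the new-style tuner_attach callback (the function name is illustrative; the i2c address and pll id depend on the board):

static int example_tuner_attach(struct dvb_usb_adapter *adap)
{
	/* attach the generic dvb-pll tuner instead of filling adap->pll_* */
	if (dvb_attach(dvb_pll_attach, adap->fe, 0x61,
		       &adap->dev->i2c_adap, DVB_PLL_TUA6010XS) == NULL)
		return -ENODEV;
	return 0;
}
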
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index e0587e663591..f01d99c1c43c 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -157,6 +157,7 @@ static int gl861_probe(struct usb_interface *intf,
static struct usb_device_id gl861_table [] = {
{ USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580_55801) },
+ { USB_DEVICE(USB_VID_ALINK, USB_VID_ALINK_DTU) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, gl861_table);
@@ -187,12 +188,16 @@ static struct dvb_usb_device_properties gl861_properties = {
}},
.i2c_algo = &gl861_i2c_algo,
- .num_device_descs = 1,
+ .num_device_descs = 2,
.devices = {
{ "MSI Mega Sky 55801 DVB-T USB2.0",
{ &gl861_table[0], NULL },
{ NULL },
},
+ { "A-LINK DTU DVB-T USB2.0",
+ { &gl861_table[1], NULL },
+ { NULL },
+ },
}
};
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index c546ddeda5d4..a956bc503a4c 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -22,6 +22,8 @@ static int dvb_usb_m920x_debug;
module_param_named(debug,dvb_usb_m920x_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))." DVB_USB_DEBUG_STATUS);
+static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid);
+
static inline int m920x_read(struct usb_device *udev, u8 request, u16 value,
u16 index, void *data, int size)
{
@@ -57,7 +59,8 @@ static inline int m920x_write(struct usb_device *udev, u8 request,
static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq)
{
- int ret = 0;
+ int ret = 0, i, epi, flags = 0;
+ int adap_enabled[M9206_MAX_ADAPTERS] = { 0 };
/* Remote controller init. */
if (d->props.rc_query) {
@@ -76,9 +79,51 @@ static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq)
deb("Initialising remote control success\n");
}
+ for (i = 0; i < d->props.num_adapters; i++)
+ flags |= d->adapter[i].props.caps;
+
+ /* Some devices (Dposh) might crash if we attempt to touch them at all. */
+ if (flags & DVB_USB_ADAP_HAS_PID_FILTER) {
+ for (i = 0; i < d->props.num_adapters; i++) {
+ epi = d->adapter[i].props.stream.endpoint - 0x81;
+
+ if (epi < 0 || epi >= M9206_MAX_ADAPTERS) {
+ printk(KERN_INFO "m920x: Unexpected adapter endpoint!\n");
+ return -EINVAL;
+ }
+
+ adap_enabled[epi] = 1;
+ }
+
+ for (i = 0; i < M9206_MAX_ADAPTERS; i++) {
+ if (adap_enabled[i])
+ continue;
+
+ if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x0)) != 0)
+ return ret;
+
+ if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x02f5)) != 0)
+ return ret;
+ }
+ }
+
return ret;
}
+static int m920x_init_ep(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_host_interface *alt;
+
+ if ((alt = usb_altnum_to_altsetting(intf, 1)) == NULL) {
+ deb("No alt found!\n");
+ return -ENODEV;
+ }
+
+ return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
+}
+
static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
struct m920x_state *m = d->priv;
@@ -211,8 +256,7 @@ static struct i2c_algorithm m920x_i2c_algo = {
};
/* pid filter */
-static int m920x_set_filter(struct dvb_usb_adapter *adap,
- int type, int idx, int pid)
+static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid)
{
int ret = 0;
@@ -221,10 +265,10 @@ static int m920x_set_filter(struct dvb_usb_adapter *adap,
pid |= 0x8000;
- if ((ret = m920x_write(adap->dev->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0)
+ if ((ret = m920x_write(d->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0)
return ret;
- if ((ret = m920x_write(adap->dev->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0)
+ if ((ret = m920x_write(d->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0)
return ret;
return ret;
@@ -233,40 +277,35 @@ static int m920x_set_filter(struct dvb_usb_adapter *adap,
static int m920x_update_filters(struct dvb_usb_adapter *adap)
{
struct m920x_state *m = adap->dev->priv;
- int enabled = m->filtering_enabled;
+ int enabled = m->filtering_enabled[adap->id];
int i, ret = 0, filter = 0;
+ int ep = adap->props.stream.endpoint;
for (i = 0; i < M9206_MAX_FILTERS; i++)
- if (m->filters[i] == 8192)
+ if (m->filters[adap->id][i] == 8192)
enabled = 0;
/* Disable all filters */
- if ((ret = m920x_set_filter(adap, 0x81, 1, enabled)) != 0)
+ if ((ret = m920x_set_filter(adap->dev, ep, 1, enabled)) != 0)
return ret;
for (i = 0; i < M9206_MAX_FILTERS; i++)
- if ((ret = m920x_set_filter(adap, 0x81, i + 2, 0)) != 0)
+ if ((ret = m920x_set_filter(adap->dev, ep, i + 2, 0)) != 0)
return ret;
- if ((ret = m920x_set_filter(adap, 0x82, 0, 0x0)) != 0)
- return ret;
-
/* Set */
if (enabled) {
for (i = 0; i < M9206_MAX_FILTERS; i++) {
- if (m->filters[i] == 0)
+ if (m->filters[adap->id][i] == 0)
continue;
- if ((ret = m920x_set_filter(adap, 0x81, filter + 2, m->filters[i])) != 0)
+ if ((ret = m920x_set_filter(adap->dev, ep, filter + 2, m->filters[adap->id][i])) != 0)
return ret;
filter++;
}
}
- if ((ret = m920x_set_filter(adap, 0x82, 0, 0x02f5)) != 0)
- return ret;
-
return ret;
}
@@ -274,7 +313,7 @@ static int m920x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
struct m920x_state *m = adap->dev->priv;
- m->filtering_enabled = onoff ? 1 : 0;
+ m->filtering_enabled[adap->id] = onoff ? 1 : 0;
return m920x_update_filters(adap);
}
@@ -283,7 +322,7 @@ static int m920x_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, in
{
struct m920x_state *m = adap->dev->priv;
- m->filters[index] = onoff ? pid : 0;
+ m->filters[adap->id][index] = onoff ? pid : 0;
return m920x_update_filters(adap);
}
@@ -368,6 +407,7 @@ static int m920x_identify_state(struct usb_device *udev,
/* demod configurations */
static int m920x_mt352_demod_init(struct dvb_frontend *fe)
{
+ int ret;
u8 config[] = { CONFIG, 0x3d };
u8 clock[] = { CLOCK_CTL, 0x30 };
u8 reset[] = { RESET, 0x80 };
@@ -377,17 +417,25 @@ static int m920x_mt352_demod_init(struct dvb_frontend *fe)
u8 unk1[] = { 0x93, 0x1a };
u8 unk2[] = { 0xb5, 0x7a };
- mt352_write(fe, config, ARRAY_SIZE(config));
- mt352_write(fe, clock, ARRAY_SIZE(clock));
- mt352_write(fe, reset, ARRAY_SIZE(reset));
- mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl));
- mt352_write(fe, agc, ARRAY_SIZE(agc));
- mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc));
- mt352_write(fe, unk1, ARRAY_SIZE(unk1));
- mt352_write(fe, unk2, ARRAY_SIZE(unk2));
-
deb("Demod init!\n");
+ if ((ret = mt352_write(fe, config, ARRAY_SIZE(config))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, clock, ARRAY_SIZE(clock))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, reset, ARRAY_SIZE(reset))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, agc, ARRAY_SIZE(agc))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, unk1, ARRAY_SIZE(unk1))) != 0)
+ return ret;
+ if ((ret = mt352_write(fe, unk2, ARRAY_SIZE(unk2))) != 0)
+ return ret;
+
return 0;
}
@@ -558,8 +606,7 @@ static struct dvb_usb_device_properties dposh_properties;
static int m920x_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
- struct usb_host_interface *alt;
+ struct dvb_usb_device *d = NULL;
int ret;
struct m920x_inits *rc_init_seq = NULL;
int bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -604,23 +651,13 @@ static int m920x_probe(struct usb_interface *intf,
* tvwalkertwin_properties already configured both
* tuners, so there is nothing for us to do here
*/
-
- return -ENODEV;
}
found:
- alt = usb_altnum_to_altsetting(intf, 1);
- if (alt == NULL) {
- deb("No alt found!\n");
- return -ENODEV;
- }
-
- ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
- alt->desc.bAlternateSetting);
- if (ret < 0)
+ if ((ret = m920x_init_ep(intf)) < 0)
return ret;
- if ((ret = m920x_init(d, rc_init_seq)) != 0)
+ if (d && (ret = m920x_init(d, rc_init_seq)) != 0)
return ret;
return ret;
@@ -737,9 +774,9 @@ static struct dvb_usb_device_properties digivox_mini_ii_properties = {
*
* LifeView TV Walker Twin has 1 x M9206, 2 x TDA10046, 2 x TDA8275A
* TDA10046 #0 is located at i2c address 0x08
- * TDA10046 #1 is located at i2c address 0x0b (presently disabled - not yet working)
+ * TDA10046 #1 is located at i2c address 0x0b
* TDA8275A #0 is located at i2c address 0x60
- * TDA8275A #1 is located at i2c address 0x61 (presently disabled - not yet working)
+ * TDA8275A #1 is located at i2c address 0x61
*/
static struct dvb_usb_device_properties tvwalkertwin_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -756,7 +793,7 @@ static struct dvb_usb_device_properties tvwalkertwin_properties = {
.size_of_priv = sizeof(struct m920x_state),
.identify_state = m920x_identify_state,
- .num_adapters = 1,
+ .num_adapters = 2,
.adapter = {{
.caps = DVB_USB_ADAP_HAS_PID_FILTER |
DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
diff --git a/drivers/media/dvb/dvb-usb/m920x.h b/drivers/media/dvb/dvb-usb/m920x.h
index 2c8942d04222..37532890accd 100644
--- a/drivers/media/dvb/dvb-usb/m920x.h
+++ b/drivers/media/dvb/dvb-usb/m920x.h
@@ -18,6 +18,7 @@
#define M9206_FW 0x30
#define M9206_MAX_FILTERS 8
+#define M9206_MAX_ADAPTERS 2
/*
sequences found in logs:
@@ -60,8 +61,8 @@ response to a write, is unknown.
*/
struct m920x_state {
- u16 filters[M9206_MAX_FILTERS];
- int filtering_enabled;
+ u16 filters[M9206_MAX_ADAPTERS][M9206_MAX_FILTERS];
+ int filtering_enabled[M9206_MAX_ADAPTERS];
int rep_count;
};
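
Since one m920x device can now drive two adapters (the TV Walker Twin sets num_adapters = 2), the PID filter state is kept per adapter and indexed by adap->id rather than shared. A simplified sketch of how the callbacks use the widened arrays, condensed from the hunks above for illustration:

static int example_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
	struct m920x_state *m = adap->dev->priv;

	/* each adapter owns its own row of the filter table */
	m->filters[adap->id][index] = onoff ? pid : 0;
	return m920x_update_filters(adap);
}
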
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 518d7ad217df..d7c04951ceab 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -263,7 +263,7 @@ static int opera1_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(
dvb_pll_attach, adap->fe, 0xc0>>1,
- &adap->dev->i2c_adap, &dvb_pll_opera1
+ &adap->dev->i2c_adap, DVB_PLL_OPERA1
);
return 0;
}
@@ -435,9 +435,9 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
{
const struct firmware *fw = NULL;
u8 *b, *p;
- int ret = 0, i;
+ int ret = 0, i, fpgasize = 40;
u8 testval;
- info("start downloading fpga firmware");
+ info("start downloading fpga firmware %s",filename);
if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
err("did not find the firmware file. (%s) "
@@ -454,17 +454,20 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
/* clear fpga ? */
opera1_xilinx_rw(dev, 0xbc, 0xaa, &fpga_command, 1,
OPERA_WRITE_MSG);
- for (i = 0; p[i] != 0 && i < fw->size;) {
+ for (i = 0; i < fw->size;) {
+ if ((fw->size - i) < fpgasize) {
+ fpgasize = fw->size - i;
+ }
b = (u8 *) p + i;
if (opera1_xilinx_rw
- (dev, OPERA_WRITE_FX2, 0x0, b + 1, b[0],
- OPERA_WRITE_MSG) != b[0]
+ (dev, OPERA_WRITE_FX2, 0x0, b, fpgasize,
+ OPERA_WRITE_MSG) != fpgasize
) {
err("error while transferring firmware");
ret = -EINVAL;
break;
}
- i = i + 1 + b[0];
+ i = i + fpgasize;
}
/* restart the CPU */
if (ret || opera1_xilinx_rw
@@ -534,18 +537,16 @@ static struct dvb_usb_device_properties opera1_properties = {
static int opera1_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct dvb_usb_device *d;
struct usb_device *udev = interface_to_usbdev(intf);
if (udev->descriptor.idProduct == USB_PID_OPERA1_WARM &&
udev->descriptor.idVendor == USB_VID_OPERA1 &&
- (d == NULL
- || opera1_xilinx_load_firmware(udev, "dvb-usb-opera1-fpga.fw") != 0)
- ) {
+ opera1_xilinx_load_firmware(udev, "dvb-usb-opera1-fpga-01.fw") != 0
+ ) {
return -EINVAL;
}
- if (dvb_usb_device_init(intf, &opera1_properties, THIS_MODULE, &d) != 0)
+ if (dvb_usb_device_init(intf, &opera1_properties, THIS_MODULE, NULL) != 0)
return -EINVAL;
return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index f77b48f76582..0dcab3d4e236 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -65,9 +65,7 @@ static int umt_mt352_frontend_attach(struct dvb_usb_adapter *adap)
static int umt_tuner_attach (struct dvb_usb_adapter *adap)
{
- adap->pll_addr = 0x61;
- adap->pll_desc = &dvb_pll_tua6034;
- adap->fe->ops.tuner_ops.calc_regs = dvb_usb_tuner_calc_regs;
+ dvb_attach(dvb_pll_attach, adap->fe, 0x61, NULL, DVB_PLL_TUA6034);
return 0;
}
@@ -84,8 +82,8 @@ static int umt_probe(struct usb_interface *intf,
/* do not change the order of the ID table */
static struct usb_device_id umt_table [] = {
-/* 00 */ { USB_DEVICE(USB_VID_HANFTEK, USB_PID_HANFTEK_UMT_010_COLD) },
-/* 01 */ { USB_DEVICE(USB_VID_HANFTEK, USB_PID_HANFTEK_UMT_010_WARM) },
+/* 00 */ { USB_DEVICE(USB_VID_HANFTEK, USB_PID_HANFTEK_UMT_010_COLD) },
+/* 01 */ { USB_DEVICE(USB_VID_HANFTEK, USB_PID_HANFTEK_UMT_010_WARM) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, umt_table);
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 27f386585d43..156b062e02c4 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -2,7 +2,7 @@
# Makefile for the kernel DVB frontend device drivers.
#
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
obj-$(CONFIG_DVB_PLL) += dvb-pll.o
obj-$(CONFIG_DVB_STV0299) += stv0299.o
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 335219ebce2d..1dc164d5488c 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include "dvb_frontend.h"
-#include "dvb-pll.h"
#include "cx22702.h"
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index 732e94aaa364..0834c0677fef 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -917,7 +917,7 @@ static int cx24123_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
static int cx24123_tune(struct dvb_frontend* fe,
struct dvb_frontend_parameters* params,
unsigned int mode_flags,
- int *delay,
+ unsigned int *delay,
fe_status_t *status)
{
int retval = 0;
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index 5f96ffda91ad..0c0b94767bc1 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -24,6 +24,23 @@
#include "dvb-pll.h"
+struct dvb_pll_desc {
+ char *name;
+ u32 min;
+ u32 max;
+ u32 iffreq;
+ void (*set)(u8 *buf, const struct dvb_frontend_parameters *params);
+ u8 *initdata;
+ u8 *sleepdata;
+ int count;
+ struct {
+ u32 limit;
+ u32 stepsize;
+ u8 config;
+ u8 cb;
+ } entries[12];
+};
+
/* ----------------------------------------------------------- */
/* descriptions */
@@ -38,7 +55,13 @@
0x50 = AGC Take over point = 103 dBuV */
static u8 tua603x_agc103[] = { 2, 0x80|0x40|0x18|0x06|0x01, 0x00|0x50 };
-struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
+/* 0x04 = 166.67 kHz divider
+
+ 0x80 = AGC Time constant 50ms Iagc = 9 uA
+ 0x20 = AGC Take over point = 112 dBuV */
+static u8 tua603x_agc112[] = { 2, 0x80|0x40|0x18|0x04|0x01, 0x80|0x20 };
+
+static struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
.name = "Thomson dtt7579",
.min = 177000000,
.max = 858000000,
@@ -52,9 +75,8 @@ struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
{ 999999999, 166667, 0xf4, 0x08 },
},
};
-EXPORT_SYMBOL(dvb_pll_thomson_dtt7579);
-struct dvb_pll_desc dvb_pll_thomson_dtt7610 = {
+static struct dvb_pll_desc dvb_pll_thomson_dtt7610 = {
.name = "Thomson dtt7610",
.min = 44000000,
.max = 958000000,
@@ -66,19 +88,19 @@ struct dvb_pll_desc dvb_pll_thomson_dtt7610 = {
{ 999999999, 62500, 0x8e, 0x3c },
},
};
-EXPORT_SYMBOL(dvb_pll_thomson_dtt7610);
-static void thomson_dtt759x_bw(u8 *buf, u32 freq, int bandwidth)
+static void thomson_dtt759x_bw(u8 *buf,
+ const struct dvb_frontend_parameters *params)
{
- if (BANDWIDTH_7_MHZ == bandwidth)
+ if (BANDWIDTH_7_MHZ == params->u.ofdm.bandwidth)
buf[3] |= 0x10;
}
-struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
+static struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
.name = "Thomson dtt759x",
.min = 177000000,
.max = 896000000,
- .setbw = thomson_dtt759x_bw,
+ .set = thomson_dtt759x_bw,
.iffreq= 36166667,
.sleepdata = (u8[]){ 2, 0x84, 0x03 },
.count = 5,
@@ -90,9 +112,8 @@ struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
{ 999999999, 166667, 0xfc, 0x08 },
},
};
-EXPORT_SYMBOL(dvb_pll_thomson_dtt759x);
-struct dvb_pll_desc dvb_pll_lg_z201 = {
+static struct dvb_pll_desc dvb_pll_lg_z201 = {
.name = "LG z201",
.min = 174000000,
.max = 862000000,
@@ -107,9 +128,8 @@ struct dvb_pll_desc dvb_pll_lg_z201 = {
{ 999999999, 166667, 0xfc, 0x04 },
},
};
-EXPORT_SYMBOL(dvb_pll_lg_z201);
-struct dvb_pll_desc dvb_pll_microtune_4042 = {
+static struct dvb_pll_desc dvb_pll_microtune_4042 = {
.name = "Microtune 4042 FI5",
.min = 57000000,
.max = 858000000,
@@ -121,9 +141,8 @@ struct dvb_pll_desc dvb_pll_microtune_4042 = {
{ 999999999, 62500, 0x8e, 0x31 },
},
};
-EXPORT_SYMBOL(dvb_pll_microtune_4042);
-struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
+static struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
.name = "Thomson dtt761x",
.min = 57000000,
@@ -137,9 +156,8 @@ struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
{ 999999999, 62500, 0x8e, 0x3c },
},
};
-EXPORT_SYMBOL(dvb_pll_thomson_dtt761x);
-struct dvb_pll_desc dvb_pll_unknown_1 = {
+static struct dvb_pll_desc dvb_pll_unknown_1 = {
.name = "unknown 1", /* used by dntv live dvb-t */
.min = 174000000,
.max = 862000000,
@@ -157,12 +175,11 @@ struct dvb_pll_desc dvb_pll_unknown_1 = {
{ 999999999, 166667, 0xfc, 0x08 },
},
};
-EXPORT_SYMBOL(dvb_pll_unknown_1);
/* Infineon TUA6010XS
* used in Thomson Cable Tuner
*/
-struct dvb_pll_desc dvb_pll_tua6010xs = {
+static struct dvb_pll_desc dvb_pll_tua6010xs = {
.name = "Infineon TUA6010XS",
.min = 44250000,
.max = 858000000,
@@ -174,10 +191,9 @@ struct dvb_pll_desc dvb_pll_tua6010xs = {
{ 999999999, 62500, 0x8e, 0x85 },
},
};
-EXPORT_SYMBOL(dvb_pll_tua6010xs);
/* Panasonic env57h1xd5 (some Philips PLL ?) */
-struct dvb_pll_desc dvb_pll_env57h1xd5 = {
+static struct dvb_pll_desc dvb_pll_env57h1xd5 = {
.name = "Panasonic ENV57H1XD5",
.min = 44250000,
.max = 858000000,
@@ -190,23 +206,23 @@ struct dvb_pll_desc dvb_pll_env57h1xd5 = {
{ 999999999, 166667, 0xc2, 0xa4 },
},
};
-EXPORT_SYMBOL(dvb_pll_env57h1xd5);
/* Philips TDA6650/TDA6651
* used in Panasonic ENV77H11D5
*/
-static void tda665x_bw(u8 *buf, u32 freq, int bandwidth)
+static void tda665x_bw(u8 *buf, const struct dvb_frontend_parameters *params)
{
- if (bandwidth == BANDWIDTH_8_MHZ)
+ if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
buf[3] |= 0x08;
}
-struct dvb_pll_desc dvb_pll_tda665x = {
+static struct dvb_pll_desc dvb_pll_tda665x = {
.name = "Philips TDA6650/TDA6651",
.min = 44250000,
.max = 858000000,
- .setbw = tda665x_bw,
+ .set = tda665x_bw,
.iffreq= 36166667,
+ .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
.count = 12,
.entries = {
{ 93834000, 166667, 0xca, 0x61 /* 011 0 0 0 01 */ },
@@ -223,36 +239,34 @@ struct dvb_pll_desc dvb_pll_tda665x = {
{ 861000000, 166667, 0xca, 0xe4 /* 111 0 0 1 00 */ },
}
};
-EXPORT_SYMBOL(dvb_pll_tda665x);
/* Infineon TUA6034
* used in LG TDTP E102P
*/
-static void tua6034_bw(u8 *buf, u32 freq, int bandwidth)
+static void tua6034_bw(u8 *buf, const struct dvb_frontend_parameters *params)
{
- if (BANDWIDTH_7_MHZ != bandwidth)
+ if (BANDWIDTH_7_MHZ != params->u.ofdm.bandwidth)
buf[3] |= 0x08;
}
-struct dvb_pll_desc dvb_pll_tua6034 = {
+static struct dvb_pll_desc dvb_pll_tua6034 = {
.name = "Infineon TUA6034",
.min = 44250000,
.max = 858000000,
.iffreq= 36166667,
.count = 3,
- .setbw = tua6034_bw,
+ .set = tua6034_bw,
.entries = {
{ 174500000, 62500, 0xce, 0x01 },
{ 230000000, 62500, 0xce, 0x02 },
{ 999999999, 62500, 0xce, 0x04 },
},
};
-EXPORT_SYMBOL(dvb_pll_tua6034);
/* Infineon TUA6034
* used in LG TDVS-H061F, LG TDVS-H062F and LG TDVS-H064F
*/
-struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf = {
+static struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf = {
.name = "LG TDVS-H06xF",
.min = 54000000,
.max = 863000000,
@@ -265,23 +279,25 @@ struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf = {
{ 999999999, 62500, 0xce, 0x04 },
},
};
-EXPORT_SYMBOL(dvb_pll_lg_tdvs_h06xf);
/* Philips FMD1216ME
* used in Medion Hybrid PCMCIA card and USB Box
*/
-static void fmd1216me_bw(u8 *buf, u32 freq, int bandwidth)
+static void fmd1216me_bw(u8 *buf, const struct dvb_frontend_parameters *params)
{
- if (bandwidth == BANDWIDTH_8_MHZ && freq >= 158870000)
+ if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ &&
+ params->frequency >= 158870000)
buf[3] |= 0x08;
}
-struct dvb_pll_desc dvb_pll_fmd1216me = {
+static struct dvb_pll_desc dvb_pll_fmd1216me = {
.name = "Philips FMD1216ME",
.min = 50870000,
.max = 858000000,
.iffreq= 36125000,
- .setbw = fmd1216me_bw,
+ .set = fmd1216me_bw,
+ .initdata = tua603x_agc112,
+ .sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 },
.count = 7,
.entries = {
{ 143870000, 166667, 0xbc, 0x41 },
@@ -293,23 +309,22 @@ struct dvb_pll_desc dvb_pll_fmd1216me = {
{ 999999999, 166667, 0xfc, 0x44 },
}
};
-EXPORT_SYMBOL(dvb_pll_fmd1216me);
/* ALPS TDED4
* used in Nebula-Cards and USB boxes
*/
-static void tded4_bw(u8 *buf, u32 freq, int bandwidth)
+static void tded4_bw(u8 *buf, const struct dvb_frontend_parameters *params)
{
- if (bandwidth == BANDWIDTH_8_MHZ)
+ if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
buf[3] |= 0x04;
}
-struct dvb_pll_desc dvb_pll_tded4 = {
+static struct dvb_pll_desc dvb_pll_tded4 = {
.name = "ALPS TDED4",
.min = 47000000,
.max = 863000000,
.iffreq= 36166667,
- .setbw = tded4_bw,
+ .set = tded4_bw,
.count = 4,
.entries = {
{ 153000000, 166667, 0x85, 0x01 },
@@ -318,12 +333,11 @@ struct dvb_pll_desc dvb_pll_tded4 = {
{ 999999999, 166667, 0x85, 0x88 },
}
};
-EXPORT_SYMBOL(dvb_pll_tded4);
/* ALPS TDHU2
* used in AverTVHD MCE A180
*/
-struct dvb_pll_desc dvb_pll_tdhu2 = {
+static struct dvb_pll_desc dvb_pll_tdhu2 = {
.name = "ALPS TDHU2",
.min = 54000000,
.max = 864000000,
@@ -336,16 +350,29 @@ struct dvb_pll_desc dvb_pll_tdhu2 = {
{ 999999999, 62500, 0x85, 0x88 },
}
};
-EXPORT_SYMBOL(dvb_pll_tdhu2);
/* Philips TUV1236D
* used in ATI HDTV Wonder
*/
-struct dvb_pll_desc dvb_pll_tuv1236d = {
+static void tuv1236d_rf(u8 *buf, const struct dvb_frontend_parameters *params)
+{
+ switch (params->u.vsb.modulation) {
+ case QAM_64:
+ case QAM_256:
+ buf[3] |= 0x08;
+ break;
+ case VSB_8:
+ default:
+ buf[3] &= ~0x08;
+ }
+}
+
+static struct dvb_pll_desc dvb_pll_tuv1236d = {
.name = "Philips TUV1236D",
.min = 54000000,
.max = 864000000,
.iffreq= 44000000,
+ .set = tuv1236d_rf,
.count = 3,
.entries = {
{ 157250000, 62500, 0xc6, 0x41 },
@@ -353,12 +380,11 @@ struct dvb_pll_desc dvb_pll_tuv1236d = {
{ 999999999, 62500, 0xc6, 0x44 },
},
};
-EXPORT_SYMBOL(dvb_pll_tuv1236d);
/* Samsung TBMV30111IN / TBMV30712IN1
* used in Air2PC ATSC - 2nd generation (nxt2002)
*/
-struct dvb_pll_desc dvb_pll_samsung_tbmv = {
+static struct dvb_pll_desc dvb_pll_samsung_tbmv = {
.name = "Samsung TBMV30111IN / TBMV30712IN1",
.min = 54000000,
.max = 860000000,
@@ -373,12 +399,11 @@ struct dvb_pll_desc dvb_pll_samsung_tbmv = {
{ 999999999, 166667, 0xfc, 0x02 },
}
};
-EXPORT_SYMBOL(dvb_pll_samsung_tbmv);
/*
* Philips SD1878 Tuner.
*/
-struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
+static struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
.name = "Philips SD1878",
.min = 950000,
.max = 2150000,
@@ -391,19 +416,18 @@ struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
{ 2150000, 500, 0xc4, 0xc0},
},
};
-EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261);
/*
* Philips TD1316 Tuner.
*/
-static void td1316_bw(u8 *buf, u32 freq, int bandwidth)
+static void td1316_bw(u8 *buf, const struct dvb_frontend_parameters *params)
{
u8 band;
/* determine band */
- if (freq < 161000000)
+ if (params->frequency < 161000000)
band = 1;
- else if (freq < 444000000)
+ else if (params->frequency < 444000000)
band = 2;
else
band = 4;
@@ -411,16 +435,16 @@ static void td1316_bw(u8 *buf, u32 freq, int bandwidth)
buf[3] |= band;
/* setup PLL filter */
- if (bandwidth == BANDWIDTH_8_MHZ)
+ if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
buf[3] |= 1 << 3;
}
-struct dvb_pll_desc dvb_pll_philips_td1316 = {
+static struct dvb_pll_desc dvb_pll_philips_td1316 = {
.name = "Philips TD1316",
.min = 87000000,
.max = 895000000,
.iffreq= 36166667,
- .setbw = td1316_bw,
+ .set = td1316_bw,
.count = 9,
.entries = {
{ 93834000, 166667, 0xca, 0x60},
@@ -434,10 +458,9 @@ struct dvb_pll_desc dvb_pll_philips_td1316 = {
{ 858834000, 166667, 0xca, 0xe0},
},
};
-EXPORT_SYMBOL(dvb_pll_philips_td1316);
/* FE6600 used on DViCO Hybrid */
-struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
+static struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
.name = "Thomson FE6600",
.min = 44250000,
.max = 858000000,
@@ -450,19 +473,19 @@ struct dvb_pll_desc dvb_pll_thomson_fe6600 = {
{ 999999999, 166667, 0xf4, 0x18 },
}
};
-EXPORT_SYMBOL(dvb_pll_thomson_fe6600);
-static void opera1_bw(u8 *buf, u32 freq, int bandwidth)
+
+static void opera1_bw(u8 *buf, const struct dvb_frontend_parameters *params)
{
- if (bandwidth == BANDWIDTH_8_MHZ)
+ if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ)
buf[2] |= 0x08;
}
-struct dvb_pll_desc dvb_pll_opera1 = {
+static struct dvb_pll_desc dvb_pll_opera1 = {
.name = "Opera Tuner",
.min = 900000,
.max = 2250000,
.iffreq= 0,
- .setbw = opera1_bw,
+ .set = opera1_bw,
.count = 8,
.entries = {
{ 1064000, 500, 0xe5, 0xc6 },
@@ -475,7 +498,54 @@ struct dvb_pll_desc dvb_pll_opera1 = {
{ 2250000, 500, 0xe5, 0xc4 },
}
};
-EXPORT_SYMBOL(dvb_pll_opera1);
+
+/* Philips FCV1236D
+ */
+struct dvb_pll_desc dvb_pll_fcv1236d = {
+/* Bit_0: RF Input select
+ * Bit_1: 0=digital, 1=analog
+ */
+ .name = "Philips FCV1236D",
+ .min = 53000000,
+ .max = 803000000,
+ .iffreq= 44000000,
+ .count = 3,
+ .entries = {
+ { 159000000, 62500, 0x8e, 0xa0 },
+ { 453000000, 62500, 0x8e, 0x90 },
+ { 999999999, 62500, 0x8e, 0x30 },
+ },
+};
+
+/* ----------------------------------------------------------- */
+
+static struct dvb_pll_desc *pll_list[] = {
+ [DVB_PLL_UNDEFINED] = NULL,
+ [DVB_PLL_THOMSON_DTT7579] = &dvb_pll_thomson_dtt7579,
+ [DVB_PLL_THOMSON_DTT759X] = &dvb_pll_thomson_dtt759x,
+ [DVB_PLL_THOMSON_DTT7610] = &dvb_pll_thomson_dtt7610,
+ [DVB_PLL_LG_Z201] = &dvb_pll_lg_z201,
+ [DVB_PLL_MICROTUNE_4042] = &dvb_pll_microtune_4042,
+ [DVB_PLL_THOMSON_DTT761X] = &dvb_pll_thomson_dtt761x,
+ [DVB_PLL_UNKNOWN_1] = &dvb_pll_unknown_1,
+ [DVB_PLL_TUA6010XS] = &dvb_pll_tua6010xs,
+ [DVB_PLL_ENV57H1XD5] = &dvb_pll_env57h1xd5,
+ [DVB_PLL_TUA6034] = &dvb_pll_tua6034,
+ [DVB_PLL_LG_TDVS_H06XF] = &dvb_pll_lg_tdvs_h06xf,
+ [DVB_PLL_TDA665X] = &dvb_pll_tda665x,
+ [DVB_PLL_FMD1216ME] = &dvb_pll_fmd1216me,
+ [DVB_PLL_TDED4] = &dvb_pll_tded4,
+ [DVB_PLL_TUV1236D] = &dvb_pll_tuv1236d,
+ [DVB_PLL_TDHU2] = &dvb_pll_tdhu2,
+ [DVB_PLL_SAMSUNG_TBMV] = &dvb_pll_samsung_tbmv,
+ [DVB_PLL_PHILIPS_SD1878_TDA8261] = &dvb_pll_philips_sd1878_tda8261,
+ [DVB_PLL_PHILIPS_TD1316] = &dvb_pll_philips_td1316,
+ [DVB_PLL_THOMSON_FE6600] = &dvb_pll_thomson_fe6600,
+ [DVB_PLL_OPERA1] = &dvb_pll_opera1,
+ [DVB_PLL_FCV1236D] = &dvb_pll_fcv1236d,
+};
+
+/* ----------------------------------------------------------- */
struct dvb_pll_priv {
/* i2c details */
@@ -497,35 +567,37 @@ static int debug = 0;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable verbose debug messages");
-int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
- u32 freq, int bandwidth)
+static int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
+ const struct dvb_frontend_parameters *params)
{
u32 div;
int i;
- if (freq != 0 && (freq < desc->min || freq > desc->max))
- return -EINVAL;
+ if (params->frequency != 0 && (params->frequency < desc->min ||
+ params->frequency > desc->max))
+ return -EINVAL;
for (i = 0; i < desc->count; i++) {
- if (freq > desc->entries[i].limit)
+ if (params->frequency > desc->entries[i].limit)
continue;
break;
}
+
if (debug)
- printk("pll: %s: freq=%d bw=%d | i=%d/%d\n",
- desc->name, freq, bandwidth, i, desc->count);
+ printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
+ params->frequency, i, desc->count);
if (i == desc->count)
return -EINVAL;
- div = (freq + desc->iffreq + desc->entries[i].stepsize/2) /
- desc->entries[i].stepsize;
+ div = (params->frequency + desc->iffreq +
+ desc->entries[i].stepsize/2) / desc->entries[i].stepsize;
buf[0] = div >> 8;
buf[1] = div & 0xff;
buf[2] = desc->entries[i].config;
buf[3] = desc->entries[i].cb;
- if (desc->setbw)
- desc->setbw(buf, freq, bandwidth);
+ if (desc->set)
+ desc->set(buf, params);
if (debug)
printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
@@ -534,7 +606,6 @@ int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
// calculate the frequency we set it to
return (div * desc->entries[i].stepsize) - desc->iffreq;
}
-EXPORT_SYMBOL(dvb_pll_configure);
static int dvb_pll_release(struct dvb_frontend *fe)
{
@@ -578,18 +649,12 @@ static int dvb_pll_set_params(struct dvb_frontend *fe,
{ .addr = priv->pll_i2c_address, .flags = 0,
.buf = buf, .len = sizeof(buf) };
int result;
- u32 bandwidth = 0, frequency = 0;
+ u32 frequency = 0;
if (priv->i2c == NULL)
return -EINVAL;
- // DVBT bandwidth only just now
- if (fe->ops.info.type == FE_OFDM) {
- bandwidth = params->u.ofdm.bandwidth;
- }
-
- if ((result = dvb_pll_configure(priv->pll_desc, buf,
- params->frequency, bandwidth)) < 0)
+ if ((result = dvb_pll_configure(priv->pll_desc, buf, params)) < 0)
return result;
else
frequency = result;
@@ -601,7 +666,7 @@ static int dvb_pll_set_params(struct dvb_frontend *fe,
}
priv->frequency = frequency;
- priv->bandwidth = bandwidth;
+ priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
return 0;
}
@@ -612,18 +677,12 @@ static int dvb_pll_calc_regs(struct dvb_frontend *fe,
{
struct dvb_pll_priv *priv = fe->tuner_priv;
int result;
- u32 bandwidth = 0, frequency = 0;
+ u32 frequency = 0;
if (buf_len < 5)
return -EINVAL;
- // DVBT bandwidth only just now
- if (fe->ops.info.type == FE_OFDM) {
- bandwidth = params->u.ofdm.bandwidth;
- }
-
- if ((result = dvb_pll_configure(priv->pll_desc, buf+1,
- params->frequency, bandwidth)) < 0)
+ if ((result = dvb_pll_configure(priv->pll_desc, buf+1, params)) < 0)
return result;
else
frequency = result;
@@ -631,7 +690,7 @@ static int dvb_pll_calc_regs(struct dvb_frontend *fe,
buf[0] = priv->pll_i2c_address;
priv->frequency = frequency;
- priv->bandwidth = bandwidth;
+ priv->bandwidth = (fe->ops.info.type == FE_OFDM) ? params->u.ofdm.bandwidth : 0;
return 5;
}
@@ -687,13 +746,18 @@ static struct dvb_tuner_ops dvb_pll_tuner_ops = {
struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct i2c_adapter *i2c,
- struct dvb_pll_desc *desc)
+ unsigned int pll_desc_id)
{
u8 b1 [] = { 0 };
struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
.buf = b1, .len = 1 };
struct dvb_pll_priv *priv = NULL;
int ret;
+ struct dvb_pll_desc *desc;
+
+ BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
+
+ desc = pll_list[pll_desc_id];
if (i2c != NULL) {
if (fe->ops.i2c_gate_ctrl)
diff --git a/drivers/media/dvb/frontends/dvb-pll.h b/drivers/media/dvb/frontends/dvb-pll.h
index 5209f46f0893..e93a8104052b 100644
--- a/drivers/media/dvb/frontends/dvb-pll.h
+++ b/drivers/media/dvb/frontends/dvb-pll.h
@@ -8,50 +8,29 @@
#include <linux/i2c.h>
#include "dvb_frontend.h"
-struct dvb_pll_desc {
- char *name;
- u32 min;
- u32 max;
- u32 iffreq;
- void (*setbw)(u8 *buf, u32 freq, int bandwidth);
- u8 *initdata;
- u8 *sleepdata;
- int count;
- struct {
- u32 limit;
- u32 stepsize;
- u8 config;
- u8 cb;
- } entries[12];
-};
-
-extern struct dvb_pll_desc dvb_pll_thomson_dtt7579;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt759x;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt7610;
-extern struct dvb_pll_desc dvb_pll_lg_z201;
-extern struct dvb_pll_desc dvb_pll_microtune_4042;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt761x;
-extern struct dvb_pll_desc dvb_pll_unknown_1;
-
-extern struct dvb_pll_desc dvb_pll_tua6010xs;
-extern struct dvb_pll_desc dvb_pll_env57h1xd5;
-extern struct dvb_pll_desc dvb_pll_tua6034;
-extern struct dvb_pll_desc dvb_pll_lg_tdvs_h06xf;
-extern struct dvb_pll_desc dvb_pll_tda665x;
-extern struct dvb_pll_desc dvb_pll_fmd1216me;
-extern struct dvb_pll_desc dvb_pll_tded4;
-
-extern struct dvb_pll_desc dvb_pll_tuv1236d;
-extern struct dvb_pll_desc dvb_pll_tdhu2;
-extern struct dvb_pll_desc dvb_pll_samsung_tbmv;
-extern struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261;
-extern struct dvb_pll_desc dvb_pll_philips_td1316;
-
-extern struct dvb_pll_desc dvb_pll_thomson_fe6600;
-extern struct dvb_pll_desc dvb_pll_opera1;
-
-extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
- u32 freq, int bandwidth);
+#define DVB_PLL_UNDEFINED 0
+#define DVB_PLL_THOMSON_DTT7579 1
+#define DVB_PLL_THOMSON_DTT759X 2
+#define DVB_PLL_THOMSON_DTT7610 3
+#define DVB_PLL_LG_Z201 4
+#define DVB_PLL_MICROTUNE_4042 5
+#define DVB_PLL_THOMSON_DTT761X 6
+#define DVB_PLL_UNKNOWN_1 7
+#define DVB_PLL_TUA6010XS 8
+#define DVB_PLL_ENV57H1XD5 9
+#define DVB_PLL_TUA6034 10
+#define DVB_PLL_LG_TDVS_H06XF 11
+#define DVB_PLL_TDA665X 12
+#define DVB_PLL_FMD1216ME 13
+#define DVB_PLL_TDED4 14
+#define DVB_PLL_TUV1236D 15
+#define DVB_PLL_TDHU2 16
+#define DVB_PLL_SAMSUNG_TBMV 17
+#define DVB_PLL_PHILIPS_SD1878_TDA8261 18
+#define DVB_PLL_PHILIPS_TD1316 19
+#define DVB_PLL_THOMSON_FE6600 20
+#define DVB_PLL_OPERA1 21
+#define DVB_PLL_FCV1236D 22
/**
* Attach a dvb-pll to the supplied frontend structure.
@@ -59,19 +38,19 @@ extern int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
* @param fe Frontend to attach to.
* @param pll_addr i2c address of the PLL (if used).
* @param i2c i2c adapter to use (set to NULL if not used).
- * @param desc dvb_pll_desc to use.
+ * @param pll_desc_id dvb_pll_desc to use.
* @return Frontend pointer on success, NULL on failure
*/
#if defined(CONFIG_DVB_PLL) || (defined(CONFIG_DVB_PLL_MODULE) && defined(MODULE))
extern struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
int pll_addr,
struct i2c_adapter *i2c,
- struct dvb_pll_desc *desc);
+ unsigned int pll_desc_id);
#else
static inline struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe,
int pll_addr,
struct i2c_adapter *i2c,
- struct dvb_pll_desc *desc)
+ unsigned int pll_desc_id)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
return NULL;
diff --git a/drivers/media/dvb/frontends/nxt200x.c b/drivers/media/dvb/frontends/nxt200x.c
index b809f83d9563..ddc84899cf86 100644
--- a/drivers/media/dvb/frontends/nxt200x.c
+++ b/drivers/media/dvb/frontends/nxt200x.c
@@ -49,7 +49,6 @@
#include <linux/string.h>
#include "dvb_frontend.h"
-#include "dvb-pll.h"
#include "nxt200x.h"
struct nxt200x_state {
@@ -546,11 +545,6 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
nxt200x_writebytes(state, 0x17, buf, 1);
}
- /* get tuning information */
- if (fe->ops.tuner_ops.calc_regs) {
- fe->ops.tuner_ops.calc_regs(fe, p, buf, 5);
- }
-
/* set additional params */
switch (p->u.vsb.modulation) {
case QAM_64:
@@ -559,27 +553,24 @@ static int nxt200x_setup_frontend_parameters (struct dvb_frontend* fe,
/* This is just a guess since I am unable to test it */
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 1);
-
- /* set input */
- if (state->config->set_pll_input)
- state->config->set_pll_input(buf+1, 1);
break;
case VSB_8:
/* Set non-punctured clock for VSB */
if (state->config->set_ts_params)
state->config->set_ts_params(fe, 0);
-
- /* set input */
- if (state->config->set_pll_input)
- state->config->set_pll_input(buf+1, 0);
break;
default:
return -EINVAL;
break;
}
- /* write frequency information */
- nxt200x_writetuner(state, buf);
+ if (fe->ops.tuner_ops.calc_regs) {
+ /* get tuning information */
+ fe->ops.tuner_ops.calc_regs(fe, p, buf, 5);
+
+ /* write frequency information */
+ nxt200x_writetuner(state, buf);
+ }
/* reset the agc now that tuning has been completed */
nxt200x_agc_reset(state);
diff --git a/drivers/media/dvb/frontends/nxt200x.h b/drivers/media/dvb/frontends/nxt200x.h
index 28bc5591b319..bb0ef58d7972 100644
--- a/drivers/media/dvb/frontends/nxt200x.h
+++ b/drivers/media/dvb/frontends/nxt200x.h
@@ -38,9 +38,6 @@ struct nxt200x_config
/* the demodulator's i2c address */
u8 demod_address;
- /* used to set pll input */
- int (*set_pll_input)(u8* buf, int input);
-
/* need to set device param for start_dma */
int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
};
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 4e0aca7c67aa..3cc8b444b8f2 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -45,7 +45,6 @@
#include "dvb_math.h"
#include "dvb_frontend.h"
-#include "dvb-pll.h"
#include "or51132.h"
static int debug;
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index 048d7cfe12d3..f46d5a46683a 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -223,38 +223,13 @@ static int or51211_set_parameters(struct dvb_frontend* fe,
struct dvb_frontend_parameters *param)
{
struct or51211_state* state = fe->demodulator_priv;
- u32 freq = 0;
- u16 tunerfreq = 0;
- u8 buf[4];
/* Change only if we are actually changing the channel */
if (state->current_frequency != param->frequency) {
- freq = 44000 + (param->frequency/1000);
- tunerfreq = freq * 16/1000;
-
- dprintk("set_parameters frequency = %d (tunerfreq = %d)\n",
- param->frequency,tunerfreq);
-
- buf[0] = (tunerfreq >> 8) & 0x7F;
- buf[1] = (tunerfreq & 0xFF);
- buf[2] = 0x8E;
-
- if (param->frequency < 157250000) {
- buf[3] = 0xA0;
- dprintk("set_parameters VHF low range\n");
- } else if (param->frequency < 454000000) {
- buf[3] = 0x90;
- dprintk("set_parameters VHF high range\n");
- } else {
- buf[3] = 0x30;
- dprintk("set_parameters UHF range\n");
+ if (fe->ops.tuner_ops.set_params) {
+ fe->ops.tuner_ops.set_params(fe, param);
+ if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
}
- dprintk("set_parameters tuner bytes: 0x%02x 0x%02x "
- "0x%02x 0x%02x\n",buf[0],buf[1],buf[2],buf[3]);
-
- if (i2c_writebytes(state,0xC2>>1,buf,4))
- printk(KERN_WARNING "or51211:set_parameters error "
- "writing to tuner\n");
/* Set to ATSC mode */
or51211_setmode(fe,0);
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 18768d2f6d40..6c607302c1b6 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -249,7 +249,7 @@ static int stv0299_get_symbolrate (struct stv0299_state* state)
dprintk ("%s\n", __FUNCTION__);
stv0299_readregs (state, 0x1f, sfr, 3);
- stv0299_readregs (state, 0x1a, &rtf, 1);
+ stv0299_readregs (state, 0x1a, (u8 *)&rtf, 1);
srate = (sfr[0] << 8) | sfr[1];
srate *= Mclk;
diff --git a/drivers/media/dvb/frontends/tda10023.c b/drivers/media/dvb/frontends/tda10023.c
index da796e784be3..4bb06f97938b 100644
--- a/drivers/media/dvb/frontends/tda10023.c
+++ b/drivers/media/dvb/frontends/tda10023.c
@@ -478,7 +478,7 @@ struct dvb_frontend* tda10023_attach(const struct tda1002x_config* config,
state->i2c = i2c;
memcpy(&state->frontend.ops, &tda10023_ops, sizeof(struct dvb_frontend_ops));
state->pwm = pwm;
- for (i=0; i < sizeof(tda10023_inittab)/sizeof(*tda10023_inittab);i+=3) {
+ for (i=0; i < ARRAY_SIZE(tda10023_inittab);i+=3) {
if (tda10023_inittab[i] == 0x00) {
state->reg0 = tda10023_inittab[i+2];
break;
diff --git a/drivers/media/dvb/pluto2/Makefile b/drivers/media/dvb/pluto2/Makefile
index ce6a9aaf937e..7ac128724df8 100644
--- a/drivers/media/dvb/pluto2/Makefile
+++ b/drivers/media/dvb/pluto2/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_PLUTO2) += pluto2.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index 7751628e1415..6d53289b3276 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -108,7 +108,7 @@ config DVB_BUDGET_AV
tristate "Budget cards with analog video inputs"
depends on DVB_CORE && PCI && I2C && VIDEO_V4L1
select VIDEO_SAA7146_VV
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_STV0299 if !DVB_FE_CUSTOMISE
select DVB_TDA1004X if !DVB_FE_CUSTOMISE
select DVB_TDA10021 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile
index aa85ecdc6c80..2c1145236ee6 100644
--- a/drivers/media/dvb/ttpci/Makefile
+++ b/drivers/media/dvb/ttpci/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_DVB_BUDGET_CI) += budget-core.o budget-ci.o ttpci-eeprom.o
obj-$(CONFIG_DVB_BUDGET_PATCH) += budget-core.o budget-patch.o ttpci-eeprom.o
obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o ttpci-eeprom.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
hostprogs-y := fdump
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index ef1108c0bf11..8178832d14a8 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -137,6 +137,15 @@ static void init_av7110_av(struct av7110 *av7110)
if (ret < 0)
printk("dvb-ttpci:cannot set internal volume to maximum:%d\n",ret);
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType,
+ 1, (u16) av7110->display_ar);
+ if (ret < 0)
+ printk("dvb-ttpci: unable to set aspect ratio\n");
+ ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
+ 1, av7110->display_panscan);
+ if (ret < 0)
+ printk("dvb-ttpci: unable to set pan scan\n");
+
ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 2, wss_cfg_4_3);
if (ret < 0)
printk("dvb-ttpci: unable to configure 4:3 wss\n");
@@ -2258,7 +2267,7 @@ static int frontend_init(struct av7110 *av7110)
FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_send_master_cmd, av7110->fe_diseqc_send_master_cmd, av7110_fe_diseqc_send_master_cmd);
FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_send_burst, av7110->fe_diseqc_send_burst, av7110_fe_diseqc_send_burst);
FE_FUNC_OVERRIDE(av7110->fe->ops.set_tone, av7110->fe_set_tone, av7110_fe_set_tone);
- FE_FUNC_OVERRIDE(av7110->fe->ops.set_voltage, av7110->fe_set_voltage, av7110_fe_set_voltage;)
+ FE_FUNC_OVERRIDE(av7110->fe->ops.set_voltage, av7110->fe_set_voltage, av7110_fe_set_voltage);
FE_FUNC_OVERRIDE(av7110->fe->ops.dishnetwork_send_legacy_command, av7110->fe_dishnetwork_send_legacy_command, av7110_fe_dishnetwork_send_legacy_command);
FE_FUNC_OVERRIDE(av7110->fe->ops.set_frontend, av7110->fe_set_frontend, av7110_fe_set_frontend);
@@ -2639,12 +2648,12 @@ static int __devinit av7110_attach(struct saa7146_dev* dev,
av7110->mixer.volume_left = volume;
av7110->mixer.volume_right = volume;
- init_av7110_av(av7110);
-
ret = av7110_register(av7110);
if (ret < 0)
goto err_arm_thread_stop_10;
+ init_av7110_av(av7110);
+
/* special case DVB-C: these cards have an analog tuner
plus need some special handling, so we have separate
saa7146_ext_vv data for these... */
diff --git a/drivers/media/dvb/ttpci/av7110.h b/drivers/media/dvb/ttpci/av7110.h
index 115002b0390c..0cb439527498 100644
--- a/drivers/media/dvb/ttpci/av7110.h
+++ b/drivers/media/dvb/ttpci/av7110.h
@@ -194,6 +194,7 @@ struct av7110 {
int video_blank;
struct video_status videostate;
+ u16 display_panscan;
int display_ar;
int trickmode;
#define TRICK_NONE 0
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 58678c05aa53..d75e7e48addc 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -391,7 +391,7 @@ static int get_video_format(struct av7110 *av7110, u8 *buf, int count)
****************************************************************************/
static inline long aux_ring_buffer_write(struct dvb_ringbuffer *rbuf,
- const char *buf, unsigned long count)
+ const u8 *buf, unsigned long count)
{
unsigned long todo = count;
int free;
@@ -436,7 +436,7 @@ static void play_audio_cb(u8 *buf, int count, void *priv)
#define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \
dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
-static ssize_t dvb_play(struct av7110 *av7110, const u8 __user *buf,
+static ssize_t dvb_play(struct av7110 *av7110, const char __user *buf,
unsigned long count, int nonblock, int type)
{
unsigned long todo = count, n;
@@ -499,7 +499,7 @@ static ssize_t dvb_play_kernel(struct av7110 *av7110, const u8 *buf,
return count - todo;
}
-static ssize_t dvb_aplay(struct av7110 *av7110, const u8 __user *buf,
+static ssize_t dvb_aplay(struct av7110 *av7110, const char __user *buf,
unsigned long count, int nonblock, int type)
{
unsigned long todo = count, n;
@@ -959,7 +959,7 @@ static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x
#define MIN_IFRAME 400000
-static int play_iframe(struct av7110 *av7110, u8 __user *buf, unsigned int len, int nonblock)
+static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock)
{
int i, n;
@@ -1082,19 +1082,18 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
case VIDEO_SET_DISPLAY_FORMAT:
{
video_displayformat_t format = (video_displayformat_t) arg;
- u16 val = 0;
switch (format) {
case VIDEO_PAN_SCAN:
- val = VID_PAN_SCAN_PREF;
+ av7110->display_panscan = VID_PAN_SCAN_PREF;
break;
case VIDEO_LETTER_BOX:
- val = VID_VC_AND_PS_PREF;
+ av7110->display_panscan = VID_VC_AND_PS_PREF;
break;
case VIDEO_CENTER_CUT_OUT:
- val = VID_CENTRE_CUT_PREF;
+ av7110->display_panscan = VID_CENTRE_CUT_PREF;
break;
default:
@@ -1104,7 +1103,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
break;
av7110->videostate.display_format = format;
ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
- 1, (u16) val);
+ 1, av7110->display_panscan);
break;
}
@@ -1466,8 +1465,9 @@ int av7110_av_register(struct av7110 *av7110)
av7110->videostate.play_state = VIDEO_STOPPED;
av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
av7110->videostate.video_format = VIDEO_FORMAT_4_3;
- av7110->videostate.display_format = VIDEO_CENTER_CUT_OUT;
+ av7110->videostate.display_format = VIDEO_LETTER_BOX;
av7110->display_ar = VIDEO_FORMAT_4_3;
+ av7110->display_panscan = VID_VC_AND_PS_PREF;
init_waitqueue_head(&av7110->video_events.wait_queue);
spin_lock_init(&av7110->video_events.lock);
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index e1c1294bb767..c58e3fc509ed 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -151,7 +151,7 @@ static ssize_t ci_ll_write(struct dvb_ringbuffer *cibuf, struct file *file,
{
int free;
int non_blocking = file->f_flags & O_NONBLOCK;
- char *page = (char *)__get_free_page(GFP_USER);
+ u8 *page = (u8 *)__get_free_page(GFP_USER);
int res;
if (!page)
@@ -208,7 +208,7 @@ static ssize_t ci_ll_read(struct dvb_ringbuffer *cibuf, struct file *file,
return -EINVAL;
DVB_RINGBUFFER_SKIP(cibuf, 2);
- return dvb_ringbuffer_read(cibuf, buf, len, 1);
+ return dvb_ringbuffer_read(cibuf, (u8 *)buf, len, 1);
}
static int dvb_ca_open(struct inode *inode, struct file *file)
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 70aee4eb5da4..515e8232e020 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -158,7 +158,7 @@ static int load_dram(struct av7110 *av7110, u32 *data, int len)
}
dprintk(4, "writing DRAM block %d\n", i);
mwdebi(av7110, DEBISWAB, bootblock,
- ((char*)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
+ ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
bootblock ^= 0x1400;
iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, AV7110_BOOT_MAX_SIZE, 2);
@@ -173,10 +173,10 @@ static int load_dram(struct av7110 *av7110, u32 *data, int len)
}
if (rest > 4)
mwdebi(av7110, DEBISWAB, bootblock,
- ((char*)data) + i * AV7110_BOOT_MAX_SIZE, rest);
+ ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, rest);
else
mwdebi(av7110, DEBISWAB, bootblock,
- ((char*)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
+ ((u8 *)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, rest, 2);
@@ -751,7 +751,7 @@ static int FlushText(struct av7110 *av7110)
return 0;
}
-static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, u8* buf)
+static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, char *buf)
{
int i, ret;
unsigned long start;
diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h
index 673d9b3f064c..74d940f75da6 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.h
+++ b/drivers/media/dvb/ttpci/av7110_hw.h
@@ -393,7 +393,7 @@ static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val,
}
/* buffer writes */
-static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, char *val, int count)
+static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, u8 *val, int count)
{
memcpy(av7110->debi_virt, val, count);
av7110_debiwrite(av7110, config, addr, 0, count);
diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c
index a97f166bb523..6322800ee12b 100644
--- a/drivers/media/dvb/ttpci/av7110_ir.c
+++ b/drivers/media/dvb/ttpci/av7110_ir.c
@@ -356,7 +356,7 @@ int __devinit av7110_ir_init(struct av7110 *av7110)
input_dev->id.vendor = av7110->dev->pci->vendor;
input_dev->id.product = av7110->dev->pci->device;
}
- input_dev->cdev.dev = &av7110->dev->pci->dev;
+ input_dev->dev.parent = &av7110->dev->pci->dev;
/* initial keymap */
memcpy(av7110->ir.key_map, default_key_map, sizeof av7110->ir.key_map);
input_register_keys(&av7110->ir);
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index fcd9994058d0..87afaebc0703 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -333,7 +333,7 @@ static int av7110_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
return -EINVAL;
memset(t, 0, sizeof(*t));
- strcpy(t->name, "Television");
+ strcpy((char *)t->name, "Television");
t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 0e817d6f1ce5..0aee7a13a070 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -828,29 +828,6 @@ static u8 philips_sd1878_inittab[] = {
0xff, 0xff
};
-static int philips_sd1878_tda8261_tuner_set_params(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *params)
-{
- u8 buf[4];
- int rc;
- struct i2c_msg tuner_msg = {.addr=0x60,.flags=0,.buf=buf,.len=sizeof(buf)};
- struct budget *budget = (struct budget *) fe->dvb->priv;
-
- if((params->frequency < 950000) || (params->frequency > 2150000))
- return -EINVAL;
-
- rc=dvb_pll_configure(&dvb_pll_philips_sd1878_tda8261, buf,
- params->frequency, 0);
- if(rc < 0) return rc;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if(i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
u32 srate, u32 ratio)
{
@@ -921,6 +898,7 @@ static u8 read_pwm(struct budget_av *budget_av)
#define SUBID_DVBS_TV_STAR 0x0014
#define SUBID_DVBS_TV_STAR_CI 0x0016
#define SUBID_DVBS_EASYWATCH_1 0x001a
+#define SUBID_DVBS_EASYWATCH_2 0x001b
#define SUBID_DVBS_EASYWATCH 0x001e
#define SUBID_DVBC_EASYWATCH 0x002a
@@ -982,10 +960,13 @@ static void frontend_init(struct budget_av *budget_av)
case SUBID_DVBS_TV_STAR_CI:
case SUBID_DVBS_CYNERGY1200N:
case SUBID_DVBS_EASYWATCH:
+ case SUBID_DVBS_EASYWATCH_2:
fe = dvb_attach(stv0299_attach, &philips_sd1878_config,
&budget_av->budget.i2c_adap);
if (fe) {
- fe->ops.tuner_ops.set_params = philips_sd1878_tda8261_tuner_set_params;
+ dvb_attach(dvb_pll_attach, fe, 0x60,
+ &budget_av->budget.i2c_adap,
+ DVB_PLL_PHILIPS_SD1878_TDA8261);
}
break;
@@ -1264,6 +1245,7 @@ MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
+MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S);
MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3);
MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
@@ -1287,6 +1269,7 @@ static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
+ MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b),
MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c),
MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 9d42f88ebb0e..873c3ba296f2 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -206,7 +206,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
input_dev->id.vendor = saa->pci->vendor;
input_dev->id.product = saa->pci->device;
}
- input_dev->cdev.dev = &saa->pci->dev;
+ input_dev->dev.parent = &saa->pci->dev;
/* Select keymap and address */
switch (budget_ci->budget.dev->pci->subsystem_device) {
diff --git a/drivers/media/dvb/ttusb-budget/Makefile b/drivers/media/dvb/ttusb-budget/Makefile
index 6ab97f6b53fc..fbe2b9514c21 100644
--- a/drivers/media/dvb/ttusb-budget/Makefile
+++ b/drivers/media/dvb/ttusb-budget/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_TTUSB_BUDGET) += dvb-ttusb-budget.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends
diff --git a/drivers/media/dvb/ttusb-dec/Makefile b/drivers/media/dvb/ttusb-dec/Makefile
index b41bf1f06a9f..2d70a8269391 100644
--- a/drivers/media/dvb/ttusb-dec/Makefile
+++ b/drivers/media/dvb/ttusb-dec/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_TTUSB_DEC) += ttusb_dec.o ttusbdecfe.o
-EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 194b102140ef..f8bf9fe37d36 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -324,8 +324,8 @@ config RADIO_ZOLTRIX_PORT
Enter the I/O port of your Zoltrix radio card.
config USB_DSBR
- tristate "D-Link USB FM radio support (EXPERIMENTAL)"
- depends on USB && VIDEO_V4L2 && EXPERIMENTAL
+ tristate "D-Link/GemTek USB FM radio support"
+ depends on USB && VIDEO_V4L2
---help---
Say Y here if you want to connect this type of radio to your
computer's USB port. Note that the audio is not digital, and
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 5adc27c3ced9..f0a67e93d7fd 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -63,7 +63,7 @@ struct rt_device
static void sleep_delay(long n)
{
/* Sleep nicely for 'n' uS */
- int d=n/(1000000/HZ);
+ int d=n/msecs_to_jiffies(1000);
if(!d)
udelay(n);
else
@@ -392,7 +392,6 @@ static struct video_device rtrack_radio=
.owner = THIS_MODULE,
.name = "RadioTrack radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &rtrack_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 9f1addae6928..9b1f7a99dac0 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -355,7 +355,6 @@ static struct video_device aztech_radio=
.owner = THIS_MODULE,
.name = "Aztech radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &aztech_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 8cf2e9df5c8a..34e317ced5a3 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -329,7 +329,7 @@ cadet_handler(unsigned long data)
init_timer(&readtimer);
readtimer.function=cadet_handler;
readtimer.data=(unsigned long)0;
- readtimer.expires=jiffies+(HZ/20);
+ readtimer.expires=jiffies+msecs_to_jiffies(50);
add_timer(&readtimer);
}
@@ -349,7 +349,7 @@ cadet_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
init_timer(&readtimer);
readtimer.function=cadet_handler;
readtimer.data=(unsigned long)0;
- readtimer.expires=jiffies+(HZ/20);
+ readtimer.expires=jiffies+msecs_to_jiffies(50);
add_timer(&readtimer);
}
if(rdsin==rdsout) {
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index fdf5d6e46eac..99a323131333 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -94,8 +94,6 @@ struct gemtek_pci_card {
u32 iobase;
u32 length;
- u8 chiprev;
- u16 model;
u32 current_frequency;
u8 mute;
@@ -378,7 +376,6 @@ static struct video_device vdev_template = {
.owner = THIS_MODULE,
.name = "Gemtek PCI Radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &gemtek_pci_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
@@ -415,9 +412,6 @@ static int __devinit gemtek_pci_probe( struct pci_dev *pci_dev, const struct pci
goto err_pci;
}
- pci_read_config_byte( pci_dev, PCI_REVISION_ID, &card->chiprev );
- pci_read_config_word( pci_dev, PCI_SUBSYSTEM_ID, &card->model );
-
pci_set_drvdata( pci_dev, card );
if ( (devradio = kmalloc( sizeof( struct video_device ), GFP_KERNEL )) == NULL ) {
@@ -436,7 +430,7 @@ static int __devinit gemtek_pci_probe( struct pci_dev *pci_dev, const struct pci
gemtek_pci_mute( card );
printk( KERN_INFO "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n",
- card->chiprev, card->iobase, card->iobase + card->length - 1 );
+ pci_dev->revision, card->iobase, card->iobase + card->length - 1 );
return 0;
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index b04b6a7fff7c..eab8c80a2e47 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -330,7 +330,6 @@ static struct video_device gemtek_radio=
.owner = THIS_MODULE,
.name = "GemTek radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &gemtek_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 9b493b3298cd..82aedfc95d4f 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -297,7 +297,6 @@ static struct video_device rtrack2_radio=
.owner = THIS_MODULE,
.name = "RadioTrack II radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &rtrack2_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index dc33f19c0e2c..395165367f37 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -297,7 +297,6 @@ static struct video_device fmi_radio=
.owner = THIS_MODULE,
.name = "SF16FMx radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &fmi_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index e6c125def5cb..c432c44bd634 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -442,7 +442,6 @@ static struct video_device fmr2_radio=
.owner = THIS_MODULE,
.name = "SF16FMR2 radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &fmr2_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index e43acfd7e533..7e1911c3d54e 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -369,7 +369,6 @@ static struct video_device terratec_radio=
.owner = THIS_MODULE,
.name = "TerraTec ActiveRadio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &terratec_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index c27c629d99df..c11981fed827 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -349,7 +349,6 @@ static struct video_device trust_radio=
.owner = THIS_MODULE,
.name = "Trust FM Radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &trust_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 8ff5a23a9f01..1366326474e5 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -349,7 +349,6 @@ static struct video_device typhoon_radio =
.owner = THIS_MODULE,
.name = "Typhoon Radio",
.type = VID_TYPE_TUNER,
- .hardware = 0,
.fops = &typhoon_fops,
.vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = vidioc_g_tuner,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 4d45a40016de..9dcbffd0aa15 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -489,6 +489,15 @@ config TUNER_3036
Say Y here to include support for Philips SAB3036 compatible tuners.
If in doubt, say N.
+config TUNER_TEA5761
+ bool "TEA 5761 radio tuner (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on I2C
+ select VIDEO_TUNER
+ help
+ Say Y here to include support for Philips TEA5761 radio tuner.
+ If in doubt, say N.
+
config VIDEO_VINO
tristate "SGI Vino Video For Linux (EXPERIMENTAL)"
depends on I2C && SGI_IP22 && EXPERIMENTAL && VIDEO_V4L2
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 9c2de501612f..10b4d4469016 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -7,6 +7,8 @@ zr36067-objs := zoran_procfs.o zoran_device.o \
tuner-objs := tuner-core.o tuner-types.o tuner-simple.o \
mt20xx.o tda8290.o tea5767.o tda9887.o
+tuner-$(CONFIG_TUNER_TEA5761) += tea5761.o
+
msp3400-objs := msp3400-driver.o msp3400-kthreads.o
obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o compat_ioctl32.o
@@ -16,7 +18,7 @@ ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y)
endif
obj-$(CONFIG_VIDEO_BT848) += bt8xx/
-obj-$(CONFIG_VIDEO_BT848) += ir-kbd-i2c.o
+obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
@@ -59,7 +61,7 @@ obj-$(CONFIG_VIDEO_CPIA) += cpia.o
obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
obj-$(CONFIG_VIDEO_CPIA_USB) += cpia_usb.o
obj-$(CONFIG_VIDEO_MEYE) += meye.o
-obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
+obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_IVTV) += ivtv/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 823cd6cc471e..cbab53fc6243 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -38,23 +38,23 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_encoder.h>
MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(x) (x)->name
-#include <linux/video_encoder.h>
static int debug = 0;
module_param(debug, int, 0);
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index 05c7820fe53e..0d0c554bfdf7 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -34,23 +34,23 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_encoder.h>
MODULE_DESCRIPTION("Analog Devices ADV7175 video encoder driver");
MODULE_AUTHOR("Dave Perks");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(s) (s)->name
-#include <linux/video_encoder.h>
static int debug = 0;
module_param(debug, int, 0);
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index 59a43603b5cb..12d1b9248be5 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -38,23 +38,24 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_decoder.h>
+
MODULE_DESCRIPTION("Brooktree-819 video decoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(s) (s)->name
-#include <linux/video_decoder.h>
static int debug = 0;
module_param(debug, int, 0);
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index 853b1a3d6a1d..e1028a76c042 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -38,23 +38,23 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/video_encoder.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
MODULE_DESCRIPTION("Brooktree-856A video encoder driver");
MODULE_AUTHOR("Mike Bernson & Dave Perks");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(s) (s)->name
-#include <linux/video_encoder.h>
static int debug = 0;
module_param(debug, int, 0);
diff --git a/drivers/media/video/bt866.c b/drivers/media/video/bt866.c
index 2e4cf1efdd21..b767b098d14b 100644
--- a/drivers/media/video/bt866.c
+++ b/drivers/media/video/bt866.c
@@ -257,7 +257,7 @@ static int bt866_write(struct bt866 *encoder,
printk(KERN_WARNING "%s: I/O error #%d "
"(write 0x%02x/0x%02x)\n",
encoder->i2c->name, err, encoder->addr, subaddr);
- schedule_timeout_interruptible(HZ/10);
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
}
if (err == 3) {
printk(KERN_WARNING "%s: giving up\n",
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 6b31e50fb951..387cb2122d4f 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -178,8 +178,8 @@ static struct CARD {
/* this seems to happen as well ... */
{ 0xff1211bd, BTTV_BOARD_PINNACLE, "Pinnacle PCTV" },
- { 0x3000121a, BTTV_BOARD_VOODOOTV_FM, "3Dfx VoodooTV FM/ VoodooTV 200" },
- { 0x263710b4, BTTV_BOARD_VOODOOTV_FM, "3Dfx VoodooTV FM/ VoodooTV 200" },
+ { 0x3000121a, BTTV_BOARD_VOODOOTV_200, "3Dfx VoodooTV 200" },
+ { 0x263710b4, BTTV_BOARD_VOODOOTV_FM, "3Dfx VoodooTV FM" },
{ 0x3060121a, BTTV_BOARD_STB2, "3Dfx VoodooTV 100/ STB OEM" },
{ 0x3000144f, BTTV_BOARD_MAGICTVIEW063, "(Askey Magic/others) TView99 CPH06x" },
@@ -313,6 +313,7 @@ static struct CARD {
{ 0xdb1118ac, BTTV_BOARD_DVICO_DVBT_LITE, "Ultraview DVB-T Lite" },
{ 0xd50018ac, BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE, "DViCO FusionHDTV 5 Lite" },
{ 0x00261822, BTTV_BOARD_TWINHAN_DST, "DNTV Live! Mini "},
+ { 0xd200dbc0, BTTV_BOARD_DVICO_FUSIONHDTV_2, "DViCO FusionHDTV 2" },
{ 0, -1, NULL }
};
@@ -329,7 +330,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.muxsel = { 2, 3, 1, 0 },
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -344,7 +345,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -359,7 +360,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -387,13 +388,13 @@ struct tvcard bttv_tvcards[] = {
.name = "Intel Create and Share PCI/ Smart Video Recorder III",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 2,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 0 },
.needs_tvaudio = 0,
- .tuner_type = 4,
+ .tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -408,7 +409,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 1, 0, 1 },
.gpiomute = 3,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -423,7 +424,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x0c, 0x04, 0x08, 0x04 },
/* 0x04 for some cards ?? */
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_hook = avermedia_tvphone_audio,
@@ -433,13 +434,13 @@ struct tvcard bttv_tvcards[] = {
.name = "MATRIX-Vision MV-Delta",
.video_inputs = 5,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 3,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 0, 0 },
.gpiomux = { 0 },
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -457,7 +458,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0xc00,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -488,7 +489,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 4,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -503,7 +504,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -519,7 +520,7 @@ struct tvcard bttv_tvcards[] = {
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 13, 14, 11, 7 },
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -553,7 +554,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 4,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -568,7 +569,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0, 1, 0 },
.gpiomute = 10,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -587,7 +588,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x002000,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
},
[BTTV_BOARD_WINVIEW_601] = {
.name = "Leadtek WinView 601",
@@ -600,7 +601,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
.gpiomute = 0xcfa007,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_hook = winview_audio,
@@ -616,7 +617,7 @@ struct tvcard bttv_tvcards[] = {
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 1, 0, 0, 0 },
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -624,13 +625,13 @@ struct tvcard bttv_tvcards[] = {
.name = "Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)",
.video_inputs = 4,
.audio_inputs = 1,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0x8dff00,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 0 },
.no_msp34xx = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -643,7 +644,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.muxsel = { 2, 3, 1, 1 },
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -674,7 +675,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0xc00,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -683,7 +684,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 3,
.audio_inputs = 1,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 7,
.muxsel = { 2, 3, -1 },
.digital_mode = DIGITAL_MODE_CAMERA,
@@ -708,7 +709,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0xc00,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_remote = 1,
@@ -740,7 +741,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -813,13 +814,13 @@ struct tvcard bttv_tvcards[] = {
.name = "Imagenation PXC200",
.video_inputs = 5,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1, /* was: 4 */
.gpiomask = 0,
.muxsel = { 2, 3, 1, 0, 0},
.gpiomux = { 0 },
.needs_tvaudio = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.muxsel_hook = PXC200_muxsel,
@@ -836,7 +837,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0x0800, 0x1000, 0x1000 },
.gpiomute = 0x1800,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -860,13 +861,13 @@ struct tvcard bttv_tvcards[] = {
.name = "Intel Create and Share PCI/ Smart Video Recorder III",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 2,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 0 },
.needs_tvaudio = 0,
- .tuner_type = 4,
+ .tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -911,7 +912,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 0,
.pll = PLL_28,
.has_radio = 1,
- .tuner_type = 5, /* default for now, gpio reads BFFF06 for Pal bg+dk */
+ .tuner_type = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_hook = winfast2000_audio,
@@ -928,7 +929,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0x800, 0x1000, 0x1000 },
.gpiomute = 0x1800,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -945,7 +946,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0x800, 0x1000, 0x1000 },
.gpiomute = 0x1800,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_radio = 1,
@@ -962,7 +963,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x29,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -978,7 +979,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x551c00,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = 1,
+ .tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_remote = 1,
@@ -995,7 +996,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 1,
.needs_tvaudio = 0,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1030,7 +1031,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 13, 4, 11, 7 },
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_radio = 1,
@@ -1048,7 +1049,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = 1,
+ .tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1063,7 +1064,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0xff9ff6, 0xff9ff6, 0xff1ff7, 0 },
.gpiomute = 0xff3ffc,
.no_msp34xx = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1074,14 +1075,14 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 2,
.audio_inputs = 1,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 3,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 1, 1, 0, 2 },
.gpiomute = 3,
.no_msp34xx = 1,
.pll = PLL_NONE,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1089,14 +1090,14 @@ struct tvcard bttv_tvcards[] = {
.name = "MATRIX-Vision MV-Delta 2",
.video_inputs = 5,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 3,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 0, 0 },
.gpiomux = { 0 },
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1112,7 +1113,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0xbcb03f,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = 21,
+ .tuner_type = TUNER_TEMIC_4039FR5_NTSC,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1129,7 +1130,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_35,
- .tuner_type = 1,
+ .tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_radio = 1,
@@ -1148,7 +1149,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 1,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1206,7 +1207,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
.pll = PLL_28,
- .tuner_type = -1 /* TUNER_ALPS_TMDH2_NTSC */,
+ .tuner_type = UNSET /* TUNER_ALPS_TMDH2_NTSC */,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1234,7 +1235,7 @@ struct tvcard bttv_tvcards[] = {
1= FM stereo Radio from Tuner */
.needs_tvaudio = 0,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1277,7 +1278,7 @@ struct tvcard bttv_tvcards[] = {
0x0080: Tuner A2 SAP (second audio program = Zweikanalton)
0x0880: Tuner A2 stereo */
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1313,7 +1314,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0x800, 0x1000, 0x1000 },
.gpiomute = 0x1800,
.pll = PLL_28,
- .tuner_type = 5,
+ .tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1324,7 +1325,7 @@ struct tvcard bttv_tvcards[] = {
.name = "GrandTec 'Grand Video Capture' (Bt848)",
.video_inputs = 2,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.gpiomask = 0,
.muxsel = { 3, 1 },
@@ -1332,7 +1333,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_35,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1365,7 +1366,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 1,
.pll = PLL_28,
- .tuner_type = 0,
+ .tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1377,7 +1378,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 2,
.audio_inputs = 2,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 11,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 2, 0, 0, 1 },
@@ -1392,7 +1393,7 @@ struct tvcard bttv_tvcards[] = {
.name = "AG Electronics GMV1",
.video_inputs = 2,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.gpiomask = 0xF,
.muxsel = { 2, 2 },
@@ -1400,7 +1401,7 @@ struct tvcard bttv_tvcards[] = {
.no_msp34xx = 1,
.needs_tvaudio = 0,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1447,7 +1448,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 2,
.audio_inputs = 1,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 1,
.muxsel = { 2, 3, 0, 1 },
.gpiomux = { 0, 0, 1, 0 },
@@ -1476,7 +1477,7 @@ struct tvcard bttv_tvcards[] = {
.no_tda9875 = 1,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = 5,
+ .tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1517,13 +1518,35 @@ struct tvcard bttv_tvcards[] = {
/* ---- card 0x44 ---------------------------------- */
[BTTV_BOARD_VOODOOTV_FM] = {
- .name = "3Dfx VoodooTV FM (Euro), VoodooTV 200 (USA)",
+ .name = "3Dfx VoodooTV FM (Euro)",
+ /* try "insmod msp3400 simple=0" if you have
+ * sound problems with this card. */
+ .video_inputs = 4,
+ .audio_inputs = 1,
+ .tuner = 0,
+ .svhs = UNSET,
+ .gpiomask = 0x4f8a00,
+ /* 0x100000: 1=MSP enabled (0=disable again)
+ * 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */
+ .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff },
+ .gpiomute = 0x947fff,
+ /* tvtuner, radio, external,internal, mute, stereo
+ * tuner, Composit, SVid, Composit-on-Svid-adapter */
+ .muxsel = { 2, 3 ,0 ,1 },
+ .tuner_type = TUNER_MT2032,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .pll = PLL_28,
+ .has_radio = 1,
+ },
+ [BTTV_BOARD_VOODOOTV_200] = {
+ .name = "VoodooTV 200 (USA)",
/* try "insmod msp3400 simple=0" if you have
* sound problems with this card. */
.video_inputs = 4,
.audio_inputs = 1,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 0x4f8a00,
/* 0x100000: 1=MSP enabled (0=disable again)
* 0x010000: Connected to "S0" on tda9880 (0=Pal/BG, 1=NTSC) */
@@ -1543,8 +1566,8 @@ struct tvcard bttv_tvcards[] = {
.name = "Active Imaging AIMMS",
.video_inputs = 1,
.audio_inputs = 0,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1564,7 +1587,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 13,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = 25,
+ .tuner_type = TUNER_LG_PAL_I_FM,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_remote = 1,
@@ -1580,7 +1603,7 @@ struct tvcard bttv_tvcards[] = {
.name = "Lifeview FlyVideo 98EZ (capture only) LR51",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 2,
.muxsel = { 2, 3, 1, 1 }, /* AV1, AV2, SVHS, CVid adapter on SVHS */
.pll = PLL_28,
@@ -1606,7 +1629,7 @@ struct tvcard bttv_tvcards[] = {
.no_msp34xx = 1,
.no_tda9875 = 1,
.pll = PLL_28,
- .tuner_type = 5,
+ .tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_hook = pvbt878p9b_audio, /* Note: not all cards have stereo */
@@ -1626,13 +1649,13 @@ struct tvcard bttv_tvcards[] = {
.name = "Sensoray 311",
.video_inputs = 5,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 4,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 0, 0 },
.gpiomux = { 0 },
.needs_tvaudio = 0,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1641,15 +1664,15 @@ struct tvcard bttv_tvcards[] = {
.name = "RemoteVision MX (RV605)",
.video_inputs = 16,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0x00,
.gpiomask2 = 0x07ff,
.muxsel = { 0x33, 0x13, 0x23, 0x43, 0xf3, 0x73, 0xe3, 0x03,
0xd3, 0xb3, 0xc3, 0x63, 0x93, 0x53, 0x83, 0xa3 },
.no_msp34xx = 1,
.no_tda9875 = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.muxsel_hook = rv605_muxsel,
@@ -1693,15 +1716,15 @@ struct tvcard bttv_tvcards[] = {
.name = "GrandTec Multi Capture Card (Bt878)",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 0 },
.gpiomux = { 0 },
.needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1724,7 +1747,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = 5,
+ .tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
/* Samsung TCPA9095PC27A (BG+DK), philips compatible, w/FM, stereo and
@@ -1744,10 +1767,10 @@ struct tvcard bttv_tvcards[] = {
/* Arthur Tetzlaff-Deas, DSP Design Ltd <software@dspdesign.com> */
.name = "DSP Design TCVIDEO",
.video_inputs = 4,
- .svhs = -1,
+ .svhs = UNSET,
.muxsel = { 2, 3, 1, 0 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -1762,7 +1785,7 @@ struct tvcard bttv_tvcards[] = {
.muxsel = { 2, 0, 1, 1 },
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1791,11 +1814,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 100/150 (878)", /* 0x1(2|3)-45C6-C1 */
.video_inputs = 4, /* id-inputs-clock */
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 3,
.muxsel = { 3, 2, 0, 1 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1806,11 +1829,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 100/150 (848)", /* 0x04-54C0-C1 & older boards */
.video_inputs = 3,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 2,
.muxsel = { 2, 3, 1 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1823,11 +1846,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 101 (848)", /* 0x05-40C0-C1 */
.video_inputs = 2,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 3, 1 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1838,11 +1861,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 101/151", /* 0x1(4|5)-0004-C4 */
.video_inputs = 1,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.muxsel = { 0 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1853,11 +1876,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 101/151 w/ svid", /* 0x(16|17|20)-00C4-C1 */
.video_inputs = 2,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 0, 1 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1868,8 +1891,8 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 200/201/250/251", /* 0x1(8|9|E|F)-0004-C4 */
.video_inputs = 1,
.audio_inputs = 1,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.muxsel = { 0 },
.pll = PLL_28,
.tuner_type = UNSET,
@@ -1885,7 +1908,7 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 200/250", /* 0x1(A|B)-00C4-C1 */
.video_inputs = 2,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 0, 1 },
.pll = PLL_28,
@@ -1900,7 +1923,7 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 210/220/230", /* 0x1(A|B)-04C0-C1 */
.video_inputs = 2,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 2, 3 },
.pll = PLL_28,
@@ -1915,11 +1938,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 500", /* 500 */
.video_inputs = 2,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 2, 3 },
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1930,9 +1953,9 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 540", /* 540 */
.video_inputs = 4,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -1945,7 +1968,7 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 2000", /* 2000 */
.video_inputs = 2,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 2, 3 },
.pll = PLL_28,
@@ -1961,11 +1984,11 @@ struct tvcard bttv_tvcards[] = {
.name = "IDS Eagle",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 0,
.muxsel = { 0, 1, 2, 3 },
.muxsel_hook = eagle_muxsel,
@@ -1978,8 +2001,8 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 2,
.audio_inputs = 0,
.svhs = 1,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.no_msp34xx = 1,
@@ -2020,13 +2043,13 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 3,
.audio_inputs = 1,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 7,
.muxsel = { 2, 3, 1, 1},
.gpiomux = { 0, 1, 2, 3},
.gpiomute = 4,
.needs_tvaudio = 1,
- .tuner_type = 5,
+ .tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -2035,7 +2058,7 @@ struct tvcard bttv_tvcards[] = {
.name = "Euresys Picolo",
.video_inputs = 3,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 2,
.gpiomask = 0,
.no_msp34xx = 1,
@@ -2052,8 +2075,8 @@ struct tvcard bttv_tvcards[] = {
.name = "ProVideo PV150", /* 0x4f */
.video_inputs = 2,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0,
.muxsel = { 2, 3 },
.gpiomux = { 0 },
@@ -2080,7 +2103,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = 2,
+ .tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_hook = adtvk503_audio,
@@ -2098,7 +2121,7 @@ struct tvcard bttv_tvcards[] = {
.needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = 5,
+ .tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
/* Notes:
@@ -2121,7 +2144,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.no_tda9875 = 1,
.no_tda7432 = 1,
- .tuner_type = 1,
+ .tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_radio = 1,
@@ -2138,11 +2161,11 @@ struct tvcard bttv_tvcards[] = {
.name = "IVC-200",
.video_inputs = 1,
.audio_inputs = 0,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 0xdf,
.muxsel = { 2 },
.pll = PLL_28,
@@ -2151,9 +2174,9 @@ struct tvcard bttv_tvcards[] = {
.name = "Grand X-Guard / Trust 814PCI",
.video_inputs = 16,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
- .tuner_type = 4,
+ .tuner = UNSET,
+ .svhs = UNSET,
+ .tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.gpiomask2 = 0xff,
@@ -2169,14 +2192,14 @@ struct tvcard bttv_tvcards[] = {
[BTTV_BOARD_NEBULA_DIGITV] = {
.name = "Nebula Electronics DigiTV",
.video_inputs = 1,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.muxsel = { 2, 3, 1, 0 },
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_dvb = 1,
@@ -2189,15 +2212,15 @@ struct tvcard bttv_tvcards[] = {
.name = "ProVideo PV143",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0,
.muxsel = { 2, 3, 1, 0 },
.gpiomux = { 0 },
.needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2206,14 +2229,14 @@ struct tvcard bttv_tvcards[] = {
.name = "PHYTEC VD-009-X1 MiniDIN (bt878)",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1, /* card has no tuner */
+ .tuner = UNSET, /* card has no tuner */
.svhs = 3,
.gpiomask = 0x00,
.muxsel = { 2, 3, 1, 0 },
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2221,14 +2244,14 @@ struct tvcard bttv_tvcards[] = {
.name = "PHYTEC VD-009-X1 Combi (bt878)",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1, /* card has no tuner */
+ .tuner = UNSET, /* card has no tuner */
.svhs = 3,
.gpiomask = 0x00,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2238,7 +2261,7 @@ struct tvcard bttv_tvcards[] = {
.name = "PHYTEC VD-009 MiniDIN (bt878)",
.video_inputs = 10,
.audio_inputs = 0,
- .tuner = -1, /* card has no tuner */
+ .tuner = UNSET, /* card has no tuner */
.svhs = 9,
.gpiomask = 0x00,
.gpiomask2 = 0x03, /* gpiomask2 defines the bits used to switch audio
@@ -2248,7 +2271,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2256,7 +2279,7 @@ struct tvcard bttv_tvcards[] = {
.name = "PHYTEC VD-009 Combi (bt878)",
.video_inputs = 10,
.audio_inputs = 0,
- .tuner = -1, /* card has no tuner */
+ .tuner = UNSET, /* card has no tuner */
.svhs = 9,
.gpiomask = 0x00,
.gpiomask2 = 0x03, /* gpiomask2 defines the bits used to switch audio
@@ -2266,7 +2289,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2274,11 +2297,11 @@ struct tvcard bttv_tvcards[] = {
.name = "IVC-100",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 0xdf,
.muxsel = { 2, 3, 1, 0 },
.pll = PLL_28,
@@ -2288,11 +2311,11 @@ struct tvcard bttv_tvcards[] = {
.name = "IVC-120G",
.video_inputs = 16,
.audio_inputs = 0, /* card has no audio */
- .tuner = -1, /* card has no tuner */
- .tuner_type = -1,
+ .tuner = UNSET, /* card has no tuner */
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .svhs = -1, /* card has no svhs */
+ .svhs = UNSET, /* card has no svhs */
.needs_tvaudio = 0,
.no_msp34xx = 1,
.no_tda9875 = 1,
@@ -2333,7 +2356,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 3,
.audio_inputs = 0,
.svhs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.muxsel = { 3, 1, 1, 3 }, /* Vid In, SVid In, Vid over SVid in connector */
.no_msp34xx = 1,
.no_tda9875 = 1,
@@ -2364,9 +2387,9 @@ struct tvcard bttv_tvcards[] = {
.name = "SIMUS GVC1100",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -2395,14 +2418,14 @@ struct tvcard bttv_tvcards[] = {
.name = "LMLBT4",
.video_inputs = 4, /* IN1,IN2,IN3,IN4 */
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.muxsel = { 2, 3, 1, 0 },
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
.needs_tvaudio = 0,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2452,8 +2475,8 @@ struct tvcard bttv_tvcards[] = {
.name = "Euresys Picolo Tetra",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0,
.gpiomask2 = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
.no_msp34xx = 1,
@@ -2464,7 +2487,7 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.needs_tvaudio = 0,
.muxsel_hook = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2490,7 +2513,7 @@ struct tvcard bttv_tvcards[] = {
.name = "AVerMedia AVerTV DVB-T 771",
.video_inputs = 2,
.svhs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2509,14 +2532,14 @@ struct tvcard bttv_tvcards[] = {
/* Based on the Nebula card data - added remote and new card number - BTTV_BOARD_AVDVBT_761, see also ir-kbd-gpio.c */
.name = "AverMedia AverTV DVB-T 761",
.video_inputs = 2,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 3, 1, 2, 0 }, /* Comp0, S-Video, ?, ? */
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.has_dvb = 1,
@@ -2528,8 +2551,8 @@ struct tvcard bttv_tvcards[] = {
.name = "MATRIX Vision Sigma-SQ",
.video_inputs = 16,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0x0,
.muxsel = { 2, 2, 2, 2, 2, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3 },
@@ -2537,7 +2560,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 0 },
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2546,15 +2569,15 @@ struct tvcard bttv_tvcards[] = {
.name = "MATRIX Vision Sigma-SLC",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0x0,
.muxsel = { 2, 2, 2, 2 },
.muxsel_hook = sigmaSLC_muxsel,
.gpiomux = { 0 },
.no_msp34xx = 1,
.pll = PLL_28,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2566,7 +2589,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 2,
.audio_inputs = 1,
.tuner = 0,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 0xFF,
.muxsel = { 2, 3, 1, 1 },
.gpiomux = { 2, 0, 0, 0 },
@@ -2584,14 +2607,14 @@ struct tvcard bttv_tvcards[] = {
[BTTV_BOARD_DVICO_DVBT_LITE] = {
/* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
.name = "DViCO FusionHDTV DVB-T Lite",
- .tuner = -1,
+ .tuner = UNSET,
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
.pll = PLL_28,
.no_video = 1,
.has_dvb = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2634,14 +2657,14 @@ struct tvcard bttv_tvcards[] = {
.name = "Tibet Systems 'Progress DVR' CS16",
.video_inputs = 16,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.muxsel = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
.pll = PLL_28,
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.muxsel_hook = tibetCS16_muxsel,
@@ -2661,11 +2684,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Kodicom 4400R (master)",
.video_inputs = 16,
.audio_inputs = 0,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .svhs = -1,
+ .svhs = UNSET,
/* GPIO bits 0-9 used for analog switch:
* 00 - 03: camera selector
* 04 - 06: channel (controller) selector
@@ -2693,11 +2716,11 @@ struct tvcard bttv_tvcards[] = {
.name = "Kodicom 4400R (slave)",
.video_inputs = 16,
.audio_inputs = 0,
- .tuner = -1,
- .tuner_type = -1,
+ .tuner = UNSET,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .svhs = -1,
+ .svhs = UNSET,
.gpiomask = 0x010000,
.no_gpioirq = 1,
.muxsel = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
@@ -2717,7 +2740,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.muxsel = { 2, 3, 1, 0 },
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -2824,7 +2847,7 @@ struct tvcard bttv_tvcards[] = {
.name = "Osprey 440",
.video_inputs = 1,
.audio_inputs = 1,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 2 },
.pll = PLL_28,
@@ -2848,7 +2871,7 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 1,
.needs_tvaudio = 1,
.pll = PLL_28,
- .tuner_type = 2,
+ .tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2875,14 +2898,14 @@ struct tvcard bttv_tvcards[] = {
.name = "Hauppauge ImpactVCB (bt878)",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.gpiomask = 0x0f, /* old: 7 */
.muxsel = { 0, 1, 3, 2 }, /* Composite 0-3 */
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2914,10 +2937,10 @@ struct tvcard bttv_tvcards[] = {
.name = "SSAI Security Video Interface",
.video_inputs = 4,
.audio_inputs = 0,
- .tuner = -1,
- .svhs = -1,
+ .tuner = UNSET,
+ .svhs = UNSET,
.muxsel = { 0, 1, 2, 3 },
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
@@ -2925,13 +2948,31 @@ struct tvcard bttv_tvcards[] = {
.name = "SSAI Ultrasound Video Interface",
.video_inputs = 2,
.audio_inputs = 0,
- .tuner = -1,
+ .tuner = UNSET,
.svhs = 1,
.muxsel = { 2, 0, 1, 3 },
- .tuner_type = -1,
+ .tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
},
+ /* ---- card 0x94---------------------------------- */
+ [BTTV_BOARD_DVICO_FUSIONHDTV_2] = {
+ .name = "DViCO FusionHDTV 2",
+ .tuner = 0,
+ .tuner_type = TUNER_PHILIPS_ATSC, /* FCV1236D */
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .video_inputs = 3,
+ .audio_inputs = 1,
+ .svhs = 2,
+ .muxsel = { 2, 3, 1 },
+ .gpiomask = 0x00e00007,
+ .gpiomux = { 0x00400005, 0, 0x00000001, 0 },
+ .gpiomute = 0x00c00007,
+ .no_msp34xx = 1,
+ .no_tda9875 = 1,
+ .no_tda7432 = 1,
+ },
};
static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
@@ -3040,7 +3081,7 @@ static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256])
static void flyvideo_gpio(struct bttv *btv)
{
int gpio,has_remote,has_radio,is_capture_only,is_lr90,has_tda9820_tda9821;
- int tuner=-1,ttype;
+ int tuner=UNSET,ttype;
gpio_inout(0xffffff, 0);
udelay(8); /* without this we would see the 0x1800 mask */
@@ -3085,7 +3126,7 @@ static void flyvideo_gpio(struct bttv *btv)
* gpio & 0x001000 output bit for audio routing */
if(is_capture_only)
- tuner=4; /* No tuner present */
+ tuner = TUNER_ABSENT; /* No tuner present */
printk(KERN_INFO "bttv%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n",
btv->c.nr, has_radio? "yes":"no ", has_remote? "yes":"no ", tuner, gpio);
@@ -3093,7 +3134,7 @@ static void flyvideo_gpio(struct bttv *btv)
btv->c.nr, is_lr90?"yes":"no ", has_tda9820_tda9821?"yes":"no ",
is_capture_only?"yes":"no ");
- if(tuner!= -1) /* only set if known tuner autodetected, else let insmod option through */
+ if (tuner != UNSET) /* only set if known tuner autodetected, else let insmod option through */
btv->tuner_type = tuner;
btv->has_radio = has_radio;
@@ -3302,6 +3343,7 @@ void __devinit bttv_init_card1(struct bttv *btv)
case BTTV_BOARD_HAUPPAUGE878:
boot_msp34xx(btv,5);
break;
+ case BTTV_BOARD_VOODOOTV_200:
case BTTV_BOARD_VOODOOTV_FM:
boot_msp34xx(btv,20);
break;
@@ -3328,10 +3370,9 @@ void __devinit bttv_init_card1(struct bttv *btv)
/* initialization part two -- after registering i2c bus */
void __devinit bttv_init_card2(struct bttv *btv)
{
- int tda9887;
int addr=ADDR_UNSET;
- btv->tuner_type = -1;
+ btv->tuner_type = UNSET;
if (BTTV_BOARD_UNKNOWN == btv->c.type) {
bttv_readee(btv,eeprom_data,0xa0);
@@ -3479,7 +3520,15 @@ void __devinit bttv_init_card2(struct bttv *btv)
btv->tuner_type = bttv_tvcards[btv->c.type].tuner_type;
if (UNSET != tuner[btv->c.nr])
btv->tuner_type = tuner[btv->c.nr];
- printk("bttv%d: using tuner=%d\n",btv->c.nr,btv->tuner_type);
+
+ if (btv->tuner_type == TUNER_ABSENT ||
+ bttv_tvcards[btv->c.type].tuner == UNSET)
+ printk(KERN_INFO "bttv%d: tuner absent\n", btv->c.nr);
+ else if (btv->tuner_type == UNSET)
+ printk(KERN_WARNING "bttv%d: tuner type unset\n", btv->c.nr);
+ else
+ printk(KERN_INFO "bttv%d: tuner type=%d\n", btv->c.nr,
+ btv->tuner_type);
if (btv->tuner_type != UNSET) {
struct tuner_setup tun_setup;
@@ -3521,6 +3570,9 @@ void __devinit bttv_init_card2(struct bttv *btv)
if (!autoload)
return;
+ if (bttv_tvcards[btv->c.type].tuner == UNSET)
+ return; /* no tuner or related drivers to load */
+
/* try to detect audio/fader chips */
if (!bttv_tvcards[btv->c.type].no_msp34xx &&
bttv_I2CRead(btv, I2C_ADDR_MSP3400, "MSP34xx") >=0)
@@ -3541,17 +3593,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
if (bttv_tvcards[btv->c.type].needs_tvaudio)
request_module("tvaudio");
- /* tuner modules */
- tda9887 = 0;
- if (btv->tda9887_conf)
- tda9887 = 1;
- if (0 == tda9887 && 0 == bttv_tvcards[btv->c.type].has_dvb &&
- bttv_I2CRead(btv, I2C_ADDR_TDA9887, "TDA9887") >=0)
- tda9887 = 1;
- /* Hybrid DVB card, DOES have a tda9887 */
- if (btv->c.type == BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE)
- tda9887 = 1;
- if (btv->tuner_type != UNSET)
+ if (btv->tuner_type != UNSET && btv->tuner_type != TUNER_ABSENT)
request_module("tuner");
}
@@ -3865,11 +3907,15 @@ void bttv_tda9880_setnorm(struct bttv *btv, int norm)
if(norm==VIDEO_MODE_NTSC) {
bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomux[TVAUDIO_INPUT_TUNER]=0x957fff;
bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomute=0x957fff;
+ bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomux[TVAUDIO_INPUT_TUNER]=0x957fff;
+ bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomute=0x957fff;
dprintk("bttv_tda9880_setnorm to NTSC\n");
}
else {
bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomux[TVAUDIO_INPUT_TUNER]=0x947fff;
bttv_tvcards[BTTV_BOARD_VOODOOTV_FM].gpiomute=0x947fff;
+ bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomux[TVAUDIO_INPUT_TUNER]=0x947fff;
+ bttv_tvcards[BTTV_BOARD_VOODOOTV_200].gpiomute=0x947fff;
dprintk("bttv_tda9880_setnorm to PAL\n");
}
/* set GPIO according */
@@ -4163,7 +4209,7 @@ static int tea5757_read(struct bttv *btv)
bus_low(btv,btv->mbox_clk);
udelay(10);
- timeout= jiffies + HZ;
+ timeout= jiffies + msecs_to_jiffies(1000);
/* wait for DATA line to go low; error if it doesn't */
while (bus_in(btv,btv->mbox_data) && time_before(jiffies, timeout))
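The timeout above changes from jiffies + HZ to jiffies + msecs_to_jiffies(1000), which keeps the one-second wait independent of the kernel's tick rate. Below is a small user-space model of the conversion; the rounding helper is a simplified stand-in for the kernel's msecs_to_jiffies(), and the HZ values are only examples.

#include <stdio.h>

static unsigned long msecs_to_jiffies_model(unsigned int msecs, unsigned long hz)
{
        /* round up so short delays never truncate to zero ticks */
        return (msecs * hz + 999) / 1000;
}

int main(void)
{
        unsigned long hz_values[] = { 100, 250, 1000 };
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("HZ=%-4lu  1000 ms -> %lu jiffies, 40 ms -> %lu jiffies\n",
                       hz_values[i],
                       msecs_to_jiffies_model(1000, hz_values[i]),
                       msecs_to_jiffies_model(40, hz_values[i]));
        return 0;
}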
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index b1fedb0f6431..cb555f2c40f9 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1218,7 +1218,14 @@ audio_mux(struct bttv *btv, int input, int mute)
break;
case TVAUDIO_INPUT_TUNER:
default:
- route.input = MSP_INPUT_DEFAULT;
+ /* This is the only card that uses TUNER2 and, as far as we know,
+ that is the only difference between the VOODOOTV_FM
+ and the VOODOOTV_200 */
+ if (btv->c.type == BTTV_BOARD_VOODOOTV_200)
+ route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER2, \
+ MSP_DSP_IN_TUNER, MSP_DSP_IN_TUNER);
+ else
+ route.input = MSP_INPUT_DEFAULT;
break;
}
route.output = MSP_OUTPUT_DEFAULT;
@@ -1253,7 +1260,7 @@ i2c_vidiocschan(struct bttv *btv)
v4l2_std_id std = bttv_tvnorms[btv->tvnorm].v4l2_id;
bttv_call_i2c_clients(btv, VIDIOC_S_STD, &std);
- if (btv->c.type == BTTV_BOARD_VOODOOTV_FM)
+ if (btv->c.type == BTTV_BOARD_VOODOOTV_FM || btv->c.type == BTTV_BOARD_VOODOOTV_200)
bttv_tda9880_setnorm(btv,btv->tvnorm);
}
@@ -1323,6 +1330,7 @@ set_tvnorm(struct bttv *btv, unsigned int norm)
switch (btv->c.type) {
case BTTV_BOARD_VOODOOTV_FM:
+ case BTTV_BOARD_VOODOOTV_200:
bttv_tda9880_setnorm(btv,norm);
break;
}
@@ -2251,6 +2259,24 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
printk(KERN_INFO "bttv%d: ================== END STATUS CARD #%d ==================\n", btv->c.nr, btv->c.nr);
return 0;
}
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_DBG_G_REGISTER:
+ case VIDIOC_DBG_S_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
+ return -EINVAL;
+ /* bt848 has a 12-bit register space */
+ reg->reg &= 0xfff;
+ if (cmd == VIDIOC_DBG_G_REGISTER)
+ reg->val = btread(reg->reg);
+ else
+ btwrite(reg->val, reg->reg);
+ return 0;
+ }
+#endif
default:
return -ENOIOCTLCMD;
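The new VIDIOC_DBG_G_REGISTER/VIDIOC_DBG_S_REGISTER cases gate raw register access behind CAP_SYS_ADMIN and mask the address to the bt848's 12-bit register space before reading or writing. The user-space model below mirrors only that shape; regs[], the CMD_* names and the capability stub are invented for the example.

#include <stdio.h>
#include <errno.h>

enum { CMD_G_REGISTER, CMD_S_REGISTER };

static unsigned int regs[0x1000];                /* pretend 12-bit register file */

static int capable_sys_admin(void) { return 1; } /* stand-in for capable(CAP_SYS_ADMIN) */

static int dbg_register(int cmd, unsigned int *addr, unsigned int *val)
{
        if (!capable_sys_admin())
                return -EPERM;
        *addr &= 0xfff;                          /* bt848 has a 12-bit register space */
        if (cmd == CMD_G_REGISTER)
                *val = regs[*addr];
        else
                regs[*addr] = *val;
        return 0;
}

int main(void)
{
        unsigned int addr = 0x1058, val = 0xab;  /* 0x1058 aliases to 0x058 */

        dbg_register(CMD_S_REGISTER, &addr, &val);
        addr = 0x058;
        val = 0;
        dbg_register(CMD_G_REGISTER, &addr, &val);
        printf("reg 0x%03x = 0x%02x\n", addr, val);
        return 0;
}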
@@ -3561,6 +3587,8 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
case VIDIOC_G_FREQUENCY:
case VIDIOC_S_FREQUENCY:
case VIDIOC_LOG_STATUS:
+ case VIDIOC_DBG_G_REGISTER:
+ case VIDIOC_DBG_S_REGISTER:
return bttv_common_ioctls(btv,cmd,arg);
default:
@@ -3943,6 +3971,8 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
case VIDIOCGAUDIO:
case VIDIOCSAUDIO:
case VIDIOC_LOG_STATUS:
+ case VIDIOC_DBG_G_REGISTER:
+ case VIDIOC_DBG_S_REGISTER:
return bttv_common_ioctls(btv,cmd,arg);
default:
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 6f74c8042bc3..4201552bc3c0 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -153,7 +153,7 @@ static void bttv_ir_start(struct bttv *btv, struct card_ir *ir)
{
if (ir->polling) {
setup_timer(&ir->timer, bttv_input_timer, (unsigned long)btv);
- ir->timer.expires = jiffies + HZ;
+ ir->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&ir->timer);
} else if (ir->rc5_gpio) {
/* set timer_end for code completion */
@@ -313,7 +313,7 @@ int bttv_input_init(struct bttv *btv)
input_dev->id.vendor = btv->c.pci->vendor;
input_dev->id.product = btv->c.pci->device;
}
- input_dev->cdev.dev = &btv->c.pci->dev;
+ input_dev->dev.parent = &btv->c.pci->dev;
btv->remote = ir;
bttv_ir_start(btv, ir);
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index f821ba69db99..dcc847dc2486 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -170,6 +170,8 @@
#define BTTV_BOARD_MACHTV_MAGICTV 0x90
#define BTTV_BOARD_SSAI_SECURITY 0x91
#define BTTV_BOARD_SSAI_ULTRASOUND 0x92
+#define BTTV_BOARD_VOODOOTV_200 0x93
+#define BTTV_BOARD_DVICO_FUSIONHDTV_2 0x94
/* more card-specific defines */
#define PT2254_L_CHANNEL 0x10
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 8f44f02029be..5b25faca1504 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -33,12 +33,12 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/videodev.h>
-#include <media/v4l2-common.h>
#include <linux/pci.h>
#include <linux/input.h>
#include <linux/mutex.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <media/v4l2-common.h>
#include <linux/device.h>
#include <media/video-buf.h>
@@ -284,8 +284,8 @@ extern int fini_bttv_i2c(struct bttv *btv);
#define d2printk if (bttv_debug >= 2) printk
#define BTTV_MAX_FBUF 0x208000
-#define BTTV_TIMEOUT (HZ/2) /* 0.5 seconds */
-#define BTTV_FREE_IDLE (HZ) /* one second */
+#define BTTV_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
+#define BTTV_FREE_IDLE msecs_to_jiffies(1000) /* one second */
struct bttv_pll_info {
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 925ff17efbbc..f76c6a6c3766 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -95,7 +95,7 @@ static unsigned int qcam_await_ready1(struct qcam_device *qcam,
unsigned long oldjiffies = jiffies;
unsigned int i;
- for (oldjiffies = jiffies; (jiffies - oldjiffies) < (HZ/25); )
+ for (oldjiffies = jiffies; (jiffies - oldjiffies) < msecs_to_jiffies(40); )
if (qcam_ready1(qcam) == value)
return 0;
@@ -120,7 +120,7 @@ static unsigned int qcam_await_ready2(struct qcam_device *qcam, int value)
unsigned long oldjiffies = jiffies;
unsigned int i;
- for (oldjiffies = jiffies; (jiffies - oldjiffies) < (HZ/25); )
+ for (oldjiffies = jiffies; (jiffies - oldjiffies) < msecs_to_jiffies(40); )
if (qcam_ready2(qcam) == value)
return 0;
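Both loops above poll the parallel-port camera until a status line changes or roughly 40 ms elapse, now expressed in milliseconds rather than HZ/25. A user-space analogue of that deadline-bounded busy wait follows, with camera_ready() standing in for the driver's qcam_ready1()/qcam_ready2() reads.

#include <stdio.h>
#include <time.h>

static int camera_ready(void) { return 0; }      /* pretend the port never goes ready */

static long elapsed_ms(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000 +
               (now.tv_nsec - start->tv_nsec) / 1000000;
}

int main(void)
{
        struct timespec start;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (!camera_ready() && elapsed_ms(&start) < 40)
                ;                                /* busy-wait, exactly like the driver */
        printf("%s after %ld ms\n",
               camera_ready() ? "ready" : "timed out", elapsed_ms(&start));
        return 0;
}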
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index fd771c7a2fe2..a76bd786cf13 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -663,15 +663,13 @@ int cpia2_reset_camera(struct camera_data *cam)
cpia2_send_command(cam, &cmd);
}
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(100 * HZ / 1000); /* wait for 100 msecs */
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
if (cam->params.pnp_id.device_type == DEVICE_STV_672)
retval = apply_vp_patch(cam);
/* wait for vp to go to sleep */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(100 * HZ / 1000); /* wait for 100 msecs */
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
/***
* If this is a 676, apply VP5 fixes before we start streaming
@@ -720,8 +718,7 @@ int cpia2_reset_camera(struct camera_data *cam)
set_default_user_mode(cam);
/* Give VP time to wake up */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(100 * HZ / 1000); /* wait for 100 msecs */
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
set_all_properties(cam);
@@ -2227,15 +2224,13 @@ struct camera_data *cpia2_init_camera_struct(void)
{
struct camera_data *cam;
- cam = kmalloc(sizeof(*cam), GFP_KERNEL);
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
if (!cam) {
ERR("couldn't kmalloc cpia2 struct\n");
return NULL;
}
- /* Default everything to 0 */
- memset(cam, 0, sizeof(struct camera_data));
cam->present = 1;
mutex_init(&cam->busy_lock);
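kzalloc() allocates and zeroes in one step, so the separate memset() the hunk removes becomes unnecessary. The closest user-space analogue is calloc(); struct camera_data below is a two-field stand-in, not the driver's real layout.

#include <stdio.h>
#include <stdlib.h>

struct camera_data {
        int present;
        int streaming;
};

int main(void)
{
        /* one call, memory already zeroed -- no follow-up memset() needed */
        struct camera_data *cam = calloc(1, sizeof(*cam));

        if (!cam)
                return 1;
        printf("present=%d streaming=%d\n", cam->present, cam->streaming);
        cam->present = 1;
        free(cam);
        return 0;
}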
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 1bda7ad9de11..92778cd1d735 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -105,7 +105,7 @@ static struct control_menu_info framerate_controls[] =
{ CPIA2_VP_FRAMERATE_25, "25 fps" },
{ CPIA2_VP_FRAMERATE_30, "30 fps" },
};
-#define NUM_FRAMERATE_CONTROLS (sizeof(framerate_controls)/sizeof(framerate_controls[0]))
+#define NUM_FRAMERATE_CONTROLS (ARRAY_SIZE(framerate_controls))
static struct control_menu_info flicker_controls[] =
{
@@ -113,7 +113,7 @@ static struct control_menu_info flicker_controls[] =
{ FLICKER_50, "50 Hz" },
{ FLICKER_60, "60 Hz" },
};
-#define NUM_FLICKER_CONTROLS (sizeof(flicker_controls)/sizeof(flicker_controls[0]))
+#define NUM_FLICKER_CONTROLS (ARRAY_SIZE(flicker_controls))
static struct control_menu_info lights_controls[] =
{
@@ -122,7 +122,7 @@ static struct control_menu_info lights_controls[] =
{ 128, "Bottom" },
{ 192, "Both" },
};
-#define NUM_LIGHTS_CONTROLS (sizeof(lights_controls)/sizeof(lights_controls[0]))
+#define NUM_LIGHTS_CONTROLS (ARRAY_SIZE(lights_controls))
#define GPIO_LIGHTS_MASK 192
static struct v4l2_queryctrl controls[] = {
@@ -235,7 +235,7 @@ static struct v4l2_queryctrl controls[] = {
.default_value = 0,
},
};
-#define NUM_CONTROLS (sizeof(controls)/sizeof(controls[0]))
+#define NUM_CONTROLS (ARRAY_SIZE(controls))
/******************************************************************************
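These hunks replace hand-written sizeof(arr)/sizeof(arr[0]) expressions with the kernel's ARRAY_SIZE() macro, so the element count always tracks the initializer. A self-contained illustration, with made-up menu values rather than the CPIA2_VP_*/FLICKER_* constants:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct control_menu_info {
        int value;
        const char *name;
};

static const struct control_menu_info flicker_controls[] = {
        { 0, "Off" },
        { 1, "50 Hz" },
        { 2, "60 Hz" },
};
#define NUM_FLICKER_CONTROLS ARRAY_SIZE(flicker_controls)

int main(void)
{
        printf("%zu flicker controls\n", (size_t)NUM_FLICKER_CONTROLS);
        return 0;
}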
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 0f9d96963618..f750a543c961 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -47,7 +47,7 @@ config VIDEO_CX88_DVB
tristate "DVB/ATSC Support for cx2388x based TV cards"
depends on VIDEO_CX88 && DVB_CORE
select VIDEO_BUF_DVB
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_MT352 if !DVB_FE_CUSTOMISE
select DVB_ZL10353 if !DVB_FE_CUSTOMISE
select DVB_OR51132 if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index a80b1cb1abe8..f2fcdb92ecce 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -56,8 +56,7 @@ MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
/* ------------------------------------------------------------------ */
-#define OLD_BLACKBIRD_FIRM_IMAGE_SIZE 262144
-#define BLACKBIRD_FIRM_IMAGE_SIZE 376836
+#define BLACKBIRD_FIRM_IMAGE_SIZE 376836
/* defines below are from ivtv-driver.h */
@@ -405,7 +404,7 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
u32 value;
int i;
- for (i = 0; i < dev->fw_size; i++) {
+ for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) {
memory_read(dev->core, i, &value);
if (value == signature[signaturecnt])
signaturecnt++;
@@ -453,15 +452,12 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
return -1;
}
- if ((firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) &&
- (firmware->size != OLD_BLACKBIRD_FIRM_IMAGE_SIZE)) {
- dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d or %d)\n",
- firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE,
- OLD_BLACKBIRD_FIRM_IMAGE_SIZE);
+ if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
+ dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
+ firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
release_firmware(firmware);
return -1;
}
- dev->fw_size = firmware->size;
if (0 != memcmp(firmware->data, magic, 8)) {
dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index e61102dc8ad7..6a136ddbccf8 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1335,6 +1335,26 @@ struct cx88_board cx88_boards[] = {
/* fixme: Add radio support */
.mpeg = CX88_MPEG_DVB | CX88_MPEG_BLACKBIRD,
},
+ [CX88_BOARD_ADSTECH_PTV_390] = {
+ .name = "ADS Tech Instant Video PCI",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_DEBUG,
+ .vmux = 3,
+ .gpio0 = 0x04ff,
+ },{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x07fa,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x07fa,
+ }},
+ },
};
const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards);
@@ -1641,6 +1661,10 @@ struct cx88_subid cx88_subids[] = {
.subvendor = 0x1421,
.subdevice = 0x0341, /* ADS Tech InstantTV DVB-S */
.card = CX88_BOARD_KWORLD_DVBS_100,
+ },{
+ .subvendor = 0x1421,
+ .subdevice = 0x0390,
+ .card = CX88_BOARD_ADSTECH_PTV_390,
},
};
const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids);
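The new subvendor/subdevice pair 0x1421:0x0390 lets cx88 auto-detect the ADS Tech Instant Video PCI board. Below is a sketch of how such a subsystem-ID table resolves to a board number; the lookup helper is illustrative, and the board value 57 is taken from the cx88.h hunk later in this patch.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define BOARD_UNKNOWN         0
#define BOARD_ADSTECH_PTV_390 57

struct subid {
        unsigned short subvendor, subdevice;
        int card;
};

static const struct subid subids[] = {
        { 0x1421, 0x0390, BOARD_ADSTECH_PTV_390 },
};

static int lookup_board(unsigned short sv, unsigned short sd)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(subids); i++)
                if (subids[i].subvendor == sv && subids[i].subdevice == sd)
                        return subids[i].card;
        return BOARD_UNKNOWN;                    /* fall back to manual card= option */
}

int main(void)
{
        printf("0x1421:0x0390 -> board %d\n", lookup_board(0x1421, 0x0390));
        return 0;
}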
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index dbfe4dc9cf8c..1773b40467dc 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -35,9 +35,7 @@
#include "mt352.h"
#include "mt352_priv.h"
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
-# include "cx88-vp3054-i2c.h"
-#endif
+#include "cx88-vp3054-i2c.h"
#include "zl10353.h"
#include "cx22702.h"
#include "or51132.h"
@@ -199,7 +197,7 @@ static struct mt352_config dvico_fusionhdtv_dual = {
.demod_init = dvico_dual_demod_init,
};
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
{
static u8 clock_config [] = { 0x89, 0x38, 0x38 };
@@ -223,64 +221,6 @@ static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
return 0;
}
-static int philips_fmd1216_pll_init(struct dvb_frontend *fe)
-{
- struct cx8802_dev *dev= fe->dvb->priv;
-
- /* this message is to set up ATC and ALC */
- static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 };
- struct i2c_msg msg =
- { .addr = dev->core->pll_addr, .flags = 0,
- .buf = fmd1216_init, .len = sizeof(fmd1216_init) };
- int err;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
- if (err < 0)
- return err;
- else
- return -EREMOTEIO;
- }
-
- return 0;
-}
-
-static int dntv_live_dvbt_pro_tuner_set_params(struct dvb_frontend* fe,
- struct dvb_frontend_parameters* params)
-{
- struct cx8802_dev *dev= fe->dvb->priv;
- u8 buf[4];
- struct i2c_msg msg =
- { .addr = dev->core->pll_addr, .flags = 0,
- .buf = buf, .len = 4 };
- int err;
-
- /* Switch PLL to DVB mode */
- err = philips_fmd1216_pll_init(fe);
- if (err)
- return err;
-
- /* Tune PLL */
- dvb_pll_configure(dev->core->pll_desc, buf,
- params->frequency,
- params->u.ofdm.bandwidth);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
-
- printk(KERN_WARNING "cx88-dvb: %s error "
- "(addr %02x <- %02x, err = %i)\n",
- __FUNCTION__, dev->core->pll_addr, buf[0], err);
- if (err < 0)
- return err;
- else
- return -EREMOTEIO;
- }
-
- return 0;
-}
-
static struct mt352_config dntv_live_dvbt_pro_config = {
.demod_address = 0x0f,
.no_tuner = 1,
@@ -370,18 +310,8 @@ static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
return 0;
}
-static int nxt200x_set_pll_input(u8* buf, int input)
-{
- if (input)
- buf[3] |= 0x08;
- else
- buf[3] &= ~0x08;
- return 0;
-}
-
static struct nxt200x_config ati_hdtvwonder = {
.demod_address = 0x0a,
- .set_pll_input = nxt200x_set_pll_input,
.set_ts_params = nxt200x_set_ts_param,
};
@@ -456,7 +386,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_thomson_dtt759x);
+ DVB_PLL_THOMSON_DTT759X);
}
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
@@ -469,7 +399,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
&dev->core->i2c_adap,
- &dvb_pll_thomson_dtt7579);
+ DVB_PLL_THOMSON_DTT7579);
}
break;
case CX88_BOARD_WINFAST_DTV2000H:
@@ -482,7 +412,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- &dev->core->i2c_adap, &dvb_pll_fmd1216me);
+ &dev->core->i2c_adap, DVB_PLL_FMD1216ME);
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
@@ -491,7 +421,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
- NULL, &dvb_pll_thomson_dtt7579);
+ NULL, DVB_PLL_THOMSON_DTT7579);
break;
}
/* ZL10353 replaces MT352 on later cards */
@@ -500,7 +430,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x60,
- NULL, &dvb_pll_thomson_dtt7579);
+ NULL, DVB_PLL_THOMSON_DTT7579);
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
@@ -511,7 +441,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_thomson_dtt7579);
+ NULL, DVB_PLL_THOMSON_DTT7579);
break;
}
/* ZL10353 replaces MT352 on later cards */
@@ -520,7 +450,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_thomson_dtt7579);
+ NULL, DVB_PLL_THOMSON_DTT7579);
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
@@ -529,7 +459,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_lg_z201);
+ NULL, DVB_PLL_LG_Z201);
}
break;
case CX88_BOARD_KWORLD_DVB_T:
@@ -540,17 +470,16 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_unknown_1);
+ NULL, DVB_PLL_UNKNOWN_1);
}
break;
case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
- dev->core->pll_addr = 0x61;
- dev->core->pll_desc = &dvb_pll_fmd1216me;
+#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
dev->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
&((struct vp3054_i2c_state *)dev->card_priv)->adap);
if (dev->dvb.frontend != NULL) {
- dev->dvb.frontend->ops.tuner_ops.set_params = dntv_live_dvbt_pro_tuner_set_params;
+ dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
+ &dev->core->i2c_adap, DVB_PLL_FMD1216ME);
}
#else
printk("%s: built without vp3054 support\n", dev->core->name);
@@ -563,7 +492,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_thomson_fe6600);
+ DVB_PLL_THOMSON_FE6600);
}
break;
case CX88_BOARD_PCHDTV_HD3000:
@@ -572,7 +501,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_thomson_dtt761x);
+ DVB_PLL_THOMSON_DTT761X);
}
break;
case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
@@ -594,7 +523,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_microtune_4042);
+ DVB_PLL_MICROTUNE_4042);
}
}
break;
@@ -614,7 +543,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_thomson_dtt761x);
+ DVB_PLL_THOMSON_DTT761X);
}
}
break;
@@ -634,7 +563,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_lg_tdvs_h06xf);
+ DVB_PLL_LG_TDVS_H06XF);
}
}
break;
@@ -654,7 +583,7 @@ static int dvb_register(struct cx8802_dev *dev)
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
&dev->core->i2c_adap,
- &dvb_pll_lg_tdvs_h06xf);
+ DVB_PLL_LG_TDVS_H06XF);
}
}
break;
@@ -664,7 +593,7 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
if (dev->dvb.frontend != NULL) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_tuv1236d);
+ NULL, DVB_PLL_TUV1236D);
}
break;
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
@@ -705,10 +634,6 @@ static int dvb_register(struct cx8802_dev *dev)
return -1;
}
- if (dev->core->pll_desc) {
- dev->dvb.frontend->ops.info.frequency_min = dev->core->pll_desc->min;
- dev->dvb.frontend->ops.info.frequency_max = dev->core->pll_desc->max;
- }
/* Ensure all frontends negotiate bus access */
dev->dvb.frontend->ops.ts_bus_ctrl = cx88_dvb_bus_ctrl;
@@ -778,11 +703,10 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
if (!(cx88_boards[core->board].mpeg & CX88_MPEG_DVB))
goto fail_core;
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
+ /* If vp3054 isn't enabled, a stub will just return 0 */
err = vp3054_i2c_probe(dev);
if (0 != err)
goto fail_core;
-#endif
/* dvb stuff */
printk("%s/2: cx2388x based dvb card\n", core->name);
@@ -807,9 +731,7 @@ static int cx8802_dvb_remove(struct cx8802_driver *drv)
/* dvb */
videobuf_dvb_unregister(&dev->dvb);
-#if defined(CONFIG_VIDEO_CX88_VP3054) || defined(CONFIG_VIDEO_CX88_VP3054_MODULE)
vp3054_i2c_remove(dev);
-#endif
return 0;
}
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 7919a1f9da06..78bbcfab9670 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -160,7 +160,7 @@ void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
i2c_clients_command(&core->i2c_adap, cmd, arg);
}
-static struct i2c_algo_bit_data cx8800_i2c_algo_template = {
+static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
.setsda = cx8800_bit_setsda,
.setscl = cx8800_bit_setscl,
.getsda = cx8800_bit_getsda,
@@ -171,18 +171,6 @@ static struct i2c_algo_bit_data cx8800_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
-static struct i2c_adapter cx8800_i2c_adap_template = {
- .name = "cx2388x",
- .owner = THIS_MODULE,
- .id = I2C_HW_B_CX2388x,
- .client_register = attach_inform,
- .client_unregister = detach_inform,
-};
-
-static struct i2c_client cx8800_i2c_client_template = {
- .name = "cx88xx internal",
-};
-
static char *i2c_devs[128] = {
[ 0x1c >> 1 ] = "lgdt330x",
[ 0x86 >> 1 ] = "tda9887/cx22702",
@@ -212,14 +200,9 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
/* Prevents usage of invalid delay values */
if (i2c_udelay<5)
i2c_udelay=5;
- cx8800_i2c_algo_template.udelay=i2c_udelay;
- memcpy(&core->i2c_adap, &cx8800_i2c_adap_template,
- sizeof(core->i2c_adap));
memcpy(&core->i2c_algo, &cx8800_i2c_algo_template,
sizeof(core->i2c_algo));
- memcpy(&core->i2c_client, &cx8800_i2c_client_template,
- sizeof(core->i2c_client));
if (core->tuner_type != TUNER_ABSENT)
core->i2c_adap.class |= I2C_CLASS_TV_ANALOG;
@@ -228,10 +211,16 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
core->i2c_adap.dev.parent = &pci->dev;
strlcpy(core->i2c_adap.name,core->name,sizeof(core->i2c_adap.name));
+ core->i2c_adap.owner = THIS_MODULE;
+ core->i2c_adap.id = I2C_HW_B_CX2388x;
+ core->i2c_adap.client_register = attach_inform;
+ core->i2c_adap.client_unregister = detach_inform;
+ core->i2c_algo.udelay = i2c_udelay;
core->i2c_algo.data = core;
i2c_set_adapdata(&core->i2c_adap,core);
core->i2c_adap.algo_data = &core->i2c_algo;
core->i2c_client.adapter = &core->i2c_adap;
+ strlcpy(core->i2c_client.name, "cx88xx internal", I2C_NAME_SIZE);
cx8800_bit_setscl(core,1);
cx8800_bit_setsda(core,1);
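Instead of memcpy()ing static adapter/client templates and writing the module's udelay into the shared algo template before each copy, the hunk fills in the per-card fields directly and lets the remaining template stay const. A condensed user-space sketch of that per-instance initialization; the struct layouts are simplified stand-ins for i2c_algo_bit_data and cx88_core.

#include <stdio.h>
#include <string.h>

struct algo {
        int udelay;
        void *data;
};

struct core {
        char name[32];
        struct algo i2c_algo;
};

static const struct algo algo_template = { .udelay = 0, .data = NULL };

static void core_i2c_init(struct core *core, int udelay)
{
        core->i2c_algo = algo_template;          /* copy the const template */
        core->i2c_algo.udelay = udelay;          /* then set per-card fields */
        core->i2c_algo.data = core;
}

int main(void)
{
        struct core a, b;

        strcpy(a.name, "cx88[0]");
        strcpy(b.name, "cx88[1]");
        core_i2c_init(&a, 5);
        core_i2c_init(&b, 16);
        printf("%s udelay=%d, %s udelay=%d\n",
               a.name, a.i2c_algo.udelay, b.name, b.i2c_algo.udelay);
        return 0;
}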
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 8136673fe9e8..f5d4a565346e 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -74,7 +74,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
/* read gpio value */
gpio = cx_read(ir->gpio_addr);
- if (core->board == CX88_BOARD_NPGTECH_REALTV_TOP10FM) {
+ switch (core->board) {
+ case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
/* This board apparently uses a combination of 2 GPIO
to represent the keys. Additionally, the second GPIO
can be used for parity.
@@ -90,9 +91,14 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
auxgpio = cx_read(MO_GP1_IO);
/* Take out the parity part */
gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
- } else
+ break;
+ case CX88_BOARD_WINFAST_DTV1000:
+ gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
auxgpio = gpio;
-
+ break;
+ default:
+ auxgpio = gpio;
+ }
if (ir->polling) {
if (ir->last_gpio == auxgpio)
return;
@@ -148,20 +154,16 @@ static void ir_timer(unsigned long data)
static void cx88_ir_work(struct work_struct *work)
{
struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
- unsigned long timeout;
cx88_ir_handle_key(ir);
- timeout = jiffies + (ir->polling * HZ / 1000);
- mod_timer(&ir->timer, timeout);
+ mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
static void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
{
if (ir->polling) {
+ setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
INIT_WORK(&ir->work, cx88_ir_work);
- init_timer(&ir->timer);
- ir->timer.function = ir_timer;
- ir->timer.data = (unsigned long)ir;
schedule_work(&ir->work);
}
if (ir->sampling) {
@@ -222,7 +224,6 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_HVR1100:
- case CX88_BOARD_HAUPPAUGE_HVR1300:
case CX88_BOARD_HAUPPAUGE_HVR3000:
ir_codes = ir_codes_hauppauge_new;
ir_type = IR_TYPE_RC5;
@@ -236,6 +237,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->polling = 50; /* ms */
break;
case CX88_BOARD_WINFAST2000XP_EXPERT:
+ case CX88_BOARD_WINFAST_DTV1000:
ir_codes = ir_codes_winfast;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
@@ -328,7 +330,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
input_dev->id.vendor = pci->vendor;
input_dev->id.product = pci->device;
}
- input_dev->cdev.dev = &pci->dev;
+ input_dev->dev.parent = &pci->dev;
/* record handles to ourself */
ir->core = core;
core->ir = ir;
@@ -442,7 +444,6 @@ void cx88_ir_irq(struct cx88_core *core)
case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
case CX88_BOARD_HAUPPAUGE_HVR1100:
- case CX88_BOARD_HAUPPAUGE_HVR1300:
case CX88_BOARD_HAUPPAUGE_HVR3000:
ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7);
ir_dprintk("biphase decoded: %x\n", ircode);
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 543b05ebc0e7..317a2a3f9cc1 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -336,7 +336,7 @@ static void cx8802_timeout(unsigned long data)
{
struct cx8802_dev *dev = (struct cx8802_dev*)data;
- dprintk(0, "%s\n",__FUNCTION__);
+ dprintk(1, "%s\n",__FUNCTION__);
if (debug)
cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 259ea08e784f..1cc2d286a1cb 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -906,6 +906,7 @@ int cx88_audio_thread(void *data)
u32 mode = 0;
dprintk("cx88: tvaudio thread started\n");
+ set_freezable();
for (;;) {
msleep_interruptible(1000);
if (kthread_should_stop())
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 98fa35421bdd..06b233a7b20b 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1881,8 +1881,14 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
mutex_unlock(&core->lock);
/* start tvaudio thread */
- if (core->tuner_type != TUNER_ABSENT)
+ if (core->tuner_type != TUNER_ABSENT) {
core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
+ if (IS_ERR(core->kthread)) {
+ err = PTR_ERR(core->kthread);
+ printk(KERN_ERR "Failed to create cx88 audio thread, err=%d\n",
+ err);
+ }
+ }
return 0;
fail_unreg:
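kthread_run() reports failure through an error-encoded pointer, so the new code checks IS_ERR() and extracts the errno with PTR_ERR() before logging. The user-space model below only mimics the behaviour of those macros, not their exact kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

static void *fake_kthread_run(int fail)
{
        static int thread_handle;                /* any ordinary object address */

        return fail ? err_ptr(-ENOMEM) : &thread_handle;
}

int main(void)
{
        void *kthread = fake_kthread_run(1);

        if (is_err(kthread))
                printf("Failed to create thread, err=%ld\n", ptr_err(kthread));
        else
                printf("thread started\n");
        return 0;
}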
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
index 82bc3a28aa22..cd0877636a32 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -94,7 +94,7 @@ static int vp3054_bit_getsda(void *data)
/* ----------------------------------------------------------------------- */
-static struct i2c_algo_bit_data vp3054_i2c_algo_template = {
+static const struct i2c_algo_bit_data vp3054_i2c_algo_template = {
.setsda = vp3054_bit_setsda,
.setscl = vp3054_bit_setscl,
.getsda = vp3054_bit_getsda,
@@ -105,12 +105,6 @@ static struct i2c_algo_bit_data vp3054_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
-static struct i2c_adapter vp3054_i2c_adap_template = {
- .name = "cx2388x",
- .owner = THIS_MODULE,
- .id = I2C_HW_B_CX2388x,
-};
-
int vp3054_i2c_probe(struct cx8802_dev *dev)
{
struct cx88_core *core = dev->core;
@@ -125,8 +119,6 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
return -ENOMEM;
vp3054_i2c = dev->card_priv;
- memcpy(&vp3054_i2c->adap, &vp3054_i2c_adap_template,
- sizeof(vp3054_i2c->adap));
memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
sizeof(vp3054_i2c->algo));
@@ -135,6 +127,8 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
vp3054_i2c->adap.dev.parent = &dev->pci->dev;
strlcpy(vp3054_i2c->adap.name, core->name,
sizeof(vp3054_i2c->adap.name));
+ vp3054_i2c->adap.owner = THIS_MODULE;
+ vp3054_i2c->adap.id = I2C_HW_B_CX2388x;
vp3054_i2c->algo.data = dev;
i2c_set_adapdata(&vp3054_i2c->adap, dev);
vp3054_i2c->adap.algo_data = &vp3054_i2c->algo;
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.h b/drivers/media/video/cx88/cx88-vp3054-i2c.h
index 637a7d232238..be99c931dc3e 100644
--- a/drivers/media/video/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.h
@@ -30,5 +30,12 @@ struct vp3054_i2c_state {
};
/* ----------------------------------------------------------------------- */
+#if defined(CONFIG_VIDEO_CX88_VP3054) || (defined(CONFIG_VIDEO_CX88_VP3054_MODULE) && defined(MODULE))
int vp3054_i2c_probe(struct cx8802_dev *dev);
void vp3054_i2c_remove(struct cx8802_dev *dev);
+#else
+static inline int vp3054_i2c_probe(struct cx8802_dev *dev)
+{ return 0; }
+static inline void vp3054_i2c_remove(struct cx8802_dev *dev)
+{ }
+#endif
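Providing static inline no-op stubs when the option is compiled out lets cx88-dvb.c call vp3054_i2c_probe()/vp3054_i2c_remove() unconditionally, which is what allows the #ifdef blocks to be dropped at the call sites earlier in this patch. A minimal sketch of the same header pattern, with an invented CONFIG_FEATURE symbol left undefined so the stubs are used:

#include <stdio.h>

/* #define CONFIG_FEATURE 1 */

#ifdef CONFIG_FEATURE
int  feature_probe(void);                        /* real implementation elsewhere */
void feature_remove(void);
#else
static inline int  feature_probe(void)  { return 0; }
static inline void feature_remove(void) { }
#endif

int main(void)
{
        if (feature_probe() != 0)                /* unconditional call site */
                return 1;
        printf("probed (stub or real)\n");
        feature_remove();
        return 0;
}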
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 738d4f20c580..809126866a3e 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -209,6 +209,7 @@ extern struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_NORWOOD_MICRO 54
#define CX88_BOARD_TE_DTV_250_OEM_SWANN 55
#define CX88_BOARD_HAUPPAUGE_HVR1300 56
+#define CX88_BOARD_ADSTECH_PTV_390 57
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -258,7 +259,7 @@ struct cx88_subid {
#define RESOURCE_VIDEO 2
#define RESOURCE_VBI 4
-#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
+#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
/* buffer for one video frame */
struct cx88_buffer {
@@ -316,8 +317,6 @@ struct cx88_core {
/* config info -- dvb */
#if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE)
- struct dvb_pll_desc *pll_desc;
- unsigned int pll_addr;
int (*prev_set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
#endif
@@ -463,13 +462,10 @@ struct cx8802_dev {
u32 mailbox;
int width;
int height;
- int fw_size;
#if defined(CONFIG_VIDEO_BUF_DVB) || defined(CONFIG_VIDEO_BUF_DVB_MODULE)
/* for dvb only */
struct videobuf_dvb dvb;
- void* fe_handle;
- int (*fe_release)(void *handle);
void *card_priv;
#endif
diff --git a/drivers/media/video/et61x251/Kconfig b/drivers/media/video/et61x251/Kconfig
index 664676f44068..dcc1a0335440 100644
--- a/drivers/media/video/et61x251/Kconfig
+++ b/drivers/media/video/et61x251/Kconfig
@@ -1,6 +1,6 @@
config USB_ET61X251
tristate "USB ET61X[12]51 PC Camera Controller support"
- depends on VIDEO_V4L1
+ depends on VIDEO_V4L2
---help---
Say Y here if you want support for cameras based on Etoms ET61X151
or ET61X251 PC Camera Controllers.
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index 262f98e12409..02c741d8f85a 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/kref.h>
#include "et61x251_sensor.h"
@@ -134,7 +135,7 @@ struct et61x251_module_param {
};
static DEFINE_MUTEX(et61x251_sysfs_lock);
-static DECLARE_RWSEM(et61x251_disconnect);
+static DECLARE_RWSEM(et61x251_dev_lock);
struct et61x251_device {
struct video_device* v4ldev;
@@ -158,12 +159,14 @@ struct et61x251_device {
struct et61x251_sysfs_attr sysfs;
struct et61x251_module_param module_param;
+ struct kref kref;
enum et61x251_dev_state state;
u8 users;
- struct mutex dev_mutex, fileop_mutex;
+ struct completion probe;
+ struct mutex open_mutex, fileop_mutex;
spinlock_t queue_lock;
- wait_queue_head_t open, wait_frame, wait_stream;
+ wait_queue_head_t wait_open, wait_frame, wait_stream;
};
/*****************************************************************************/
@@ -177,7 +180,7 @@ et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id)
void
et61x251_attach_sensor(struct et61x251_device* cam,
- struct et61x251_sensor* sensor)
+ const struct et61x251_sensor* sensor)
{
memcpy(&cam->sensor, sensor, sizeof(struct et61x251_sensor));
}
@@ -195,8 +198,8 @@ do { \
else if ((level) == 2) \
dev_info(&cam->usbdev->dev, fmt "\n", ## args); \
else if ((level) >= 3) \
- dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n", \
- __FUNCTION__, __LINE__ , ## args); \
+ dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
+ __FILE__, __FUNCTION__, __LINE__ , ## args); \
} \
} while (0)
# define KDBG(level, fmt, args...) \
@@ -205,8 +208,8 @@ do { \
if ((level) == 1 || (level) == 2) \
pr_info("et61x251: " fmt "\n", ## args); \
else if ((level) == 3) \
- pr_debug("et61x251: [%s:%d] " fmt "\n", __FUNCTION__, \
- __LINE__ , ## args); \
+ pr_debug("sn9c102: [%s:%s:%d] " fmt "\n", __FILE__, \
+ __FUNCTION__, __LINE__ , ## args); \
} \
} while (0)
# define V4LDBG(level, name, cmd) \
@@ -222,8 +225,8 @@ do { \
#undef PDBG
#define PDBG(fmt, args...) \
-dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n", \
- __FUNCTION__, __LINE__ , ## args)
+dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __FUNCTION__, \
+ __LINE__ , ## args)
#undef PDBGG
#define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index a6525513cd1e..585bd1fe0765 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -45,11 +45,11 @@
#define ET61X251_MODULE_NAME "V4L2 driver for ET61X[12]51 " \
"PC Camera Controllers"
-#define ET61X251_MODULE_AUTHOR "(C) 2006 Luca Risolia"
+#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define ET61X251_MODULE_LICENSE "GPL"
-#define ET61X251_MODULE_VERSION "1:1.04"
-#define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 4)
+#define ET61X251_MODULE_VERSION "1:1.09"
+#define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 9)
/*****************************************************************************/
@@ -245,7 +245,8 @@ int et61x251_read_reg(struct et61x251_device* cam, u16 index)
static int
-et61x251_i2c_wait(struct et61x251_device* cam, struct et61x251_sensor* sensor)
+et61x251_i2c_wait(struct et61x251_device* cam,
+ const struct et61x251_sensor* sensor)
{
int i, r;
@@ -270,7 +271,7 @@ et61x251_i2c_wait(struct et61x251_device* cam, struct et61x251_sensor* sensor)
int
et61x251_i2c_try_read(struct et61x251_device* cam,
- struct et61x251_sensor* sensor, u8 address)
+ const struct et61x251_sensor* sensor, u8 address)
{
struct usb_device* udev = cam->usbdev;
u8* data = cam->control_buffer;
@@ -303,7 +304,8 @@ et61x251_i2c_try_read(struct et61x251_device* cam,
int
et61x251_i2c_try_write(struct et61x251_device* cam,
- struct et61x251_sensor* sensor, u8 address, u8 value)
+ const struct et61x251_sensor* sensor, u8 address,
+ u8 value)
{
struct usb_device* udev = cam->usbdev;
u8* data = cam->control_buffer;
@@ -615,7 +617,7 @@ static int et61x251_start_transfer(struct et61x251_device* cam)
return 0;
free_urbs:
- for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++)
+ for (i = 0; (i < ET61X251_URBS) && cam->urb[i]; i++)
usb_free_urb(cam->urb[i]);
free_buffers:
@@ -682,7 +684,7 @@ static u8 et61x251_strtou8(const char* buff, size_t len, ssize_t* count)
if (len < 4) {
strncpy(str, buff, len);
- str[len+1] = '\0';
+ str[len] = '\0';
} else {
strncpy(str, buff, 4);
str[4] = '\0';
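The fix above places the NUL terminator at str[len] rather than str[len+1]; the old index skipped the byte right after the copied data and wrote one cell further than intended. A short demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *buff = "123";                /* e.g. text written to a sysfs file */
        size_t len = 3;
        char str[5];

        memset(str, 'X', sizeof(str));           /* make stray bytes visible */
        strncpy(str, buff, len);
        str[len] = '\0';                         /* correct: terminate right after the copy */
        printf("parsed \"%s\"\n", str);
        return 0;
}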
@@ -977,30 +979,30 @@ static CLASS_DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR,
static int et61x251_create_sysfs(struct et61x251_device* cam)
{
- struct video_device *v4ldev = cam->v4ldev;
+ struct class_device *classdev = &(cam->v4ldev->class_dev);
int err = 0;
- if ((err = video_device_create_file(v4ldev, &class_device_attr_reg)))
+ if ((err = class_device_create_file(classdev, &class_device_attr_reg)))
goto err_out;
- if ((err = video_device_create_file(v4ldev, &class_device_attr_val)))
+ if ((err = class_device_create_file(classdev, &class_device_attr_val)))
goto err_reg;
if (cam->sensor.sysfs_ops) {
- if ((err = video_device_create_file(v4ldev,
+ if ((err = class_device_create_file(classdev,
&class_device_attr_i2c_reg)))
goto err_val;
- if ((err = video_device_create_file(v4ldev,
+ if ((err = class_device_create_file(classdev,
&class_device_attr_i2c_val)))
goto err_i2c_reg;
}
err_i2c_reg:
if (cam->sensor.sysfs_ops)
- video_device_remove_file(v4ldev, &class_device_attr_i2c_reg);
+ class_device_remove_file(classdev, &class_device_attr_i2c_reg);
err_val:
- video_device_remove_file(v4ldev, &class_device_attr_val);
+ class_device_remove_file(classdev, &class_device_attr_val);
err_reg:
- video_device_remove_file(v4ldev, &class_device_attr_reg);
+ class_device_remove_file(classdev, &class_device_attr_reg);
err_out:
return err;
}
@@ -1103,7 +1105,8 @@ static int et61x251_init(struct et61x251_device* cam)
int err = 0;
if (!(cam->state & DEV_INITIALIZED)) {
- init_waitqueue_head(&cam->open);
+ mutex_init(&cam->open_mutex);
+ init_waitqueue_head(&cam->wait_open);
qctrl = s->qctrl;
rect = &(s->cropcap.defrect);
cam->compression.quality = ET61X251_COMPRESSION_QUALITY;
@@ -1177,64 +1180,80 @@ static int et61x251_init(struct et61x251_device* cam)
return 0;
}
+/*****************************************************************************/
-static void et61x251_release_resources(struct et61x251_device* cam)
+static void et61x251_release_resources(struct kref *kref)
{
+ struct et61x251_device *cam;
+
mutex_lock(&et61x251_sysfs_lock);
+ cam = container_of(kref, struct et61x251_device, kref);
+
DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
+ usb_put_dev(cam->usbdev);
+ kfree(cam->control_buffer);
+ kfree(cam);
mutex_unlock(&et61x251_sysfs_lock);
-
- kfree(cam->control_buffer);
}
-/*****************************************************************************/
static int et61x251_open(struct inode* inode, struct file* filp)
{
struct et61x251_device* cam;
int err = 0;
- /*
- This is the only safe way to prevent race conditions with
- disconnect
- */
- if (!down_read_trylock(&et61x251_disconnect))
+ if (!down_read_trylock(&et61x251_dev_lock))
return -ERESTARTSYS;
cam = video_get_drvdata(video_devdata(filp));
- if (mutex_lock_interruptible(&cam->dev_mutex)) {
- up_read(&et61x251_disconnect);
+ if (wait_for_completion_interruptible(&cam->probe)) {
+ up_read(&et61x251_dev_lock);
return -ERESTARTSYS;
}
+ kref_get(&cam->kref);
+
+ if (mutex_lock_interruptible(&cam->open_mutex)) {
+ kref_put(&cam->kref, et61x251_release_resources);
+ up_read(&et61x251_dev_lock);
+ return -ERESTARTSYS;
+ }
+
+ if (cam->state & DEV_DISCONNECTED) {
+ DBG(1, "Device not present");
+ err = -ENODEV;
+ goto out;
+ }
+
if (cam->users) {
- DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+ DBG(2, "Device /dev/video%d is already in use",
+ cam->v4ldev->minor);
+ DBG(3, "Simultaneous opens are not supported");
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
err = -EWOULDBLOCK;
goto out;
}
- mutex_unlock(&cam->dev_mutex);
- err = wait_event_interruptible_exclusive(cam->open,
- cam->state & DEV_DISCONNECTED
+ DBG(2, "A blocking open() has been requested. Wait for the "
+ "device to be released...");
+ up_read(&et61x251_dev_lock);
+ err = wait_event_interruptible_exclusive(cam->wait_open,
+ (cam->state & DEV_DISCONNECTED)
|| !cam->users);
- if (err) {
- up_read(&et61x251_disconnect);
- return err;
- }
+ down_read(&et61x251_dev_lock);
+ if (err)
+ goto out;
if (cam->state & DEV_DISCONNECTED) {
- up_read(&et61x251_disconnect);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
- mutex_lock(&cam->dev_mutex);
}
-
if (cam->state & DEV_MISCONFIGURED) {
err = et61x251_init(cam);
if (err) {
@@ -1259,36 +1278,32 @@ static int et61x251_open(struct inode* inode, struct file* filp)
DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
out:
- mutex_unlock(&cam->dev_mutex);
- up_read(&et61x251_disconnect);
+ mutex_unlock(&cam->open_mutex);
+ if (err)
+ kref_put(&cam->kref, et61x251_release_resources);
+ up_read(&et61x251_dev_lock);
return err;
}
static int et61x251_release(struct inode* inode, struct file* filp)
{
- struct et61x251_device* cam = video_get_drvdata(video_devdata(filp));
+ struct et61x251_device* cam;
- mutex_lock(&cam->dev_mutex); /* prevent disconnect() to be called */
+ down_write(&et61x251_dev_lock);
- et61x251_stop_transfer(cam);
+ cam = video_get_drvdata(video_devdata(filp));
+ et61x251_stop_transfer(cam);
et61x251_release_buffers(cam);
-
- if (cam->state & DEV_DISCONNECTED) {
- et61x251_release_resources(cam);
- usb_put_dev(cam->usbdev);
- mutex_unlock(&cam->dev_mutex);
- kfree(cam);
- return 0;
- }
-
cam->users--;
- wake_up_interruptible_nr(&cam->open, 1);
+ wake_up_interruptible_nr(&cam->wait_open, 1);
DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
- mutex_unlock(&cam->dev_mutex);
+ kref_put(&cam->kref, et61x251_release_resources);
+
+ up_write(&et61x251_dev_lock);
return 0;
}
@@ -1324,7 +1339,7 @@ et61x251_read(struct file* filp, char __user * buf,
DBG(3, "Close and open the device again to choose the read "
"method");
mutex_unlock(&cam->fileop_mutex);
- return -EINVAL;
+ return -EBUSY;
}
if (cam->io == IO_NONE) {
@@ -1504,7 +1519,12 @@ static int et61x251_mmap(struct file* filp, struct vm_area_struct *vma)
return -EIO;
}
- if (cam->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) ||
+ if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
+ mutex_unlock(&cam->fileop_mutex);
+ return -EACCES;
+ }
+
+ if (cam->io != IO_MMAP ||
size != PAGE_ALIGN(cam->frame[0].buf.length)) {
mutex_unlock(&cam->fileop_mutex);
return -EINVAL;
@@ -1535,7 +1555,6 @@ static int et61x251_mmap(struct file* filp, struct vm_area_struct *vma)
vma->vm_ops = &et61x251_vm_ops;
vma->vm_private_data = &cam->frame[i];
-
et61x251_vm_open(vma);
mutex_unlock(&cam->fileop_mutex);
@@ -1764,7 +1783,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
if (cam->frame[i].vma_use_count) {
DBG(3, "VIDIOC_S_CROP failed. "
"Unmap the buffers first.");
- return -EINVAL;
+ return -EBUSY;
}
/* Preserve R,G or B origin */
@@ -1921,6 +1940,8 @@ et61x251_vidioc_g_fmt(struct et61x251_device* cam, void __user * arg)
if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+ pfmt->colorspace = (pfmt->pixelformat == V4L2_PIX_FMT_ET61X251) ?
+ 0 : V4L2_COLORSPACE_SRGB;
pfmt->bytesperline = (pfmt->pixelformat==V4L2_PIX_FMT_ET61X251)
? 0 : (pfmt->width * pfmt->priv) / 8;
pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
@@ -1996,6 +2017,8 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
pix->pixelformat != V4L2_PIX_FMT_SBGGR8)
pix->pixelformat = pfmt->pixelformat;
pix->priv = pfmt->priv; /* bpp */
+ pix->colorspace = (pix->pixelformat == V4L2_PIX_FMT_ET61X251) ?
+ 0 : V4L2_COLORSPACE_SRGB;
pix->colorspace = pfmt->colorspace;
pix->bytesperline = (pix->pixelformat == V4L2_PIX_FMT_ET61X251)
? 0 : (pix->width * pix->priv) / 8;
@@ -2013,7 +2036,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
if (cam->frame[i].vma_use_count) {
DBG(3, "VIDIOC_S_FMT failed. "
"Unmap the buffers first.");
- return -EINVAL;
+ return -EBUSY;
}
if (cam->stream == STREAM_ON)
@@ -2129,14 +2152,14 @@ et61x251_vidioc_reqbufs(struct et61x251_device* cam, void __user * arg)
if (cam->io == IO_READ) {
DBG(3, "Close and open the device again to choose the mmap "
"I/O method");
- return -EINVAL;
+ return -EBUSY;
}
for (i = 0; i < cam->nbuffers; i++)
if (cam->frame[i].vma_use_count) {
DBG(3, "VIDIOC_REQBUFS failed. "
"Previous buffers are still mapped.");
- return -EINVAL;
+ return -EBUSY;
}
if (cam->stream == STREAM_ON)
@@ -2284,9 +2307,6 @@ et61x251_vidioc_streamon(struct et61x251_device* cam, void __user * arg)
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
return -EINVAL;
- if (list_empty(&cam->inqueue))
- return -EINVAL;
-
cam->stream = STREAM_ON;
DBG(3, "Stream on");
@@ -2535,8 +2555,6 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- mutex_init(&cam->dev_mutex);
-
DBG(2, "ET61X[12]51 PC Camera Controller detected "
"(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);
@@ -2568,7 +2586,7 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
cam->v4ldev->release = video_device_release;
video_set_drvdata(cam->v4ldev, cam);
- mutex_lock(&cam->dev_mutex);
+ init_completion(&cam->probe);
err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
video_nr[dev_nr]);
@@ -2578,7 +2596,7 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
DBG(1, "Free /dev/videoX node not found");
video_nr[dev_nr] = -1;
dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
- mutex_unlock(&cam->dev_mutex);
+ complete_all(&cam->probe);
goto fail;
}
@@ -2599,11 +2617,15 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
"device controlling. Error #%d", err);
#else
DBG(2, "Optional device control through 'sysfs' interface disabled");
+ DBG(3, "Compile the kernel with the 'CONFIG_VIDEO_ADV_DEBUG' "
+ "configuration option to enable it.");
#endif
usb_set_intfdata(intf, cam);
+ kref_init(&cam->kref);
+ usb_get_dev(cam->usbdev);
- mutex_unlock(&cam->dev_mutex);
+ complete_all(&cam->probe);
return 0;
@@ -2620,40 +2642,31 @@ fail:
static void et61x251_usb_disconnect(struct usb_interface* intf)
{
- struct et61x251_device* cam = usb_get_intfdata(intf);
-
- if (!cam)
- return;
+ struct et61x251_device* cam;
- down_write(&et61x251_disconnect);
+ down_write(&et61x251_dev_lock);
- mutex_lock(&cam->dev_mutex);
+ cam = usb_get_intfdata(intf);
DBG(2, "Disconnecting %s...", cam->v4ldev->name);
- wake_up_interruptible_all(&cam->open);
-
if (cam->users) {
DBG(2, "Device /dev/video%d is open! Deregistration and "
- "memory deallocation are deferred on close.",
+ "memory deallocation are deferred.",
cam->v4ldev->minor);
cam->state |= DEV_MISCONFIGURED;
et61x251_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
wake_up_interruptible(&cam->wait_frame);
wake_up(&cam->wait_stream);
- usb_get_dev(cam->usbdev);
- } else {
+ } else
cam->state |= DEV_DISCONNECTED;
- et61x251_release_resources(cam);
- }
- mutex_unlock(&cam->dev_mutex);
+ wake_up_interruptible_all(&cam->wait_open);
- if (!cam->users)
- kfree(cam);
+ kref_put(&cam->kref, et61x251_release_resources);
- up_write(&et61x251_disconnect);
+ up_write(&et61x251_dev_lock);
}
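The disconnect path now relies on a kref shared with open()/release(): whichever of release() or disconnect() drops the last reference frees the device, replacing the old deferred-kfree bookkeeping. The user-space sketch below captures only the counting, with a plain int standing in for struct kref and no locking.

#include <stdio.h>
#include <stdlib.h>

struct cam {
        int refcount;
        int disconnected;
};

static void cam_release(struct cam *cam)
{
        printf("freeing device (disconnected=%d)\n", cam->disconnected);
        free(cam);
}

static void cam_put(struct cam *cam)
{
        if (--cam->refcount == 0)                /* last reference -> release */
                cam_release(cam);
}

int main(void)
{
        struct cam *cam = calloc(1, sizeof(*cam));

        cam->refcount = 1;                       /* reference held since probe */
        cam->refcount++;                         /* open(): kref_get() */
        cam->disconnected = 1;                   /* USB unplug while still open */
        cam_put(cam);                            /* disconnect drops the probe reference */
        cam_put(cam);                            /* close() drops the last one */
        return 0;
}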
diff --git a/drivers/media/video/et61x251/et61x251_sensor.h b/drivers/media/video/et61x251/et61x251_sensor.h
index 5fadb5de68bf..e14586330623 100644
--- a/drivers/media/video/et61x251/et61x251_sensor.h
+++ b/drivers/media/video/et61x251/et61x251_sensor.h
@@ -22,7 +22,7 @@
#define _ET61X251_SENSOR_H_
#include <linux/usb.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <linux/device.h>
#include <linux/stddef.h>
#include <linux/errno.h>
@@ -47,7 +47,7 @@ et61x251_match_id(struct et61x251_device* cam, const struct usb_device_id *id);
extern void
et61x251_attach_sensor(struct et61x251_device* cam,
- struct et61x251_sensor* sensor);
+ const struct et61x251_sensor* sensor);
/*****************************************************************************/
@@ -56,10 +56,10 @@ extern int et61x251_read_reg(struct et61x251_device*, u16 index);
extern int et61x251_i2c_write(struct et61x251_device*, u8 address, u8 value);
extern int et61x251_i2c_read(struct et61x251_device*, u8 address);
extern int et61x251_i2c_try_write(struct et61x251_device*,
- struct et61x251_sensor*, u8 address,
+ const struct et61x251_sensor*, u8 address,
u8 value);
extern int et61x251_i2c_try_read(struct et61x251_device*,
- struct et61x251_sensor*, u8 address);
+ const struct et61x251_sensor*, u8 address);
extern int et61x251_i2c_raw_write(struct et61x251_device*, u8 n, u8 data1,
u8 data2, u8 data3, u8 data4, u8 data5,
u8 data6, u8 data7, u8 data8, u8 address);
diff --git a/drivers/media/video/et61x251/et61x251_tas5130d1b.c b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
index b06643409842..04b7fbb310a8 100644
--- a/drivers/media/video/et61x251/et61x251_tas5130d1b.c
+++ b/drivers/media/video/et61x251/et61x251_tas5130d1b.c
@@ -69,7 +69,7 @@ static int tas5130d1b_set_ctrl(struct et61x251_device* cam,
}
-static struct et61x251_sensor tas5130d1b = {
+static const struct et61x251_sensor tas5130d1b = {
.name = "TAS5130D1B",
.interface = ET61X251_I2C_3WIRES,
.rsta = ET61X251_I2C_RSTA_STOP,
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index ed92b6f7187a..2d709e064679 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -37,6 +37,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/i2c-id.h>
#include <linux/workqueue.h>
#include <asm/semaphore.h>
@@ -60,21 +61,22 @@ MODULE_PARM_DESC(hauppauge, "Specify Hauppauge remote: 0=black, 1=grey (defaults
/* ----------------------------------------------------------------------- */
-static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+static int get_key_haup_common(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
+ int size, int offset)
{
- unsigned char buf[3];
+ unsigned char buf[6];
int start, range, toggle, dev, code;
/* poll IR chip */
- if (3 != i2c_master_recv(&ir->c,buf,3))
+ if (size != i2c_master_recv(&ir->c,buf,size))
return -EIO;
/* split rc5 data block ... */
- start = (buf[0] >> 7) & 1;
- range = (buf[0] >> 6) & 1;
- toggle = (buf[0] >> 5) & 1;
- dev = buf[0] & 0x1f;
- code = (buf[1] >> 2) & 0x3f;
+ start = (buf[offset] >> 7) & 1;
+ range = (buf[offset] >> 6) & 1;
+ toggle = (buf[offset] >> 5) & 1;
+ dev = buf[offset] & 0x1f;
+ code = (buf[offset+1] >> 2) & 0x3f;
/* rc5 has two start bits
* the first bit must be one
@@ -96,6 +98,16 @@ static int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
return 1;
}
+static inline int get_key_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+ return get_key_haup_common (ir, ir_key, ir_raw, 3, 0);
+}
+
+static inline int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+ return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
+}
+
static int get_key_pixelview(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char b;
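get_key_haup() and get_key_haup_xvr() are now thin inline wrappers around one body parameterized by poll size and RC5 byte offset (3/0 for the classic remote, 6/3 for the XVR variant). A reduced sketch of the same refactor; the decode keeps only the device/command extraction, and the sample bytes are made up.

#include <stdio.h>

static int decode_rc5_common(const unsigned char *buf, int offset,
                             int *dev, int *code)
{
        *dev  = buf[offset] & 0x1f;
        *code = (buf[offset + 1] >> 2) & 0x3f;
        return 1;
}

static inline int decode_rc5(const unsigned char *buf, int *dev, int *code)
{
        return decode_rc5_common(buf, 0, dev, code);
}

static inline int decode_rc5_xvr(const unsigned char *buf, int *dev, int *code)
{
        return decode_rc5_common(buf, 3, dev, code);
}

int main(void)
{
        unsigned char poll[6] = { 0, 0, 0, 0x9e, 0x54, 0 };
        int dev, code;

        decode_rc5_xvr(poll, &dev, &code);
        printf("dev=%d code=%d\n", dev, code);
        return 0;
}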
@@ -270,8 +282,9 @@ static void ir_timer(unsigned long data)
static void ir_work(struct work_struct *work)
{
struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
+
ir_key_poll(ir);
- mod_timer(&ir->timer, jiffies+HZ/10);
+ mod_timer(&ir->timer, jiffies + msecs_to_jiffies(100));
}
/* ----------------------------------------------------------------------- */
@@ -354,9 +367,21 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
case 0x7a:
case 0x47:
case 0x71:
- /* Handled by saa7134-input */
- name = "SAA713x remote";
- ir_type = IR_TYPE_OTHER;
+ if (adap->id == I2C_HW_B_CX2388x) {
+ /* Handled by cx88-input */
+ name = "CX2388x remote";
+ ir_type = IR_TYPE_RC5;
+ ir->get_key = get_key_haup_xvr;
+ if (hauppauge == 1) {
+ ir_codes = ir_codes_hauppauge_new;
+ } else {
+ ir_codes = ir_codes_rc5_tv;
+ }
+ } else {
+ /* Handled by saa7134-input */
+ name = "SAA713x remote";
+ ir_type = IR_TYPE_OTHER;
+ }
break;
default:
/* shouldn't happen */
@@ -450,6 +475,7 @@ static int ir_probe(struct i2c_adapter *adap)
static const int probe_bttv[] = { 0x1a, 0x18, 0x4b, 0x64, 0x30, -1};
static const int probe_saa7134[] = { 0x7a, 0x47, 0x71, -1 };
static const int probe_em28XX[] = { 0x30, 0x47, -1 };
+ static const int probe_cx88[] = { 0x18, 0x71, -1 };
const int *probe = NULL;
struct i2c_client c;
unsigned char buf;
@@ -468,6 +494,9 @@ static int ir_probe(struct i2c_adapter *adap)
case I2C_HW_B_EM28XX:
probe = probe_em28XX;
break;
+ case I2C_HW_B_CX2388x:
+ probe = probe_cx88;
+ break;
}
if (NULL == probe)
return 0;
diff --git a/drivers/media/video/ivtv/Kconfig b/drivers/media/video/ivtv/Kconfig
index 1aaeaa02f158..e43beb2c9cbf 100644
--- a/drivers/media/video/ivtv/Kconfig
+++ b/drivers/media/video/ivtv/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_IVTV
tristate "Conexant cx23416/cx23415 MPEG encoder/decoder support"
depends on VIDEO_V4L1 && VIDEO_V4L2 && PCI && I2C && EXPERIMENTAL
+ select I2C_ALGOBIT
select FW_LOADER
select VIDEO_TUNER
select VIDEO_TVEEPROM
@@ -16,11 +17,11 @@ config VIDEO_IVTV
select VIDEO_UPD64031A
select VIDEO_UPD64083
---help---
- This is a video4linux driver for Conexant cx23416 or cx23416 based
+ This is a video4linux driver for Conexant cx23416 or cx23415 based
PCI personal video recorder devices.
This is used in devices such as the Hauppauge PVR-150/250/350/500
- cards.
+ cards. There is a driver homepage at <http://www.ivtvdriver.org>.
To compile this driver as a module, choose M here: the
module will be called ivtv.
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index efc66355339a..d73d433a4ff6 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -56,7 +56,6 @@
#include "ivtv-gpio.h"
#include "ivtv-yuv.h"
-#include <linux/vermagic.h>
#include <media/tveeprom.h>
#include <media/v4l2-chip-ident.h>
@@ -181,7 +180,7 @@ MODULE_PARM_DESC(secam, "Set SECAM standard: B, G, H, D, K, L, LC");
MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J, K");
MODULE_PARM_DESC(debug,
"Debug level (bitmask). Default: errors only\n"
- "\t\t\t(debug = 511 gives full debugging)");
+ "\t\t\t(debug = 1023 gives full debugging)");
MODULE_PARM_DESC(ivtv_pci_latency,
"Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
"\t\t\tDefault: Yes");
@@ -276,9 +275,10 @@ int ivtv_waitq(wait_queue_head_t *waitq)
}
/* Generic utility functions */
-int ivtv_sleep_timeout(int timeout, int intr)
+int ivtv_msleep_timeout(unsigned int msecs, int intr)
{
int ret;
+ int timeout = msecs_to_jiffies(msecs);
do {
set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
@@ -339,6 +339,7 @@ static void ivtv_process_eeprom(struct ivtv *itv)
/* In a few cases the PCI subsystem IDs do not correctly
identify the card. A better method is to check the
model number from the eeprom instead. */
+ case 30012 ... 30039: /* Low profile PVR250 */
case 32000 ... 32999:
case 48000 ... 48099: /* 48??? range are PVR250s with a cx23415 */
case 48400 ... 48599:
@@ -426,7 +427,7 @@ static void ivtv_process_eeprom(struct ivtv *itv)
if (itv->options.newi2c == -1 && tv.has_ir != -1 && tv.has_ir != 2) {
itv->options.newi2c = (tv.has_ir & 2) ? 1 : 0;
if (itv->options.newi2c) {
- IVTV_INFO("reopen i2c bus for IR-blaster support\n");
+ IVTV_INFO("Reopen i2c bus for IR-blaster support\n");
exit_ivtv_i2c(itv);
init_ivtv_i2c(itv);
}
@@ -622,6 +623,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
+ mutex_init(&itv->serialize_lock);
mutex_init(&itv->i2c_bus_lock);
mutex_init(&itv->udma.lock);
@@ -949,7 +951,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
/* Make sure we've got a place for this card */
if (ivtv_cards_active == IVTV_MAX_CARDS) {
- printk(KERN_ERR "ivtv: Maximum number of cards detected (%d).\n",
+ printk(KERN_ERR "ivtv: Maximum number of cards detected (%d)\n",
ivtv_cards_active);
spin_unlock(&ivtv_cards_lock);
return -ENOMEM;
@@ -964,9 +966,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
itv->dev = dev;
itv->num = ivtv_cards_active++;
snprintf(itv->name, sizeof(itv->name) - 1, "ivtv%d", itv->num);
- if (itv->num) {
- printk(KERN_INFO "ivtv: ====================== NEXT CARD ======================\n");
- }
+ IVTV_INFO("Initializing card #%d\n", itv->num);
spin_unlock(&ivtv_cards_lock);
@@ -1213,7 +1213,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
if (itv->has_cx23415)
ivtv_set_osd_alpha(itv);
- IVTV_INFO("Initialized %s, card #%d\n", itv->card_name, itv->num);
+ IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name);
return 0;
@@ -1246,15 +1246,15 @@ static void ivtv_remove(struct pci_dev *pci_dev)
{
struct ivtv *itv = pci_get_drvdata(pci_dev);
- IVTV_DEBUG_INFO("Removing Card #%d.\n", itv->num);
+ IVTV_DEBUG_INFO("Removing Card #%d\n", itv->num);
/* Stop all captures */
- IVTV_DEBUG_INFO(" Stopping all streams.\n");
+ IVTV_DEBUG_INFO("Stopping all streams\n");
if (atomic_read(&itv->capturing) > 0)
ivtv_stop_all_captures(itv);
/* Stop all decoding */
- IVTV_DEBUG_INFO(" Stopping decoding.\n");
+ IVTV_DEBUG_INFO("Stopping decoding\n");
if (atomic_read(&itv->decoding) > 0) {
int type;
@@ -1267,33 +1267,30 @@ static void ivtv_remove(struct pci_dev *pci_dev)
}
/* Interrupts */
- IVTV_DEBUG_INFO(" Disabling interrupts.\n");
+ IVTV_DEBUG_INFO("Disabling interrupts\n");
ivtv_set_irq_mask(itv, 0xffffffff);
del_timer_sync(&itv->dma_timer);
/* Stop all Work Queues */
- IVTV_DEBUG_INFO(" Stop Work Queues.\n");
+ IVTV_DEBUG_INFO("Stop Work Queues\n");
flush_workqueue(itv->irq_work_queues);
destroy_workqueue(itv->irq_work_queues);
- IVTV_DEBUG_INFO(" Stopping Firmware.\n");
+ IVTV_DEBUG_INFO("Stopping Firmware\n");
ivtv_halt_firmware(itv);
- IVTV_DEBUG_INFO(" Unregistering v4l devices.\n");
+ IVTV_DEBUG_INFO("Unregistering v4l devices\n");
ivtv_streams_cleanup(itv);
- IVTV_DEBUG_INFO(" Freeing dma resources.\n");
+ IVTV_DEBUG_INFO("Freeing dma resources\n");
ivtv_udma_free(itv);
exit_ivtv_i2c(itv);
- IVTV_DEBUG_INFO(" Releasing irq.\n");
+ IVTV_DEBUG_INFO(" Releasing irq\n");
free_irq(itv->dev->irq, (void *)itv);
+ ivtv_iounmap(itv);
- if (itv->dev) {
- ivtv_iounmap(itv);
- }
-
- IVTV_DEBUG_INFO(" Releasing mem.\n");
+ IVTV_DEBUG_INFO(" Releasing mem\n");
release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (itv->has_cx23415)
@@ -1314,28 +1311,27 @@ static struct pci_driver ivtv_pci_driver = {
static int module_start(void)
{
- printk(KERN_INFO "ivtv: ==================== START INIT IVTV ====================\n");
- printk(KERN_INFO "ivtv: version %s (" VERMAGIC_STRING ") loading\n", IVTV_VERSION);
+ printk(KERN_INFO "ivtv: Start initialization, version %s\n", IVTV_VERSION);
memset(ivtv_cards, 0, sizeof(ivtv_cards));
/* Validate parameters */
if (ivtv_first_minor < 0 || ivtv_first_minor >= IVTV_MAX_CARDS) {
- printk(KERN_ERR "ivtv: ivtv_first_minor must be between 0 and %d. Exiting...\n",
+ printk(KERN_ERR "ivtv: Exiting, ivtv_first_minor must be between 0 and %d\n",
IVTV_MAX_CARDS - 1);
return -1;
}
- if (ivtv_debug < 0 || ivtv_debug > 511) {
+ if (ivtv_debug < 0 || ivtv_debug > 1023) {
ivtv_debug = 0;
- printk(KERN_INFO "ivtv: debug value must be >= 0 and <= 511!\n");
+ printk(KERN_INFO "ivtv: Debug value must be >= 0 and <= 1023\n");
}
if (pci_register_driver(&ivtv_pci_driver)) {
printk(KERN_ERR "ivtv: Error detecting PCI card\n");
return -ENODEV;
}
- printk(KERN_INFO "ivtv: ==================== END INIT IVTV ====================\n");
+ printk(KERN_INFO "ivtv: End initialization\n");
return 0;
}
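
A hedged sketch of the millisecond-based sleep helper that replaces ivtv_sleep_timeout() above; the real ivtv_msleep_timeout() body may differ in detail, but the idea is that callers now pass milliseconds, the jiffies conversion happens in one place, and a non-zero return means a signal is pending.

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int example_msleep_timeout(unsigned int msecs, int intr)
{
	long timeout = msecs_to_jiffies(msecs);

	do {
		set_current_state(intr ? TASK_INTERRUPTIBLE
				       : TASK_UNINTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
		if (intr && signal_pending(current))
			return -EINTR;	/* caller sees non-zero: signal pending */
	} while (timeout);
	return 0;
}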
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index e6e56f175f3f..91b588d261ae 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -268,6 +268,8 @@ extern const u32 yuv_offset[4];
#define IVTV_DBGFLG_IRQ (1 << 6)
#define IVTV_DBGFLG_DEC (1 << 7)
#define IVTV_DBGFLG_YUV (1 << 8)
+/* Flag to turn on high volume debugging */
+#define IVTV_DBGFLG_HIGHVOL (1 << 9)
/* NOTE: extra space before comma in 'itv->num , ## args' is required for
gcc-2.95, otherwise it won't compile. */
@@ -286,6 +288,21 @@ extern const u32 yuv_offset[4];
#define IVTV_DEBUG_DEC(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_DEC, "dec", fmt , ## args)
#define IVTV_DEBUG_YUV(fmt, args...) IVTV_DEBUG(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
+#define IVTV_DEBUG_HIGH_VOL(x, type, fmt, args...) \
+ do { \
+ if (((x) & ivtv_debug) && (ivtv_debug & IVTV_DBGFLG_HIGHVOL)) \
+ printk(KERN_INFO "ivtv%d " type ": " fmt, itv->num , ## args); \
+ } while (0)
+#define IVTV_DEBUG_HI_WARN(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_WARN, "warning", fmt , ## args)
+#define IVTV_DEBUG_HI_INFO(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_INFO, "info", fmt , ## args)
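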
+#define IVTV_DEBUG_HI_API(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_API, "api", fmt , ## args)
+#define IVTV_DEBUG_HI_DMA(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_DMA, "dma", fmt , ## args)
+#define IVTV_DEBUG_HI_IOCTL(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_IOCTL, "ioctl", fmt , ## args)
+#define IVTV_DEBUG_HI_I2C(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_I2C, "i2c", fmt , ## args)
+#define IVTV_DEBUG_HI_IRQ(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_IRQ, "irq", fmt , ## args)
+#define IVTV_DEBUG_HI_DEC(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_DEC, "dec", fmt , ## args)
+#define IVTV_DEBUG_HI_YUV(fmt, args...) IVTV_DEBUG_HIGH_VOL(IVTV_DBGFLG_YUV, "yuv", fmt , ## args)
+
#define IVTV_FB_DEBUG(x, type, fmt, args...) \
do { \
if ((x) & ivtv_debug) \
@@ -650,7 +667,6 @@ struct vbi_info {
/* convenience pointer to sliced struct in vbi_in union */
struct v4l2_sliced_vbi_format *sliced_in;
u32 service_set_in;
- u32 service_set_out;
int insert_mpeg;
/* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines.
@@ -723,6 +739,7 @@ struct ivtv {
int search_pack_header;
spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
+ struct mutex serialize_lock; /* lock used to serialize starting streams */
/* User based DMA for OSD */
struct ivtv_user_dma udma;
@@ -831,7 +848,7 @@ int ivtv_set_output_mode(struct ivtv *itv, int mode);
struct ivtv_stream *ivtv_get_output_stream(struct ivtv *itv);
/* Return non-zero if a signal is pending */
-int ivtv_sleep_timeout(int timeout, int intr);
+int ivtv_msleep_timeout(unsigned int msecs, int intr);
/* Wait on queue, returns -EINTR if interrupted */
int ivtv_waitq(wait_queue_head_t *waitq);
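
The arithmetic behind the new high-volume macros and the 511 to 1023 limit change, shown as a small standalone check: a HI_* message is emitted only when both its category bit and the new IVTV_DBGFLG_HIGHVOL bit (1 << 9) are set. The helper below is illustrative only.

#include <stdio.h>

#define DBGFLG_IRQ     (1 << 6)
#define DBGFLG_HIGHVOL (1 << 9)

static int hi_msg_enabled(unsigned debug, unsigned category)
{
	return (debug & category) && (debug & DBGFLG_HIGHVOL);
}

int main(void)
{
	printf("debug=511:  hi-vol irq messages? %d\n",
	       hi_msg_enabled(511, DBGFLG_IRQ));   /* 0: bit 9 not set */
	printf("debug=1023: hi-vol irq messages? %d\n",
	       hi_msg_enabled(1023, DBGFLG_IRQ));  /* 1: all ten bits set */
	return 0;
}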
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index 555d5e6369c3..8e97a938398f 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -218,7 +218,7 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
/* Process pending program info updates and pending VBI data */
ivtv_update_pgm_info(itv);
- if (jiffies - itv->dualwatch_jiffies > HZ) {
+ if (jiffies - itv->dualwatch_jiffies > msecs_to_jiffies(1000)) {
itv->dualwatch_jiffies = jiffies;
ivtv_dualwatch(itv);
}
@@ -406,7 +406,7 @@ static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t co
ssize_t rc = count ? ivtv_read(s, ubuf, count, non_block) : 0;
struct ivtv *itv = s->itv;
- IVTV_DEBUG_INFO("read %zd from %s, got %zd\n", count, s->name, rc);
+ IVTV_DEBUG_HI_INFO("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
pos += rc;
return rc;
@@ -497,7 +497,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
struct ivtv_stream *s = &itv->streams[id->type];
int rc;
- IVTV_DEBUG_IOCTL("read %zd bytes from %s\n", count, s->name);
+ IVTV_DEBUG_HI_IOCTL("read %zd bytes from %s\n", count, s->name);
rc = ivtv_start_capture(id);
if (rc)
@@ -535,7 +535,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
int rc;
DEFINE_WAIT(wait);
- IVTV_DEBUG_IOCTL("write %zd bytes to %s\n", count, s->name);
+ IVTV_DEBUG_HI_IOCTL("write %zd bytes to %s\n", count, s->name);
if (s->type != IVTV_DEC_STREAM_TYPE_MPG &&
s->type != IVTV_DEC_STREAM_TYPE_YUV &&
@@ -643,7 +643,7 @@ retry:
to transfer the rest. */
if (count && !(filp->f_flags & O_NONBLOCK))
goto retry;
- IVTV_DEBUG_INFO("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
+ IVTV_DEBUG_HI_INFO("Wrote %d bytes to %s (%d)\n", bytes_written, s->name, s->q_full.bytesused);
return bytes_written;
}
@@ -832,7 +832,7 @@ int ivtv_v4l2_open(struct inode *inode, struct file *filp)
if (itv == NULL) {
/* Couldn't find a device registered
on that minor, shouldn't happen! */
- printk(KERN_WARNING "ivtv: no ivtv device found on minor %d\n", minor);
+ printk(KERN_WARNING "ivtv: No ivtv device found on minor %d\n", minor);
return -ENXIO;
}
@@ -924,7 +924,7 @@ void ivtv_unmute(struct ivtv *itv)
if (atomic_read(&itv->capturing) == 0)
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
- ivtv_sleep_timeout(HZ / 10, 0);
+ ivtv_msleep_timeout(100, 0);
if (atomic_read(&itv->capturing)) {
ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12);
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
index d4c910b782af..d0feabf93080 100644
--- a/drivers/media/video/ivtv/ivtv-firmware.c
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -36,7 +36,7 @@
#define IVTV_CMD_SPU_STOP 0x00000001
#define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A
#define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640
-#define IVTV_SDRAM_SLEEPTIME (60 * HZ / 100) /* 600 ms */
+#define IVTV_SDRAM_SLEEPTIME 600
#define IVTV_DECODE_INIT_MPEG_FILENAME "v4l-cx2341x-init.mpg"
#define IVTV_DECODE_INIT_MPEG_SIZE (152*1024)
@@ -56,14 +56,12 @@ retry:
volatile u32 __iomem *dst = (volatile u32 __iomem *)mem;
const u32 *src = (const u32 *)fw->data;
- /* temporarily allow 256 KB encoding firmwares as well for
- compatibility with blackbird cards */
- if (fw->size != size && fw->size != 256 * 1024) {
+ if (fw->size != size) {
/* Due to race conditions in firmware loading (esp. with udev <0.95)
the wrong file was sometimes loaded. So we check filesizes to
see if at least the right-sized file was loaded. If not, then we
retry. */
- IVTV_INFO("retry: file loaded was not %s (expected size %ld, got %zd)\n", fn, size, fw->size);
+ IVTV_INFO("Retry: file loaded was not %s (expected size %ld, got %zd)\n", fn, size, fw->size);
release_firmware(fw);
retries--;
goto retry;
@@ -75,11 +73,11 @@ retry:
src++;
}
release_firmware(fw);
- IVTV_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
+ IVTV_INFO("Loaded %s firmware (%zd bytes)\n", fn, fw->size);
return size;
}
- IVTV_ERR("unable to open firmware %s (must be %ld bytes)\n", fn, size);
- IVTV_ERR("did you put the firmware in the hotplug firmware directory?\n");
+ IVTV_ERR("Unable to open firmware %s (must be %ld bytes)\n", fn, size);
+ IVTV_ERR("Did you put the firmware in the hotplug firmware directory?\n");
return -ENOMEM;
}
@@ -91,7 +89,7 @@ void ivtv_halt_firmware(struct ivtv *itv)
if (itv->enc_mbox.mbox)
ivtv_vapi(itv, CX2341X_ENC_HALT_FW, 0);
- ivtv_sleep_timeout(HZ / 100, 0);
+ ivtv_msleep_timeout(10, 0);
itv->enc_mbox.mbox = itv->dec_mbox.mbox = NULL;
IVTV_DEBUG_INFO("Stopping VDM\n");
@@ -115,7 +113,7 @@ void ivtv_halt_firmware(struct ivtv *itv)
IVTV_DEBUG_INFO("Stopping SPU\n");
write_reg(IVTV_CMD_SPU_STOP, IVTV_REG_SPU);
- ivtv_sleep_timeout(HZ / 100, 0);
+ ivtv_msleep_timeout(10, 0);
IVTV_DEBUG_INFO("init Encoder SDRAM pre-charge\n");
write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_ENC_SDRAM_PRECHARGE);
@@ -131,9 +129,8 @@ void ivtv_halt_firmware(struct ivtv *itv)
write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_DEC_SDRAM_REFRESH);
}
- IVTV_DEBUG_INFO("Sleeping for %dms (600 recommended)\n",
- (int)(IVTV_SDRAM_SLEEPTIME * 1000 / HZ));
- ivtv_sleep_timeout(IVTV_SDRAM_SLEEPTIME, 0);
+ IVTV_DEBUG_INFO("Sleeping for %dms\n", IVTV_SDRAM_SLEEPTIME);
+ ivtv_msleep_timeout(IVTV_SDRAM_SLEEPTIME, 0);
}
void ivtv_firmware_versions(struct ivtv *itv)
@@ -206,12 +203,12 @@ int ivtv_firmware_init(struct ivtv *itv)
/* start firmware */
write_reg(read_reg(IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE, IVTV_REG_SPU);
- ivtv_sleep_timeout(HZ / 10, 0);
+ ivtv_msleep_timeout(100, 0);
if (itv->has_cx23415)
write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE15, IVTV_REG_VPU);
else
write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE16, IVTV_REG_VPU);
- ivtv_sleep_timeout(HZ / 10, 0);
+ ivtv_msleep_timeout(100, 0);
/* find mailboxes and ping firmware */
itv->enc_mbox.mbox = ivtv_search_mailbox(itv->enc_mem, IVTV_ENCODER_SIZE);
@@ -266,7 +263,7 @@ void ivtv_init_mpeg_decoder(struct ivtv *itv)
IVTV_DECODE_INIT_MPEG_FILENAME);
} else {
ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, readbytes, 0);
- ivtv_sleep_timeout(HZ / 10, 0);
+ ivtv_msleep_timeout(100, 0);
}
ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 4, 0, 0, 0, 1);
}
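
A simplified sketch of the size-checked, retried firmware load used above. The driver actually writes 32-bit words to I/O memory; the plain memcpy(), names and retry count here are placeholders. A wrong-sized file, for example one raced in by an old udev, is released and the request repeated.

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_load_fw(struct device *dev, const char *fn,
			   void *dst, size_t expected)
{
	const struct firmware *fw;
	int retries = 3;

	while (retries--) {
		if (request_firmware(&fw, fn, dev))
			break;
		if (fw->size == expected) {
			memcpy(dst, fw->data, expected);
			release_firmware(fw);
			return 0;
		}
		/* wrong file loaded (e.g. udev race): drop it and retry */
		release_firmware(fw);
	}
	return -ENOMEM;
}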
diff --git a/drivers/media/video/ivtv/ivtv-gpio.c b/drivers/media/video/ivtv/ivtv-gpio.c
index bc8f8ca2961f..6a5a7aa66976 100644
--- a/drivers/media/video/ivtv/ivtv-gpio.c
+++ b/drivers/media/video/ivtv/ivtv-gpio.c
@@ -115,8 +115,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
curout = (curout & ~0xF) | 1;
write_reg(curout, IVTV_REG_GPIO_OUT);
/* We could use something else for smaller time */
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_timeout_interruptible(msecs_to_jiffies(1));
curout |= 2;
write_reg(curout, IVTV_REG_GPIO_OUT);
curdir &= ~0x80;
@@ -131,20 +130,18 @@ int ivtv_reset_tuner_gpio(enum v4l2_tuner_type mode, void *priv, int ptr)
if (itv->card->type != IVTV_CARD_PG600V2 || itv->options.tuner != TUNER_XCEIVE_XC3028)
return -EINVAL;
- IVTV_INFO("Resetting tuner.\n");
+ IVTV_INFO("Resetting tuner\n");
curout = read_reg(IVTV_REG_GPIO_OUT);
curdir = read_reg(IVTV_REG_GPIO_DIR);
curdir |= (1 << 12); /* GPIO bit 12 */
curout &= ~(1 << 12);
write_reg(curout, IVTV_REG_GPIO_OUT);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_timeout_interruptible(msecs_to_jiffies(1));
curout |= (1 << 12);
write_reg(curout, IVTV_REG_GPIO_OUT);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_timeout_interruptible(msecs_to_jiffies(1));
return 0;
}
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 50624c6a62a5..b3557435456d 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -144,7 +144,7 @@ static int attach_inform(struct i2c_client *client)
}
}
if (i == I2C_CLIENTS_MAX) {
- IVTV_ERR("insufficient room for new I2C client!\n");
+ IVTV_ERR("Insufficient room for new I2C client\n");
}
return 0;
}
@@ -236,7 +236,7 @@ static int ivtv_ack(struct ivtv *itv)
int ret = 0;
if (ivtv_getscl(itv) == 1) {
- IVTV_DEBUG_I2C("SCL was high starting an ack\n");
+ IVTV_DEBUG_HI_I2C("SCL was high starting an ack\n");
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("Could not set SCL low starting an ack\n");
@@ -263,7 +263,7 @@ static int ivtv_sendbyte(struct ivtv *itv, unsigned char byte)
{
int i, bit;
- IVTV_DEBUG_I2C("write %x\n",byte);
+ IVTV_DEBUG_HI_I2C("write %x\n",byte);
for (i = 0; i < 8; ++i, byte<<=1) {
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
@@ -318,7 +318,7 @@ static int ivtv_readbyte(struct ivtv *itv, unsigned char *byte, int nack)
ivtv_scldelay(itv);
ivtv_setscl(itv, 0);
ivtv_scldelay(itv);
- IVTV_DEBUG_I2C("read %x\n",*byte);
+ IVTV_DEBUG_HI_I2C("read %x\n",*byte);
return 0;
}
@@ -330,7 +330,7 @@ static int ivtv_start(struct ivtv *itv)
sda = ivtv_getsda(itv);
if (sda != 1) {
- IVTV_DEBUG_I2C("SDA was low at start\n");
+ IVTV_DEBUG_HI_I2C("SDA was low at start\n");
ivtv_setsda(itv, 1);
if (!ivtv_waitsda(itv, 1)) {
IVTV_DEBUG_I2C("SDA stuck low\n");
@@ -355,7 +355,7 @@ static int ivtv_stop(struct ivtv *itv)
int i;
if (ivtv_getscl(itv) != 0) {
- IVTV_DEBUG_I2C("SCL not low when stopping\n");
+ IVTV_DEBUG_HI_I2C("SCL not low when stopping\n");
ivtv_setscl(itv, 0);
if (!ivtv_waitscl(itv, 0)) {
IVTV_DEBUG_I2C("SCL could not be set low\n");
@@ -569,7 +569,7 @@ int ivtv_call_i2c_client(struct ivtv *itv, int addr, unsigned int cmd, void *arg
}
}
if (cmd != VIDIOC_G_CHIP_IDENT)
- IVTV_ERR("i2c addr 0x%02x not found for command 0x%x!\n", addr, cmd);
+ IVTV_ERR("i2c addr 0x%02x not found for command 0x%x\n", addr, cmd);
return -ENODEV;
}
@@ -640,7 +640,7 @@ int ivtv_i2c_hw(struct ivtv *itv, u32 hw, unsigned int cmd, void *arg)
addr = ivtv_i2c_hw_addr(itv, hw);
if (addr < 0) {
- IVTV_ERR("i2c hardware 0x%08x (%s) not found for command 0x%x!\n",
+ IVTV_ERR("i2c hardware 0x%08x (%s) not found for command 0x%x\n",
hw, ivtv_i2c_hw_name(hw), cmd);
return addr;
}
@@ -655,7 +655,7 @@ int ivtv_i2c_id(struct ivtv *itv, u32 id, unsigned int cmd, void *arg)
addr = ivtv_i2c_id_addr(itv, id);
if (addr < 0) {
if (cmd != VIDIOC_G_CHIP_IDENT)
- IVTV_ERR("i2c ID 0x%08x (%s) not found for command 0x%x!\n",
+ IVTV_ERR("i2c ID 0x%08x (%s) not found for command 0x%x\n",
id, ivtv_i2c_id_name(id), cmd);
return addr;
}
@@ -696,7 +696,7 @@ int ivtv_upd64083(struct ivtv *itv, unsigned int cmd, void *arg)
void ivtv_call_i2c_clients(struct ivtv *itv, unsigned int cmd, void *arg)
{
if (itv->i2c_adap.algo == NULL) {
- IVTV_ERR("adapter is not set");
+ IVTV_ERR("Adapter is not set");
return;
}
i2c_clients_command(&itv->i2c_adap, cmd, arg);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 57af1762de1f..4773453e8dab 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1159,7 +1159,7 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
memset(fb, 0, sizeof(*fb));
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
- break;
+ return -EINVAL;
fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY |
V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_GLOBAL_ALPHA;
fb->fmt.pixelformat = itv->osd_pixelformat;
@@ -1179,7 +1179,7 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
struct v4l2_framebuffer *fb = arg;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
- break;
+ return -EINVAL;
itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
itv->osd_local_alpha_state = (fb->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) != 0;
itv->osd_color_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0;
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index ba98bf054f2e..fcd6e7f5f121 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -48,7 +48,7 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
struct list_head *p;
int i = 0;
- IVTV_DEBUG_DMA("ivtv_pio_work_handler\n");
+ IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
s->v4l2dev == NULL || !ivtv_use_pio(s)) {
itv->cur_pio_stream = -1;
@@ -56,7 +56,7 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
return;
}
- IVTV_DEBUG_DMA("Process PIO %s\n", s->name);
+ IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
list_for_each(p, &s->q_dma.list) {
struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
@@ -187,7 +187,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
bytes_needed += UVsize;
}
- IVTV_DEBUG_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
+ IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
@@ -242,7 +242,7 @@ static void dma_post(struct ivtv_stream *s)
u32 *u32buf;
int x = 0;
- IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
+ IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
s->name, s->dma_offset);
list_for_each(p, &s->q_dma.list) {
buf = list_entry(p, struct ivtv_buffer, list);
@@ -321,7 +321,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
unsigned long flags = 0;
int idx = 0;
- IVTV_DEBUG_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
+ IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
list_for_each(p, &s->q_predma.list) {
struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
@@ -368,7 +368,7 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
int i;
- IVTV_DEBUG_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
+ IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
if (s->q_predma.bytesused)
ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
@@ -397,12 +397,17 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
itv->vbi.dma_offset = s_vbi->dma_offset;
s_vbi->SG_length = 0;
set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
- IVTV_DEBUG_DMA("include DMA for %s\n", s->name);
+ IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
}
/* Mark last buffer size for Interrupt flag */
s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
+ if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
+ set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+ else
+ clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+
if (ivtv_use_pio(s)) {
for (i = 0; i < s->SG_length; i++) {
s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
@@ -420,7 +425,7 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
set_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = s->type;
- itv->dma_timer.expires = jiffies + HZ / 10;
+ itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
add_timer(&itv->dma_timer);
}
}
@@ -431,13 +436,13 @@ static void ivtv_dma_dec_start(struct ivtv_stream *s)
if (s->q_predma.bytesused)
ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
- IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
+ IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
/* put SG Handle into register 0x0c */
write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
set_bit(IVTV_F_I_DMA, &itv->i_flags);
itv->cur_dma_stream = s->type;
- itv->dma_timer.expires = jiffies + HZ / 10;
+ itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
add_timer(&itv->dma_timer);
}
@@ -447,7 +452,7 @@ static void ivtv_irq_dma_read(struct ivtv *itv)
struct ivtv_buffer *buf;
int hw_stream_type;
- IVTV_DEBUG_IRQ("DEC DMA READ\n");
+ IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
del_timer(&itv->dma_timer);
if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
@@ -462,7 +467,7 @@ static void ivtv_irq_dma_read(struct ivtv *itv)
s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
hw_stream_type = 0;
}
- IVTV_DEBUG_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
+ IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
ivtv_stream_sync_for_cpu(s);
@@ -495,7 +500,7 @@ static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
del_timer(&itv->dma_timer);
ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
- IVTV_DEBUG_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
+ IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
data[1] = 3;
else if (data[1] > 2)
@@ -532,7 +537,7 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
return;
}
s = &itv->streams[itv->cur_pio_stream];
- IVTV_DEBUG_IRQ("ENC PIO COMPLETE %s\n", s->name);
+ IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
s->SG_length = 0;
clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
clear_bit(IVTV_F_I_PIO, &itv->i_flags);
@@ -590,14 +595,13 @@ static void ivtv_irq_enc_start_cap(struct ivtv *itv)
/* Get DMA destination and size arguments from card */
ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
- IVTV_DEBUG_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
+ IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
data[0], data[1], data[2]);
return;
}
- clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
s = &itv->streams[ivtv_stream_map[data[0]]];
if (!stream_enc_dma_append(s, data)) {
set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
@@ -610,7 +614,7 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s;
- IVTV_DEBUG_IRQ("ENC START VBI CAP\n");
+ IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
/* If more than two VBI buffers are pending, then
@@ -634,7 +638,6 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
then start a DMA request for just the VBI data. */
if (!stream_enc_dma_append(s, data) &&
!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
- set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
}
@@ -644,7 +647,7 @@ static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
u32 data[CX2341X_MBOX_MAX_DATA];
struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
- IVTV_DEBUG_IRQ("DEC VBI REINSERT\n");
+ IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
!stream_enc_dma_append(s, data)) {
set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
@@ -669,7 +672,7 @@ static void ivtv_irq_dec_data_req(struct ivtv *itv)
itv->dma_data_req_offset = data[1];
s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
}
- IVTV_DEBUG_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
+ IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
itv->dma_data_req_offset, itv->dma_data_req_size);
if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
@@ -791,10 +794,10 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
/* Exclude interrupts noted below from the output, otherwise the log is flooded with
these messages */
if (combo & ~0xff6d0400)
- IVTV_DEBUG_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
+ IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
- IVTV_DEBUG_IRQ("DEC DMA COMPLETE\n");
+ IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
}
if (combo & IVTV_IRQ_DMA_READ) {
diff --git a/drivers/media/video/ivtv/ivtv-mailbox.c b/drivers/media/video/ivtv/ivtv-mailbox.c
index 6ae42a3b03cc..814a673712b3 100644
--- a/drivers/media/video/ivtv/ivtv-mailbox.c
+++ b/drivers/media/video/ivtv/ivtv-mailbox.c
@@ -37,6 +37,7 @@
#define API_RESULT (1 << 1) /* Allow 1 second for this cmd to end */
#define API_FAST_RESULT (3 << 1) /* Allow 0.1 second for this cmd to end */
#define API_DMA (1 << 3) /* DMA mailbox, has special handling */
+#define API_HIGH_VOL (1 << 5) /* High volume command (i.e. called during encoding or decoding) */
#define API_NO_WAIT_MB (1 << 4) /* Command may not wait for a free mailbox */
#define API_NO_WAIT_RES (1 << 5) /* Command may not wait for the result */
@@ -77,11 +78,11 @@ static const struct ivtv_api_info api_info[256] = {
API_ENTRY(CX2341X_ENC_SET_DMA_BLOCK_SIZE, API_CACHE),
API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_10, API_FAST_RESULT),
API_ENTRY(CX2341X_ENC_GET_PREV_DMA_INFO_MB_9, API_FAST_RESULT),
- API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, API_DMA),
+ API_ENTRY(CX2341X_ENC_SCHED_DMA_TO_HOST, API_DMA | API_HIGH_VOL),
API_ENTRY(CX2341X_ENC_INITIALIZE_INPUT, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_FRAME_DROP_RATE, API_CACHE),
API_ENTRY(CX2341X_ENC_PAUSE_ENCODER, API_RESULT),
- API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB),
+ API_ENTRY(CX2341X_ENC_REFRESH_INPUT, API_NO_WAIT_MB | API_HIGH_VOL),
API_ENTRY(CX2341X_ENC_SET_COPYRIGHT, API_CACHE),
API_ENTRY(CX2341X_ENC_SET_EVENT_NOTIFICATION, API_RESULT),
API_ENTRY(CX2341X_ENC_SET_NUM_VSYNC_LINES, API_CACHE),
@@ -102,7 +103,7 @@ static const struct ivtv_api_info api_info[256] = {
API_ENTRY(CX2341X_DEC_SET_DMA_BLOCK_SIZE, API_CACHE),
API_ENTRY(CX2341X_DEC_GET_XFER_INFO, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_GET_DMA_STATUS, API_FAST_RESULT),
- API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA),
+ API_ENTRY(CX2341X_DEC_SCHED_DMA_FROM_HOST, API_DMA | API_HIGH_VOL),
API_ENTRY(CX2341X_DEC_PAUSE_PLAYBACK, API_RESULT),
API_ENTRY(CX2341X_DEC_HALT_FW, API_FAST_RESULT),
API_ENTRY(CX2341X_DEC_SET_STANDARD, API_CACHE),
@@ -175,9 +176,9 @@ static int get_mailbox(struct ivtv *itv, struct ivtv_mailbox_data *mbdata, int f
/* Sleep before a retry, if not atomic */
if (!(flags & API_NO_WAIT_MB)) {
- if (jiffies - then > retries * HZ / 100)
+ if (jiffies - then > msecs_to_jiffies(10*retries))
break;
- ivtv_sleep_timeout(HZ / 100, 0);
+ ivtv_msleep_timeout(10, 0);
}
}
return -ENODEV;
@@ -212,7 +213,7 @@ static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
{
struct ivtv_mailbox_data *mbdata = (cmd >= 128) ? &itv->enc_mbox : &itv->dec_mbox;
volatile struct ivtv_mailbox __iomem *mbox;
- int api_timeout = HZ;
+ int api_timeout = msecs_to_jiffies(1000);
int flags, mb, i;
unsigned long then;
@@ -227,7 +228,12 @@ static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
return -EINVAL;
}
- IVTV_DEBUG_API("API Call: %s\n", api_info[cmd].name);
+ if (api_info[cmd].flags & API_HIGH_VOL) {
+ IVTV_DEBUG_HI_API("API Call: %s\n", api_info[cmd].name);
+ }
+ else {
+ IVTV_DEBUG_API("API Call: %s\n", api_info[cmd].name);
+ }
/* clear possibly uninitialized part of data array */
for (i = args; i < CX2341X_MBOX_MAX_DATA; i++)
@@ -237,7 +243,7 @@ static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
data, then just return 0 as there is no need to issue this command again.
Just an optimization to prevent unnecessary use of mailboxes. */
if (itv->api_cache[cmd].last_jiffies &&
- jiffies - itv->api_cache[cmd].last_jiffies < HZ * 1800 &&
+ jiffies - itv->api_cache[cmd].last_jiffies < msecs_to_jiffies(1800000) &&
!memcmp(data, itv->api_cache[cmd].data, sizeof(itv->api_cache[cmd].data))) {
itv->api_cache[cmd].last_jiffies = jiffies;
return 0;
@@ -262,7 +268,7 @@ static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
}
if ((flags & API_FAST_RESULT) == API_FAST_RESULT)
- api_timeout = HZ / 10;
+ api_timeout = msecs_to_jiffies(100);
mb = get_mailbox(itv, mbdata, flags);
if (mb < 0) {
@@ -295,11 +301,12 @@ static int ivtv_api_call(struct ivtv *itv, int cmd, int args, u32 data[])
if (flags & API_NO_WAIT_RES)
mdelay(1);
else
- ivtv_sleep_timeout(HZ / 100, 0);
+ ivtv_msleep_timeout(10, 0);
}
- if (jiffies - then > HZ / 10)
- IVTV_DEBUG_WARN("%s took %lu jiffies (%d per HZ)\n",
- api_info[cmd].name, jiffies - then, HZ);
+ if (jiffies - then > msecs_to_jiffies(100))
+ IVTV_DEBUG_WARN("%s took %u jiffies\n",
+ api_info[cmd].name,
+ jiffies_to_msecs(jiffies - then));
for (i = 0; i < CX2341X_MBOX_MAX_DATA; i++)
data[i] = readl(&mbox->data[i]);
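
How the per-command timeout falls out of the flag bits used in this table, as a small sketch (the EX_ macro and function names are stand-ins): API_FAST_RESULT includes the API_RESULT bit, so fast commands get 100 ms while ordinary result-bearing commands keep the 1 s default.

#include <linux/jiffies.h>

#define EX_API_RESULT      (1 << 1)
#define EX_API_FAST_RESULT (3 << 1)	/* includes the EX_API_RESULT bit */

static unsigned long example_api_timeout(int flags)
{
	if ((flags & EX_API_FAST_RESULT) == EX_API_FAST_RESULT)
		return msecs_to_jiffies(100);	/* "fast" command */
	return msecs_to_jiffies(1000);		/* default: allow 1 second */
}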
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 6af88ae9295f..322b347b67c2 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -446,6 +446,9 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
if (s->v4l2dev == NULL)
return -EINVAL;
+ /* Big serialization lock to ensure no two streams are started
+ simultaneously: that can give all sorts of weird results. */
+ mutex_lock(&itv->serialize_lock);
IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);
switch (s->type) {
@@ -487,6 +490,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
0, sizeof(itv->vbi.sliced_mpeg_size));
break;
default:
+ mutex_unlock(&itv->serialize_lock);
return -EINVAL;
}
s->subtype = subtype;
@@ -561,13 +565,14 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
/* Initialize Digitizer for Capture */
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
- ivtv_sleep_timeout(HZ / 10, 0);
+ ivtv_msleep_timeout(100, 0);
}
/* begin_capture */
if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype))
{
IVTV_DEBUG_WARN( "Error starting capture!\n");
+ mutex_unlock(&itv->serialize_lock);
return -EINVAL;
}
@@ -583,6 +588,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
/* you're live! sit back and await interrupts :) */
atomic_inc(&itv->capturing);
+ mutex_unlock(&itv->serialize_lock);
return 0;
}
@@ -762,17 +768,6 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
/* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);
- /* only run these if we're shutting down the last cap */
- if (atomic_read(&itv->capturing) - 1 == 0) {
- /* event notification (off) */
- if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
- /* type: 0 = refresh */
- /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
- ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
- ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
- }
- }
-
then = jiffies;
if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
@@ -786,8 +781,9 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
set_current_state(TASK_INTERRUPTIBLE);
/* wait 2s for EOS interrupt */
- while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) && jiffies < then + 2 * HZ) {
- schedule_timeout(HZ / 100);
+ while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
+ jiffies < then + msecs_to_jiffies(2000)) {
+ schedule_timeout(msecs_to_jiffies(10));
}
/* To convert jiffies to ms, we must multiply by 1000
@@ -812,7 +808,6 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
then = jiffies;
/* Make sure DMA is complete */
add_wait_queue(&s->waitq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
do {
/* check if DMA is pending */
if ((s->type == IVTV_ENC_STREAM_TYPE_MPG) && /* MPG Only */
@@ -827,9 +822,8 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
} else if (read_reg(IVTV_REG_DMASTATUS) & 0x02) {
break;
}
-
- ivtv_sleep_timeout(HZ / 100, 1);
- } while (then + HZ * 2 > jiffies);
+ } while (!ivtv_msleep_timeout(10, 1) &&
+ then + msecs_to_jiffies(2000) > jiffies);
set_current_state(TASK_RUNNING);
remove_wait_queue(&s->waitq, &wait);
@@ -840,17 +834,30 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
/* Clear capture and no-read bits */
clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
+ /* ensure these global cleanup actions are done only once */
+ mutex_lock(&itv->serialize_lock);
+
if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
if (atomic_read(&itv->capturing) > 0) {
+ mutex_unlock(&itv->serialize_lock);
return 0;
}
/* Set the following Interrupt mask bits for capture */
ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
+ /* event notification (off) */
+ if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
+ /* type: 0 = refresh */
+ /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
+ ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
+ ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
+ }
+
wake_up(&s->waitq);
+ mutex_unlock(&itv->serialize_lock);
return 0;
}
@@ -887,7 +894,7 @@ int ivtv_stop_v4l2_decode_stream(struct ivtv_stream *s, int flags, u64 pts)
break;
tmp = data[3];
}
- if (ivtv_sleep_timeout(HZ/10, 1))
+ if (ivtv_msleep_timeout(100, 1))
break;
}
}
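
The new serialize_lock follows the usual lock-and-unlock-on-every-exit pattern; a minimal sketch, with illustrative names rather than the driver's own:

#include <linux/mutex.h>
#include <linux/errno.h>

struct example_dev {
	struct mutex serialize_lock;	/* serializes stream start/stop */
	int capturing;
};

static int example_start_stream(struct example_dev *dev, int type)
{
	int ret = 0;

	mutex_lock(&dev->serialize_lock);
	if (type < 0) {			/* invalid stream type */
		ret = -EINVAL;
		goto out;
	}
	dev->capturing++;		/* "you're live" */
out:
	mutex_unlock(&dev->serialize_lock);
	return ret;
}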
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index 3ba46e07ea1f..a7282a91bd97 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -219,31 +219,23 @@ ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count)
int found_cc = 0;
int cc_pos = itv->vbi.cc_pos;
- if (itv->vbi.service_set_out == 0)
- return -EPERM;
-
while (count >= sizeof(struct v4l2_sliced_vbi_data)) {
switch (p->id) {
case V4L2_SLICED_CAPTION_525:
- if (p->id == V4L2_SLICED_CAPTION_525 &&
- p->line == 21 &&
- (itv->vbi.service_set_out &
- V4L2_SLICED_CAPTION_525) == 0) {
- break;
- }
- found_cc = 1;
- if (p->field) {
- cc[2] = p->data[0];
- cc[3] = p->data[1];
- } else {
- cc[0] = p->data[0];
- cc[1] = p->data[1];
+ if (p->line == 21) {
+ found_cc = 1;
+ if (p->field) {
+ cc[2] = p->data[0];
+ cc[3] = p->data[1];
+ } else {
+ cc[0] = p->data[0];
+ cc[1] = p->data[1];
+ }
}
break;
case V4L2_SLICED_VPS:
- if (p->line == 16 && p->field == 0 &&
- (itv->vbi.service_set_out & V4L2_SLICED_VPS)) {
+ if (p->line == 16 && p->field == 0) {
itv->vbi.vps[0] = p->data[2];
itv->vbi.vps[1] = p->data[8];
itv->vbi.vps[2] = p->data[9];
@@ -255,8 +247,7 @@ ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count)
break;
case V4L2_SLICED_WSS_625:
- if (p->line == 23 && p->field == 0 &&
- (itv->vbi.service_set_out & V4L2_SLICED_WSS_625)) {
+ if (p->line == 23 && p->field == 0) {
/* No lock needed for WSS */
itv->vbi.wss = p->data[0] | (p->data[1] << 8);
itv->vbi.wss_found = 1;
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 664aba8b4d85..7533fc203319 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1809,7 +1809,6 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
{
int ret = -EBUSY;
unsigned long mchip_adr;
- u8 revision;
if (meye.mchip_dev != NULL) {
printk(KERN_ERR "meye: only one device allowed!\n");
@@ -1885,7 +1884,6 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
goto outreqirq;
}
- pci_read_config_byte(meye.mchip_dev, PCI_REVISION_ID, &revision);
pci_write_config_byte(meye.mchip_dev, PCI_CACHE_LINE_SIZE, 8);
pci_write_config_byte(meye.mchip_dev, PCI_LATENCY_TIMER, 64);
@@ -1939,7 +1937,7 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
printk(KERN_INFO "meye: Motion Eye Camera Driver v%s.\n",
MEYE_DRIVER_VERSION);
printk(KERN_INFO "meye: mchip KL5A72002 rev. %d, base %lx, irq %d\n",
- revision, mchip_adr, meye.mchip_irq);
+ meye.mchip_dev->revision, mchip_adr, meye.mchip_irq);
return 0;
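
The meye change above relies on the PCI core already caching the revision ID in struct pci_dev; a one-line sketch of the idiom (the function name is illustrative):

#include <linux/pci.h>

static void example_print_revision(struct pci_dev *pdev)
{
	/* pdev->revision replaces a manual pci_read_config_byte(PCI_REVISION_ID) */
	dev_info(&pdev->dev, "chip revision %d\n", pdev->revision);
}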
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index 3bb7d6634862..11cfcf18ec34 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -157,8 +157,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
break;
v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
dev, addr);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(msecs_to_jiffies(10));
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
}
if (err == 3) {
v4l_warn(client, "giving up, resetting chip. Sound will go off, sorry folks :-|\n");
@@ -197,8 +196,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
break;
v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
dev, addr);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(msecs_to_jiffies(10));
+ schedule_timeout_interruptible(msecs_to_jiffies(10));
}
if (err == 3) {
v4l_warn(client, "giving up, resetting chip. Sound will go off, sorry folks :-|\n");
@@ -814,10 +812,9 @@ static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
int msp_product, msp_prod_hi, msp_prod_lo;
int msp_rom;
- client = kmalloc(sizeof(*client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (client == NULL)
return -ENOMEM;
- memset(client, 0, sizeof(*client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver;
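
The kmalloc()+memset() to kzalloc() conversion above in miniature (the function name is a placeholder):

#include <linux/slab.h>
#include <linux/i2c.h>

static struct i2c_client *example_alloc_client(void)
{
	/* allocate and zero in one call instead of kmalloc() + memset() */
	return kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
}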
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index e1821eb82fb5..d5ee2629121e 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/freezer.h>
#include <linux/videodev.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -468,6 +469,7 @@ int msp3400c_thread(void *data)
v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+ set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
msp_sleep(state, -1);
@@ -646,7 +648,7 @@ int msp3410d_thread(void *data)
int val, i, std, count;
v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
-
+ set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
msp_sleep(state,-1);
@@ -940,7 +942,7 @@ int msp34xxg_thread(void *data)
int val, i;
v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
-
+ set_freezable();
for (;;) {
v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
msp_sleep(state, -1);
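
A hedged sketch of a freezable polling thread like the msp3400 daemons above: set_freezable() opts the thread into the freezer so suspend/resume works, and the loop must reach try_to_freeze() regularly. The thread body, names and poll interval are illustrative only.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

static int example_thread(void *data)
{
	set_freezable();
	while (!kthread_should_stop()) {
		try_to_freeze();
		msleep_interruptible(100);	/* poll interval */
	}
	return 0;
}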
diff --git a/drivers/media/video/mt20xx.c b/drivers/media/video/mt20xx.c
index c7c9f3f8715c..7549114aaaca 100644
--- a/drivers/media/video/mt20xx.c
+++ b/drivers/media/video/mt20xx.c
@@ -7,7 +7,7 @@
#include <linux/i2c.h>
#include <linux/videodev.h>
#include <linux/moduleparam.h>
-#include <media/tuner.h>
+#include "tuner-driver.h"
/* ---------------------------------------------------------------------- */
@@ -37,6 +37,19 @@ static char *microtune_part[] = {
[ MT2050 ] = "MT2050",
};
+struct microtune_priv {
+ unsigned int xogc;
+ unsigned int radio_if2;
+};
+
+static void microtune_release(struct i2c_client *c)
+{
+ struct tuner *t = i2c_get_clientdata(c);
+
+ kfree(t->priv);
+ t->priv = NULL;
+}
+
// IsSpurInBand()?
static int mt2032_spurcheck(struct i2c_client *c,
int f1, int f2, int spectrum_from,int spectrum_to)
@@ -218,6 +231,7 @@ static void mt2032_set_if_freq(struct i2c_client *c, unsigned int rfin,
unsigned char buf[21];
int lint_try,ret,sel,lock=0;
struct tuner *t = i2c_get_clientdata(c);
+ struct microtune_priv *priv = t->priv;
tuner_dbg("mt2032_set_if_freq rfin=%d if1=%d if2=%d from=%d to=%d\n",
rfin,if1,if2,from,to);
@@ -227,7 +241,7 @@ static void mt2032_set_if_freq(struct i2c_client *c, unsigned int rfin,
i2c_master_recv(c,buf,21);
buf[0]=0;
- ret=mt2032_compute_freq(c,rfin,if1,if2,from,to,&buf[1],&sel,t->xogc);
+ ret=mt2032_compute_freq(c,rfin,if1,if2,from,to,&buf[1],&sel,priv->xogc);
if (ret<0)
return;
@@ -251,10 +265,10 @@ static void mt2032_set_if_freq(struct i2c_client *c, unsigned int rfin,
tuner_dbg("mt2032: re-init PLLs by LINT\n");
buf[0]=7;
- buf[1]=0x80 +8+t->xogc; // set LINT to re-init PLLs
+ buf[1]=0x80 +8+priv->xogc; // set LINT to re-init PLLs
i2c_master_send(c,buf,2);
mdelay(10);
- buf[1]=8+t->xogc;
+ buf[1]=8+priv->xogc;
i2c_master_send(c,buf,2);
}
@@ -294,17 +308,25 @@ static void mt2032_set_tv_freq(struct i2c_client *c, unsigned int freq)
static void mt2032_set_radio_freq(struct i2c_client *c, unsigned int freq)
{
struct tuner *t = i2c_get_clientdata(c);
- int if2 = t->radio_if2;
+ struct microtune_priv *priv = t->priv;
+ int if2 = priv->radio_if2;
// per Manual for FM tuning: first if center freq. 1085 MHz
mt2032_set_if_freq(c, freq * 1000 / 16,
1085*1000*1000,if2,if2,if2);
}
+static struct tuner_operations mt2032_tuner_ops = {
+ .set_tv_freq = mt2032_set_tv_freq,
+ .set_radio_freq = mt2032_set_radio_freq,
+ .release = microtune_release,
+};
+
// Initalization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001
static int mt2032_init(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct microtune_priv *priv = t->priv;
unsigned char buf[21];
int ret,xogc,xok=0;
@@ -351,23 +373,23 @@ static int mt2032_init(struct i2c_client *c)
if (ret!=2)
tuner_warn("i2c i/o error: rc == %d (should be 2)\n",ret);
} while (xok != 1 );
- t->xogc=xogc;
+ priv->xogc=xogc;
+
+ memcpy(&t->ops, &mt2032_tuner_ops, sizeof(struct tuner_operations));
- t->set_tv_freq = mt2032_set_tv_freq;
- t->set_radio_freq = mt2032_set_radio_freq;
return(1);
}
static void mt2050_set_antenna(struct i2c_client *c, unsigned char antenna)
{
struct tuner *t = i2c_get_clientdata(c);
- unsigned char buf[2];
- int ret;
+ unsigned char buf[2];
+ int ret;
- buf[0] = 6;
- buf[1] = antenna ? 0x11 : 0x10;
- ret=i2c_master_send(c,buf,2);
- tuner_dbg("mt2050: enabled antenna connector %d\n", antenna);
+ buf[0] = 6;
+ buf[1] = antenna ? 0x11 : 0x10;
+ ret=i2c_master_send(c,buf,2);
+ tuner_dbg("mt2050: enabled antenna connector %d\n", antenna);
}
static void mt2050_set_if_freq(struct i2c_client *c,unsigned int freq, unsigned int if2)
@@ -456,12 +478,19 @@ static void mt2050_set_tv_freq(struct i2c_client *c, unsigned int freq)
static void mt2050_set_radio_freq(struct i2c_client *c, unsigned int freq)
{
struct tuner *t = i2c_get_clientdata(c);
- int if2 = t->radio_if2;
+ struct microtune_priv *priv = t->priv;
+ int if2 = priv->radio_if2;
mt2050_set_if_freq(c, freq * 1000 / 16, if2);
mt2050_set_antenna(c, radio_antenna);
}
+static struct tuner_operations mt2050_tuner_ops = {
+ .set_tv_freq = mt2050_set_tv_freq,
+ .set_radio_freq = mt2050_set_radio_freq,
+ .release = microtune_release,
+};
+
static int mt2050_init(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
@@ -481,28 +510,35 @@ static int mt2050_init(struct i2c_client *c)
i2c_master_recv(c,buf,1);
tuner_dbg("mt2050: sro is %x\n",buf[0]);
- t->set_tv_freq = mt2050_set_tv_freq;
- t->set_radio_freq = mt2050_set_radio_freq;
+
+ memcpy(&t->ops, &mt2050_tuner_ops, sizeof(struct tuner_operations));
+
return 0;
}
int microtune_init(struct i2c_client *c)
{
+ struct microtune_priv *priv = NULL;
struct tuner *t = i2c_get_clientdata(c);
char *name;
unsigned char buf[21];
int company_code;
+ priv = kzalloc(sizeof(struct microtune_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ t->priv = priv;
+
+ priv->radio_if2 = 10700 * 1000; /* 10.7MHz - FM radio */
+
memset(buf,0,sizeof(buf));
- t->set_tv_freq = NULL;
- t->set_radio_freq = NULL;
- t->standby = NULL;
+
if (t->std & V4L2_STD_525_60) {
tuner_dbg("pinnacle ntsc\n");
- t->radio_if2 = 41300 * 1000;
+ priv->radio_if2 = 41300 * 1000;
} else {
tuner_dbg("pinnacle pal\n");
- t->radio_if2 = 33300 * 1000;
+ priv->radio_if2 = 33300 * 1000;
}
name = "unknown";
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 3ceb8a6249dd..f8f21ddd9843 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -617,7 +617,7 @@ static struct ov7670_win_size {
},
};
-#define N_WIN_SIZES (sizeof(ov7670_win_sizes)/sizeof(ov7670_win_sizes[0]))
+#define N_WIN_SIZES (ARRAY_SIZE(ov7670_win_sizes))
/*
@@ -1183,7 +1183,7 @@ static struct ov7670_control {
.query = ov7670_q_hflip,
},
};
-#define N_CONTROLS (sizeof(ov7670_controls)/sizeof(ov7670_controls[0]))
+#define N_CONTROLS (ARRAY_SIZE(ov7670_controls))
static struct ov7670_control *ov7670_find_control(__u32 id)
{
diff --git a/drivers/media/video/planb.c b/drivers/media/video/planb.c
index 1455a8f4e930..4ab1af74a970 100644
--- a/drivers/media/video/planb.c
+++ b/drivers/media/video/planb.c
@@ -353,9 +353,8 @@ static int planb_prepare_open(struct planb *pb)
* PLANB_DUMMY)*sizeof(struct dbdma_cmd)
+(PLANB_MAXLINES*((PLANB_MAXPIXELS+7)& ~7))/8
+MAX_GBUFFERS*sizeof(unsigned int);
- if ((pb->priv_space = kmalloc (size, GFP_KERNEL)) == 0)
+ if ((pb->priv_space = kzalloc (size, GFP_KERNEL)) == 0)
return -ENOMEM;
- memset ((void *) pb->priv_space, 0, size);
pb->overlay_last1 = pb->ch1_cmd = (volatile struct dbdma_cmd *)
DBDMA_ALIGN (pb->priv_space);
pb->overlay_last2 = pb->ch2_cmd = pb->ch1_cmd + pb->tab_size;
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 085332a503de..9c0e8d18c2f6 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1099,7 +1099,7 @@ static int pwc_video_open(struct inode *inode, struct file *file)
return -EBUSY;
}
- down(&pdev->modlock);
+ mutex_lock(&pdev->modlock);
if (!pdev->usb_init) {
PWC_DEBUG_OPEN("Doing first time initialization.\n");
pdev->usb_init = 1;
@@ -1131,7 +1131,7 @@ static int pwc_video_open(struct inode *inode, struct file *file)
if (i < 0) {
PWC_DEBUG_OPEN("Failed to allocate buffers memory.\n");
pwc_free_buffers(pdev);
- up(&pdev->modlock);
+ mutex_unlock(&pdev->modlock);
return i;
}
@@ -1172,7 +1172,7 @@ static int pwc_video_open(struct inode *inode, struct file *file)
if (i) {
PWC_DEBUG_OPEN("Second attempt at set_video_mode failed.\n");
pwc_free_buffers(pdev);
- up(&pdev->modlock);
+ mutex_unlock(&pdev->modlock);
return i;
}
@@ -1181,7 +1181,7 @@ static int pwc_video_open(struct inode *inode, struct file *file)
PWC_DEBUG_OPEN("Failed to init ISOC stuff = %d.\n", i);
pwc_isoc_cleanup(pdev);
pwc_free_buffers(pdev);
- up(&pdev->modlock);
+ mutex_unlock(&pdev->modlock);
return i;
}
@@ -1191,7 +1191,7 @@ static int pwc_video_open(struct inode *inode, struct file *file)
pdev->vopen++;
file->private_data = vdev;
- up(&pdev->modlock);
+ mutex_unlock(&pdev->modlock);
PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
return 0;
}
@@ -1685,7 +1685,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->angle_range.tilt_max = 2500;
}
- init_MUTEX(&pdev->modlock);
+ mutex_init(&pdev->modlock);
spin_lock_init(&pdev->ptrlock);
pdev->udev = udev;
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index acbb9312960a..910a04f53920 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -31,7 +31,7 @@
#include <linux/wait.h>
#include <linux/smp_lock.h>
#include <linux/version.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <asm/errno.h>
#include <linux/videodev.h>
#include <media/v4l2-common.h>
@@ -244,7 +244,7 @@ struct pwc_device
int image_read_pos; /* In case we read data in pieces, keep track of were we are in the imagebuffer */
int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
- struct semaphore modlock; /* to prevent races in video_open(), etc */
+ struct mutex modlock; /* to prevent races in video_open(), etc */
spinlock_t ptrlock; /* for manipulating the buffer pointers */
/*** motorized pan/tilt feature */
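
The pwc conversion swaps the open-coded semaphore for a mutex; the pattern in miniature (struct and function names are illustrative), with the unlock repeated on every error path exactly as in pwc_video_open():

#include <linux/mutex.h>
#include <linux/errno.h>

struct ex_cam { struct mutex modlock; };

static void ex_setup(struct ex_cam *cam)
{
	mutex_init(&cam->modlock);	/* was init_MUTEX() */
}

static int ex_open(struct ex_cam *cam, int fail)
{
	mutex_lock(&cam->modlock);	/* was down() */
	if (fail) {
		mutex_unlock(&cam->modlock);	/* unlock on error paths too */
		return -EIO;
	}
	mutex_unlock(&cam->modlock);	/* was up() */
	return 0;
}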
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
index f2a2f34cd626..17f1e2e9a66b 100644
--- a/drivers/media/video/saa5249.c
+++ b/drivers/media/video/saa5249.c
@@ -86,9 +86,9 @@ static const int disp_modes[8][3] =
-#define PAGE_WAIT (300*HZ/1000) /* Time between requesting page and */
+#define PAGE_WAIT msecs_to_jiffies(300) /* Time between requesting page and */
/* checking status bits */
-#define PGBUF_EXPIRE (15*HZ) /* Time to wait before retransmitting */
+#define PGBUF_EXPIRE msecs_to_jiffies(15000) /* Time to wait before retransmitting */
/* page regardless of infobits */
typedef struct {
u8 pgbuf[VTX_VIRTUALSIZE]; /* Page-buffer */
@@ -115,8 +115,8 @@ struct saa5249_device
#define CCTWR 34 /* I²C write/read-address of vtx-chip */
#define CCTRD 35
#define NOACK_REPEAT 10 /* Retry access this many times on failure */
-#define CLEAR_DELAY (HZ/20) /* Time required to clear a page */
-#define READY_TIMEOUT (30*HZ/1000) /* Time to wait for ready signal of I²C-bus interface */
+#define CLEAR_DELAY msecs_to_jiffies(50) /* Time required to clear a page */
+#define READY_TIMEOUT msecs_to_jiffies(30) /* Time to wait for ready signal of I2C-bus interface */
#define INIT_DELAY 500 /* Time in usec to wait at initialization of CEA interface */
#define START_DELAY 10 /* Time in usec to wait before starting write-cycle (CEA) */
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index 676b9970eb2e..061134a7ba9f 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -208,7 +208,7 @@ determine_norm (struct i2c_client *client)
saa7110_write_block(client, initseq, sizeof(initseq));
saa7110_selmux(client, decoder->input);
prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/4);
+ schedule_timeout(msecs_to_jiffies(250));
finish_wait(&decoder->wq, &wait);
status = saa7110_read(client);
if (status & 0x40) {
@@ -249,7 +249,7 @@ determine_norm (struct i2c_client *client)
//saa7110_write(client,0x2E,0x9A);
prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/4);
+ schedule_timeout(msecs_to_jiffies(250));
finish_wait(&decoder->wq, &wait);
status = saa7110_read(client);
diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c
index c1a392e47170..7ae2d646d000 100644
--- a/drivers/media/video/saa7111.c
+++ b/drivers/media/video/saa7111.c
@@ -37,23 +37,23 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_decoder.h>
MODULE_DESCRIPTION("Philips SAA7111 video decoder driver");
MODULE_AUTHOR("Dave Perks");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(s) (s)->name
-#include <linux/video_decoder.h>
static int debug = 0;
module_param(debug, int, 0644);
diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c
index 87c3144ec7fc..677df51de1a9 100644
--- a/drivers/media/video/saa7114.c
+++ b/drivers/media/video/saa7114.c
@@ -35,28 +35,26 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/major.h>
-
#include <linux/slab.h>
-
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_decoder.h>
MODULE_DESCRIPTION("Philips SAA7114H video decoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(x) (x)->name
-#include <linux/video_decoder.h>
static int debug = 0;
module_param(debug, int, 0);
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 309dca368f4a..9f1417a4f7d2 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -40,7 +40,7 @@ config VIDEO_SAA7134_DVB
depends on VIDEO_SAA7134 && DVB_CORE
select VIDEO_BUF_DVB
select FW_LOADER
- select DVB_PLL
+ select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_MT352 if !DVB_FE_CUSTOMISE
select DVB_TDA1004X if !DVB_FE_CUSTOMISE
select DVB_NXT200X if !DVB_FE_CUSTOMISE
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index ffb0f647a86d..3c0fc9027ad0 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -75,7 +75,8 @@ typedef struct snd_card_saa7134 {
struct saa7134_dev *dev;
unsigned long iobase;
- int irq;
+ s16 irq;
+ u16 mute_was_on;
spinlock_t lock;
} snd_card_saa7134_t;
@@ -589,8 +590,10 @@ static int snd_card_saa7134_capture_close(struct snd_pcm_substream * substream)
snd_card_saa7134_t *saa7134 = snd_pcm_substream_chip(substream);
struct saa7134_dev *dev = saa7134->dev;
- dev->ctl_mute = 1;
- saa7134_tvaudio_setmute(dev);
+ if (saa7134->mute_was_on) {
+ dev->ctl_mute = 1;
+ saa7134_tvaudio_setmute(dev);
+ }
return 0;
}
@@ -637,8 +640,11 @@ static int snd_card_saa7134_capture_open(struct snd_pcm_substream * substream)
runtime->private_free = snd_card_saa7134_runtime_free;
runtime->hw = snd_card_saa7134_capture;
- dev->ctl_mute = 0;
- saa7134_tvaudio_setmute(dev);
+ if (dev->ctl_mute != 0) {
+ saa7134->mute_was_on = 1;
+ dev->ctl_mute = 0;
+ saa7134_tvaudio_setmute(dev);
+ }
if ((err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
return err;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 50f15adfa7c8..8ec83bd70094 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -400,7 +400,7 @@ struct saa7134_board saa7134_boards[] = {
.inputs = {{
.name = name_tv,
.vmux = 1,
- .amux = LINE2,
+ .amux = TV,
.tv = 1,
.gpio = 0x20000,
},{
@@ -3502,6 +3502,38 @@ struct saa7134_board saa7134_boards[] = {
.amux = TV,
},
},
+ [SAA7134_BOARD_10MOONSTVMASTER3] = {
+ /* Tony Wan <aloha_cn@hotmail.com> */
+ .name = "10MOONS TM300 TV Card",
+ .audio_clock = 0x00200000,
+ .tuner_type = TUNER_LG_PAL_NEW_TAPC,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x7000,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 1,
+ .amux = LINE2,
+ .gpio = 0x0000,
+ .tv = 1,
+ },{
+ .name = name_comp1,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x2000,
+ },{
+ .name = name_svideo,
+ .vmux = 8,
+ .amux = LINE1,
+ .gpio = 0x2000,
+ }},
+ .mute = {
+ .name = name_mute,
+ .amux = LINE2,
+ .gpio = 0x3000,
+ },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -4219,6 +4251,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x2003, /* OEM cardbus */
.driver_data = SAA7134_BOARD_SABRENT_TV_PCB05,
},{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
+ .subvendor = PCI_VENDOR_ID_PHILIPS,
+ .subdevice = 0x2304,
+ .driver_data = SAA7134_BOARD_10MOONSTVMASTER3,
+ },{
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -4330,6 +4368,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_A16AR:
case SAA7134_BOARD_ENCORE_ENLTV:
case SAA7134_BOARD_ENCORE_ENLTV_FM:
+ case SAA7134_BOARD_10MOONSTVMASTER3:
dev->has_remote = SAA7134_REMOTE_GPIO;
break;
case SAA7134_BOARD_FLYDVBS_LR300:
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index e0eec80088c7..1f6bd3300715 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -175,18 +175,6 @@ static int mt352_pinnacle_tuner_set_params(struct dvb_frontend* fe,
return mt352_pinnacle_init(fe);
}
-static int mt352_aver777_tuner_calc_regs(struct dvb_frontend *fe, struct dvb_frontend_parameters *params, u8* pllbuf, int buf_len)
-{
- if (buf_len < 5)
- return -EINVAL;
-
- pllbuf[0] = 0x61;
- dvb_pll_configure(&dvb_pll_philips_td1316, pllbuf+1,
- params->frequency,
- params->u.ofdm.bandwidth);
- return 5;
-}
-
static struct mt352_config pinnacle_300i = {
.demod_address = 0x3c >> 1,
.adc_clock = 20333,
@@ -444,135 +432,6 @@ static struct tda1004x_config philips_europa_config = {
/* ------------------------------------------------------------------ */
-static int philips_fmd1216_tuner_init(struct dvb_frontend *fe)
-{
- struct saa7134_dev *dev = fe->dvb->priv;
- struct tda1004x_state *state = fe->demodulator_priv;
- u8 addr = state->config->tuner_address;
- /* this message is to set up ATC and ALC */
- static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 };
- struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) };
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1)
- return -EIO;
- msleep(1);
-
- return 0;
-}
-
-static int philips_fmd1216_tuner_sleep(struct dvb_frontend *fe)
-{
- struct saa7134_dev *dev = fe->dvb->priv;
- struct tda1004x_state *state = fe->demodulator_priv;
- u8 addr = state->config->tuner_address;
- /* this message actually turns the tuner back to analog mode */
- u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0x60 };
- struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = fmd1216_init,.len = sizeof(fmd1216_init) };
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
- msleep(1);
- fmd1216_init[2] = 0x86;
- fmd1216_init[3] = 0x54;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- i2c_transfer(&dev->i2c_adap, &tuner_msg, 1);
- msleep(1);
- return 0;
-}
-
-static int philips_fmd1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
-{
- struct saa7134_dev *dev = fe->dvb->priv;
- struct tda1004x_state *state = fe->demodulator_priv;
- u8 addr = state->config->tuner_address;
- u8 tuner_buf[4];
- struct i2c_msg tuner_msg = {.addr = addr,.flags = 0,.buf = tuner_buf,.len =
- sizeof(tuner_buf) };
- int tuner_frequency = 0;
- int divider = 0;
- u8 band, mode, cp;
-
- /* determine charge pump */
- tuner_frequency = params->frequency + 36130000;
- if (tuner_frequency < 87000000)
- return -EINVAL;
- /* low band */
- else if (tuner_frequency < 180000000) {
- band = 1;
- mode = 7;
- cp = 0;
- } else if (tuner_frequency < 195000000) {
- band = 1;
- mode = 6;
- cp = 1;
- /* mid band */
- } else if (tuner_frequency < 366000000) {
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
- band = 10;
- } else {
- band = 2;
- }
- mode = 7;
- cp = 0;
- } else if (tuner_frequency < 478000000) {
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
- band = 10;
- } else {
- band = 2;
- }
- mode = 6;
- cp = 1;
- /* high band */
- } else if (tuner_frequency < 662000000) {
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
- band = 12;
- } else {
- band = 4;
- }
- mode = 7;
- cp = 0;
- } else if (tuner_frequency < 840000000) {
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
- band = 12;
- } else {
- band = 4;
- }
- mode = 6;
- cp = 1;
- } else {
- if (params->u.ofdm.bandwidth == BANDWIDTH_8_MHZ) {
- band = 12;
- } else {
- band = 4;
- }
- mode = 7;
- cp = 1;
-
- }
- /* calculate divisor */
- /* ((36166000 + Finput) / 166666) rounded! */
- divider = (tuner_frequency + 83333) / 166667;
-
- /* setup tuner buffer */
- tuner_buf[0] = (divider >> 8) & 0x7f;
- tuner_buf[1] = divider & 0xff;
- tuner_buf[2] = 0x80 | (cp << 6) | (mode << 3) | 4;
- tuner_buf[3] = 0x40 | band;
-
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if (i2c_transfer(&dev->i2c_adap, &tuner_msg, 1) != 1) {
- wprintk("could not write to tuner at addr: 0x%02x\n",
- addr << 1);
- return -EIO;
- }
- return 0;
-}
-
static struct tda1004x_config medion_cardbus = {
.demod_address = 0x08,
.invert = 1,
@@ -958,18 +817,8 @@ static struct nxt200x_config avertvhda180 = {
.demod_address = 0x0a,
};
-static int nxt200x_set_pll_input(u8 *buf, int input)
-{
- if (input)
- buf[3] |= 0x08;
- else
- buf[3] &= ~0x08;
- return 0;
-}
-
static struct nxt200x_config kworldatsc110 = {
.demod_address = 0x0a,
- .set_pll_input = nxt200x_set_pll_input,
};
/* ==================================================================
@@ -1005,7 +854,8 @@ static int dvb_init(struct saa7134_dev *dev)
dev->dvb.frontend = dvb_attach(mt352_attach, &avermedia_777,
&dev->i2c_adap);
if (dev->dvb.frontend) {
- dev->dvb.frontend->ops.tuner_ops.calc_regs = mt352_aver777_tuner_calc_regs;
+ dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
+ NULL, DVB_PLL_PHILIPS_TD1316);
}
break;
case SAA7134_BOARD_MD7134:
@@ -1013,9 +863,8 @@ static int dvb_init(struct saa7134_dev *dev)
&medion_cardbus,
&dev->i2c_adap);
if (dev->dvb.frontend) {
- dev->dvb.frontend->ops.tuner_ops.init = philips_fmd1216_tuner_init;
- dev->dvb.frontend->ops.tuner_ops.sleep = philips_fmd1216_tuner_sleep;
- dev->dvb.frontend->ops.tuner_ops.set_params = philips_fmd1216_tuner_set_params;
+ dvb_attach(dvb_pll_attach, dev->dvb.frontend, medion_cardbus.tuner_address,
+ &dev->i2c_adap, DVB_PLL_FMD1216ME);
}
break;
case SAA7134_BOARD_PHILIPS_TOUGH:
@@ -1113,7 +962,7 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap);
if (dev->dvb.frontend) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_tdhu2);
+ NULL, DVB_PLL_TDHU2);
}
break;
case SAA7134_BOARD_KWORLD_ATSC110:
@@ -1121,7 +970,7 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap);
if (dev->dvb.frontend) {
dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
- NULL, &dvb_pll_tuv1236d);
+ NULL, DVB_PLL_TUV1236D);
}
break;
case SAA7134_BOARD_FLYDVBS_LR300:
@@ -1144,9 +993,9 @@ static int dvb_init(struct saa7134_dev *dev)
if (dev->dvb.frontend) {
dev->original_demod_sleep = dev->dvb.frontend->ops.sleep;
dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
- dev->dvb.frontend->ops.tuner_ops.init = philips_fmd1216_tuner_init;
- dev->dvb.frontend->ops.tuner_ops.sleep = philips_fmd1216_tuner_sleep;
- dev->dvb.frontend->ops.tuner_ops.set_params = philips_fmd1216_tuner_set_params;
+
+ dvb_attach(dvb_pll_attach, dev->dvb.frontend, medion_cardbus.tuner_address,
+ &dev->i2c_adap, DVB_PLL_FMD1216ME);
}
break;
case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index f521603482ca..fc260ec8fdc2 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -96,6 +96,10 @@ static int ts_open(struct inode *inode, struct file *file)
if (dev->empress_users)
goto done_up;
+ /* Unmute audio */
+ saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+ saa_readb(SAA7134_AUDIO_MUTE_CTRL) & ~(1 << 6));
+
dev->empress_users++;
file->private_data = dev;
err = 0;
@@ -121,6 +125,10 @@ static int ts_release(struct inode *inode, struct file *file)
/* stop the encoder */
ts_reset_encoder(dev);
+ /* Mute audio */
+ saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
+ saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
+
mutex_unlock(&dev->empress_tsq.lock);
return 0;
}
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index c0de37e3f5c6..1b6dfd801cc1 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -153,21 +153,18 @@ void saa7134_input_irq(struct saa7134_dev *dev)
static void saa7134_input_timer(unsigned long data)
{
- struct saa7134_dev *dev = (struct saa7134_dev*)data;
+ struct saa7134_dev *dev = (struct saa7134_dev *)data;
struct card_ir *ir = dev->remote;
- unsigned long timeout;
build_key(dev);
- timeout = jiffies + (ir->polling * HZ / 1000);
- mod_timer(&ir->timer, timeout);
+ mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
static void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir)
{
if (ir->polling) {
- init_timer(&ir->timer);
- ir->timer.function = saa7134_input_timer;
- ir->timer.data = (unsigned long)dev;
+ setup_timer(&ir->timer, saa7134_input_timer,
+ (unsigned long)dev);
ir->timer.expires = jiffies + HZ;
add_timer(&ir->timer);
} else if (ir->rc5_gpio) {
@@ -314,6 +311,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0x003F00;
mask_keyup = 0x040000;
break;
+ case SAA7134_BOARD_FLYDVBS_LR300:
case SAA7134_BOARD_FLYDVBT_LR301:
case SAA7134_BOARD_FLYDVBTDUO:
ir_codes = ir_codes_flydvb;
@@ -333,6 +331,12 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keyup = 0x040000;
polling = 50; // ms
break;
+ case SAA7134_BOARD_10MOONSTVMASTER3:
+ ir_codes = ir_codes_encore_enltv;
+ mask_keycode = 0x5f80000;
+ mask_keyup = 0x8000000;
+ polling = 50; // ms
+ break;
}
if (NULL == ir_codes) {
printk("%s: Oops: IR config error [card=%d]\n",
@@ -374,7 +378,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
input_dev->id.vendor = dev->pci->vendor;
input_dev->id.product = dev->pci->device;
}
- input_dev->cdev.dev = &dev->pci->dev;
+ input_dev->dev.parent = &dev->pci->dev;
dev->remote = ir;
saa7134_ir_start(dev, ir);
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index 30395d6b5f14..18b4817b4aac 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/div64.h>
@@ -341,10 +342,8 @@ static void tvaudio_setmode(struct saa7134_dev *dev,
static int tvaudio_sleep(struct saa7134_dev *dev, int timeout)
{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&dev->thread.wq, &wait);
- if (dev->thread.scan1 == dev->thread.scan2 && !dev->thread.shutdown) {
+ if (dev->thread.scan1 == dev->thread.scan2 &&
+ !kthread_should_stop()) {
if (timeout < 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
@@ -353,7 +352,6 @@ static int tvaudio_sleep(struct saa7134_dev *dev, int timeout)
(msecs_to_jiffies(timeout));
}
}
- remove_wait_queue(&dev->thread.wq, &wait);
return dev->thread.scan1 != dev->thread.scan2;
}
@@ -505,11 +503,10 @@ static int tvaudio_thread(void *data)
unsigned int i, audio, nscan;
int max1,max2,carrier,rx,mode,lastmode,default_carrier;
- daemonize("%s", dev->name);
allow_signal(SIGTERM);
for (;;) {
tvaudio_sleep(dev,-1);
- if (dev->thread.shutdown || signal_pending(current))
+ if (kthread_should_stop() || signal_pending(current))
goto done;
restart:
@@ -618,7 +615,7 @@ static int tvaudio_thread(void *data)
for (;;) {
if (tvaudio_sleep(dev,5000))
goto restart;
- if (dev->thread.shutdown || signal_pending(current))
+ if (kthread_should_stop() || signal_pending(current))
break;
if (UNSET == dev->thread.mode) {
rx = tvaudio_getstereo(dev,&tvaudio[i]);
@@ -634,7 +631,6 @@ static int tvaudio_thread(void *data)
}
done:
- complete_and_exit(&dev->thread.exit, 0);
return 0;
}
@@ -782,7 +778,6 @@ static int tvaudio_thread_ddep(void *data)
struct saa7134_dev *dev = data;
u32 value, norms, clock;
- daemonize("%s", dev->name);
allow_signal(SIGTERM);
clock = saa7134_boards[dev->board].audio_clock;
@@ -796,7 +791,7 @@ static int tvaudio_thread_ddep(void *data)
for (;;) {
tvaudio_sleep(dev,-1);
- if (dev->thread.shutdown || signal_pending(current))
+ if (kthread_should_stop() || signal_pending(current))
goto done;
restart:
@@ -876,7 +871,6 @@ static int tvaudio_thread_ddep(void *data)
}
done:
- complete_and_exit(&dev->thread.exit, 0);
return 0;
}
@@ -973,7 +967,6 @@ int saa7134_tvaudio_getstereo(struct saa7134_dev *dev)
int saa7134_tvaudio_init2(struct saa7134_dev *dev)
{
- DECLARE_MUTEX_LOCKED(sem);
int (*my_thread)(void *data) = NULL;
switch (dev->pci->device) {
@@ -986,15 +979,15 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
break;
}
- dev->thread.pid = -1;
+ dev->thread.thread = NULL;
if (my_thread) {
/* start tvaudio thread */
- init_waitqueue_head(&dev->thread.wq);
- init_completion(&dev->thread.exit);
- dev->thread.pid = kernel_thread(my_thread,dev,0);
- if (dev->thread.pid < 0)
+ dev->thread.thread = kthread_run(my_thread, dev, "%s", dev->name);
+ if (IS_ERR(dev->thread.thread)) {
printk(KERN_WARNING "%s: kernel_thread() failed\n",
dev->name);
+ /* XXX: missing error handling here */
+ }
saa7134_tvaudio_do_scan(dev);
}
@@ -1005,11 +998,9 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
int saa7134_tvaudio_fini(struct saa7134_dev *dev)
{
/* shutdown tvaudio thread */
- if (dev->thread.pid > 0) {
- dev->thread.shutdown = 1;
- wake_up_interruptible(&dev->thread.wq);
- wait_for_completion(&dev->thread.exit);
- }
+ if (dev->thread.thread)
+ kthread_stop(dev->thread.thread);
+
saa_andorb(SAA7134_ANALOG_IO_SELECT, 0x07, 0x00); /* LINE1 */
return 0;
}
@@ -1020,10 +1011,10 @@ int saa7134_tvaudio_do_scan(struct saa7134_dev *dev)
dprintk("sound IF not in use, skipping scan\n");
dev->automute = 0;
saa7134_tvaudio_setmute(dev);
- } else if (dev->thread.pid >= 0) {
+ } else if (dev->thread.thread) {
dev->thread.mode = UNSET;
dev->thread.scan2++;
- wake_up_interruptible(&dev->thread.wq);
+ wake_up_process(dev->thread.thread);
} else {
dev->automute = 0;
saa7134_tvaudio_setmute(dev);
@@ -1040,4 +1031,3 @@ EXPORT_SYMBOL(saa7134_tvaudio_setmute);
* c-basic-offset: 8
* End:
*/
-
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 15623b27ad2e..346255468dad 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -238,6 +238,7 @@ struct saa7134_format {
#define SAA7134_BOARD_ECS_TVP3XP_4CB6 113
#define SAA7134_BOARD_KWORLD_DVBT_210 114
#define SAA7134_BOARD_SABRENT_TV_PCB05 115
+#define SAA7134_BOARD_10MOONSTVMASTER3 116
#define SAA7134_MAXBOARDS 8
#define SAA7134_INPUT_MAX 8
@@ -313,7 +314,7 @@ struct saa7134_board {
#define INTERLACE_ON 1
#define INTERLACE_OFF 2
-#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
+#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
struct saa7134_dev;
struct saa7134_dma;
@@ -327,10 +328,7 @@ struct saa7134_pgtable {
/* tvaudio thread status */
struct saa7134_thread {
- pid_t pid;
- struct completion exit;
- wait_queue_head_t wq;
- unsigned int shutdown;
+ struct task_struct *thread;
unsigned int scan1;
unsigned int scan2;
unsigned int mode;
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 339592e7722d..66cc92c0ea66 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -34,23 +34,23 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <linux/types.h>
+#include <asm/uaccess.h>
#include <linux/videodev.h>
-#include <asm/uaccess.h>
+#include <linux/video_encoder.h>
MODULE_DESCRIPTION("Philips SAA7185 video encoder driver");
MODULE_AUTHOR("Dave Perks");
MODULE_LICENSE("GPL");
-#include <linux/i2c.h>
#define I2C_NAME(s) (s)->name
-#include <linux/video_encoder.h>
static int debug = 0;
module_param(debug, int, 0);
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index 11fcb49f5b99..2e3c3de793a7 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/stddef.h>
+#include <linux/kref.h>
#include "sn9c102_config.h"
#include "sn9c102_sensor.h"
@@ -94,7 +95,7 @@ struct sn9c102_module_param {
};
static DEFINE_MUTEX(sn9c102_sysfs_lock);
-static DECLARE_RWSEM(sn9c102_disconnect);
+static DECLARE_RWSEM(sn9c102_dev_lock);
struct sn9c102_device {
struct video_device* v4ldev;
@@ -122,12 +123,14 @@ struct sn9c102_device {
struct sn9c102_module_param module_param;
+ struct kref kref;
enum sn9c102_dev_state state;
u8 users;
- struct mutex dev_mutex, fileop_mutex;
+ struct completion probe;
+ struct mutex open_mutex, fileop_mutex;
spinlock_t queue_lock;
- wait_queue_head_t open, wait_frame, wait_stream;
+ wait_queue_head_t wait_open, wait_frame, wait_stream;
};
/*****************************************************************************/
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 74a204f8ebc8..36d8a455e0ec 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -48,8 +48,8 @@
#define SN9C102_MODULE_AUTHOR "(C) 2004-2007 Luca Risolia"
#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define SN9C102_MODULE_LICENSE "GPL"
-#define SN9C102_MODULE_VERSION "1:1.44"
-#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 44)
+#define SN9C102_MODULE_VERSION "1:1.47"
+#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 47)
/*****************************************************************************/
@@ -64,9 +64,10 @@ MODULE_LICENSE(SN9C102_MODULE_LICENSE);
static short video_nr[] = {[0 ... SN9C102_MAX_DEVICES-1] = -1};
module_param_array(video_nr, short, NULL, 0444);
MODULE_PARM_DESC(video_nr,
- "\n<-1|n[,...]> Specify V4L2 minor mode number."
- "\n -1 = use next available (default)"
- "\n n = use minor number n (integer >= 0)"
+ " <-1|n[,...]>"
+ "\nSpecify V4L2 minor mode number."
+ "\n-1 = use next available (default)"
+ "\n n = use minor number n (integer >= 0)"
"\nYou can specify up to "__MODULE_STRING(SN9C102_MAX_DEVICES)
" cameras this way."
"\nFor example:"
@@ -79,13 +80,14 @@ static short force_munmap[] = {[0 ... SN9C102_MAX_DEVICES-1] =
SN9C102_FORCE_MUNMAP};
module_param_array(force_munmap, bool, NULL, 0444);
MODULE_PARM_DESC(force_munmap,
- "\n<0|1[,...]> Force the application to unmap previously"
+ " <0|1[,...]>"
+ "\nForce the application to unmap previously"
"\nmapped buffer memory before calling any VIDIOC_S_CROP or"
"\nVIDIOC_S_FMT ioctl's. Not all the applications support"
"\nthis feature. This parameter is specific for each"
"\ndetected camera."
- "\n 0 = do not force memory unmapping"
- "\n 1 = force memory unmapping (save memory)"
+ "\n0 = do not force memory unmapping"
+ "\n1 = force memory unmapping (save memory)"
"\nDefault value is "__MODULE_STRING(SN9C102_FORCE_MUNMAP)"."
"\n");
@@ -93,7 +95,8 @@ static unsigned int frame_timeout[] = {[0 ... SN9C102_MAX_DEVICES-1] =
SN9C102_FRAME_TIMEOUT};
module_param_array(frame_timeout, uint, NULL, 0644);
MODULE_PARM_DESC(frame_timeout,
- "\n<0|n[,...]> Timeout for a video frame in seconds before"
+ " <0|n[,...]>"
+ "\nTimeout for a video frame in seconds before"
"\nreturning an I/O error; 0 for infinity."
"\nThis parameter is specific for each detected camera."
"\nDefault value is "__MODULE_STRING(SN9C102_FRAME_TIMEOUT)"."
@@ -103,7 +106,8 @@ MODULE_PARM_DESC(frame_timeout,
static unsigned short debug = SN9C102_DEBUG_LEVEL;
module_param(debug, ushort, 0644);
MODULE_PARM_DESC(debug,
- "\n<n> Debugging information level, from 0 to 3:"
+ " <n>"
+ "\nDebugging information level, from 0 to 3:"
"\n0 = none (use carefully)"
"\n1 = critical errors"
"\n2 = significant informations"
@@ -1616,7 +1620,8 @@ static int sn9c102_init(struct sn9c102_device* cam)
int err = 0;
if (!(cam->state & DEV_INITIALIZED)) {
- init_waitqueue_head(&cam->open);
+ mutex_init(&cam->open_mutex);
+ init_waitqueue_head(&cam->wait_open);
qctrl = s->qctrl;
rect = &(s->cropcap.defrect);
} else { /* use current values */
@@ -1706,21 +1711,27 @@ static int sn9c102_init(struct sn9c102_device* cam)
return 0;
}
+/*****************************************************************************/
-static void sn9c102_release_resources(struct sn9c102_device* cam)
+static void sn9c102_release_resources(struct kref *kref)
{
+ struct sn9c102_device *cam;
+
mutex_lock(&sn9c102_sysfs_lock);
+ cam = container_of(kref, struct sn9c102_device, kref);
+
DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
+ usb_put_dev(cam->usbdev);
+ kfree(cam->control_buffer);
+ kfree(cam);
mutex_unlock(&sn9c102_sysfs_lock);
- kfree(cam->control_buffer);
}
-/*****************************************************************************/
static int sn9c102_open(struct inode* inode, struct file* filp)
{
@@ -1728,43 +1739,78 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
int err = 0;
/*
- This is the only safe way to prevent race conditions with
- disconnect
+ A read_trylock() in open() is the only safe way to prevent race
+ conditions with disconnect(), one close() and multiple (not
+ necessarily simultaneous) attempts to open(). For example, it
+ prevents waiting for a second access while the device structure
+ is being deallocated, after a possible disconnect() and during a
+ following close() holding the write lock: since no access will be
+ possible anymore after this deallocation, the non-trylock version
+ would have let open() gain access to the device structure
+ improperly.
+ For this reason the lock must also not be per-device.
*/
- if (!down_read_trylock(&sn9c102_disconnect))
+ if (!down_read_trylock(&sn9c102_dev_lock))
return -ERESTARTSYS;
cam = video_get_drvdata(video_devdata(filp));
- if (mutex_lock_interruptible(&cam->dev_mutex)) {
- up_read(&sn9c102_disconnect);
+ if (wait_for_completion_interruptible(&cam->probe)) {
+ up_read(&sn9c102_dev_lock);
+ return -ERESTARTSYS;
+ }
+
+ kref_get(&cam->kref);
+
+ /*
+ Make sure to isolate all the simultaneous opens.
+ */
+ if (mutex_lock_interruptible(&cam->open_mutex)) {
+ kref_put(&cam->kref, sn9c102_release_resources);
+ up_read(&sn9c102_dev_lock);
return -ERESTARTSYS;
}
+ if (cam->state & DEV_DISCONNECTED) {
+ DBG(1, "Device not present");
+ err = -ENODEV;
+ goto out;
+ }
+
if (cam->users) {
- DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+ DBG(2, "Device /dev/video%d is already in use",
+ cam->v4ldev->minor);
DBG(3, "Simultaneous opens are not supported");
+ /*
+ open() must honor the open flags and, unless O_NONBLOCK or
+ O_NDELAY is set, block until the device is released.
+ */
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
err = -EWOULDBLOCK;
goto out;
}
- mutex_unlock(&cam->dev_mutex);
- err = wait_event_interruptible_exclusive(cam->open,
- cam->state & DEV_DISCONNECTED
+ DBG(2, "A blocking open() has been requested. Wait for the "
+ "device to be released...");
+ up_read(&sn9c102_dev_lock);
+ /*
+ We will not release the "open_mutex" lock, so that only one
+ process can be in the wait queue below. This way the process
+ will be sleeping while holding the lock, without losing its
+ priority after any wake_up().
+ */
+ err = wait_event_interruptible_exclusive(cam->wait_open,
+ (cam->state & DEV_DISCONNECTED)
|| !cam->users);
- if (err) {
- up_read(&sn9c102_disconnect);
- return err;
- }
+ down_read(&sn9c102_dev_lock);
+ if (err)
+ goto out;
if (cam->state & DEV_DISCONNECTED) {
- up_read(&sn9c102_disconnect);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
- mutex_lock(&cam->dev_mutex);
}
-
if (cam->state & DEV_MISCONFIGURED) {
err = sn9c102_init(cam);
if (err) {
@@ -1789,36 +1835,33 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
out:
- mutex_unlock(&cam->dev_mutex);
- up_read(&sn9c102_disconnect);
+ mutex_unlock(&cam->open_mutex);
+ if (err)
+ kref_put(&cam->kref, sn9c102_release_resources);
+
+ up_read(&sn9c102_dev_lock);
return err;
}
static int sn9c102_release(struct inode* inode, struct file* filp)
{
- struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp));
+ struct sn9c102_device* cam;
- mutex_lock(&cam->dev_mutex); /* prevent disconnect() to be called */
+ down_write(&sn9c102_dev_lock);
- sn9c102_stop_transfer(cam);
+ cam = video_get_drvdata(video_devdata(filp));
+ sn9c102_stop_transfer(cam);
sn9c102_release_buffers(cam);
-
- if (cam->state & DEV_DISCONNECTED) {
- sn9c102_release_resources(cam);
- usb_put_dev(cam->usbdev);
- mutex_unlock(&cam->dev_mutex);
- kfree(cam);
- return 0;
- }
-
cam->users--;
- wake_up_interruptible_nr(&cam->open, 1);
+ wake_up_interruptible_nr(&cam->wait_open, 1);
DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
- mutex_unlock(&cam->dev_mutex);
+ kref_put(&cam->kref, sn9c102_release_resources);
+
+ up_write(&sn9c102_dev_lock);
return 0;
}
@@ -2085,7 +2128,6 @@ static int sn9c102_mmap(struct file* filp, struct vm_area_struct *vma)
vma->vm_ops = &sn9c102_vm_ops;
vma->vm_private_data = &cam->frame[i];
-
sn9c102_vm_open(vma);
mutex_unlock(&cam->fileop_mutex);
@@ -3215,8 +3257,6 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- mutex_init(&cam->dev_mutex);
-
r = sn9c102_read_reg(cam, 0x00);
if (r < 0 || (r != 0x10 && r != 0x11 && r != 0x12)) {
DBG(1, "Sorry, this is not a SN9C1xx-based camera "
@@ -3282,7 +3322,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
cam->v4ldev->release = video_device_release;
video_set_drvdata(cam->v4ldev, cam);
- mutex_lock(&cam->dev_mutex);
+ init_completion(&cam->probe);
err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
video_nr[dev_nr]);
@@ -3292,7 +3332,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
DBG(1, "Free /dev/videoX node not found");
video_nr[dev_nr] = -1;
dev_nr = (dev_nr < SN9C102_MAX_DEVICES-1) ? dev_nr+1 : 0;
- mutex_unlock(&cam->dev_mutex);
+ complete_all(&cam->probe);
goto fail;
}
@@ -3318,8 +3358,10 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
#endif
usb_set_intfdata(intf, cam);
+ kref_init(&cam->kref);
+ usb_get_dev(cam->usbdev);
- mutex_unlock(&cam->dev_mutex);
+ complete_all(&cam->probe);
return 0;
@@ -3336,40 +3378,31 @@ fail:
static void sn9c102_usb_disconnect(struct usb_interface* intf)
{
- struct sn9c102_device* cam = usb_get_intfdata(intf);
-
- if (!cam)
- return;
+ struct sn9c102_device* cam;
- down_write(&sn9c102_disconnect);
+ down_write(&sn9c102_dev_lock);
- mutex_lock(&cam->dev_mutex);
+ cam = usb_get_intfdata(intf);
DBG(2, "Disconnecting %s...", cam->v4ldev->name);
- wake_up_interruptible_all(&cam->open);
-
if (cam->users) {
DBG(2, "Device /dev/video%d is open! Deregistration and "
- "memory deallocation are deferred on close.",
+ "memory deallocation are deferred.",
cam->v4ldev->minor);
cam->state |= DEV_MISCONFIGURED;
sn9c102_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
wake_up_interruptible(&cam->wait_frame);
wake_up(&cam->wait_stream);
- usb_get_dev(cam->usbdev);
- } else {
+ } else
cam->state |= DEV_DISCONNECTED;
- sn9c102_release_resources(cam);
- }
- mutex_unlock(&cam->dev_mutex);
+ wake_up_interruptible_all(&cam->wait_open);
- if (!cam->users)
- kfree(cam);
+ kref_put(&cam->kref, sn9c102_release_resources);
- up_write(&sn9c102_disconnect);
+ up_write(&sn9c102_dev_lock);
}
diff --git a/drivers/media/video/sn9c102/sn9c102_ov7630.c b/drivers/media/video/sn9c102/sn9c102_ov7630.c
index e6832347894f..e4856fd77982 100644
--- a/drivers/media/video/sn9c102/sn9c102_ov7630.c
+++ b/drivers/media/video/sn9c102/sn9c102_ov7630.c
@@ -104,6 +104,145 @@ static int ov7630_init(struct sn9c102_device* cam)
err += sn9c102_i2c_write(cam, 0x74, 0x21);
err += sn9c102_i2c_write(cam, 0x7d, 0xf7);
break;
+ case BRIDGE_SN9C105:
+ case BRIDGE_SN9C120:
+ err = sn9c102_write_const_regs(cam, {0x40, 0x02}, {0x00, 0x03},
+ {0x1a, 0x04}, {0x03, 0x10},
+ {0x0a, 0x14}, {0xe2, 0x17},
+ {0x0b, 0x18}, {0x00, 0x19},
+ {0x1d, 0x1a}, {0x10, 0x1b},
+ {0x02, 0x1c}, {0x03, 0x1d},
+ {0x0f, 0x1e}, {0x0c, 0x1f},
+ {0x00, 0x20}, {0x24, 0x21},
+ {0x3b, 0x22}, {0x47, 0x23},
+ {0x60, 0x24}, {0x71, 0x25},
+ {0x80, 0x26}, {0x8f, 0x27},
+ {0x9d, 0x28}, {0xaa, 0x29},
+ {0xb8, 0x2a}, {0xc4, 0x2b},
+ {0xd1, 0x2c}, {0xdd, 0x2d},
+ {0xe8, 0x2e}, {0xf4, 0x2f},
+ {0xff, 0x30}, {0x00, 0x3f},
+ {0xc7, 0x40}, {0x01, 0x41},
+ {0x44, 0x42}, {0x00, 0x43},
+ {0x44, 0x44}, {0x00, 0x45},
+ {0x44, 0x46}, {0x00, 0x47},
+ {0xc7, 0x48}, {0x01, 0x49},
+ {0xc7, 0x4a}, {0x01, 0x4b},
+ {0xc7, 0x4c}, {0x01, 0x4d},
+ {0x44, 0x4e}, {0x00, 0x4f},
+ {0x44, 0x50}, {0x00, 0x51},
+ {0x44, 0x52}, {0x00, 0x53},
+ {0xc7, 0x54}, {0x01, 0x55},
+ {0xc7, 0x56}, {0x01, 0x57},
+ {0xc7, 0x58}, {0x01, 0x59},
+ {0x44, 0x5a}, {0x00, 0x5b},
+ {0x44, 0x5c}, {0x00, 0x5d},
+ {0x44, 0x5e}, {0x00, 0x5f},
+ {0xc7, 0x60}, {0x01, 0x61},
+ {0xc7, 0x62}, {0x01, 0x63},
+ {0xc7, 0x64}, {0x01, 0x65},
+ {0x44, 0x66}, {0x00, 0x67},
+ {0x44, 0x68}, {0x00, 0x69},
+ {0x44, 0x6a}, {0x00, 0x6b},
+ {0xc7, 0x6c}, {0x01, 0x6d},
+ {0xc7, 0x6e}, {0x01, 0x6f},
+ {0xc7, 0x70}, {0x01, 0x71},
+ {0x44, 0x72}, {0x00, 0x73},
+ {0x44, 0x74}, {0x00, 0x75},
+ {0x44, 0x76}, {0x00, 0x77},
+ {0xc7, 0x78}, {0x01, 0x79},
+ {0xc7, 0x7a}, {0x01, 0x7b},
+ {0xc7, 0x7c}, {0x01, 0x7d},
+ {0x44, 0x7e}, {0x00, 0x7f},
+ {0x17, 0x84}, {0x00, 0x85},
+ {0x2e, 0x86}, {0x00, 0x87},
+ {0x09, 0x88}, {0x00, 0x89},
+ {0xe8, 0x8a}, {0x0f, 0x8b},
+ {0xda, 0x8c}, {0x0f, 0x8d},
+ {0x40, 0x8e}, {0x00, 0x8f},
+ {0x37, 0x90}, {0x00, 0x91},
+ {0xcf, 0x92}, {0x0f, 0x93},
+ {0xfa, 0x94}, {0x0f, 0x95},
+ {0x00, 0x96}, {0x00, 0x97},
+ {0x00, 0x98}, {0x66, 0x99},
+ {0x00, 0x9a}, {0x40, 0x9b},
+ {0x20, 0x9c}, {0x00, 0x9d},
+ {0x00, 0x9e}, {0x00, 0x9f},
+ {0x2d, 0xc0}, {0x2d, 0xc1},
+ {0x3a, 0xc2}, {0x00, 0xc3},
+ {0x04, 0xc4}, {0x3f, 0xc5},
+ {0x00, 0xc6}, {0x00, 0xc7},
+ {0x50, 0xc8}, {0x3c, 0xc9},
+ {0x28, 0xca}, {0xd8, 0xcb},
+ {0x14, 0xcc}, {0xec, 0xcd},
+ {0x32, 0xce}, {0xdd, 0xcf},
+ {0x32, 0xd0}, {0xdd, 0xd1},
+ {0x6a, 0xd2}, {0x50, 0xd3},
+ {0x60, 0xd4}, {0x00, 0xd5},
+ {0x00, 0xd6});
+
+ err += sn9c102_i2c_write(cam, 0x12, 0x80);
+ err += sn9c102_i2c_write(cam, 0x12, 0x48);
+ err += sn9c102_i2c_write(cam, 0x01, 0x80);
+ err += sn9c102_i2c_write(cam, 0x02, 0x80);
+ err += sn9c102_i2c_write(cam, 0x03, 0x80);
+ err += sn9c102_i2c_write(cam, 0x04, 0x10);
+ err += sn9c102_i2c_write(cam, 0x05, 0x20);
+ err += sn9c102_i2c_write(cam, 0x06, 0x80);
+ err += sn9c102_i2c_write(cam, 0x11, 0x00);
+ err += sn9c102_i2c_write(cam, 0x0c, 0x20);
+ err += sn9c102_i2c_write(cam, 0x0d, 0x20);
+ err += sn9c102_i2c_write(cam, 0x15, 0x80);
+ err += sn9c102_i2c_write(cam, 0x16, 0x03);
+ err += sn9c102_i2c_write(cam, 0x17, 0x1b);
+ err += sn9c102_i2c_write(cam, 0x18, 0xbd);
+ err += sn9c102_i2c_write(cam, 0x19, 0x05);
+ err += sn9c102_i2c_write(cam, 0x1a, 0xf6);
+ err += sn9c102_i2c_write(cam, 0x1b, 0x04);
+ err += sn9c102_i2c_write(cam, 0x21, 0x1b);
+ err += sn9c102_i2c_write(cam, 0x22, 0x00);
+ err += sn9c102_i2c_write(cam, 0x23, 0xde);
+ err += sn9c102_i2c_write(cam, 0x24, 0x10);
+ err += sn9c102_i2c_write(cam, 0x25, 0x8a);
+ err += sn9c102_i2c_write(cam, 0x26, 0xa0);
+ err += sn9c102_i2c_write(cam, 0x27, 0xca);
+ err += sn9c102_i2c_write(cam, 0x28, 0xa2);
+ err += sn9c102_i2c_write(cam, 0x29, 0x74);
+ err += sn9c102_i2c_write(cam, 0x2a, 0x88);
+ err += sn9c102_i2c_write(cam, 0x2b, 0x34);
+ err += sn9c102_i2c_write(cam, 0x2c, 0x88);
+ err += sn9c102_i2c_write(cam, 0x2e, 0x00);
+ err += sn9c102_i2c_write(cam, 0x2f, 0x00);
+ err += sn9c102_i2c_write(cam, 0x30, 0x00);
+ err += sn9c102_i2c_write(cam, 0x32, 0xc2);
+ err += sn9c102_i2c_write(cam, 0x33, 0x08);
+ err += sn9c102_i2c_write(cam, 0x4c, 0x40);
+ err += sn9c102_i2c_write(cam, 0x4d, 0xf3);
+ err += sn9c102_i2c_write(cam, 0x60, 0x05);
+ err += sn9c102_i2c_write(cam, 0x61, 0x40);
+ err += sn9c102_i2c_write(cam, 0x62, 0x12);
+ err += sn9c102_i2c_write(cam, 0x63, 0x57);
+ err += sn9c102_i2c_write(cam, 0x64, 0x73);
+ err += sn9c102_i2c_write(cam, 0x65, 0x00);
+ err += sn9c102_i2c_write(cam, 0x66, 0x55);
+ err += sn9c102_i2c_write(cam, 0x67, 0x01);
+ err += sn9c102_i2c_write(cam, 0x68, 0xac);
+ err += sn9c102_i2c_write(cam, 0x69, 0x38);
+ err += sn9c102_i2c_write(cam, 0x6f, 0x1f);
+ err += sn9c102_i2c_write(cam, 0x70, 0x01);
+ err += sn9c102_i2c_write(cam, 0x71, 0x00);
+ err += sn9c102_i2c_write(cam, 0x72, 0x10);
+ err += sn9c102_i2c_write(cam, 0x73, 0x50);
+ err += sn9c102_i2c_write(cam, 0x74, 0x20);
+ err += sn9c102_i2c_write(cam, 0x76, 0x01);
+ err += sn9c102_i2c_write(cam, 0x77, 0xf3);
+ err += sn9c102_i2c_write(cam, 0x78, 0x90);
+ err += sn9c102_i2c_write(cam, 0x79, 0x98);
+ err += sn9c102_i2c_write(cam, 0x7a, 0x98);
+ err += sn9c102_i2c_write(cam, 0x7b, 0x00);
+ err += sn9c102_i2c_write(cam, 0x7c, 0x38);
+ err += sn9c102_i2c_write(cam, 0x7d, 0xff);
+ break;
default:
break;
}
@@ -115,6 +254,7 @@ static int ov7630_init(struct sn9c102_device* cam)
static int ov7630_get_ctrl(struct sn9c102_device* cam,
struct v4l2_control* ctrl)
{
+ enum sn9c102_bridge bridge = sn9c102_get_bridge(cam);
int err = 0;
switch (ctrl->id) {
@@ -123,13 +263,20 @@ static int ov7630_get_ctrl(struct sn9c102_device* cam,
return -EIO;
break;
case V4L2_CID_RED_BALANCE:
- ctrl->value = sn9c102_pread_reg(cam, 0x07);
+ if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+ ctrl->value = sn9c102_pread_reg(cam, 0x05);
+ else
+ ctrl->value = sn9c102_pread_reg(cam, 0x07);
break;
case V4L2_CID_BLUE_BALANCE:
ctrl->value = sn9c102_pread_reg(cam, 0x06);
break;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- ctrl->value = sn9c102_pread_reg(cam, 0x05);
+ if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+ ctrl->value = sn9c102_pread_reg(cam, 0x07);
+ else
+ ctrl->value = sn9c102_pread_reg(cam, 0x05);
+ break;
break;
case V4L2_CID_GAIN:
if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0)
@@ -177,6 +324,7 @@ static int ov7630_get_ctrl(struct sn9c102_device* cam,
static int ov7630_set_ctrl(struct sn9c102_device* cam,
const struct v4l2_control* ctrl)
{
+ enum sn9c102_bridge bridge = sn9c102_get_bridge(cam);
int err = 0;
switch (ctrl->id) {
@@ -184,13 +332,19 @@ static int ov7630_set_ctrl(struct sn9c102_device* cam,
err += sn9c102_i2c_write(cam, 0x10, ctrl->value);
break;
case V4L2_CID_RED_BALANCE:
- err += sn9c102_write_reg(cam, ctrl->value, 0x07);
+ if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+ err += sn9c102_write_reg(cam, ctrl->value, 0x05);
+ else
+ err += sn9c102_write_reg(cam, ctrl->value, 0x07);
break;
case V4L2_CID_BLUE_BALANCE:
err += sn9c102_write_reg(cam, ctrl->value, 0x06);
break;
case SN9C102_V4L2_CID_GREEN_BALANCE:
- err += sn9c102_write_reg(cam, ctrl->value, 0x05);
+ if (bridge == BRIDGE_SN9C105 || bridge == BRIDGE_SN9C120)
+ err += sn9c102_write_reg(cam, ctrl->value, 0x07);
+ else
+ err += sn9c102_write_reg(cam, ctrl->value, 0x05);
break;
case V4L2_CID_GAIN:
err += sn9c102_i2c_write(cam, 0x00, ctrl->value);
@@ -227,8 +381,21 @@ static int ov7630_set_crop(struct sn9c102_device* cam,
{
struct sn9c102_sensor* s = sn9c102_get_sensor(cam);
int err = 0;
- u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1,
- v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
+ u8 h_start = 0, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1;
+
+ switch (sn9c102_get_bridge(cam)) {
+ case BRIDGE_SN9C101:
+ case BRIDGE_SN9C102:
+ case BRIDGE_SN9C103:
+ h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1;
+ break;
+ case BRIDGE_SN9C105:
+ case BRIDGE_SN9C120:
+ h_start = (u8)(rect->left - s->cropcap.bounds.left) + 4;
+ break;
+ default:
+ break;
+ }
err += sn9c102_write_reg(cam, h_start, 0x12);
err += sn9c102_write_reg(cam, v_start, 0x13);
@@ -242,10 +409,28 @@ static int ov7630_set_pix_format(struct sn9c102_device* cam,
{
int err = 0;
- if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X)
- err += sn9c102_write_reg(cam, 0x20, 0x19);
- else
- err += sn9c102_write_reg(cam, 0x50, 0x19);
+ switch (sn9c102_get_bridge(cam)) {
+ case BRIDGE_SN9C101:
+ case BRIDGE_SN9C102:
+ case BRIDGE_SN9C103:
+ if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8)
+ err += sn9c102_write_reg(cam, 0x50, 0x19);
+ else
+ err += sn9c102_write_reg(cam, 0x20, 0x19);
+ break;
+ case BRIDGE_SN9C105:
+ case BRIDGE_SN9C120:
+ if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) {
+ err += sn9c102_write_reg(cam, 0xe5, 0x17);
+ err += sn9c102_i2c_write(cam, 0x11, 0x04);
+ } else {
+ err += sn9c102_write_reg(cam, 0xe2, 0x17);
+ err += sn9c102_i2c_write(cam, 0x11, 0x02);
+ }
+ break;
+ default:
+ break;
+ }
return err;
}
@@ -254,7 +439,8 @@ static int ov7630_set_pix_format(struct sn9c102_device* cam,
static const struct sn9c102_sensor ov7630 = {
.name = "OV7630",
.maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>",
- .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103,
+ .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102 | BRIDGE_SN9C103 |
+ BRIDGE_SN9C105 | BRIDGE_SN9C120,
.sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE,
.frequency = SN9C102_I2C_100KHZ,
.interface = SN9C102_I2C_2WIRES,
@@ -417,6 +603,12 @@ int sn9c102_probe_ov7630(struct sn9c102_device* cam)
err += sn9c102_write_const_regs(cam, {0x01, 0x01},
{0x00, 0x01});
break;
+ case BRIDGE_SN9C105:
+ case BRIDGE_SN9C120:
+ err = sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1},
+ {0x29, 0x01}, {0x74, 0x02},
+ {0x0e, 0x01}, {0x44, 0x01});
+ break;
default:
break;
}
diff --git a/drivers/media/video/sn9c102/sn9c102_ov7660.c b/drivers/media/video/sn9c102/sn9c102_ov7660.c
index 4b6474048a72..8aae416ba8ec 100644
--- a/drivers/media/video/sn9c102/sn9c102_ov7660.c
+++ b/drivers/media/video/sn9c102/sn9c102_ov7660.c
@@ -41,65 +41,65 @@ static int ov7660_init(struct sn9c102_device* cam)
{0xbb, 0x2a}, {0xc7, 0x2b},
{0xd3, 0x2c}, {0xde, 0x2d},
{0xea, 0x2e}, {0xf4, 0x2f},
- {0xff, 0x30}, {0x00, 0x3F},
- {0xC7, 0x40}, {0x01, 0x41},
+ {0xff, 0x30}, {0x00, 0x3f},
+ {0xc7, 0x40}, {0x01, 0x41},
{0x44, 0x42}, {0x00, 0x43},
{0x44, 0x44}, {0x00, 0x45},
{0x44, 0x46}, {0x00, 0x47},
- {0xC7, 0x48}, {0x01, 0x49},
- {0xC7, 0x4A}, {0x01, 0x4B},
- {0xC7, 0x4C}, {0x01, 0x4D},
- {0x44, 0x4E}, {0x00, 0x4F},
+ {0xc7, 0x48}, {0x01, 0x49},
+ {0xc7, 0x4a}, {0x01, 0x4b},
+ {0xc7, 0x4c}, {0x01, 0x4d},
+ {0x44, 0x4e}, {0x00, 0x4f},
{0x44, 0x50}, {0x00, 0x51},
{0x44, 0x52}, {0x00, 0x53},
- {0xC7, 0x54}, {0x01, 0x55},
- {0xC7, 0x56}, {0x01, 0x57},
- {0xC7, 0x58}, {0x01, 0x59},
- {0x44, 0x5A}, {0x00, 0x5B},
- {0x44, 0x5C}, {0x00, 0x5D},
- {0x44, 0x5E}, {0x00, 0x5F},
- {0xC7, 0x60}, {0x01, 0x61},
- {0xC7, 0x62}, {0x01, 0x63},
- {0xC7, 0x64}, {0x01, 0x65},
+ {0xc7, 0x54}, {0x01, 0x55},
+ {0xc7, 0x56}, {0x01, 0x57},
+ {0xc7, 0x58}, {0x01, 0x59},
+ {0x44, 0x5a}, {0x00, 0x5b},
+ {0x44, 0x5c}, {0x00, 0x5d},
+ {0x44, 0x5e}, {0x00, 0x5f},
+ {0xc7, 0x60}, {0x01, 0x61},
+ {0xc7, 0x62}, {0x01, 0x63},
+ {0xc7, 0x64}, {0x01, 0x65},
{0x44, 0x66}, {0x00, 0x67},
{0x44, 0x68}, {0x00, 0x69},
- {0x44, 0x6A}, {0x00, 0x6B},
- {0xC7, 0x6C}, {0x01, 0x6D},
- {0xC7, 0x6E}, {0x01, 0x6F},
- {0xC7, 0x70}, {0x01, 0x71},
+ {0x44, 0x6a}, {0x00, 0x6b},
+ {0xc7, 0x6c}, {0x01, 0x6d},
+ {0xc7, 0x6e}, {0x01, 0x6f},
+ {0xc7, 0x70}, {0x01, 0x71},
{0x44, 0x72}, {0x00, 0x73},
{0x44, 0x74}, {0x00, 0x75},
{0x44, 0x76}, {0x00, 0x77},
- {0xC7, 0x78}, {0x01, 0x79},
- {0xC7, 0x7A}, {0x01, 0x7B},
- {0xC7, 0x7C}, {0x01, 0x7D},
- {0x44, 0x7E}, {0x00, 0x7F},
+ {0xc7, 0x78}, {0x01, 0x79},
+ {0xc7, 0x7a}, {0x01, 0x7b},
+ {0xc7, 0x7c}, {0x01, 0x7d},
+ {0x44, 0x7e}, {0x00, 0x7f},
{0x14, 0x84}, {0x00, 0x85},
{0x27, 0x86}, {0x00, 0x87},
{0x07, 0x88}, {0x00, 0x89},
- {0xEC, 0x8A}, {0x0f, 0x8B},
- {0xD8, 0x8C}, {0x0f, 0x8D},
- {0x3D, 0x8E}, {0x00, 0x8F},
- {0x3D, 0x90}, {0x00, 0x91},
- {0xCD, 0x92}, {0x0f, 0x93},
+ {0xec, 0x8a}, {0x0f, 0x8b},
+ {0xd8, 0x8c}, {0x0f, 0x8d},
+ {0x3d, 0x8e}, {0x00, 0x8f},
+ {0x3d, 0x90}, {0x00, 0x91},
+ {0xcd, 0x92}, {0x0f, 0x93},
{0xf7, 0x94}, {0x0f, 0x95},
- {0x0C, 0x96}, {0x00, 0x97},
+ {0x0c, 0x96}, {0x00, 0x97},
{0x00, 0x98}, {0x66, 0x99},
- {0x05, 0x9A}, {0x00, 0x9B},
- {0x04, 0x9C}, {0x00, 0x9D},
- {0x08, 0x9E}, {0x00, 0x9F},
- {0x2D, 0xC0}, {0x2D, 0xC1},
- {0x3A, 0xC2}, {0x05, 0xC3},
- {0x04, 0xC4}, {0x3F, 0xC5},
- {0x00, 0xC6}, {0x00, 0xC7},
- {0x50, 0xC8}, {0x3C, 0xC9},
- {0x28, 0xCA}, {0xD8, 0xCB},
- {0x14, 0xCC}, {0xEC, 0xCD},
- {0x32, 0xCE}, {0xDD, 0xCF},
- {0x32, 0xD0}, {0xDD, 0xD1},
- {0x6A, 0xD2}, {0x50, 0xD3},
- {0x00, 0xD4}, {0x00, 0xD5},
- {0x00, 0xD6});
+ {0x05, 0x9a}, {0x00, 0x9b},
+ {0x04, 0x9c}, {0x00, 0x9d},
+ {0x08, 0x9e}, {0x00, 0x9f},
+ {0x2d, 0xc0}, {0x2d, 0xc1},
+ {0x3a, 0xc2}, {0x05, 0xc3},
+ {0x04, 0xc4}, {0x3f, 0xc5},
+ {0x00, 0xc6}, {0x00, 0xc7},
+ {0x50, 0xc8}, {0x3c, 0xc9},
+ {0x28, 0xca}, {0xd8, 0xcb},
+ {0x14, 0xcc}, {0xec, 0xcd},
+ {0x32, 0xce}, {0xdd, 0xcf},
+ {0x32, 0xd0}, {0xdd, 0xd1},
+ {0x6a, 0xd2}, {0x50, 0xd3},
+ {0x00, 0xd4}, {0x00, 0xd5},
+ {0x00, 0xd6});
err += sn9c102_i2c_write(cam, 0x12, 0x80);
err += sn9c102_i2c_write(cam, 0x11, 0x09);
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index 3e736be5de84..eb220461ac77 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -1321,7 +1321,7 @@ static int saa_ioctl(struct inode *inode, struct file *file,
u32 format;
if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT;
- if (p.palette < sizeof(palette2fmt) / sizeof(u32)) {
+ if (p.palette < ARRAY_SIZE(palette2fmt)) {
format = palette2fmt[p.palette];
saa->win.color_fmt = format;
saawrite(format | 0x60,
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index bf3aa8d2d57e..4dc5bc714b95 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -715,8 +715,11 @@ static int stv680_start_stream (struct usb_stv *stv680)
stv680_video_irq, stv680);
stv680->urb[i] = urb;
err = usb_submit_urb (stv680->urb[i], GFP_KERNEL);
- if (err)
- PDEBUG (0, "STV(e): urb burned down in start stream");
+ if (err) {
+ PDEBUG (0, "STV(e): urb burned down with err "
+ "%d in start stream %d", err, i);
+ goto nomem_err;
+ }
} /* i STV680_NUMSBUF */
stv680->framecount = 0;
diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c
index 1a1bef0e9c3d..59cff5a3c59e 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/video/tda8290.c
@@ -21,7 +21,17 @@
#include <linux/i2c.h>
#include <linux/videodev.h>
#include <linux/delay.h>
-#include <media/tuner.h>
+#include "tuner-driver.h"
+
+/* ---------------------------------------------------------------------- */
+
+struct tda8290_priv {
+ unsigned char tda8290_easy_mode;
+ unsigned char tda827x_lpsel;
+ unsigned char tda827x_addr;
+ unsigned char tda827x_ver;
+ unsigned int sgIF;
+};
/* ---------------------------------------------------------------------- */
@@ -76,7 +86,8 @@ static void tda827x_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
u32 N;
int i;
struct tuner *t = i2c_get_clientdata(c);
- struct i2c_msg msg = {.addr = t->tda827x_addr, .flags = 0};
+ struct tda8290_priv *priv = t->priv;
+ struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags = 0};
if (t->mode == V4L2_TUNER_RADIO)
freq = freq / 1000;
@@ -95,7 +106,7 @@ static void tda827x_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
tuner_reg[1] = (unsigned char)(N>>8);
tuner_reg[2] = (unsigned char) N;
tuner_reg[3] = 0x40;
- tuner_reg[4] = 0x52 + (t->tda827x_lpsel << 5);
+ tuner_reg[4] = 0x52 + (priv->tda827x_lpsel << 5);
tuner_reg[5] = (tda827x_analog[i].spd << 6) + (tda827x_analog[i].div1p5 <<5) +
(tda827x_analog[i].bs <<3) + tda827x_analog[i].bp;
tuner_reg[6] = 0x8f + (tda827x_analog[i].gc3 << 4);
@@ -146,8 +157,9 @@ static void tda827x_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
static void tda827x_agcf(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct tda8290_priv *priv = t->priv;
unsigned char data[] = {0x80, 0x0c};
- struct i2c_msg msg = {.addr = t->tda827x_addr, .buf = data,
+ struct i2c_msg msg = {.addr = priv->tda827x_addr, .buf = data,
.flags = 0, .len = 2};
i2c_transfer(c->adapter, &msg, 1);
}
@@ -234,7 +246,8 @@ static void tda827xa_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
u32 N;
int i;
struct tuner *t = i2c_get_clientdata(c);
- struct i2c_msg msg = {.addr = t->tda827x_addr, .flags = 0, .buf = tuner_reg};
+ struct tda8290_priv *priv = t->priv;
+ struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags = 0, .buf = tuner_reg};
tda827xa_lna_gain( c, 1);
msleep(10);
@@ -271,7 +284,7 @@ static void tda827xa_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
tuner_reg[1] = 0xff;
tuner_reg[2] = 0xe0;
tuner_reg[3] = 0;
- tuner_reg[4] = 0x99 + (t->tda827x_lpsel << 1);
+ tuner_reg[4] = 0x99 + (priv->tda827x_lpsel << 1);
msg.len = 5;
i2c_transfer(c->adapter, &msg, 1);
@@ -311,15 +324,16 @@ static void tda827xa_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
i2c_transfer(c->adapter, &msg, 1);
tuner_reg[0] = 0xc0;
- tuner_reg[1] = 0x19 + (t->tda827x_lpsel << 1);
+ tuner_reg[1] = 0x19 + (priv->tda827x_lpsel << 1);
i2c_transfer(c->adapter, &msg, 1);
}
static void tda827xa_agcf(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct tda8290_priv *priv = t->priv;
unsigned char data[] = {0x80, 0x2c};
- struct i2c_msg msg = {.addr = t->tda827x_addr, .buf = data,
+ struct i2c_msg msg = {.addr = priv->tda827x_addr, .buf = data,
.flags = 0, .len = 2};
i2c_transfer(c->adapter, &msg, 1);
}
@@ -347,8 +361,9 @@ static void tda8290_i2c_bridge(struct i2c_client *c, int close)
static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct tda8290_priv *priv = t->priv;
unsigned char soft_reset[] = { 0x00, 0x00 };
- unsigned char easy_mode[] = { 0x01, t->tda8290_easy_mode };
+ unsigned char easy_mode[] = { 0x01, priv->tda8290_easy_mode };
unsigned char expert_mode[] = { 0x01, 0x80 };
unsigned char agc_out_on[] = { 0x02, 0x00 };
unsigned char gainset_off[] = { 0x28, 0x14 };
@@ -375,18 +390,18 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
i2c_master_send(c, soft_reset, 2);
msleep(1);
- expert_mode[1] = t->tda8290_easy_mode + 0x80;
+ expert_mode[1] = priv->tda8290_easy_mode + 0x80;
i2c_master_send(c, expert_mode, 2);
i2c_master_send(c, gainset_off, 2);
i2c_master_send(c, if_agc_spd, 2);
- if (t->tda8290_easy_mode & 0x60)
+ if (priv->tda8290_easy_mode & 0x60)
i2c_master_send(c, adc_head_9, 2);
else
i2c_master_send(c, adc_head_6, 2);
i2c_master_send(c, pll_bw_nom, 2);
tda8290_i2c_bridge(c, 1);
- if (t->tda827x_ver != 0)
+ if (priv->tda827x_ver != 0)
tda827xa_tune(c, ifc, freq);
else
tda827x_tune(c, ifc, freq);
@@ -418,7 +433,7 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
if ((agc_stat > 115) || !(pll_stat & 0x80)) {
tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
agc_stat, pll_stat & 0x80);
- if (t->tda827x_ver != 0)
+ if (priv->tda827x_ver != 0)
tda827xa_agcf(c);
else
tda827x_agcf(c);
@@ -437,7 +452,7 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
}
/* l/ l' deadlock? */
- if(t->tda8290_easy_mode & 0x60) {
+ if(priv->tda8290_easy_mode & 0x60) {
i2c_master_send(c, &addr_adc_sat, 1);
i2c_master_recv(c, &adc_sat, 1);
i2c_master_send(c, &addr_pll_stat, 1);
@@ -459,41 +474,42 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
static void set_audio(struct tuner *t)
{
+ struct tda8290_priv *priv = t->priv;
char* mode;
- t->tda827x_lpsel = 0;
+ priv->tda827x_lpsel = 0;
if (t->std & V4L2_STD_MN) {
- t->sgIF = 92;
- t->tda8290_easy_mode = 0x01;
- t->tda827x_lpsel = 1;
+ priv->sgIF = 92;
+ priv->tda8290_easy_mode = 0x01;
+ priv->tda827x_lpsel = 1;
mode = "MN";
} else if (t->std & V4L2_STD_B) {
- t->sgIF = 108;
- t->tda8290_easy_mode = 0x02;
+ priv->sgIF = 108;
+ priv->tda8290_easy_mode = 0x02;
mode = "B";
} else if (t->std & V4L2_STD_GH) {
- t->sgIF = 124;
- t->tda8290_easy_mode = 0x04;
+ priv->sgIF = 124;
+ priv->tda8290_easy_mode = 0x04;
mode = "GH";
} else if (t->std & V4L2_STD_PAL_I) {
- t->sgIF = 124;
- t->tda8290_easy_mode = 0x08;
+ priv->sgIF = 124;
+ priv->tda8290_easy_mode = 0x08;
mode = "I";
} else if (t->std & V4L2_STD_DK) {
- t->sgIF = 124;
- t->tda8290_easy_mode = 0x10;
+ priv->sgIF = 124;
+ priv->tda8290_easy_mode = 0x10;
mode = "DK";
} else if (t->std & V4L2_STD_SECAM_L) {
- t->sgIF = 124;
- t->tda8290_easy_mode = 0x20;
+ priv->sgIF = 124;
+ priv->tda8290_easy_mode = 0x20;
mode = "L";
} else if (t->std & V4L2_STD_SECAM_LC) {
- t->sgIF = 20;
- t->tda8290_easy_mode = 0x40;
+ priv->sgIF = 20;
+ priv->tda8290_easy_mode = 0x40;
mode = "LC";
} else {
- t->sgIF = 124;
- t->tda8290_easy_mode = 0x10;
+ priv->sgIF = 124;
+ priv->tda8290_easy_mode = 0x10;
mode = "xx";
}
tuner_dbg("setting tda8290 to system %s\n", mode);
@@ -502,9 +518,10 @@ static void set_audio(struct tuner *t)
static void set_tv_freq(struct i2c_client *c, unsigned int freq)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct tda8290_priv *priv = t->priv;
set_audio(t);
- tda8290_tune(c, t->sgIF, freq);
+ tda8290_tune(c, priv->sgIF, freq);
}
static void set_radio_freq(struct i2c_client *c, unsigned int freq)
@@ -528,13 +545,14 @@ static int has_signal(struct i2c_client *c)
static void standby(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct tda8290_priv *priv = t->priv;
unsigned char cb1[] = { 0x30, 0xD0 };
unsigned char tda8290_standby[] = { 0x00, 0x02 };
unsigned char tda8290_agc_tri[] = { 0x02, 0x20 };
- struct i2c_msg msg = {.addr = t->tda827x_addr, .flags=0, .buf=cb1, .len = 2};
+ struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0, .buf=cb1, .len = 2};
tda8290_i2c_bridge(c, 1);
- if (t->tda827x_ver != 0)
+ if (priv->tda827x_ver != 0)
cb1[1] = 0x90;
i2c_transfer(c->adapter, &msg, 1);
tda8290_i2c_bridge(c, 0);
@@ -560,13 +578,14 @@ static void tda8290_init_if(struct i2c_client *c)
static void tda8290_init_tuner(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
+ struct tda8290_priv *priv = t->priv;
unsigned char tda8275_init[] = { 0x00, 0x00, 0x00, 0x40, 0xdC, 0x04, 0xAf,
0x3F, 0x2A, 0x04, 0xFF, 0x00, 0x00, 0x40 };
unsigned char tda8275a_init[] = { 0x00, 0x00, 0x00, 0x00, 0xdC, 0x05, 0x8b,
0x0c, 0x04, 0x20, 0xFF, 0x00, 0x00, 0x4b };
- struct i2c_msg msg = {.addr = t->tda827x_addr, .flags=0,
+ struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0,
.buf=tda8275_init, .len = 14};
- if (t->tda827x_ver != 0)
+ if (priv->tda827x_ver != 0)
msg.buf = tda8275a_init;
tda8290_i2c_bridge(c, 1);
@@ -576,14 +595,36 @@ static void tda8290_init_tuner(struct i2c_client *c)
/*---------------------------------------------------------------------*/
+static void tda8290_release(struct i2c_client *c)
+{
+ struct tuner *t = i2c_get_clientdata(c);
+
+ kfree(t->priv);
+ t->priv = NULL;
+}
+
+static struct tuner_operations tda8290_tuner_ops = {
+ .set_tv_freq = set_tv_freq,
+ .set_radio_freq = set_radio_freq,
+ .has_signal = has_signal,
+ .standby = standby,
+ .release = tda8290_release,
+};
+
int tda8290_init(struct i2c_client *c)
{
+ struct tda8290_priv *priv = NULL;
struct tuner *t = i2c_get_clientdata(c);
u8 data;
int i, ret, tuners_found;
u32 tuner_addrs;
struct i2c_msg msg = {.flags=I2C_M_RD, .buf=&data, .len = 1};
+ priv = kzalloc(sizeof(struct tda8290_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ t->priv = priv;
+
tda8290_i2c_bridge(c, 1);
/* probe for tuner chip */
tuners_found = 0;
@@ -618,7 +659,7 @@ int tda8290_init(struct i2c_client *c)
tuner_addrs = tuner_addrs & 0xff;
tuner_info ("setting tuner address to %x\n", tuner_addrs);
}
- t->tda827x_addr = tuner_addrs;
+ priv->tda827x_addr = tuner_addrs;
msg.addr = tuner_addrs;
tda8290_i2c_bridge(c, 1);
@@ -627,18 +668,16 @@ int tda8290_init(struct i2c_client *c)
tuner_warn ("TDA827x access failed!\n");
if ((data & 0x3c) == 0) {
strlcpy(c->name, "tda8290+75", sizeof(c->name));
- t->tda827x_ver = 0;
+ priv->tda827x_ver = 0;
} else {
strlcpy(c->name, "tda8290+75a", sizeof(c->name));
- t->tda827x_ver = 2;
+ priv->tda827x_ver = 2;
}
tuner_info("type set to %s\n", c->name);
- t->set_tv_freq = set_tv_freq;
- t->set_radio_freq = set_radio_freq;
- t->has_signal = has_signal;
- t->standby = standby;
- t->tda827x_lpsel = 0;
+ memcpy(&t->ops, &tda8290_tuner_ops, sizeof(struct tuner_operations));
+
+ priv->tda827x_lpsel = 0;
t->mode = V4L2_TUNER_ANALOG_TV;
tda8290_init_tuner(c);
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index fde576f1101c..a8f773274fe3 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -11,6 +11,7 @@
#include <media/v4l2-common.h>
#include <media/tuner.h>
+#include "tuner-driver.h"
/* Chips:
@@ -29,6 +30,9 @@
printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+struct tda9887_priv {
+ unsigned char data[4];
+};
/* ---------------------------------------------------------------------- */
@@ -508,10 +512,11 @@ static int tda9887_status(struct tuner *t)
static void tda9887_configure(struct i2c_client *client)
{
struct tuner *t = i2c_get_clientdata(client);
+ struct tda9887_priv *priv = t->priv;
int rc;
- memset(t->tda9887_data,0,sizeof(t->tda9887_data));
- tda9887_set_tvnorm(t,t->tda9887_data);
+ memset(priv->data,0,sizeof(priv->data));
+ tda9887_set_tvnorm(t,priv->data);
/* A note on the port settings:
These settings tend to depend on the specifics of the board.
@@ -526,22 +531,22 @@ static void tda9887_configure(struct i2c_client *client)
the ports should be set to active (0), but, again, that may
differ depending on the precise hardware configuration.
*/
- t->tda9887_data[1] |= cOutputPort1Inactive;
- t->tda9887_data[1] |= cOutputPort2Inactive;
+ priv->data[1] |= cOutputPort1Inactive;
+ priv->data[1] |= cOutputPort2Inactive;
- tda9887_set_config(t,t->tda9887_data);
- tda9887_set_insmod(t,t->tda9887_data);
+ tda9887_set_config(t,priv->data);
+ tda9887_set_insmod(t,priv->data);
if (t->mode == T_STANDBY) {
- t->tda9887_data[1] |= cForcedMuteAudioON;
+ priv->data[1] |= cForcedMuteAudioON;
}
tda9887_dbg("writing: b=0x%02x c=0x%02x e=0x%02x\n",
- t->tda9887_data[1],t->tda9887_data[2],t->tda9887_data[3]);
+ priv->data[1],priv->data[2],priv->data[3]);
if (tuner_debug > 1)
- dump_write_message(t, t->tda9887_data);
+ dump_write_message(t, priv->data);
- if (4 != (rc = i2c_master_send(&t->i2c,t->tda9887_data,4)))
+ if (4 != (rc = i2c_master_send(&t->i2c,priv->data,4)))
tda9887_info("i2c i/o error: rc == %d (should be 4)\n",rc);
if (tuner_debug > 2) {
@@ -555,7 +560,8 @@ static void tda9887_configure(struct i2c_client *client)
static void tda9887_tuner_status(struct i2c_client *client)
{
struct tuner *t = i2c_get_clientdata(client);
- tda9887_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n", t->tda9887_data[1], t->tda9887_data[2], t->tda9887_data[3]);
+ struct tda9887_priv *priv = t->priv;
+ tda9887_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n", priv->data[1], priv->data[2], priv->data[3]);
}
static int tda9887_get_afc(struct i2c_client *client)
@@ -586,20 +592,39 @@ static void tda9887_set_freq(struct i2c_client *client, unsigned int freq)
tda9887_configure(client);
}
+static void tda9887_release(struct i2c_client *c)
+{
+ struct tuner *t = i2c_get_clientdata(c);
+
+ kfree(t->priv);
+ t->priv = NULL;
+}
+
+static struct tuner_operations tda9887_tuner_ops = {
+ .set_tv_freq = tda9887_set_freq,
+ .set_radio_freq = tda9887_set_freq,
+ .standby = tda9887_standby,
+ .tuner_status = tda9887_tuner_status,
+ .get_afc = tda9887_get_afc,
+ .release = tda9887_release,
+};
+
int tda9887_tuner_init(struct i2c_client *c)
{
+ struct tda9887_priv *priv = NULL;
struct tuner *t = i2c_get_clientdata(c);
+ priv = kzalloc(sizeof(struct tda9887_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ t->priv = priv;
+
strlcpy(c->name, "tda9887", sizeof(c->name));
tda9887_info("tda988[5/6/7] found @ 0x%x (%s)\n", t->i2c.addr,
t->i2c.driver->driver.name);
- t->set_tv_freq = tda9887_set_freq;
- t->set_radio_freq = tda9887_set_freq;
- t->standby = tda9887_standby;
- t->tuner_status = tda9887_tuner_status;
- t->get_afc = tda9887_get_afc;
+ memcpy(&t->ops, &tda9887_tuner_ops, sizeof(struct tuner_operations));
return 0;
}
diff --git a/drivers/media/video/tea5761.c b/drivers/media/video/tea5761.c
new file mode 100644
index 000000000000..ae105c2cd0ac
--- /dev/null
+++ b/drivers/media/video/tea5761.c
@@ -0,0 +1,243 @@
+/*
+ * For Philips TEA5761 FM Chip
+ * I2C address is always 0x20 (0x10 in 7-bit mode).
+ *
+ * Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * This code is placed under the terms of the GNU General Public License v2
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/videodev.h>
+#include <linux/delay.h>
+#include <media/tuner.h>
+#include "tuner-driver.h"
+
+#define PREFIX "TEA5761 "
+
+/* from tuner-core.c */
+extern int tuner_debug;
+
+/*****************************************************************************/
+
+/***************************
+ * TEA5761HN I2C registers *
+ ***************************/
+
+/* INTREG - Read: bytes 0 and 1 / Write: byte 0 */
+
+ /* first byte for reading */
+#define TEA5761_INTREG_IFFLAG 0x10
+#define TEA5761_INTREG_LEVFLAG 0x8
+#define TEA5761_INTREG_FRRFLAG 0x2
+#define TEA5761_INTREG_BLFLAG 0x1
+
+ /* second byte for reading / byte for writing */
+#define TEA5761_INTREG_IFMSK 0x10
+#define TEA5761_INTREG_LEVMSK 0x8
+#define TEA5761_INTREG_FRMSK 0x2
+#define TEA5761_INTREG_BLMSK 0x1
+
+/* FRQSET - Read: bytes 2 and 3 / Write: bytes 1 and 2 */
+
+ /* First byte */
+#define TEA5761_FRQSET_SEARCH_UP 0x80 /* 1 = station search from bottom to top */
+#define TEA5761_FRQSET_SEARCH_MODE 0x40 /* 1=Search mode */
+
+ /* Bits 0-5 for divider MSB */
+
+ /* Second byte */
+ /* Bits 0-7 for divider LSB */
+
+/* TNCTRL - Read: bytes 4 and 5 / Write: Bytes 3 and 4 */
+
+ /* first byte */
+
+#define TEA5761_TNCTRL_PUPD_0 0x40 /* Power UP/Power Down MSB */
+#define TEA5761_TNCTRL_BLIM 0x20 /* 1= Japan Frequencies, 0= European frequencies */
+#define TEA5761_TNCTRL_SWPM 0x10 /* 1= software port is FRRFLAG */
+#define TEA5761_TNCTRL_IFCTC 0x08 /* 1= IF count time 15.02 ms, 0= IF count time 2.02 ms */
+#define TEA5761_TNCTRL_AFM 0x04
+#define TEA5761_TNCTRL_SMUTE 0x02 /* 1= Soft mute */
+#define TEA5761_TNCTRL_SNC 0x01
+
+ /* second byte */
+
+#define TEA5761_TNCTRL_MU 0x80 /* 1=Hard mute */
+#define TEA5761_TNCTRL_SSL_1 0x40
+#define TEA5761_TNCTRL_SSL_0 0x20
+#define TEA5761_TNCTRL_HLSI 0x10
+#define TEA5761_TNCTRL_MST 0x08 /* 1 = mono */
+#define TEA5761_TNCTRL_SWP 0x04
+#define TEA5761_TNCTRL_DTC 0x02 /* 1 = deemphasis 50 us, 0 = deemphasis 75 us */
+#define TEA5761_TNCTRL_AHLSI 0x01
+
+/* FRQCHECK - Read: bytes 6 and 7 */
+ /* First byte */
+
+ /* Bits 0-5 for divider MSB */
+
+ /* Second byte */
+ /* Bits 0-7 for divider LSB */
+
+/* TUNCHECK - Read: bytes 8 and 9 */
+
+ /* First byte */
+#define TEA5761_TUNCHECK_IF_MASK 0x7e /* IF count */
+#define TEA5761_TUNCHECK_TUNTO 0x01
+
+ /* Second byte */
+#define TEA5761_TUNCHECK_LEV_MASK 0xf0 /* Level Count */
+#define TEA5761_TUNCHECK_LD 0x08
+#define TEA5761_TUNCHECK_STEREO 0x04
+
+/* TESTREG - Read: bytes 10 and 11 / Write: bytes 5 and 6 */
+
+ /* All zero = no test mode */
+
+/* MANID - Read: bytes 12 and 13 */
+
+ /* First byte - should be 0x10 */
+#define TEA5767_MANID_VERSION_MASK 0xf0 /* Version = 1 */
+#define TEA5767_MANID_ID_MSB_MASK 0x0f /* Manufacturer ID - should be 0 */
+
+ /* Second byte - Should be 0x2b */
+
+#define TEA5767_MANID_ID_LSB_MASK 0xfe /* Manufacturer ID - should be 0x15 */
+#define TEA5767_MANID_IDAV 0x01 /* 1 = Chip has ID, 0 = Chip has no ID */
+
+/* Chip ID - Read: bytes 14 and 15 */
+
+ /* First byte - should be 0x57 */
+
+ /* Second byte - should be 0x61 */
+
+/*****************************************************************************/
+
+static void set_tv_freq(struct i2c_client *c, unsigned int freq)
+{
+ struct tuner *t = i2c_get_clientdata(c);
+
+ tuner_warn("This tuner doesn't support TV freq.\n");
+}
+
+#define FREQ_OFFSET 0 /* for TEA5767, it is 700 to give the right freq */
+static void tea5761_status_dump(unsigned char *buffer)
+{
+ unsigned int div, frq;
+
+ div = ((buffer[2] & 0x3f) << 8) | buffer[3];
+
+ frq = 1000 * (div * 32768 / 1000 + FREQ_OFFSET + 225) / 4; /* Freq in kHz */
+
+ printk(PREFIX "Frequency %d.%03d kHz (divider = 0x%04x)\n",
+ frq / 1000, frq % 1000, div);
+}
+
+/* Frequency should be specified in units of 62.5 Hz */
+static void set_radio_freq(struct i2c_client *c, unsigned int frq)
+{
+ struct tuner *t = i2c_get_clientdata(c);
+ unsigned char buffer[7] = {0, 0, 0, 0, 0, 0, 0 };
+ unsigned div;
+ int rc;
+
+ tuner_dbg (PREFIX "radio freq counter %d\n", frq);
+
+ if (t->mode == T_STANDBY) {
+ tuner_dbg("TEA5761 set to standby mode\n");
+ buffer[5] |= TEA5761_TNCTRL_MU;
+ } else {
+ buffer[4] |= TEA5761_TNCTRL_PUPD_0;
+ }
+
+ if (t->audmode == V4L2_TUNER_MODE_MONO) {
+ tuner_dbg("TEA5761 set to mono\n");
+ buffer[5] |= TEA5761_TNCTRL_MST;
+ } else {
+ tuner_dbg("TEA5761 set to stereo\n");
+ }
+
+ div = (1000 * (frq * 4 / 16 + 700 + 225) ) >> 15;
+ buffer[1] = (div >> 8) & 0x3f;
+ buffer[2] = div & 0xff;
+
+ if (tuner_debug)
+ tea5761_status_dump(buffer);
+
+ if (7 != (rc = i2c_master_send(c, buffer, 7)))
+ tuner_warn("i2c i/o error: rc == %d (should be 7)\n", rc);
+}
+
+static int tea5761_signal(struct i2c_client *c)
+{
+ unsigned char buffer[16];
+ int rc;
+ struct tuner *t = i2c_get_clientdata(c);
+
+ memset(buffer, 0, sizeof(buffer));
+ if (16 != (rc = i2c_master_recv(c, buffer, 16)))
+ tuner_warn("i2c i/o error: rc == %d (should be 16)\n", rc);
+
+ return ((buffer[9] & TEA5761_TUNCHECK_LEV_MASK) << (13 - 4));
+}
+
+static int tea5761_stereo(struct i2c_client *c)
+{
+ unsigned char buffer[16];
+ int rc;
+ struct tuner *t = i2c_get_clientdata(c);
+
+ memset(buffer, 0, sizeof(buffer));
+ if (16 != (rc = i2c_master_recv(c, buffer, 16)))
+ tuner_warn("i2c i/o error: rc == %d (should be 16)\n", rc);
+
+ rc = buffer[9] & TEA5761_TUNCHECK_STEREO;
+
+ tuner_dbg("TEA5761 radio ST GET = %02x\n", rc);
+
+ return (rc ? V4L2_TUNER_SUB_STEREO : 0);
+}
+
+int tea5761_autodetection(struct i2c_client *c)
+{
+ unsigned char buffer[16];
+ int rc;
+ struct tuner *t = i2c_get_clientdata(c);
+
+ if (16 != (rc = i2c_master_recv(c, buffer, 16))) {
+ tuner_warn("it is not a TEA5761. Received %i chars.\n", rc);
+ return EINVAL;
+ }
+
+ if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x61)) {
+ tuner_warn("Manufacturer ID = 0x%02x, Chip ID = %02x%02x. It is not a TEA5761\n", buffer[13], buffer[14], buffer[15]);
+ return EINVAL;
+ }
+ tuner_warn("TEA5761 detected.\n");
+ return 0;
+}
+
+static struct tuner_operations tea5761_tuner_ops = {
+ .set_tv_freq = set_tv_freq,
+ .set_radio_freq = set_radio_freq,
+ .has_signal = tea5761_signal,
+ .is_stereo = tea5761_stereo,
+};
+
+int tea5761_tuner_init(struct i2c_client *c)
+{
+ struct tuner *t = i2c_get_clientdata(c);
+
+ if (tea5761_autodetection(c) == EINVAL)
+ return EINVAL;
+
+ tuner_info("type set to %d (%s)\n", t->type, "Philips TEA5761HN FM Radio");
+ strlcpy(c->name, "tea5761", sizeof(c->name));
+
+ memcpy(&t->ops, &tea5761_tuner_ops, sizeof(struct tuner_operations));
+
+ return (0);
+}
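
For reference, a worked example of the divider computation in set_radio_freq() above; this is an illustrative sketch (not part of the patch), assuming frq arrives in the tuner core's 62.5 Hz units (MHz * 16000):

    /* Illustrative only: tuning the TEA5761 to 87.5 MHz */
    unsigned int frq = 1400000;   /* 87.5 MHz expressed in 62.5 Hz units */
    unsigned int div = (1000 * (frq * 4 / 16 + 700 + 225)) >> 15;
    /* = 350925000 >> 15 = 10709 = 0x29d5, so buffer[1] = 0x29, buffer[2] = 0xd5 */
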
diff --git a/drivers/media/video/tea5767.c b/drivers/media/video/tea5767.c
index d1c41781ccc4..4985d47a508f 100644
--- a/drivers/media/video/tea5767.c
+++ b/drivers/media/video/tea5767.c
@@ -13,7 +13,7 @@
#include <linux/i2c.h>
#include <linux/videodev.h>
#include <linux/delay.h>
-#include <media/tuner.h>
+#include "tuner-driver.h"
#define PREFIX "TEA5767 "
@@ -343,6 +343,14 @@ int tea5767_autodetection(struct i2c_client *c)
return 0;
}
+static struct tuner_operations tea5767_tuner_ops = {
+ .set_tv_freq = set_tv_freq,
+ .set_radio_freq = set_radio_freq,
+ .has_signal = tea5767_signal,
+ .is_stereo = tea5767_stereo,
+ .standby = tea5767_standby,
+};
+
int tea5767_tuner_init(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
@@ -350,11 +358,7 @@ int tea5767_tuner_init(struct i2c_client *c)
tuner_info("type set to %d (%s)\n", t->type, "Philips TEA5767HN FM Radio");
strlcpy(c->name, "tea5767", sizeof(c->name));
- t->set_tv_freq = set_tv_freq;
- t->set_radio_freq = set_radio_freq;
- t->has_signal = tea5767_signal;
- t->is_stereo = tea5767_stereo;
- t->standby = tea5767_standby;
+ memcpy(&t->ops, &tea5767_tuner_ops, sizeof(struct tuner_operations));
return (0);
}
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 505591a7abe9..e646465464a1 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -20,11 +20,15 @@
#include <media/tuner.h>
#include <media/v4l2-common.h>
+#include "tuner-driver.h"
#define UNSET (-1U)
/* standard i2c insmod options */
static unsigned short normal_i2c[] = {
+#ifdef CONFIG_TUNER_TEA5761
+ 0x10,
+#endif
0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
@@ -77,7 +81,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
tuner_warn ("tuner type not set\n");
return;
}
- if (NULL == t->set_tv_freq) {
+ if (NULL == t->ops.set_tv_freq) {
tuner_warn ("Tuner has no way to set tv freq\n");
return;
}
@@ -92,7 +96,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
else
freq = tv_range[1] * 16;
}
- t->set_tv_freq(c, freq);
+ t->ops.set_tv_freq(c, freq);
}
static void set_radio_freq(struct i2c_client *c, unsigned int freq)
@@ -103,7 +107,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
tuner_warn ("tuner type not set\n");
return;
}
- if (NULL == t->set_radio_freq) {
+ if (NULL == t->ops.set_radio_freq) {
tuner_warn ("tuner has no way to set radio frequency\n");
return;
}
@@ -119,7 +123,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
freq = radio_range[1] * 16000;
}
- t->set_radio_freq(c, freq);
+ t->ops.set_radio_freq(c, freq);
}
static void set_freq(struct i2c_client *c, unsigned long freq)
@@ -174,6 +178,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
return;
}
+ /* discard private data, in case set_type() was previously called */
+ if (t->ops.release)
+ t->ops.release(c);
+ else {
+ kfree(t->priv);
+ t->priv = NULL;
+ }
+
switch (t->type) {
case TUNER_MT2032:
microtune_init(c);
@@ -189,6 +201,16 @@ static void set_type(struct i2c_client *c, unsigned int type,
}
t->mode_mask = T_RADIO;
break;
+#ifdef CONFIG_TUNER_TEA5761
+ case TUNER_TEA5761:
+ if (tea5761_tuner_init(c) == EINVAL) {
+ t->type = TUNER_ABSENT;
+ t->mode_mask = T_UNINITIALIZED;
+ return;
+ }
+ t->mode_mask = T_RADIO;
+ break;
+#endif
case TUNER_PHILIPS_FMD1216ME_MK3:
buffer[0] = 0x0b;
buffer[1] = 0xdc;
@@ -408,11 +430,11 @@ static void tuner_status(struct i2c_client *client)
tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std);
if (t->mode != V4L2_TUNER_RADIO)
return;
- if (t->has_signal) {
- tuner_info("Signal strength: %d\n", t->has_signal(client));
+ if (t->ops.has_signal) {
+ tuner_info("Signal strength: %d\n", t->ops.has_signal(client));
}
- if (t->is_stereo) {
- tuner_info("Stereo: %s\n", t->is_stereo(client) ? "yes" : "no");
+ if (t->ops.is_stereo) {
+ tuner_info("Stereo: %s\n", t->ops.is_stereo(client) ? "yes" : "no");
}
}
@@ -437,10 +459,9 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
memcpy(&t->i2c, &client_template, sizeof(struct i2c_client));
i2c_set_clientdata(&t->i2c, t);
t->type = UNSET;
- t->radio_if2 = 10700 * 1000; /* 10.7MHz - FM radio */
t->audmode = V4L2_TUNER_MODE_STEREO;
t->mode_mask = T_UNINITIALIZED;
- t->tuner_status = tuner_status;
+ t->ops.tuner_status = tuner_status;
if (show_i2c) {
unsigned char buffer[16];
@@ -460,6 +481,19 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
/* autodetection code based on the i2c addr */
if (!no_autodetect) {
switch (addr) {
+#ifdef CONFIG_TUNER_TEA5761
+ case 0x10:
+ if (tea5761_autodetection(&t->i2c) != EINVAL) {
+ t->type = TUNER_TEA5761;
+ t->mode_mask = T_RADIO;
+ t->mode = T_STANDBY;
+ t->radio_freq = 87.5 * 16000; /* Sets freq to FM range */
+ default_mode_mask &= ~T_RADIO;
+
+ goto register_client;
+ }
+ break;
+#endif
case 0x42:
case 0x43:
case 0x4a:
@@ -533,6 +567,11 @@ static int tuner_detach(struct i2c_client *client)
return err;
}
+ if (t->ops.release)
+ t->ops.release(client);
+ else {
+ kfree(t->priv);
+ }
kfree(t);
return 0;
}
@@ -553,8 +592,8 @@ static inline int set_mode(struct i2c_client *client, struct tuner *t, int mode,
if (check_mode(t, cmd) == EINVAL) {
t->mode = T_STANDBY;
- if (t->standby)
- t->standby (client);
+ if (t->ops.standby)
+ t->ops.standby (client);
return EINVAL;
}
return 0;
@@ -602,8 +641,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (check_mode(t, "TUNER_SET_STANDBY") == EINVAL)
return 0;
t->mode = T_STANDBY;
- if (t->standby)
- t->standby (client);
+ if (t->ops.standby)
+ t->ops.standby (client);
break;
#ifdef CONFIG_VIDEO_V4L1
case VIDIOCSAUDIO:
@@ -662,10 +701,10 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
return 0;
if (V4L2_TUNER_RADIO == t->mode) {
- if (t->has_signal)
- vt->signal = t->has_signal(client);
- if (t->is_stereo) {
- if (t->is_stereo(client))
+ if (t->ops.has_signal)
+ vt->signal = t->ops.has_signal(client);
+ if (t->ops.is_stereo) {
+ if (t->ops.is_stereo(client))
vt->flags |=
VIDEO_TUNER_STEREO_ON;
else
@@ -693,8 +732,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
if (check_v4l2(t) == EINVAL)
return 0;
- if (V4L2_TUNER_RADIO == t->mode && t->is_stereo)
- va->mode = t->is_stereo(client)
+ if (V4L2_TUNER_RADIO == t->mode && t->ops.is_stereo)
+ va->mode = t->ops.is_stereo(client)
? VIDEO_SOUND_STEREO : VIDEO_SOUND_MONO;
return 0;
}
@@ -759,8 +798,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
switch_v4l2();
tuner->type = t->mode;
- if (t->get_afc)
- tuner->afc=t->get_afc(client);
+ if (t->ops.get_afc)
+ tuner->afc=t->ops.get_afc(client);
if (t->mode == V4L2_TUNER_ANALOG_TV)
tuner->capability |= V4L2_TUNER_CAP_NORM;
if (t->mode != V4L2_TUNER_RADIO) {
@@ -770,13 +809,13 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
}
/* radio mode */
- if (t->has_signal)
- tuner->signal = t->has_signal(client);
+ if (t->ops.has_signal)
+ tuner->signal = t->ops.has_signal(client);
tuner->rxsubchans =
V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- if (t->is_stereo) {
- tuner->rxsubchans = t->is_stereo(client) ?
+ if (t->ops.is_stereo) {
+ tuner->rxsubchans = t->ops.is_stereo(client) ?
V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
}
@@ -804,8 +843,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
break;
}
case VIDIOC_LOG_STATUS:
- if (t->tuner_status)
- t->tuner_status(client);
+ if (t->ops.tuner_status)
+ t->ops.tuner_status(client);
break;
}
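
One consequence of the fallback above (call t->ops.release when set, otherwise a bare kfree(t->priv)) is that a sub-driver whose private data owns further allocations must provide its own release callback. A hypothetical sketch, not part of this patch:

    struct foo_priv {
            u8 *regcache;           /* a second allocation owned by priv */
    };

    static void foo_release(struct i2c_client *c)
    {
            struct tuner *t = i2c_get_clientdata(c);
            struct foo_priv *priv = t->priv;

            kfree(priv->regcache);  /* the core's kfree fallback would leak this */
            kfree(priv);
            t->priv = NULL;
    }
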
diff --git a/drivers/media/video/tuner-driver.h b/drivers/media/video/tuner-driver.h
new file mode 100644
index 000000000000..0334a9125077
--- /dev/null
+++ b/drivers/media/video/tuner-driver.h
@@ -0,0 +1,107 @@
+/*
+ tuner-driver.h - interface for different tuners
+
+ Copyright (C) 1997 Markus Schroeder (schroedm@uni-duesseldorf.de)
+ minor modifications by Ralph Metzler (rjkm@thp.uni-koeln.de)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __TUNER_HW_H__
+#define __TUNER_HW_H__
+
+#include <linux/videodev2.h>
+#include <linux/i2c.h>
+
+extern unsigned const int tuner_count;
+
+struct tuner_operations {
+ void (*set_tv_freq)(struct i2c_client *c, unsigned int freq);
+ void (*set_radio_freq)(struct i2c_client *c, unsigned int freq);
+ int (*has_signal)(struct i2c_client *c);
+ int (*is_stereo)(struct i2c_client *c);
+ int (*get_afc)(struct i2c_client *c);
+ void (*tuner_status)(struct i2c_client *c);
+ void (*standby)(struct i2c_client *c);
+ void (*release)(struct i2c_client *c);
+};
+
+struct tuner {
+ /* device */
+ struct i2c_client i2c;
+
+ unsigned int type; /* chip type */
+
+ unsigned int mode;
+ unsigned int mode_mask; /* Combination of allowable modes */
+
+ unsigned int tv_freq; /* keep track of the current settings */
+ unsigned int radio_freq;
+ u16 last_div;
+ unsigned int audmode;
+ v4l2_std_id std;
+
+ int using_v4l2;
+ void *priv;
+
+ /* used by tda9887 */
+ unsigned int tda9887_config;
+
+ unsigned int config;
+ int (*tuner_callback) (void *dev, int command,int arg);
+
+ struct tuner_operations ops;
+};
+
+/* ------------------------------------------------------------------------ */
+
+extern int default_tuner_init(struct i2c_client *c);
+
+extern int tda9887_tuner_init(struct i2c_client *c);
+
+extern int microtune_init(struct i2c_client *c);
+
+extern int tda8290_init(struct i2c_client *c);
+extern int tda8290_probe(struct i2c_client *c);
+
+extern int tea5761_tuner_init(struct i2c_client *c);
+extern int tea5761_autodetection(struct i2c_client *c);
+
+extern int tea5767_autodetection(struct i2c_client *c);
+extern int tea5767_tuner_init(struct i2c_client *c);
+
+/* ------------------------------------------------------------------------ */
+
+#define tuner_warn(fmt, arg...) do {\
+ printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+ i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+#define tuner_info(fmt, arg...) do {\
+ printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+ i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+#define tuner_dbg(fmt, arg...) do {\
+ extern int tuner_debug; \
+ if (tuner_debug) \
+ printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+ i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
+
+#endif /* __TUNER_HW_H__ */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/video/tuner-simple.c
index c40b92ce1fad..2d57e8bc0db3 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/video/tuner-simple.c
@@ -8,6 +8,8 @@
#include <linux/videodev.h>
#include <media/tuner.h>
#include <media/v4l2-common.h>
+#include <media/tuner-types.h>
+#include "tuner-driver.h"
static int offset = 0;
module_param(offset, int, 0664);
@@ -54,9 +56,9 @@ MODULE_PARM_DESC(offset,"Allows to specify an offset for tuner");
sound 2 33.16 - -
NICAM 33.05 33.05 39.80
*/
-#define PHILIPS_MF_SET_BG 0x01 /* Bit 2 must be zero, Bit 3 is system output */
-#define PHILIPS_MF_SET_PAL_L 0x03 // France
-#define PHILIPS_MF_SET_PAL_L2 0x02 // L'
+#define PHILIPS_MF_SET_STD_BG 0x01 /* Bit 2 must be zero, Bit 3 is system output */
+#define PHILIPS_MF_SET_STD_L 0x03 /* Used on SECAM France */
+#define PHILIPS_MF_SET_STD_LC 0x02 /* Used on SECAM L' */
/* Control byte */
@@ -207,11 +209,11 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
/* 0x04 -> ??? PAL others / SECAM others ??? */
cb &= ~0x03;
if (t->std & V4L2_STD_SECAM_L) //also valid for V4L2_STD_SECAM
- cb |= PHILIPS_MF_SET_PAL_L;
+ cb |= PHILIPS_MF_SET_STD_L;
else if (t->std & V4L2_STD_SECAM_LC)
- cb |= PHILIPS_MF_SET_PAL_L2;
+ cb |= PHILIPS_MF_SET_STD_LC;
else /* V4L2_STD_B|V4L2_STD_GH */
- cb |= PHILIPS_MF_SET_BG;
+ cb |= PHILIPS_MF_SET_STD_BG;
break;
case TUNER_TEMIC_4046FM5:
@@ -479,6 +481,13 @@ static void default_set_radio_freq(struct i2c_client *c, unsigned int freq)
tuner_warn("i2c i/o error: rc == %d (should be 4)\n",rc);
}
+static struct tuner_operations simple_tuner_ops = {
+ .set_tv_freq = default_set_tv_freq,
+ .set_radio_freq = default_set_radio_freq,
+ .has_signal = tuner_signal,
+ .is_stereo = tuner_stereo,
+};
+
int default_tuner_init(struct i2c_client *c)
{
struct tuner *t = i2c_get_clientdata(c);
@@ -487,11 +496,7 @@ int default_tuner_init(struct i2c_client *c)
t->type, tuners[t->type].name);
strlcpy(c->name, tuners[t->type].name, sizeof(c->name));
- t->set_tv_freq = default_set_tv_freq;
- t->set_radio_freq = default_set_radio_freq;
- t->has_signal = tuner_signal;
- t->is_stereo = tuner_stereo;
- t->standby = NULL;
+ memcpy(&t->ops, &simple_tuner_ops, sizeof(struct tuner_operations));
return 0;
}
diff --git a/drivers/media/video/tuner-types.c b/drivers/media/video/tuner-types.c
index 74c3e6f96f1a..417f642b4359 100644
--- a/drivers/media/video/tuner-types.c
+++ b/drivers/media/video/tuner-types.c
@@ -594,19 +594,19 @@ static struct tuner_params tuner_philips_pal_mk_params[] = {
},
};
-/* ------------ TUNER_PHILIPS_ATSC - Philips ATSC ------------ */
+/* ---- TUNER_PHILIPS_ATSC - Philips FCV1236D (ATSC/NTSC) ---- */
-static struct tuner_range tuner_philips_atsc_ranges[] = {
+static struct tuner_range tuner_philips_fcv1236d_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
- { 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
+ { 16 * 451.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_atsc_params[] = {
+static struct tuner_params tuner_philips_fcv1236d_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
- .ranges = tuner_philips_atsc_ranges,
- .count = ARRAY_SIZE(tuner_philips_atsc_ranges),
+ .ranges = tuner_philips_fcv1236d_ranges,
+ .count = ARRAY_SIZE(tuner_philips_fcv1236d_ranges),
},
};
@@ -1296,9 +1296,9 @@ struct tunertype tuners[] = {
.count = ARRAY_SIZE(tuner_philips_pal_mk_params),
},
[TUNER_PHILIPS_ATSC] = { /* Philips ATSC */
- .name = "Philips 1236D ATSC/NTSC dual in",
- .params = tuner_philips_atsc_params,
- .count = ARRAY_SIZE(tuner_philips_atsc_params),
+ .name = "Philips FCV1236D ATSC/NTSC dual in",
+ .params = tuner_philips_fcv1236d_params,
+ .count = ARRAY_SIZE(tuner_philips_fcv1236d_params),
},
[TUNER_PHILIPS_FM1236_MK3] = { /* Philips NTSC */
.name = "Philips NTSC MK3 (FM1236MK3 or FM1236/F)",
@@ -1463,6 +1463,10 @@ struct tunertype tuners[] = {
.name = "Philips TDA988[5,6,7] IF PLL Demodulator",
/* see tda9887.c for details */
},
+ [TUNER_TEA5761] = { /* Philips RADIO */
+ .name = "Philips TEA5761 FM Radio",
+ /* see tea5761.c for details */
+ },
};
unsigned const int tuner_count = ARRAY_SIZE(tuners);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c9bf9dbc2ea3..cffb011590e3 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -271,7 +271,7 @@ static int chip_thread(void *data)
struct CHIPDESC *desc = chiplist + chip->type;
v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name);
-
+ set_freezable();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
@@ -290,7 +290,7 @@ static int chip_thread(void *data)
desc->checkmode(chip);
/* schedule next check */
- mod_timer(&chip->wt, jiffies+2*HZ);
+ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
}
v4l_dbg(1, debug, &chip->c, "%s: thread exiting\n", chip->c.name);
@@ -1770,7 +1770,7 @@ static int chip_command(struct i2c_client *client,
desc->setmode(chip,VIDEO_SOUND_MONO);
if (chip->prevmode != VIDEO_SOUND_MONO)
chip->prevmode = -1; /* reset previous mode */
- mod_timer(&chip->wt, jiffies+2*HZ);
+ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
/* the thread will call checkmode() later */
}
break;
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index a1136da74ba8..fdc3def437b1 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -183,7 +183,7 @@ hauppauge_tuner[] =
{ TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
{ TUNER_ABSENT, "Thompson DTT757"},
/* 80-89 */
- { TUNER_ABSENT, "Philips FQ1216LME MK3"},
+ { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216LME MK3"},
{ TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
{ TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
{ TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
@@ -490,7 +490,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
to indicate 4052 mux was removed in favor of using MSP
inputs directly. */
audioic = eeprom_data[i+2] & 0x7f;
- if (audioic < sizeof(audioIC)/sizeof(*audioIC))
+ if (audioic < ARRAY_SIZE(audioIC))
tvee->audio_processor = audioIC[audioic].id;
else
tvee->audio_processor = AUDIO_CHIP_UNKNOWN;
@@ -523,7 +523,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
to indicate 4052 mux was removed in favor of using MSP
inputs directly. */
audioic = eeprom_data[i+1] & 0x7f;
- if (audioic < sizeof(audioIC)/sizeof(*audioIC))
+ if (audioic < ARRAY_SIZE(audioIC))
tvee->audio_processor = audioIC[audioic].id;
else
tvee->audio_processor = AUDIO_CHIP_UNKNOWN;
@@ -678,7 +678,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
tveeprom_info("audio processor is unknown (no idx)\n");
tvee->audio_processor=AUDIO_CHIP_UNKNOWN;
} else {
- if (audioic < sizeof(audioIC)/sizeof(*audioIC))
+ if (audioic < ARRAY_SIZE(audioIC))
tveeprom_info("audio processor is %s (idx %d)\n",
audioIC[audioic].name,audioic);
else
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index d5ec05f56adf..e2f1c972754b 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -1006,7 +1006,7 @@ static int tvp5150_command(struct i2c_client *c,
{
struct v4l2_control *ctrl = arg;
u8 i, n;
- n = sizeof(tvp5150_qctrl) / sizeof(tvp5150_qctrl[0]);
+ n = ARRAY_SIZE(tvp5150_qctrl);
for (i = 0; i < n; i++)
if (ctrl->id == tvp5150_qctrl[i].id) {
if (ctrl->value <
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index abe214619092..491505d6fdee 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -236,7 +236,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
input_dev->name = "Konicawc snapshot button";
input_dev->phys = cam->input_physname;
usb_to_input_id(dev, &input_dev->id);
- input_dev->cdev.dev = &dev->dev;
+ input_dev->dev.parent = &dev->dev;
input_dev->evbit[0] = BIT(EV_KEY);
input_dev->keybit[LONG(BTN_0)] = BIT(BTN_0);
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index ec0ff2247f06..dd1a6d6bbc9e 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -100,7 +100,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
input_dev->name = "QCM button";
input_dev->phys = cam->input_physname;
usb_to_input_id(dev, &input_dev->id);
- input_dev->cdev.dev = &dev->dev;
+ input_dev->dev.parent = &dev->dev;
input_dev->evbit[0] = BIT(EV_KEY);
input_dev->keybit[LONG(BTN_0)] = BIT(BTN_0);
@@ -439,7 +439,7 @@ static int qcm_sensor_init(struct uvd *uvd)
int ret;
int i;
- for (i=0; i < sizeof(regval_table)/sizeof(regval_table[0]) ; i++) {
+ for (i=0; i < ARRAY_SIZE(regval_table) ; i++) {
CHECK_RET(ret, qcm_stv_setb(uvd->dev,
regval_table[i].reg,
regval_table[i].val));
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 982b115193f8..ff555129c82f 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -42,7 +42,6 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include "usbvideo.h"
@@ -417,11 +416,6 @@ struct vicam_camera {
u8 open_count;
u8 bulkEndpoint;
int needsDummyRead;
-
-#if defined(CONFIG_VIDEO_PROC_FS)
- struct proc_dir_entry *proc_dir;
-#endif
-
};
static int vicam_probe( struct usb_interface *intf, const struct usb_device_id *id);
@@ -1065,175 +1059,6 @@ vicam_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-#if defined(CONFIG_VIDEO_PROC_FS)
-
-static struct proc_dir_entry *vicam_proc_root = NULL;
-
-static int vicam_read_helper(char *page, char **start, off_t off,
- int count, int *eof, int value)
-{
- char *out = page;
- int len;
-
- out += sprintf(out, "%d",value);
-
- len = out - page;
- len -= off;
- if (len < count) {
- *eof = 1;
- if (len <= 0)
- return 0;
- } else
- len = count;
-
- *start = page + off;
- return len;
-}
-
-static int vicam_read_proc_shutter(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- return vicam_read_helper(page,start,off,count,eof,
- ((struct vicam_camera *)data)->shutter_speed);
-}
-
-static int vicam_read_proc_gain(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- return vicam_read_helper(page,start,off,count,eof,
- ((struct vicam_camera *)data)->gain);
-}
-
-static int
-vicam_write_proc_shutter(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- u16 stmp;
- char kbuf[8];
- struct vicam_camera *cam = (struct vicam_camera *) data;
-
- if (count > 6)
- return -EINVAL;
-
- if (copy_from_user(kbuf, buffer, count))
- return -EFAULT;
-
- stmp = (u16) simple_strtoul(kbuf, NULL, 10);
- if (stmp < 4 || stmp > 32000)
- return -EINVAL;
-
- cam->shutter_speed = stmp;
-
- return count;
-}
-
-static int
-vicam_write_proc_gain(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- u16 gtmp;
- char kbuf[8];
-
- struct vicam_camera *cam = (struct vicam_camera *) data;
-
- if (count > 4)
- return -EINVAL;
-
- if (copy_from_user(kbuf, buffer, count))
- return -EFAULT;
-
- gtmp = (u16) simple_strtoul(kbuf, NULL, 10);
- if (gtmp > 255)
- return -EINVAL;
- cam->gain = gtmp;
-
- return count;
-}
-
-static void
-vicam_create_proc_root(void)
-{
- vicam_proc_root = proc_mkdir("video/vicam", NULL);
-
- if (vicam_proc_root)
- vicam_proc_root->owner = THIS_MODULE;
- else
- printk(KERN_ERR
- "could not create /proc entry for vicam!");
-}
-
-static void
-vicam_destroy_proc_root(void)
-{
- if (vicam_proc_root)
- remove_proc_entry("video/vicam", 0);
-}
-
-static void
-vicam_create_proc_entry(struct vicam_camera *cam)
-{
- char name[64];
- struct proc_dir_entry *ent;
-
- DBG(KERN_INFO "vicam: creating proc entry\n");
-
- if (!vicam_proc_root || !cam) {
- printk(KERN_INFO
- "vicam: could not create proc entry, %s pointer is null.\n",
- (!cam ? "camera" : "root"));
- return;
- }
-
- sprintf(name, "video%d", cam->vdev.minor);
-
- cam->proc_dir = proc_mkdir(name, vicam_proc_root);
-
- if ( !cam->proc_dir )
- return; // FIXME: We should probably return an error here
-
- ent = create_proc_entry("shutter", S_IFREG | S_IRUGO | S_IWUSR,
- cam->proc_dir);
- if (ent) {
- ent->data = cam;
- ent->read_proc = vicam_read_proc_shutter;
- ent->write_proc = vicam_write_proc_shutter;
- ent->size = 64;
- }
-
- ent = create_proc_entry("gain", S_IFREG | S_IRUGO | S_IWUSR,
- cam->proc_dir);
- if (ent) {
- ent->data = cam;
- ent->read_proc = vicam_read_proc_gain;
- ent->write_proc = vicam_write_proc_gain;
- ent->size = 64;
- }
-}
-
-static void
-vicam_destroy_proc_entry(void *ptr)
-{
- struct vicam_camera *cam = (struct vicam_camera *) ptr;
- char name[16];
-
- if ( !cam->proc_dir )
- return;
-
- sprintf(name, "video%d", cam->vdev.minor);
- remove_proc_entry("shutter", cam->proc_dir);
- remove_proc_entry("gain", cam->proc_dir);
- remove_proc_entry(name,vicam_proc_root);
- cam->proc_dir = NULL;
-
-}
-
-#else
-static inline void vicam_create_proc_root(void) { }
-static inline void vicam_destroy_proc_root(void) { }
-static inline void vicam_create_proc_entry(struct vicam_camera *cam) { }
-static inline void vicam_destroy_proc_entry(void *ptr) { }
-#endif
-
static const struct file_operations vicam_fops = {
.owner = THIS_MODULE,
.open = vicam_open,
@@ -1305,13 +1130,12 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
}
if ((cam =
- kmalloc(sizeof (struct vicam_camera), GFP_KERNEL)) == NULL) {
+ kzalloc(sizeof (struct vicam_camera), GFP_KERNEL)) == NULL) {
printk(KERN_WARNING
"could not allocate kernel memory for vicam_camera struct\n");
return -ENOMEM;
}
- memset(cam, 0, sizeof (struct vicam_camera));
cam->shutter_speed = 15;
@@ -1330,8 +1154,6 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
return -EIO;
}
- vicam_create_proc_entry(cam);
-
printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",cam->vdev.minor);
usb_set_intfdata (intf, cam);
@@ -1363,8 +1185,6 @@ vicam_disconnect(struct usb_interface *intf)
cam->udev = NULL;
- vicam_destroy_proc_entry(cam);
-
/* the only thing left to do is synchronize with
* our close/release function on who should release
* the camera memory. if there are any users using the
@@ -1390,7 +1210,6 @@ usb_vicam_init(void)
{
int retval;
DBG(KERN_INFO "ViCam-based WebCam driver startup\n");
- vicam_create_proc_root();
retval = usb_register(&vicam_driver);
if (retval)
printk(KERN_WARNING "usb_register failed!\n");
@@ -1404,7 +1223,6 @@ usb_vicam_exit(void)
"ViCam-based WebCam driver shutdown\n");
usb_deregister(&vicam_driver);
- vicam_destroy_proc_root();
}
module_init(usb_vicam_init);
diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c
index 51ab265d566a..380564cd3317 100644
--- a/drivers/media/video/usbvision/usbvision-cards.c
+++ b/drivers/media/video/usbvision/usbvision-cards.c
@@ -79,7 +79,7 @@ struct usbvision_device_data_st usbvision_device_data[] = {
.Interface = -1,
.Codec = CODEC_SAA7113,
.VideoChannels = 2,
- .VideoNorm = V4L2_STD_PAL,
+ .VideoNorm = V4L2_STD_NTSC,
.AudioChannels = 1,
.Radio = 0,
.vbi = 1,
@@ -311,8 +311,8 @@ struct usbvision_device_data_st usbvision_device_data[] = {
.vbi = 1,
.Tuner = 1,
.TunerType = TUNER_PHILIPS_SECAM,
- .X_Offset = -1,
- .Y_Offset = -1,
+ .X_Offset = 0x80,
+ .Y_Offset = 0x16,
.ModelString = "Hauppauge WinTV USB (PAL/SECAM L)",
},
[HPG_WINTV_PAL_D_K] = {
@@ -586,7 +586,7 @@ struct usbvision_device_data_st usbvision_device_data[] = {
.Radio = 0,
.vbi = 1,
.Tuner = 1,
- .TunerType = TUNER_PHILIPS_PAL,
+ .TunerType = TUNER_LG_PAL_NEW_TAPC,
.X_Offset = 0,
.Y_Offset = 3,
.Dvi_yuv_override = 1,
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index 7df071eb0a3b..5b1e346df206 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -1742,7 +1742,7 @@ static int usbvision_set_video_format(struct usb_usbvision *usbvision, int forma
format = ISOC_MODE_YUV420;
}
value[0] = 0x0A; //TODO: See the effect of the filter
- value[1] = format;
+ value[1] = format; // Sets the VO_MODE register which follows FILT_CONT
rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
USBVISION_OP_CODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
@@ -1831,10 +1831,10 @@ int usbvision_set_output(struct usb_usbvision *usbvision, int width,
frameRate = FRAMERATE_MAX;
}
- if (usbvision->tvnorm->id & V4L2_STD_625_50) {
+ if (usbvision->tvnormId & V4L2_STD_625_50) {
frameDrop = frameRate * 32 / 25 - 1;
}
- else if (usbvision->tvnorm->id & V4L2_STD_525_60) {
+ else if (usbvision->tvnormId & V4L2_STD_525_60) {
frameDrop = frameRate * 32 / 30 - 1;
}
@@ -2067,7 +2067,7 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
}
- if (usbvision->tvnorm->id & V4L2_STD_PAL) {
+ if (usbvision->tvnormId & V4L2_STD_PAL) {
value[0] = 0xC0;
value[1] = 0x02; //0x02C0 -> 704 Input video line length
value[2] = 0x20;
@@ -2076,7 +2076,7 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
value[5] = 0x00; //0x0060 -> 96 Input video h offset
value[6] = 0x16;
value[7] = 0x00; //0x0016 -> 22 Input video v offset
- } else if (usbvision->tvnorm->id & V4L2_STD_SECAM) {
+ } else if (usbvision->tvnormId & V4L2_STD_SECAM) {
value[0] = 0xC0;
value[1] = 0x02; //0x02C0 -> 704 Input video line length
value[2] = 0x20;
@@ -2537,7 +2537,9 @@ void usbvision_stop_isoc(struct usb_usbvision *usbvision)
int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
{
- int mode[4];
+ /* inputs #0 and #3 are constant for every SAA711x. */
+ /* inputs #1 and #2 are variable for SAA7111 and SAA7113 */
+ int mode[4]= {SAA7115_COMPOSITE0, 0, 0, SAA7115_COMPOSITE3};
int audio[]= {1, 0, 0, 0};
struct v4l2_routing route;
//channel 0 is TV with audiochannel 1 (tuner mono)
@@ -2547,10 +2549,6 @@ int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
RESTRICT_TO_RANGE(channel, 0, usbvision->video_inputs);
usbvision->ctl_input = channel;
- route.input = SAA7115_COMPOSITE1;
- route.output = 0;
- call_i2c_clients(usbvision, VIDIOC_INT_S_VIDEO_ROUTING,&route);
- call_i2c_clients(usbvision, VIDIOC_S_INPUT, &usbvision->ctl_input);
// set the new channel
// Regular USB TV Tuners -> channel: 0 = Television, 1 = Composite, 2 = S-Video
@@ -2558,28 +2556,27 @@ int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
switch (usbvision_device_data[usbvision->DevModel].Codec) {
case CODEC_SAA7113:
- if (SwitchSVideoInput) { // To handle problems with S-Video Input for some devices. Use SwitchSVideoInput parameter when loading the module.
- mode[2] = 1;
+ mode[1] = SAA7115_COMPOSITE2;
+ if (SwitchSVideoInput) {
+ /* To handle problems with S-Video Input for
+ * some devices. Use SwitchSVideoInput
+ * parameter when loading the module.*/
+ mode[2] = SAA7115_COMPOSITE1;
}
else {
- mode[2] = 7;
- }
- if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
- mode[0] = 0; mode[1] = 2; mode[3] = 3; // Special for four input devices
- }
- else {
- mode[0] = 0; mode[1] = 2; //modes for regular saa7113 devices
+ mode[2] = SAA7115_SVIDEO1;
}
break;
case CODEC_SAA7111:
- mode[0] = 0; mode[1] = 1; mode[2] = 7; //modes for saa7111
- break;
default:
- mode[0] = 0; mode[1] = 1; mode[2] = 7; //default modes
+ /* modes for saa7111 */
+ mode[1] = SAA7115_COMPOSITE1;
+ mode[2] = SAA7115_SVIDEO1;
+ break;
}
route.input = mode[channel];
+ route.output = 0;
call_i2c_clients(usbvision, VIDIOC_INT_S_VIDEO_ROUTING,&route);
- usbvision->channel = channel;
usbvision_set_audio(usbvision, audio[channel]);
return 0;
}
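
For clarity, the routing that results from the switch above (route defines presumably coming from <media/saa7115.h>), summarized per input channel:

    /*
     * channel | SAA7113 codec                | SAA7111 / default
     * --------+------------------------------+-------------------
     *    0    | SAA7115_COMPOSITE0           | SAA7115_COMPOSITE0
     *    1    | SAA7115_COMPOSITE2           | SAA7115_COMPOSITE1
     *    2    | SAA7115_SVIDEO1 (or          | SAA7115_SVIDEO1
     *         | SAA7115_COMPOSITE1 when      |
     *         | SwitchSVideoInput is set)    |
     *    3    | SAA7115_COMPOSITE3           | SAA7115_COMPOSITE3
     */
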
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index aa3258bbb4af..868b6886fe7f 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -36,7 +36,8 @@
* - use submit_urb for all setup packets
* - Fix memory settings for nt1004. It is 4 times as big as the
* nt1003 memory.
- * - Add audio on endpoint 3 for nt1004 chip. Seems impossible, needs a codec interface. Which one?
+ * - Add audio on endpoint 3 for nt1004 chip.
+ * Seems impossible, needs a codec interface. Which one?
* - Clean up the driver.
* - optimization for performance.
* - Add Videotext capability (VBI). Working on it.....
@@ -77,7 +78,8 @@
#include "usbvision.h"
#include "usbvision-cards.h"
-#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>, Dwaine Garden <DwaineGarden@rogers.com>"
+#define DRIVER_AUTHOR "Joerg Heckenbach <joerg@heckenbach-aw.de>, " \
+ "Dwaine Garden <DwaineGarden@rogers.com>"
#define DRIVER_NAME "usbvision"
#define DRIVER_ALIAS "USBVision"
#define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
@@ -85,20 +87,25 @@
#define USBVISION_DRIVER_VERSION_MAJOR 0
#define USBVISION_DRIVER_VERSION_MINOR 9
#define USBVISION_DRIVER_VERSION_PATCHLEVEL 9
-#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,USBVISION_DRIVER_VERSION_MINOR,USBVISION_DRIVER_VERSION_PATCHLEVEL)
-#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) "." __stringify(USBVISION_DRIVER_VERSION_MINOR) "." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
+#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,\
+USBVISION_DRIVER_VERSION_MINOR,\
+USBVISION_DRIVER_VERSION_PATCHLEVEL)
+#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR)\
+ "." __stringify(USBVISION_DRIVER_VERSION_MINOR)\
+ "." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
#define ENABLE_HEXDUMP 0 /* Enable if you need it */
#ifdef USBVISION_DEBUG
#define PDEBUG(level, fmt, args...) \
- if (video_debug & (level)) info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ , ## args)
+ if (video_debug & (level)) \
+ info("[%s:%d] " fmt, __PRETTY_FUNCTION__, __LINE__ ,\
+ ## args)
#else
#define PDEBUG(level, fmt, args...) do {} while(0)
#endif
-#define DBG_IOCTL 1<<0
#define DBG_IO 1<<1
#define DBG_PROBE 1<<2
#define DBG_MMAP 1<<3
@@ -108,7 +115,8 @@
#define goto2next(str) while(*str!=' ') str++; while(*str==' ') str++;
-static int usbvision_nr = 0; // sequential number of usbvision device
+/* sequential number of usbvision device */
+static int usbvision_nr = 0;
static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
{ 1, 1, 8, V4L2_PIX_FMT_GREY , "GREY" },
@@ -121,55 +129,32 @@ static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = {
{ 1, 2, 16, V4L2_PIX_FMT_YUV422P , "YUV422P" }
};
-/* supported tv norms */
-static struct usbvision_tvnorm tvnorms[] = {
- {
- .name = "PAL",
- .id = V4L2_STD_PAL,
- }, {
- .name = "NTSC",
- .id = V4L2_STD_NTSC,
- }, {
- .name = "SECAM",
- .id = V4L2_STD_SECAM,
- }, {
- .name = "PAL-M",
- .id = V4L2_STD_PAL_M,
- }
-};
-
-#define TVNORMS ARRAY_SIZE(tvnorms)
-
-// Function prototypes
+/* Function prototypes */
static void usbvision_release(struct usb_usbvision *usbvision);
-// Default initalization of device driver parameters
-static int isocMode = ISOC_MODE_COMPRESS; // Set the default format for ISOC endpoint
-static int video_debug = 0; // Set the default Debug Mode of the device driver
-static int PowerOnAtOpen = 1; // Set the default device to power on at startup
-static int video_nr = -1; // Sequential Number of Video Device
-static int radio_nr = -1; // Sequential Number of Radio Device
-static int vbi_nr = -1; // Sequential Number of VBI Device
-
-// Grab parameters for the device driver
-
-#if defined(module_param) // Showing parameters under SYSFS
+/* Default initialization of device driver parameters */
+/* Set the default format for ISOC endpoint */
+static int isocMode = ISOC_MODE_COMPRESS;
+/* Set the default Debug Mode of the device driver */
+static int video_debug = 0;
+/* Set the default device to power on at startup */
+static int PowerOnAtOpen = 1;
+/* Sequential Number of Video Device */
+static int video_nr = -1;
+/* Sequential Number of Radio Device */
+static int radio_nr = -1;
+/* Sequential Number of VBI Device */
+static int vbi_nr = -1;
+
+/* Grab parameters for the device driver */
+
+/* Showing parameters under SYSFS */
module_param(isocMode, int, 0444);
module_param(video_debug, int, 0444);
module_param(PowerOnAtOpen, int, 0444);
module_param(video_nr, int, 0444);
module_param(radio_nr, int, 0444);
module_param(vbi_nr, int, 0444);
-#else // Old Style
-MODULE_PARAM(isocMode, "i");
-MODULE_PARM(video_debug, "i"); // Grab the Debug Mode of the device driver
-MODULE_PARM(adjustCompression, "i"); // Grab the compression to be adaptive
-MODULE_PARM(PowerOnAtOpen, "i"); // Grab the device to power on at startup
-MODULE_PARM(SwitchSVideoInput, "i"); // To help people with Black and White output with using s-video input. Some cables and input device are wired differently.
-MODULE_PARM(video_nr, "i"); // video_nr option allows to specify a certain /dev/videoX device (like /dev/video0 or /dev/video1 ...)
-MODULE_PARM(radio_nr, "i"); // radio_nr option allows to specify a certain /dev/radioX device (like /dev/radio0 or /dev/radio1 ...)
-MODULE_PARM(vbi_nr, "i"); // vbi_nr option allows to specify a certain /dev/vbiX device (like /dev/vbi0 or /dev/vbi1 ...)
-#endif
MODULE_PARM_DESC(isocMode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)");
MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)");
@@ -187,19 +172,21 @@ MODULE_VERSION(USBVISION_VERSION_STRING);
MODULE_ALIAS(DRIVER_ALIAS);
-/****************************************************************************************/
-/* SYSFS Code - Copied from the stv680.c usb module. */
-/* Device information is located at /sys/class/video4linux/video0 */
-/* Device parameters information is located at /sys/module/usbvision */
-/* Device USB Information is located at /sys/bus/usb/drivers/USBVision Video Grabber */
-/****************************************************************************************/
+/*****************************************************************************/
+/* SYSFS Code - Copied from the stv680.c usb module. */
+/* Device information is located at /sys/class/video4linux/video0 */
+/* Device parameters information is located at /sys/module/usbvision */
+/* Device USB Information is located at */
+/* /sys/bus/usb/drivers/USBVision Video Grabber */
+/*****************************************************************************/
#define YES_NO(x) ((x) ? "Yes" : "No")
static inline struct usb_usbvision *cd_to_usbvision(struct class_device *cd)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
return video_get_drvdata(vdev);
}
@@ -211,15 +198,18 @@ static CLASS_DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static ssize_t show_model(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%s\n", usbvision_device_data[usbvision->DevModel].ModelString);
+ return sprintf(buf, "%s\n",
+ usbvision_device_data[usbvision->DevModel].ModelString);
}
static CLASS_DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
static ssize_t show_hue(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_HUE;
@@ -232,7 +222,8 @@ static CLASS_DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
static ssize_t show_contrast(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_CONTRAST;
@@ -245,7 +236,8 @@ static CLASS_DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
static ssize_t show_brightness(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_BRIGHTNESS;
@@ -258,7 +250,8 @@ static CLASS_DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
static ssize_t show_saturation(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
struct v4l2_control ctrl;
ctrl.id = V4L2_CID_SATURATION;
@@ -271,23 +264,28 @@ static CLASS_DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
static ssize_t show_streaming(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%s\n", YES_NO(usbvision->streaming==Stream_On?1:0));
+ return sprintf(buf, "%s\n",
+ YES_NO(usbvision->streaming==Stream_On?1:0));
}
static CLASS_DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);
static ssize_t show_compression(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- return sprintf(buf, "%s\n", YES_NO(usbvision->isocMode==ISOC_MODE_COMPRESS));
+ return sprintf(buf, "%s\n",
+ YES_NO(usbvision->isocMode==ISOC_MODE_COMPRESS));
}
static CLASS_DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);
static ssize_t show_device_bridge(struct class_device *cd, char *buf)
{
- struct video_device *vdev = container_of(cd, struct video_device, class_dev);
+ struct video_device *vdev =
+ container_of(cd, struct video_device, class_dev);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
return sprintf(buf, "%d\n", usbvision->bridgeType);
}
@@ -376,7 +374,8 @@ static void usbvision_remove_sysfs(struct video_device *vdev)
static int usbvision_v4l2_open(struct inode *inode, struct file *file)
{
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
int errCode = 0;
PDEBUG(DBG_IO, "open");
@@ -390,7 +389,8 @@ static int usbvision_v4l2_open(struct inode *inode, struct file *file)
/* Allocate memory for the scratch ring buffer */
errCode = usbvision_scratch_alloc(usbvision);
if (isocMode==ISOC_MODE_COMPRESS) {
- /* Allocate intermediate decompression buffers only if needed */
+ /* Allocate intermediate decompression buffers
+ only if needed */
errCode = usbvision_decompress_alloc(usbvision);
}
if (errCode) {
@@ -421,11 +421,10 @@ static int usbvision_v4l2_open(struct inode *inode, struct file *file)
if (!errCode) {
usbvision_begin_streaming(usbvision);
errCode = usbvision_init_isoc(usbvision);
- /* device needs to be initialized before isoc transfer */
+ /* device must be initialized before isoc transfer */
usbvision_muxsel(usbvision,0);
usbvision->user++;
- }
- else {
+ } else {
if (PowerOnAtOpen) {
usbvision_i2c_unregister(usbvision);
usbvision_power_off(usbvision);
@@ -456,7 +455,8 @@ static int usbvision_v4l2_open(struct inode *inode, struct file *file)
static int usbvision_v4l2_close(struct inode *inode, struct file *file)
{
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
PDEBUG(DBG_IO, "close");
down(&usbvision->lock);
@@ -473,7 +473,8 @@ static int usbvision_v4l2_close(struct inode *inode, struct file *file)
usbvision->user--;
if (PowerOnAtOpen) {
- /* power off in a little while to avoid off/on every close/open short sequences */
+ /* power off in a little while
+ to avoid off/on every close/open short sequences */
usbvision_set_powerOffTimer(usbvision);
usbvision->initialized = 0;
}
@@ -498,583 +499,612 @@ static int usbvision_v4l2_close(struct inode *inode, struct file *file)
* This is part of Video 4 Linux API. The procedure handles ioctl() calls.
*
*/
-static int usbvision_v4l2_do_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int vidioc_g_register (struct file *file, void *priv,
+ struct v4l2_register *reg)
{
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return -EFAULT;
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int errCode;
- switch (cmd) {
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
+ return -EINVAL;
+ /* NT100x has an 8-bit register space */
+ errCode = usbvision_read_reg(usbvision, reg->reg&0xff);
+ if (errCode < 0) {
+ err("%s: VIDIOC_DBG_G_REGISTER failed: error %d",
+ __FUNCTION__, errCode);
+ return errCode;
+ }
+ reg->val = errCode;
+ return 0;
+}
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- /* ioctls to allow direct acces to the NT100x registers */
- case VIDIOC_DBG_G_REGISTER:
- case VIDIOC_DBG_S_REGISTER:
- {
- struct v4l2_register *reg = arg;
- int errCode;
-
- if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
- return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- /* NT100x has a 8-bit register space */
- if (cmd == VIDIOC_DBG_G_REGISTER)
- errCode = usbvision_read_reg(usbvision, reg->reg&0xff);
- else
- errCode = usbvision_write_reg(usbvision, reg->reg&0xff, reg->val);
- if (errCode < 0) {
- err("%s: VIDIOC_DBG_%c_REGISTER failed: error %d", __FUNCTION__,
- cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S', errCode);
- return errCode;
- }
- if (cmd == VIDIOC_DBG_S_REGISTER)
- reg->val = (u8)errCode;
+static int vidioc_s_register (struct file *file, void *priv,
+ struct v4l2_register *reg)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int errCode;
- PDEBUG(DBG_IOCTL, "VIDIOC_DBG_%c_REGISTER reg=0x%02X, value=0x%02X",
- cmd == VIDIOC_DBG_G_REGISTER ? 'G' : 'S',
- (unsigned int)reg->reg, (unsigned int)reg->val);
- return 0;
- }
+ if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
+ return -EINVAL;
+ /* NT100x has an 8-bit register space */
+ errCode = usbvision_write_reg(usbvision, reg->reg&0xff, reg->val);
+ if (errCode < 0) {
+ err("%s: VIDIOC_DBG_S_REGISTER failed: error %d",
+ __FUNCTION__, errCode);
+ return errCode;
+ }
+ reg->val = (u8)errCode;
+ return 0;
+}
#endif
- case VIDIOC_QUERYCAP:
- {
- struct v4l2_capability *vc=arg;
-
- memset(vc, 0, sizeof(*vc));
- strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
- strlcpy(vc->card, usbvision_device_data[usbvision->DevModel].ModelString,
- sizeof(vc->card));
- strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
- sizeof(vc->bus_info));
- vc->version = USBVISION_DRIVER_VERSION;
- vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_AUDIO |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
- PDEBUG(DBG_IOCTL, "VIDIOC_QUERYCAP");
- return 0;
- }
- case VIDIOC_ENUMINPUT:
- {
- struct v4l2_input *vi = arg;
- int chan;
-
- if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) )
- return -EINVAL;
- if (usbvision->have_tuner) {
- chan = vi->index;
- }
- else {
- chan = vi->index + 1; //skip Television string
- }
- switch(chan) {
- case 0:
- if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
- strcpy(vi->name, "White Video Input");
- }
- else {
- strcpy(vi->name, "Television");
- vi->type = V4L2_INPUT_TYPE_TUNER;
- vi->audioset = 1;
- vi->tuner = chan;
- vi->std = V4L2_STD_PAL | V4L2_STD_NTSC | V4L2_STD_SECAM;
- }
- break;
- case 1:
- vi->type = V4L2_INPUT_TYPE_CAMERA;
- if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
- strcpy(vi->name, "Green Video Input");
- }
- else {
- strcpy(vi->name, "Composite Video Input");
- }
- vi->std = V4L2_STD_PAL;
- break;
- case 2:
- vi->type = V4L2_INPUT_TYPE_CAMERA;
- if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
- strcpy(vi->name, "Yellow Video Input");
- }
- else {
- strcpy(vi->name, "S-Video Input");
- }
- vi->std = V4L2_STD_PAL;
- break;
- case 3:
- vi->type = V4L2_INPUT_TYPE_CAMERA;
- strcpy(vi->name, "Red Video Input");
- vi->std = V4L2_STD_PAL;
- break;
- }
- PDEBUG(DBG_IOCTL, "VIDIOC_ENUMINPUT name=%s:%d tuners=%d type=%d norm=%x",
- vi->name, vi->index, vi->tuner,vi->type,(int)vi->std);
- return 0;
- }
- case VIDIOC_ENUMSTD:
- {
- struct v4l2_standard *e = arg;
- unsigned int i;
- int ret;
-
- i = e->index;
- if (i >= TVNORMS)
- return -EINVAL;
- ret = v4l2_video_std_construct(e, tvnorms[e->index].id,
- tvnorms[e->index].name);
- e->index = i;
- if (ret < 0)
- return ret;
- return 0;
+
+static int vidioc_querycap (struct file *file, void *priv,
+ struct v4l2_capability *vc)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+
+ strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
+ strlcpy(vc->card,
+ usbvision_device_data[usbvision->DevModel].ModelString,
+ sizeof(vc->card));
+ strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
+ sizeof(vc->bus_info));
+ vc->version = USBVISION_DRIVER_VERSION;
+ vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_AUDIO |
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING |
+ (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
+ return 0;
+}
+
+static int vidioc_enum_input (struct file *file, void *priv,
+ struct v4l2_input *vi)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int chan;
+
+ if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) )
+ return -EINVAL;
+ if (usbvision->have_tuner) {
+ chan = vi->index;
+ } else {
+ chan = vi->index + 1; /*skip Television string*/
+ }
+ /* Determine the requested input characteristics
+ specific to each usbvision card model */
+ switch(chan) {
+ case 0:
+ if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
+ strcpy(vi->name, "White Video Input");
+ } else {
+ strcpy(vi->name, "Television");
+ vi->type = V4L2_INPUT_TYPE_TUNER;
+ vi->audioset = 1;
+ vi->tuner = chan;
+ vi->std = USBVISION_NORMS;
}
- case VIDIOC_G_INPUT:
- {
- int *input = arg;
- *input = usbvision->ctl_input;
- return 0;
+ break;
+ case 1:
+ vi->type = V4L2_INPUT_TYPE_CAMERA;
+ if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
+ strcpy(vi->name, "Green Video Input");
+ } else {
+ strcpy(vi->name, "Composite Video Input");
}
- case VIDIOC_S_INPUT:
- {
- int *input = arg;
- if ((*input >= usbvision->video_inputs) || (*input < 0) )
- return -EINVAL;
- usbvision->ctl_input = *input;
-
- down(&usbvision->lock);
- usbvision_muxsel(usbvision, usbvision->ctl_input);
- usbvision_set_input(usbvision);
- usbvision_set_output(usbvision, usbvision->curwidth, usbvision->curheight);
- up(&usbvision->lock);
- return 0;
+ vi->std = V4L2_STD_PAL;
+ break;
+ case 2:
+ vi->type = V4L2_INPUT_TYPE_CAMERA;
+ if (usbvision_device_data[usbvision->DevModel].VideoChannels == 4) {
+ strcpy(vi->name, "Yellow Video Input");
+ } else {
+ strcpy(vi->name, "S-Video Input");
}
- case VIDIOC_G_STD:
- {
- v4l2_std_id *id = arg;
+ vi->std = V4L2_STD_PAL;
+ break;
+ case 3:
+ vi->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(vi->name, "Red Video Input");
+ vi->std = V4L2_STD_PAL;
+ break;
+ }
+ return 0;
+}
- *id = usbvision->tvnorm->id;
+static int vidioc_g_input (struct file *file, void *priv, unsigned int *input)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- PDEBUG(DBG_IOCTL, "VIDIOC_G_STD std_id=%s", usbvision->tvnorm->name);
- return 0;
- }
- case VIDIOC_S_STD:
- {
- v4l2_std_id *id = arg;
- unsigned int i;
-
- for (i = 0; i < TVNORMS; i++)
- if (*id == tvnorms[i].id)
- break;
- if (i == TVNORMS)
- for (i = 0; i < TVNORMS; i++)
- if (*id & tvnorms[i].id)
- break;
- if (i == TVNORMS)
- return -EINVAL;
-
- down(&usbvision->lock);
- usbvision->tvnorm = &tvnorms[i];
-
- call_i2c_clients(usbvision, VIDIOC_S_STD,
- &usbvision->tvnorm->id);
+ *input = usbvision->ctl_input;
+ return 0;
+}
- up(&usbvision->lock);
+static int vidioc_s_input (struct file *file, void *priv, unsigned int input)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- PDEBUG(DBG_IOCTL, "VIDIOC_S_STD std_id=%s", usbvision->tvnorm->name);
- return 0;
- }
- case VIDIOC_G_TUNER:
- {
- struct v4l2_tuner *vt = arg;
-
- if (!usbvision->have_tuner || vt->index) // Only tuner 0
- return -EINVAL;
- strcpy(vt->name, "Television");
- /* Let clients fill in the remainder of this struct */
- call_i2c_clients(usbvision,VIDIOC_G_TUNER,vt);
-
- PDEBUG(DBG_IOCTL, "VIDIOC_G_TUNER signal=%x, afc=%x",vt->signal,vt->afc);
- return 0;
- }
- case VIDIOC_S_TUNER:
- {
- struct v4l2_tuner *vt = arg;
-
- // Only no or one tuner for now
- if (!usbvision->have_tuner || vt->index)
- return -EINVAL;
- /* let clients handle this */
- call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
-
- PDEBUG(DBG_IOCTL, "VIDIOC_S_TUNER");
- return 0;
- }
- case VIDIOC_G_FREQUENCY:
- {
- struct v4l2_frequency *freq = arg;
-
- freq->tuner = 0; // Only one tuner
- freq->type = V4L2_TUNER_ANALOG_TV;
- freq->frequency = usbvision->freq;
- PDEBUG(DBG_IOCTL, "VIDIOC_G_FREQUENCY freq=0x%X", (unsigned)freq->frequency);
- return 0;
- }
- case VIDIOC_S_FREQUENCY:
- {
- struct v4l2_frequency *freq = arg;
-
- // Only no or one tuner for now
- if (!usbvision->have_tuner || freq->tuner)
- return -EINVAL;
-
- usbvision->freq = freq->frequency;
- call_i2c_clients(usbvision, cmd, freq);
- PDEBUG(DBG_IOCTL, "VIDIOC_S_FREQUENCY freq=0x%X", (unsigned)freq->frequency);
- return 0;
- }
- case VIDIOC_G_AUDIO:
- {
- struct v4l2_audio *v = arg;
- memset(v,0, sizeof(v));
- strcpy(v->name, "TV");
- PDEBUG(DBG_IOCTL, "VIDIOC_G_AUDIO");
- return 0;
- }
- case VIDIOC_S_AUDIO:
- {
- struct v4l2_audio *v = arg;
- if(v->index) {
- return -EINVAL;
- }
- PDEBUG(DBG_IOCTL, "VIDIOC_S_AUDIO");
- return 0;
- }
- case VIDIOC_QUERYCTRL:
- {
- struct v4l2_queryctrl *ctrl = arg;
- int id=ctrl->id;
+ if ((input >= usbvision->video_inputs) || (input < 0) )
+ return -EINVAL;
- memset(ctrl,0,sizeof(*ctrl));
- ctrl->id=id;
+ down(&usbvision->lock);
+ usbvision_muxsel(usbvision, input);
+ usbvision_set_input(usbvision);
+ usbvision_set_output(usbvision,
+ usbvision->curwidth,
+ usbvision->curheight);
+ up(&usbvision->lock);
+ return 0;
+}
- call_i2c_clients(usbvision, cmd, arg);
+static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ usbvision->tvnormId=*id;
- if (ctrl->type)
- return 0;
- else
- return -EINVAL;
+ down(&usbvision->lock);
+ call_i2c_clients(usbvision, VIDIOC_S_STD,
+ &usbvision->tvnormId);
+ up(&usbvision->lock);
+ /* propagate the change to the decoder */
+ usbvision_muxsel(usbvision, usbvision->ctl_input);
- PDEBUG(DBG_IOCTL,"VIDIOC_QUERYCTRL id=%x value=%x",ctrl->id,ctrl->type);
- }
- case VIDIOC_G_CTRL:
- {
- struct v4l2_control *ctrl = arg;
- call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
- PDEBUG(DBG_IOCTL,"VIDIOC_G_CTRL id=%x value=%x",ctrl->id,ctrl->value);
- return 0;
- }
- case VIDIOC_S_CTRL:
- {
- struct v4l2_control *ctrl = arg;
+ return 0;
+}
- PDEBUG(DBG_IOCTL, "VIDIOC_S_CTRL id=%x value=%x",ctrl->id,ctrl->value);
- call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
- return 0;
- }
- case VIDIOC_REQBUFS:
- {
- struct v4l2_requestbuffers *vr = arg;
- int ret;
+static int vidioc_g_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *vt)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- RESTRICT_TO_RANGE(vr->count,1,USBVISION_NUMFRAMES);
+ if (!usbvision->have_tuner || vt->index) // Only tuner 0
+ return -EINVAL;
+ if(usbvision->radio) {
+ strcpy(vt->name, "Radio");
+ vt->type = V4L2_TUNER_RADIO;
+ } else {
+ strcpy(vt->name, "Television");
+ }
+ /* Let clients fill in the remainder of this struct */
+ call_i2c_clients(usbvision,VIDIOC_G_TUNER,vt);
- // Check input validity : the user must do a VIDEO CAPTURE and MMAP method.
- if((vr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (vr->memory != V4L2_MEMORY_MMAP))
- return -EINVAL;
+ return 0;
+}
- if(usbvision->streaming == Stream_On) {
- if ((ret = usbvision_stream_interrupt(usbvision)))
- return ret;
- }
+static int vidioc_s_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *vt)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- usbvision_frames_free(usbvision);
- usbvision_empty_framequeues(usbvision);
- vr->count = usbvision_frames_alloc(usbvision,vr->count);
+ // Only no or one tuner for now
+ if (!usbvision->have_tuner || vt->index)
+ return -EINVAL;
+ /* let clients handle this */
+ call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
- usbvision->curFrame = NULL;
+ return 0;
+}
- PDEBUG(DBG_IOCTL, "VIDIOC_REQBUFS count=%d",vr->count);
- return 0;
- }
- case VIDIOC_QUERYBUF:
- {
- struct v4l2_buffer *vb = arg;
- struct usbvision_frame *frame;
+static int vidioc_g_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *freq)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- // FIXME : must control that buffers are mapped (VIDIOC_REQBUFS has been called)
+ freq->tuner = 0; // Only one tuner
+ if(usbvision->radio) {
+ freq->type = V4L2_TUNER_RADIO;
+ } else {
+ freq->type = V4L2_TUNER_ANALOG_TV;
+ }
+ freq->frequency = usbvision->freq;
- if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
- return -EINVAL;
- }
- if(vb->index>=usbvision->num_frames) {
- return -EINVAL;
- }
- // Updating the corresponding frame state
- vb->flags = 0;
- frame = &usbvision->frame[vb->index];
- if(frame->grabstate >= FrameState_Ready)
- vb->flags |= V4L2_BUF_FLAG_QUEUED;
- if(frame->grabstate >= FrameState_Done)
- vb->flags |= V4L2_BUF_FLAG_DONE;
- if(frame->grabstate == FrameState_Unused)
- vb->flags |= V4L2_BUF_FLAG_MAPPED;
- vb->memory = V4L2_MEMORY_MMAP;
-
- vb->m.offset = vb->index*PAGE_ALIGN(usbvision->max_frame_size);
-
- vb->memory = V4L2_MEMORY_MMAP;
- vb->field = V4L2_FIELD_NONE;
- vb->length = usbvision->curwidth*usbvision->curheight*usbvision->palette.bytes_per_pixel;
- vb->timestamp = usbvision->frame[vb->index].timestamp;
- vb->sequence = usbvision->frame[vb->index].sequence;
- return 0;
- }
- case VIDIOC_QBUF:
- {
- struct v4l2_buffer *vb = arg;
- struct usbvision_frame *frame;
- unsigned long lock_flags;
-
- // FIXME : works only on VIDEO_CAPTURE MODE, MMAP.
- if(vb->type != V4L2_CAP_VIDEO_CAPTURE) {
- return -EINVAL;
- }
- if(vb->index>=usbvision->num_frames) {
- return -EINVAL;
- }
+ return 0;
+}
- frame = &usbvision->frame[vb->index];
+static int vidioc_s_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *freq)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- if (frame->grabstate != FrameState_Unused) {
- return -EAGAIN;
- }
+ // Only no or one tuner for now
+ if (!usbvision->have_tuner || freq->tuner)
+ return -EINVAL;
- /* Mark it as ready and enqueue frame */
- frame->grabstate = FrameState_Ready;
- frame->scanstate = ScanState_Scanning;
- frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */
+ usbvision->freq = freq->frequency;
+ call_i2c_clients(usbvision, VIDIOC_S_FREQUENCY, freq);
- vb->flags &= ~V4L2_BUF_FLAG_DONE;
+ return 0;
+}
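
As a usage note for the two frequency handlers above, a minimal user-space tuning sketch (hypothetical device path, error handling omitted; analog-TV frequencies are assumed to use the usual 62.5 kHz units):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency freq;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;
	memset(&freq, 0, sizeof(freq));
	freq.tuner = 0;				/* the driver only has tuner 0 */
	freq.type = V4L2_TUNER_ANALOG_TV;
	freq.frequency = 2804;			/* ~175.25 MHz, assuming 62.5 kHz units */
	if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
		return 1;
	/* read it back through vidioc_g_frequency() */
	ioctl(fd, VIDIOC_G_FREQUENCY, &freq);
	return 0;
}
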
- /* set v4l2_format index */
- frame->v4l2_format = usbvision->palette;
+static int vidioc_g_audio (struct file *file, void *priv, struct v4l2_audio *a)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue);
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+ memset(a,0,sizeof(*a));
+ if(usbvision->radio) {
+ strcpy(a->name,"Radio");
+ } else {
+ strcpy(a->name, "TV");
+ }
- PDEBUG(DBG_IOCTL, "VIDIOC_QBUF frame #%d",vb->index);
- return 0;
- }
- case VIDIOC_DQBUF:
- {
- struct v4l2_buffer *vb = arg;
- int ret;
- struct usbvision_frame *f;
- unsigned long lock_flags;
-
- if (vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if (list_empty(&(usbvision->outqueue))) {
- if (usbvision->streaming == Stream_Idle)
- return -EINVAL;
- ret = wait_event_interruptible
- (usbvision->wait_frame,
- !list_empty(&(usbvision->outqueue)));
- if (ret)
- return ret;
- }
+ return 0;
+}
- spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
- f = list_entry(usbvision->outqueue.next,
- struct usbvision_frame, frame);
- list_del(usbvision->outqueue.next);
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
-
- f->grabstate = FrameState_Unused;
-
- vb->memory = V4L2_MEMORY_MMAP;
- vb->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE;
- vb->index = f->index;
- vb->sequence = f->sequence;
- vb->timestamp = f->timestamp;
- vb->field = V4L2_FIELD_NONE;
- vb->bytesused = f->scanlength;
-
- return 0;
- }
- case VIDIOC_STREAMON:
- {
- int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
+static int vidioc_s_audio (struct file *file, void *fh,
+ struct v4l2_audio *a)
+{
+ if(a->index) {
+ return -EINVAL;
+ }
- usbvision->streaming = Stream_On;
+ return 0;
+}
- call_i2c_clients(usbvision,VIDIOC_STREAMON , &b);
+static int vidioc_queryctrl (struct file *file, void *priv,
+ struct v4l2_queryctrl *ctrl)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int id=ctrl->id;
- PDEBUG(DBG_IOCTL, "VIDIOC_STREAMON");
+ memset(ctrl,0,sizeof(*ctrl));
+ ctrl->id=id;
- return 0;
- }
- case VIDIOC_STREAMOFF:
- {
- int *type = arg;
- int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- if (*type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- if(usbvision->streaming == Stream_On) {
- usbvision_stream_interrupt(usbvision);
- // Stop all video streamings
- call_i2c_clients(usbvision,VIDIOC_STREAMOFF , &b);
- }
- usbvision_empty_framequeues(usbvision);
+ call_i2c_clients(usbvision, VIDIOC_QUERYCTRL, ctrl);
- PDEBUG(DBG_IOCTL, "VIDIOC_STREAMOFF");
- return 0;
- }
- case VIDIOC_ENUM_FMT:
- {
- struct v4l2_fmtdesc *vfd = arg;
+ if (!ctrl->type)
+ return -EINVAL;
- if(vfd->index>=USBVISION_SUPPORTED_PALETTES-1) {
- return -EINVAL;
- }
- vfd->flags = 0;
- vfd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- strcpy(vfd->description,usbvision_v4l2_format[vfd->index].desc);
- vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
- memset(vfd->reserved, 0, sizeof(vfd->reserved));
- return 0;
- }
- case VIDIOC_G_FMT:
- {
- struct v4l2_format *vf = arg;
-
- switch (vf->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- {
- vf->fmt.pix.width = usbvision->curwidth;
- vf->fmt.pix.height = usbvision->curheight;
- vf->fmt.pix.pixelformat = usbvision->palette.format;
- vf->fmt.pix.bytesperline = usbvision->curwidth*usbvision->palette.bytes_per_pixel;
- vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*usbvision->curheight;
- vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
- PDEBUG(DBG_IOCTL, "VIDIOC_G_FMT w=%d, h=%d, format=%s",
- vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
- return 0;
- }
- default:
- PDEBUG(DBG_IOCTL, "VIDIOC_G_FMT invalid type %d",vf->type);
- return -EINVAL;
- }
- return 0;
- }
- case VIDIOC_TRY_FMT:
- case VIDIOC_S_FMT:
- {
- struct v4l2_format *vf = arg;
- int formatIdx,ret;
-
- switch(vf->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- {
- /* Find requested format in available ones */
- for(formatIdx=0;formatIdx<USBVISION_SUPPORTED_PALETTES;formatIdx++) {
- if(vf->fmt.pix.pixelformat == usbvision_v4l2_format[formatIdx].format) {
- usbvision->palette = usbvision_v4l2_format[formatIdx];
- break;
- }
- }
- /* robustness */
- if(formatIdx == USBVISION_SUPPORTED_PALETTES) {
- return -EINVAL;
- }
- RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
- RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);
-
- vf->fmt.pix.bytesperline = vf->fmt.pix.width*usbvision->palette.bytes_per_pixel;
- vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
-
- if(cmd == VIDIOC_TRY_FMT) {
- PDEBUG(DBG_IOCTL, "VIDIOC_TRY_FMT grabdisplay w=%d, h=%d, format=%s",
- vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
- return 0;
- }
-
- /* stop io in case it is already in progress */
- if(usbvision->streaming == Stream_On) {
- if ((ret = usbvision_stream_interrupt(usbvision)))
- return ret;
- }
- usbvision_frames_free(usbvision);
- usbvision_empty_framequeues(usbvision);
-
- usbvision->curFrame = NULL;
-
- // by now we are committed to the new data...
- down(&usbvision->lock);
- usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);
- up(&usbvision->lock);
-
- PDEBUG(DBG_IOCTL, "VIDIOC_S_FMT grabdisplay w=%d, h=%d, format=%s",
- vf->fmt.pix.width, vf->fmt.pix.height,usbvision->palette.desc);
- return 0;
- }
- default:
- return -EINVAL;
- }
- }
- default:
- return -ENOIOCTLCMD;
+ return 0;
+}
+
+static int vidioc_g_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
+
+ return 0;
+}
+
+static int vidioc_s_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
+
+ return 0;
+}
+
+static int vidioc_reqbufs (struct file *file,
+ void *priv, struct v4l2_requestbuffers *vr)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int ret;
+
+ RESTRICT_TO_RANGE(vr->count,1,USBVISION_NUMFRAMES);
+
+ /* Check input validity:
+ the user must request VIDEO CAPTURE buffers with the MMAP method. */
+ if((vr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+ (vr->memory != V4L2_MEMORY_MMAP))
+ return -EINVAL;
+
+ if(usbvision->streaming == Stream_On) {
+ if ((ret = usbvision_stream_interrupt(usbvision)))
+ return ret;
}
+
+ usbvision_frames_free(usbvision);
+ usbvision_empty_framequeues(usbvision);
+ vr->count = usbvision_frames_alloc(usbvision,vr->count);
+
+ usbvision->curFrame = NULL;
+
return 0;
}
-static int usbvision_v4l2_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int vidioc_querybuf (struct file *file,
+ void *priv, struct v4l2_buffer *vb)
{
- return video_usercopy(inode, file, cmd, arg, usbvision_v4l2_do_ioctl);
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usbvision_frame *frame;
+
+ /* FIXME : must check that buffers are mapped
+ (VIDIOC_REQBUFS has been called) */
+ if(vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ return -EINVAL;
+ }
+ if(vb->index>=usbvision->num_frames) {
+ return -EINVAL;
+ }
+ /* Updating the corresponding frame state */
+ vb->flags = 0;
+ frame = &usbvision->frame[vb->index];
+ if(frame->grabstate >= FrameState_Ready)
+ vb->flags |= V4L2_BUF_FLAG_QUEUED;
+ if(frame->grabstate >= FrameState_Done)
+ vb->flags |= V4L2_BUF_FLAG_DONE;
+ if(frame->grabstate == FrameState_Unused)
+ vb->flags |= V4L2_BUF_FLAG_MAPPED;
+ vb->memory = V4L2_MEMORY_MMAP;
+
+ vb->m.offset = vb->index*PAGE_ALIGN(usbvision->max_frame_size);
+
+ vb->memory = V4L2_MEMORY_MMAP;
+ vb->field = V4L2_FIELD_NONE;
+ vb->length = usbvision->curwidth*
+ usbvision->curheight*
+ usbvision->palette.bytes_per_pixel;
+ vb->timestamp = usbvision->frame[vb->index].timestamp;
+ vb->sequence = usbvision->frame[vb->index].sequence;
+ return 0;
+}
+
+static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *vb)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usbvision_frame *frame;
+ unsigned long lock_flags;
+
+ /* FIXME : works only on VIDEO_CAPTURE MODE, MMAP. */
+ if(vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ return -EINVAL;
+ }
+ if(vb->index>=usbvision->num_frames) {
+ return -EINVAL;
+ }
+
+ frame = &usbvision->frame[vb->index];
+
+ if (frame->grabstate != FrameState_Unused) {
+ return -EAGAIN;
+ }
+
+ /* Mark it as ready and enqueue frame */
+ frame->grabstate = FrameState_Ready;
+ frame->scanstate = ScanState_Scanning;
+ frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */
+
+ vb->flags &= ~V4L2_BUF_FLAG_DONE;
+
+ /* set v4l2_format index */
+ frame->v4l2_format = usbvision->palette;
+
+ spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
+ list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue);
+ spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+
+ return 0;
}
+static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *vb)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int ret;
+ struct usbvision_frame *f;
+ unsigned long lock_flags;
+
+ if (vb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (list_empty(&(usbvision->outqueue))) {
+ if (usbvision->streaming == Stream_Idle)
+ return -EINVAL;
+ ret = wait_event_interruptible
+ (usbvision->wait_frame,
+ !list_empty(&(usbvision->outqueue)));
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
+ f = list_entry(usbvision->outqueue.next,
+ struct usbvision_frame, frame);
+ list_del(usbvision->outqueue.next);
+ spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+
+ f->grabstate = FrameState_Unused;
+
+ vb->memory = V4L2_MEMORY_MMAP;
+ vb->flags = V4L2_BUF_FLAG_MAPPED |
+ V4L2_BUF_FLAG_QUEUED |
+ V4L2_BUF_FLAG_DONE;
+ vb->index = f->index;
+ vb->sequence = f->sequence;
+ vb->timestamp = f->timestamp;
+ vb->field = V4L2_FIELD_NONE;
+ vb->bytesused = f->scanlength;
+
+ return 0;
+}
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ usbvision->streaming = Stream_On;
+ call_i2c_clients(usbvision,VIDIOC_STREAMON , &b);
+
+ return 0;
+}
+
+static int vidioc_streamoff(struct file *file,
+ void *priv, enum v4l2_buf_type type)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int b=V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if(usbvision->streaming == Stream_On) {
+ usbvision_stream_interrupt(usbvision);
+ /* Stop all video streaming */
+ call_i2c_clients(usbvision,VIDIOC_STREAMOFF , &b);
+ }
+ usbvision_empty_framequeues(usbvision);
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_cap (struct file *file, void *priv,
+ struct v4l2_fmtdesc *vfd)
+{
+ if(vfd->index>=USBVISION_SUPPORTED_PALETTES-1) {
+ return -EINVAL;
+ }
+ vfd->flags = 0;
+ vfd->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ strcpy(vfd->description,usbvision_v4l2_format[vfd->index].desc);
+ vfd->pixelformat = usbvision_v4l2_format[vfd->index].format;
+ memset(vfd->reserved, 0, sizeof(vfd->reserved));
+ return 0;
+}
+
+static int vidioc_g_fmt_cap (struct file *file, void *priv,
+ struct v4l2_format *vf)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ vf->fmt.pix.width = usbvision->curwidth;
+ vf->fmt.pix.height = usbvision->curheight;
+ vf->fmt.pix.pixelformat = usbvision->palette.format;
+ vf->fmt.pix.bytesperline =
+ usbvision->curwidth*usbvision->palette.bytes_per_pixel;
+ vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*usbvision->curheight;
+ vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */
+
+ return 0;
+}
+
+static int vidioc_try_fmt_cap (struct file *file, void *priv,
+ struct v4l2_format *vf)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int formatIdx;
+
+ /* Find requested format in available ones */
+ for(formatIdx=0;formatIdx<USBVISION_SUPPORTED_PALETTES;formatIdx++) {
+ if(vf->fmt.pix.pixelformat ==
+ usbvision_v4l2_format[formatIdx].format) {
+ usbvision->palette = usbvision_v4l2_format[formatIdx];
+ break;
+ }
+ }
+ /* robustness */
+ if(formatIdx == USBVISION_SUPPORTED_PALETTES) {
+ return -EINVAL;
+ }
+ RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
+ RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);
+
+ vf->fmt.pix.bytesperline = vf->fmt.pix.width*
+ usbvision->palette.bytes_per_pixel;
+ vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;
+
+ return 0;
+}
+
+static int vidioc_s_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *vf)
+{
+ struct video_device *dev = video_devdata(file);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
+ int ret;
+
+ if( 0 != (ret=vidioc_try_fmt_cap (file, priv, vf)) ) {
+ return ret;
+ }
+
+ /* stop io in case it is already in progress */
+ if(usbvision->streaming == Stream_On) {
+ if ((ret = usbvision_stream_interrupt(usbvision)))
+ return ret;
+ }
+ usbvision_frames_free(usbvision);
+ usbvision_empty_framequeues(usbvision);
+
+ usbvision->curFrame = NULL;
+
+ /* by now we are committed to the new data... */
+ down(&usbvision->lock);
+ usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);
+ up(&usbvision->lock);
+
+ return 0;
+}
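
Taken together, the reqbufs/querybuf/qbuf/dqbuf/streamon/streamoff handlers above implement the standard V4L2 MMAP capture cycle. A minimal user-space sketch of that cycle (hypothetical device path, format negotiation and most error handling omitted):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *frames[4];
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	/* Ask for four MMAP buffers; the driver may clamp the count. */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return 1;
	if (req.count > 4)
		req.count = 4;

	/* Map and queue every buffer, then start streaming. */
	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
			return 1;
		frames[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, buf.m.offset);
		if (frames[i] == MAP_FAILED)
			return 1;
		ioctl(fd, VIDIOC_QBUF, &buf);
	}
	ioctl(fd, VIDIOC_STREAMON, &type);

	/* Dequeue one filled frame; bytesused holds the scan length. */
	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_DQBUF, &buf) == 0)
		ioctl(fd, VIDIOC_QBUF, &buf);	/* hand the buffer back */

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	return 0;
}
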
static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
int noblock = file->f_flags & O_NONBLOCK;
unsigned long lock_flags;
int ret,i;
struct usbvision_frame *frame;
- PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __FUNCTION__, (unsigned long)count, noblock);
+ PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __FUNCTION__,
+ (unsigned long)count, noblock);
if (!USBVISION_IS_OPERATIONAL(usbvision) || (buf == NULL))
return -EFAULT;
- /* This entry point is compatible with the mmap routines so that a user can do either
- VIDIOC_QBUF/VIDIOC_DQBUF to get frames or call read on the device. */
+ /* This entry point is compatible with the mmap routines
+ so that a user can do either VIDIOC_QBUF/VIDIOC_DQBUF
+ to get frames or call read on the device. */
if(!usbvision->num_frames) {
- /* First, allocate some frames to work with if this has not been done with
- VIDIOC_REQBUF */
+ /* First, allocate some frames to work with
+ if this has not been done with VIDIOC_REQBUF */
usbvision_frames_free(usbvision);
usbvision_empty_framequeues(usbvision);
usbvision_frames_alloc(usbvision,USBVISION_NUMFRAMES);
@@ -1086,21 +1116,24 @@ static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
call_i2c_clients(usbvision,VIDIOC_STREAMON , NULL);
}
- /* Then, enqueue as many frames as possible (like a user of VIDIOC_QBUF would do) */
+ /* Then, enqueue as many frames as possible
+ (like a user of VIDIOC_QBUF would do) */
for(i=0;i<usbvision->num_frames;i++) {
frame = &usbvision->frame[i];
if(frame->grabstate == FrameState_Unused) {
/* Mark it as ready and enqueue frame */
frame->grabstate = FrameState_Ready;
frame->scanstate = ScanState_Scanning;
- frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */
+ /* Accumulated in usbvision_parse_data() */
+ frame->scanlength = 0;
/* set v4l2_format index */
frame->v4l2_format = usbvision->palette;
spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
list_add_tail(&frame->frame, &usbvision->inqueue);
- spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);
+ spin_unlock_irqrestore(&usbvision->queue_lock,
+ lock_flags);
}
}
@@ -1128,8 +1161,9 @@ static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
return 0;
}
- PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld", __FUNCTION__,
- frame->index, frame->bytes_read, frame->scanlength);
+ PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld",
+ __FUNCTION__,
+ frame->index, frame->bytes_read, frame->scanlength);
/* copy bytes to user space; we allow for partials reads */
if ((count + frame->bytes_read) > (unsigned long)frame->scanlength)
@@ -1140,10 +1174,11 @@ static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
}
frame->bytes_read += count;
- PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld", __FUNCTION__,
- (unsigned long)count, frame->bytes_read);
+ PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld",
+ __FUNCTION__,
+ (unsigned long)count, frame->bytes_read);
- // For now, forget the frame if it has not been read in one shot.
+ /* For now, forget the frame if it has not been read in one shot. */
/* if (frame->bytes_read >= frame->scanlength) {// All data has been read */
frame->bytes_read = 0;
@@ -1162,7 +1197,8 @@ static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
u32 i;
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
PDEBUG(DBG_MMAP, "mmap");
@@ -1180,11 +1216,13 @@ static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
}
for (i = 0; i < usbvision->num_frames; i++) {
- if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) == vma->vm_pgoff)
+ if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) ==
+ vma->vm_pgoff)
break;
}
if (i == usbvision->num_frames) {
- PDEBUG(DBG_MMAP, "mmap: user supplied mapping address is out of range");
+ PDEBUG(DBG_MMAP,
+ "mmap: user supplied mapping address is out of range");
up(&usbvision->lock);
return -EINVAL;
}
@@ -1218,8 +1256,8 @@ static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
static int usbvision_radio_open(struct inode *inode, struct file *file)
{
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
- struct v4l2_frequency freq;
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
int errCode = 0;
PDEBUG(DBG_IO, "%s:", __FUNCTION__);
@@ -1249,8 +1287,6 @@ static int usbvision_radio_open(struct inode *inode, struct file *file)
// If so far no errors then we shall start the radio
usbvision->radio = 1;
call_i2c_clients(usbvision,AUDC_SET_RADIO,&usbvision->tuner_type);
- freq.frequency = 1517; //SWR3 @ 94.8MHz
- call_i2c_clients(usbvision, VIDIOC_S_FREQUENCY, &freq);
usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO);
usbvision->user++;
}
@@ -1270,7 +1306,8 @@ static int usbvision_radio_open(struct inode *inode, struct file *file)
static int usbvision_radio_close(struct inode *inode, struct file *file)
{
struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
+ struct usb_usbvision *usbvision =
+ (struct usb_usbvision *) video_get_drvdata(dev);
int errCode = 0;
PDEBUG(DBG_IO, "");
@@ -1304,149 +1341,6 @@ static int usbvision_radio_close(struct inode *inode, struct file *file)
return errCode;
}
-static int usbvision_do_radio_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
-{
- struct video_device *dev = video_devdata(file);
- struct usb_usbvision *usbvision = (struct usb_usbvision *) video_get_drvdata(dev);
-
- if (!USBVISION_IS_OPERATIONAL(usbvision))
- return -EIO;
-
- switch (cmd) {
- case VIDIOC_QUERYCAP:
- {
- struct v4l2_capability *vc=arg;
-
- memset(vc, 0, sizeof(*vc));
- strlcpy(vc->driver, "USBVision", sizeof(vc->driver));
- strlcpy(vc->card, usbvision_device_data[usbvision->DevModel].ModelString,
- sizeof(vc->card));
- strlcpy(vc->bus_info, usbvision->dev->dev.bus_id,
- sizeof(vc->bus_info));
- vc->version = USBVISION_DRIVER_VERSION;
- vc->capabilities = (usbvision->have_tuner ? V4L2_CAP_TUNER : 0);
- PDEBUG(DBG_IO, "VIDIOC_QUERYCAP");
- return 0;
- }
- case VIDIOC_QUERYCTRL:
- {
- struct v4l2_queryctrl *ctrl = arg;
- int id=ctrl->id;
-
- memset(ctrl,0,sizeof(*ctrl));
- ctrl->id=id;
-
- call_i2c_clients(usbvision, cmd, arg);
- PDEBUG(DBG_IO,"VIDIOC_QUERYCTRL id=%x value=%x",ctrl->id,ctrl->type);
-
- if (ctrl->type)
- return 0;
- else
- return -EINVAL;
-
- }
- case VIDIOC_G_CTRL:
- {
- struct v4l2_control *ctrl = arg;
-
- call_i2c_clients(usbvision, VIDIOC_G_CTRL, ctrl);
- PDEBUG(DBG_IO,"VIDIOC_G_CTRL id=%x value=%x",ctrl->id,ctrl->value);
- return 0;
- }
- case VIDIOC_S_CTRL:
- {
- struct v4l2_control *ctrl = arg;
-
- call_i2c_clients(usbvision, VIDIOC_S_CTRL, ctrl);
- PDEBUG(DBG_IO, "VIDIOC_S_CTRL id=%x value=%x",ctrl->id,ctrl->value);
- return 0;
- }
- case VIDIOC_G_TUNER:
- {
- struct v4l2_tuner *t = arg;
-
- if (t->index > 0)
- return -EINVAL;
-
- memset(t,0,sizeof(*t));
- strcpy(t->name, "Radio");
- t->type = V4L2_TUNER_RADIO;
-
- /* Let clients fill in the remainder of this struct */
- call_i2c_clients(usbvision,VIDIOC_G_TUNER,t);
- PDEBUG(DBG_IO, "VIDIOC_G_TUNER signal=%x, afc=%x",t->signal,t->afc);
- return 0;
- }
- case VIDIOC_S_TUNER:
- {
- struct v4l2_tuner *vt = arg;
-
- // Only no or one tuner for now
- if (!usbvision->have_tuner || vt->index)
- return -EINVAL;
- /* let clients handle this */
- call_i2c_clients(usbvision,VIDIOC_S_TUNER,vt);
-
- PDEBUG(DBG_IO, "VIDIOC_S_TUNER");
- return 0;
- }
- case VIDIOC_G_AUDIO:
- {
- struct v4l2_audio *a = arg;
-
- memset(a,0,sizeof(*a));
- strcpy(a->name,"Radio");
- PDEBUG(DBG_IO, "VIDIOC_G_AUDIO");
- return 0;
- }
- case VIDIOC_S_AUDIO:
- case VIDIOC_S_INPUT:
- case VIDIOC_S_STD:
- return 0;
-
- case VIDIOC_G_FREQUENCY:
- {
- struct v4l2_frequency *f = arg;
-
- memset(f,0,sizeof(*f));
-
- f->type = V4L2_TUNER_RADIO;
- f->frequency = usbvision->freq;
- call_i2c_clients(usbvision, cmd, f);
- PDEBUG(DBG_IO, "VIDIOC_G_FREQUENCY freq=0x%X", (unsigned)f->frequency);
-
- return 0;
- }
- case VIDIOC_S_FREQUENCY:
- {
- struct v4l2_frequency *f = arg;
-
- if (f->tuner != 0)
- return -EINVAL;
- usbvision->freq = f->frequency;
- call_i2c_clients(usbvision, cmd, f);
- PDEBUG(DBG_IO, "VIDIOC_S_FREQUENCY freq=0x%X", (unsigned)f->frequency);
-
- return 0;
- }
- default:
- {
- PDEBUG(DBG_IO, "%s: Unknown command %x", __FUNCTION__, cmd);
- return -ENOIOCTLCMD;
- }
- }
- return 0;
-}
-
-
-static int usbvision_radio_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return video_usercopy(inode, file, cmd, arg, usbvision_do_radio_ioctl);
-}
-
-
/*
* Here comes the stuff for vbi on usbvision based devices
*
@@ -1454,21 +1348,21 @@ static int usbvision_radio_ioctl(struct inode *inode, struct file *file,
static int usbvision_vbi_open(struct inode *inode, struct file *file)
{
/* TODO */
- return -EINVAL;
+ return -ENODEV;
}
static int usbvision_vbi_close(struct inode *inode, struct file *file)
{
/* TODO */
- return -EINVAL;
+ return -ENODEV;
}
static int usbvision_do_vbi_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, void *arg)
{
/* TODO */
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
static int usbvision_vbi_ioctl(struct inode *inode, struct file *file,
@@ -1489,8 +1383,11 @@ static const struct file_operations usbvision_fops = {
.release = usbvision_v4l2_close,
.read = usbvision_v4l2_read,
.mmap = usbvision_v4l2_mmap,
- .ioctl = usbvision_v4l2_ioctl,
+ .ioctl = video_ioctl2,
.llseek = no_llseek,
+/* .poll = video_poll, */
+ .compat_ioctl = v4l_compat_ioctl32,
};
static struct video_device usbvision_video_template = {
.owner = THIS_MODULE,
@@ -1500,6 +1397,39 @@ static struct video_device usbvision_video_template = {
.name = "usbvision-video",
.release = video_device_release,
.minor = -1,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_cap = vidioc_enum_fmt_cap,
+ .vidioc_g_fmt_cap = vidioc_g_fmt_cap,
+ .vidioc_try_fmt_cap = vidioc_try_fmt_cap,
+ .vidioc_s_fmt_cap = vidioc_s_fmt_cap,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_audio = vidioc_g_audio,
+ .vidioc_s_audio = vidioc_s_audio,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+/* .vidiocgmbuf = vidiocgmbuf, */
+#endif
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vidioc_g_register,
+ .vidioc_s_register = vidioc_s_register,
+#endif
+ .tvnorms = USBVISION_NORMS,
+ .current_norm = V4L2_STD_PAL
};
@@ -1508,8 +1438,9 @@ static const struct file_operations usbvision_radio_fops = {
.owner = THIS_MODULE,
.open = usbvision_radio_open,
.release = usbvision_radio_close,
- .ioctl = usbvision_radio_ioctl,
+ .ioctl = video_ioctl2,
.llseek = no_llseek,
+ .compat_ioctl = v4l_compat_ioctl32,
};
static struct video_device usbvision_radio_template=
@@ -1518,12 +1449,27 @@ static struct video_device usbvision_radio_template=
.type = VID_TYPE_TUNER,
.hardware = VID_HARDWARE_USBVISION,
.fops = &usbvision_radio_fops,
- .release = video_device_release,
.name = "usbvision-radio",
+ .release = video_device_release,
.minor = -1,
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_audio = vidioc_g_audio,
+ .vidioc_s_audio = vidioc_s_audio,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+
+ .tvnorms = USBVISION_NORMS,
+ .current_norm = V4L2_STD_PAL
};
-
// vbi template
static const struct file_operations usbvision_vbi_fops = {
.owner = THIS_MODULE,
@@ -1531,6 +1477,7 @@ static const struct file_operations usbvision_vbi_fops = {
.release = usbvision_vbi_close,
.ioctl = usbvision_vbi_ioctl,
.llseek = no_llseek,
+ .compat_ioctl = v4l_compat_ioctl32,
};
static struct video_device usbvision_vbi_template=
@@ -1574,11 +1521,11 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
{
// vbi Device:
if (usbvision->vbi) {
- PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]", usbvision->vbi->minor & 0x1f);
+ PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
+ usbvision->vbi->minor & 0x1f);
if (usbvision->vbi->minor != -1) {
video_unregister_device(usbvision->vbi);
- }
- else {
+ } else {
video_device_release(usbvision->vbi);
}
usbvision->vbi = NULL;
@@ -1586,11 +1533,11 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// Radio Device:
if (usbvision->rdev) {
- PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]", usbvision->rdev->minor & 0x1f);
+ PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
+ usbvision->rdev->minor & 0x1f);
if (usbvision->rdev->minor != -1) {
video_unregister_device(usbvision->rdev);
- }
- else {
+ } else {
video_device_release(usbvision->rdev);
}
usbvision->rdev = NULL;
@@ -1598,11 +1545,11 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// Video Device:
if (usbvision->vdev) {
- PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]", usbvision->vdev->minor & 0x1f);
+ PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
+ usbvision->vdev->minor & 0x1f);
if (usbvision->vdev->minor != -1) {
video_unregister_device(usbvision->vdev);
- }
- else {
+ } else {
video_device_release(usbvision->vdev);
}
usbvision->vdev = NULL;
@@ -1613,37 +1560,52 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
{
// Video Device:
- usbvision->vdev = usbvision_vdev_init(usbvision, &usbvision_video_template, "USBVision Video");
+ usbvision->vdev = usbvision_vdev_init(usbvision,
+ &usbvision_video_template,
+ "USBVision Video");
if (usbvision->vdev == NULL) {
goto err_exit;
}
- if (video_register_device(usbvision->vdev, VFL_TYPE_GRABBER, video_nr)<0) {
+ if (video_register_device(usbvision->vdev,
+ VFL_TYPE_GRABBER,
+ video_nr)<0) {
goto err_exit;
}
- printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n", usbvision->nr,usbvision->vdev->minor & 0x1f);
+ printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
+ usbvision->nr,usbvision->vdev->minor & 0x1f);
// Radio Device:
if (usbvision_device_data[usbvision->DevModel].Radio) {
// usbvision has radio
- usbvision->rdev = usbvision_vdev_init(usbvision, &usbvision_radio_template, "USBVision Radio");
+ usbvision->rdev = usbvision_vdev_init(usbvision,
+ &usbvision_radio_template,
+ "USBVision Radio");
if (usbvision->rdev == NULL) {
goto err_exit;
}
- if (video_register_device(usbvision->rdev, VFL_TYPE_RADIO, radio_nr)<0) {
+ if (video_register_device(usbvision->rdev,
+ VFL_TYPE_RADIO,
+ radio_nr)<0) {
goto err_exit;
}
- printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n", usbvision->nr, usbvision->rdev->minor & 0x1f);
+ printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
+ usbvision->nr, usbvision->rdev->minor & 0x1f);
}
// vbi Device:
if (usbvision_device_data[usbvision->DevModel].vbi) {
- usbvision->vbi = usbvision_vdev_init(usbvision, &usbvision_vbi_template, "USBVision VBI");
+ usbvision->vbi = usbvision_vdev_init(usbvision,
+ &usbvision_vbi_template,
+ "USBVision VBI");
if (usbvision->vdev == NULL) {
goto err_exit;
}
- if (video_register_device(usbvision->vbi, VFL_TYPE_VBI, vbi_nr)<0) {
+ if (video_register_device(usbvision->vbi,
+ VFL_TYPE_VBI,
+ vbi_nr)<0) {
goto err_exit;
}
- printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n", usbvision->nr,usbvision->vbi->minor & 0x1f);
+ printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
+ usbvision->nr,usbvision->vbi->minor & 0x1f);
}
// all done
return 0;
@@ -1657,7 +1619,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
/*
* usbvision_alloc()
*
- * This code allocates the struct usb_usbvision. It is filled with default values.
+ * This code allocates the struct usb_usbvision.
+ * It is filled with default values.
*
* Returns NULL on error, a pointer to usb_usbvision else.
*
@@ -1666,7 +1629,8 @@ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev)
{
struct usb_usbvision *usbvision;
- if ((usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL)) == NULL) {
+ if ((usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL)) ==
+ NULL) {
goto err_exit;
}
@@ -1728,11 +1692,11 @@ static void usbvision_release(struct usb_usbvision *usbvision)
}
-/******************************** usb interface *****************************************/
+/*********************** usb interface **********************************/
static void usbvision_configure_video(struct usb_usbvision *usbvision)
{
- int model,i;
+ int model;
if (usbvision == NULL)
return;
@@ -1741,25 +1705,23 @@ static void usbvision_configure_video(struct usb_usbvision *usbvision)
usbvision->palette = usbvision_v4l2_format[2]; // V4L2_PIX_FMT_RGB24;
if (usbvision_device_data[usbvision->DevModel].Vin_Reg2_override) {
- usbvision->Vin_Reg2_Preset = usbvision_device_data[usbvision->DevModel].Vin_Reg2;
+ usbvision->Vin_Reg2_Preset =
+ usbvision_device_data[usbvision->DevModel].Vin_Reg2;
} else {
usbvision->Vin_Reg2_Preset = 0;
}
- for (i = 0; i < TVNORMS; i++)
- if (usbvision_device_data[model].VideoNorm == tvnorms[i].mode)
- break;
- if (i == TVNORMS)
- i = 0;
- usbvision->tvnorm = &tvnorms[i]; /* set default norm */
+ usbvision->tvnormId = usbvision_device_data[model].VideoNorm;
usbvision->video_inputs = usbvision_device_data[model].VideoChannels;
usbvision->ctl_input = 0;
/* This should be here to make i2c clients to be able to register */
- usbvision_audio_off(usbvision); //first switch off audio
+ /* first switch off audio */
+ usbvision_audio_off(usbvision);
if (!PowerOnAtOpen) {
- usbvision_power_on(usbvision); //and then power up the noisy tuner
+ /* and then power up the noisy tuner */
+ usbvision_power_on(usbvision);
usbvision_i2c_register(usbvision);
}
}
@@ -1796,18 +1758,22 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
if (usbvision_device_data[model].Interface >= 0) {
interface = &dev->actconfig->interface[usbvision_device_data[model].Interface]->altsetting[0];
- }
- else {
+ } else {
interface = &dev->actconfig->interface[ifnum]->altsetting[0];
}
endpoint = &interface->endpoint[1].desc;
- if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC) {
- err("%s: interface %d. has non-ISO endpoint!", __FUNCTION__, ifnum);
- err("%s: Endpoint attributes %d", __FUNCTION__, endpoint->bmAttributes);
+ if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
+ USB_ENDPOINT_XFER_ISOC) {
+ err("%s: interface %d. has non-ISO endpoint!",
+ __FUNCTION__, ifnum);
+ err("%s: Endpoint attributes %d",
+ __FUNCTION__, endpoint->bmAttributes);
return -ENODEV;
}
- if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) {
- err("%s: interface %d. has ISO OUT endpoint!", __FUNCTION__, ifnum);
+ if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
+ USB_DIR_OUT) {
+ err("%s: interface %d. has ISO OUT endpoint!",
+ __FUNCTION__, ifnum);
return -ENODEV;
}
@@ -1818,11 +1784,9 @@ static int __devinit usbvision_probe(struct usb_interface *intf,
if (dev->descriptor.bNumConfigurations > 1) {
usbvision->bridgeType = BRIDGE_NT1004;
- }
- else if (model == DAZZLE_DVC_90_REV_1_SECAM) {
+ } else if (model == DAZZLE_DVC_90_REV_1_SECAM) {
usbvision->bridgeType = BRIDGE_NT1005;
- }
- else {
+ } else {
usbvision->bridgeType = BRIDGE_NT1003;
}
PDEBUG(DBG_PROBE, "bridgeType %d", usbvision->bridgeType);
@@ -1919,11 +1883,11 @@ static void __devexit usbvision_disconnect(struct usb_interface *intf)
up(&usbvision->lock);
if (usbvision->user) {
- printk(KERN_INFO "%s: In use, disconnect pending\n", __FUNCTION__);
+ printk(KERN_INFO "%s: In use, disconnect pending\n",
+ __FUNCTION__);
wake_up_interruptible(&usbvision->wait_frame);
wake_up_interruptible(&usbvision->wait_stream);
- }
- else {
+ } else {
usbvision_release(usbvision);
}
@@ -1950,7 +1914,6 @@ static int __init usbvision_init(void)
PDEBUG(DBG_PROBE, "");
- PDEBUG(DBG_IOCTL, "IOCTL debugging is enabled [video]");
PDEBUG(DBG_IO, "IO debugging is enabled [video]");
PDEBUG(DBG_PROBE, "PROBE debugging is enabled [video]");
PDEBUG(DBG_MMAP, "MMAP debugging is enabled [video]");
diff --git a/drivers/media/video/usbvision/usbvision.h b/drivers/media/video/usbvision/usbvision.h
index c759d00d7014..c5b6c501c869 100644
--- a/drivers/media/video/usbvision/usbvision.h
+++ b/drivers/media/video/usbvision/usbvision.h
@@ -221,6 +221,8 @@ enum {
#define I2C_USB_ADAP_MAX 16
+#define USBVISION_NORMS (V4L2_STD_PAL | V4L2_STD_NTSC | V4L2_STD_SECAM | V4L2_STD_PAL_M)
+
/* ----------------------------------------------------------------- */
/* usbvision video structures */
/* ----------------------------------------------------------------- */
@@ -301,14 +303,6 @@ struct usbvision_frame_header {
__u16 frameHeight; /* 10 - 11 after endian correction*/
};
-/* tvnorms */
-struct usbvision_tvnorm {
- char *name;
- v4l2_std_id id;
- /* mode for saa7113h */
- int mode;
-};
-
struct usbvision_frame {
char *data; /* Frame buffer */
struct usbvision_frame_header isocHeader; /* Header from stream */
@@ -386,7 +380,6 @@ struct usb_usbvision {
int tuner_type;
int tuner_addr;
int bridgeType; // NT1003, NT1004, NT1005
- int channel;
int radio;
int video_inputs; // # of inputs
unsigned long freq;
@@ -441,7 +434,7 @@ struct usb_usbvision {
struct v4l2_capability vcap; /* Video capabilities */
unsigned int ctl_input; /* selected input */
- struct usbvision_tvnorm *tvnorm; /* selected tv norm */
+ v4l2_std_id tvnormId; /* selected tv norm */
unsigned char video_endp; /* 0x82 for USBVISION devices based */
// Decompression stuff:
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 13ee550d3215..d2915d3530ea 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -939,16 +939,25 @@ int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qc
When no more controls are available 0 is returned. */
u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
{
- u32 ctrl_class;
+ u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
const u32 *pctrl;
- /* if no query is desired, then just return the control ID */
- if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0)
- return id;
if (ctrl_classes == NULL)
return 0;
+
+ /* if no query is desired, then check if the ID is part of ctrl_classes */
+ if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
+ /* find class */
+ while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
+ ctrl_classes++;
+ if (*ctrl_classes == NULL)
+ return 0;
+ pctrl = *ctrl_classes;
+ /* find control ID */
+ while (*pctrl && *pctrl != id) pctrl++;
+ return *pctrl ? id : 0;
+ }
id &= V4L2_CTRL_ID_MASK;
- ctrl_class = V4L2_CTRL_ID2CLASS(id);
id++; /* select next control */
/* find first class that matches (or is greater than) the class of
the ID */
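
The reworked v4l2_ctrl_next() walks a NULL-terminated array of per-class control tables, each a zero-terminated, ascending list of control IDs. A small sketch of how a driver typically builds and queries such a table (the control set here is chosen only for illustration):

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static const u32 user_ctrls[] = {
	V4L2_CID_USER_CLASS,
	V4L2_CID_BRIGHTNESS,
	V4L2_CID_CONTRAST,
	0	/* end of this class */
};

static const u32 *example_ctrl_classes[] = {
	user_ctrls,
	NULL	/* end of the class list */
};

/* Enumerate every supported control, as a VIDIOC_QUERYCTRL loop with
 * V4L2_CTRL_FLAG_NEXT_CTRL would do. */
static void example_enum_ctrls(void)
{
	u32 id = v4l2_ctrl_next(example_ctrl_classes,
				0 | V4L2_CTRL_FLAG_NEXT_CTRL);

	while (id) {
		/* ... fill in a struct v4l2_queryctrl for 'id' here ... */
		id = v4l2_ctrl_next(example_ctrl_classes,
				    id | V4L2_CTRL_FLAG_NEXT_CTRL);
	}
}
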
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index fcc5467e7636..e617925ba31e 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -47,6 +47,7 @@ static int videobuf_dvb_thread(void *data)
int err;
dprintk("dvb thread started\n");
+ set_freezable();
videobuf_read_start(&dvb->dvbq);
for (;;) {
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 0c658b74f2c4..a0c1647a2ba4 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -2077,12 +2077,10 @@ static int vino_wait_for_frame(struct vino_channel_settings *vcs)
init_waitqueue_entry(&wait, current);
/* add ourselves into wait queue */
add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
- /* and set current state */
- set_current_state(TASK_INTERRUPTIBLE);
/* to ensure that schedule_timeout will return immediately
- * if VINO interrupt was triggred meanwhile */
- schedule_timeout(HZ / 10);
+ * if VINO interrupt was triggered meanwhile */
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
if (signal_pending(current))
err = -EINTR;
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index f7e1d1910374..f6d3a9460ccc 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/version.h>
+#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_VIDEO_V4L1_COMPAT
@@ -145,9 +146,6 @@ struct vivi_buffer {
struct vivi_fmt *fmt;
-#ifdef CONFIG_VIVI_SCATTER
- struct sg_to_addr *to_addr;
-#endif
};
struct vivi_dmaqueue {
@@ -168,7 +166,7 @@ static LIST_HEAD(vivi_devlist);
struct vivi_dev {
struct list_head vivi_devlist;
- struct semaphore lock;
+ struct mutex lock;
int users;
@@ -232,68 +230,13 @@ static u8 bars[8][3] = {
#define TSTAMP_MAX_Y TSTAMP_MIN_Y+15
#define TSTAMP_MIN_X 64
-#ifdef CONFIG_VIVI_SCATTER
-static void prep_to_addr(struct sg_to_addr to_addr[],
- struct videobuf_buffer *vb)
-{
- int i, pos=0;
-
- for (i=0;i<vb->dma.nr_pages;i++) {
- to_addr[i].sg=&vb->dma.sglist[i];
- to_addr[i].pos=pos;
- pos += vb->dma.sglist[i].length;
- }
-}
-
-static int get_addr_pos(int pos, int pages, struct sg_to_addr to_addr[])
-{
- int p1=0,p2=pages-1,p3=pages/2;
-
- /* Sanity test */
- BUG_ON (pos>=to_addr[p2].pos+to_addr[p2].sg->length);
-
- while (p1+1<p2) {
- if (pos < to_addr[p3].pos) {
- p2=p3;
- } else {
- p1=p3;
- }
- p3=(p1+p2)/2;
- }
- if (pos >= to_addr[p2].pos)
- p1=p2;
-
- return (p1);
-}
-#endif
-#ifdef CONFIG_VIVI_SCATTER
-static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
- int hmax, int line, char *timestr)
-#else
static void gen_line(char *basep,int inipos,int wmax,
int hmax, int line, char *timestr)
-#endif
{
int w,i,j,pos=inipos,y;
char *p,*s;
u8 chr,r,g,b,color;
-#ifdef CONFIG_VIVI_SCATTER
- int pgpos,oldpg;
- char *basep;
- struct page *pg;
-
- unsigned long flags;
- spinlock_t spinlock;
-
- spin_lock_init(&spinlock);
-
- /* Get first addr pointed to pixel position */
- oldpg=get_addr_pos(pos,pages,to_addr);
- pg=pfn_to_page(sg_dma_address(to_addr[oldpg].sg) >> PAGE_SHIFT);
- spin_lock_irqsave(&spinlock,flags);
- basep = kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[oldpg].sg->offset;
-#endif
/* We will just duplicate the second pixel at the packet */
wmax/=2;
@@ -305,18 +248,7 @@ static void gen_line(char *basep,int inipos,int wmax,
b=bars[w*7/wmax][2];
for (color=0;color<4;color++) {
-#ifdef CONFIG_VIVI_SCATTER
- pgpos=get_addr_pos(pos,pages,to_addr);
- if (pgpos!=oldpg) {
- pg=pfn_to_page(sg_dma_address(to_addr[pgpos].sg) >> PAGE_SHIFT);
- kunmap_atomic(basep, KM_BOUNCE_READ);
- basep= kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[pgpos].sg->offset;
- oldpg=pgpos;
- }
- p=basep+pos-to_addr[pgpos].pos;
-#else
p=basep+pos;
-#endif
switch (color) {
case 0:
@@ -361,23 +293,7 @@ static void gen_line(char *basep,int inipos,int wmax,
pos=inipos+j*2;
for (color=0;color<4;color++) {
-#ifdef CONFIG_VIVI_SCATTER
- pgpos=get_addr_pos(pos,pages,to_addr);
- if (pgpos!=oldpg) {
- pg=pfn_to_page(sg_dma_address(
- to_addr[pgpos].sg)
- >> PAGE_SHIFT);
- kunmap_atomic(basep,
- KM_BOUNCE_READ);
- basep= kmap_atomic(pg,
- KM_BOUNCE_READ)+
- to_addr[pgpos].sg->offset;
- oldpg=pgpos;
- }
- p=basep+pos-to_addr[pgpos].pos;
-#else
p=basep+pos;
-#endif
y=TO_Y(r,g,b);
@@ -402,12 +318,7 @@ static void gen_line(char *basep,int inipos,int wmax,
end:
-#ifdef CONFIG_VIVI_SCATTER
- kunmap_atomic(basep, KM_BOUNCE_READ);
- spin_unlock_irqrestore(&spinlock,flags);
-#else
return;
-#endif
}
static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf)
{
@@ -415,35 +326,16 @@ static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf)
int hmax = buf->vb.height;
int wmax = buf->vb.width;
struct timeval ts;
-#ifdef CONFIG_VIVI_SCATTER
- struct sg_to_addr *to_addr=buf->to_addr;
- struct videobuf_buffer *vb=&buf->vb;
-#else
char *tmpbuf;
-#endif
-
-#ifdef CONFIG_VIVI_SCATTER
- /* Test if DMA mapping is ready */
- if (!sg_dma_address(&vb->dma.sglist[0]))
- return;
-
- prep_to_addr(to_addr,vb);
- /* Check if there is enough memory */
- BUG_ON(buf->vb.dma.nr_pages << PAGE_SHIFT < (buf->vb.width*buf->vb.height)*2);
-#else
if (buf->vb.dma.varea) {
tmpbuf=kmalloc (wmax*2, GFP_KERNEL);
} else {
tmpbuf=buf->vb.dma.vmalloc;
}
-#endif
for (h=0;h<hmax;h++) {
-#ifdef CONFIG_VIVI_SCATTER
- gen_line(to_addr,pos,vb->dma.nr_pages,wmax,hmax,h,dev->timestr);
-#else
if (buf->vb.dma.varea) {
gen_line(tmpbuf,0,wmax,hmax,h,dev->timestr);
/* FIXME: replacing to __copy_to_user */
@@ -452,7 +344,6 @@ static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf)
} else {
gen_line(tmpbuf,pos,wmax,hmax,h,dev->timestr);
}
-#endif
pos += wmax*2;
}
@@ -573,6 +464,7 @@ static int vivi_thread(void *data)
dprintk(1,"thread started\n");
mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT);
+ set_freezable();
for (;;) {
vivi_sleep(dma_q);
@@ -717,11 +609,6 @@ static void free_buffer(struct videobuf_queue *vq, struct vivi_buffer *buf)
if (in_interrupt())
BUG();
-#ifdef CONFIG_VIVI_SCATTER
- /*FIXME: Maybe a spinlock is required here */
- kfree(buf->to_addr);
- buf->to_addr=NULL;
-#endif
videobuf_waiton(&buf->vb,0,0);
videobuf_dma_unmap(vq, &buf->vb.dma);
@@ -767,12 +654,6 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
buf->vb.state = STATE_PREPARED;
-#ifdef CONFIG_VIVI_SCATTER
- if (NULL == (buf->to_addr = kmalloc(sizeof(*buf->to_addr) * vb->dma.nr_pages,GFP_KERNEL))) {
- rc=-ENOMEM;
- goto fail;
- }
-#endif
return 0;
fail:
@@ -837,40 +718,6 @@ static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb
free_buffer(vq,buf);
}
-#ifdef CONFIG_VIVI_SCATTER
-static int vivi_map_sg(void *dev, struct scatterlist *sg, int nents,
- int direction)
-{
- int i;
-
- dprintk(1,"%s, number of pages=%d\n",__FUNCTION__,nents);
- BUG_ON(direction == DMA_NONE);
-
- for (i = 0; i < nents; i++ ) {
- BUG_ON(!sg[i].page);
-
- sg_dma_address(&sg[i]) = page_to_phys(sg[i].page) + sg[i].offset;
- }
-
- return nents;
-}
-
-static int vivi_unmap_sg(void *dev,struct scatterlist *sglist,int nr_pages,
- int direction)
-{
- dprintk(1,"%s\n",__FUNCTION__);
- return 0;
-}
-
-static int vivi_dma_sync_sg(void *dev,struct scatterlist *sglist, int nr_pages,
- int direction)
-{
-// dprintk(1,"%s\n",__FUNCTION__);
-
-// flush_write_buffers();
- return 0;
-}
-#endif
static struct videobuf_queue_ops vivi_video_qops = {
.buf_setup = buffer_setup,
@@ -892,16 +739,16 @@ static struct videobuf_queue_ops vivi_video_qops = {
static int res_get(struct vivi_dev *dev, struct vivi_fh *fh)
{
/* is it free? */
- down(&dev->lock);
+ mutex_lock(&dev->lock);
if (dev->resources) {
/* no, someone else uses it */
- up(&dev->lock);
+ mutex_unlock(&dev->lock);
return 0;
}
/* it's free, grab it */
dev->resources =1;
dprintk(1,"res: get\n");
- up(&dev->lock);
+ mutex_unlock(&dev->lock);
return 1;
}
@@ -912,10 +759,10 @@ static int res_locked(struct vivi_dev *dev)
static void res_free(struct vivi_dev *dev, struct vivi_fh *fh)
{
- down(&dev->lock);
+ mutex_lock(&dev->lock);
dev->resources = 0;
dprintk(1,"res: put\n");
- up(&dev->lock);
+	mutex_unlock(&dev->lock);
}
/* ------------------------------------------------------------------
@@ -1259,19 +1106,11 @@ static int vivi_open(struct inode *inode, struct file *file)
sprintf(dev->timestr,"%02d:%02d:%02d:%03d",
dev->h,dev->m,dev->s,(dev->us+500)/1000);
-#ifdef CONFIG_VIVI_SCATTER
- videobuf_queue_init(&fh->vb_vidq,VIDEOBUF_DMA_SCATTER, &vivi_video_qops,
- NULL, NULL,
- fh->type,
- V4L2_FIELD_INTERLACED,
- sizeof(struct vivi_buffer),fh);
-#else
videobuf_queue_init(&fh->vb_vidq, &vivi_video_qops,
NULL, NULL,
fh->type,
V4L2_FIELD_INTERLACED,
sizeof(struct vivi_buffer),fh);
-#endif
return 0;
}
@@ -1422,7 +1261,7 @@ static int __init vivi_init(void)
init_waitqueue_head(&dev->vidq.wq);
/* initialize locks */
- init_MUTEX(&dev->lock);
+ mutex_init(&dev->lock);
dev->vidq.timeout.function = vivi_vid_timeout;
dev->vidq.timeout.data = (unsigned long)dev;
diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
index 8f6741a28a47..1bf4cbec6a87 100644
--- a/drivers/media/video/wm8739.c
+++ b/drivers/media/video/wm8739.c
@@ -321,12 +321,14 @@ static int wm8739_probe(struct i2c_adapter *adapter)
static int wm8739_detach(struct i2c_client *client)
{
+ struct wm8739_state *state = i2c_get_clientdata(client);
int err;
err = i2c_detach_client(client);
if (err)
return err;
+ kfree(state);
kfree(client);
return 0;
}
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index 4df5d30d4d09..9f7e894ef962 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -222,12 +222,14 @@ static int wm8775_probe(struct i2c_adapter *adapter)
static int wm8775_detach(struct i2c_client *client)
{
+ struct wm8775_state *state = i2c_get_clientdata(client);
int err;
err = i2c_detach_client(client);
if (err) {
return err;
}
+ kfree(state);
kfree(client);
return 0;
diff --git a/drivers/media/video/zc0301/Kconfig b/drivers/media/video/zc0301/Kconfig
index 47cd93f9c7de..edb00293cd59 100644
--- a/drivers/media/video/zc0301/Kconfig
+++ b/drivers/media/video/zc0301/Kconfig
@@ -1,6 +1,6 @@
config USB_ZC0301
tristate "USB ZC0301[P] Image Processor and Control Chip support"
- depends on VIDEO_V4L1
+ depends on VIDEO_V4L2
---help---
Say Y here if you want support for cameras based on the ZC0301 or
ZC0301P Image Processors and Control Chips.
diff --git a/drivers/media/video/zc0301/zc0301.h b/drivers/media/video/zc0301/zc0301.h
index 710f12eb9126..a2de50efa31a 100644
--- a/drivers/media/video/zc0301/zc0301.h
+++ b/drivers/media/video/zc0301/zc0301.h
@@ -36,6 +36,7 @@
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/kref.h>
#include "zc0301_sensor.h"
@@ -98,7 +99,7 @@ struct zc0301_module_param {
u16 frame_timeout;
};
-static DECLARE_RWSEM(zc0301_disconnect);
+static DECLARE_RWSEM(zc0301_dev_lock);
struct zc0301_device {
struct video_device* v4ldev;
@@ -121,12 +122,14 @@ struct zc0301_device {
struct zc0301_module_param module_param;
+ struct kref kref;
enum zc0301_dev_state state;
u8 users;
- struct mutex dev_mutex, fileop_mutex;
+ struct completion probe;
+ struct mutex open_mutex, fileop_mutex;
spinlock_t queue_lock;
- wait_queue_head_t open, wait_frame, wait_stream;
+ wait_queue_head_t wait_open, wait_frame, wait_stream;
};
/*****************************************************************************/
@@ -156,8 +159,8 @@ do { \
else if ((level) == 2) \
dev_info(&cam->usbdev->dev, fmt "\n", ## args); \
else if ((level) >= 3) \
- dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n", \
- __FUNCTION__, __LINE__ , ## args); \
+ dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", \
+ __FILE__, __FUNCTION__, __LINE__ , ## args); \
} \
} while (0)
# define KDBG(level, fmt, args...) \
@@ -166,8 +169,8 @@ do { \
if ((level) == 1 || (level) == 2) \
pr_info("zc0301: " fmt "\n", ## args); \
else if ((level) == 3) \
- pr_debug("zc0301: [%s:%d] " fmt "\n", __FUNCTION__, \
- __LINE__ , ## args); \
+			pr_debug("zc0301: [%s:%s:%d] " fmt "\n", __FILE__, \
+ __FUNCTION__, __LINE__ , ## args); \
} \
} while (0)
# define V4LDBG(level, name, cmd) \
@@ -183,8 +186,8 @@ do { \
#undef PDBG
#define PDBG(fmt, args...) \
-dev_info(&cam->usbdev->dev, "[%s:%d] " fmt "\n", \
- __FUNCTION__, __LINE__ , ## args)
+dev_info(&cam->usbdev->dev, "[%s:%s:%d] " fmt "\n", __FILE__, __FUNCTION__, \
+ __LINE__ , ## args)
#undef PDBGG
#define PDBGG(fmt, args...) do {;} while(0) /* placeholder */
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index f1120551c70c..703b741e46df 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -49,11 +49,11 @@
#define ZC0301_MODULE_NAME "V4L2 driver for ZC0301[P] " \
"Image Processor and Control Chip"
-#define ZC0301_MODULE_AUTHOR "(C) 2006 Luca Risolia"
+#define ZC0301_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
#define ZC0301_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define ZC0301_MODULE_LICENSE "GPL"
-#define ZC0301_MODULE_VERSION "1:1.07"
-#define ZC0301_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 7)
+#define ZC0301_MODULE_VERSION "1:1.10"
+#define ZC0301_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 10)
/*****************************************************************************/
@@ -573,7 +573,8 @@ static int zc0301_init(struct zc0301_device* cam)
int err = 0;
if (!(cam->state & DEV_INITIALIZED)) {
- init_waitqueue_head(&cam->open);
+ mutex_init(&cam->open_mutex);
+ init_waitqueue_head(&cam->wait_open);
qctrl = s->qctrl;
rect = &(s->cropcap.defrect);
cam->compression.quality = ZC0301_COMPRESSION_QUALITY;
@@ -634,59 +635,73 @@ static int zc0301_init(struct zc0301_device* cam)
return 0;
}
+/*****************************************************************************/
-static void zc0301_release_resources(struct zc0301_device* cam)
+static void zc0301_release_resources(struct kref *kref)
{
+ struct zc0301_device *cam = container_of(kref, struct zc0301_device,
+ kref);
DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor);
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
+ usb_put_dev(cam->usbdev);
kfree(cam->control_buffer);
+ kfree(cam);
}
-/*****************************************************************************/
static int zc0301_open(struct inode* inode, struct file* filp)
{
struct zc0301_device* cam;
int err = 0;
- /*
- This is the only safe way to prevent race conditions with
- disconnect
- */
- if (!down_read_trylock(&zc0301_disconnect))
+ if (!down_read_trylock(&zc0301_dev_lock))
return -ERESTARTSYS;
cam = video_get_drvdata(video_devdata(filp));
- if (mutex_lock_interruptible(&cam->dev_mutex)) {
- up_read(&zc0301_disconnect);
+ if (wait_for_completion_interruptible(&cam->probe)) {
+ up_read(&zc0301_dev_lock);
return -ERESTARTSYS;
}
+ kref_get(&cam->kref);
+
+ if (mutex_lock_interruptible(&cam->open_mutex)) {
+ kref_put(&cam->kref, zc0301_release_resources);
+ up_read(&zc0301_dev_lock);
+ return -ERESTARTSYS;
+ }
+
+ if (cam->state & DEV_DISCONNECTED) {
+ DBG(1, "Device not present");
+ err = -ENODEV;
+ goto out;
+ }
+
if (cam->users) {
DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor);
+ DBG(3, "Simultaneous opens are not supported");
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
err = -EWOULDBLOCK;
goto out;
}
- mutex_unlock(&cam->dev_mutex);
- err = wait_event_interruptible_exclusive(cam->open,
- cam->state & DEV_DISCONNECTED
+ DBG(2, "A blocking open() has been requested. Wait for the "
+ "device to be released...");
+ up_read(&zc0301_dev_lock);
+ err = wait_event_interruptible_exclusive(cam->wait_open,
+ (cam->state & DEV_DISCONNECTED)
|| !cam->users);
- if (err) {
- up_read(&zc0301_disconnect);
- return err;
- }
+ down_read(&zc0301_dev_lock);
+ if (err)
+ goto out;
if (cam->state & DEV_DISCONNECTED) {
- up_read(&zc0301_disconnect);
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
- mutex_lock(&cam->dev_mutex);
}
-
if (cam->state & DEV_MISCONFIGURED) {
err = zc0301_init(cam);
if (err) {
@@ -711,36 +726,32 @@ static int zc0301_open(struct inode* inode, struct file* filp)
DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor);
out:
- mutex_unlock(&cam->dev_mutex);
- up_read(&zc0301_disconnect);
+ mutex_unlock(&cam->open_mutex);
+ if (err)
+ kref_put(&cam->kref, zc0301_release_resources);
+ up_read(&zc0301_dev_lock);
return err;
}
static int zc0301_release(struct inode* inode, struct file* filp)
{
- struct zc0301_device* cam = video_get_drvdata(video_devdata(filp));
+ struct zc0301_device* cam;
- mutex_lock(&cam->dev_mutex); /* prevent disconnect() to be called */
+ down_write(&zc0301_dev_lock);
- zc0301_stop_transfer(cam);
+ cam = video_get_drvdata(video_devdata(filp));
+ zc0301_stop_transfer(cam);
zc0301_release_buffers(cam);
-
- if (cam->state & DEV_DISCONNECTED) {
- zc0301_release_resources(cam);
- usb_put_dev(cam->usbdev);
- mutex_unlock(&cam->dev_mutex);
- kfree(cam);
- return 0;
- }
-
cam->users--;
- wake_up_interruptible_nr(&cam->open, 1);
+ wake_up_interruptible_nr(&cam->wait_open, 1);
DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor);
- mutex_unlock(&cam->dev_mutex);
+ kref_put(&cam->kref, zc0301_release_resources);
+
+ up_write(&zc0301_dev_lock);
return 0;
}
@@ -775,7 +786,7 @@ zc0301_read(struct file* filp, char __user * buf, size_t count, loff_t* f_pos)
DBG(3, "Close and open the device again to choose the read "
"method");
mutex_unlock(&cam->fileop_mutex);
- return -EINVAL;
+ return -EBUSY;
}
if (cam->io == IO_NONE) {
@@ -953,7 +964,12 @@ static int zc0301_mmap(struct file* filp, struct vm_area_struct *vma)
return -EIO;
}
- if (cam->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) ||
+ if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
+ mutex_unlock(&cam->fileop_mutex);
+ return -EACCES;
+ }
+
+ if (cam->io != IO_MMAP ||
size != PAGE_ALIGN(cam->frame[0].buf.length)) {
mutex_unlock(&cam->fileop_mutex);
return -EINVAL;
@@ -984,7 +1000,6 @@ static int zc0301_mmap(struct file* filp, struct vm_area_struct *vma)
vma->vm_ops = &zc0301_vm_ops;
vma->vm_private_data = &cam->frame[i];
-
zc0301_vm_open(vma);
mutex_unlock(&cam->fileop_mutex);
@@ -1211,7 +1226,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
if (cam->frame[i].vma_use_count) {
DBG(3, "VIDIOC_S_CROP failed. "
"Unmap the buffers first.");
- return -EINVAL;
+ return -EBUSY;
}
if (!s->set_crop) {
@@ -1434,7 +1449,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
if (cam->frame[i].vma_use_count) {
DBG(3, "VIDIOC_S_FMT failed. "
"Unmap the buffers first.");
- return -EINVAL;
+ return -EBUSY;
}
if (cam->stream == STREAM_ON)
@@ -1544,14 +1559,14 @@ zc0301_vidioc_reqbufs(struct zc0301_device* cam, void __user * arg)
if (cam->io == IO_READ) {
DBG(3, "Close and open the device again to choose the mmap "
"I/O method");
- return -EINVAL;
+ return -EBUSY;
}
for (i = 0; i < cam->nbuffers; i++)
if (cam->frame[i].vma_use_count) {
DBG(3, "VIDIOC_REQBUFS failed. "
"Previous buffers are still mapped.");
- return -EINVAL;
+ return -EBUSY;
}
if (cam->stream == STREAM_ON)
@@ -1699,9 +1714,6 @@ zc0301_vidioc_streamon(struct zc0301_device* cam, void __user * arg)
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
return -EINVAL;
- if (list_empty(&cam->inqueue))
- return -EINVAL;
-
cam->stream = STREAM_ON;
DBG(3, "Stream on");
@@ -1949,8 +1961,6 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- mutex_init(&cam->dev_mutex);
-
DBG(2, "ZC0301[P] Image Processor and Control Chip detected "
"(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);
@@ -1982,7 +1992,7 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
cam->v4ldev->release = video_device_release;
video_set_drvdata(cam->v4ldev, cam);
- mutex_lock(&cam->dev_mutex);
+ init_completion(&cam->probe);
err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
video_nr[dev_nr]);
@@ -1992,7 +2002,7 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
DBG(1, "Free /dev/videoX node not found");
video_nr[dev_nr] = -1;
dev_nr = (dev_nr < ZC0301_MAX_DEVICES-1) ? dev_nr+1 : 0;
- mutex_unlock(&cam->dev_mutex);
+ complete_all(&cam->probe);
goto fail;
}
@@ -2004,8 +2014,10 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
dev_nr = (dev_nr < ZC0301_MAX_DEVICES-1) ? dev_nr+1 : 0;
usb_set_intfdata(intf, cam);
+ kref_init(&cam->kref);
+ usb_get_dev(cam->usbdev);
- mutex_unlock(&cam->dev_mutex);
+ complete_all(&cam->probe);
return 0;
@@ -2022,40 +2034,31 @@ fail:
static void zc0301_usb_disconnect(struct usb_interface* intf)
{
- struct zc0301_device* cam = usb_get_intfdata(intf);
-
- if (!cam)
- return;
+ struct zc0301_device* cam;
- down_write(&zc0301_disconnect);
+ down_write(&zc0301_dev_lock);
- mutex_lock(&cam->dev_mutex);
+ cam = usb_get_intfdata(intf);
DBG(2, "Disconnecting %s...", cam->v4ldev->name);
- wake_up_interruptible_all(&cam->open);
-
if (cam->users) {
DBG(2, "Device /dev/video%d is open! Deregistration and "
- "memory deallocation are deferred on close.",
+ "memory deallocation are deferred.",
cam->v4ldev->minor);
cam->state |= DEV_MISCONFIGURED;
zc0301_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
wake_up_interruptible(&cam->wait_frame);
wake_up(&cam->wait_stream);
- usb_get_dev(cam->usbdev);
- } else {
+ } else
cam->state |= DEV_DISCONNECTED;
- zc0301_release_resources(cam);
- }
- mutex_unlock(&cam->dev_mutex);
+ wake_up_interruptible_all(&cam->wait_open);
- if (!cam->users)
- kfree(cam);
+ kref_put(&cam->kref, zc0301_release_resources);
- up_write(&zc0301_disconnect);
+ up_write(&zc0301_dev_lock);
}
diff --git a/drivers/media/video/zc0301/zc0301_pas202bcb.c b/drivers/media/video/zc0301/zc0301_pas202bcb.c
index 3efb92a0d0da..24b0dfba357e 100644
--- a/drivers/media/video/zc0301/zc0301_pas202bcb.c
+++ b/drivers/media/video/zc0301/zc0301_pas202bcb.c
@@ -327,6 +327,7 @@ static struct zc0301_sensor pas202bcb = {
.height = 480,
.pixelformat = V4L2_PIX_FMT_JPEG,
.priv = 8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
},
};
diff --git a/drivers/media/video/zc0301/zc0301_pb0330.c b/drivers/media/video/zc0301/zc0301_pb0330.c
index 5784b1d1491c..9519aba3612e 100644
--- a/drivers/media/video/zc0301/zc0301_pb0330.c
+++ b/drivers/media/video/zc0301/zc0301_pb0330.c
@@ -157,6 +157,7 @@ static struct zc0301_sensor pb0330 = {
.height = 480,
.pixelformat = V4L2_PIX_FMT_JPEG,
.priv = 8,
+ .colorspace = V4L2_COLORSPACE_JPEG,
},
};
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 44e82cff9319..70fe6fc6cdd5 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -23,7 +23,7 @@
#define _ZC0301_SENSOR_H_
#include <linux/usb.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
#include <linux/device.h>
#include <linux/stddef.h>
#include <linux/errno.h>
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index cf0ed6cbb0e3..17118a490f81 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -183,14 +183,7 @@ static const int zoran_num_formats =
(sizeof(zoran_formats) / sizeof(struct zoran_format));
// RJ: Test only - want to test BUZ_USE_HIMEM even when CONFIG_BIGPHYS_AREA is defined
-#if !defined(CONFIG_BIGPHYS_AREA)
-//#undef CONFIG_BIGPHYS_AREA
-#define BUZ_USE_HIMEM
-#endif
-#if defined(CONFIG_BIGPHYS_AREA)
-# include <linux/bigphysarea.h>
-#endif
extern int *zr_debug;
@@ -250,7 +243,6 @@ static void jpg_fbuffer_free(struct file *file);
* Linux with the necessary memory left over).
*/
-#if defined(BUZ_USE_HIMEM) && !defined(CONFIG_BIGPHYS_AREA)
static unsigned long
get_high_mem (unsigned long size)
{
@@ -314,7 +306,6 @@ get_high_mem (unsigned long size)
return hi_mem_ph;
}
-#endif
static int
v4l_fbuffer_alloc (struct file *file)
@@ -323,9 +314,7 @@ v4l_fbuffer_alloc (struct file *file)
struct zoran *zr = fh->zr;
int i, off;
unsigned char *mem;
-#if defined(BUZ_USE_HIMEM) && !defined(CONFIG_BIGPHYS_AREA)
unsigned long pmem = 0;
-#endif
/* we might have old buffers lying around... */
if (fh->v4l_buffers.ready_to_be_freed) {
@@ -369,39 +358,6 @@ v4l_fbuffer_alloc (struct file *file)
ZR_DEVNAME(zr), i, (unsigned long) mem,
virt_to_bus(mem));
} else {
-#if defined(CONFIG_BIGPHYS_AREA)
- /* Use bigphysarea_alloc_pages */
-
- int n =
- (fh->v4l_buffers.buffer_size + PAGE_SIZE -
- 1) / PAGE_SIZE;
-
- mem =
- (unsigned char *) bigphysarea_alloc_pages(n, 0,
- GFP_KERNEL);
- if (mem == 0) {
- dprintk(1,
- KERN_ERR
- "%s: v4l_fbuffer_alloc() - bigphysarea_alloc_pages for V4L buf %d failed\n",
- ZR_DEVNAME(zr), i);
- v4l_fbuffer_free(file);
- return -ENOBUFS;
- }
- fh->v4l_buffers.buffer[i].fbuffer = mem;
- fh->v4l_buffers.buffer[i].fbuffer_phys =
- virt_to_phys(mem);
- fh->v4l_buffers.buffer[i].fbuffer_bus =
- virt_to_bus(mem);
- dprintk(4,
- KERN_INFO
- "%s: Bigphysarea frame %d mem 0x%x (bus: 0x%x)\n",
- ZR_DEVNAME(zr), i, (unsigned) mem,
- (unsigned) virt_to_bus(mem));
-
- /* Zero out the allocated memory */
- memset(fh->v4l_buffers.buffer[i].fbuffer, 0,
- fh->v4l_buffers.buffer_size);
-#elif defined(BUZ_USE_HIMEM)
/* Use high memory which has been left at boot time */
@@ -441,20 +397,6 @@ v4l_fbuffer_alloc (struct file *file)
fh->v4l_buffers.buffer[i].fbuffer_bus =
pmem + i * fh->v4l_buffers.buffer_size;
}
-#else
- /* No bigphysarea present, usage of high memory disabled,
- * but user wants buffers of more than MAX_KMALLOC_MEM */
- dprintk(1,
- KERN_ERR
- "%s: v4l_fbuffer_alloc() - no bigphysarea_patch present, usage of high memory disabled,\n",
- ZR_DEVNAME(zr));
- dprintk(1,
- KERN_ERR
- "%s: v4l_fbuffer_alloc() - sorry, could not allocate %d V4L buffers of size %d KB.\n",
- ZR_DEVNAME(zr), fh->v4l_buffers.num_buffers,
- fh->v4l_buffers.buffer_size >> 10);
- return -ENOBUFS;
-#endif
}
}
@@ -485,11 +427,6 @@ v4l_fbuffer_free (struct file *file)
ClearPageReserved(MAP_NR(mem + off));
kfree((void *) fh->v4l_buffers.buffer[i].fbuffer);
}
-#if defined(CONFIG_BIGPHYS_AREA)
- else
- bigphysarea_free_pages((void *) fh->v4l_buffers.
- buffer[i].fbuffer);
-#endif
fh->v4l_buffers.buffer[i].fbuffer = NULL;
}
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index b5d3364c94c7..6f1892585cbb 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -92,6 +92,7 @@ static struct usb_device_id device_table[] = {
{USB_DEVICE(0x0784, 0x0040), .driver_info = METHOD1 },
{USB_DEVICE(0x06d6, 0x0034), .driver_info = METHOD0 },
{USB_DEVICE(0x0a17, 0x0062), .driver_info = METHOD2 },
+ {USB_DEVICE(0x06d6, 0x003b), .driver_info = METHOD0 },
{} /* Terminating entry */
};
@@ -792,6 +793,7 @@ static int zr364xx_probe(struct usb_interface *intf,
{
struct usb_device *udev = interface_to_usbdev(intf);
struct zr364xx_camera *cam = NULL;
+ int err;
DBG("probing...");
@@ -799,12 +801,11 @@ static int zr364xx_probe(struct usb_interface *intf,
info("model %04x:%04x detected", udev->descriptor.idVendor,
udev->descriptor.idProduct);
- if ((cam =
- kmalloc(sizeof(struct zr364xx_camera), GFP_KERNEL)) == NULL) {
+ cam = kzalloc(sizeof(struct zr364xx_camera), GFP_KERNEL);
+ if (cam == NULL) {
info("cam: out of memory !");
- return -ENODEV;
+ return -ENOMEM;
}
- memset(cam, 0x00, sizeof(struct zr364xx_camera));
/* save the init method used by this camera */
cam->method = id->driver_info;
@@ -812,7 +813,7 @@ static int zr364xx_probe(struct usb_interface *intf,
if (cam->vdev == NULL) {
info("cam->vdev: out of memory !");
kfree(cam);
- return -ENODEV;
+ return -ENOMEM;
}
memcpy(cam->vdev, &zr364xx_template, sizeof(zr364xx_template));
video_set_drvdata(cam->vdev, cam);
@@ -858,12 +859,13 @@ static int zr364xx_probe(struct usb_interface *intf,
cam->brightness = 64;
mutex_init(&cam->lock);
- if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, -1) == -1) {
+ err = video_register_device(cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (err) {
info("video_register_device failed");
video_device_release(cam->vdev);
kfree(cam->buffer);
kfree(cam);
- return -ENODEV;
+ return err;
}
usb_set_intfdata(intf, cam);
@@ -905,7 +907,7 @@ static struct usb_driver zr364xx_driver = {
static int __init zr364xx_init(void)
{
int retval;
- retval = usb_register(&zr364xx_driver) < 0;
+ retval = usb_register(&zr364xx_driver);
if (retval)
info("usb_register failed!");
else
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index c88cc75ab49b..4494e0fd36c6 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -37,6 +37,7 @@ config FUSION_FC
LSIFC929
LSIFC929X
LSIFC929XL
+ Brocade FC 410/420
config FUSION_SAS
tristate "Fusion MPT ScsiHost drivers for SAS"
diff --git a/drivers/message/fusion/linux_compat.h b/drivers/message/fusion/linux_compat.h
deleted file mode 100644
index bb2bf5aa0b62..000000000000
--- a/drivers/message/fusion/linux_compat.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* drivers/message/fusion/linux_compat.h */
-
-#ifndef FUSION_LINUX_COMPAT_H
-#define FUSION_LINUX_COMPAT_H
-
-#include <linux/version.h>
-#include <scsi/scsi_device.h>
-
-#endif /* _LINUX_COMPAT_H */
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 75223bf24ae8..6a92e3d118fe 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi.h
* Title: MPI Message independent structures and definitions
* Creation Date: July 27, 2000
*
- * mpi.h Version: 01.05.12
+ * mpi.h Version: 01.05.13
*
* Version History
* ---------------
@@ -78,6 +78,7 @@
* 08-30-05 01.05.10 Added 2 new IOCStatus codes for Target.
* 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
* 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
+ * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -108,7 +109,7 @@
/* Note: The major versions of 0xe0 through 0xff are reserved */
/* versioning for this MPI header set */
-#define MPI_HEADER_VERSION_UNIT (0x0E)
+#define MPI_HEADER_VERSION_UNIT (0x10)
#define MPI_HEADER_VERSION_DEV (0x00)
#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 0e4c8e77a81d..eda769730e39 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi_cnfg.h
* Title: MPI Config message, structures, and Pages
* Creation Date: July 27, 2000
*
- * mpi_cnfg.h Version: 01.05.13
+ * mpi_cnfg.h Version: 01.05.15
*
* Version History
* ---------------
@@ -293,6 +293,21 @@
* Added more AccessStatus values for SAS Device Page 0.
* Added bit for SATA Asynchronous Notification Support in
* Flags field of SAS Device Page 0.
+ * 02-28-07 01.05.14 Added ExtFlags field to Manufacturing Page 4.
+ * Added Disable SMART Polling for CapabilitiesFlags of
+ * IOC Page 6.
+ * Added Disable SMART Polling to DeviceSettings of BIOS
+ * Page 1.
+ * Added Multi-Port Domain bit for DiscoveryStatus field
+ * of SAS IO Unit Page.
+ * Added Multi-Port Domain Illegal flag for SAS IO Unit
+ * Page 1 AdditionalControlFlags field.
+ * 05-24-07 01.05.15 Added Hide Physical Disks with Non-Integrated RAID
+ * Metadata bit to Manufacturing Page 4 ExtFlags field.
+ * Added Internal Connector to End Device Present bit to
+ * Expander Page 0 Flags field.
+ * Fixed define for
+ * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
* --------------------------------------------------------------------------
*/
@@ -639,7 +654,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
U8 InfoSize1; /* 0Bh */
U8 InquirySize; /* 0Ch */
U8 Flags; /* 0Dh */
- U16 Reserved2; /* 0Eh */
+ U16 ExtFlags; /* 0Eh */
U8 InquiryData[56]; /* 10h */
U32 ISVolumeSettings; /* 48h */
U32 IMEVolumeSettings; /* 4Ch */
@@ -658,7 +673,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
} CONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4,
ManufacturingPage4_t, MPI_POINTER pManufacturingPage4_t;
-#define MPI_MANUFACTURING4_PAGEVERSION (0x04)
+#define MPI_MANUFACTURING4_PAGEVERSION (0x05)
/* defines for the Flags field */
#define MPI_MANPAGE4_FORCE_BAD_BLOCK_TABLE (0x80)
@@ -670,6 +685,12 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
#define MPI_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x02)
#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01)
+/* defines for the ExtFlags field */
+#define MPI_MANPAGE4_EXTFLAGS_HIDE_NON_IR_METADATA (0x0008)
+#define MPI_MANPAGE4_EXTFLAGS_SAS_CACHE_DISABLE (0x0004)
+#define MPI_MANPAGE4_EXTFLAGS_SATA_CACHE_DISABLE (0x0002)
+#define MPI_MANPAGE4_EXTFLAGS_LEGACY_MODE (0x0001)
+
#ifndef MPI_MANPAGE5_NUM_FORCEWWID
#define MPI_MANPAGE5_NUM_FORCEWWID (1)
@@ -781,7 +802,7 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_9
} CONFIG_PAGE_MANUFACTURING_9, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_9,
ManufacturingPage9_t, MPI_POINTER pManufacturingPage9_t;
-#define MPI_MANUFACTURING6_PAGEVERSION (0x00)
+#define MPI_MANUFACTURING9_PAGEVERSION (0x00)
typedef struct _CONFIG_PAGE_MANUFACTURING_10
@@ -1138,6 +1159,8 @@ typedef struct _CONFIG_PAGE_IOC_6
/* IOC Page 6 Capabilities Flags */
+#define MPI_IOCPAGE6_CAP_FLAGS_DISABLE_SMART_POLLING (0x00000008)
+
#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE (0x00000006)
#define MPI_IOCPAGE6_CAP_FLAGS_64MB_METADATA_SIZE (0x00000000)
#define MPI_IOCPAGE6_CAP_FLAGS_512MB_METADATA_SIZE (0x00000002)
@@ -1208,6 +1231,7 @@ typedef struct _CONFIG_PAGE_BIOS_1
#define MPI_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
/* values for the DeviceSettings field */
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
#define MPI_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
#define MPI_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
#define MPI_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
@@ -2281,11 +2305,11 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
typedef struct _CONFIG_PAGE_RAID_VOL_1
{
CONFIG_PAGE_HEADER Header; /* 00h */
- U8 VolumeID; /* 01h */
- U8 VolumeBus; /* 02h */
- U8 VolumeIOC; /* 03h */
- U8 Reserved0; /* 04h */
- U8 GUID[24]; /* 05h */
+ U8 VolumeID; /* 04h */
+ U8 VolumeBus; /* 05h */
+ U8 VolumeIOC; /* 06h */
+ U8 Reserved0; /* 07h */
+ U8 GUID[24]; /* 08h */
U8 Name[32]; /* 20h */
U64 WWID; /* 40h */
U32 Reserved1; /* 48h */
@@ -2340,7 +2364,7 @@ typedef struct _RAID_PHYS_DISK0_STATUS
} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS,
RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t;
-/* RAID Volume 2 IM Physical Disk DiskStatus flags */
+/* RAID Physical Disk PhysDiskStatus flags */
#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
@@ -2544,6 +2568,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400)
#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
#define MPI_SAS_IOUNIT0_DS_MAX_SATA_TARGETS (0x00001000)
+#define MPI_SAS_IOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
@@ -2607,6 +2632,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
#define MPI_SAS_IOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI_SAS_IOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI_SAS_IOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
#define MPI_SAS_IOUNIT1_ACONTROL_HIDE_NONZERO_ATTACHED_PHY_IDENT (0x0020)
#define MPI_SAS_IOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
@@ -2734,6 +2760,7 @@ typedef struct _CONFIG_PAGE_SAS_EXPANDER_0
#define MPI_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
/* values for SAS Expander Page 0 Flags field */
+#define MPI_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x04)
#define MPI_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x02)
#define MPI_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x01)
@@ -2774,7 +2801,7 @@ typedef struct _CONFIG_PAGE_SAS_EXPANDER_1
/* see mpi_sas.h for values for SAS Expander Page 1 AttachedDeviceInfo values */
/* values for SAS Expander Page 1 DiscoveryInfo field */
-#define MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY DISABLED (0x04)
+#define MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
@@ -2895,11 +2922,11 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
U8 AttachedPhyIdentifier; /* 16h */
U8 Reserved2; /* 17h */
U32 AttachedDeviceInfo; /* 18h */
- U8 ProgrammedLinkRate; /* 20h */
- U8 HwLinkRate; /* 21h */
- U8 ChangeCount; /* 22h */
- U8 Flags; /* 23h */
- U32 PhyInfo; /* 24h */
+ U8 ProgrammedLinkRate; /* 1Ch */
+ U8 HwLinkRate; /* 1Dh */
+ U8 ChangeCount; /* 1Eh */
+ U8 Flags; /* 1Fh */
+ U32 PhyInfo; /* 20h */
} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index ddc7ae029dd3..a1f479057ea3 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -3,28 +3,28 @@
MPI Header File Change History
==============================
- Copyright (c) 2000-2006 LSI Logic Corporation.
+ Copyright (c) 2000-2007 LSI Logic Corporation.
---------------------------------------
- Header Set Release Version: 01.05.14
- Header Set Release Date: 10-11-06
+ Header Set Release Version: 01.05.16
+ Header Set Release Date: 05-24-07
---------------------------------------
Filename Current version Prior version
---------- --------------- -------------
- mpi.h 01.05.12 01.05.11
- mpi_ioc.h 01.05.12 01.05.11
- mpi_cnfg.h 01.05.13 01.05.12
- mpi_init.h 01.05.08 01.05.07
+ mpi.h 01.05.13 01.05.12
+ mpi_ioc.h 01.05.14 01.05.13
+ mpi_cnfg.h 01.05.15 01.05.14
+ mpi_init.h 01.05.09 01.05.09
mpi_targ.h 01.05.06 01.05.06
mpi_fc.h 01.05.01 01.05.01
mpi_lan.h 01.05.01 01.05.01
- mpi_raid.h 01.05.02 01.05.02
+ mpi_raid.h 01.05.03 01.05.03
mpi_tool.h 01.05.03 01.05.03
mpi_inb.h 01.05.01 01.05.01
- mpi_sas.h 01.05.04 01.05.03
+ mpi_sas.h 01.05.04 01.05.04
mpi_type.h 01.05.02 01.05.02
- mpi_history.txt 01.05.14 01.05.13
+ mpi_history.txt 01.05.14 01.05.14
* Date Version Description
@@ -95,6 +95,7 @@ mpi.h
* 08-30-05 01.05.10 Added 2 new IOCStatus codes for Target.
* 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
* 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
+ * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
mpi_ioc.h
@@ -191,6 +192,13 @@ mpi_ioc.h
* data structure.
* Added new ImageType values for FWDownload and FWUpload
* requests.
+ * 02-28-07 01.05.13 Added MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT for SAS
+ * Broadcast Event Data (replacing _RESERVED2).
+ * For Discovery Error Event Data DiscoveryStatus field,
+ * replaced _MULTPL_PATHS with _UNSUPPORTED_DEVICE and
+ * added _MULTI_PORT_DOMAIN.
+ * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
+ * Added Common Boot Block type to FWUpload Request.
* --------------------------------------------------------------------------
mpi_cnfg.h
@@ -473,6 +481,21 @@ mpi_cnfg.h
* Added more AccessStatus values for SAS Device Page 0.
* Added bit for SATA Asynchronous Notification Support in
* Flags field of SAS Device Page 0.
+ * 02-28-07 01.05.14 Added ExtFlags field to Manufacturing Page 4.
+ * Added Disable SMART Polling for CapabilitiesFlags of
+ * IOC Page 6.
+ * Added Disable SMART Polling to DeviceSettings of BIOS
+ * Page 1.
+ * Added Multi-Port Domain bit for DiscoveryStatus field
+ * of SAS IO Unit Page.
+ * Added Multi-Port Domain Illegal flag for SAS IO Unit
+ * Page 1 AdditionalControlFlags field.
+ * 05-24-07 01.05.15 Added Hide Physical Disks with Non-Integrated RAID
+ * Metadata bit to Manufacturing Page 4 ExtFlags field.
+ * Added Internal Connector to End Device Present bit to
+ * Expander Page 0 Flags field.
+ * Fixed define for
+ * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
* --------------------------------------------------------------------------
mpi_init.h
@@ -517,6 +540,8 @@ mpi_init.h
* unique in the first 32 characters.
* 03-27-06 01.05.07 Added Task Management type of Clear ACA.
* 10-11-06 01.05.08 Shortened define for Task Management type of Clear ACA.
+ * 02-28-07 01.05.09 Defined two new MsgFlags bits for SCSI Task Management
+ * Request: Do Not Send Task IU and Soft Reset Option.
* --------------------------------------------------------------------------
mpi_targ.h
@@ -571,7 +596,7 @@ mpi_fc.h
* 11-02-00 01.01.01 Original release for post 1.0 work
* 12-04-00 01.01.02 Added messages for Common Transport Send and
* Primitive Send.
- * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix
+ * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
* and modified the FcPrimitiveSend flags.
* 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
* field.
@@ -634,6 +659,8 @@ mpi_raid.h
* 08-19-04 01.05.01 Original release for MPI v1.5.
* 01-15-05 01.05.02 Added defines for the two new RAID Actions for
* _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
+ * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
+ * associated defines.
* --------------------------------------------------------------------------
mpi_tool.h
@@ -682,7 +709,22 @@ mpi_type.h
mpi_history.txt Parts list history
-Filename 01.05.13 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09
+Filename 01.05.15 01.05.15
+---------- -------- --------
+mpi.h 01.05.12 01.05.13
+mpi_ioc.h 01.05.13 01.05.14
+mpi_cnfg.h 01.05.14 01.05.15
+mpi_init.h 01.05.09 01.05.09
+mpi_targ.h 01.05.06 01.05.06
+mpi_fc.h 01.05.01 01.05.01
+mpi_lan.h 01.05.01 01.05.01
+mpi_raid.h 01.05.03 01.05.03
+mpi_tool.h 01.05.03 01.05.03
+mpi_inb.h 01.05.01 01.05.01
+mpi_sas.h 01.05.04 01.05.04
+mpi_type.h 01.05.02 01.05.02
+
+Filename 01.05.14 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09
---------- -------- -------- -------- -------- -------- --------
mpi.h 01.05.12 01.05.11 01.05.10 01.05.09 01.05.08 01.05.07
mpi_ioc.h 01.05.12 01.05.11 01.05.10 01.05.09 01.05.09 01.05.08
diff --git a/drivers/message/fusion/lsi/mpi_inb.h b/drivers/message/fusion/lsi/mpi_inb.h
deleted file mode 100644
index ff167309ba27..000000000000
--- a/drivers/message/fusion/lsi/mpi_inb.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2003-2004 LSI Logic Corporation.
- *
- *
- * Name: mpi_inb.h
- * Title: MPI Inband structures and definitions
- * Creation Date: September 30, 2003
- *
- * mpi_inb.h Version: 01.05.01
- *
- * Version History
- * ---------------
- *
- * Date Version Description
- * -------- -------- ------------------------------------------------------
- * 05-11-04 01.03.01 Original release.
- * 08-19-04 01.05.01 Original release for MPI v1.5.
- * --------------------------------------------------------------------------
- */
-
-#ifndef MPI_INB_H
-#define MPI_INB_H
-
-/******************************************************************************
-*
-* I n b a n d M e s s a g e s
-*
-*******************************************************************************/
-
-
-/****************************************************************************/
-/* Inband Buffer Post Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_BUFFER_POST_REQUEST
-{
- U8 Reserved1; /* 00h */
- U8 BufferCount; /* 01h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- SGE_TRANS_SIMPLE_UNION SGL; /* 10h */
-} MSG_INBAND_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REQUEST,
- MpiInbandBufferPostRequest_t , MPI_POINTER pMpiInbandBufferPostRequest_t;
-
-
-typedef struct _WWN_FC_FORMAT
-{
- U64 NodeName; /* 00h */
- U64 PortName; /* 08h */
-} WWN_FC_FORMAT, MPI_POINTER PTR_WWN_FC_FORMAT,
- WwnFcFormat_t, MPI_POINTER pWwnFcFormat_t;
-
-typedef struct _WWN_SAS_FORMAT
-{
- U64 WorldWideID; /* 00h */
- U32 Reserved1; /* 08h */
- U32 Reserved2; /* 0Ch */
-} WWN_SAS_FORMAT, MPI_POINTER PTR_WWN_SAS_FORMAT,
- WwnSasFormat_t, MPI_POINTER pWwnSasFormat_t;
-
-typedef union _WWN_INBAND_FORMAT
-{
- WWN_FC_FORMAT Fc;
- WWN_SAS_FORMAT Sas;
-} WWN_INBAND_FORMAT, MPI_POINTER PTR_WWN_INBAND_FORMAT,
- WwnInbandFormat, MPI_POINTER pWwnInbandFormat;
-
-
-/* Inband Buffer Post reply message */
-
-typedef struct _MSG_INBAND_BUFFER_POST_REPLY
-{
- U16 Reserved1; /* 00h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
- U32 TransferLength; /* 14h */
- U32 TransactionContext; /* 18h */
- WWN_INBAND_FORMAT Wwn; /* 1Ch */
- U32 IOCIdentifier[4]; /* 2Ch */
-} MSG_INBAND_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REPLY,
- MpiInbandBufferPostReply_t, MPI_POINTER pMpiInbandBufferPostReply_t;
-
-
-/****************************************************************************/
-/* Inband Send Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_SEND_REQUEST
-{
- U16 Reserved1; /* 00h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- WWN_INBAND_FORMAT Wwn; /* 10h */
- U32 Reserved5; /* 20h */
- SGE_IO_UNION SGL; /* 24h */
-} MSG_INBAND_SEND_REQUEST, MPI_POINTER PTR_MSG_INBAND_SEND_REQUEST,
- MpiInbandSendRequest_t , MPI_POINTER pMpiInbandSendRequest_t;
-
-
-/* Inband Send reply message */
-
-typedef struct _MSG_INBAND_SEND_REPLY
-{
- U16 Reserved1; /* 00h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
- U32 ResponseLength; /* 14h */
-} MSG_INBAND_SEND_REPLY, MPI_POINTER PTR_MSG_INBAND_SEND_REPLY,
- MpiInbandSendReply_t, MPI_POINTER pMpiInbandSendReply_t;
-
-
-/****************************************************************************/
-/* Inband Response Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_RSP_REQUEST
-{
- U16 Reserved1; /* 00h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- WWN_INBAND_FORMAT Wwn; /* 10h */
- U32 IOCIdentifier[4]; /* 20h */
- U32 ResponseLength; /* 30h */
- SGE_IO_UNION SGL; /* 34h */
-} MSG_INBAND_RSP_REQUEST, MPI_POINTER PTR_MSG_INBAND_RSP_REQUEST,
- MpiInbandRspRequest_t , MPI_POINTER pMpiInbandRspRequest_t;
-
-
-/* Inband Response reply message */
-
-typedef struct _MSG_INBAND_RSP_REPLY
-{
- U16 Reserved1; /* 00h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
-} MSG_INBAND_RSP_REPLY, MPI_POINTER PTR_MSG_INBAND_RSP_REPLY,
- MpiInbandRspReply_t, MPI_POINTER pMpiInbandRspReply_t;
-
-
-/****************************************************************************/
-/* Inband Abort Request */
-/****************************************************************************/
-
-typedef struct _MSG_INBAND_ABORT_REQUEST
-{
- U8 Reserved1; /* 00h */
- U8 AbortType; /* 01h */
- U8 ChainOffset; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U32 Reserved4; /* 0Ch */
- U32 ContextToAbort; /* 10h */
-} MSG_INBAND_ABORT_REQUEST, MPI_POINTER PTR_MSG_INBAND_ABORT_REQUEST,
- MpiInbandAbortRequest_t , MPI_POINTER pMpiInbandAbortRequest_t;
-
-#define MPI_INBAND_ABORT_TYPE_ALL_BUFFERS (0x00)
-#define MPI_INBAND_ABORT_TYPE_EXACT_BUFFER (0x01)
-#define MPI_INBAND_ABORT_TYPE_SEND_REQUEST (0x02)
-#define MPI_INBAND_ABORT_TYPE_RESPONSE_REQUEST (0x03)
-
-
-/* Inband Abort reply message */
-
-typedef struct _MSG_INBAND_ABORT_REPLY
-{
- U8 Reserved1; /* 00h */
- U8 AbortType; /* 01h */
- U8 MsgLength; /* 02h */
- U8 Function; /* 03h */
- U16 Reserved2; /* 04h */
- U8 Reserved3; /* 06h */
- U8 MsgFlags; /* 07h */
- U32 MsgContext; /* 08h */
- U16 Reserved4; /* 0Ch */
- U16 IOCStatus; /* 0Eh */
- U32 IOCLogInfo; /* 10h */
-} MSG_INBAND_ABORT_REPLY, MPI_POINTER PTR_MSG_INBAND_ABORT_REPLY,
- MpiInbandAbortReply_t, MPI_POINTER pMpiInbandAbortReply_t;
-
-
-#endif
-
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index ec9dff2249a7..3a02615f12d6 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi_init.h
* Title: MPI initiator mode messages and structures
* Creation Date: June 8, 2000
*
- * mpi_init.h Version: 01.05.08
+ * mpi_init.h Version: 01.05.09
*
* Version History
* ---------------
@@ -54,6 +54,8 @@
* unique in the first 32 characters.
* 03-27-06 01.05.07 Added Task Management type of Clear ACA.
* 10-11-06 01.05.08 Shortened define for Task Management type of Clear ACA.
+ * 02-28-07 01.05.09 Defined two new MsgFlags bits for SCSI Task Management
+ * Request: Do Not Send Task IU and Soft Reset Option.
* --------------------------------------------------------------------------
*/
@@ -432,10 +434,14 @@ typedef struct _MSG_SCSI_TASK_MGMT
#define MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
/* MsgFlags bits */
+#define MPI_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00)
#define MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION (0x02)
#define MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION (0x04)
+#define MPI_SCSITASKMGMT_MSGFLAGS_SOFT_RESET_OPTION (0x08)
+
/* SCSI Task Management Reply */
typedef struct _MSG_SCSI_TASK_MGMT_REPLY
{
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 6c33e3353375..b1893d185bc4 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2006 LSI Logic Corporation.
+ * Copyright (c) 2000-2007 LSI Logic Corporation.
*
*
* Name: mpi_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: August 11, 2000
*
- * mpi_ioc.h Version: 01.05.12
+ * mpi_ioc.h Version: 01.05.14
*
* Version History
* ---------------
@@ -106,6 +106,13 @@
* data structure.
* Added new ImageType values for FWDownload and FWUpload
* requests.
+ * 02-28-07 01.05.13 Added MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT for SAS
+ * Broadcast Event Data (replacing _RESERVED2).
+ * For Discovery Error Event Data DiscoveryStatus field,
+ * replaced _MULTPL_PATHS with _UNSUPPORTED_DEVICE and
+ * added _MULTI_PORT_DOMAIN.
+ * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
+ * Added Common Boot Block type to FWUpload Request.
* --------------------------------------------------------------------------
*/
@@ -792,7 +799,7 @@ typedef struct _EVENT_DATA_SAS_BROADCAST_PRIMITIVE
#define MPI_EVENT_PRIMITIVE_CHANGE (0x01)
#define MPI_EVENT_PRIMITIVE_EXPANDER (0x03)
-#define MPI_EVENT_PRIMITIVE_RESERVED2 (0x04)
+#define MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
#define MPI_EVENT_PRIMITIVE_RESERVED3 (0x05)
#define MPI_EVENT_PRIMITIVE_RESERVED4 (0x06)
#define MPI_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
@@ -857,8 +864,9 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
#define MPI_EVENT_DSCVRY_ERR_DS_SMP_CRC_ERROR (0x00000100)
#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200)
#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400)
-#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800)
+#define MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE (0x00000800)
#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
+#define MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN (0x00002000)
/* SAS SMP Error Event data */
@@ -990,6 +998,7 @@ typedef struct _MSG_FW_DOWNLOAD
#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
#define MPI_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
typedef struct _FWDownloadTCSGE
@@ -1038,17 +1047,18 @@ typedef struct _MSG_FW_UPLOAD
} MSG_FW_UPLOAD, MPI_POINTER PTR_MSG_FW_UPLOAD,
FWUpload_t, MPI_POINTER pFWUpload_t;
-#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00)
-#define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
-#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
-#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03)
-#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER (0x04)
-#define MPI_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
-#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
-#define MPI_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
-#define MPI_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
-#define MPI_FW_UPLOAD_ITYPE_MEGARAID (0x09)
-#define MPI_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00)
+#define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03)
+#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER (0x04)
+#define MPI_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
typedef struct _FWUploadTCSGE
{
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
index 802255d2747c..32819b1ec8ec 100644
--- a/drivers/message/fusion/lsi/mpi_raid.h
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2001-2005 LSI Logic Corporation.
+ * Copyright (c) 2001-2007 LSI Logic Corporation.
*
*
* Name: mpi_raid.h
* Title: MPI RAID message and structures
* Creation Date: February 27, 2001
*
- * mpi_raid.h Version: 01.05.02
+ * mpi_raid.h Version: 01.05.03
*
* Version History
* ---------------
@@ -32,6 +32,8 @@
* 08-19-04 01.05.01 Original release for MPI v1.5.
* 01-15-05 01.05.02 Added defines for the two new RAID Actions for
* _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
+ * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
+ * associated defines.
* --------------------------------------------------------------------------
*/
@@ -90,6 +92,7 @@ typedef struct _MSG_RAID_ACTION
#define MPI_RAID_ACTION_INACTIVATE_VOLUME (0x12)
#define MPI_RAID_ACTION_SET_RESYNC_RATE (0x13)
#define MPI_RAID_ACTION_SET_DATA_SCRUB_RATE (0x14)
+#define MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001)
@@ -111,6 +114,10 @@ typedef struct _MSG_RAID_ACTION
/* ActionDataWord defines for use with MPI_RAID_ACTION_SET_DATA_SCRUB_RATE action */
#define MPI_RAID_ACTION_ADATA_DATA_SCRUB_RATE_MASK (0x000000FF)
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x00000001)
+#define MPI_RAID_ACTION_ADATA_MASK_FW_UPDATE_TIMEOUT (0x0000FF00)
+#define MPI_RAID_ACTION_ADATA_SHIFT_FW_UPDATE_TIMEOUT (8)
/* RAID Action reply message */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5021d1a2a1d4..04f75e24dcec 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -64,6 +64,7 @@
#endif
#include "mptbase.h"
+#include "lsi/mpi_log_fc.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT base driver"
@@ -160,6 +161,7 @@ static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_timer_expired(unsigned long data);
+static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
@@ -1130,6 +1132,248 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
return -1;
}
+/**
+ * mpt_get_product_name - returns product string
+ * @vendor: pci vendor id
+ * @device: pci device id
+ * @revision: pci revision id
+ * @prod_name: string returned
+ *
+ * Returns product string displayed when driver loads,
+ * in /proc/mpt/summary and /sys/class/scsi_host/host<X>/version_product
+ *
+ **/
+static void
+mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
+{
+ char *product_str = NULL;
+
+ if (vendor == PCI_VENDOR_ID_BROCADE) {
+ switch (device)
+ {
+ case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "BRE040 A0";
+ break;
+ case 0x01:
+ product_str = "BRE040 A1";
+ break;
+ default:
+ product_str = "BRE040";
+ break;
+ }
+ break;
+ }
+ goto out;
+ }
+
+ switch (device)
+ {
+ case MPI_MANUFACTPAGE_DEVICEID_FC909:
+ product_str = "LSIFC909 B1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC919:
+ product_str = "LSIFC919 B0";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC929:
+ product_str = "LSIFC929 B0";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC919X:
+ if (revision < 0x80)
+ product_str = "LSIFC919X A0";
+ else
+ product_str = "LSIFC919XL A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC929X:
+ if (revision < 0x80)
+ product_str = "LSIFC929X A0";
+ else
+ product_str = "LSIFC929XL A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC939X:
+ product_str = "LSIFC939X A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC949X:
+ product_str = "LSIFC949X A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSIFC949E A0";
+ break;
+ case 0x01:
+ product_str = "LSIFC949E A1";
+ break;
+ default:
+ product_str = "LSIFC949E";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_53C1030:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSI53C1030 A0";
+ break;
+ case 0x01:
+ product_str = "LSI53C1030 B0";
+ break;
+ case 0x03:
+ product_str = "LSI53C1030 B1";
+ break;
+ case 0x07:
+ product_str = "LSI53C1030 B2";
+ break;
+ case 0x08:
+ product_str = "LSI53C1030 C0";
+ break;
+ case 0x80:
+ product_str = "LSI53C1030T A0";
+ break;
+ case 0x83:
+ product_str = "LSI53C1030T A2";
+ break;
+ case 0x87:
+ product_str = "LSI53C1030T A3";
+ break;
+ case 0xc1:
+ product_str = "LSI53C1020A A1";
+ break;
+ default:
+ product_str = "LSI53C1030";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
+ switch (revision)
+ {
+ case 0x03:
+ product_str = "LSI53C1035 A2";
+ break;
+ case 0x04:
+ product_str = "LSI53C1035 B0";
+ break;
+ default:
+ product_str = "LSI53C1035";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1064:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1064 A1";
+ break;
+ case 0x01:
+ product_str = "LSISAS1064 A2";
+ break;
+ case 0x02:
+ product_str = "LSISAS1064 A3";
+ break;
+ case 0x03:
+ product_str = "LSISAS1064 A4";
+ break;
+ default:
+ product_str = "LSISAS1064";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1064E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1064E A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1064E B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1064E B1";
+ break;
+ case 0x04:
+ product_str = "LSISAS1064E B2";
+ break;
+ case 0x08:
+ product_str = "LSISAS1064E B3";
+ break;
+ default:
+ product_str = "LSISAS1064E";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1068:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1068 A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1068 B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1068 B1";
+ break;
+ default:
+ product_str = "LSISAS1068";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1068E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1068E A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1068E B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1068E B1";
+ break;
+ case 0x04:
+ product_str = "LSISAS1068E B2";
+ break;
+ case 0x08:
+ product_str = "LSISAS1068E B3";
+ break;
+ default:
+ product_str = "LSISAS1068E";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1078:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1078 A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1078 B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1078 C0";
+ break;
+ case 0x03:
+ product_str = "LSISAS1078 C1";
+ break;
+ case 0x04:
+ product_str = "LSISAS1078 C2";
+ break;
+ default:
+ product_str = "LSISAS1078";
+ break;
+ }
+ break;
+ }
+
+ out:
+ if (product_str)
+ sprintf(prod_name, "%s", product_str);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_attach - Install a PCI intelligent MPT adapter.
@@ -1273,23 +1517,23 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->pio_chip = (SYSIF_REGS __iomem *)pmem;
}
- if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) {
- ioc->prod_name = "LSIFC909";
- ioc->bus_type = FC;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
- ioc->prod_name = "LSIFC929";
- ioc->bus_type = FC;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919) {
- ioc->prod_name = "LSIFC919";
- ioc->bus_type = FC;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929X) {
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+
+ switch (pdev->device)
+ {
+ case MPI_MANUFACTPAGE_DEVICEID_FC939X:
+ case MPI_MANUFACTPAGE_DEVICEID_FC949X:
+ ioc->errata_flag_1064 = 1;
+ case MPI_MANUFACTPAGE_DEVICEID_FC909:
+ case MPI_MANUFACTPAGE_DEVICEID_FC929:
+ case MPI_MANUFACTPAGE_DEVICEID_FC919:
+ case MPI_MANUFACTPAGE_DEVICEID_FC949E:
ioc->bus_type = FC;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVICEID_FC929X:
if (revision < XL_929) {
- ioc->prod_name = "LSIFC929X";
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
@@ -1297,75 +1541,46 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
} else {
- ioc->prod_name = "LSIFC929XL";
/* 929XL Chip Fix. Set MMRBC to 0x08.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd |= 0x08;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919X) {
- ioc->prod_name = "LSIFC919X";
ioc->bus_type = FC;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVICEID_FC919X:
/* 919X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC939X) {
- ioc->prod_name = "LSIFC939X";
- ioc->bus_type = FC;
- ioc->errata_flag_1064 = 1;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC949X) {
- ioc->prod_name = "LSIFC949X";
- ioc->bus_type = FC;
- ioc->errata_flag_1064 = 1;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC949E) {
- ioc->prod_name = "LSIFC949E";
ioc->bus_type = FC;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
- ioc->prod_name = "LSI53C1030";
- ioc->bus_type = SPI;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVID_53C1030:
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
if (revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
}
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
- ioc->prod_name = "LSI53C1035";
+
+ case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
ioc->bus_type = SPI;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
- ioc->prod_name = "LSISAS1064";
- ioc->bus_type = SAS;
- ioc->errata_flag_1064 = 1;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
- ioc->prod_name = "LSISAS1068";
- ioc->bus_type = SAS;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVID_SAS1064:
+ case MPI_MANUFACTPAGE_DEVID_SAS1068:
ioc->errata_flag_1064 = 1;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
- ioc->prod_name = "LSISAS1064E";
- ioc->bus_type = SAS;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
- ioc->prod_name = "LSISAS1068E";
- ioc->bus_type = SAS;
- }
- else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
- ioc->prod_name = "LSISAS1078";
+
+ case MPI_MANUFACTPAGE_DEVID_SAS1064E:
+ case MPI_MANUFACTPAGE_DEVID_SAS1068E:
+ case MPI_MANUFACTPAGE_DEVID_SAS1078:
ioc->bus_type = SAS;
}
@@ -1879,6 +2094,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
}
GetIoUnitPage2(ioc);
+ mpt_get_manufacturing_pg_0(ioc);
}
/*
@@ -2137,8 +2353,8 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
int i = 0;
printk(KERN_INFO "%s: ", ioc->name);
- if (ioc->prod_name && strlen(ioc->prod_name) > 3)
- printk("%s: ", ioc->prod_name+3);
+ if (ioc->prod_name)
+ printk("%s: ", ioc->prod_name);
printk("Capabilities={");
if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
@@ -5189,6 +5405,49 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
return;
}
+static void
+mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t buf_dma;
+ ManufacturingPage0_t *pbuf = NULL;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+
+ hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = 10;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!cfg.cfghdr.hdr->PageLength)
+ goto out;
+
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ if (!pbuf)
+ goto out;
+
+ cfg.physAddr = buf_dma;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
+ memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
+ memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
+
+ out:
+
+ if (pbuf)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+}
+
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendEventNotification - Send EventNotification (on or off) request to adapter
@@ -6349,14 +6608,37 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
static void
mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
- static char *subcl_str[8] = {
- "FCP Initiator", "FCP Target", "LAN", "MPI Message Layer",
- "FC Link", "Context Manager", "Invalid Field Offset", "State Change Info"
- };
- u8 subcl = (log_info >> 24) & 0x7;
+ char *desc = "unknown";
+
+ switch (log_info & 0xFF000000) {
+ case MPI_IOCLOGINFO_FC_INIT_BASE:
+ desc = "FCP Initiator";
+ break;
+ case MPI_IOCLOGINFO_FC_TARGET_BASE:
+ desc = "FCP Target";
+ break;
+ case MPI_IOCLOGINFO_FC_LAN_BASE:
+ desc = "LAN";
+ break;
+ case MPI_IOCLOGINFO_FC_MSG_BASE:
+ desc = "MPI Message Layer";
+ break;
+ case MPI_IOCLOGINFO_FC_LINK_BASE:
+ desc = "FC Link";
+ break;
+ case MPI_IOCLOGINFO_FC_CTX_BASE:
+ desc = "Context Manager";
+ break;
+ case MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET:
+ desc = "Invalid Field Offset";
+ break;
+ case MPI_IOCLOGINFO_FC_STATE_CHANGE:
+ desc = "State Change Info";
+ break;
+ }
- printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubCl={%s}\n",
- ioc->name, log_info, subcl_str[subcl]);
+ printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubClass={%s}, Value=(0x%06x)\n",
+ ioc->name, log_info, desc, (log_info & 0xFFFFFF));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 165f81d16d00..98eb9c688e17 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -75,8 +75,8 @@
#define COPYRIGHT "Copyright (c) 1999-2007 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.04"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.04"
+#define MPT_LINUX_VERSION_COMMON "3.04.05"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.05"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
@@ -537,7 +537,14 @@ typedef struct _MPT_ADAPTER
int id; /* Unique adapter id N {0,1,2,...} */
int pci_irq; /* This irq */
char name[MPT_NAME_LENGTH]; /* "iocN" */
- char *prod_name; /* "LSIFC9x9" */
+ char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
+ char board_name[16];
+ char board_assembly[16];
+ char board_tracer[16];
+ u16 nvdata_version_persistent;
+ u16 nvdata_version_default;
+ u8 io_missing_delay;
+ u8 device_missing_delay;
SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
u8 bus_type;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 9d0f30478e46..58e6c319cc76 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
index f7e72c5e47de..180b3c156247 100644
--- a/drivers/message/fusion/mptctl.h
+++ b/drivers/message/fusion/mptctl.h
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 0caaf6403993..f2ebaa9992fe 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -43,7 +43,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-#include "linux_compat.h" /* linux-2.6 tweaks */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -131,6 +130,7 @@ static struct scsi_host_template mptfc_driver_template = {
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mptscsih_host_attrs,
};
/****************************************************************************
@@ -154,6 +154,8 @@ static struct pci_device_id mptfc_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptfc_pci_table);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 7dd34bd28efc..7e8a90cb484e 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index fe438bf119f6..8d08c2bed24a 100644
--- a/drivers/message/fusion/mptlan.h
+++ b/drivers/message/fusion/mptlan.h
@@ -5,7 +5,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 2000-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 1d2d03f77894..d50664640512 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
* Copyright (c) 2005-2007 Dell
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1119,6 +1119,7 @@ static struct scsi_host_template mptsas_driver_template = {
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mptscsih_host_attrs,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
@@ -1390,6 +1391,11 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
goto out_free_consistent;
}
+ ioc->nvdata_version_persistent =
+ le16_to_cpu(buffer->NvdataVersionPersistent);
+ ioc->nvdata_version_default =
+ le16_to_cpu(buffer->NvdataVersionDefault);
+
for (i = 0; i < port_info->num_phys; i++) {
mptsas_print_phy_data(&buffer->PhyData[i]);
port_info->phy_info[i].phy_id = i;
@@ -1410,6 +1416,63 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
}
static int
+mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage1_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+ u16 device_missing_delay;
+
+ memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+ memset(&cfg, 0, sizeof(CONFIGPARMS));
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = 10;
+ cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
+ cfg.cfghdr.ehdr->PageNumber = 1;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ ioc->io_missing_delay =
+ le16_to_cpu(buffer->IODeviceMissingDelay);
+ device_missing_delay = le16_to_cpu(buffer->ReportDeviceMissingDelay);
+ ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
+ (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
+ device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
u32 form, u32 form_specific)
{
@@ -1990,6 +2053,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
if (error)
goto out_free_port_info;
+ mptsas_sas_io_unit_pg1(ioc);
mutex_lock(&ioc->sas_topology_mutex);
ioc->handle = hba->phy_info[0].handle;
port_info = mptsas_find_portinfo_by_handle(ioc, ioc->handle);
@@ -3237,6 +3301,8 @@ static struct pci_driver mptsas_driver = {
static int __init
mptsas_init(void)
{
+ int error;
+
show_mptmod_ver(my_NAME, my_VERSION);
mptsas_transport_template =
@@ -3260,7 +3326,11 @@ mptsas_init(void)
": Registered for IOC reset notifications\n"));
}
- return pci_register_driver(&mptsas_driver);
+ error = pci_register_driver(&mptsas_driver);
+ if (error)
+ sas_release_transport(mptsas_transport_template);
+
+ return error;
}
static void __exit
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3bd94f11e7d6..fd3aa2619f42 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -44,7 +44,6 @@
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-#include "linux_compat.h" /* linux-2.6 tweaks */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -260,30 +259,13 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
/* Map the data portion, if any.
* sges_left = 0 if no data transfer.
*/
- if ( (sges_left = SCpnt->use_sg) ) {
- sges_left = pci_map_sg(ioc->pcidev,
- (struct scatterlist *) SCpnt->request_buffer,
- SCpnt->use_sg,
- SCpnt->sc_data_direction);
- if (sges_left == 0)
- return FAILED;
- } else if (SCpnt->request_bufflen) {
- SCpnt->SCp.dma_handle = pci_map_single(ioc->pcidev,
- SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n",
- ioc->name, SCpnt, SCpnt->request_bufflen));
- mptscsih_add_sge((char *) &pReq->SGL,
- 0xD1000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|SCpnt->request_bufflen,
- SCpnt->SCp.dma_handle);
-
- return SUCCESS;
- }
+ sges_left = scsi_dma_map(SCpnt);
+ if (sges_left < 0)
+ return FAILED;
/* Handle the SG case.
*/
- sg = (struct scatterlist *) SCpnt->request_buffer;
+ sg = scsi_sglist(SCpnt);
sg_done = 0;
sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
chainSge = NULL;
@@ -465,7 +447,12 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
MPT_FRAME_HDR *mf;
SEPRequest_t *SEPMsg;
- if (ioc->bus_type == FC)
+ if (ioc->bus_type != SAS)
+ return;
+
+ /* Not supported for hidden raid components
+ */
+ if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
return;
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
@@ -662,7 +649,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
scsi_state = pScsiReply->SCSIState;
scsi_status = pScsiReply->SCSIStatus;
xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
- sc->resid = sc->request_bufflen - xfer_cnt;
+ scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
log_info = le32_to_cpu(pScsiReply->IOCLogInfo);
/*
@@ -767,7 +754,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
break;
case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
- sc->resid = sc->request_bufflen - xfer_cnt;
+ scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
sc->result=DID_SOFT_ERROR << 16;
else /* Sufficient data transfer occurred */
@@ -816,7 +803,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
break;
case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
- sc->resid=0;
+ scsi_set_resid(sc, 0);
case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
sc->result = (DID_OK << 16) | scsi_status;
@@ -899,23 +886,18 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
scsi_state, scsi_status, log_info));
dreplyprintk(("%s: [%d:%d:%d:%d] resid=%d "
- "bufflen=%d xfer_cnt=%d\n", __FUNCTION__,
- sc->device->host->host_no, sc->device->channel, sc->device->id,
- sc->device->lun, sc->resid, sc->request_bufflen,
- xfer_cnt));
+ "bufflen=%d xfer_cnt=%d\n", __FUNCTION__,
+ sc->device->host->host_no,
+ sc->device->channel, sc->device->id,
+ sc->device->lun, scsi_get_resid(sc),
+ scsi_bufflen(sc), xfer_cnt));
}
#endif
} /* end of address reply case */
/* Unmap the DMA buffers, if any. */
- if (sc->use_sg) {
- pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer,
- sc->use_sg, sc->sc_data_direction);
- } else if (sc->request_bufflen) {
- pci_unmap_single(ioc->pcidev, sc->SCp.dma_handle,
- sc->request_bufflen, sc->sc_data_direction);
- }
+ scsi_dma_unmap(sc);
sc->scsi_done(sc); /* Issue the command callback */
@@ -970,17 +952,8 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
/* Set status, free OS resources (SG DMA buffers)
* Do OS callback
*/
- if (SCpnt->use_sg) {
- pci_unmap_sg(ioc->pcidev,
- (struct scatterlist *) SCpnt->request_buffer,
- SCpnt->use_sg,
- SCpnt->sc_data_direction);
- } else if (SCpnt->request_bufflen) {
- pci_unmap_single(ioc->pcidev,
- SCpnt->SCp.dma_handle,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- }
+ scsi_dma_unmap(SCpnt);
+
SCpnt->result = DID_RESET << 16;
SCpnt->host_scribble = NULL;
@@ -1023,14 +996,19 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
if (mf == NULL)
continue;
+ /* If the device is a hidden raid component, then it's
+ * expected that mf->Function will be RAID_SCSI_IO
+ */
+ if (vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT && mf->Function !=
+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ continue;
+
int_to_scsilun(vdevice->lun, &lun);
if ((mf->Bus != vdevice->vtarget->channel) ||
(mf->TargetID != vdevice->vtarget->id) ||
memcmp(lun.scsi_lun, mf->LUN, 8))
continue;
- dsprintk(( "search_running: found (sc=%p, mf = %p) "
- "channel %d id %d, lun %d \n", hd->ScsiLookup[ii],
- mf, mf->Bus, mf->TargetID, vdevice->lun));
/* Cleanup
*/
@@ -1039,19 +1017,12 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
if ((unsigned char *)mf != sc->host_scribble)
continue;
- if (sc->use_sg) {
- pci_unmap_sg(hd->ioc->pcidev,
- (struct scatterlist *) sc->request_buffer,
- sc->use_sg,
- sc->sc_data_direction);
- } else if (sc->request_bufflen) {
- pci_unmap_single(hd->ioc->pcidev,
- sc->SCp.dma_handle,
- sc->request_bufflen,
- sc->sc_data_direction);
- }
+ scsi_dma_unmap(sc);
sc->host_scribble = NULL;
sc->result = DID_NO_CONNECT << 16;
+ dsprintk(( "search_running: found (sc=%p, mf = %p) "
+ "channel %d id %d, lun %d \n", sc, mf,
+ vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun));
sc->scsi_done(sc);
}
}
@@ -1380,10 +1351,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
* will be no data transfer! GRRRRR...
*/
if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
- datalen = SCpnt->request_bufflen;
+ datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
} else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
- datalen = SCpnt->request_bufflen;
+ datalen = scsi_bufflen(SCpnt);
scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
} else {
datalen = 0;
@@ -1768,20 +1739,45 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
u32 ctx2abort;
int scpnt_idx;
int retval;
- VirtDevice *vdev;
+ VirtDevice *vdevice;
ulong sn = SCpnt->serial_number;
+ MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
SCpnt->result = DID_RESET << 16;
SCpnt->scsi_done(SCpnt);
- dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
- "Can't locate host! (sc=%p)\n",
- SCpnt));
+ dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: Can't locate "
+ "host! (sc=%p)\n", SCpnt));
return FAILED;
}
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting task abort! (sc=%p)\n",
+ ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget) {
+ dtmprintk((MYIOC_s_DEBUG_FMT "task abort: device has been "
+ "deleted (sc=%p)\n", ioc->name, SCpnt));
+ SCpnt->result = DID_NO_CONNECT << 16;
+ SCpnt->scsi_done(SCpnt);
+ retval = 0;
+ goto out;
+ }
+
+ /* Task aborts are not supported for hidden raid components.
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ dtmprintk((MYIOC_s_DEBUG_FMT "task abort: hidden raid "
+ "component (sc=%p)\n", ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
/* Find this command
*/
if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
@@ -1790,21 +1786,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
*/
SCpnt->result = DID_RESET << 16;
dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
- "Command not in the active list! (sc=%p)\n",
- hd->ioc->name, SCpnt));
- return SUCCESS;
+ "Command not in the active list! (sc=%p)\n", ioc->name,
+ SCpnt));
+ retval = 0;
+ goto out;
}
- if (hd->resetPending)
- return FAILED;
+ if (hd->resetPending) {
+ retval = FAILED;
+ goto out;
+ }
if (hd->timeouts < -1)
hd->timeouts++;
- printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
- hd->ioc->name, SCpnt);
- scsi_print_command(SCpnt);
-
/* Most important! Set TaskMsgContext to SCpnt's MsgContext!
* (the IO to be ABORT'd)
*
@@ -1817,18 +1812,17 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
hd->abortSCpnt = SCpnt;
- vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- vdev->vtarget->channel, vdev->vtarget->id, vdev->lun,
- ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
+ vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun,
+ ctx2abort, mptscsih_get_tm_timeout(ioc));
if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
SCpnt->serial_number == sn)
retval = FAILED;
- printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
- hd->ioc->name,
- ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ out:
+ printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
@@ -1850,32 +1844,47 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
{
MPT_SCSI_HOST *hd;
int retval;
- VirtDevice *vdev;
+ VirtDevice *vdevice;
+ MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
- "Can't locate host! (sc=%p)\n",
- SCpnt));
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: Can't "
+ "locate host! (sc=%p)\n", SCpnt));
return FAILED;
}
- if (hd->resetPending)
- return FAILED;
-
- printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
+ ioc->name, SCpnt);
scsi_print_command(SCpnt);
- vdev = SCpnt->device->hostdata;
+ if (hd->resetPending) {
+ retval = FAILED;
+ goto out;
+ }
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget) {
+ retval = 0;
+ goto out;
+ }
+
+ /* Target reset to hidden raid component is not supported
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ retval = FAILED;
+ goto out;
+ }
+
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- vdev->vtarget->channel, vdev->vtarget->id,
- 0, 0, mptscsih_get_tm_timeout(hd->ioc));
+ vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0,
+ mptscsih_get_tm_timeout(ioc));
- printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
- hd->ioc->name,
- ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ out:
+ printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
@@ -1899,18 +1908,19 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
MPT_SCSI_HOST *hd;
int retval;
VirtDevice *vdev;
+ MPT_ADAPTER *ioc;
/* If we can't locate our host adapter structure, return FAILED status.
*/
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
- "Can't locate host! (sc=%p)\n",
- SCpnt ) );
+ dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: Can't "
+ "locate host! (sc=%p)\n", SCpnt ));
return FAILED;
}
- printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting bus reset! (sc=%p)\n",
+ ioc->name, SCpnt);
scsi_print_command(SCpnt);
if (hd->timeouts < -1)
@@ -1918,11 +1928,10 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
vdev = SCpnt->device->hostdata;
retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
- vdev->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(hd->ioc));
+ vdev->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc));
- printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
- hd->ioc->name,
- ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+ printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
if (retval == 0)
return SUCCESS;
@@ -1943,37 +1952,38 @@ int
mptscsih_host_reset(struct scsi_cmnd *SCpnt)
{
MPT_SCSI_HOST * hd;
- int status = SUCCESS;
+ int retval;
+ MPT_ADAPTER *ioc;
/* If we can't locate the host to reset, then we failed. */
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
- dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
- "Can't locate host! (sc=%p)\n",
- SCpnt ) );
+ dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: Can't "
+ "locate host! (sc=%p)\n", SCpnt));
return FAILED;
}
- printk(KERN_WARNING MYNAM ": %s: Attempting host reset! (sc=%p)\n",
- hd->ioc->name, SCpnt);
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
+ ioc->name, SCpnt);
/* If our attempts to reset the host failed, then return a failed
* status. The host will be taken off line by the SCSI mid-layer.
*/
- if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0){
- status = FAILED;
+ if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0) {
+ retval = FAILED;
} else {
/* Make sure TM pending is cleared and TM state is set to
* NONE.
*/
+ retval = 0;
hd->tmPending = 0;
hd->tmState = TM_STATE_NONE;
}
- dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
- "Status = %s\n",
- (status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
+ printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
- return status;
+ return retval;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3150,6 +3160,16 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
{
INTERNAL_CMD iocmd;
+ /* Ignore hidden raid components; this is handled when the command
+ * is sent to the volume
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ return;
+
+ if (vdevice->vtarget->type != TYPE_DISK || vdevice->vtarget->deleted ||
+ !vdevice->configured_lun)
+ return;
+
/* Following parameters will not change
* in this routine.
*/
@@ -3164,11 +3184,162 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
iocmd.id = vdevice->vtarget->id;
iocmd.lun = vdevice->lun;
- if ((vdevice->vtarget->type == TYPE_DISK) &&
- (vdevice->configured_lun))
- mptscsih_do_cmd(hd, &iocmd);
+ mptscsih_do_cmd(hd, &iocmd);
}
+static ssize_t
+mptscsih_version_fw_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static CLASS_DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL);
+
+static ssize_t
+mptscsih_version_bios_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (ioc->biosVersion & 0xFF000000) >> 24,
+ (ioc->biosVersion & 0x00FF0000) >> 16,
+ (ioc->biosVersion & 0x0000FF00) >> 8,
+ ioc->biosVersion & 0x000000FF);
+}
+static CLASS_DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL);
+
+static ssize_t
+mptscsih_version_mpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
+}
+static CLASS_DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL);
+
+static ssize_t
+mptscsih_version_product_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name);
+}
+static CLASS_DEVICE_ATTR(version_product, S_IRUGO,
+ mptscsih_version_product_show, NULL);
+
+static ssize_t
+mptscsih_version_nvdata_persistent_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02xh\n",
+ ioc->nvdata_version_persistent);
+}
+static CLASS_DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ mptscsih_version_nvdata_persistent_show, NULL);
+
+static ssize_t
+mptscsih_version_nvdata_default_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02xh\n", ioc->nvdata_version_default);
+}
+static CLASS_DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ mptscsih_version_nvdata_default_show, NULL);
+
+static ssize_t
+mptscsih_board_name_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name);
+}
+static CLASS_DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL);
+
+static ssize_t
+mptscsih_board_assembly_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
+}
+static CLASS_DEVICE_ATTR(board_assembly, S_IRUGO,
+ mptscsih_board_assembly_show, NULL);
+
+static ssize_t
+mptscsih_board_tracer_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
+}
+static CLASS_DEVICE_ATTR(board_tracer, S_IRUGO,
+ mptscsih_board_tracer_show, NULL);
+
+static ssize_t
+mptscsih_io_delay_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static CLASS_DEVICE_ATTR(io_delay, S_IRUGO,
+ mptscsih_io_delay_show, NULL);
+
+static ssize_t
+mptscsih_device_delay_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(cdev);
+ MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static CLASS_DEVICE_ATTR(device_delay, S_IRUGO,
+ mptscsih_device_delay_show, NULL);
+
+struct class_device_attribute *mptscsih_host_attrs[] = {
+ &class_device_attr_version_fw,
+ &class_device_attr_version_bios,
+ &class_device_attr_version_mpi,
+ &class_device_attr_version_product,
+ &class_device_attr_version_nvdata_persistent,
+ &class_device_attr_version_nvdata_default,
+ &class_device_attr_board_name,
+ &class_device_attr_board_assembly,
+ &class_device_attr_board_tracer,
+ &class_device_attr_io_delay,
+ &class_device_attr_device_delay,
+ NULL,
+};
+EXPORT_SYMBOL(mptscsih_host_attrs);
+
EXPORT_SYMBOL(mptscsih_remove);
EXPORT_SYMBOL(mptscsih_shutdown);
#ifdef CONFIG_PM
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 843c01a6aa0e..67b088db2f10 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -6,7 +6,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -129,3 +129,4 @@ extern void mptscsih_timer_expired(unsigned long data);
extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
+extern struct class_device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 37bf65348372..947fe2901800 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -4,7 +4,7 @@
* running LSI Logic Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2007 LSI Logic Corporation
- * (mailto:mpt_linux_developer@lsi.com)
+ * (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -44,7 +44,6 @@
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-#include "linux_compat.h" /* linux-2.6 tweaks */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -822,6 +821,7 @@ static struct scsi_host_template mptspi_driver_template = {
.max_sectors = 8192,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mptscsih_host_attrs,
};
static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
@@ -1524,6 +1524,8 @@ static struct pci_driver mptspi_driver = {
static int __init
mptspi_init(void)
{
+ int error;
+
show_mptmod_ver(my_NAME, my_VERSION);
mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions);
@@ -1544,7 +1546,11 @@ mptspi_init(void)
": Registered for IOC reset notifications\n"));
}
- return pci_register_driver(&mptspi_driver);
+ error = pci_register_driver(&mptspi_driver);
+ if (error)
+ spi_release_transport(mptspi_transport_template);
+
+ return error;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index f4ac21e5771e..5afa0e393ecf 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -1,9 +1,6 @@
-menu "I2O device support"
- depends on PCI
-
-config I2O
- tristate "I2O support"
+menuconfig I2O
+ tristate "I2O device support"
depends on PCI
---help---
The Intelligent Input/Output (I2O) architecture allows hardware
@@ -25,9 +22,10 @@ config I2O
If unsure, say N.
+if I2O
+
config I2O_LCT_NOTIFY_ON_CHANGES
bool "Enable LCT notification"
- depends on I2O
default y
---help---
Only say N here if you have a I2O controller from SUN. The SUN
@@ -39,7 +37,6 @@ config I2O_LCT_NOTIFY_ON_CHANGES
config I2O_EXT_ADAPTEC
bool "Enable Adaptec extensions"
- depends on I2O
default y
---help---
Say Y for support of raidutils for Adaptec I2O controllers. You also
@@ -57,7 +54,7 @@ config I2O_EXT_ADAPTEC_DMA64
config I2O_CONFIG
tristate "I2O Configuration support"
- depends on I2O
+ depends on VIRT_TO_BUS
---help---
Say Y for support of the configuration interface for the I2O adapters.
If you have a RAID controller from Adaptec and you want to use the
@@ -78,7 +75,6 @@ config I2O_CONFIG_OLD_IOCTL
config I2O_BUS
tristate "I2O Bus Adapter OSM"
- depends on I2O
---help---
Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM
provides access to the busses on the I2O controller. The main purpose
@@ -89,7 +85,7 @@ config I2O_BUS
config I2O_BLOCK
tristate "I2O Block OSM"
- depends on I2O && BLOCK
+ depends on BLOCK
---help---
Include support for the I2O Block OSM. The Block OSM presents disk
and other structured block devices to the operating system. If you
@@ -102,7 +98,7 @@ config I2O_BLOCK
config I2O_SCSI
tristate "I2O SCSI OSM"
- depends on I2O && SCSI
+ depends on SCSI
---help---
Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel
I2O controller. You can use both the SCSI and Block OSM together if
@@ -114,7 +110,6 @@ config I2O_SCSI
config I2O_PROC
tristate "I2O /proc support"
- depends on I2O
---help---
If you say Y here and to "/proc file system support", you will be
able to read I2O related information from the virtual directory
@@ -123,5 +118,4 @@ config I2O_PROC
To compile this support as a module, choose M here: the
module will be called i2o_proc.
-endmenu
-
+endif # I2O
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 8abe45e49ad7..ce62d8bfe1c8 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -24,7 +24,7 @@ void i2o_report_status(const char *severity, const char *str,
if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
return; // No status in this reply
- printk(KERN_DEBUG "%s%s: ", severity, str);
+ printk("%s%s: ", severity, str);
if (cmd < 0x1F) // Utility cmd
i2o_report_util_cmd(cmd);
@@ -32,7 +32,7 @@ void i2o_report_status(const char *severity, const char *str,
else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
i2o_report_exec_cmd(cmd);
else
- printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds
+ printk("Cmd = %0#2x, ", cmd); // Other cmds
if (msg[0] & MSG_FAIL) {
i2o_report_fail_status(req_status, msg);
@@ -44,7 +44,7 @@ void i2o_report_status(const char *severity, const char *str,
if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
i2o_report_common_dsc(detailed_status);
else
- printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+ printk(" / DetailedStatus = %0#4x.\n",
detailed_status);
}
@@ -89,10 +89,10 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
};
if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
- printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
+ printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
req_status);
else
- printk(KERN_DEBUG "TRANSPORT_%s.\n",
+ printk("TRANSPORT_%s.\n",
FAIL_STATUS[req_status & 0x0F]);
/* Dump some details */
@@ -104,7 +104,7 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
msg[5] >> 16, msg[5] & 0xFFF);
- printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
+ printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF);
if (msg[4] & (1 << 16))
printk(KERN_DEBUG "(FormatError), "
"this msg can never be delivered/processed.\n");
@@ -142,9 +142,9 @@ static void i2o_report_common_status(u8 req_status)
};
if (req_status >= ARRAY_SIZE(REPLY_STATUS))
- printk(KERN_DEBUG "RequestStatus = %0#2x", req_status);
+ printk("RequestStatus = %0#2x", req_status);
else
- printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]);
+ printk("%s", REPLY_STATUS[req_status]);
}
/*
@@ -187,10 +187,10 @@ static void i2o_report_common_dsc(u16 detailed_status)
};
if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
- printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+ printk(" / DetailedStatus = %0#4x.\n",
detailed_status);
else
- printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]);
+ printk(" / %s.\n", COMMON_DSC[detailed_status]);
}
/*
@@ -200,49 +200,49 @@ static void i2o_report_util_cmd(u8 cmd)
{
switch (cmd) {
case I2O_CMD_UTIL_NOP:
- printk(KERN_DEBUG "UTIL_NOP, ");
+ printk("UTIL_NOP, ");
break;
case I2O_CMD_UTIL_ABORT:
- printk(KERN_DEBUG "UTIL_ABORT, ");
+ printk("UTIL_ABORT, ");
break;
case I2O_CMD_UTIL_CLAIM:
- printk(KERN_DEBUG "UTIL_CLAIM, ");
+ printk("UTIL_CLAIM, ");
break;
case I2O_CMD_UTIL_RELEASE:
- printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, ");
+ printk("UTIL_CLAIM_RELEASE, ");
break;
case I2O_CMD_UTIL_CONFIG_DIALOG:
- printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, ");
+ printk("UTIL_CONFIG_DIALOG, ");
break;
case I2O_CMD_UTIL_DEVICE_RESERVE:
- printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, ");
+ printk("UTIL_DEVICE_RESERVE, ");
break;
case I2O_CMD_UTIL_DEVICE_RELEASE:
- printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, ");
+ printk("UTIL_DEVICE_RELEASE, ");
break;
case I2O_CMD_UTIL_EVT_ACK:
- printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, ");
+ printk("UTIL_EVENT_ACKNOWLEDGE, ");
break;
case I2O_CMD_UTIL_EVT_REGISTER:
- printk(KERN_DEBUG "UTIL_EVENT_REGISTER, ");
+ printk("UTIL_EVENT_REGISTER, ");
break;
case I2O_CMD_UTIL_LOCK:
- printk(KERN_DEBUG "UTIL_LOCK, ");
+ printk("UTIL_LOCK, ");
break;
case I2O_CMD_UTIL_LOCK_RELEASE:
- printk(KERN_DEBUG "UTIL_LOCK_RELEASE, ");
+ printk("UTIL_LOCK_RELEASE, ");
break;
case I2O_CMD_UTIL_PARAMS_GET:
- printk(KERN_DEBUG "UTIL_PARAMS_GET, ");
+ printk("UTIL_PARAMS_GET, ");
break;
case I2O_CMD_UTIL_PARAMS_SET:
- printk(KERN_DEBUG "UTIL_PARAMS_SET, ");
+ printk("UTIL_PARAMS_SET, ");
break;
case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
- printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, ");
+ printk("UTIL_REPLY_FAULT_NOTIFY, ");
break;
default:
- printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);
+ printk("Cmd = %0#2x, ", cmd);
}
}
@@ -253,106 +253,106 @@ static void i2o_report_exec_cmd(u8 cmd)
{
switch (cmd) {
case I2O_CMD_ADAPTER_ASSIGN:
- printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, ");
+ printk("EXEC_ADAPTER_ASSIGN, ");
break;
case I2O_CMD_ADAPTER_READ:
- printk(KERN_DEBUG "EXEC_ADAPTER_READ, ");
+ printk("EXEC_ADAPTER_READ, ");
break;
case I2O_CMD_ADAPTER_RELEASE:
- printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, ");
+ printk("EXEC_ADAPTER_RELEASE, ");
break;
case I2O_CMD_BIOS_INFO_SET:
- printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, ");
+ printk("EXEC_BIOS_INFO_SET, ");
break;
case I2O_CMD_BOOT_DEVICE_SET:
- printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, ");
+ printk("EXEC_BOOT_DEVICE_SET, ");
break;
case I2O_CMD_CONFIG_VALIDATE:
- printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, ");
+ printk("EXEC_CONFIG_VALIDATE, ");
break;
case I2O_CMD_CONN_SETUP:
- printk(KERN_DEBUG "EXEC_CONN_SETUP, ");
+ printk("EXEC_CONN_SETUP, ");
break;
case I2O_CMD_DDM_DESTROY:
- printk(KERN_DEBUG "EXEC_DDM_DESTROY, ");
+ printk("EXEC_DDM_DESTROY, ");
break;
case I2O_CMD_DDM_ENABLE:
- printk(KERN_DEBUG "EXEC_DDM_ENABLE, ");
+ printk("EXEC_DDM_ENABLE, ");
break;
case I2O_CMD_DDM_QUIESCE:
- printk(KERN_DEBUG "EXEC_DDM_QUIESCE, ");
+ printk("EXEC_DDM_QUIESCE, ");
break;
case I2O_CMD_DDM_RESET:
- printk(KERN_DEBUG "EXEC_DDM_RESET, ");
+ printk("EXEC_DDM_RESET, ");
break;
case I2O_CMD_DDM_SUSPEND:
- printk(KERN_DEBUG "EXEC_DDM_SUSPEND, ");
+ printk("EXEC_DDM_SUSPEND, ");
break;
case I2O_CMD_DEVICE_ASSIGN:
- printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, ");
+ printk("EXEC_DEVICE_ASSIGN, ");
break;
case I2O_CMD_DEVICE_RELEASE:
- printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, ");
+ printk("EXEC_DEVICE_RELEASE, ");
break;
case I2O_CMD_HRT_GET:
- printk(KERN_DEBUG "EXEC_HRT_GET, ");
+ printk("EXEC_HRT_GET, ");
break;
case I2O_CMD_ADAPTER_CLEAR:
- printk(KERN_DEBUG "EXEC_IOP_CLEAR, ");
+ printk("EXEC_IOP_CLEAR, ");
break;
case I2O_CMD_ADAPTER_CONNECT:
- printk(KERN_DEBUG "EXEC_IOP_CONNECT, ");
+ printk("EXEC_IOP_CONNECT, ");
break;
case I2O_CMD_ADAPTER_RESET:
- printk(KERN_DEBUG "EXEC_IOP_RESET, ");
+ printk("EXEC_IOP_RESET, ");
break;
case I2O_CMD_LCT_NOTIFY:
- printk(KERN_DEBUG "EXEC_LCT_NOTIFY, ");
+ printk("EXEC_LCT_NOTIFY, ");
break;
case I2O_CMD_OUTBOUND_INIT:
- printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, ");
+ printk("EXEC_OUTBOUND_INIT, ");
break;
case I2O_CMD_PATH_ENABLE:
- printk(KERN_DEBUG "EXEC_PATH_ENABLE, ");
+ printk("EXEC_PATH_ENABLE, ");
break;
case I2O_CMD_PATH_QUIESCE:
- printk(KERN_DEBUG "EXEC_PATH_QUIESCE, ");
+ printk("EXEC_PATH_QUIESCE, ");
break;
case I2O_CMD_PATH_RESET:
- printk(KERN_DEBUG "EXEC_PATH_RESET, ");
+ printk("EXEC_PATH_RESET, ");
break;
case I2O_CMD_STATIC_MF_CREATE:
- printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, ");
+ printk("EXEC_STATIC_MF_CREATE, ");
break;
case I2O_CMD_STATIC_MF_RELEASE:
- printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, ");
+ printk("EXEC_STATIC_MF_RELEASE, ");
break;
case I2O_CMD_STATUS_GET:
- printk(KERN_DEBUG "EXEC_STATUS_GET, ");
+ printk("EXEC_STATUS_GET, ");
break;
case I2O_CMD_SW_DOWNLOAD:
- printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, ");
+ printk("EXEC_SW_DOWNLOAD, ");
break;
case I2O_CMD_SW_UPLOAD:
- printk(KERN_DEBUG "EXEC_SW_UPLOAD, ");
+ printk("EXEC_SW_UPLOAD, ");
break;
case I2O_CMD_SW_REMOVE:
- printk(KERN_DEBUG "EXEC_SW_REMOVE, ");
+ printk("EXEC_SW_REMOVE, ");
break;
case I2O_CMD_SYS_ENABLE:
- printk(KERN_DEBUG "EXEC_SYS_ENABLE, ");
+ printk("EXEC_SYS_ENABLE, ");
break;
case I2O_CMD_SYS_MODIFY:
- printk(KERN_DEBUG "EXEC_SYS_MODIFY, ");
+ printk("EXEC_SYS_MODIFY, ");
break;
case I2O_CMD_SYS_QUIESCE:
- printk(KERN_DEBUG "EXEC_SYS_QUIESCE, ");
+ printk("EXEC_SYS_QUIESCE, ");
break;
case I2O_CMD_SYS_TAB_SET:
- printk(KERN_DEBUG "EXEC_SYS_TAB_SET, ");
+ printk("EXEC_SYS_TAB_SET, ");
break;
default:
- printk(KERN_DEBUG "Cmd = %#02x, ", cmd);
+ printk("Cmd = %#02x, ", cmd);
}
}
@@ -361,28 +361,28 @@ void i2o_debug_state(struct i2o_controller *c)
printk(KERN_INFO "%s: State = ", c->name);
switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
case 0x01:
- printk(KERN_DEBUG "INIT\n");
+ printk("INIT\n");
break;
case 0x02:
- printk(KERN_DEBUG "RESET\n");
+ printk("RESET\n");
break;
case 0x04:
- printk(KERN_DEBUG "HOLD\n");
+ printk("HOLD\n");
break;
case 0x05:
- printk(KERN_DEBUG "READY\n");
+ printk("READY\n");
break;
case 0x08:
- printk(KERN_DEBUG "OPERATIONAL\n");
+ printk("OPERATIONAL\n");
break;
case 0x10:
- printk(KERN_DEBUG "FAILED\n");
+ printk("FAILED\n");
break;
case 0x11:
- printk(KERN_DEBUG "FAULTED\n");
+ printk("FAULTED\n");
break;
default:
- printk(KERN_DEBUG "%x (unknown !!)\n",
+ printk("%x (unknown !!)\n",
((i2o_status_block *) c->status_block.virt)->iop_state);
}
};
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index b9df143e4ff1..489d7c5c4965 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -62,7 +62,7 @@ int i2o_device_claim(struct i2o_device *dev)
{
int rc = 0;
- down(&dev->lock);
+ mutex_lock(&dev->lock);
rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY);
if (!rc)
@@ -72,7 +72,7 @@ int i2o_device_claim(struct i2o_device *dev)
pr_debug("i2o: claim of device %d failed %d\n",
dev->lct_data.tid, rc);
- up(&dev->lock);
+ mutex_unlock(&dev->lock);
return rc;
}
@@ -96,7 +96,7 @@ int i2o_device_claim_release(struct i2o_device *dev)
int tries;
int rc = 0;
- down(&dev->lock);
+ mutex_lock(&dev->lock);
/*
* If the controller takes a nonblocking approach to
@@ -118,7 +118,7 @@ int i2o_device_claim_release(struct i2o_device *dev)
pr_debug("i2o: claim release of device %d failed %d\n",
dev->lct_data.tid, rc);
- up(&dev->lock);
+ mutex_unlock(&dev->lock);
return rc;
}
@@ -198,7 +198,7 @@ static struct i2o_device *i2o_device_alloc(void)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&dev->list);
- init_MUTEX(&dev->lock);
+ mutex_init(&dev->lock);
dev->device.bus = &i2o_bus_type;
dev->device.release = &i2o_device_release;
@@ -326,7 +326,7 @@ int i2o_device_parse_lct(struct i2o_controller *c)
u16 table_size;
u32 buf;
- down(&c->lct_lock);
+ mutex_lock(&c->lct_lock);
kfree(c->lct);
@@ -335,7 +335,7 @@ int i2o_device_parse_lct(struct i2o_controller *c)
lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
if (!lct) {
- up(&c->lct_lock);
+ mutex_unlock(&c->lct_lock);
return -ENOMEM;
}
@@ -408,7 +408,7 @@ int i2o_device_parse_lct(struct i2o_controller *c)
i2o_device_remove(dev);
}
- up(&c->lct_lock);
+ mutex_unlock(&c->lct_lock);
return 0;
}
@@ -485,7 +485,7 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
u8 *resblk; /* 8 bytes for header */
int rc;
- resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
+ resblk = kmalloc(buflen + 8, GFP_KERNEL);
if (!resblk)
return -ENOMEM;
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 5278aad92bc4..8c83ee3b0920 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -131,8 +131,10 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
int rc = 0;
wait = i2o_exec_wait_alloc();
- if (!wait)
+ if (!wait) {
+ i2o_msg_nop(c, msg);
return -ENOMEM;
+ }
if (tcntxt == 0xffffffff)
tcntxt = 0x80000000;
@@ -337,6 +339,8 @@ static int i2o_exec_probe(struct device *dev)
rc = device_create_file(dev, &dev_attr_product_id);
if (rc) goto err_vid;
+ i2o_dev->iop->exec = i2o_dev;
+
return 0;
err_vid:
@@ -537,7 +541,7 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
struct device *dev;
struct i2o_message *msg;
- down(&c->lct_lock);
+ mutex_lock(&c->lct_lock);
dev = &c->pdev->dev;
@@ -561,7 +565,7 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
i2o_msg_post(c, msg);
- up(&c->lct_lock);
+ mutex_unlock(&c->lct_lock);
return 0;
};
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b17c4b2bc9ef..988c8ce47f58 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -215,7 +215,7 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
struct i2o_message *msg;
msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
- if (IS_ERR(msg) == I2O_QUEUE_EMPTY)
+ if (IS_ERR(msg))
return PTR_ERR(msg);
msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
@@ -1171,8 +1171,7 @@ static int __init i2o_block_init(void)
/* Allocate request mempool and slab */
size = sizeof(struct i2o_block_request);
i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
- SLAB_HWCACHE_ALIGN, NULL,
- NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!i2o_blk_req_pool.slab) {
osm_err("can't init request slab\n");
rc = -ENOMEM;
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 8ba275a12773..84e046e94f5f 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -554,8 +554,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
return -ENXIO;
}
- msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
-
sb = c->status_block.virt;
if (get_user(size, &user_msg[0])) {
@@ -573,24 +571,30 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
size <<= 2; // Convert to bytes
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ rcode = -EFAULT;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size)) {
osm_warn("unable to copy user message\n");
- return -EFAULT;
+ goto out;
}
i2o_dump_message(msg);
if (get_user(reply_size, &user_reply[0]) < 0)
- return -EFAULT;
+ goto out;
reply_size >>= 16;
reply_size <<= 2;
+ rcode = -ENOMEM;
reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
- return -ENOMEM;
+ goto out;
}
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -661,13 +665,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
rcode = i2o_msg_post_wait(c, msg, 60);
+ msg = NULL;
if (rcode) {
reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
}
if (sg_offset) {
- u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
+ u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
@@ -675,7 +680,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
int sg_size;
// re-acquire the original message to handle correctly the sg copy operation
- memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+ memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
@@ -684,7 +689,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
- if (copy_from_user(msg, user_msg, size)) {
+ if (copy_from_user(rmsg, user_msg, size)) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
@@ -692,7 +697,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
// TODO 64bit fix
- sg = (struct sg_simple_element *)(msg + sg_offset);
+ sg = (struct sg_simple_element *)(rmsg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if (!
@@ -714,7 +719,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- sg_list_cleanup:
+sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -723,7 +728,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
"%s: Could not copy message context FROM user\n",
c->name);
rcode = -EFAULT;
- goto sg_list_cleanup;
}
if (copy_to_user(user_reply, reply, reply_size)) {
printk(KERN_WARNING
@@ -731,12 +735,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
rcode = -EFAULT;
}
}
-
for (i = 0; i < sg_index; i++)
i2o_dma_free(&c->pdev->dev, &sg_list[i]);
- cleanup:
+cleanup:
kfree(reply);
+out:
+ if (msg)
+ i2o_msg_nop(c, msg);
return rcode;
}
@@ -793,8 +799,6 @@ static int i2o_cfg_passthru(unsigned long arg)
return -ENXIO;
}
- msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
-
sb = c->status_block.virt;
if (get_user(size, &user_msg[0]))
@@ -810,12 +814,17 @@ static int i2o_cfg_passthru(unsigned long arg)
size <<= 2; // Convert to bytes
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ rcode = -EFAULT;
/* Copy in the user's I2O command */
if (copy_from_user(msg, user_msg, size))
- return -EFAULT;
+ goto out;
if (get_user(reply_size, &user_reply[0]) < 0)
- return -EFAULT;
+ goto out;
reply_size >>= 16;
reply_size <<= 2;
@@ -824,7 +833,8 @@ static int i2o_cfg_passthru(unsigned long arg)
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
- return -ENOMEM;
+ rcode = -ENOMEM;
+ goto out;
}
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -891,13 +901,14 @@ static int i2o_cfg_passthru(unsigned long arg)
}
rcode = i2o_msg_post_wait(c, msg, 60);
+ msg = NULL;
if (rcode) {
reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
}
if (sg_offset) {
- u32 msg[128];
+ u32 rmsg[128];
/* Copy back the Scatter Gather buffers back to user space */
u32 j;
// TODO 64bit fix
@@ -905,7 +916,7 @@ static int i2o_cfg_passthru(unsigned long arg)
int sg_size;
// re-acquire the original message to handle correctly the sg copy operation
- memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+ memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
// get user msg size in u32s
if (get_user(size, &user_msg[0])) {
rcode = -EFAULT;
@@ -914,7 +925,7 @@ static int i2o_cfg_passthru(unsigned long arg)
size = size >> 16;
size *= 4;
/* Copy in the user's I2O command */
- if (copy_from_user(msg, user_msg, size)) {
+ if (copy_from_user(rmsg, user_msg, size)) {
rcode = -EFAULT;
goto sg_list_cleanup;
}
@@ -922,7 +933,7 @@ static int i2o_cfg_passthru(unsigned long arg)
(size - sg_offset * 4) / sizeof(struct sg_simple_element);
// TODO 64bit fix
- sg = (struct sg_simple_element *)(msg + sg_offset);
+ sg = (struct sg_simple_element *)(rmsg + sg_offset);
for (j = 0; j < sg_count; j++) {
/* Copy out the SG list to user's buffer if necessary */
if (!
@@ -944,7 +955,7 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- sg_list_cleanup:
+sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -964,8 +975,11 @@ static int i2o_cfg_passthru(unsigned long arg)
for (i = 0; i < sg_index; i++)
kfree(sg_list[i]);
- cleanup:
+cleanup:
kfree(reply);
+out:
+ if (msg)
+ i2o_msg_nop(c, msg);
return rcode;
}
#endif
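
The i2o_config hunks above converge every failure path on labels that free only what the driver still owns: i2o_msg_post_wait() consumes the message frame, so msg is cleared right after the call and the final "if (msg) i2o_msg_nop(c, msg)" can only fire for a frame that was never handed to the controller. A minimal sketch of that ownership idiom follows; acquire()/prepare()/submit()/release() are hypothetical stand-ins, not the real i2o API.

/*
 * Illustrative only: acquire()/submit()/release() stand in for
 * i2o_msg_get_wait()/i2o_msg_post_wait()/i2o_msg_nop().
 */
struct frame;

struct frame *acquire(void);
int prepare(struct frame *f);
int submit(struct frame *f);	/* consumes the frame, success or failure */
void release(struct frame *f);	/* return a frame that was never submitted */

int do_request(void)
{
	struct frame *f;
	int rc;

	f = acquire();
	if (!f)
		return -1;

	rc = -1;
	if (prepare(f) < 0)	/* frame is still ours: freed at "out" */
		goto out;

	rc = submit(f);
	f = NULL;		/* ownership passed on; never release it again */

out:
	if (f)			/* only non-NULL when the frame was not submitted */
		release(f);
	return rc;
}
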
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 1045c8a518bb..aa6fb9429d58 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -377,12 +377,8 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
osm_err("SCSI error %08x\n", error);
dev = &c->pdev->dev;
- if (cmd->use_sg)
- dma_unmap_sg(dev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- else if (cmd->SCp.dma_handle)
- dma_unmap_single(dev, cmd->SCp.dma_handle, cmd->request_bufflen,
- cmd->sc_data_direction);
+
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
@@ -664,21 +660,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
if (sgl_offset != SGL_OFFSET_0) {
/* write size of data addressed by SGL */
- *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
+ *mptr++ = cpu_to_le32(scsi_bufflen(SCpnt));
/* Now fill in the SGList and command */
- if (SCpnt->use_sg) {
- if (!i2o_dma_map_sg(c, SCpnt->request_buffer,
- SCpnt->use_sg,
+
+ if (scsi_sg_count(SCpnt)) {
+ if (!i2o_dma_map_sg(c, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt),
SCpnt->sc_data_direction, &mptr))
goto nomem;
- } else {
- SCpnt->SCp.dma_handle =
- i2o_dma_map_single(c, SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction, &mptr);
- if (dma_mapping_error(SCpnt->SCp.dma_handle))
- goto nomem;
}
}
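
The i2o_scsi hunk replaces direct use of cmd->use_sg/request_buffer/request_bufflen with the scsi_sg_count()/scsi_sglist()/scsi_bufflen() accessors and a single scsi_dma_unmap() on completion. A short sketch of that accessor style, assuming the scsi_dma_map()/scsi_for_each_sg() helpers from the same family are available in this tree; the function names and how the mapped addresses are consumed are illustrative, not the driver's code.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int example_map_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd) for DMA */
	if (nseg < 0)
		return -ENOMEM;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the adapter */
	}
	return 0;
}

static void example_done(struct scsi_cmnd *cmd)
{
	scsi_dma_unmap(cmd);		/* replaces the open-coded dma_unmap_*() calls */
	cmd->scsi_done(cmd);
}
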
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 3305c12372a2..a1ec16a075c6 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -1067,7 +1067,7 @@ struct i2o_controller *i2o_iop_alloc(void)
INIT_LIST_HEAD(&c->devices);
spin_lock_init(&c->lock);
- init_MUTEX(&c->lct_lock);
+ mutex_init(&c->lct_lock);
device_initialize(&c->device);
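
The iop.c change converts lct_lock from a semaphore initialized with init_MUTEX() to a real struct mutex. A minimal sketch of the struct mutex API the field moves to; "struct example" is a made-up container, not i2o_controller.

#include <linux/mutex.h>

struct example {
	struct mutex lock;
};

static void example_init(struct example *e)
{
	mutex_init(&e->lock);		/* replaces init_MUTEX() on a semaphore */
}

static void example_use(struct example *e)
{
	mutex_lock(&e->lock);
	/* ... critical section ... */
	mutex_unlock(&e->lock);
}
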
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 75f401d52fda..b4ed57e02729 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -200,9 +200,8 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
{
struct mcp *mcp;
- mcp = kmalloc(sizeof(struct mcp) + size, GFP_KERNEL);
+ mcp = kzalloc(sizeof(struct mcp) + size, GFP_KERNEL);
if (mcp) {
- memset(mcp, 0, sizeof(struct mcp) + size);
spin_lock_init(&mcp->lock);
mcp->attached_device.parent = parent;
mcp->attached_device.bus = &mcp_bus_type;
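
This is the first of several hunks in the series that fold a kmalloc() plus memset() pair into one kzalloc() call. A minimal sketch of the pattern; "struct thing" and the trailing private area are illustrative.

#include <linux/slab.h>

struct thing {
	int state;
};

static struct thing *thing_alloc(size_t priv_size)
{
	/* one call allocates and zeroes the header plus trailing private data */
	return kzalloc(sizeof(struct thing) + priv_size, GFP_KERNEL);
}
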
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 149810a084f5..e03f1bcd4f9f 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -484,12 +484,11 @@ static int ucb1x00_probe(struct mcp *mcp)
goto err_disable;
}
- ucb = kmalloc(sizeof(struct ucb1x00), GFP_KERNEL);
+ ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
ret = -ENOMEM;
if (!ucb)
goto err_disable;
- memset(ucb, 0, sizeof(struct ucb1x00));
ucb->cdev.class = &ucb1x00_class;
ucb->cdev.dev = &mcp->attached_device;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38e815a2e871..fdbaa776f249 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -209,6 +209,7 @@ static int ucb1x00_thread(void *_ts)
DECLARE_WAITQUEUE(wait, tsk);
int valid = 0;
+ set_freezable();
add_wait_queue(&ts->irq_wait, &wait);
while (!kthread_should_stop()) {
unsigned int x, y, p;
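
The ucb1x00-ts hunk adds set_freezable() because kernel threads are no longer freezable by default and must opt in before suspend/hibernate can park them. A minimal sketch of a freezable worker loop under that assumption; the loop body is illustrative and the thread is expected to call try_to_freeze() (or a freezable wait) somewhere in its cycle.

#include <linux/kthread.h>
#include <linux/freezer.h>

static int example_thread(void *data)
{
	set_freezable();		/* opt in: kthreads are unfreezable by default */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here during suspend/hibernate */
		/* ... sample the hardware, sleep, etc. ... */
	}
	return 0;
}
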
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bd601efa7bd1..aaaa61ea4217 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -2,11 +2,15 @@
# Misc strange devices
#
-menu "Misc devices"
+menuconfig MISC_DEVICES
+ bool "Misc devices"
+ default y
+
+if MISC_DEVICES
config IBM_ASM
tristate "Device driver for IBM RSA service processor"
- depends on X86 && PCI && EXPERIMENTAL
+ depends on X86 && PCI && INPUT && EXPERIMENTAL
---help---
This option enables device driver support for in-band access to the
IBM RSA (Condor) service processor in eServer xSeries systems.
@@ -146,6 +150,7 @@ config THINKPAD_ACPI
depends on X86 && ACPI
select BACKLIGHT_CLASS_DEVICE
select HWMON
+ select NVRAM
---help---
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
@@ -192,4 +197,17 @@ config THINKPAD_ACPI_BAY
If you are not sure, say Y here.
-endmenu
+config THINKPAD_ACPI_INPUT_ENABLED
+ bool "Enable input layer support by default"
+ depends on THINKPAD_ACPI
+ default y
+ ---help---
+ Enables hot key handling over the input layer by default. If unset,
+ the driver does not enable any hot key handling by default, and also
+ starts up with a mostly empty keymap.
+
+ If you are not sure, say Y here. Say N to retain the deprecated
+ behavior of ibm-acpi, and thinkpad-acpi for kernels up to 2.6.21.
+
+
+endif # MISC_DEVICES
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 4f9060a2a2f2..f75306059971 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -737,8 +737,7 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
struct device_attribute dev_attr_##_name = { \
.attr = { \
.name = __stringify(_name), \
- .mode = 0, \
- .owner = THIS_MODULE }, \
+ .mode = 0 }, \
.show = NULL, \
.store = NULL, \
}
@@ -980,10 +979,9 @@ static int asus_hotk_add(struct acpi_device *device)
printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
- hotk = kmalloc(sizeof(struct asus_hotk), GFP_KERNEL);
+ hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
if (!hotk)
return -ENOMEM;
- memset(hotk, 0, sizeof(struct asus_hotk));
hotk->handle = device->handle;
strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 07a085ccbd5b..6497872df524 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -41,18 +41,16 @@ struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_s
if (buffer_size > IBMASM_CMD_MAX_BUFFER_SIZE)
return NULL;
- cmd = kmalloc(sizeof(struct command), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct command), GFP_KERNEL);
if (cmd == NULL)
return NULL;
- memset(cmd, 0, sizeof(*cmd));
- cmd->buffer = kmalloc(buffer_size, GFP_KERNEL);
+ cmd->buffer = kzalloc(buffer_size, GFP_KERNEL);
if (cmd->buffer == NULL) {
kfree(cmd);
return NULL;
}
- memset(cmd->buffer, 0, buffer_size);
cmd->buffer_size = buffer_size;
kobject_init(&cmd->kobj);
@@ -72,7 +70,7 @@ struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_s
static void free_command(struct kobject *kobj)
{
struct command *cmd = to_command(kobj);
-
+
list_del(&cmd->queue_node);
atomic_dec(&command_count);
dbg("command count: %d\n", atomic_read(&command_count));
@@ -113,14 +111,14 @@ static inline void do_exec_command(struct service_processor *sp)
exec_next_command(sp);
}
}
-
+
/**
* exec_command
* send a command to a service processor
* Commands are executed sequentially. One command (sp->current_command)
* is sent to the service processor. Once the interrupt handler gets a
* message of type command_response, the message is copied into
- * the current commands buffer,
+ * the current commands buffer,
*/
void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
{
@@ -160,7 +158,7 @@ static void exec_next_command(struct service_processor *sp)
}
}
-/**
+/**
* Sleep until a command has failed or a response has been received
* and the command status been updated by the interrupt handler.
* (see receive_response).
@@ -182,8 +180,8 @@ void ibmasm_receive_command_response(struct service_processor *sp, void *respons
{
struct command *cmd = sp->current_command;
- if (!sp->current_command)
- return;
+ if (!sp->current_command)
+ return;
memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
cmd->status = IBMASM_CMD_COMPLETE;
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 13c52f866e2e..3dd2dfb8da17 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -44,11 +44,11 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
size = message_size;
switch (header->type) {
- case sp_event:
+ case sp_event:
ibmasm_receive_event(sp, message, size);
break;
case sp_command_response:
- ibmasm_receive_command_response(sp, message, size);
+ ibmasm_receive_command_response(sp, message, size);
break;
case sp_heartbeat:
ibmasm_receive_heartbeat(sp, message, size);
@@ -95,7 +95,7 @@ int ibmasm_send_driver_vpd(struct service_processor *sp)
strcat(vpd_data, IBMASM_DRIVER_VPD);
vpd_data[10] = 0;
vpd_data[15] = 0;
-
+
ibmasm_exec_command(sp, command);
ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
@@ -118,7 +118,7 @@ struct os_state_command {
* During driver init this function is called with os state "up".
* This causes the service processor to start sending heartbeats the
* driver.
- * During driver exit the function is called with os state "down",
+ * During driver exit the function is called with os state "down",
* causing the service processor to stop the heartbeats.
*/
int ibmasm_send_os_state(struct service_processor *sp, int os_state)
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 2d21c2741b6a..6cbba1afef35 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index fe1e819235a4..fda6a4d3bf23 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -51,7 +51,7 @@ static void wake_up_event_readers(struct service_processor *sp)
* event readers.
* There is no reader marker in the buffer, therefore readers are
* responsible for keeping up with the writer, or they will loose events.
- */
+ */
void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
{
struct event_buffer *buffer = sp->event_buffer;
@@ -77,13 +77,13 @@ void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int
static inline int event_available(struct event_buffer *b, struct event_reader *r)
{
- return (r->next_serial_number < b->next_serial_number);
+ return (r->next_serial_number < b->next_serial_number);
}
/**
* get_next_event
* Called by event readers (initiated from user space through the file
- * system).
+ * system).
* Sleeps until a new event is available.
*/
int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 7fd7a43e38de..3036e785b3e4 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index 958c957a5e75..bf2c738d2b72 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -26,9 +26,9 @@ struct i2o_header {
u8 version;
u8 message_flags;
u16 message_size;
- u8 target;
+ u8 target;
u8 initiator_and_target;
- u8 initiator;
+ u8 initiator;
u8 function;
u32 initiator_context;
};
@@ -64,12 +64,12 @@ static inline unsigned short outgoing_message_size(unsigned int data_size)
size = sizeof(struct i2o_header) + data_size;
i2o_size = size / sizeof(u32);
-
+
if (size % sizeof(u32))
i2o_size++;
return i2o_size;
-}
+}
static inline u32 incoming_data_size(struct i2o_message *i2o_message)
{
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 48d5abebfc30..de860bc6d3f5 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -58,8 +58,8 @@ static inline char *get_timestamp(char *buf)
return buf;
}
-#define IBMASM_CMD_PENDING 0
-#define IBMASM_CMD_COMPLETE 1
+#define IBMASM_CMD_PENDING 0
+#define IBMASM_CMD_COMPLETE 1
#define IBMASM_CMD_FAILED 2
#define IBMASM_CMD_TIMEOUT_NORMAL 45
@@ -163,55 +163,55 @@ struct service_processor {
};
/* command processing */
-extern struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
-extern void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
-extern void ibmasm_wait_for_response(struct command *cmd, int timeout);
-extern void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
+struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
+void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
+void ibmasm_wait_for_response(struct command *cmd, int timeout);
+void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
/* event processing */
-extern int ibmasm_event_buffer_init(struct service_processor *sp);
-extern void ibmasm_event_buffer_exit(struct service_processor *sp);
-extern void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
-extern void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
-extern void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
-extern int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
-extern void ibmasm_cancel_next_event(struct event_reader *reader);
+int ibmasm_event_buffer_init(struct service_processor *sp);
+void ibmasm_event_buffer_exit(struct service_processor *sp);
+void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
+void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
+int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
+void ibmasm_cancel_next_event(struct event_reader *reader);
/* heartbeat - from SP to OS */
-extern void ibmasm_register_panic_notifier(void);
-extern void ibmasm_unregister_panic_notifier(void);
-extern int ibmasm_heartbeat_init(struct service_processor *sp);
-extern void ibmasm_heartbeat_exit(struct service_processor *sp);
-extern void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
+void ibmasm_register_panic_notifier(void);
+void ibmasm_unregister_panic_notifier(void);
+int ibmasm_heartbeat_init(struct service_processor *sp);
+void ibmasm_heartbeat_exit(struct service_processor *sp);
+void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
/* reverse heartbeat - from OS to SP */
-extern void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
-extern int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
-extern void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
+void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
+void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
/* dot commands */
-extern void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
-extern int ibmasm_send_driver_vpd(struct service_processor *sp);
-extern int ibmasm_send_os_state(struct service_processor *sp, int os_state);
+void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
+int ibmasm_send_driver_vpd(struct service_processor *sp);
+int ibmasm_send_os_state(struct service_processor *sp, int os_state);
/* low level message processing */
-extern int ibmasm_send_i2o_message(struct service_processor *sp);
-extern irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
+int ibmasm_send_i2o_message(struct service_processor *sp);
+irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
/* remote console */
-extern void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
-extern int ibmasm_init_remote_input_dev(struct service_processor *sp);
-extern void ibmasm_free_remote_input_dev(struct service_processor *sp);
+void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
+int ibmasm_init_remote_input_dev(struct service_processor *sp);
+void ibmasm_free_remote_input_dev(struct service_processor *sp);
/* file system */
-extern int ibmasmfs_register(void);
-extern void ibmasmfs_unregister(void);
-extern void ibmasmfs_add_sp(struct service_processor *sp);
+int ibmasmfs_register(void);
+void ibmasmfs_unregister(void);
+void ibmasmfs_add_sp(struct service_processor *sp);
/* uart */
#ifdef CONFIG_SERIAL_8250
-extern void ibmasm_register_uart(struct service_processor *sp);
-extern void ibmasm_unregister_uart(struct service_processor *sp);
+void ibmasm_register_uart(struct service_processor *sp);
+void ibmasm_unregister_uart(struct service_processor *sp);
#else
#define ibmasm_register_uart(sp) do { } while(0)
#define ibmasm_unregister_uart(sp) do { } while(0)
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c436d3de8b8b..22a7e8ba211d 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,12 +17,12 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
/*
- * Parts of this code are based on an article by Jonathan Corbet
+ * Parts of this code are based on an article by Jonathan Corbet
* that appeared in Linux Weekly News.
*/
@@ -55,22 +55,22 @@
* For each service processor the following files are created:
*
* command: execute dot commands
- * write: execute a dot command on the service processor
- * read: return the result of a previously executed dot command
+ * write: execute a dot command on the service processor
+ * read: return the result of a previously executed dot command
*
* events: listen for service processor events
- * read: sleep (interruptible) until an event occurs
+ * read: sleep (interruptible) until an event occurs
* write: wakeup sleeping event listener
*
* reverse_heartbeat: send a heartbeat to the service processor
- * read: sleep (interruptible) until the reverse heartbeat fails
+ * read: sleep (interruptible) until the reverse heartbeat fails
* write: wakeup sleeping heartbeat listener
*
* remote_video/width
* remote_video/height
* remote_video/width: control remote display settings
- * write: set value
- * read: read value
+ * write: set value
+ * read: read value
*/
#include <linux/fs.h>
@@ -155,7 +155,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
static struct dentry *ibmasmfs_create_file (struct super_block *sb,
struct dentry *parent,
- const char *name,
+ const char *name,
const struct file_operations *fops,
void *data,
int mode)
@@ -261,7 +261,7 @@ static int command_file_close(struct inode *inode, struct file *file)
struct ibmasmfs_command_data *command_data = file->private_data;
if (command_data->command)
- command_put(command_data->command);
+ command_put(command_data->command);
kfree(command_data);
return 0;
@@ -348,7 +348,7 @@ static ssize_t command_file_write(struct file *file, const char __user *ubuff, s
static int event_file_open(struct inode *inode, struct file *file)
{
struct ibmasmfs_event_data *event_data;
- struct service_processor *sp;
+ struct service_processor *sp;
if (!inode->i_private)
return -ENODEV;
@@ -563,17 +563,16 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user *
if (*offset != 0)
return 0;
- buff = kmalloc (count + 1, GFP_KERNEL);
+ buff = kzalloc (count + 1, GFP_KERNEL);
if (!buff)
return -ENOMEM;
- memset(buff, 0x0, count + 1);
if (copy_from_user(buff, ubuff, count)) {
kfree(buff);
return -EFAULT;
}
-
+
value = simple_strtoul(buff, NULL, 10);
writel(value, address);
kfree(buff);
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index a3c589b7cbfa..4b2398e27fd5 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index e5ed59c589aa..766766523a60 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -48,9 +48,9 @@
#define INTR_CONTROL_REGISTER 0x13A4
#define SCOUT_COM_A_BASE 0x0000
-#define SCOUT_COM_B_BASE 0x0100
-#define SCOUT_COM_C_BASE 0x0200
-#define SCOUT_COM_D_BASE 0x0300
+#define SCOUT_COM_B_BASE 0x0100
+#define SCOUT_COM_C_BASE 0x0200
+#define SCOUT_COM_D_BASE 0x0300
static inline int sp_interrupt_pending(void __iomem *base_address)
{
@@ -86,12 +86,12 @@ static inline void disable_sp_interrupts(void __iomem *base_address)
static inline void enable_uart_interrupts(void __iomem *base_address)
{
- ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
+ ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
}
static inline void disable_uart_interrupts(void __iomem *base_address)
{
- ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
+ ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
}
#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE )
@@ -111,7 +111,7 @@ static inline u32 get_mfa_outbound(void __iomem *base_address)
static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
{
- writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
+ writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
}
static inline u32 get_mfa_inbound(void __iomem *base_address)
@@ -126,7 +126,7 @@ static inline u32 get_mfa_inbound(void __iomem *base_address)
static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
{
- writel(mfa, base_address + INBOUND_QUEUE_PORT);
+ writel(mfa, base_address + INBOUND_QUEUE_PORT);
}
static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 2f3bddfab937..4f9d4a9da983 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,9 +18,9 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
- * This driver is based on code originally written by Pete Reynolds
+ * This driver is based on code originally written by Pete Reynolds
* and others.
*
*/
@@ -30,13 +30,13 @@
*
* 1) When loaded it sends a message to the service processor,
* indicating that an OS is * running. This causes the service processor
- * to send periodic heartbeats to the OS.
+ * to send periodic heartbeats to the OS.
*
* 2) Answers the periodic heartbeats sent by the service processor.
* Failure to do so would result in system reboot.
*
* 3) Acts as a pass through for dot commands sent from user applications.
- * The interface for this is the ibmasmfs file system.
+ * The interface for this is the ibmasmfs file system.
*
* 4) Allows user applications to register for event notification. Events
* are sent to the driver through interrupts. They can be read from user
@@ -77,13 +77,12 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
/* vnc client won't work without bus-mastering */
pci_set_master(pdev);
- sp = kmalloc(sizeof(struct service_processor), GFP_KERNEL);
+ sp = kzalloc(sizeof(struct service_processor), GFP_KERNEL);
if (sp == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
result = -ENOMEM;
goto error_kmalloc;
}
- memset(sp, 0, sizeof(struct service_processor));
spin_lock_init(&sp->lock);
INIT_LIST_HEAD(&sp->command_queue);
@@ -105,7 +104,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
}
sp->irq = pdev->irq;
- sp->base_address = ioremap(pci_resource_start(pdev, 0),
+ sp->base_address = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (sp->base_address == 0) {
dev_err(sp->dev, "Failed to ioremap pci memory\n");
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index f8fdb2d5417e..bec9e2c44bef 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
@@ -36,10 +36,10 @@ static struct {
unsigned char command[3];
} rhb_dot_cmd = {
.header = {
- .type = sp_read,
+ .type = sp_read,
.command_size = 3,
.data_size = 0,
- .status = 0
+ .status = 0
},
.command = { 4, 3, 6 }
};
@@ -76,9 +76,9 @@ int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_
if (cmd->status != IBMASM_CMD_COMPLETE)
times_failed++;
- wait_event_interruptible_timeout(rhb->wait,
+ wait_event_interruptible_timeout(rhb->wait,
rhb->stopped,
- REVERSE_HEARTBEAT_TIMEOUT * HZ);
+ REVERSE_HEARTBEAT_TIMEOUT * HZ);
if (signal_pending(current) || rhb->stopped) {
result = -EINTR;
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
index a40fda6c402c..0550ce075fc4 100644
--- a/drivers/misc/ibmasm/remote.c
+++ b/drivers/misc/ibmasm/remote.c
@@ -28,11 +28,10 @@
#include "ibmasm.h"
#include "remote.h"
-static int xmax = 1600;
-static int ymax = 1200;
+#define MOUSE_X_MAX 1600
+#define MOUSE_Y_MAX 1200
-
-static unsigned short xlate_high[XLATE_SIZE] = {
+static const unsigned short xlate_high[XLATE_SIZE] = {
[KEY_SYM_ENTER & 0xff] = KEY_ENTER,
[KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
[KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
@@ -81,7 +80,8 @@ static unsigned short xlate_high[XLATE_SIZE] = {
[KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
[KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
};
-static unsigned short xlate[XLATE_SIZE] = {
+
+static const unsigned short xlate[XLATE_SIZE] = {
[NO_KEYCODE] = KEY_RESERVED,
[KEY_SYM_SPACE] = KEY_SPACE,
[KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE,
@@ -133,19 +133,16 @@ static unsigned short xlate[XLATE_SIZE] = {
[KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z,
};
-static char remote_mouse_name[] = "ibmasm RSA I remote mouse";
-static char remote_keybd_name[] = "ibmasm RSA I remote keyboard";
-
static void print_input(struct remote_input *input)
{
if (input->type == INPUT_TYPE_MOUSE) {
unsigned char buttons = input->mouse_buttons;
dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
input->data.mouse.x, input->data.mouse.y,
- (buttons)?" -- buttons:":"",
- (buttons & REMOTE_BUTTON_LEFT)?"left ":"",
- (buttons & REMOTE_BUTTON_MIDDLE)?"middle ":"",
- (buttons & REMOTE_BUTTON_RIGHT)?"right":""
+ (buttons) ? " -- buttons:" : "",
+ (buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
+ (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
+ (buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
);
} else {
dbg("remote keypress (code, flag, down):"
@@ -180,7 +177,7 @@ static void send_keyboard_event(struct input_dev *dev,
key = xlate_high[code & 0xff];
else
key = xlate[code];
- input_report_key(dev, key, (input->data.keyboard.key_down) ? 1 : 0);
+ input_report_key(dev, key, input->data.keyboard.key_down);
input_sync(dev);
}
@@ -228,20 +225,22 @@ int ibmasm_init_remote_input_dev(struct service_processor *sp)
mouse_dev->id.vendor = pdev->vendor;
mouse_dev->id.product = pdev->device;
mouse_dev->id.version = 1;
+ mouse_dev->dev.parent = sp->dev;
mouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) |
BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
set_bit(BTN_TOUCH, mouse_dev->keybit);
- mouse_dev->name = remote_mouse_name;
- input_set_abs_params(mouse_dev, ABS_X, 0, xmax, 0, 0);
- input_set_abs_params(mouse_dev, ABS_Y, 0, ymax, 0, 0);
+ mouse_dev->name = "ibmasm RSA I remote mouse";
+ input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
+ input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
- mouse_dev->id.bustype = BUS_PCI;
+ keybd_dev->id.bustype = BUS_PCI;
keybd_dev->id.vendor = pdev->vendor;
keybd_dev->id.product = pdev->device;
- mouse_dev->id.version = 2;
+ keybd_dev->id.version = 2;
+ keybd_dev->dev.parent = sp->dev;
keybd_dev->evbit[0] = BIT(EV_KEY);
- keybd_dev->name = remote_keybd_name;
+ keybd_dev->name = "ibmasm RSA I remote keyboard";
for (i = 0; i < XLATE_SIZE; i++) {
if (xlate_high[i])
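
The remote.c hunk fixes a copy-and-paste slip (bustype and version were written into mouse_dev where keybd_dev was meant) and gives both input devices a parent so they appear under the PCI device in sysfs. A minimal sketch of per-device id/parent setup under those assumptions; the helper name and id values are placeholders.

#include <linux/input.h>

static struct input_dev *example_register(struct device *parent, const char *name)
{
	struct input_dev *dev = input_allocate_device();

	if (!dev)
		return NULL;

	dev->name = name;
	dev->id.bustype = BUS_PCI;	/* each device gets its own id fields */
	dev->id.version = 1;
	dev->dev.parent = parent;	/* ties the input node to the PCI device */

	if (input_register_device(dev)) {
		input_free_device(dev);
		return NULL;
	}
	return dev;
}
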
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index b7076a8442d2..72acf5af7a2a 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* Orignally written by Pete Reynolds
*/
@@ -73,7 +73,7 @@ struct keyboard_input {
-struct remote_input {
+struct remote_input {
union {
struct mouse_input mouse;
struct keyboard_input keyboard;
@@ -85,7 +85,7 @@ struct remote_input {
unsigned char pad3;
};
-#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
+#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
@@ -93,7 +93,7 @@ struct remote_input {
#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
-#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
+#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 9783caf49696..93baa350d698 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asböck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index 41e901f53e7c..932a415197b3 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -23,6 +23,8 @@
* msi-laptop.c - MSI S270 laptop support. This laptop is sold under
* various brands, including "Cytron/TCM/Medion/Tchibo MD96100".
*
+ * Driver also supports S271, S420 models.
+ *
* This driver exports a few files in /sys/devices/platform/msi-laptop-pf/:
*
* lcd_level - Screen brightness: contains a single integer in the
@@ -281,25 +283,56 @@ static struct platform_device *msipf_device;
/* Initialization */
+static int dmi_check_cb(struct dmi_system_id *id)
+{
+ printk("msi-laptop: Identified laptop model '%s'.\n", id->ident);
+ return 0;
+}
+
static struct dmi_system_id __initdata msi_dmi_table[] = {
{
.ident = "MSI S270",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
- }
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT'L CO.,LTD")
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI S271",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1058"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0581"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1058")
+ },
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI S420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1412"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "MS-1412")
+ },
+ .callback = dmi_check_cb
},
{
.ident = "Medion MD96100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
- }
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT'L CO.,LTD")
+ },
+ .callback = dmi_check_cb
},
{ }
};
-
static int __init msi_init(void)
{
int ret;
@@ -394,3 +427,8 @@ MODULE_AUTHOR("Lennart Poettering");
MODULE_DESCRIPTION("MSI Laptop Support");
MODULE_VERSION(MSI_DRIVER_VERSION);
MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("dmi:*:svnMICRO-STARINT'LCO.,LTD:pnMS-1013:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1058:pvr0581:rvnMSI:rnMS-1058:*:ct10:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1412:*:rvnMSI:rnMS-1412:*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
+MODULE_ALIAS("dmi:*:svnNOTEBOOK:pnSAM2000:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index 8ee0321ef1c8..303e48ca0e8a 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -142,43 +142,124 @@ struct sony_laptop_keypress {
int key;
};
-/* Correspondance table between sonypi events and input layer events */
-static struct {
- int sonypiev;
- int inputev;
-} sony_laptop_inputkeys[] = {
- { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA },
- { SONYPI_EVENT_FNKEY_ONLY, KEY_FN },
- { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC },
- { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 },
- { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 },
- { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 },
- { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 },
- { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 },
- { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 },
- { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 },
- { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 },
- { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 },
- { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 },
- { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 },
- { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 },
- { SONYPI_EVENT_FNKEY_1, KEY_FN_1 },
- { SONYPI_EVENT_FNKEY_2, KEY_FN_2 },
- { SONYPI_EVENT_FNKEY_D, KEY_FN_D },
- { SONYPI_EVENT_FNKEY_E, KEY_FN_E },
- { SONYPI_EVENT_FNKEY_F, KEY_FN_F },
- { SONYPI_EVENT_FNKEY_S, KEY_FN_S },
- { SONYPI_EVENT_FNKEY_B, KEY_FN_B },
- { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE },
- { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE },
- { SONYPI_EVENT_PKEY_P1, KEY_PROG1 },
- { SONYPI_EVENT_PKEY_P2, KEY_PROG2 },
- { SONYPI_EVENT_PKEY_P3, KEY_PROG3 },
- { SONYPI_EVENT_BACK_PRESSED, KEY_BACK },
- { SONYPI_EVENT_HELP_PRESSED, KEY_HELP },
- { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM },
- { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB },
- { 0, 0 },
+/* Correspondance table between sonypi events
+ * and input layer indexes in the keymap
+ */
+static int sony_laptop_input_index[] = {
+ -1, /* no event */
+ -1, /* SONYPI_EVENT_JOGDIAL_DOWN */
+ -1, /* SONYPI_EVENT_JOGDIAL_UP */
+ -1, /* SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
+ -1, /* SONYPI_EVENT_JOGDIAL_UP_PRESSED */
+ -1, /* SONYPI_EVENT_JOGDIAL_PRESSED */
+ -1, /* SONYPI_EVENT_JOGDIAL_RELEASED */
+ 0, /* SONYPI_EVENT_CAPTURE_PRESSED */
+ 1, /* SONYPI_EVENT_CAPTURE_RELEASED */
+ 2, /* SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+ 3, /* SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+ 4, /* SONYPI_EVENT_FNKEY_ESC */
+ 5, /* SONYPI_EVENT_FNKEY_F1 */
+ 6, /* SONYPI_EVENT_FNKEY_F2 */
+ 7, /* SONYPI_EVENT_FNKEY_F3 */
+ 8, /* SONYPI_EVENT_FNKEY_F4 */
+ 9, /* SONYPI_EVENT_FNKEY_F5 */
+ 10, /* SONYPI_EVENT_FNKEY_F6 */
+ 11, /* SONYPI_EVENT_FNKEY_F7 */
+ 12, /* SONYPI_EVENT_FNKEY_F8 */
+ 13, /* SONYPI_EVENT_FNKEY_F9 */
+ 14, /* SONYPI_EVENT_FNKEY_F10 */
+ 15, /* SONYPI_EVENT_FNKEY_F11 */
+ 16, /* SONYPI_EVENT_FNKEY_F12 */
+ 17, /* SONYPI_EVENT_FNKEY_1 */
+ 18, /* SONYPI_EVENT_FNKEY_2 */
+ 19, /* SONYPI_EVENT_FNKEY_D */
+ 20, /* SONYPI_EVENT_FNKEY_E */
+ 21, /* SONYPI_EVENT_FNKEY_F */
+ 22, /* SONYPI_EVENT_FNKEY_S */
+ 23, /* SONYPI_EVENT_FNKEY_B */
+ 24, /* SONYPI_EVENT_BLUETOOTH_PRESSED */
+ 25, /* SONYPI_EVENT_PKEY_P1 */
+ 26, /* SONYPI_EVENT_PKEY_P2 */
+ 27, /* SONYPI_EVENT_PKEY_P3 */
+ 28, /* SONYPI_EVENT_BACK_PRESSED */
+ -1, /* SONYPI_EVENT_LID_CLOSED */
+ -1, /* SONYPI_EVENT_LID_OPENED */
+ 29, /* SONYPI_EVENT_BLUETOOTH_ON */
+ 30, /* SONYPI_EVENT_BLUETOOTH_OFF */
+ 31, /* SONYPI_EVENT_HELP_PRESSED */
+ 32, /* SONYPI_EVENT_FNKEY_ONLY */
+ 33, /* SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+ 34, /* SONYPI_EVENT_JOGDIAL_FAST_UP */
+ 35, /* SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+ 36, /* SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+ 37, /* SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+ 38, /* SONYPI_EVENT_JOGDIAL_VFAST_UP */
+ 39, /* SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+ 40, /* SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+ 41, /* SONYPI_EVENT_ZOOM_PRESSED */
+ 42, /* SONYPI_EVENT_THUMBPHRASE_PRESSED */
+ 43, /* SONYPI_EVENT_MEYE_FACE */
+ 44, /* SONYPI_EVENT_MEYE_OPPOSITE */
+ 45, /* SONYPI_EVENT_MEMORYSTICK_INSERT */
+ 46, /* SONYPI_EVENT_MEMORYSTICK_EJECT */
+ -1, /* SONYPI_EVENT_ANYBUTTON_RELEASED */
+ -1, /* SONYPI_EVENT_BATTERY_INSERT */
+ -1, /* SONYPI_EVENT_BATTERY_REMOVE */
+ -1, /* SONYPI_EVENT_FNKEY_RELEASED */
+ 47, /* SONYPI_EVENT_WIRELESS_ON */
+ 48, /* SONYPI_EVENT_WIRELESS_OFF */
+};
+
+static int sony_laptop_input_keycode_map[] = {
+ KEY_CAMERA, /* 0 SONYPI_EVENT_CAPTURE_PRESSED */
+ KEY_RESERVED, /* 1 SONYPI_EVENT_CAPTURE_RELEASED */
+ KEY_RESERVED, /* 2 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+ KEY_RESERVED, /* 3 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+ KEY_FN_ESC, /* 4 SONYPI_EVENT_FNKEY_ESC */
+ KEY_FN_F1, /* 5 SONYPI_EVENT_FNKEY_F1 */
+ KEY_FN_F2, /* 6 SONYPI_EVENT_FNKEY_F2 */
+ KEY_FN_F3, /* 7 SONYPI_EVENT_FNKEY_F3 */
+ KEY_FN_F4, /* 8 SONYPI_EVENT_FNKEY_F4 */
+ KEY_FN_F5, /* 9 SONYPI_EVENT_FNKEY_F5 */
+ KEY_FN_F6, /* 10 SONYPI_EVENT_FNKEY_F6 */
+ KEY_FN_F7, /* 11 SONYPI_EVENT_FNKEY_F7 */
+ KEY_FN_F8, /* 12 SONYPI_EVENT_FNKEY_F8 */
+ KEY_FN_F9, /* 13 SONYPI_EVENT_FNKEY_F9 */
+ KEY_FN_F10, /* 14 SONYPI_EVENT_FNKEY_F10 */
+ KEY_FN_F11, /* 15 SONYPI_EVENT_FNKEY_F11 */
+ KEY_FN_F12, /* 16 SONYPI_EVENT_FNKEY_F12 */
+ KEY_FN_F1, /* 17 SONYPI_EVENT_FNKEY_1 */
+ KEY_FN_F2, /* 18 SONYPI_EVENT_FNKEY_2 */
+ KEY_FN_D, /* 19 SONYPI_EVENT_FNKEY_D */
+ KEY_FN_E, /* 20 SONYPI_EVENT_FNKEY_E */
+ KEY_FN_F, /* 21 SONYPI_EVENT_FNKEY_F */
+ KEY_FN_S, /* 22 SONYPI_EVENT_FNKEY_S */
+ KEY_FN_B, /* 23 SONYPI_EVENT_FNKEY_B */
+ KEY_BLUETOOTH, /* 24 SONYPI_EVENT_BLUETOOTH_PRESSED */
+ KEY_PROG1, /* 25 SONYPI_EVENT_PKEY_P1 */
+ KEY_PROG2, /* 26 SONYPI_EVENT_PKEY_P2 */
+ KEY_PROG3, /* 27 SONYPI_EVENT_PKEY_P3 */
+ KEY_BACK, /* 28 SONYPI_EVENT_BACK_PRESSED */
+ KEY_BLUETOOTH, /* 29 SONYPI_EVENT_BLUETOOTH_ON */
+ KEY_BLUETOOTH, /* 30 SONYPI_EVENT_BLUETOOTH_OFF */
+ KEY_HELP, /* 31 SONYPI_EVENT_HELP_PRESSED */
+ KEY_FN, /* 32 SONYPI_EVENT_FNKEY_ONLY */
+ KEY_RESERVED, /* 33 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+ KEY_RESERVED, /* 34 SONYPI_EVENT_JOGDIAL_FAST_UP */
+ KEY_RESERVED, /* 35 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+ KEY_RESERVED, /* 36 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+ KEY_RESERVED, /* 37 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+ KEY_RESERVED, /* 38 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+ KEY_RESERVED, /* 39 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+ KEY_RESERVED, /* 40 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+ KEY_ZOOM, /* 41 SONYPI_EVENT_ZOOM_PRESSED */
+ BTN_THUMB, /* 42 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+ KEY_RESERVED, /* 43 SONYPI_EVENT_MEYE_FACE */
+ KEY_RESERVED, /* 44 SONYPI_EVENT_MEYE_OPPOSITE */
+ KEY_RESERVED, /* 45 SONYPI_EVENT_MEMORYSTICK_INSERT */
+ KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
+ KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */
+ KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */
};
/* release buttons after a short delay if pressed */
@@ -202,7 +283,6 @@ static void sony_laptop_report_input_event(u8 event)
struct input_dev *jog_dev = sony_laptop_input.jog_dev;
struct input_dev *key_dev = sony_laptop_input.key_dev;
struct sony_laptop_keypress kp = { NULL };
- int i;
if (event == SONYPI_EVENT_FNKEY_RELEASED) {
/* Nothing, not all VAIOs generate this event */
@@ -231,17 +311,22 @@ static void sony_laptop_report_input_event(u8 event)
break;
default:
- for (i = 0; sony_laptop_inputkeys[i].sonypiev; i++)
- if (event == sony_laptop_inputkeys[i].sonypiev) {
+ if (event > ARRAY_SIZE (sony_laptop_input_keycode_map)) {
+ dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
+ break;
+ }
+ if (sony_laptop_input_index[event] != -1) {
+ kp.key = sony_laptop_input_keycode_map[sony_laptop_input_index[event]];
+ if (kp.key != KEY_UNKNOWN)
kp.dev = key_dev;
- kp.key = sony_laptop_inputkeys[i].inputev;
- break;
- }
+ }
break;
}
if (kp.dev) {
input_report_key(kp.dev, kp.key, 1);
+ /* we emit the scancode so we can always remap the key */
+ input_event(kp.dev, EV_MSC, MSC_SCAN, event);
input_sync(kp.dev);
kfifo_put(sony_laptop_input.fifo,
(unsigned char *)&kp, sizeof(kp));
@@ -296,11 +381,18 @@ static int sony_laptop_setup_input(void)
key_dev->id.vendor = PCI_VENDOR_ID_SONY;
/* Initialize the Input Drivers: special keys */
- key_dev->evbit[0] = BIT(EV_KEY);
- for (i = 0; sony_laptop_inputkeys[i].sonypiev; i++)
- if (sony_laptop_inputkeys[i].inputev)
- set_bit(sony_laptop_inputkeys[i].inputev,
- key_dev->keybit);
+ set_bit(EV_KEY, key_dev->evbit);
+ set_bit(EV_MSC, key_dev->evbit);
+ set_bit(MSC_SCAN, key_dev->mscbit);
+ key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]);
+ key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map);
+ key_dev->keycode = &sony_laptop_input_keycode_map;
+ for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) {
+ if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) {
+ set_bit(sony_laptop_input_keycode_map[i],
+ key_dev->keybit);
+ }
+ }
error = input_register_device(key_dev);
if (error)
@@ -487,6 +579,14 @@ SNC_HANDLE_NAMES(audiopower_set, "AZPW");
SNC_HANDLE_NAMES(lanpower_get, "GLNP");
SNC_HANDLE_NAMES(lanpower_set, "LNPW");
+SNC_HANDLE_NAMES(lidstate_get, "GLID");
+
+SNC_HANDLE_NAMES(indicatorlamp_get, "GILS");
+SNC_HANDLE_NAMES(indicatorlamp_set, "SILS");
+
+SNC_HANDLE_NAMES(gainbass_get, "GMGB");
+SNC_HANDLE_NAMES(gainbass_set, "CMGB");
+
SNC_HANDLE_NAMES(PID_get, "GPID");
SNC_HANDLE_NAMES(CTR_get, "GCTR");
@@ -507,6 +607,12 @@ static struct sony_nc_value sony_nc_values[] = {
boolean_validate, 0),
SNC_HANDLE(lanpower, snc_lanpower_get, snc_lanpower_set,
boolean_validate, 1),
+ SNC_HANDLE(lidstate, snc_lidstate_get, NULL,
+ boolean_validate, 0),
+ SNC_HANDLE(indicatorlamp, snc_indicatorlamp_get, snc_indicatorlamp_set,
+ boolean_validate, 0),
+ SNC_HANDLE(gainbass, snc_gainbass_get, snc_gainbass_set,
+ boolean_validate, 0),
/* unknown methods */
SNC_HANDLE(PID, snc_PID_get, NULL, NULL, 1),
SNC_HANDLE(CTR, snc_CTR_get, snc_CTR_set, NULL, 1),
@@ -689,13 +795,116 @@ static struct backlight_ops sony_backlight_ops = {
};
/*
+ * New SNC-only Vaios event mapping to driver known keys
+ */
+struct sony_nc_event {
+ u8 data;
+ u8 event;
+};
+
+static struct sony_nc_event *sony_nc_events;
+
+/* Vaio C* --maybe also FE*, N* and AR* ?-- special init sequence
+ * for Fn keys
+ */
+static int sony_nc_C_enable(struct dmi_system_id *id)
+{
+ int result = 0;
+
+ printk(KERN_NOTICE DRV_PFX "detected %s\n", id->ident);
+
+ sony_nc_events = id->driver_data;
+
+ if (acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x4, &result) < 0
+ || acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x2, &result) < 0
+ || acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0x10, &result) < 0
+ || acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x0, &result) < 0
+ || acpi_callsetfunc(sony_nc_acpi_handle, "SN03", 0x2, &result) < 0
+ || acpi_callsetfunc(sony_nc_acpi_handle, "SN07", 0x101, &result) < 0) {
+ printk(KERN_WARNING DRV_PFX "failed to initialize SNC, some "
+ "functionalities may be missing\n");
+ return 1;
+ }
+ return 0;
+}
+
+static struct sony_nc_event sony_C_events[] = {
+ { 0x81, SONYPI_EVENT_FNKEY_F1 },
+ { 0x01, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x85, SONYPI_EVENT_FNKEY_F5 },
+ { 0x05, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x86, SONYPI_EVENT_FNKEY_F6 },
+ { 0x06, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x87, SONYPI_EVENT_FNKEY_F7 },
+ { 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8A, SONYPI_EVENT_FNKEY_F10 },
+ { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8C, SONYPI_EVENT_FNKEY_F12 },
+ { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0, 0 },
+};
+
+/* SNC-only model map */
+struct dmi_system_id sony_nc_ids[] = {
+ {
+ .ident = "Sony Vaio FE Series",
+ .callback = sony_nc_C_enable,
+ .driver_data = sony_C_events,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FE"),
+ },
+ },
+ {
+ .ident = "Sony Vaio C Series",
+ .callback = sony_nc_C_enable,
+ .driver_data = sony_C_events,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-C"),
+ },
+ },
+ { }
+};
+
+/*
* ACPI callbacks
*/
static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
{
- dprintk("sony_acpi_notify, event: %d\n", event);
- sony_laptop_report_input_event(event);
- acpi_bus_generate_event(sony_nc_acpi_device, 1, event);
+ struct sony_nc_event *evmap;
+ u32 ev = event;
+ int result;
+
+ if (ev == 0x92) {
+ /* read the key pressed from EC.GECR
+ * A call to SN07 with 0x0202 will do it as well respecting
+ * the current protocol on different OSes
+ *
+ * Note: the path for GECR may be
+ * \_SB.PCI0.LPCB.EC (C, FE, AR, N and friends)
+ * \_SB.PCI0.PIB.EC0 (VGN-FR notifications are sent directly, no GECR)
+ *
+ * TODO: we may want to do the same for the older GHKE -need
+ * dmi list- so this snippet may become one more callback.
+ */
+ if (acpi_callsetfunc(handle, "SN07", 0x0202, &result) < 0)
+ dprintk("sony_acpi_notify, unable to decode event 0x%.2x\n", ev);
+ else
+ ev = result & 0xFF;
+ }
+
+ if (sony_nc_events)
+ for (evmap = sony_nc_events; evmap->event; evmap++) {
+ if (evmap->data == ev) {
+ ev = evmap->event;
+ break;
+ }
+ }
+
+ dprintk("sony_acpi_notify, event: 0x%.2x\n", ev);
+ sony_laptop_report_input_event(ev);
+ acpi_bus_generate_event(sony_nc_acpi_device, 1, ev);
}
static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -732,6 +941,10 @@ static int sony_nc_resume(struct acpi_device *device)
break;
}
}
+
+ /* re-initialize models with specific requirements */
+ dmi_check_system(sony_nc_ids);
+
return 0;
}
@@ -750,6 +963,15 @@ static int sony_nc_add(struct acpi_device *device)
sony_nc_acpi_handle = device->handle;
+ /* read device status */
+ result = acpi_bus_get_status(device);
+ /* bail IFF the above call was successful and the device is not present */
+ if (!result && !device->status.present) {
+ dprintk("Device not present\n");
+ result = -ENODEV;
+ goto outwalk;
+ }
+
if (debug) {
status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_nc_acpi_handle,
1, sony_walk_callback, NULL, NULL);
@@ -760,6 +982,15 @@ static int sony_nc_add(struct acpi_device *device)
}
}
+ /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1
+ * should be respected as we already checked for the device presence above */
+ if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
+ dprintk("Invoking _INI\n");
+ if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
+ NULL, NULL)))
+ dprintk("_INI Method failed\n");
+ }
+
/* setup input devices and helper fifo */
result = sony_laptop_setup_input();
if (result) {
@@ -772,7 +1003,7 @@ static int sony_nc_add(struct acpi_device *device)
ACPI_DEVICE_NOTIFY,
sony_acpi_notify, NULL);
if (ACPI_FAILURE(status)) {
- printk(KERN_WARNING DRV_PFX "unable to install notify handler\n");
+ printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status);
result = -ENODEV;
goto outinput;
}
@@ -795,6 +1026,9 @@ static int sony_nc_add(struct acpi_device *device)
}
+ /* initialize models with specific requirements */
+ dmi_check_system(sony_nc_ids);
+
result = sony_pf_add();
if (result)
goto outbacklight;
@@ -908,7 +1142,9 @@ static struct acpi_driver sony_nc_driver = {
#define SONYPI_DEVICE_TYPE2 0x00000002
#define SONYPI_DEVICE_TYPE3 0x00000004
-#define SONY_PIC_EV_MASK 0xff
+#define SONYPI_TYPE1_OFFSET 0x04
+#define SONYPI_TYPE2_OFFSET 0x12
+#define SONYPI_TYPE3_OFFSET 0x12
struct sony_pic_ioport {
struct acpi_resource_io io;
@@ -922,6 +1158,7 @@ struct sony_pic_irq {
struct sony_pic_dev {
int model;
+ u16 evport_offset;
u8 camera_power;
u8 bluetooth_power;
u8 wwan_power;
@@ -1917,7 +2154,8 @@ end:
*/
static int sony_pic_disable(struct acpi_device *device)
{
- if (ACPI_FAILURE(acpi_evaluate_object(device->handle, "_DIS", 0, NULL)))
+ if (ACPI_FAILURE(acpi_evaluate_object(device->handle,
+ "_DIS", NULL, NULL)))
return -ENXIO;
dprintk("Device disabled\n");
@@ -1998,20 +2236,17 @@ end:
static irqreturn_t sony_pic_irq(int irq, void *dev_id)
{
int i, j;
- u32 port_val = 0;
u8 ev = 0;
u8 data_mask = 0;
u8 device_event = 0;
struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
- acpi_os_read_port(dev->cur_ioport->io.minimum, &port_val,
- dev->cur_ioport->io.address_length);
- ev = port_val & SONY_PIC_EV_MASK;
- data_mask = 0xff & (port_val >> (dev->cur_ioport->io.address_length - 8));
+ ev = inb_p(dev->cur_ioport->io.minimum);
+ data_mask = inb_p(dev->cur_ioport->io.minimum + dev->evport_offset);
- dprintk("event (0x%.8x [%.2x] [%.2x]) at port 0x%.4x\n",
- port_val, ev, data_mask, dev->cur_ioport->io.minimum);
+ dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+ ev, data_mask, dev->cur_ioport->io.minimum, dev->evport_offset);
if (ev == 0x00 || ev == 0xff)
return IRQ_HANDLED;
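
The sony_pic_irq() rework above reads the event byte from the base I/O port and the data mask from a model-specific offset instead of one wide acpi_os_read_port() access. A minimal sketch of that split read; the structure, offset handling and return convention are illustrative only.

#include <linux/io.h>

struct example_port {
	unsigned int base;		/* cur_ioport->io.minimum in the driver */
	unsigned int ev_offset;		/* per-model event-port offset */
};

static int example_read_event(const struct example_port *p,
			      unsigned char *ev, unsigned char *mask)
{
	*ev = inb_p(p->base);
	*mask = inb_p(p->base + p->ev_offset);

	/* 0x00 and 0xff mean "nothing pending" on this hardware */
	return !(*ev == 0x00 || *ev == 0xff);
}
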
@@ -2102,6 +2337,20 @@ static int sony_pic_add(struct acpi_device *device)
spic_dev.model = sony_pic_detect_device_type();
mutex_init(&spic_dev.lock);
+ /* model specific characteristics */
+ switch(spic_dev.model) {
+ case SONYPI_DEVICE_TYPE1:
+ spic_dev.evport_offset = SONYPI_TYPE1_OFFSET;
+ break;
+ case SONYPI_DEVICE_TYPE3:
+ spic_dev.evport_offset = SONYPI_TYPE3_OFFSET;
+ break;
+ case SONYPI_DEVICE_TYPE2:
+ default:
+ spic_dev.evport_offset = SONYPI_TYPE2_OFFSET;
+ break;
+ }
+
/* read _PRS resources */
result = sony_pic_possible_resources(device);
if (result) {
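
The sony-laptop rework above replaces the flat event/keycode pairs with a two-level lookup: an index table maps each sonypi event to a slot in a remappable keycode map, and the raw event number is also emitted as a scancode so userspace can remap keys. A minimal sketch of that lookup under the same assumptions; the tables and report_event() below are illustrative, not the driver's.

#include <linux/input.h>
#include <linux/kernel.h>

static const int example_index[] = { -1, 0, 1 };	/* event -> keymap slot, -1 = no key */
static unsigned short example_keycode_map[] = { KEY_CAMERA, KEY_HELP };

static void report_event(struct input_dev *dev, unsigned int event)
{
	int slot;

	if (event >= ARRAY_SIZE(example_index))
		return;					/* unknown event */
	slot = example_index[event];
	if (slot < 0)
		return;					/* event carries no key */

	input_event(dev, EV_MSC, MSC_SCAN, event);	/* raw scancode for remapping */
	input_report_key(dev, example_keycode_map[slot], 1);
	input_sync(dev);
}
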
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 95c0b96e83f2..f15a58f7403f 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -21,8 +21,8 @@
* 02110-1301, USA.
*/
-#define IBM_VERSION "0.14"
-#define TPACPI_SYSFS_VERSION 0x000100
+#define IBM_VERSION "0.15"
+#define TPACPI_SYSFS_VERSION 0x010000
/*
* Changelog:
@@ -92,6 +92,29 @@ MODULE_LICENSE("GPL");
/* Please remove this in year 2009 */
MODULE_ALIAS("ibm_acpi");
+/*
+ * DMI matching for module autoloading
+ *
+ * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ *
+ * Only models listed in thinkwiki will be supported, so add yours
+ * if it is not there yet.
+ */
+#define IBM_BIOS_MODULE_ALIAS(__type) \
+ MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
+
+/* Non-ancient thinkpads */
+MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
+MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
+
+/* Ancient thinkpad BIOSes have to be identified by
+ * BIOS type or model number, and there are far less
+ * BIOS types than model numbers... */
+IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
+IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
+IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
+
#define __unused __attribute__ ((unused))
/****************************************************************************
@@ -106,7 +129,7 @@ MODULE_ALIAS("ibm_acpi");
* ACPI basic handles
*/
-static acpi_handle root_handle = NULL;
+static acpi_handle root_handle;
#define IBM_HANDLE(object, parent, paths...) \
static acpi_handle object##_handle; \
@@ -487,19 +510,36 @@ static char *next_cmd(char **cmds)
/****************************************************************************
****************************************************************************
*
- * Device model: hwmon and platform
+ * Device model: input, hwmon and platform
*
****************************************************************************
****************************************************************************/
-static struct platform_device *tpacpi_pdev = NULL;
-static struct class_device *tpacpi_hwmon = NULL;
+static struct platform_device *tpacpi_pdev;
+static struct class_device *tpacpi_hwmon;
+static struct input_dev *tpacpi_inputdev;
+
+
+static int tpacpi_resume_handler(struct platform_device *pdev)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ if (ibm->resume)
+ (ibm->resume)();
+ }
+
+ return 0;
+}
static struct platform_driver tpacpi_pdriver = {
.driver = {
.name = IBM_DRVR_NAME,
.owner = THIS_MODULE,
},
+ .resume = tpacpi_resume_handler,
};
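
The hunk above registers a resume handler on the thinkpad-acpi platform driver that walks the list of subdrivers and replays each one's resume hook. A minimal sketch of that fan-out pattern with the legacy platform_driver resume callback; example_sub and the list head are illustrative.

#include <linux/list.h>
#include <linux/platform_device.h>

struct example_sub {
	struct list_head node;
	void (*resume)(void);
};

static LIST_HEAD(example_subdrivers);

static int example_resume(struct platform_device *pdev)
{
	struct example_sub *sub;

	/* replay each subdriver's resume hook after the firmware wakes up */
	list_for_each_entry(sub, &example_subdrivers, node)
		if (sub->resume)
			sub->resume();
	return 0;
}

static struct platform_driver example_pdriver = {
	.driver = {
		.name = "example",
	},
	.resume = example_resume,
};
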
@@ -677,9 +717,19 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
printk(IBM_INFO "%s v%s\n", IBM_DESC, IBM_VERSION);
printk(IBM_INFO "%s\n", IBM_URL);
- if (ibm_thinkpad_ec_found)
- printk(IBM_INFO "ThinkPad EC firmware %s\n",
- ibm_thinkpad_ec_found);
+ printk(IBM_INFO "ThinkPad BIOS %s, EC %s\n",
+ (thinkpad_id.bios_version_str) ?
+ thinkpad_id.bios_version_str : "unknown",
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "unknown");
+
+ if (thinkpad_id.vendor && thinkpad_id.model_str)
+ printk(IBM_INFO "%s %s\n",
+ (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
+ "IBM" : ((thinkpad_id.vendor ==
+ PCI_VENDOR_ID_LENOVO) ?
+ "Lenovo" : "Unknown vendor"),
+ thinkpad_id.model_str);
return 0;
}
@@ -704,16 +754,28 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
*/
static int hotkey_orig_status;
-static int hotkey_orig_mask;
+static u32 hotkey_orig_mask;
+static u32 hotkey_all_mask;
+static u32 hotkey_reserved_mask;
+
+static u16 *hotkey_keycode_map;
-static struct attribute_set *hotkey_dev_attributes = NULL;
+static struct attribute_set *hotkey_dev_attributes;
+
+static int hotkey_get_wlsw(int *status)
+{
+ if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
+ return -EIO;
+ return 0;
+}
/* sysfs hotkey enable ------------------------------------------------- */
static ssize_t hotkey_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int res, status, mask;
+ int res, status;
+ u32 mask;
res = hotkey_get(&status, &mask);
if (res)
@@ -727,7 +789,8 @@ static ssize_t hotkey_enable_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
- int res, status, mask;
+ int res, status;
+ u32 mask;
if (parse_strtoul(buf, 1, &t))
return -EINVAL;
@@ -748,13 +811,14 @@ static ssize_t hotkey_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int res, status, mask;
+ int res, status;
+ u32 mask;
res = hotkey_get(&status, &mask);
if (res)
return res;
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", mask);
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", mask);
}
static ssize_t hotkey_mask_store(struct device *dev,
@@ -762,9 +826,10 @@ static ssize_t hotkey_mask_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
- int res, status, mask;
+ int res, status;
+ u32 mask;
- if (parse_strtoul(buf, 0xffff, &t))
+ if (parse_strtoul(buf, 0xffffffffUL, &t))
return -EINVAL;
res = hotkey_get(&status, &mask);
@@ -794,26 +859,123 @@ static ssize_t hotkey_bios_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", hotkey_orig_mask);
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
}
static struct device_attribute dev_attr_hotkey_bios_mask =
__ATTR(hotkey_bios_mask, S_IRUGO, hotkey_bios_mask_show, NULL);
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_all_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_all_mask);
+}
+
+static struct device_attribute dev_attr_hotkey_all_mask =
+ __ATTR(hotkey_all_mask, S_IRUGO, hotkey_all_mask_show, NULL);
+
+/* sysfs hotkey recommended_mask --------------------------------------- */
+static ssize_t hotkey_recommended_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ hotkey_all_mask & ~hotkey_reserved_mask);
+}
+
+static struct device_attribute dev_attr_hotkey_recommended_mask =
+ __ATTR(hotkey_recommended_mask, S_IRUGO,
+ hotkey_recommended_mask_show, NULL);
+
+/* sysfs hotkey radio_sw ----------------------------------------------- */
+static ssize_t hotkey_radio_sw_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res, s;
+ res = hotkey_get_wlsw(&s);
+ if (res < 0)
+ return res;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
+}
+
+static struct device_attribute dev_attr_hotkey_radio_sw =
+ __ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
+
/* --------------------------------------------------------------------- */
static struct attribute *hotkey_mask_attributes[] = {
&dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_bios_mask.attr,
+ &dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_recommended_mask.attr,
};
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
- int res;
+
+ static u16 ibm_keycode_map[] __initdata = {
+ /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+ KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP,
+ KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
+ /* Scan codes 0x0C to 0x0F: Other ACPI HKEY hot keys */
+ KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
+ KEY_UNKNOWN, /* 0x0D: FN+INSERT */
+ KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+ KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
+ /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
+ KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+ KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+ KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
+ KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+ KEY_RESERVED, /* 0x14: VOLUME UP */
+ KEY_RESERVED, /* 0x15: VOLUME DOWN */
+ KEY_RESERVED, /* 0x16: MUTE */
+ KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ };
+ static u16 lenovo_keycode_map[] __initdata = {
+ /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
+ KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
+ KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
+ KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
+ /* Scan codes 0x0C to 0x0F: Other ACPI HKEY hot keys */
+ KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
+ KEY_UNKNOWN, /* 0x0D: FN+INSERT */
+ KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+ KEY_BRIGHTNESSUP, /* 0x0F: FN+HOME (brightness up) */
+ /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
+ KEY_BRIGHTNESSDOWN, /* 0x10: FN+END (brightness down) */
+ KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+ KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
+ KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+ KEY_RESERVED, /* 0x14: VOLUME UP */
+ KEY_RESERVED, /* 0x15: VOLUME DOWN */
+ KEY_RESERVED, /* 0x16: MUTE */
+ KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ };
+
+#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map)
+#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map)
+#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0])
+
+ int res, i;
+ int status;
vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n");
+ BUG_ON(!tpacpi_inputdev);
+
IBM_ACPIHANDLE_INIT(hkey);
mutex_init(&hotkey_mutex);
@@ -824,7 +986,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
str_supported(tp_features.hotkey));
if (tp_features.hotkey) {
- hotkey_dev_attributes = create_attr_set(4, NULL);
+ hotkey_dev_attributes = create_attr_set(7, NULL);
if (!hotkey_dev_attributes)
return -ENOMEM;
res = add_to_attr_set(hotkey_dev_attributes,
@@ -840,19 +1002,92 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n",
str_supported(tp_features.hotkey_mask));
+ if (tp_features.hotkey_mask) {
+ /* MHKA available in A31, R40, R40e, T4x, X31, and later */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "qd"))
+ hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
+ }
+
res = hotkey_get(&hotkey_orig_status, &hotkey_orig_mask);
if (!res && tp_features.hotkey_mask) {
res = add_many_to_attr_set(hotkey_dev_attributes,
hotkey_mask_attributes,
ARRAY_SIZE(hotkey_mask_attributes));
}
+
+ /* Not all thinkpads have a hardware radio switch */
+ if (!res && acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
+ tp_features.hotkey_wlsw = 1;
+ printk(IBM_INFO
+ "radio switch found; radios are %s\n",
+ enabled(status, 0));
+ res = add_to_attr_set(hotkey_dev_attributes,
+ &dev_attr_hotkey_radio_sw.attr);
+ }
+
if (!res)
res = register_attr_set_with_sysfs(
hotkey_dev_attributes,
&tpacpi_pdev->dev.kobj);
+ if (res)
+ return res;
+
+ /* Set up key map */
+
+ hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
+ GFP_KERNEL);
+ if (!hotkey_keycode_map) {
+ printk(IBM_ERR "failed to allocate memory for key map\n");
+ return -ENOMEM;
+ }
+
+ if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) {
+ dbg_printk(TPACPI_DBG_INIT,
+ "using Lenovo default hot key map\n");
+ memcpy(hotkey_keycode_map, &lenovo_keycode_map,
+ TPACPI_HOTKEY_MAP_SIZE);
+ } else {
+ dbg_printk(TPACPI_DBG_INIT,
+ "using IBM default hot key map\n");
+ memcpy(hotkey_keycode_map, &ibm_keycode_map,
+ TPACPI_HOTKEY_MAP_SIZE);
+ }
+#ifndef CONFIG_THINKPAD_ACPI_INPUT_ENABLED
+ for (i = 0; i < 12; i++)
+ hotkey_keycode_map[i] = KEY_UNKNOWN;
+#endif /* ! CONFIG_THINKPAD_ACPI_INPUT_ENABLED */
+
+ set_bit(EV_KEY, tpacpi_inputdev->evbit);
+ set_bit(EV_MSC, tpacpi_inputdev->evbit);
+ set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
+ tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
+ tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
+ tpacpi_inputdev->keycode = hotkey_keycode_map;
+ for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
+ if (hotkey_keycode_map[i] != KEY_RESERVED) {
+ set_bit(hotkey_keycode_map[i],
+ tpacpi_inputdev->keybit);
+ } else {
+ if (i < sizeof(hotkey_reserved_mask)*8)
+ hotkey_reserved_mask |= 1 << i;
+ }
+ }
+
+ if (tp_features.hotkey_wlsw) {
+ set_bit(EV_SW, tpacpi_inputdev->evbit);
+ set_bit(SW_RADIO, tpacpi_inputdev->swbit);
+ }
+
+#ifdef CONFIG_THINKPAD_ACPI_INPUT_ENABLED
+ dbg_printk(TPACPI_DBG_INIT,
+ "enabling hot key handling\n");
+ res = hotkey_set(1, (hotkey_all_mask & ~hotkey_reserved_mask)
+ | hotkey_orig_mask);
if (res)
return res;
+#endif /* CONFIG_THINKPAD_ACPI_INPUT_ENABLED */
}
return (tp_features.hotkey)? 0 : 1;
@@ -875,22 +1110,101 @@ static void hotkey_exit(void)
}
}
+static void tpacpi_input_send_key(unsigned int scancode,
+ unsigned int keycode)
+{
+ if (keycode != KEY_RESERVED) {
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ if (keycode == KEY_UNKNOWN)
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+ scancode);
+ input_sync(tpacpi_inputdev);
+
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ if (keycode == KEY_UNKNOWN)
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+ scancode);
+ input_sync(tpacpi_inputdev);
+ }
+}
+
+static void tpacpi_input_send_radiosw(void)
+{
+ int wlsw;
+
+ if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw))
+ input_report_switch(tpacpi_inputdev,
+ SW_RADIO, !!wlsw);
+}
+
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
{
- int hkey;
+ u32 hkey;
+ unsigned int keycode, scancode;
+ int sendacpi = 1;
+
+ if (event == 0x80 && acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
+ if (tpacpi_inputdev->users > 0) {
+ switch (hkey >> 12) {
+ case 1:
+ /* 0x1000-0x1FFF: key presses */
+ scancode = hkey & 0xfff;
+ if (scancode > 0 && scancode < 0x21) {
+ scancode--;
+ keycode = hotkey_keycode_map[scancode];
+ tpacpi_input_send_key(scancode, keycode);
+ sendacpi = (keycode == KEY_RESERVED
+ || keycode == KEY_UNKNOWN);
+ } else {
+ printk(IBM_ERR
+ "hotkey 0x%04x out of range for keyboard map\n",
+ hkey);
+ }
+ break;
+ case 5:
+ /* 0x5000-0x5FFF: LID */
+ /* we don't handle it through this path, just
+ * eat up known LID events */
+ if (hkey != 0x5001 && hkey != 0x5002) {
+ printk(IBM_ERR
+ "unknown LID-related hotkey event: 0x%04x\n",
+ hkey);
+ }
+ break;
+ case 7:
+ /* 0x7000-0x7FFF: misc */
+ if (tp_features.hotkey_wlsw && hkey == 0x7000) {
+ tpacpi_input_send_radiosw();
+ sendacpi = 0;
+ break;
+ }
+ /* fallthrough to default */
+ default:
+ /* case 2: dock-related */
+ /* 0x2305 - T43 waking up due to bay lever eject while asleep */
+ /* case 3: ultra-bay related. maybe bay in dock? */
+ /* 0x3003 - T43 after wake up by bay lever eject (0x2305) */
+ printk(IBM_NOTICE "unhandled hotkey event 0x%04x\n", hkey);
+ }
+ }
- if (acpi_evalf(hkey_handle, &hkey, "MHKP", "d"))
- acpi_bus_generate_event(ibm->acpi->device, event, hkey);
- else {
- printk(IBM_ERR "unknown hotkey event %d\n", event);
+ if (sendacpi)
+ acpi_bus_generate_event(ibm->acpi->device, event, hkey);
+ } else {
+ printk(IBM_ERR "unknown hotkey notification event %d\n", event);
acpi_bus_generate_event(ibm->acpi->device, event, 0);
}
}
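
A minimal standalone sketch (illustration only, not part of the patch) of how hotkey_notify() above splits the raw MHKP value: bits 12-15 select the event class and bits 0-11 carry the payload, which for class 1 is a 1-based scan code into the keycode map.

#include <stdio.h>

/* Sketch of the MHKP event decoding used by hotkey_notify():
 * class 1 = hot key press, class 5 = lid, class 7 = misc/radio. */
static void decode_hkey(unsigned int hkey)
{
	unsigned int ev_class = hkey >> 12;
	unsigned int scancode = hkey & 0x0fff;

	switch (ev_class) {
	case 1:
		if (scancode > 0 && scancode < 0x21)
			/* 1-based scan code -> 0-based keycode map index */
			printf("0x%04x: key press, map index %u\n",
			       hkey, scancode - 1);
		else
			printf("0x%04x: scan code out of range\n", hkey);
		break;
	case 5:
		printf("0x%04x: LID event\n", hkey);
		break;
	case 7:
		printf("0x%04x: misc event (0x7000 = radio switch)\n", hkey);
		break;
	default:
		printf("0x%04x: unhandled event class %u\n", hkey, ev_class);
	}
}

int main(void)
{
	decode_hkey(0x1003);	/* FN+F3 */
	decode_hkey(0x5001);	/* LID event */
	decode_hkey(0x7000);	/* radio switch toggled */
	return 0;
}
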
+static void hotkey_resume(void)
+{
+ tpacpi_input_send_radiosw();
+}
+
/*
* Call with hotkey_mutex held
*/
-static int hotkey_get(int *status, int *mask)
+static int hotkey_get(int *status, u32 *mask)
{
if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
return -EIO;
@@ -905,7 +1219,7 @@ static int hotkey_get(int *status, int *mask)
/*
* Call with hotkey_mutex held
*/
-static int hotkey_set(int status, int mask)
+static int hotkey_set(int status, u32 mask)
{
int i;
@@ -926,7 +1240,8 @@ static int hotkey_set(int status, int mask)
/* procfs -------------------------------------------------------------- */
static int hotkey_read(char *p)
{
- int res, status, mask;
+ int res, status;
+ u32 mask;
int len = 0;
if (!tp_features.hotkey) {
@@ -944,7 +1259,7 @@ static int hotkey_read(char *p)
len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
if (tp_features.hotkey_mask) {
- len += sprintf(p + len, "mask:\t\t0x%04x\n", mask);
+ len += sprintf(p + len, "mask:\t\t0x%08x\n", mask);
len += sprintf(p + len,
"commands:\tenable, disable, reset, <mask>\n");
} else {
@@ -957,7 +1272,8 @@ static int hotkey_read(char *p)
static int hotkey_write(char *buf)
{
- int res, status, mask;
+ int res, status;
+ u32 mask;
char *cmd;
int do_cmd = 0;
@@ -1012,6 +1328,7 @@ static struct ibm_struct hotkey_driver_data = {
.read = hotkey_read,
.write = hotkey_write,
.exit = hotkey_exit,
+ .resume = hotkey_resume,
.acpi = &ibm_hotkey_acpidriver,
};
@@ -1770,7 +2087,10 @@ static struct tp_acpi_drv_struct ibm_dock_acpidriver[2] = {
.type = ACPI_SYSTEM_NOTIFY,
},
{
- .hid = IBM_PCI_HID,
+ /* THIS ONE MUST NEVER BE USED FOR DRIVER AUTOLOADING.
+ * We just use it to get notifications of dock hotplug
+ * in very old thinkpads */
+ .hid = PCI_ROOT_HID_STRING,
.notify = dock_notify,
.handle = &pci_handle,
.type = ACPI_SYSTEM_NOTIFY,
@@ -1829,7 +2149,7 @@ static int __init dock_init2(struct ibm_init_struct *iibm)
static void dock_notify(struct ibm_struct *ibm, u32 event)
{
int docked = dock_docked();
- int pci = ibm->acpi->hid && strstr(ibm->acpi->hid, IBM_PCI_HID);
+ int pci = ibm->acpi->hid && strstr(ibm->acpi->hid, PCI_ROOT_HID_STRING);
if (event == 1 && !pci) /* 570 */
acpi_bus_generate_event(ibm->acpi->device, event, 1); /* button */
@@ -2389,7 +2709,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
acpi_tmp7 = acpi_evalf(ec_handle, NULL, "TMP7", "qv");
- if (ibm_thinkpad_ec_found && experimental) {
+ if (thinkpad_id.ec_model) {
/*
* Direct EC access mode: sensors at registers
* 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for
@@ -2533,6 +2853,8 @@ static int thermal_get_sensor(int idx, s32 *value)
snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
return -EIO;
+ if (t > 127 || t < -127)
+ t = TP_EC_THERMAL_TMP_NA;
*value = t * 1000;
return 0;
}
@@ -2671,22 +2993,39 @@ static struct ibm_struct ecdump_driver_data = {
* Backlight/brightness subdriver
*/
-static struct backlight_device *ibm_backlight_device = NULL;
+static struct backlight_device *ibm_backlight_device;
static struct backlight_ops ibm_backlight_data = {
.get_brightness = brightness_get,
.update_status = brightness_update_status,
};
+static struct mutex brightness_mutex;
+
static int __init brightness_init(struct ibm_init_struct *iibm)
{
int b;
vdbg_printk(TPACPI_DBG_INIT, "initializing brightness subdriver\n");
+ mutex_init(&brightness_mutex);
+
+ if (!brightness_mode) {
+ if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO)
+ brightness_mode = 2;
+ else
+ brightness_mode = 3;
+
+ dbg_printk(TPACPI_DBG_INIT, "selected brightness_mode=%d\n",
+ brightness_mode);
+ }
+
+ if (brightness_mode > 3)
+ return -EINVAL;
+
b = brightness_get(NULL);
if (b < 0)
- return b;
+ return 1;
ibm_backlight_device = backlight_device_register(
TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
@@ -2722,34 +3061,79 @@ static int brightness_update_status(struct backlight_device *bd)
bd->props.brightness : 0);
}
+/*
+ * ThinkPads can read brightness from two places: EC 0x31, or
+ * CMOS NVRAM byte 0x5E, bits 0-2.
+ */
static int brightness_get(struct backlight_device *bd)
{
- u8 level;
- if (!acpi_ec_read(brightness_offset, &level))
- return -EIO;
+ u8 lec = 0, lcmos = 0, level = 0;
- level &= 0x7;
+ if (brightness_mode & 1) {
+ if (!acpi_ec_read(brightness_offset, &lec))
+ return -EIO;
+ lec &= 7;
+ level = lec;
+ }
+ if (brightness_mode & 2) {
+ lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
+ & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ level = lcmos;
+ }
+
+ if (brightness_mode == 3 && lec != lcmos) {
+ printk(IBM_ERR
+ "CMOS NVRAM (%u) and EC (%u) do not agree "
+ "on display brightness level\n",
+ (unsigned int) lcmos,
+ (unsigned int) lec);
+ return -EIO;
+ }
return level;
}
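
A standalone sketch (not from the driver) of the brightness_mode policy implemented by brightness_get() above: bit 0 selects the EC value, bit 1 the CMOS NVRAM value, and mode 3 requires the two sources to agree.

#include <stdio.h>

/* Mirror of the brightness_get() policy, with the EC and NVRAM reads
 * replaced by plain parameters; both sources keep the level in their
 * low three bits. Returns 0-7, or -1 if mode 3 finds a mismatch. */
static int pick_level(int mode, unsigned char ec_raw, unsigned char nvram_raw)
{
	unsigned char lec = 0, lcmos = 0, level = 0;

	if (mode & 1) {			/* EC register 0x31 */
		lec = ec_raw & 7;
		level = lec;
	}
	if (mode & 2) {			/* CMOS NVRAM byte 0x5e */
		lcmos = nvram_raw & 7;
		level = lcmos;
	}
	if (mode == 3 && lec != lcmos)
		return -1;		/* sources disagree */
	return level;
}

int main(void)
{
	printf("EC only:      %d\n", pick_level(1, 0x05, 0x00));
	printf("NVRAM only:   %d\n", pick_level(2, 0x00, 0x03));
	printf("both, agree:  %d\n", pick_level(3, 0x04, 0x04));
	printf("both, differ: %d\n", pick_level(3, 0x04, 0x02));
	return 0;
}
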
static int brightness_set(int value)
{
- int cmos_cmd, inc, i;
- int current_value = brightness_get(NULL);
+ int cmos_cmd, inc, i, res;
+ int current_value;
+
+ if (value > 7)
+ return -EINVAL;
- value &= 7;
+ res = mutex_lock_interruptible(&brightness_mutex);
+ if (res < 0)
+ return res;
+
+ current_value = brightness_get(NULL);
+ if (current_value < 0) {
+ res = current_value;
+ goto errout;
+ }
- cmos_cmd = value > current_value ? TP_CMOS_BRIGHTNESS_UP : TP_CMOS_BRIGHTNESS_DOWN;
+ cmos_cmd = value > current_value ?
+ TP_CMOS_BRIGHTNESS_UP :
+ TP_CMOS_BRIGHTNESS_DOWN;
inc = value > current_value ? 1 : -1;
+
+ res = 0;
for (i = current_value; i != value; i += inc) {
- if (issue_thinkpad_cmos_command(cmos_cmd))
- return -EIO;
- if (!acpi_ec_write(brightness_offset, i + inc))
- return -EIO;
+ if ((brightness_mode & 2) &&
+ issue_thinkpad_cmos_command(cmos_cmd)) {
+ res = -EIO;
+ goto errout;
+ }
+ if ((brightness_mode & 1) &&
+ !acpi_ec_write(brightness_offset, i + inc)) {
+ res = -EIO;
+ goto errout;
+ }
}
- return 0;
+errout:
+ mutex_unlock(&brightness_mutex);
+ return res;
}
static int brightness_read(char *p)
@@ -3273,20 +3657,19 @@ static int __init fan_init(struct ibm_init_struct *iibm)
* Enable for TP-1Y (T43), TP-78 (R51e),
* TP-76 (R52), TP-70 (T43, R52), which are known
* to be buggy. */
- if (fan_control_initial_status == 0x07 &&
- ibm_thinkpad_ec_found &&
- ((ibm_thinkpad_ec_found[0] == '1' &&
- ibm_thinkpad_ec_found[1] == 'Y') ||
- (ibm_thinkpad_ec_found[0] == '7' &&
- (ibm_thinkpad_ec_found[1] == '6' ||
- ibm_thinkpad_ec_found[1] == '8' ||
- ibm_thinkpad_ec_found[1] == '0'))
- )) {
- printk(IBM_NOTICE
- "fan_init: initial fan status is "
- "unknown, assuming it is in auto "
- "mode\n");
- tp_features.fan_ctrl_status_undef = 1;
+ if (fan_control_initial_status == 0x07) {
+ switch (thinkpad_id.ec_model) {
+ case 0x5931: /* TP-1Y */
+ case 0x3837: /* TP-78 */
+ case 0x3637: /* TP-76 */
+ case 0x3037: /* TP-70 */
+ printk(IBM_NOTICE
+ "fan_init: initial fan status is "
+ "unknown, assuming it is in auto "
+ "mode\n");
+ tp_features.fan_ctrl_status_undef = 1;
+ break;
+ }
}
} else {
printk(IBM_ERR
@@ -3474,7 +3857,7 @@ static void fan_watchdog_fire(struct work_struct *ignored)
static void fan_watchdog_reset(void)
{
- static int fan_watchdog_active = 0;
+ static int fan_watchdog_active;
if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
return;
@@ -3877,7 +4260,7 @@ static struct ibm_struct fan_driver_data = {
****************************************************************************/
/* /proc support */
-static struct proc_dir_entry *proc_dir = NULL;
+static struct proc_dir_entry *proc_dir;
/* Subdriver registry */
static LIST_HEAD(tpacpi_all_drivers);
@@ -4020,13 +4403,30 @@ static void ibm_exit(struct ibm_struct *ibm)
/* Probing */
-static char *ibm_thinkpad_ec_found = NULL;
-
-static char* __init check_dmi_for_ec(void)
+static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
{
struct dmi_device *dev = NULL;
char ec_fw_string[18];
+ if (!tp)
+ return;
+
+ memset(tp, 0, sizeof(*tp));
+
+ if (dmi_name_in_vendors("IBM"))
+ tp->vendor = PCI_VENDOR_ID_IBM;
+ else if (dmi_name_in_vendors("LENOVO"))
+ tp->vendor = PCI_VENDOR_ID_LENOVO;
+ else
+ return;
+
+ tp->bios_version_str = kstrdup(dmi_get_system_info(DMI_BIOS_VERSION),
+ GFP_KERNEL);
+ if (!tp->bios_version_str)
+ return;
+ tp->bios_model = tp->bios_version_str[0]
+ | (tp->bios_version_str[1] << 8);
+
/*
* ThinkPad T23 or newer, A31 or newer, R50e or newer,
* X32 or newer, all Z series; Some models must have an
@@ -4040,10 +4440,20 @@ static char* __init check_dmi_for_ec(void)
ec_fw_string) == 1) {
ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
- return kstrdup(ec_fw_string, GFP_KERNEL);
+
+ tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
+ tp->ec_model = ec_fw_string[0]
+ | (ec_fw_string[1] << 8);
+ break;
}
}
- return NULL;
+
+ tp->model_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_VERSION),
+ GFP_KERNEL);
+ if (tp->model_str && strnicmp(tp->model_str, "ThinkPad", 8) != 0) {
+ kfree(tp->model_str);
+ tp->model_str = NULL;
+ }
}
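
A small sketch (hypothetical version strings; only the first two characters matter) of the bios_model/ec_model packing done by get_thinkpad_model_data() above, which is what the TP-1Y/TP-78/TP-76/TP-70 switch in fan_init() matches against.

#include <stdio.h>

/* Pack the first two characters of a version string into a u16 the
 * same way get_thinkpad_model_data() does: first character in the
 * low byte, second character in the high byte. */
static unsigned short pack_model(const char *version_str)
{
	return (unsigned char)version_str[0]
	       | ((unsigned char)version_str[1] << 8);
}

int main(void)
{
	printf("\"1Y...\" -> 0x%04x (TP-1Y)\n", pack_model("1YXXXXWW"));
	printf("\"78...\" -> 0x%04x (TP-78)\n", pack_model("78XXXXWW"));
	return 0;
}
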
static int __init probe_for_thinkpad(void)
@@ -4057,7 +4467,7 @@ static int __init probe_for_thinkpad(void)
* Non-ancient models have better DMI tagging, but very old models
* don't.
*/
- is_thinkpad = dmi_name_in_vendors("ThinkPad");
+ is_thinkpad = (thinkpad_id.model_str != NULL);
/* ec is required because many other handles are relative to it */
IBM_ACPIHANDLE_INIT(ec);
@@ -4073,7 +4483,7 @@ static int __init probe_for_thinkpad(void)
* false positives a damn great deal
*/
if (!is_thinkpad)
- is_thinkpad = dmi_name_in_vendors("IBM");
+ is_thinkpad = (thinkpad_id.vendor == PCI_VENDOR_ID_IBM);
if (!is_thinkpad && !force_load)
return -ENODEV;
@@ -4185,10 +4595,13 @@ static u32 dbg_level;
module_param_named(debug, dbg_level, uint, 0);
static int force_load;
-module_param(force_load, int, 0);
+module_param(force_load, bool, 0);
static int fan_control_allowed;
-module_param_named(fan_control, fan_control_allowed, int, 0);
+module_param_named(fan_control, fan_control_allowed, bool, 0);
+
+static int brightness_mode;
+module_param_named(brightness_mode, brightness_mode, int, 0);
#define IBM_PARAM(feature) \
module_param_call(feature, set_ibm_param, NULL, NULL, 0)
@@ -4216,12 +4629,16 @@ static int __init thinkpad_acpi_module_init(void)
int ret, i;
/* Driver-level probe */
+
+ get_thinkpad_model_data(&thinkpad_id);
ret = probe_for_thinkpad();
- if (ret)
+ if (ret) {
+ thinkpad_acpi_module_exit();
return ret;
+ }
/* Driver initialization */
- ibm_thinkpad_ec_found = check_dmi_for_ec();
+
IBM_ACPIHANDLE_INIT(ecrd);
IBM_ACPIHANDLE_INIT(ecwr);
@@ -4265,6 +4682,22 @@ static int __init thinkpad_acpi_module_init(void)
thinkpad_acpi_module_exit();
return ret;
}
+ tpacpi_inputdev = input_allocate_device();
+ if (!tpacpi_inputdev) {
+ printk(IBM_ERR "unable to allocate input device\n");
+ thinkpad_acpi_module_exit();
+ return -ENOMEM;
+ } else {
+ /* Prepare input device, but don't register */
+ tpacpi_inputdev->name = "ThinkPad Extra Buttons";
+ tpacpi_inputdev->phys = IBM_DRVR_NAME "/input0";
+ tpacpi_inputdev->id.bustype = BUS_HOST;
+ tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ?
+ thinkpad_id.vendor :
+ PCI_VENDOR_ID_IBM;
+ tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
+ tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ }
for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
ret = ibm_init(&ibms_init[i]);
if (ret >= 0 && *ibms_init[i].param)
@@ -4274,6 +4707,14 @@ static int __init thinkpad_acpi_module_init(void)
return ret;
}
}
+ ret = input_register_device(tpacpi_inputdev);
+ if (ret < 0) {
+ printk(IBM_ERR "unable to register input device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ } else {
+ tp_features.input_device_registered = 1;
+ }
return 0;
}
@@ -4290,6 +4731,13 @@ static void thinkpad_acpi_module_exit(void)
dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+ if (tpacpi_inputdev) {
+ if (tp_features.input_device_registered)
+ input_unregister_device(tpacpi_inputdev);
+ else
+ input_free_device(tpacpi_inputdev);
+ }
+
if (tpacpi_hwmon)
hwmon_device_unregister(tpacpi_hwmon);
@@ -4302,7 +4750,9 @@ static void thinkpad_acpi_module_exit(void)
if (proc_dir)
remove_proc_entry(IBM_PROC_DIR, acpi_root_dir);
- kfree(ibm_thinkpad_ec_found);
+ kfree(thinkpad_id.bios_version_str);
+ kfree(thinkpad_id.ec_version_str);
+ kfree(thinkpad_id.model_str);
}
module_init(thinkpad_acpi_module_init);
diff --git a/drivers/misc/thinkpad_acpi.h b/drivers/misc/thinkpad_acpi.h
index 72d62f2dabb9..b7a4a888cc8b 100644
--- a/drivers/misc/thinkpad_acpi.h
+++ b/drivers/misc/thinkpad_acpi.h
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/nvram.h>
#include <linux/proc_fs.h>
#include <linux/sysfs.h>
#include <linux/backlight.h>
@@ -39,6 +40,7 @@
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/input.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>
@@ -48,6 +50,7 @@
#include <acpi/acpi_drivers.h>
#include <acpi/acnamesp.h>
+#include <linux/pci_ids.h>
/****************************************************************************
* Main driver
@@ -78,6 +81,11 @@
#define TP_CMOS_BRIGHTNESS_UP 4
#define TP_CMOS_BRIGHTNESS_DOWN 5
+/* ThinkPad CMOS NVRAM constants */
+#define TP_NVRAM_ADDR_BRIGHTNESS 0x5e
+#define TP_NVRAM_MASK_LEVEL_BRIGHTNESS 0x07
+#define TP_NVRAM_POS_LEVEL_BRIGHTNESS 0
+
#define onoff(status,bit) ((status) & (1 << (bit)) ? "on" : "off")
#define enabled(status,bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
#define strlencmp(a,b) (strncmp((a), (b), strlen(b)))
@@ -98,9 +106,13 @@ static const char *str_supported(int is_supported);
#define vdbg_printk(a_dbg_level, format, arg...)
#endif
+/* Input IDs */
+#define TPACPI_HKEY_INPUT_VENDOR PCI_VENDOR_ID_IBM
+#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
+#define TPACPI_HKEY_INPUT_VERSION 0x4101
+
/* ACPI HIDs */
#define IBM_HKEY_HID "IBM0068"
-#define IBM_PCI_HID "PNP0A03"
/* ACPI helpers */
static int __must_check acpi_evalf(acpi_handle handle,
@@ -161,6 +173,7 @@ static int parse_strtoul(const char *buf, unsigned long max,
static struct platform_device *tpacpi_pdev;
static struct class_device *tpacpi_hwmon;
static struct platform_driver tpacpi_pdriver;
+static struct input_dev *tpacpi_inputdev;
static int tpacpi_create_driver_attributes(struct device_driver *drv);
static void tpacpi_remove_driver_attributes(struct device_driver *drv);
@@ -168,9 +181,7 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv);
static int experimental;
static u32 dbg_level;
static int force_load;
-static char *ibm_thinkpad_ec_found;
-static char* check_dmi_for_ec(void);
static int thinkpad_acpi_module_init(void);
static void thinkpad_acpi_module_exit(void);
@@ -197,6 +208,7 @@ struct ibm_struct {
int (*read) (char *);
int (*write) (char *);
void (*exit) (void);
+ void (*resume) (void);
struct list_head all_drivers;
@@ -228,12 +240,29 @@ static struct {
u16 bluetooth:1;
u16 hotkey:1;
u16 hotkey_mask:1;
+ u16 hotkey_wlsw:1;
u16 light:1;
u16 light_status:1;
u16 wan:1;
u16 fan_ctrl_status_undef:1;
+ u16 input_device_registered:1;
} tp_features;
+struct thinkpad_id_data {
+ unsigned int vendor; /* ThinkPad vendor:
+ * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
+
+ char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
+ char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
+
+ u16 bios_model; /* first two version chars packed as a u16, TP-1Y = 0x5931, 0 = unknown */
+ u16 ec_model;
+
+ char *model_str;
+};
+
+static struct thinkpad_id_data thinkpad_id;
+
static struct list_head tpacpi_all_drivers;
static struct ibm_init_struct ibms_init[];
@@ -300,6 +329,7 @@ static int bluetooth_write(char *buf);
static struct backlight_device *ibm_backlight_device;
static int brightness_offset = 0x31;
+static int brightness_mode;
static int brightness_init(struct ibm_init_struct *iibm);
static void brightness_exit(void);
@@ -415,14 +445,14 @@ static int fan_write_cmd_watchdog(const char *cmd, int *rc);
*/
static int hotkey_orig_status;
-static int hotkey_orig_mask;
+static u32 hotkey_orig_mask;
static struct mutex hotkey_mutex;
static int hotkey_init(struct ibm_init_struct *iibm);
static void hotkey_exit(void);
-static int hotkey_get(int *status, int *mask);
-static int hotkey_set(int status, int mask);
+static int hotkey_get(int *status, u32 *mask);
+static int hotkey_set(int status, u32 mask);
static void hotkey_notify(struct ibm_struct *ibm, u32 event);
static int hotkey_read(char *p);
static int hotkey_write(char *buf);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index cbd4b6e3e17c..93fe2e5dd616 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -414,13 +414,12 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
return ERR_PTR(-ENOSPC);
__set_bit(devidx, dev_use);
- md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+ md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
ret = -ENOMEM;
goto out;
}
- memset(md, 0, sizeof(struct mmc_blk_data));
/*
* Set the read-only status based on the supported commands
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4fb2089dc690..b53dac8d1b69 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mmc/card.h>
@@ -44,11 +45,7 @@ static int mmc_queue_thread(void *d)
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
- /*
- * Set iothread to ensure that we aren't put to sleep by
- * the process freezing. We handle suspension ourselves.
- */
- current->flags |= PF_MEMALLOC|PF_NOFREEZE;
+ current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 28c881895ab7..15aab374127e 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -903,8 +903,10 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* Add host to MMC layer
*/
- if (host->board->det_pin)
+ if (host->board->det_pin) {
host->present = !at91_get_gpio_value(host->board->det_pin);
+ device_init_wakeup(&pdev->dev, 1);
+ }
else
host->present = -1;
@@ -940,6 +942,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
host = mmc_priv(mmc);
if (host->present != -1) {
+ device_init_wakeup(&pdev->dev, 0);
free_irq(host->board->det_pin, host);
cancel_delayed_work(&host->mmc->detect);
}
@@ -966,8 +969,12 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(host->board->det_pin);
+
if (mmc)
ret = mmc_suspend_host(mmc, state);
@@ -977,8 +984,12 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
static int at91_mci_resume(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(host->board->det_pin);
+
if (mmc)
ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h
index 1b163220df2b..df17c281278a 100644
--- a/drivers/mmc/host/pxamci.h
+++ b/drivers/mmc/host/pxamci.h
@@ -1,25 +1,3 @@
-#undef MMC_STRPCL
-#undef MMC_STAT
-#undef MMC_CLKRT
-#undef MMC_SPI
-#undef MMC_CMDAT
-#undef MMC_RESTO
-#undef MMC_RDTO
-#undef MMC_BLKLEN
-#undef MMC_NOB
-#undef MMC_PRTBUF
-#undef MMC_I_MASK
-#undef END_CMD_RES
-#undef PRG_DONE
-#undef DATA_TRAN_DONE
-#undef MMC_I_REG
-#undef MMC_CMD
-#undef MMC_ARGH
-#undef MMC_ARGL
-#undef MMC_RES
-#undef MMC_RXFIFO
-#undef MMC_TXFIFO
-
#define MMC_STRPCL 0x0000
#define STOP_CLOCK (1 << 0)
#define START_CLOCK (2 << 0)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 10d15c39d003..4a24db028d87 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1024,6 +1024,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
+ intmask &= ~SDHCI_INT_ERROR;
+
if (intmask & SDHCI_INT_BUS_POWER) {
printk(KERN_ERR "%s: Card is consuming too much power!\n",
mmc_hostname(host->mmc));
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7400f4bc114f..a6c870480b8a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -107,6 +107,7 @@
#define SDHCI_INT_CARD_INSERT 0x00000040
#define SDHCI_INT_CARD_REMOVE 0x00000080
#define SDHCI_INT_CARD_INT 0x00000100
+#define SDHCI_INT_ERROR 0x00008000
#define SDHCI_INT_TIMEOUT 0x00010000
#define SDHCI_INT_CRC 0x00020000
#define SDHCI_INT_END_BIT 0x00040000
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index a8a158708293..74d9d30edabd 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -16,6 +16,7 @@
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
+#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -79,7 +80,7 @@ static int mtd_blktrans_thread(void *arg)
struct request_queue *rq = tr->blkcore_priv->rq;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
- current->flags |= PF_MEMALLOC | PF_NOFREEZE;
+ current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 555d594d1811..1cb22bfae750 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -33,6 +33,7 @@
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/stat.h>
+#include <linux/log2.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
@@ -369,7 +370,7 @@ static int attach_by_scanning(struct ubi_device *ubi)
out_wl:
ubi_wl_close(ubi);
out_vtbl:
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
out_si:
ubi_scan_destroy_si(si);
return err;
@@ -422,8 +423,7 @@ static int io_init(struct ubi_device *ubi)
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
/* Make sure minimal I/O unit is power of 2 */
- if (ubi->min_io_size == 0 ||
- (ubi->min_io_size & (ubi->min_io_size - 1))) {
+ if (!is_power_of_2(ubi->min_io_size)) {
ubi_err("bad min. I/O unit");
return -EINVAL;
}
@@ -593,8 +593,6 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
if (err)
goto out_detach;
- ubi_devices_cnt += 1;
-
ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
ubi_msg("MTD device name: \"%s\"", ubi->mtd->name);
ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
@@ -624,12 +622,13 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
wake_up_process(ubi->bgt_thread);
}
+ ubi_devices_cnt += 1;
return 0;
out_detach:
ubi_eba_close(ubi);
ubi_wl_close(ubi);
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
out_free:
kfree(ubi);
out_mtd:
@@ -650,7 +649,7 @@ static void detach_mtd_dev(struct ubi_device *ubi)
uif_close(ubi);
ubi_eba_close(ubi);
ubi_wl_close(ubi);
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
kfree(ubi_devices[ubi_num]);
ubi_devices[ubi_num] = NULL;
@@ -686,13 +685,6 @@ static int __init ubi_init(void)
struct mtd_dev_param *p = &mtd_dev_param[i];
cond_resched();
-
- if (!p->name) {
- dbg_err("empty name");
- err = -EINVAL;
- goto out_detach;
- }
-
err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
if (err)
goto out_detach;
@@ -799,7 +791,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
/* Get rid of the final newline */
if (buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ buf[len - 1] = '\0';
for (i = 0; i < 3; i++)
tokens[i] = strsep(&pbuf, ",");
@@ -809,9 +801,6 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
return -EINVAL;
}
- if (tokens[0] == '\0')
- return -EINVAL;
-
p = &mtd_dev_param[mtd_devs];
strcpy(&p->name[0], tokens[0]);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 6612eb79bf17..fe4da1e96c52 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -64,6 +64,7 @@ static struct ubi_device *major_to_device(int major)
if (ubi_devices[i] && ubi_devices[i]->major == major)
return ubi_devices[i];
BUG();
+ return NULL;
}
/**
@@ -153,7 +154,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
ubi_warn("update of volume %d not finished, volume is damaged",
vol->vol_id);
vol->updating = 0;
- kfree(vol->upd_buf);
+ vfree(vol->upd_buf);
}
ubi_close_volume(desc);
@@ -232,7 +233,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
- tbuf = kmalloc(tbuf_size, GFP_KERNEL);
+ tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
@@ -271,7 +272,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
len = count > tbuf_size ? tbuf_size : count;
} while (count);
- kfree(tbuf);
+ vfree(tbuf);
return err ? err : count_save - count;
}
@@ -320,7 +321,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
- tbuf = kmalloc(tbuf_size, GFP_KERNEL);
+ tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
@@ -355,7 +356,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
len = count > tbuf_size ? tbuf_size : count;
}
- kfree(tbuf);
+ vfree(tbuf);
return err ? err : count_save - count;
}
@@ -397,6 +398,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
vol->corrupted = 1;
}
vol->checked = 1;
+ ubi_gluebi_updated(vol);
revoke_exclusive(desc, UBI_READWRITE);
}
@@ -413,19 +415,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_device *ubi = vol->ubi;
void __user *argp = (void __user *)arg;
- if (_IOC_NR(cmd) > VOL_CDEV_IOC_MAX_SEQ ||
- _IOC_TYPE(cmd) != UBI_VOL_IOC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_DIR(cmd) && _IOC_READ)
- err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) && _IOC_WRITE)
- err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
switch (cmd) {
-
/* Volume update command */
case UBI_IOCVOLUP:
{
@@ -471,7 +461,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
{
int32_t lnum;
- err = __get_user(lnum, (__user int32_t *)argp);
+ err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
@@ -587,17 +577,6 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_volume_desc *desc;
void __user *argp = (void __user *)arg;
- if (_IOC_NR(cmd) > UBI_CDEV_IOC_MAX_SEQ ||
- _IOC_TYPE(cmd) != UBI_IOC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_DIR(cmd) && _IOC_READ)
- err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) && _IOC_WRITE)
- err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
@@ -612,7 +591,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_mkvol_req req;
dbg_msg("create volume");
- err = __copy_from_user(&req, argp,
+ err = copy_from_user(&req, argp,
sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
@@ -629,7 +608,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
if (err)
break;
- err = __put_user(req.vol_id, (__user int32_t *)argp);
+ err = put_user(req.vol_id, (__user int32_t *)argp);
if (err)
err = -EFAULT;
@@ -642,7 +621,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
int vol_id;
dbg_msg("remove volume");
- err = __get_user(vol_id, (__user int32_t *)argp);
+ err = get_user(vol_id, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
@@ -669,7 +648,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_rsvol_req req;
dbg_msg("re-size volume");
- err = __copy_from_user(&req, argp,
+ err = copy_from_user(&req, argp,
sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
@@ -707,7 +686,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
.ioctl = ubi_cdev_ioctl,
- .llseek = no_llseek
+ .llseek = no_llseek,
};
/* UBI volume character device operations */
@@ -718,5 +697,5 @@ struct file_operations ubi_vol_cdev_operations = {
.llseek = vol_cdev_llseek,
.read = vol_cdev_read,
.write = vol_cdev_write,
- .ioctl = vol_cdev_ioctl
+ .ioctl = vol_cdev_ioctl,
};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 86364221fafe..310341e5cd43 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -35,12 +35,12 @@
void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
dbg_msg("erase counter header dump:");
- dbg_msg("magic %#08x", ubi32_to_cpu(ec_hdr->magic));
+ dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic));
dbg_msg("version %d", (int)ec_hdr->version);
- dbg_msg("ec %llu", (long long)ubi64_to_cpu(ec_hdr->ec));
- dbg_msg("vid_hdr_offset %d", ubi32_to_cpu(ec_hdr->vid_hdr_offset));
- dbg_msg("data_offset %d", ubi32_to_cpu(ec_hdr->data_offset));
- dbg_msg("hdr_crc %#08x", ubi32_to_cpu(ec_hdr->hdr_crc));
+ dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec));
+ dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset));
+ dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc));
dbg_msg("erase counter header hexdump:");
ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE);
}
@@ -52,20 +52,20 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
dbg_msg("volume identifier header dump:");
- dbg_msg("magic %08x", ubi32_to_cpu(vid_hdr->magic));
+ dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic));
dbg_msg("version %d", (int)vid_hdr->version);
dbg_msg("vol_type %d", (int)vid_hdr->vol_type);
dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag);
dbg_msg("compat %d", (int)vid_hdr->compat);
- dbg_msg("vol_id %d", ubi32_to_cpu(vid_hdr->vol_id));
- dbg_msg("lnum %d", ubi32_to_cpu(vid_hdr->lnum));
- dbg_msg("leb_ver %u", ubi32_to_cpu(vid_hdr->leb_ver));
- dbg_msg("data_size %d", ubi32_to_cpu(vid_hdr->data_size));
- dbg_msg("used_ebs %d", ubi32_to_cpu(vid_hdr->used_ebs));
- dbg_msg("data_pad %d", ubi32_to_cpu(vid_hdr->data_pad));
+ dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id));
+ dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum));
+ dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver));
+ dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size));
+ dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs));
+ dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad));
dbg_msg("sqnum %llu",
- (unsigned long long)ubi64_to_cpu(vid_hdr->sqnum));
- dbg_msg("hdr_crc %08x", ubi32_to_cpu(vid_hdr->hdr_crc));
+ (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+ dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc));
dbg_msg("volume identifier header hexdump:");
}
@@ -91,7 +91,7 @@ void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- dbg_msg("name %s", vol->name);
+ dbg_msg("name %s", vol->name);
} else {
dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
vol->name[0], vol->name[1], vol->name[2],
@@ -106,30 +106,30 @@ void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
*/
void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
- int name_len = ubi16_to_cpu(r->name_len);
+ int name_len = be16_to_cpu(r->name_len);
dbg_msg("volume table record %d dump:", idx);
- dbg_msg("reserved_pebs %d", ubi32_to_cpu(r->reserved_pebs));
- dbg_msg("alignment %d", ubi32_to_cpu(r->alignment));
- dbg_msg("data_pad %d", ubi32_to_cpu(r->data_pad));
+ dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs));
+ dbg_msg("alignment %d", be32_to_cpu(r->alignment));
+ dbg_msg("data_pad %d", be32_to_cpu(r->data_pad));
dbg_msg("vol_type %d", (int)r->vol_type);
dbg_msg("upd_marker %d", (int)r->upd_marker);
dbg_msg("name_len %d", name_len);
if (r->name[0] == '\0') {
- dbg_msg("name NULL");
+ dbg_msg("name NULL");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- dbg_msg("name %s", &r->name[0]);
+ dbg_msg("name %s", &r->name[0]);
} else {
dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- dbg_msg("crc %#08x", ubi32_to_cpu(r->crc));
+ dbg_msg("crc %#08x", be32_to_cpu(r->crc));
}
/**
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index f816ad9a36c0..ff8f39548cd8 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -52,7 +52,6 @@ struct ubi_scan_volume;
struct ubi_scan_leb;
struct ubi_mkvol_req;
-void ubi_dbg_print(int type, const char *func, const char *fmt, ...);
void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
@@ -66,7 +65,6 @@ void ubi_dbg_hexdump(const void *buf, int size);
#define dbg_msg(fmt, ...) ({})
#define ubi_dbg_dump_stack() ({})
-#define ubi_dbg_print(func, fmt, ...) ({})
#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
#define ubi_dbg_dump_vol_info(vol) ({})
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 74002945b71b..7c5e29eaf118 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -368,7 +368,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
struct ubi_vid_hdr *vid_hdr;
struct ubi_volume *vol = ubi->volumes[idx];
- uint32_t crc, crc1;
+ uint32_t uninitialized_var(crc);
err = leb_read_lock(ubi, vol_id, lnum);
if (err)
@@ -425,10 +425,10 @@ retry:
} else if (err == UBI_IO_BITFLIPS)
scrub = 1;
- ubi_assert(lnum < ubi32_to_cpu(vid_hdr->used_ebs));
- ubi_assert(len == ubi32_to_cpu(vid_hdr->data_size));
+ ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+ ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
- crc = ubi32_to_cpu(vid_hdr->data_crc);
+ crc = be32_to_cpu(vid_hdr->data_crc);
ubi_free_vid_hdr(ubi, vid_hdr);
}
@@ -451,7 +451,7 @@ retry:
}
if (check) {
- crc1 = crc32(UBI_CRC32_INIT, buf, len);
+ uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
if (crc1 != crc) {
ubi_warn("CRC error: calculated %#08x, must be %#08x",
crc1, crc);
@@ -518,13 +518,13 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
data_size = offset + len;
- new_buf = kmalloc(data_size, GFP_KERNEL);
+ new_buf = vmalloc(data_size);
if (!new_buf) {
err = -ENOMEM;
goto out_put;
@@ -535,7 +535,7 @@ retry:
if (offset > 0) {
err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS) {
- kfree(new_buf);
+ vfree(new_buf);
goto out_put;
}
}
@@ -544,11 +544,11 @@ retry:
err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size);
if (err) {
- kfree(new_buf);
+ vfree(new_buf);
goto write_error;
}
- kfree(new_buf);
+ vfree(new_buf);
ubi_free_vid_hdr(ubi, vid_hdr);
vol->eba_tbl[lnum] = new_pnum;
@@ -634,11 +634,11 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
- vid_hdr->vol_id = cpu_to_ubi32(vol_id);
- vid_hdr->lnum = cpu_to_ubi32(lnum);
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
- vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
@@ -692,7 +692,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -748,17 +748,17 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
- vid_hdr->vol_id = cpu_to_ubi32(vol_id);
- vid_hdr->lnum = cpu_to_ubi32(lnum);
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
- vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
crc = crc32(UBI_CRC32_INIT, buf, data_size);
vid_hdr->vol_type = UBI_VID_STATIC;
- vid_hdr->data_size = cpu_to_ubi32(data_size);
- vid_hdr->used_ebs = cpu_to_ubi32(used_ebs);
- vid_hdr->data_crc = cpu_to_ubi32(crc);
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+ vid_hdr->data_crc = cpu_to_be32(crc);
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
@@ -813,7 +813,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -854,17 +854,17 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
- vid_hdr->vol_id = cpu_to_ubi32(vol_id);
- vid_hdr->lnum = cpu_to_ubi32(lnum);
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
- vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
crc = crc32(UBI_CRC32_INIT, buf, len);
- vid_hdr->vol_type = UBI_VID_STATIC;
- vid_hdr->data_size = cpu_to_ubi32(len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
vid_hdr->copy_flag = 1;
- vid_hdr->data_crc = cpu_to_ubi32(crc);
+ vid_hdr->data_crc = cpu_to_be32(crc);
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
@@ -891,11 +891,13 @@ retry:
goto write_error;
}
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
- if (err) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- leb_write_unlock(ubi, vol_id, lnum);
- return err;
+ if (vol->eba_tbl[lnum] >= 0) {
+ err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+ if (err) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
}
vol->eba_tbl[lnum] = pnum;
@@ -924,7 +926,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -965,19 +967,19 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
uint32_t crc;
void *buf, *buf1 = NULL;
- vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- lnum = ubi32_to_cpu(vid_hdr->lnum);
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
if (vid_hdr->vol_type == UBI_VID_STATIC) {
- data_size = ubi32_to_cpu(vid_hdr->data_size);
+ data_size = be32_to_cpu(vid_hdr->data_size);
aldata_size = ALIGN(data_size, ubi->min_io_size);
} else
data_size = aldata_size =
- ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad);
+ ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
- buf = kmalloc(aldata_size, GFP_KERNEL);
+ buf = vmalloc(aldata_size);
if (!buf)
return -ENOMEM;
@@ -987,7 +989,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
err = leb_write_lock(ubi, vol_id, lnum);
if (err) {
- kfree(buf);
+ vfree(buf);
return err;
}
@@ -1054,10 +1056,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
if (data_size > 0) {
vid_hdr->copy_flag = 1;
- vid_hdr->data_size = cpu_to_ubi32(data_size);
- vid_hdr->data_crc = cpu_to_ubi32(crc);
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err)
@@ -1082,7 +1084,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* We've written the data and are going to read it back to make
* sure it was written correctly.
*/
- buf1 = kmalloc(aldata_size, GFP_KERNEL);
+ buf1 = vmalloc(aldata_size);
if (!buf1) {
err = -ENOMEM;
goto out_unlock;
@@ -1111,15 +1113,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vol->eba_tbl[lnum] = to;
leb_write_unlock(ubi, vol_id, lnum);
- kfree(buf);
- kfree(buf1);
+ vfree(buf);
+ vfree(buf1);
return 0;
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
- kfree(buf);
- kfree(buf1);
+ vfree(buf);
+ vfree(buf1);
return err;
}
@@ -1147,7 +1149,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi_devices_cnt == 0) {
ltree_slab = kmem_cache_create("ubi_ltree_slab",
sizeof(struct ltree_entry), 0,
- 0, &ltree_entry_ctor, NULL);
+ 0, &ltree_entry_ctor);
if (!ltree_slab)
return -ENOMEM;
}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index fc9478d605ff..41ff74c60e14 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -282,7 +282,6 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
mtd->flags = MTD_WRITEABLE;
mtd->writesize = ubi->min_io_size;
mtd->owner = THIS_MODULE;
- mtd->size = vol->usable_leb_size * vol->reserved_pebs;
mtd->erasesize = vol->usable_leb_size;
mtd->read = gluebi_read;
mtd->write = gluebi_write;
@@ -290,6 +289,15 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
mtd->get_device = gluebi_get_device;
mtd->put_device = gluebi_put_device;
+ /*
+ * In the case of a dynamic volume, the MTD device size is just the volume
+ * size. For a static volume it is the amount of data bytes it holds, which
+ * is zero at this point and is set after the volume is updated.
+ */
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ mtd->size = vol->usable_leb_size * vol->reserved_pebs;
+
if (add_mtd_device(mtd)) {
ubi_err("cannot not add MTD device\n");
kfree(mtd->name);
@@ -321,3 +329,20 @@ int ubi_destroy_gluebi(struct ubi_volume *vol)
kfree(mtd->name);
return 0;
}
+
+/**
+ * ubi_gluebi_updated - UBI volume was updated notifier.
+ * @vol: volume description object
+ *
+ * This function is called every time an UBI volume is updated. It does
+ * nothing if volume @vol is dynamic, and updates the MTD device size if the
+ * volume is static. This is needed because static volumes cannot be read
+ * past the data they contain.
+ */
+void ubi_gluebi_updated(struct ubi_volume *vol)
+{
+ struct mtd_info *mtd = &vol->gluebi_mtd;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME)
+ mtd->size = vol->used_bytes;
+}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 438914d05151..b0d8f4cede97 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -125,9 +125,9 @@ static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
* o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
* correctable bit-flips were detected; this is harmless but may indicate
* that this eraseblock may become bad soon (but do not have to);
- * o %-EBADMSG if the MTD subsystem reported about data data integrity
- * problems, for example it can me an ECC error in case of NAND; this most
- * probably means that the data is corrupted;
+ * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
+ * example it can be an ECC error in case of NAND; this most probably means
+ * that the data is corrupted;
* o %-EIO if some I/O error occurred;
* o other negative error codes in case of other errors.
*/
@@ -298,7 +298,7 @@ retry:
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = ubi->mtd;
- ei.addr = pnum * ubi->peb_size;
+ ei.addr = (loff_t)pnum * ubi->peb_size;
ei.len = ubi->peb_size;
ei.callback = erase_callback;
ei.priv = (unsigned long)&wq;
@@ -382,7 +382,7 @@ static int torture_peb(const struct ubi_device *ubi, int pnum)
void *buf;
int err, i, patt_count;
- buf = kmalloc(ubi->peb_size, GFP_KERNEL);
+ buf = vmalloc(ubi->peb_size);
if (!buf)
return -ENOMEM;
@@ -437,7 +437,7 @@ out:
* physical eraseblock which means something is wrong with it.
*/
err = -EIO;
- kfree(buf);
+ vfree(buf);
return err;
}
@@ -557,9 +557,9 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
long long ec;
int vid_hdr_offset, leb_start;
- ec = ubi64_to_cpu(ec_hdr->ec);
- vid_hdr_offset = ubi32_to_cpu(ec_hdr->vid_hdr_offset);
- leb_start = ubi32_to_cpu(ec_hdr->data_offset);
+ ec = be64_to_cpu(ec_hdr->ec);
+ vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+ leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
ubi_err("node with incompatible UBI version found: "
@@ -640,7 +640,7 @@ int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
read_err = err;
}
- magic = ubi32_to_cpu(ec_hdr->magic);
+ magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
/*
* The magic field is wrong. Let's check if we have read all
@@ -684,7 +684,7 @@ int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
}
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
if (verbose) {
@@ -729,12 +729,12 @@ int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum,
dbg_io("write EC header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- ec_hdr->magic = cpu_to_ubi32(UBI_EC_HDR_MAGIC);
+ ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
ec_hdr->version = UBI_VERSION;
- ec_hdr->vid_hdr_offset = cpu_to_ubi32(ubi->vid_hdr_offset);
- ec_hdr->data_offset = cpu_to_ubi32(ubi->leb_start);
+ ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+ ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
- ec_hdr->hdr_crc = cpu_to_ubi32(crc);
+ ec_hdr->hdr_crc = cpu_to_be32(crc);
err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
@@ -757,13 +757,13 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
{
int vol_type = vid_hdr->vol_type;
int copy_flag = vid_hdr->copy_flag;
- int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- int lnum = ubi32_to_cpu(vid_hdr->lnum);
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int lnum = be32_to_cpu(vid_hdr->lnum);
int compat = vid_hdr->compat;
- int data_size = ubi32_to_cpu(vid_hdr->data_size);
- int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
- int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
- int data_crc = ubi32_to_cpu(vid_hdr->data_crc);
+ int data_size = be32_to_cpu(vid_hdr->data_size);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+ int data_crc = be32_to_cpu(vid_hdr->data_crc);
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
@@ -914,7 +914,7 @@ int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
read_err = err;
}
- magic = ubi32_to_cpu(vid_hdr->magic);
+ magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
/*
* If we have read all 0xFF bytes, the VID header probably does
@@ -957,7 +957,7 @@ int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
}
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
if (verbose) {
@@ -1007,10 +1007,10 @@ int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
if (err)
return err > 0 ? -EINVAL: err;
- vid_hdr->magic = cpu_to_ubi32(UBI_VID_HDR_MAGIC);
+ vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
vid_hdr->version = UBI_VERSION;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
- vid_hdr->hdr_crc = cpu_to_ubi32(crc);
+ vid_hdr->hdr_crc = cpu_to_be32(crc);
err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
@@ -1060,7 +1060,7 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- magic = ubi32_to_cpu(ec_hdr->magic);
+ magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
ubi_err("bad magic %#08x, must be %#08x",
magic, UBI_EC_HDR_MAGIC);
@@ -1105,7 +1105,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
goto exit;
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
ubi_err("paranoid check failed for PEB %d", pnum);
@@ -1137,7 +1137,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- magic = ubi32_to_cpu(vid_hdr->magic);
+ magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
magic, pnum, UBI_VID_HDR_MAGIC);
@@ -1187,7 +1187,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
@@ -1224,9 +1224,10 @@ static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = vmalloc(len);
if (!buf)
return -ENOMEM;
+ memset(buf, 0, len);
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
@@ -1242,7 +1243,7 @@ static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
goto fail;
}
- kfree(buf);
+ vfree(buf);
return 0;
fail:
@@ -1252,7 +1253,7 @@ fail:
err = 1;
error:
ubi_dbg_dump_stack();
- kfree(buf);
+ vfree(buf);
return err;
}
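
The (loff_t)pnum * ubi->peb_size change above is an overflow fix: with both operands plain int, the multiply is done in 32 bits and the result is truncated before it is widened to the 64-bit ei.addr. A minimal user-space sketch of the same effect, with made-up PEB numbers rather than anything taken from UBI:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t pnum = 40000;            /* hypothetical eraseblock number */
    uint32_t peb_size = 128 * 1024;   /* hypothetical 128 KiB eraseblock */

    /* Both operands are 32-bit, so the product wraps before it is
     * widened to 64 bits. */
    uint64_t wrong = pnum * peb_size;

    /* Casting one operand first forces a 64-bit multiply, which is
     * what the (loff_t) cast in the hunk above achieves. */
    uint64_t right = (uint64_t)pnum * peb_size;

    printf("truncated: %llu  correct: %llu\n",
           (unsigned long long)wrong, (unsigned long long)right);
    return 0;
}

The same reasoning applies to the vol->used_bytes computations later in this series, where a (long long) cast is added for the same purpose.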
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d352c4575c3d..4a458e83e4e9 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -37,14 +37,9 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
{
const struct ubi_device *ubi;
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
- !ubi_devices[ubi_num]) {
- module_put(THIS_MODULE);
+ !ubi_devices[ubi_num])
return -ENODEV;
- }
ubi = ubi_devices[ubi_num];
di->ubi_num = ubi->ubi_num;
@@ -52,7 +47,6 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
di->min_io_size = ubi->min_io_size;
di->ro_mode = ubi->ro_mode;
di->cdev = MKDEV(ubi->major, 0);
- module_put(THIS_MODULE);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -319,9 +313,14 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
offset + len > vol->usable_leb_size)
return -EINVAL;
- if (vol->vol_type == UBI_STATIC_VOLUME && lnum == vol->used_ebs - 1 &&
- offset + len > vol->last_eb_bytes)
- return -EINVAL;
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
if (vol->upd_marker)
return -EBADF;
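
The ubi_leb_read() hunk above makes the bounds check survive empty static volumes: when used_ebs is zero there is simply nothing to read, and the old `lnum == vol->used_ebs - 1` comparison must not be evaluated. A simplified, self-contained model of that check follows; the struct and values are placeholders, not the real UBI types:

#include <stdio.h>

enum { DYNAMIC_VOLUME, STATIC_VOLUME };

struct vol {
    int vol_type;
    int used_ebs;        /* logical eraseblocks that hold data */
    int last_eb_bytes;   /* bytes used in the last eraseblock */
    int usable_leb_size;
};

/* 0 means the request is fine (possibly zero bytes), -1 means invalid. */
static int check_read(const struct vol *v, int lnum, int offset, int len)
{
    if (lnum < 0 || offset < 0 || len < 0 ||
        offset + len > v->usable_leb_size)
        return -1;
    if (v->vol_type == STATIC_VOLUME) {
        if (v->used_ebs == 0)
            return 0;    /* empty static volume: nothing to read */
        if (lnum == v->used_ebs - 1 &&
            offset + len > v->last_eb_bytes)
            return -1;
    }
    return 0;
}

int main(void)
{
    struct vol empty = { STATIC_VOLUME, 0, 0, 130944 };

    printf("read from empty static volume -> %d\n",
           check_read(&empty, 0, 0, 64));
    return 0;
}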
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 38d4e6757dc7..9e2338c8e2cf 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -67,7 +67,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
if (vol->vol_type != UBI_STATIC_VOLUME)
return 0;
- buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
+ buf = vmalloc(vol->usable_leb_size);
if (!buf)
return -ENOMEM;
@@ -87,7 +87,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
}
- kfree(buf);
+ vfree(buf);
return err;
}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 473f3200b868..94ee54934411 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -24,7 +24,7 @@
* This unit is responsible for scanning the flash media, checking UBI
* headers and providing complete information about the UBI flash image.
*
- * The scanning information is reoresented by a &struct ubi_scan_info' object.
+ * The scanning information is represented by a &struct ubi_scan_info' object.
* Information about found volumes is represented by &struct ubi_scan_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
@@ -55,8 +55,19 @@ static int paranoid_check_si(const struct ubi_device *ubi,
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
-int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
- struct list_head *list)
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @si: scanning information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ * @list: the list to add to
+ *
+ * This function adds physical eraseblock @pnum to free, erase, corrupted or
+ * alien lists. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
+ struct list_head *list)
{
struct ubi_scan_leb *seb;
@@ -121,9 +132,9 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
const struct ubi_scan_volume *sv, int pnum)
{
int vol_type = vid_hdr->vol_type;
- int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
- int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
if (sv->leb_count != 0) {
int sv_vol_type;
@@ -189,7 +200,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
struct ubi_scan_volume *sv;
struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
- ubi_assert(vol_id == ubi32_to_cpu(vid_hdr->vol_id));
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
@@ -211,11 +222,10 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
return ERR_PTR(-ENOMEM);
sv->highest_lnum = sv->leb_count = 0;
- si->max_sqnum = 0;
sv->vol_id = vol_id;
sv->root = RB_ROOT;
- sv->used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
- sv->data_pad = ubi32_to_cpu(vid_hdr->data_pad);
+ sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
sv->compat = vid_hdr->compat;
sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
@@ -257,10 +267,10 @@ static int compare_lebs(const struct ubi_device *ubi,
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vidh = NULL;
- unsigned long long sqnum2 = ubi64_to_cpu(vid_hdr->sqnum);
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (seb->sqnum == 0 && sqnum2 == 0) {
- long long abs, v1 = seb->leb_ver, v2 = ubi32_to_cpu(vid_hdr->leb_ver);
+ long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
/*
* UBI constantly increases the logical eraseblock version
@@ -344,8 +354,8 @@ static int compare_lebs(const struct ubi_device *ubi,
/* Read the data of the copy and check the CRC */
- len = ubi32_to_cpu(vid_hdr->data_size);
- buf = kmalloc(len, GFP_KERNEL);
+ len = be32_to_cpu(vid_hdr->data_size);
+ buf = vmalloc(len);
if (!buf) {
err = -ENOMEM;
goto out_free_vidh;
@@ -355,7 +365,7 @@ static int compare_lebs(const struct ubi_device *ubi,
if (err && err != UBI_IO_BITFLIPS)
goto out_free_buf;
- data_crc = ubi32_to_cpu(vid_hdr->data_crc);
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
crc = crc32(UBI_CRC32_INIT, buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
@@ -368,7 +378,7 @@ static int compare_lebs(const struct ubi_device *ubi,
bitflips = !!err;
}
- kfree(buf);
+ vfree(buf);
ubi_free_vid_hdr(ubi, vidh);
if (second_is_newer)
@@ -379,7 +389,7 @@ static int compare_lebs(const struct ubi_device *ubi,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
out_free_buf:
- kfree(buf);
+ vfree(buf);
out_free_vidh:
ubi_free_vid_hdr(ubi, vidh);
ubi_assert(err < 0);
@@ -396,8 +406,12 @@ out_free_vidh:
* @vid_hdr: the volume identifier header
* @bitflips: if bit-flips were detected when this physical eraseblock was read
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
*/
int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
@@ -410,10 +424,10 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
struct ubi_scan_leb *seb;
struct rb_node **p, *parent = NULL;
- vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- lnum = ubi32_to_cpu(vid_hdr->lnum);
- sqnum = ubi64_to_cpu(vid_hdr->sqnum);
- leb_ver = ubi32_to_cpu(vid_hdr->leb_ver);
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+ leb_ver = be32_to_cpu(vid_hdr->leb_ver);
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
@@ -422,6 +436,9 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
if (IS_ERR(sv) < 0)
return PTR_ERR(sv);
+ if (si->max_sqnum < sqnum)
+ si->max_sqnum = sqnum;
+
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
@@ -492,11 +509,11 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
return err;
if (cmp_res & 4)
- err = ubi_scan_add_to_list(si, seb->pnum,
- seb->ec, &si->corr);
+ err = add_to_list(si, seb->pnum, seb->ec,
+ &si->corr);
else
- err = ubi_scan_add_to_list(si, seb->pnum,
- seb->ec, &si->erase);
+ err = add_to_list(si, seb->pnum, seb->ec,
+ &si->erase);
if (err)
return err;
@@ -508,7 +525,7 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
if (sv->highest_lnum == lnum)
sv->last_data_size =
- ubi32_to_cpu(vid_hdr->data_size);
+ be32_to_cpu(vid_hdr->data_size);
return 0;
} else {
@@ -517,11 +534,9 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
* previously.
*/
if (cmp_res & 4)
- return ubi_scan_add_to_list(si, pnum, ec,
- &si->corr);
+ return add_to_list(si, pnum, ec, &si->corr);
else
- return ubi_scan_add_to_list(si, pnum, ec,
- &si->erase);
+ return add_to_list(si, pnum, ec, &si->erase);
}
}
@@ -547,12 +562,9 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
if (sv->highest_lnum <= lnum) {
sv->highest_lnum = lnum;
- sv->last_data_size = ubi32_to_cpu(vid_hdr->data_size);
+ sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
- if (si->max_sqnum < sqnum)
- si->max_sqnum = sqnum;
-
sv->leb_count += 1;
rb_link_node(&seb->u.rb, parent, p);
rb_insert_color(&seb->u.rb, &sv->root);
@@ -674,7 +686,7 @@ int ubi_scan_erase_peb(const struct ubi_device *ubi,
return -EINVAL;
}
- ec_hdr->ec = cpu_to_ubi64(ec);
+ ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_sync_erase(ubi, pnum, 0);
if (err < 0)
@@ -754,7 +766,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi,
* @si: scanning information
* @pnum: the physical eraseblock number
*
- * This function returns a zero if the physical eraseblock was succesfully
+ * This function returns a zero if the physical eraseblock was successfully
* handled and a negative error code in case of failure.
*/
static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
@@ -783,8 +795,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
else if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else if (err == UBI_IO_PEB_EMPTY)
- return ubi_scan_add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC,
- &si->erase);
+ return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase);
else if (err == UBI_IO_BAD_EC_HDR) {
/*
* We have to also look at the VID header, possibly it is not
@@ -806,7 +817,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
return -EINVAL;
}
- ec = ubi64_to_cpu(ech->ec);
+ ec = be64_to_cpu(ech->ec);
if (ec > UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. The EC headers have 64 bits
@@ -832,28 +843,28 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
else if (err == UBI_IO_BAD_VID_HDR ||
(err == UBI_IO_PEB_FREE && ec_corr)) {
/* VID header is corrupted */
- err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->corr);
if (err)
return err;
goto adjust_mean_ec;
} else if (err == UBI_IO_PEB_FREE) {
/* No VID header - the physical eraseblock is free */
- err = ubi_scan_add_to_list(si, pnum, ec, &si->free);
+ err = add_to_list(si, pnum, ec, &si->free);
if (err)
return err;
goto adjust_mean_ec;
}
- vol_id = ubi32_to_cpu(vidh->vol_id);
+ vol_id = be32_to_cpu(vidh->vol_id);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
- int lnum = ubi32_to_cpu(vidh->lnum);
+ int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, remove it", vol_id, lnum);
- err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->corr);
if (err)
return err;
break;
@@ -868,7 +879,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
case UBI_COMPAT_PRESERVE:
ubi_msg("\"preserve\" compatible internal volume %d:%d"
" found", vol_id, lnum);
- err = ubi_scan_add_to_list(si, pnum, ec, &si->alien);
+ err = add_to_list(si, pnum, ec, &si->alien);
if (err)
return err;
si->alien_peb_count += 1;
@@ -1109,7 +1120,7 @@ static int paranoid_check_si(const struct ubi_device *ubi,
uint8_t *buf;
/*
- * At first, check that scanning information is ok.
+ * At first, check that scanning information is OK.
*/
ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
int leb_count = 0;
@@ -1249,12 +1260,12 @@ static int paranoid_check_si(const struct ubi_device *ubi,
goto bad_vid_hdr;
}
- if (seb->sqnum != ubi64_to_cpu(vidh->sqnum)) {
+ if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
ubi_err("bad sqnum %llu", seb->sqnum);
goto bad_vid_hdr;
}
- if (sv->vol_id != ubi32_to_cpu(vidh->vol_id)) {
+ if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
ubi_err("bad vol_id %d", sv->vol_id);
goto bad_vid_hdr;
}
@@ -1264,22 +1275,22 @@ static int paranoid_check_si(const struct ubi_device *ubi,
goto bad_vid_hdr;
}
- if (seb->lnum != ubi32_to_cpu(vidh->lnum)) {
+ if (seb->lnum != be32_to_cpu(vidh->lnum)) {
ubi_err("bad lnum %d", seb->lnum);
goto bad_vid_hdr;
}
- if (sv->used_ebs != ubi32_to_cpu(vidh->used_ebs)) {
+ if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
ubi_err("bad used_ebs %d", sv->used_ebs);
goto bad_vid_hdr;
}
- if (sv->data_pad != ubi32_to_cpu(vidh->data_pad)) {
+ if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
ubi_err("bad data_pad %d", sv->data_pad);
goto bad_vid_hdr;
}
- if (seb->leb_ver != ubi32_to_cpu(vidh->leb_ver)) {
+ if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
ubi_err("bad leb_ver %u", seb->leb_ver);
goto bad_vid_hdr;
}
@@ -1288,12 +1299,12 @@ static int paranoid_check_si(const struct ubi_device *ubi,
if (!last_seb)
continue;
- if (sv->highest_lnum != ubi32_to_cpu(vidh->lnum)) {
+ if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
ubi_err("bad highest_lnum %d", sv->highest_lnum);
goto bad_vid_hdr;
}
- if (sv->last_data_size != ubi32_to_cpu(vidh->data_size)) {
+ if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
ubi_err("bad last_data_size %d", sv->last_data_size);
goto bad_vid_hdr;
}
@@ -1310,8 +1321,10 @@ static int paranoid_check_si(const struct ubi_device *ubi,
memset(buf, 1, ubi->peb_count);
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
err = ubi_io_is_bad(ubi, pnum);
- if (err < 0)
+ if (err < 0) {
+ kfree(buf);
return err;
+ }
else if (err)
buf[pnum] = 0;
}
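
Most of the scan.c churn above replaces the private ubi32_to_cpu()/ubi64_to_cpu() helpers with the generic be32_to_cpu()/be64_to_cpu() accessors; UBI's on-flash headers store all multi-byte fields big-endian, and the conversion has to happen once at the point where a field is read. A user-space analogue, where ntohl() plays the role of be32_to_cpu() (network byte order is big-endian):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
    /* A 32-bit header field as it would sit on flash: big-endian
     * bytes for the value 300 (0x0000012c). */
    unsigned char raw[4] = { 0x00, 0x00, 0x01, 0x2c };
    uint32_t be;

    memcpy(&be, raw, sizeof(be));
    printf("host value = %u\n", (unsigned)ntohl(be));
    return 0;
}

The other behavioural change in these hunks is that si->max_sqnum is now updated for every used eraseblock as soon as its volume is looked up, instead of only on the branch that inserts a brand-new tree node.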
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 3949f6192c76..140e82e26534 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -147,8 +147,6 @@ static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
list_add_tail(&seb->u.list, list);
}
-int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
- struct list_head *list);
int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
int bitflips);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index feb647f108f0..5959f91be240 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -35,6 +35,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <mtd/ubi-header.h>
@@ -374,9 +375,11 @@ void ubi_calculate_reserved(struct ubi_device *ubi);
#ifdef CONFIG_MTD_UBI_GLUEBI
int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
int ubi_destroy_gluebi(struct ubi_volume *vol);
+void ubi_gluebi_updated(struct ubi_volume *vol);
#else
#define ubi_create_gluebi(ubi, vol) 0
#define ubi_destroy_gluebi(vol) 0
+#define ubi_gluebi_updated(vol)
#endif
/* eba.c */
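
The ubi.h hunk adds a ubi_gluebi_updated() hook with an empty macro fallback when CONFIG_MTD_UBI_GLUEBI is disabled, the usual way of keeping callers free of #ifdef clutter. A stand-alone sketch of the pattern with an invented configuration switch:

#include <stdio.h>

/* #define HAVE_GLUEBI 1 */        /* pretend Kconfig option */

#ifdef HAVE_GLUEBI
static void gluebi_updated(int vol_id)
{
    printf("notify gluebi about volume %d\n", vol_id);
}
#else
/* Subsystem compiled out: the call collapses to nothing. */
#define gluebi_updated(vol_id) do { } while (0)
#endif

int main(void)
{
    gluebi_updated(3);    /* a no-op unless HAVE_GLUEBI is defined */
    return 0;
}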
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 8925b977e3dc..0efc586a8328 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -150,7 +150,7 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
vol->updating = 0;
}
- vol->upd_buf = kmalloc(ubi->leb_size, GFP_KERNEL);
+ vol->upd_buf = vmalloc(ubi->leb_size);
if (!vol->upd_buf)
return -ENOMEM;
@@ -339,7 +339,7 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
err = ubi_wl_flush(ubi);
if (err == 0) {
err = to_write;
- kfree(vol->upd_buf);
+ vfree(vol->upd_buf);
vol->updating = 0;
}
}
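
upd.c, misc.c and io.c all switch their eraseblock-sized scratch buffers from kmalloc() to vmalloc() in this series. A logical eraseblock can be hundreds of kilobytes, and kmalloc() needs that much physically contiguous memory, which becomes unreliable once memory fragments; vmalloc() only needs virtually contiguous pages. A kernel-style sketch of the pattern (this fragment only builds inside a kernel tree, and process_leb() is an invented name):

#include <linux/vmalloc.h>
#include <linux/errno.h>

static int process_leb(size_t leb_size)
{
    void *buf;

    /* Large, short-lived buffer: virtually contiguous is enough. */
    buf = vmalloc(leb_size);
    if (!buf)
        return -ENOMEM;

    /* ... read and verify one logical eraseblock into buf ... */

    vfree(buf);
    return 0;
}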
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 622d0d18952c..ea0d5c825ab4 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -228,7 +228,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
- strcmp(ubi->volumes[i]->name, req->name) == 0) {
+ !strcmp(ubi->volumes[i]->name, req->name)) {
dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
@@ -243,7 +243,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
- spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
goto out_unlock;
}
@@ -281,7 +280,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
- vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
} else {
bytes = vol->used_bytes;
vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
@@ -320,10 +320,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Fill volume table record */
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
- vtbl_rec.reserved_pebs = cpu_to_ubi32(vol->reserved_pebs);
- vtbl_rec.alignment = cpu_to_ubi32(vol->alignment);
- vtbl_rec.data_pad = cpu_to_ubi32(vol->data_pad);
- vtbl_rec.name_len = cpu_to_ubi16(vol->name_len);
+ vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+ vtbl_rec.alignment = cpu_to_be32(vol->alignment);
+ vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
+ vtbl_rec.name_len = cpu_to_be16(vol->name_len);
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
vtbl_rec.vol_type = UBI_VID_DYNAMIC;
else
@@ -352,6 +352,7 @@ out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
+ ubi->volumes[vol_id] = NULL;
out_unlock:
spin_unlock(&ubi->volumes_lock);
kfree(vol);
@@ -368,6 +369,7 @@ out_sysfs:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
+ ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
volume_sysfs_close(vol);
return err;
@@ -503,7 +505,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
/* Change volume table record */
memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
- vtbl_rec.reserved_pebs = cpu_to_ubi32(reserved_pebs);
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_acc;
@@ -537,7 +539,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
- vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
}
paranoid_check_volumes(ubi);
@@ -643,21 +646,33 @@ void ubi_free_volume(struct ubi_device *ubi, int vol_id)
* @ubi: UBI device description object
* @vol_id: volume ID
*/
-static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
+static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
- const struct ubi_volume *vol = ubi->volumes[idx];
+ const struct ubi_volume *vol;
long long n;
const char *name;
- reserved_pebs = ubi32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ spin_lock(&ubi->volumes_lock);
+ reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ vol = ubi->volumes[idx];
if (!vol) {
if (reserved_pebs) {
ubi_err("no volume info, but volume exists");
goto fail;
}
+ spin_unlock(&ubi->volumes_lock);
+ return;
+ }
+
+ if (vol->exclusive) {
+ /*
+ * The volume may be being created at the moment, do not check
+ * it (e.g., it may be in the middle of ubi_create_volume()).

+ */
+ spin_unlock(&ubi->volumes_lock);
return;
}
@@ -726,7 +741,7 @@ static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
goto fail;
}
- n = vol->used_ebs * vol->usable_leb_size;
+ n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->corrupted != 0) {
ubi_err("corrupted dynamic volume");
@@ -765,9 +780,9 @@ static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
}
}
- alignment = ubi32_to_cpu(ubi->vtbl[vol_id].alignment);
- data_pad = ubi32_to_cpu(ubi->vtbl[vol_id].data_pad);
- name_len = ubi16_to_cpu(ubi->vtbl[vol_id].name_len);
+ alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+ data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+ name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
upd_marker = ubi->vtbl[vol_id].upd_marker;
name = &ubi->vtbl[vol_id].name[0];
if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
@@ -782,12 +797,14 @@ static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
goto fail;
}
+ spin_unlock(&ubi->volumes_lock);
return;
fail:
- ubi_err("paranoid check failed");
+ ubi_err("paranoid check failed for volume %d", vol_id);
ubi_dbg_dump_vol_info(vol);
ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ spin_unlock(&ubi->volumes_lock);
BUG();
}
@@ -800,10 +817,8 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
int i;
mutex_lock(&ubi->vtbl_mutex);
- spin_lock(&ubi->volumes_lock);
for (i = 0; i < ubi->vtbl_slots; i++)
paranoid_check_volume(ubi, i);
- spin_unlock(&ubi->volumes_lock);
mutex_unlock(&ubi->vtbl_mutex);
}
#endif
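
Besides widening the used_bytes arithmetic, the vmt.c hunks add `ubi->volumes[vol_id] = NULL` to both failure paths of ubi_create_volume(), so a half-registered volume is dropped from the table again before its accounting is rolled back and the descriptor freed. The unwind pattern in isolation, with an invented table structure standing in for the UBI device:

#include <stdio.h>
#include <stdlib.h>

#define SLOTS 8

struct table {
    void *slot[SLOTS];
    int reserved;    /* blocks handed out to volumes */
    int avail;       /* blocks still free */
};

static int create(struct table *t, int id, int need, int fail_late)
{
    void *vol;
    int err = 0;

    if (need > t->avail)
        return -1;
    t->avail -= need;
    t->reserved += need;

    vol = calloc(1, 64);
    if (!vol) {
        err = -1;
        goto out_acc;
    }
    t->slot[id] = vol;

    if (fail_late) {          /* e.g. writing the volume table failed */
        err = -1;
        goto out_slot;
    }
    return 0;

out_slot:
    t->slot[id] = NULL;       /* drop the half-created entry again */
out_acc:
    t->avail += need;
    t->reserved -= need;
    free(vol);
    return err;
}

int main(void)
{
    struct table t = { .avail = 16 };

    create(&t, 3, 4, 1);      /* force the late failure path */
    printf("avail=%d reserved=%d slot3=%p\n",
           t.avail, t.reserved, t.slot[3]);
    return 0;
}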
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index b6fd6bbd941e..bc5df50813d6 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -93,12 +93,9 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
vtbl_rec = &empty_vtbl_record;
else {
crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
- vtbl_rec->crc = cpu_to_ubi32(crc);
+ vtbl_rec->crc = cpu_to_be32(crc);
}
- dbg_msg("change record %d", idx);
- ubi_dbg_dump_vtbl_record(vtbl_rec, idx);
-
mutex_lock(&ubi->vtbl_mutex);
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
@@ -141,18 +138,18 @@ static int vtbl_check(const struct ubi_device *ubi,
for (i = 0; i < ubi->vtbl_slots; i++) {
cond_resched();
- reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
- alignment = ubi32_to_cpu(vtbl[i].alignment);
- data_pad = ubi32_to_cpu(vtbl[i].data_pad);
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
upd_marker = vtbl[i].upd_marker;
vol_type = vtbl[i].vol_type;
- name_len = ubi16_to_cpu(vtbl[i].name_len);
+ name_len = be16_to_cpu(vtbl[i].name_len);
name = &vtbl[i].name[0];
crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
- if (ubi32_to_cpu(vtbl[i].crc) != crc) {
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
- i, crc, ubi32_to_cpu(vtbl[i].crc));
+ i, crc, be32_to_cpu(vtbl[i].crc));
ubi_dbg_dump_vtbl_record(&vtbl[i], i);
return 1;
}
@@ -225,8 +222,8 @@ static int vtbl_check(const struct ubi_device *ubi,
/* Checks that all names are unique */
for (i = 0; i < ubi->vtbl_slots - 1; i++) {
for (n = i + 1; n < ubi->vtbl_slots; n++) {
- int len1 = ubi16_to_cpu(vtbl[i].name_len);
- int len2 = ubi16_to_cpu(vtbl[n].name_len);
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
@@ -288,13 +285,13 @@ retry:
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->vol_id = cpu_to_ubi32(UBI_LAYOUT_VOL_ID);
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
- vid_hdr->data_pad = cpu_to_ubi32(0);
- vid_hdr->lnum = cpu_to_ubi32(copy);
- vid_hdr->sqnum = cpu_to_ubi64(++si->max_sqnum);
- vid_hdr->leb_ver = cpu_to_ubi32(old_seb ? old_seb->leb_ver + 1: 0);
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->lnum = cpu_to_be32(copy);
+ vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
/* The EC header is already there, write the VID header */
err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
@@ -317,14 +314,15 @@ retry:
return err;
write_error:
- kfree(new_seb);
- /* May be this physical eraseblock went bad, try to pick another one */
- if (++tries <= 5) {
- err = ubi_scan_add_to_list(si, new_seb->pnum, new_seb->ec,
- &si->corr);
- if (!err)
- goto retry;
+ if (err == -EIO && ++tries <= 5) {
+ /*
+ * Probably this physical eraseblock went bad, try to pick
+ * another one.
+ */
+ list_add_tail(&new_seb->u.list, &si->corr);
+ goto retry;
}
+ kfree(new_seb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -380,11 +378,12 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
/* Read both LEB 0 and LEB 1 into memory */
ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = kzalloc(ubi->vtbl_size, GFP_KERNEL);
+ leb[seb->lnum] = vmalloc(ubi->vtbl_size);
if (!leb[seb->lnum]) {
err = -ENOMEM;
goto out_free;
}
+ memset(leb[seb->lnum], 0, ubi->vtbl_size);
err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
ubi->vtbl_size);
@@ -415,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
}
/* Both LEB 1 and LEB 2 are OK and consistent */
- kfree(leb[1]);
+ vfree(leb[1]);
return leb[0];
} else {
/* LEB 0 is corrupted or does not exist */
@@ -436,13 +435,13 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
goto out_free;
ubi_msg("volume table was restored");
- kfree(leb[0]);
+ vfree(leb[0]);
return leb[1];
}
out_free:
- kfree(leb[0]);
- kfree(leb[1]);
+ vfree(leb[0]);
+ vfree(leb[1]);
return ERR_PTR(err);
}
@@ -460,9 +459,10 @@ static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi,
int i;
struct ubi_vtbl_record *vtbl;
- vtbl = kzalloc(ubi->vtbl_size, GFP_KERNEL);
+ vtbl = vmalloc(ubi->vtbl_size);
if (!vtbl)
return ERR_PTR(-ENOMEM);
+ memset(vtbl, 0, ubi->vtbl_size);
for (i = 0; i < ubi->vtbl_slots; i++)
memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
@@ -472,7 +472,7 @@ static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi,
err = create_vtbl(ubi, si, i, vtbl);
if (err) {
- kfree(vtbl);
+ vfree(vtbl);
return ERR_PTR(err);
}
}
@@ -500,19 +500,19 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
for (i = 0; i < ubi->vtbl_slots; i++) {
cond_resched();
- if (ubi32_to_cpu(vtbl[i].reserved_pebs) == 0)
+ if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
continue; /* Empty record */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
- vol->reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
- vol->alignment = ubi32_to_cpu(vtbl[i].alignment);
- vol->data_pad = ubi32_to_cpu(vtbl[i].data_pad);
+ vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ vol->alignment = be32_to_cpu(vtbl[i].alignment);
+ vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
- vol->name_len = ubi16_to_cpu(vtbl[i].name_len);
+ vol->name_len = be16_to_cpu(vtbl[i].name_len);
vol->usable_leb_size = ubi->leb_size - vol->data_pad;
memcpy(vol->name, vtbl[i].name, vol->name_len);
vol->name[vol->name_len] = '\0';
@@ -531,7 +531,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
- vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
continue;
}
@@ -561,7 +562,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
vol->used_ebs = sv->used_ebs;
- vol->used_bytes = (vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
vol->used_bytes += sv->last_data_size;
vol->last_eb_bytes = sv->last_data_size;
}
@@ -578,7 +580,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->usable_leb_size = ubi->leb_size;
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->reserved_pebs;
- vol->used_bytes = vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
vol->vol_id = UBI_LAYOUT_VOL_ID;
ubi_assert(!ubi->volumes[i]);
@@ -718,7 +721,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
int i, err;
struct ubi_scan_volume *sv;
- empty_vtbl_record.crc = cpu_to_ubi32(0xf116c36b);
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
/*
* The number of supported volumes is limited by the eraseblock size
@@ -783,7 +786,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return 0;
out_free:
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
if (ubi->volumes[i]) {
kfree(ubi->volumes[i]);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9ecaf77eca9e..a5a9b8d87302 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -667,7 +667,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
- ec_hdr->ec = cpu_to_ubi64(ec);
+ ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
if (err)
@@ -1060,9 +1060,8 @@ out_unlock:
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
- int err;
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum;
+ int pnum = e->pnum, err, need;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1097,62 +1096,70 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
kfree(wl_wrk);
kmem_cache_free(wl_entries_slab, e);
- if (err != -EIO) {
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+ err1 = schedule_erase(ubi, e, 0);
+ if (err1) {
+ err = err1;
+ goto out_ro;
+ }
+ return err;
+ } else if (err != -EIO) {
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to RO mode.
*/
- ubi_ro_mode(ubi);
- return err;
+ goto out_ro;
}
/* It is %-EIO, the PEB went bad */
if (!ubi->bad_allowed) {
ubi_err("bad physical eraseblock %d detected", pnum);
- ubi_ro_mode(ubi);
- err = -EIO;
- } else {
- int need;
-
- spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
+ goto out_ro;
+ }
- if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- ubi_ro_mode(ubi);
- return -EIO;
- }
+ spin_lock(&ubi->volumes_lock);
+ need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
+ if (need > 0) {
+ need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ if (need > 0)
+ ubi_msg("reserve more %d PEBs", need);
+ }
+ if (ubi->beb_rsvd_pebs == 0) {
spin_unlock(&ubi->volumes_lock);
- ubi_msg("mark PEB %d as bad", pnum);
+ ubi_err("no reserved physical eraseblocks");
+ goto out_ro;
+ }
- err = ubi_io_mark_bad(ubi, pnum);
- if (err) {
- ubi_ro_mode(ubi);
- return err;
- }
+ spin_unlock(&ubi->volumes_lock);
+ ubi_msg("mark PEB %d as bad", pnum);
- spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
- ubi->bad_peb_count += 1;
- ubi->good_peb_count -= 1;
- ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs == 0)
- ubi_warn("last PEB from the reserved pool was used");
- spin_unlock(&ubi->volumes_lock);
- }
+ err = ubi_io_mark_bad(ubi, pnum);
+ if (err)
+ goto out_ro;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->beb_rsvd_pebs -= 1;
+ ubi->bad_peb_count += 1;
+ ubi->good_peb_count -= 1;
+ ubi_calculate_reserved(ubi);
+ if (ubi->beb_rsvd_pebs == 0)
+ ubi_warn("last PEB from the reserved pool was used");
+ spin_unlock(&ubi->volumes_lock);
+
+ return err;
+out_ro:
+ ubi_ro_mode(ubi);
return err;
}
@@ -1346,6 +1353,7 @@ static int ubi_thread(void *u)
ubi_msg("background thread \"%s\" started, PID %d",
ubi->bgt_name, current->pid);
+ set_freezable();
for (;;) {
int err;
@@ -1444,7 +1452,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (ubi_devices_cnt == 0) {
wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
sizeof(struct ubi_wl_entry),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!wl_entries_slab)
return -ENOMEM;
}
@@ -1633,7 +1641,7 @@ static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
goto out_free;
}
- read_ec = ubi64_to_cpu(ec_hdr->ec);
+ read_ec = be64_to_cpu(ec_hdr->ec);
if (ec != read_ec) {
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index da1a22c13865..ab18343e58ef 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -990,7 +990,7 @@ static void elmc_rcv_int(struct net_device *dev)
if (skb != NULL) {
skb_reserve(skb, 2); /* 16 byte alignment */
skb_put(skb,totlen);
- eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 0877fc372f4b..e89ace109a5d 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -333,9 +333,9 @@ static int lance_rx (struct net_device *dev)
skb_reserve (skb, 2); /* 16 byte align */
skb_put (skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 58bbc3e6d0de..e970e64bf966 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -26,7 +26,6 @@
TODO:
* Test Tx checksumming thoroughly
- * Implement dev->tx_timeout
Low priority TODO:
* Complete reset on PciErr
@@ -1218,6 +1217,30 @@ static int cp_close (struct net_device *dev)
return 0;
}
+static void cp_tx_timeout(struct net_device *dev)
+{
+ struct cp_private *cp = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
+ dev->name, cpr8(Cmd), cpr16(CpCmd),
+ cpr16(IntrStatus), cpr16(IntrMask));
+
+ spin_lock_irqsave(&cp->lock, flags);
+
+ cp_stop_hw(cp);
+ cp_clean_rings(cp);
+ rc = cp_init_rings(cp);
+ cp_start_hw(cp);
+
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&cp->lock, flags);
+
+ return;
+}
+
#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -1799,7 +1822,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *regs;
resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
- u8 pci_rev;
#ifndef MODULE
static int version_printed;
@@ -1807,13 +1829,11 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
printk("%s", version);
#endif
- pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
- pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
dev_err(&pdev->dev,
"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
- pdev->vendor, pdev->device, pci_rev);
+ pdev->vendor, pdev->device, pdev->revision);
dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
return -ENODEV;
}
@@ -1923,10 +1943,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->change_mtu = cp_change_mtu;
#endif
dev->ethtool_ops = &cp_ethtool_ops;
-#if 0
dev->tx_timeout = cp_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
-#endif
#if CP_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a844b1fe2dc4..327eaa7b4999 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -931,7 +931,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
int i, addr_len, option;
void __iomem *ioaddr;
static int board_idx = -1;
- u8 pci_rev;
assert (pdev != NULL);
assert (ent != NULL);
@@ -949,13 +948,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
}
#endif
- pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
- pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
+ pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
dev_info(&pdev->dev,
"This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
- pdev->vendor, pdev->device, pci_rev);
+ pdev->vendor, pdev->device, pdev->revision);
dev_info(&pdev->dev,
"Use the \"8139cp\" driver for improved performance and stability.\n");
}
@@ -2017,7 +2014,7 @@ no_early_rx:
#if RX_BUF_IDX == 3
wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
#else
- eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
#endif
skb_put (skb, pkt_size);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b941c74a06c4..f8a602caabcb 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -5,6 +5,7 @@
menuconfig NETDEVICES
default y if UML
+ depends on NET
bool "Network device support"
---help---
You can say N here if you don't intend to connect your Linux box to
@@ -25,6 +26,14 @@ menuconfig NETDEVICES
# that for each of the symbols.
if NETDEVICES
+config NETDEVICES_MULTIQUEUE
+ bool "Netdevice multiple hardware queue support"
+ ---help---
+ Say Y here if you want to allow the network stack to use multiple
+ hardware TX queues on an ethernet device.
+
+ Most people will say N here.
+
config IFB
tristate "Intermediate Functional Block support"
depends on NET_CLS_ACT
@@ -74,6 +83,16 @@ config BONDING
To compile this driver as a module, choose M here: the module
will be called bonding.
+config MACVLAN
+ tristate "MAC-VLAN support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+ This allows one to create virtual interfaces that map packets to
+ or from specific MAC addresses to a particular interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macvlan.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -187,7 +206,7 @@ config MII
config MACB
tristate "Atmel MACB support"
depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263
- select MII
+ select PHYLIB
help
The Atmel MACB ethernet interface is found on many AT32 and AT91
parts. Say Y to include support for the MACB chip.
@@ -387,22 +406,6 @@ config ATARILANCE
on the AMD Lance chipset: RieblCard (with or without battery), or
PAMCard VME (also the version by Rhotron, with different addresses).
-config ATARI_BIONET
- tristate "BioNet-100 support"
- depends on ATARI && ATARI_ACSI && BROKEN
- help
- Say Y to include support for BioData's BioNet-100 Ethernet adapter
- for the ACSI port. The driver works (has to work...) with a polled
- I/O scheme, so it's rather slow :-(
-
-config ATARI_PAMSNET
- tristate "PAMsNet support"
- depends on ATARI && ATARI_ACSI && BROKEN
- help
- Say Y to include support for the PAMsNet Ethernet adapter for the
- ACSI port ("ACSI node"). The driver works (has to work...) with a
- polled I/O scheme, so it's rather slow :-(
-
config SUN3LANCE
tristate "Sun3/Sun3x on-board LANCE support"
depends on SUN3 || SUN3X
@@ -586,6 +589,12 @@ config CASSINI
Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
<http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+config SUNVNET
+ tristate "Sun Virtual Network support"
+ depends on SUN_LDOMS
+ help
+ Support for virtual network devices under Sun Logical Domains.
+
config NET_VENDOR_3COM
bool "3COM cards"
depends on ISA || EISA || MCA || PCI
@@ -830,6 +839,50 @@ config ULTRA32
<file:Documentation/networking/net-modules.txt>. The module
will be called smc-ultra32.
+config BFIN_MAC
+ tristate "Blackfin 536/537 on-chip mac support"
+ depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+ select CRC32
+ select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
+ help
+ This is the driver for the Blackfin on-chip MAC device. Say Y if you want it
+ compiled into the kernel. This driver is also available as a module
+ ( = code which can be inserted in and removed from the running kernel
+ whenever you want). The module will be called bfin_mac.
+
+config BFIN_MAC_USE_L1
+ bool "Use L1 memory for rx/tx packets"
+ depends on BFIN_MAC && BF537
+ default y
+ help
+ To get maximum network performance, you should use L1 memory as rx/tx buffers.
+ Say N here if you want to reserve L1 memory for other uses.
+
+config BFIN_TX_DESC_NUM
+ int "Number of transmit buffer packets"
+ depends on BFIN_MAC
+ range 6 10 if BFIN_MAC_USE_L1
+ range 10 100
+ default "10"
+ help
+ Set the number of buffer packets used in the driver.
+
+config BFIN_RX_DESC_NUM
+ int "Number of receive buffer packets"
+ depends on BFIN_MAC
+ range 20 100 if BFIN_MAC_USE_L1
+ range 20 800
+ default "20"
+ help
+ Set the number of buffer packets used in the driver.
+
+config BFIN_MAC_RMII
+ bool "RMII PHY Interface (EXPERIMENTAL)"
+ depends on BFIN_MAC && EXPERIMENTAL
+ default n
+ help
+ Use Reduced PHY MII Interface
+
config SMC9194
tristate "SMC 9194 support"
depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -877,7 +930,7 @@ config NET_NETX
config DM9000
tristate "DM9000 support"
- depends on ARM || MIPS
+ depends on ARM || BLACKFIN || MIPS
select CRC32
select MII
---help---
@@ -2478,6 +2531,18 @@ source "drivers/atm/Kconfig"
source "drivers/s390/net/Kconfig"
+config XEN_NETDEV_FRONTEND
+ tristate "Xen network device frontend driver"
+ depends on XEN
+ default y
+ help
+ The network device frontend driver allows the kernel to
+ access network devices exported by a virtual
+ machine containing a physical network device driver. The
+ frontend driver is intended for unprivileged guest domains;
+ if you are compiling a kernel for a Xen guest, you almost
+ certainly want to enable this.
+
config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"
depends on PPC_ISERIES
@@ -2784,6 +2849,19 @@ config PPPOATM
which can lead to bad results if the ATM peer loses state and
changes its encapsulation unilaterally.
+config PPPOL2TP
+ tristate "PPP over L2TP (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PPP
+ help
+ Support for PPP-over-L2TP socket family. L2TP is a protocol
+ used by ISPs and enterprises to tunnel PPP traffic over UDP
+ tunnels. L2TP is replacing PPTP for VPN uses.
+
+ This kernel component handles only L2TP data packets: a
+ userland daemon handles the L2TP control protocol (tunnel
+ and session setup). One such daemon is OpenL2TP
+ (http://openl2tp.sourceforge.net/).
+
config SLIP
tristate "SLIP (serial line) support"
---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1bbcbedad04a..336af0635df8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SUNBMAC) += sunbmac.o
obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
obj-$(CONFIG_CASSINI) += cassini.o
+obj-$(CONFIG_SUNVNET) += sunvnet.o
obj-$(CONFIG_MACE) += mace.o
obj-$(CONFIG_BMAC) += bmac.o
@@ -121,12 +122,16 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
+obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
+obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o
@@ -172,14 +177,13 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
+obj-$(CONFIG_LGUEST_GUEST) += lguest_net.o
obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
obj-$(CONFIG_DECLANCE) += declance.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
-obj-$(CONFIG_ATARI_BIONET) += atari_bionet.o
-obj-$(CONFIG_ATARI_PAMSNET) += atari_pamsnet.o
obj-$(CONFIG_A2065) += a2065.o
obj-$(CONFIG_HYDRA) += hydra.o
obj-$(CONFIG_ARIADNE) += ariadne.o
@@ -197,6 +201,7 @@ obj-$(CONFIG_S2IO) += s2io.o
obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
+obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_FEC_8XX) += fec_8xx/
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 1c3e293fbaf7..3b79c6cf21a3 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -75,8 +75,6 @@ extern struct net_device *atarilance_probe(int unit);
extern struct net_device *sun3lance_probe(int unit);
extern struct net_device *sun3_82586_probe(int unit);
extern struct net_device *apne_probe(int unit);
-extern struct net_device *bionet_probe(int unit);
-extern struct net_device *pamsnet_probe(int unit);
extern struct net_device *cs89x0_probe(int unit);
extern struct net_device *hplance_probe(int unit);
extern struct net_device *bagetlance_probe(int unit);
@@ -264,12 +262,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
#ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */
{apne_probe, 0},
#endif
-#ifdef CONFIG_ATARI_BIONET /* Atari Bionet Ethernet board */
- {bionet_probe, 0},
-#endif
-#ifdef CONFIG_ATARI_PAMSNET /* Atari PAMsNet Ethernet board */
- {pamsnet_probe, 0},
-#endif
#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */
{mvme147lance_probe, 0},
#endif
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 81d5a374042a..a45de6975bfe 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -322,9 +322,9 @@ static int lance_rx (struct net_device *dev)
skb_reserve (skb, 2); /* 16 byte align */
skb_put (skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index a241ae7855a3..bc5a38a6705f 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -746,7 +746,7 @@ static int ariadne_rx(struct net_device *dev)
skb_reserve(skb,2); /* 16 byte align */
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
+ skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
skb->protocol=eth_type_trans(skb,dev);
#if 0
printk(KERN_DEBUG "RX pkt type 0x%04x from ",
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 5bf2d33887ac..f9cc2b621fe2 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -43,6 +43,7 @@ config ARM_AT91_ETHER
config EP93XX_ETH
tristate "EP93xx Ethernet support"
depends on ARM && ARCH_EP93XX
+ select MII
help
This is a driver for the ethernet hardware included in EP93xx CPUs.
Say Y if you are building a kernel for EP93xx based devices.
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 2438c5bff237..f6ece1d43f6e 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -258,7 +258,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget)
skb_reserve(skb, 2);
dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
length, DMA_FROM_DEVICE);
- eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0);
+ skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index f21148e7b579..80f33b6d5713 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -36,7 +36,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
@@ -75,7 +74,7 @@ static void ether1_timeout(struct net_device *dev);
/* ------------------------------------------------------------------------- */
-static char version[] __initdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
+static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
#define BUS_16 16
#define BUS_8 8
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index da713500654d..3805506a3ab8 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -51,7 +51,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
@@ -69,7 +68,7 @@
#include <asm/ecard.h>
#include <asm/io.h>
-static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
+static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
#include "ether3.h"
@@ -464,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI) {
+ } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 769ba69451f4..0d37d9d1fd78 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -31,7 +31,6 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
-#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
diff --git a/drivers/net/atari_bionet.c b/drivers/net/atari_bionet.c
deleted file mode 100644
index 3d87bd2b4194..000000000000
--- a/drivers/net/atari_bionet.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/* bionet.c BioNet-100 device driver for linux68k.
- *
- * Version: @(#)bionet.c 1.0 02/06/96
- *
- * Author: Hartmut Laue <laue@ifk-mp.uni-kiel.de>
- * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de>
- *
- * Little adaptions for integration into pl7 by Roman Hodek
- *
- * Some changes in bionet_poll_rx by Karl-Heinz Lohner
- *
- What is it ?
- ------------
- This driver controls the BIONET-100 LAN-Adapter which connects
- an ATARI ST/TT via the ACSI-port to an Ethernet-based network.
-
- This version can be compiled as a loadable module (See the
- compile command at the bottom of this file).
- At load time, you can optionally set the debugging level and the
- fastest response time on the command line of 'insmod'.
-
- 'bionet_debug'
- controls the amount of diagnostic messages:
- 0 : no messages
- >0 : see code for meaning of printed messages
-
- 'bionet_min_poll_time' (always >=1)
- gives the time (in jiffies) between polls. Low values
- increase the system load (beware!)
-
- When loaded, a net device with the name 'bio0' becomes available,
- which can be controlled with the usual 'ifconfig' command.
-
- It is possible to compile this driver into the kernel like other
- (net) drivers. For this purpose, some source files (e.g. config-files
- makefiles, Space.c) must be changed accordingly. (You may refer to
- other drivers how to do it.) In this case, the device will be detected
- at boot time and (probably) appear as 'eth0'.
-
- This code is based on several sources:
- - The driver code for a parallel port ethernet adapter by
- Donald Becker (see file 'atp.c' from the PC linux distribution)
- - The ACSI code by Roman Hodek for the ATARI-ACSI harddisk support
- and DMA handling.
- - Very limited information about moving packets in and out of the
- BIONET-adapter from the TCP package for TOS by BioData GmbH.
-
- Theory of Operation
- -------------------
- Because the ATARI DMA port is usually shared between several
- devices (eg. harddisk, floppy) we cannot block the ACSI bus
- while waiting for interrupts. Therefore we use a polling mechanism
- to fetch packets from the adapter. For the same reason, we send
- packets without checking that the previous packet has been sent to
- the LAN. We rely on the higher levels of the networking code to detect
- missing packets and resend them.
-
- Before we access the ATARI DMA controller, we check if another
- process is using the DMA. If not, we lock the DMA, perform one or
- more packet transfers and unlock the DMA before returning.
- We do not use 'stdma_lock' unconditionally because it is unclear
- if the networking code can be set to sleep, which will happen if
- another (possibly slow) device is using the DMA controller.
-
- The polling is done via timer interrupts which periodically
- 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies)
- between polls varies depending on an estimate of the net activity.
- The allowed range is given by the variable 'bionet_min_poll_time'
- for the lower (fastest) limit and the constant 'MAX_POLL_TIME'
- for the higher (slowest) limit.
-
- Whenever a packet arrives, we switch to fastest response by setting
- the polling time to its lowest limit. If the following poll fails,
- because no packets have arrived, we increase the time for the next
- poll. When the net activity is low, the polling time effectively
- stays at its maximum value, resulting in the lowest load for the
- machine.
- */
-
-#define MAX_POLL_TIME 10
-
-static char version[] =
- "bionet.c:v1.0 06-feb-96 (c) Hartmut Laue.\n";
-
-#include <linux/module.h>
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/setup.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_acsi.h>
-#include <asm/atari_stdma.h>
-
-
-/* use 0 for production, 1 for verification, >2 for debug
- */
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-/*
- * Global variable 'bionet_debug'. Can be set at load time by 'insmod'
- */
-unsigned int bionet_debug = NET_DEBUG;
-module_param(bionet_debug, int, 0);
-MODULE_PARM_DESC(bionet_debug, "bionet debug level (0-2)");
-MODULE_LICENSE("GPL");
-
-static unsigned int bionet_min_poll_time = 2;
-
-
-/* Information that need to be kept for each board.
- */
-struct net_local {
- struct net_device_stats stats;
- long open_time; /* for debugging */
- int poll_time; /* polling time varies with net load */
-};
-
-static struct nic_pkt_s { /* packet format */
- unsigned char status;
- unsigned char dummy;
- unsigned char l_lo, l_hi;
- unsigned char buffer[3000];
-} *nic_packet;
-unsigned char *phys_nic_packet;
-
-/* Index to functions, as function prototypes.
- */
-static int bionet_open(struct net_device *dev);
-static int bionet_send_packet(struct sk_buff *skb, struct net_device *dev);
-static void bionet_poll_rx(struct net_device *);
-static int bionet_close(struct net_device *dev);
-static struct net_device_stats *net_get_stats(struct net_device *dev);
-static void bionet_tick(unsigned long);
-
-static DEFINE_TIMER(bionet_timer, bionet_tick, 0, 0);
-
-#define STRAM_ADDR(a) (((a) & 0xff000000) == 0)
-
-/* The following routines access the ethernet board connected to the
- * ACSI port via the st_dma chip.
- */
-#define NODE_ADR 0x60
-
-#define C_READ 8
-#define C_WRITE 0x0a
-#define C_GETEA 0x0f
-#define C_SETCR 0x0e
-
-static int
-sendcmd(unsigned int a0, unsigned int mod, unsigned int cmd) {
- unsigned int c;
-
- dma_wd.dma_mode_status = (mod | ((a0) ? 2 : 0) | 0x88);
- dma_wd.fdc_acces_seccount = cmd;
- dma_wd.dma_mode_status = (mod | 0x8a);
-
- if( !acsi_wait_for_IRQ(HZ/2) ) /* wait for cmd ack */
- return -1; /* timeout */
-
- c = dma_wd.fdc_acces_seccount;
- return (c & 0xff);
-}
-
-
-static void
-set_status(int cr) {
- sendcmd(0,0x100,NODE_ADR | C_SETCR); /* CMD: SET CR */
- sendcmd(1,0x100,cr);
-
- dma_wd.dma_mode_status = 0x80;
-}
-
-static int
-get_status(unsigned char *adr) {
- int i,c;
-
- DISABLE_IRQ();
- c = sendcmd(0,0x00,NODE_ADR | C_GETEA); /* CMD: GET ETH ADR*/
- if( c < 0 ) goto gsend;
-
- /* now read status bytes */
-
- for (i=0; i<6; i++) {
- dma_wd.fdc_acces_seccount = 0; /* request next byte */
-
- if( !acsi_wait_for_IRQ(HZ/2) ) { /* wait for cmd ack */
- c = -1;
- goto gsend; /* timeout */
- }
- c = dma_wd.fdc_acces_seccount;
- *adr++ = (unsigned char)c;
- }
- c = 1;
-gsend:
- dma_wd.dma_mode_status = 0x80;
- return c;
-}
-
-static irqreturn_t
-bionet_intr(int irq, void *data) {
- return IRQ_HANDLED;
-}
-
-
-static int
-get_frame(unsigned long paddr, int odd) {
- int c;
- unsigned long flags;
-
- DISABLE_IRQ();
- local_irq_save(flags);
-
- dma_wd.dma_mode_status = 0x9a;
- dma_wd.dma_mode_status = 0x19a;
- dma_wd.dma_mode_status = 0x9a;
- dma_wd.fdc_acces_seccount = 0x04; /* sector count (was 5) */
- dma_wd.dma_lo = (unsigned char)paddr;
- paddr >>= 8;
- dma_wd.dma_md = (unsigned char)paddr;
- paddr >>= 8;
- dma_wd.dma_hi = (unsigned char)paddr;
- local_irq_restore(flags);
-
- c = sendcmd(0,0x00,NODE_ADR | C_READ); /* CMD: READ */
- if( c < 128 ) goto rend;
-
- /* now read block */
-
- c = sendcmd(1,0x00,odd); /* odd flag for address shift */
- dma_wd.dma_mode_status = 0x0a;
-
- if( !acsi_wait_for_IRQ(100) ) { /* wait for DMA to complete */
- c = -1;
- goto rend;
- }
- dma_wd.dma_mode_status = 0x8a;
- dma_wd.dma_mode_status = 0x18a;
- dma_wd.dma_mode_status = 0x8a;
- c = dma_wd.fdc_acces_seccount;
-
- dma_wd.dma_mode_status = 0x88;
- c = dma_wd.fdc_acces_seccount;
- c = 1;
-
-rend:
- dma_wd.dma_mode_status = 0x80;
- udelay(40);
- acsi_wait_for_noIRQ(20);
- return c;
-}
-
-
-static int
-hardware_send_packet(unsigned long paddr, int cnt) {
- unsigned int c;
- unsigned long flags;
-
- DISABLE_IRQ();
- local_irq_save(flags);
-
- dma_wd.dma_mode_status = 0x19a;
- dma_wd.dma_mode_status = 0x9a;
- dma_wd.dma_mode_status = 0x19a;
- dma_wd.dma_lo = (unsigned char)paddr;
- paddr >>= 8;
- dma_wd.dma_md = (unsigned char)paddr;
- paddr >>= 8;
- dma_wd.dma_hi = (unsigned char)paddr;
-
- dma_wd.fdc_acces_seccount = 0x4; /* sector count */
- local_irq_restore(flags);
-
- c = sendcmd(0,0x100,NODE_ADR | C_WRITE); /* CMD: WRITE */
- c = sendcmd(1,0x100,cnt&0xff);
- c = sendcmd(1,0x100,cnt>>8);
-
- /* now write block */
-
- dma_wd.dma_mode_status = 0x10a; /* DMA enable */
- if( !acsi_wait_for_IRQ(100) ) /* wait for DMA to complete */
- goto end;
-
- dma_wd.dma_mode_status = 0x19a; /* DMA disable ! */
- c = dma_wd.fdc_acces_seccount;
-
-end:
- c = sendcmd(1,0x100,0);
- c = sendcmd(1,0x100,0);
-
- dma_wd.dma_mode_status = 0x180;
- udelay(40);
- acsi_wait_for_noIRQ(20);
- return( c & 0x02);
-}
-
-
-/* Check for a network adaptor of this type, and return '0' if one exists.
- */
-struct net_device * __init bionet_probe(int unit)
-{
- struct net_device *dev;
- unsigned char station_addr[6];
- static unsigned version_printed;
- static int no_more_found; /* avoid "Probing for..." printed 4 times */
- int i;
- int err;
-
- if (!MACH_IS_ATARI || no_more_found)
- return ERR_PTR(-ENODEV);
-
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev)
- return ERR_PTR(-ENOMEM);
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
- SET_MODULE_OWNER(dev);
-
- printk("Probing for BioNet 100 Adapter...\n");
-
- stdma_lock(bionet_intr, NULL);
- i = get_status(station_addr); /* Read the station address PROM. */
- ENABLE_IRQ();
- stdma_release();
-
- /* Check the first three octets of the S.A. for the manufactor's code.
- */
-
- if( i < 0
- || station_addr[0] != 'B'
- || station_addr[1] != 'I'
- || station_addr[2] != 'O' ) {
- no_more_found = 1;
- printk( "No BioNet 100 found.\n" );
- free_netdev(dev);
- return ERR_PTR(-ENODEV);
- }
-
- if (bionet_debug > 0 && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s found, eth-addr: %02x-%02x-%02x:%02x-%02x-%02x.\n",
- dev->name, "BioNet 100",
- station_addr[0], station_addr[1], station_addr[2],
- station_addr[3], station_addr[4], station_addr[5]);
-
- /* Initialize the device structure. */
-
- nic_packet = (struct nic_pkt_s *)acsi_buffer;
- phys_nic_packet = (unsigned char *)phys_acsi_buffer;
- if (bionet_debug > 0) {
- printk("nic_packet at 0x%p, phys at 0x%p\n",
- nic_packet, phys_nic_packet );
- }
-
- dev->open = bionet_open;
- dev->stop = bionet_close;
- dev->hard_start_xmit = bionet_send_packet;
- dev->get_stats = net_get_stats;
-
- /* Fill in the fields of the device structure with ethernet-generic
- * values. This should be in a common file instead of per-driver.
- */
-
- for (i = 0; i < ETH_ALEN; i++) {
-#if 0
- dev->broadcast[i] = 0xff;
-#endif
- dev->dev_addr[i] = station_addr[i];
- }
- err = register_netdev(dev);
- if (!err)
- return dev;
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is non-reboot way to recover if something goes wrong.
- */
-static int
-bionet_open(struct net_device *dev) {
- struct net_local *lp = netdev_priv(dev);
-
- if (bionet_debug > 0)
- printk("bionet_open\n");
- stdma_lock(bionet_intr, NULL);
-
- /* Reset the hardware here.
- */
- set_status(4);
- lp->open_time = 0; /*jiffies*/
- lp->poll_time = MAX_POLL_TIME;
-
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
-
- stdma_release();
- bionet_timer.data = (long)dev;
- bionet_timer.expires = jiffies + lp->poll_time;
- add_timer(&bionet_timer);
- return 0;
-}
-
-static int
-bionet_send_packet(struct sk_buff *skb, struct net_device *dev) {
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- /* Block a timer-based transmit from overlapping. This could better be
- * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
- */
- local_irq_save(flags);
-
- if (stdma_islocked()) {
- local_irq_restore(flags);
- lp->stats.tx_errors++;
- }
- else {
- int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned long buf = virt_to_phys(skb->data);
- int stat;
-
- stdma_lock(bionet_intr, NULL);
- local_irq_restore(flags);
- if( !STRAM_ADDR(buf+length-1) ) {
- skb_copy_from_linear_data(skb, nic_packet->buffer,
- length);
- buf = (unsigned long)&((struct nic_pkt_s *)phys_nic_packet)->buffer;
- }
-
- if (bionet_debug >1) {
- u_char *data = nic_packet->buffer, *p;
- int i;
-
- printk( "%s: TX pkt type 0x%4x from ", dev->name,
- ((u_short *)data)[6]);
-
- for( p = &data[6], i = 0; i < 6; i++ )
- printk("%02x%s", *p++,i != 5 ? ":" : "" );
- printk(" to ");
-
- for( p = data, i = 0; i < 6; i++ )
- printk("%02x%s", *p++,i != 5 ? ":" : "" "\n" );
-
- printk( "%s: ", dev->name );
- printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x"
- " %02x%02x%02x%02x len %d\n",
- data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27],
- data[28], data[29], data[30], data[31], data[32], data[33],
- length );
- }
- dma_cache_maintenance(buf, length, 1);
-
- stat = hardware_send_packet(buf, length);
- ENABLE_IRQ();
- stdma_release();
-
- dev->trans_start = jiffies;
- dev->tbusy = 0;
- lp->stats.tx_packets++;
- lp->stats.tx_bytes+=length;
- }
- dev_kfree_skb(skb);
-
- return 0;
-}
-
-/* We have a good packet(s), get it/them out of the buffers.
- */
-static void
-bionet_poll_rx(struct net_device *dev) {
- struct net_local *lp = netdev_priv(dev);
- int boguscount = 10;
- int pkt_len, status;
- unsigned long flags;
-
- local_irq_save(flags);
- /* ++roman: Take care at locking the ST-DMA... This must be done with ints
- * off, since otherwise an int could slip in between the question and the
- * locking itself, and then we'd go to sleep... And locking itself is
- * necessary to keep the floppy_change timer from working with ST-DMA
- * registers. */
- if (stdma_islocked()) {
- local_irq_restore(flags);
- return;
- }
- stdma_lock(bionet_intr, NULL);
- DISABLE_IRQ();
- local_irq_restore(flags);
-
- if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++;
-
- while(boguscount--) {
- status = get_frame((unsigned long)phys_nic_packet, 0);
-
- if( status == 0 ) break;
-
- /* Good packet... */
-
- dma_cache_maintenance((unsigned long)phys_nic_packet, 1520, 0);
-
- pkt_len = (nic_packet->l_hi << 8) | nic_packet->l_lo;
-
- lp->poll_time = bionet_min_poll_time; /* fast poll */
- if( pkt_len >= 60 && pkt_len <= 1520 ) {
- /* ^^^^ war 1514 KHL */
- /* Malloc up new buffer.
- */
- struct sk_buff *skb = dev_alloc_skb( pkt_len + 2 );
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- break;
- }
-
- skb_reserve( skb, 2 ); /* 16 Byte align */
- skb_put( skb, pkt_len ); /* make room */
-
- /* 'skb->data' points to the start of sk_buff data area.
- */
- skb_copy_to_linear_data(skb, nic_packet->buffer,
- pkt_len);
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx(skb);
- dev->last_rx = jiffies;
- lp->stats.rx_packets++;
- lp->stats.rx_bytes+=pkt_len;
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(INET_BH) for us and will work on them
- when we get to the bottom-half routine.
- */
-
- if (bionet_debug >1) {
- u_char *data = nic_packet->buffer, *p;
- int i;
-
- printk( "%s: RX pkt type 0x%4x from ", dev->name,
- ((u_short *)data)[6]);
-
-
- for( p = &data[6], i = 0; i < 6; i++ )
- printk("%02x%s", *p++,i != 5 ? ":" : "" );
- printk(" to ");
- for( p = data, i = 0; i < 6; i++ )
- printk("%02x%s", *p++,i != 5 ? ":" : "" "\n" );
-
- printk( "%s: ", dev->name );
- printk(" data %02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x"
- " %02x%02x%02x%02x len %d\n",
- data[12], data[13], data[14], data[15], data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23], data[24], data[25], data[26], data[27],
- data[28], data[29], data[30], data[31], data[32], data[33],
- pkt_len );
- }
- }
- else {
- printk(" Packet has wrong length: %04d bytes\n", pkt_len);
- lp->stats.rx_errors++;
- }
- }
- stdma_release();
- ENABLE_IRQ();
- return;
-}
-
-/* bionet_tick: called by bionet_timer. Reads packets from the adapter,
- * passes them to the higher layers and restarts the timer.
- */
-static void
-bionet_tick(unsigned long data) {
- struct net_device *dev = (struct net_device *)data;
- struct net_local *lp = netdev_priv(dev);
-
- if( bionet_debug > 0 && (lp->open_time++ & 7) == 8 )
- printk("bionet_tick: %ld\n", lp->open_time);
-
- if( !stdma_islocked() ) bionet_poll_rx(dev);
-
- bionet_timer.expires = jiffies + lp->poll_time;
- add_timer(&bionet_timer);
-}
-
-/* The inverse routine to bionet_open().
- */
-static int
-bionet_close(struct net_device *dev) {
- struct net_local *lp = netdev_priv(dev);
-
- if (bionet_debug > 0)
- printk("bionet_close, open_time=%ld\n", lp->open_time);
- del_timer(&bionet_timer);
- stdma_lock(bionet_intr, NULL);
-
- set_status(0);
- lp->open_time = 0;
-
- dev->tbusy = 1;
- dev->start = 0;
-
- stdma_release();
- return 0;
-}
-
-/* Get the current statistics.
- This may be called with the card open or closed.
- */
-static struct net_device_stats *net_get_stats(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- return &lp->stats;
-}
-
-
-#ifdef MODULE
-
-static struct net_device *bio_dev;
-
-int init_module(void)
-{
- bio_dev = bionet_probe(-1);
- if (IS_ERR(bio_dev))
- return PTR_ERR(bio_dev);
- return 0;
-}
-
-void cleanup_module(void)
-{
- unregister_netdev(bio_dev);
- free_netdev(bio_dev);
-}
-
-#endif /* MODULE */
-
-/* Local variables:
- * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include
- -b m68k-linuxaout -Wall -Wstrict-prototypes -O2
- -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c bionet.c"
- * version-control: t
- * kept-new-versions: 5
- * tab-width: 8
- * End:
- */
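
The deleted "Theory of Operation" above describes an adaptive polling interval: drop to the fastest rate as soon as a packet arrives, back off by one jiffy per empty poll, and never exceed MAX_POLL_TIME. A minimal standalone sketch of that backoff (not part of the patch; the struct and helper name are illustrative, the constants mirror the deleted driver):

#define MAX_POLL_TIME	10	/* slowest poll interval, in jiffies */

struct poll_state {
	int poll_time;		/* current interval, in jiffies */
	int min_poll_time;	/* fastest allowed interval, always >= 1 */
};

/* Hypothetical helper: picks the next poll interval the way the deleted
 * bionet_poll_rx()/bionet_tick() pair did - fast right after traffic,
 * slowly backing off to MAX_POLL_TIME when the link goes idle. */
static int next_poll_interval(struct poll_state *s, int packets_received)
{
	if (packets_received)
		s->poll_time = s->min_poll_time;	/* fast poll */
	else if (s->poll_time < MAX_POLL_TIME)
		s->poll_time++;				/* back off one jiffy */

	return s->poll_time;	/* jiffies until the next timer tick */
}
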
diff --git a/drivers/net/atari_pamsnet.c b/drivers/net/atari_pamsnet.c
deleted file mode 100644
index f7356374a2e7..000000000000
--- a/drivers/net/atari_pamsnet.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/* atari_pamsnet.c PAMsNet device driver for linux68k.
- *
- * Version: @(#)PAMsNet.c 0.2ß 03/31/96
- *
- * Author: Torsten Lang <Torsten.Lang@ap.physik.uni-giessen.de>
- * <Torsten.Lang@jung.de>
- *
- * This driver is based on my driver PAMSDMA.c for MiNT-Net and
- * on the driver bionet.c written by
- * Hartmut Laue <laue@ifk-mp.uni-kiel.de>
- * and Torsten Narjes <narjes@ifk-mp.uni-kiel.de>
- *
- * Little adaptions for integration into pl7 by Roman Hodek
- *
- What is it ?
- ------------
- This driver controls the PAMsNet LAN-Adapter which connects
- an ATARI ST/TT via the ACSI-port to an Ethernet-based network.
-
- This version can be compiled as a loadable module (See the
- compile command at the bottom of this file).
- At load time, you can optionally set the debugging level and the
- fastest response time on the command line of 'insmod'.
-
- 'pamsnet_debug'
- controls the amount of diagnostic messages:
- 0 : no messages
- >0 : see code for meaning of printed messages
-
- 'pamsnet_min_poll_time' (always >=1)
- gives the time (in jiffies) between polls. Low values
- increase the system load (beware!)
-
- When loaded, a net device with the name 'eth?' becomes available,
- which can be controlled with the usual 'ifconfig' command.
-
- It is possible to compile this driver into the kernel like other
- (net) drivers. For this purpose, some source files (e.g. config-files
- makefiles, Space.c) must be changed accordingly. (You may refer to
- other drivers how to do it.) In this case, the device will be detected
- at boot time and (probably) appear as 'eth0'.
-
- Theory of Operation
- -------------------
- Because the ATARI DMA port is usually shared between several
- devices (eg. harddisk, floppy) we cannot block the ACSI bus
- while waiting for interrupts. Therefore we use a polling mechanism
- to fetch packets from the adapter. For the same reason, we send
- packets without checking that the previous packet has been sent to
- the LAN. We rely on the higher levels of the networking code to detect
- missing packets and resend them.
-
- Before we access the ATARI DMA controller, we check if another
- process is using the DMA. If not, we lock the DMA, perform one or
- more packet transfers and unlock the DMA before returning.
- We do not use 'stdma_lock' unconditionally because it is unclear
- if the networking code can be set to sleep, which will happen if
- another (possibly slow) device is using the DMA controller.
-
- The polling is done via timer interrupts which periodically
- 'simulate' an interrupt from the Ethernet adapter. The time (in jiffies)
- between polls varies depending on an estimate of the net activity.
- The allowed range is given by the variable 'bionet_min_poll_time'
- for the lower (fastest) limit and the constant 'MAX_POLL_TIME'
- for the higher (slowest) limit.
-
- Whenever a packet arrives, we switch to fastest response by setting
- the polling time to its lowest limit. If the following poll fails,
- because no packets have arrived, we increase the time for the next
- poll. When the net activity is low, the polling time effectively
- stays at its maximum value, resulting in the lowest load for the
- machine.
- */
-
-#define MAX_POLL_TIME 10
-
-static char *version =
- "pamsnet.c:v0.2beta 30-mar-96 (c) Torsten Lang.\n";
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/errno.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atari_stdma.h>
-#include <asm/atari_acsi.h>
-
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#undef READ
-#undef WRITE
-
-/* use 0 for production, 1 for verification, >2 for debug
- */
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-/*
- * Global variable 'pamsnet_debug'. Can be set at load time by 'insmod'
- */
-unsigned int pamsnet_debug = NET_DEBUG;
-module_param(pamsnet_debug, int, 0);
-MODULE_PARM_DESC(pamsnet_debug, "pamsnet debug enable (0-1)");
-MODULE_LICENSE("GPL");
-
-static unsigned int pamsnet_min_poll_time = 2;
-
-
-/* Information that need to be kept for each board.
- */
-struct net_local {
- struct net_device_stats stats;
- long open_time; /* for debugging */
- int poll_time; /* polling time varies with net load */
-};
-
-static struct nic_pkt_s { /* packet format */
- unsigned char buffer[2048];
-} *nic_packet = 0;
-unsigned char *phys_nic_packet;
-
-typedef unsigned char HADDR[6]; /* 6-byte hardware address of lance */
-
-/* Index to functions, as function prototypes.
- */
-static void start (int target);
-static int stop (int target);
-static int testpkt (int target);
-static int sendpkt (int target, unsigned char *buffer, int length);
-static int receivepkt (int target, unsigned char *buffer);
-static int inquiry (int target, unsigned char *buffer);
-static HADDR *read_hw_addr(int target, unsigned char *buffer);
-static void setup_dma (void *address, unsigned rw_flag, int num_blocks);
-static int send_first (int target, unsigned char byte);
-static int send_1_5 (int lun, unsigned char *command, int dma);
-static int get_status (void);
-static int calc_received (void *start_address);
-
-static int pamsnet_open(struct net_device *dev);
-static int pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev);
-static void pamsnet_poll_rx(struct net_device *);
-static int pamsnet_close(struct net_device *dev);
-static struct net_device_stats *net_get_stats(struct net_device *dev);
-static void pamsnet_tick(unsigned long);
-
-static irqreturn_t pamsnet_intr(int irq, void *data);
-
-static DEFINE_TIMER(pamsnet_timer, pamsnet_tick, 0, 0);
-
-#define STRAM_ADDR(a) (((a) & 0xff000000) == 0)
-
-typedef struct
-{
- unsigned char reserved1[0x38];
- HADDR hwaddr;
- unsigned char reserved2[0x1c2];
-} DMAHWADDR;
-
-/*
- * Definitions of commands understood by the PAMs DMA adaptor.
- *
- * In general the DMA adaptor uses LUN 0, 5, 6 and 7 on one ID changeable
- * by the PAM's Net software.
- *
- * LUN 0 works as a harddisk. You can boot the PAM's Net driver there.
- * LUN 5 works as a harddisk and lets you access the RAM and some I/O HW
- * area. In sector 0, bytes 0x38-0x3d you find the ethernet HW address
- * of the adaptor.
- * LUN 6 works as a harddisk and lets you access the firmware ROM.
- * LUN 7 lets you send and receive packets.
- *
- * Some commands like the INQUIRY command work identical on all used LUNs.
- *
- * UNKNOWN1 seems to read some data.
- * Command length is 6 bytes.
- * UNKNOWN2 seems to read some data (command byte 1 must be !=0). The
- * following bytes seem to be something like an allocation length.
- * Command length is 6 bytes.
- * READPKT reads a packet received by the DMA adaptor.
- * Command length is 6 bytes.
- * WRITEPKT sends a packet transferred by the following DMA phase. The length
- * of the packet is transferred in command bytes 3 and 4.
- * The adaptor automatically replaces the src hw address in an ethernet
- * packet by its own hw address.
- * Command length is 6 bytes.
- * INQUIRY has the same function as the INQUIRY command supported by harddisks
- * and other SCSI devices. It lets you detect which device you found
- * at a given address.
- * Command length is 6 bytes.
- * START initializes the DMA adaptor. After this command it is able to send
- * and receive packets. There is no status byte returned!
- * Command length is 1 byte.
- * NUMPKTS gives back the number of received packets waiting in the queue in
- * the status byte.
- * Command length is 1 byte.
- * UNKNOWN3
- * UNKNOWN4 Function of these three commands is unknown.
- * UNKNOWN5 The command length of these three commands is 1 byte.
- * DESELECT immediately deselects the DMA adaptor. May important with interrupt
- * driven operation.
- * Command length is 1 byte.
- * STOP resets the DMA adaptor. After this command packets can no longer
- * be received or transferred.
- * Command length is 6 byte.
- */
-
-enum {UNKNOWN1=3, READPKT=8, UNKNOWN2, WRITEPKT=10, INQUIRY=18, START,
- NUMPKTS=22, UNKNOWN3, UNKNOWN4, UNKNOWN5, DESELECT, STOP};
-
-#define READSECTOR READPKT
-#define WRITESECTOR WRITEPKT
-
-u_char *inquire8="MV PAM's NET/GK";
-
-#define DMALOW dma_wd.dma_lo
-#define DMAMID dma_wd.dma_md
-#define DMAHIGH dma_wd.dma_hi
-#define DACCESS dma_wd.fdc_acces_seccount
-
-#define MFP_GPIP mfp.par_dt_reg
-
-/* Some useful functions */
-
-#define INT (!(MFP_GPIP & 0x20))
-#define DELAY ({MFP_GPIP; MFP_GPIP; MFP_GPIP;})
-#define WRITEMODE(value) \
- ({ u_short dummy = value; \
- __asm__ volatile("movew %0, 0xFFFF8606" : : "d"(dummy)); \
- DELAY; \
- })
-#define WRITEBOTH(value1, value2) \
- ({ u_long dummy = (u_long)(value1)<<16 | (u_short)(value2); \
- __asm__ volatile("movel %0, 0xFFFF8604" : : "d"(dummy)); \
- DELAY; \
- })
-
-/* Definitions for DMODE */
-
-#define READ 0x000
-#define WRITE 0x100
-
-#define DMA_FDC 0x080
-#define DMA_ACSI 0x000
-
-#define DMA_DISABLE 0x040
-
-#define SEC_COUNT 0x010
-#define DMA_WINDOW 0x000
-
-#define REG_ACSI 0x008
-#define REG_FDC 0x000
-
-#define A1 0x002
-
-/* Timeout constants */
-
-#define TIMEOUTCMD HZ/2 /* ca. 500ms */
-#define TIMEOUTDMA HZ /* ca. 1s */
-#define COMMAND_DELAY 500 /* ca. 0.5ms */
-
-unsigned rw;
-int lance_target = -1;
-int if_up = 0;
-
-/* The following routines access the ethernet board connected to the
- * ACSI port via the st_dma chip.
- */
-
-/* The following lowlevel routines work on physical addresses only and assume
- * that eventually needed buffers are
- * - completely located in ST RAM
- * - are contigous in the physical address space
- */
-
-/* Setup the DMA counter */
-
-static void
-setup_dma (void *address, unsigned rw_flag, int num_blocks)
-{
- WRITEMODE((unsigned) rw_flag | DMA_FDC | SEC_COUNT | REG_ACSI |
- A1);
- WRITEMODE((unsigned)(rw_flag ^ WRITE) | DMA_FDC | SEC_COUNT | REG_ACSI |
- A1);
- WRITEMODE((unsigned) rw_flag | DMA_FDC | SEC_COUNT | REG_ACSI |
- A1);
- DMALOW = (unsigned char)((unsigned long)address & 0xFF);
- DMAMID = (unsigned char)(((unsigned long)address >> 8) & 0xFF);
- DMAHIGH = (unsigned char)(((unsigned long)address >> 16) & 0xFF);
- WRITEBOTH((unsigned)num_blocks & 0xFF,
- rw_flag | DMA_FDC | DMA_WINDOW | REG_ACSI | A1);
- rw = rw_flag;
-}
-
-/* Send the first byte of an command block */
-
-static int
-send_first (int target, unsigned char byte)
-{
- rw = READ;
- acsi_delay_end(COMMAND_DELAY);
- /*
- * wake up ACSI
- */
- WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI);
- /*
- * write command byte
- */
- WRITEBOTH((target << 5) | (byte & 0x1F), DMA_FDC |
- DMA_WINDOW | REG_ACSI | A1);
- return (!acsi_wait_for_IRQ(TIMEOUTCMD));
-}
-
-/* Send the rest of an command block */
-
-static int
-send_1_5 (int lun, unsigned char *command, int dma)
-{
- int i, j;
-
- for (i=0; i<5; i++) {
- WRITEBOTH((!i ? (((lun & 0x7) << 5) | (command[i] & 0x1F))
- : command[i]),
- rw | REG_ACSI | DMA_WINDOW |
- ((i < 4) ? DMA_FDC
- : (dma ? DMA_ACSI
- : DMA_FDC)) | A1);
- if (i < 4 && (j = !acsi_wait_for_IRQ(TIMEOUTCMD)))
- return (j);
- }
- return (0);
-}
-
-/* Read a status byte */
-
-static int
-get_status (void)
-{
- WRITEMODE(DMA_FDC | DMA_WINDOW | REG_ACSI | A1);
- acsi_delay_start();
- return ((int)(DACCESS & 0xFF));
-}
-
-/* Calculate the number of received bytes */
-
-static int
-calc_received (void *start_address)
-{
- return (int)(
- (((unsigned long)DMAHIGH << 16) | ((unsigned)DMAMID << 8) | DMALOW)
- - (unsigned long)start_address);
-}
-
-/* The following midlevel routines still work on physical addresses ... */
-
-/* start() starts the PAM's DMA adaptor */
-
-static void
-start (int target)
-{
- send_first(target, START);
-}
-
-/* stop() stops the PAM's DMA adaptor and returns a value of zero in case of success */
-
-static int
-stop (int target)
-{
- int ret = -1;
- unsigned char cmd_buffer[5];
-
- if (send_first(target, STOP))
- goto bad;
- cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] =
- cmd_buffer[3] = cmd_buffer[4] = 0;
- if (send_1_5(7, cmd_buffer, 0) ||
- !acsi_wait_for_IRQ(TIMEOUTDMA) ||
- get_status())
- goto bad;
- ret = 0;
-bad:
- return (ret);
-}
-
-/* testpkt() returns the number of received packets waiting in the queue */
-
-static int
-testpkt(int target)
-{
- int ret = -1;
-
- if (send_first(target, NUMPKTS))
- goto bad;
- ret = get_status();
-bad:
- return (ret);
-}
-
-/* inquiry() returns 0 when PAM's DMA found, -1 when timeout, -2 otherwise */
-/* Please note: The buffer is for internal use only but must be defined! */
-
-static int
-inquiry (int target, unsigned char *buffer)
-{
- int ret = -1;
- unsigned char *vbuffer = phys_to_virt((unsigned long)buffer);
- unsigned char cmd_buffer[5];
-
- if (send_first(target, INQUIRY))
- goto bad;
- setup_dma(buffer, READ, 1);
- vbuffer[8] = vbuffer[27] = 0; /* Avoid confusion with previous read data */
- cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
- cmd_buffer[3] = 48;
- if (send_1_5(5, cmd_buffer, 1) ||
- !acsi_wait_for_IRQ(TIMEOUTDMA) ||
- get_status() ||
- (calc_received(buffer) < 32))
- goto bad;
- dma_cache_maintenance((unsigned long)(buffer+8), 20, 0);
- if (memcmp(inquire8, vbuffer+8, 20))
- goto bad;
- ret = 0;
-bad:
- if (!!NET_DEBUG) {
- vbuffer[8+20]=0;
- printk("inquiry of target %d: %s\n", target, vbuffer+8);
- }
- return (ret);
-}
-
-/*
- * read_hw_addr() reads the sector containing the hwaddr and returns
- * a pointer to it (virtual address!) or 0 in case of an error
- */
-
-static HADDR
-*read_hw_addr(int target, unsigned char *buffer)
-{
- HADDR *ret = 0;
- unsigned char cmd_buffer[5];
-
- if (send_first(target, READSECTOR))
- goto bad;
- setup_dma(buffer, READ, 1);
- cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
- cmd_buffer[3] = 1;
- if (send_1_5(5, cmd_buffer, 1) ||
- !acsi_wait_for_IRQ(TIMEOUTDMA) ||
- get_status())
- goto bad;
- ret = phys_to_virt((unsigned long)&(((DMAHWADDR *)buffer)->hwaddr));
- dma_cache_maintenance((unsigned long)buffer, 512, 0);
-bad:
- return (ret);
-}
-
-static irqreturn_t
-pamsnet_intr(int irq, void *data)
-{
- return IRQ_HANDLED;
-}
-
-/* receivepkt() loads a packet to a given buffer and returns its length */
-
-static int
-receivepkt (int target, unsigned char *buffer)
-{
- int ret = -1;
- unsigned char cmd_buffer[5];
-
- if (send_first(target, READPKT))
- goto bad;
- setup_dma(buffer, READ, 3);
- cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[2] = cmd_buffer[4] = 0;
- cmd_buffer[3] = 3;
- if (send_1_5(7, cmd_buffer, 1) ||
- !acsi_wait_for_IRQ(TIMEOUTDMA) ||
- get_status())
- goto bad;
- ret = calc_received(buffer);
-bad:
- return (ret);
-}
-
-/* sendpkt() sends a packet and returns a value of zero when the packet was sent
- successfully */
-
-static int
-sendpkt (int target, unsigned char *buffer, int length)
-{
- int ret = -1;
- unsigned char cmd_buffer[5];
-
- if (send_first(target, WRITEPKT))
- goto bad;
- setup_dma(buffer, WRITE, 3);
- cmd_buffer[0] = cmd_buffer[1] = cmd_buffer[4] = 0;
- cmd_buffer[2] = length >> 8;
- cmd_buffer[3] = length & 0xFF;
- if (send_1_5(7, cmd_buffer, 1) ||
- !acsi_wait_for_IRQ(TIMEOUTDMA) ||
- get_status())
- goto bad;
- ret = 0;
-bad:
- return (ret);
-}
-
-/* The following higher level routines work on virtual addresses and convert them to
- * physical addresses when passed to the lowlevel routines. It's up to the higher level
- * routines to copy data from Alternate RAM to ST RAM if neccesary!
- */
-
-/* Check for a network adaptor of this type, and return '0' if one exists.
- */
-
-struct net_device * __init pamsnet_probe (int unit)
-{
- struct net_device *dev;
- int i;
- HADDR *hwaddr;
- int err;
-
- unsigned char station_addr[6];
- static unsigned version_printed;
- /* avoid "Probing for..." printed 4 times - the driver is supporting only one adapter now! */
- static int no_more_found;
-
- if (no_more_found)
- return ERR_PTR(-ENODEV);
- no_more_found = 1;
-
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev)
- return ERR_PTR(-ENOMEM);
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
- SET_MODULE_OWNER(dev);
-
- printk("Probing for PAM's Net/GK Adapter...\n");
-
- /* Allocate the DMA buffer here since we need it for probing! */
-
- nic_packet = (struct nic_pkt_s *)acsi_buffer;
- phys_nic_packet = (unsigned char *)phys_acsi_buffer;
- if (pamsnet_debug > 0) {
- printk("nic_packet at 0x%p, phys at 0x%p\n",
- nic_packet, phys_nic_packet );
- }
-
- stdma_lock(pamsnet_intr, NULL);
- DISABLE_IRQ();
-
- for (i=0; i<8; i++) {
- /* Do two inquiries to cover cases with strange equipment on previous ID */
- /* blocking the ACSI bus (like the SLMC804 laser printer controller... */
- inquiry(i, phys_nic_packet);
- if (!inquiry(i, phys_nic_packet)) {
- lance_target = i;
- break;
- }
- }
-
- if (!!NET_DEBUG)
- printk("ID: %d\n",i);
-
- if (lance_target >= 0) {
- if (!(hwaddr = read_hw_addr(lance_target, phys_nic_packet)))
- lance_target = -1;
- else
- memcpy (station_addr, hwaddr, ETH_ALEN);
- }
-
- ENABLE_IRQ();
- stdma_release();
-
- if (lance_target < 0) {
- printk("No PAM's Net/GK found.\n");
- free_netdev(dev);
- return ERR_PTR(-ENODEV);
- }
-
- if (pamsnet_debug > 0 && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s found on target %01d, eth-addr: %02x:%02x:%02x:%02x:%02x:%02x.\n",
- dev->name, "PAM's Net/GK", lance_target,
- station_addr[0], station_addr[1], station_addr[2],
- station_addr[3], station_addr[4], station_addr[5]);
-
- /* Initialize the device structure. */
- dev->open = pamsnet_open;
- dev->stop = pamsnet_close;
- dev->hard_start_xmit = pamsnet_send_packet;
- dev->get_stats = net_get_stats;
-
- /* Fill in the fields of the device structure with ethernet-generic
- * values. This should be in a common file instead of per-driver.
- */
-
- for (i = 0; i < ETH_ALEN; i++) {
-#if 0
- dev->broadcast[i] = 0xff;
-#endif
- dev->dev_addr[i] = station_addr[i];
- }
- err = register_netdev(dev);
- if (!err)
- return dev;
-
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is non-reboot way to recover if something goes wrong.
- */
-static int
-pamsnet_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- if (pamsnet_debug > 0)
- printk("pamsnet_open\n");
- stdma_lock(pamsnet_intr, NULL);
- DISABLE_IRQ();
-
- /* Reset the hardware here.
- */
- if (!if_up)
- start(lance_target);
- if_up = 1;
- lp->open_time = 0; /*jiffies*/
- lp->poll_time = MAX_POLL_TIME;
-
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
-
- ENABLE_IRQ();
- stdma_release();
- pamsnet_timer.data = (long)dev;
- pamsnet_timer.expires = jiffies + lp->poll_time;
- add_timer(&pamsnet_timer);
- return 0;
-}
-
-static int
-pamsnet_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- /* Block a timer-based transmit from overlapping. This could better be
- * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
- */
- local_irq_save(flags);
-
- if (stdma_islocked()) {
- local_irq_restore(flags);
- lp->stats.tx_errors++;
- }
- else {
- int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned long buf = virt_to_phys(skb->data);
- int stat;
-
- stdma_lock(pamsnet_intr, NULL);
- DISABLE_IRQ();
-
- local_irq_restore(flags);
- if( !STRAM_ADDR(buf+length-1) ) {
- skb_copy_from_linear_data(skb, nic_packet->buffer,
- length);
- buf = (unsigned long)phys_nic_packet;
- }
-
- dma_cache_maintenance(buf, length, 1);
-
- stat = sendpkt(lance_target, (unsigned char *)buf, length);
- ENABLE_IRQ();
- stdma_release();
-
- dev->trans_start = jiffies;
- dev->tbusy = 0;
- lp->stats.tx_packets++;
- lp->stats.tx_bytes+=length;
- }
- dev_kfree_skb(skb);
-
- return 0;
-}
-
-/* We have a good packet(s), get it/them out of the buffers.
- */
-static void
-pamsnet_poll_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int boguscount;
- int pkt_len;
- struct sk_buff *skb;
- unsigned long flags;
-
- local_irq_save(flags);
- /* ++roman: Take care at locking the ST-DMA... This must be done with ints
- * off, since otherwise an int could slip in between the question and the
- * locking itself, and then we'd go to sleep... And locking itself is
- * necessary to keep the floppy_change timer from working with ST-DMA
- * registers. */
- if (stdma_islocked()) {
- local_irq_restore(flags);
- return;
- }
- stdma_lock(pamsnet_intr, NULL);
- DISABLE_IRQ();
- local_irq_restore(flags);
-
- boguscount = testpkt(lance_target);
- if( lp->poll_time < MAX_POLL_TIME ) lp->poll_time++;
-
- while(boguscount--) {
- pkt_len = receivepkt(lance_target, phys_nic_packet);
-
- if( pkt_len < 60 ) break;
-
- /* Good packet... */
-
- dma_cache_maintenance((unsigned long)phys_nic_packet, pkt_len, 0);
-
- lp->poll_time = pamsnet_min_poll_time; /* fast poll */
- if( pkt_len >= 60 && pkt_len <= 2048 ) {
- if (pkt_len > 1514)
- pkt_len = 1514;
-
- /* Malloc up new buffer.
- */
- skb = alloc_skb(pkt_len, GFP_ATOMIC);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- break;
- }
- skb->len = pkt_len;
- skb->dev = dev;
-
- /* 'skb->data' points to the start of sk_buff data area.
- */
- skb_copy_to_linear_data(skb, nic_packet->buffer,
- pkt_len);
- netif_rx(skb);
- dev->last_rx = jiffies;
- lp->stats.rx_packets++;
- lp->stats.rx_bytes+=pkt_len;
- }
- }
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(INET_BH) for us and will work on them
- when we get to the bottom-half routine.
- */
-
- ENABLE_IRQ();
- stdma_release();
- return;
-}
-
-/* pamsnet_tick: called by pamsnet_timer. Reads packets from the adapter,
- * passes them to the higher layers and restarts the timer.
- */
-static void
-pamsnet_tick(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct net_local *lp = netdev_priv(dev);
-
- if( pamsnet_debug > 0 && (lp->open_time++ & 7) == 8 )
- printk("pamsnet_tick: %ld\n", lp->open_time);
-
- pamsnet_poll_rx(dev);
-
- pamsnet_timer.expires = jiffies + lp->poll_time;
- add_timer(&pamsnet_timer);
-}
-
-/* The inverse routine to pamsnet_open().
- */
-static int
-pamsnet_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- if (pamsnet_debug > 0)
- printk("pamsnet_close, open_time=%ld\n", lp->open_time);
- del_timer(&pamsnet_timer);
- stdma_lock(pamsnet_intr, NULL);
- DISABLE_IRQ();
-
- if (if_up)
- stop(lance_target);
- if_up = 0;
-
- lp->open_time = 0;
-
- dev->tbusy = 1;
- dev->start = 0;
-
- ENABLE_IRQ();
- stdma_release();
- return 0;
-}
-
-/* Get the current statistics.
- This may be called with the card open or closed.
- */
-static struct net_device_stats *net_get_stats(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- return &lp->stats;
-}
-
-
-#ifdef MODULE
-
-static struct net_device *pam_dev;
-
-int init_module(void)
-{
- pam_dev = pamsnet_probe(-1);
- if (IS_ERR(pam_dev))
- return PTR_ERR(pam_dev);
- return 0;
-}
-
-void cleanup_module(void)
-{
- unregister_netdev(pam_dev);
- free_netdev(pam_dev);
-}
-
-#endif /* MODULE */
-
-/* Local variables:
- * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include
- -b m68k-linuxaout -Wall -Wstrict-prototypes -O2
- -fomit-frame-pointer -pipe -DMODULE -I../../net/inet -c atari_pamsnet.c"
- * version-control: t
- * kept-new-versions: 5
- * tab-width: 8
- * End:
- */
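
The command documentation removed above notes that WRITEPKT carries the frame length inside the command block, and the deleted sendpkt() packs it big-endian into bytes 2 and 3 of the 5-byte tail handed to send_1_5(). A small standalone sketch of that packing (illustrative only; the helper name is hypothetical, the WRITEPKT opcode itself goes out separately via send_first()):

/* Hypothetical helper mirroring how the deleted sendpkt() filled the
 * command tail: bytes 2 and 3 carry the frame length, high byte first. */
static void build_writepkt_tail(unsigned char cmd_buffer[5], int length)
{
	cmd_buffer[0] = 0;
	cmd_buffer[1] = 0;
	cmd_buffer[2] = (length >> 8) & 0xFF;	/* frame length, high byte */
	cmd_buffer[3] = length & 0xFF;		/* frame length, low byte */
	cmd_buffer[4] = 0;
}
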
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
index b1c6034e68fa..ff4765f6c3de 100644
--- a/drivers/net/atl1/atl1.h
+++ b/drivers/net/atl1/atl1.h
@@ -43,6 +43,7 @@ extern const struct ethtool_ops atl1_ethtool_ops;
struct atl1_adapter;
#define ATL1_MAX_INTR 3
+#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
#define ATL1_DEFAULT_TPD 256
#define ATL1_MAX_TPD 1024
@@ -57,29 +58,45 @@ struct atl1_adapter;
#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
/*
+ * This detached comment is preserved for documentation purposes only.
+ * It was originally attached to some code that got deleted, but seems
+ * important enough to keep around...
+ *
+ * <begin detached comment>
* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
* but cannot process phy register reads/writes faster than millisecond
* intervals...and we establish link due to a "link status change" interrupt.
+ * <end detached comment>
+ */
+
+/*
+ * atl1_ring_header represents a single, contiguous block of DMA space
+ * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
+ * message blocks (cmb, smb) described below
*/
+struct atl1_ring_header {
+ void *desc; /* virtual address */
+ dma_addr_t dma; /* physical address */
+ unsigned int size; /* length in bytes */
+};
/*
- * wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
+ * atl1_buffer is a wrapper around a pointer to a socket buffer
+ * so a DMA handle can be stored along with the skb
*/
struct atl1_buffer {
- struct sk_buff *skb;
- u16 length;
- u16 alloced;
+ struct sk_buff *skb; /* socket buffer */
+ u16 length; /* rx buffer length */
+ u16 alloced; /* 1 if skb allocated */
dma_addr_t dma;
};
-#define MAX_TX_BUF_LEN 0x3000 /* 12KB */
-
+/* transmit packet descriptor (tpd) ring */
struct atl1_tpd_ring {
- void *desc; /* pointer to the descriptor ring memory */
- dma_addr_t dma; /* physical adress of the descriptor ring */
- u16 size; /* length of descriptor ring in bytes */
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 hw_idx; /* hardware index */
atomic_t next_to_clean;
@@ -87,36 +104,34 @@ struct atl1_tpd_ring {
struct atl1_buffer *buffer_info;
};
+/* receive free descriptor (rfd) ring */
struct atl1_rfd_ring {
- void *desc;
- dma_addr_t dma;
- u16 size;
- u16 count;
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
atomic_t next_to_use;
u16 next_to_clean;
struct atl1_buffer *buffer_info;
};
+/* receive return descriptor (rrd) ring */
struct atl1_rrd_ring {
- void *desc;
- dma_addr_t dma;
- unsigned int size;
- u16 count;
+ void *desc; /* descriptor ring virtual address */
+ dma_addr_t dma; /* descriptor ring physical address */
+ unsigned int size; /* descriptor ring length in bytes */
+ u16 count; /* number of descriptors in the ring */
u16 next_to_use;
atomic_t next_to_clean;
};
-struct atl1_ring_header {
- void *desc; /* pointer to the descriptor ring memory */
- dma_addr_t dma; /* physical adress of the descriptor ring */
- unsigned int size; /* length of descriptor ring in bytes */
-};
-
+/* coalescing message block (cmb) */
struct atl1_cmb {
struct coals_msg_block *cmb;
dma_addr_t dma;
};
+/* statistics message block (smb) */
struct atl1_smb {
struct stats_msg_block *smb;
dma_addr_t dma;
@@ -141,24 +156,26 @@ struct atl1_sft_stats {
u64 tx_aborted_errors;
u64 tx_window_errors;
u64 tx_carrier_errors;
-
- u64 tx_pause; /* num Pause packet transmitted. */
- u64 excecol; /* num tx packets aborted due to excessive collisions. */
- u64 deffer; /* num deferred tx packets */
- u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */
- u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */
+ u64 tx_pause; /* num pause packets transmitted. */
+ u64 excecol; /* num tx packets w/ excessive collisions. */
+ u64 deffer; /* num tx packets deferred */
+ u64 scc; /* num packets subsequently transmitted
+ * successfully w/ single prior collision. */
+ u64 mcc; /* num packets subsequently transmitted
+ * successfully w/ multiple prior collisions. */
u64 latecol; /* num tx packets w/ late collisions. */
- u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
- u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
+ u64 tx_underun; /* num tx packets aborted due to transmit
+ * FIFO underrun, or TRD FIFO underrun */
+ u64 tx_trunc; /* num tx packets truncated due to size
+ * exceeding MTU, regardless of whether truncated
+ * by the chip or not. (The name doesn't really
+ * reflect the meaning in this case.) */
u64 rx_pause; /* num Pause packets received. */
u64 rx_rrd_ov;
u64 rx_trunc;
};
-/* board specific private data structure */
-#define ATL1_REGS_LEN 8
-
-/* Structure containing variables used by the shared code */
+/* hardware structure */
struct atl1_hw {
u8 __iomem *hw_addr;
struct atl1_adapter *back;
@@ -167,24 +184,35 @@ struct atl1_hw {
enum atl1_dma_req_block dmar_block;
enum atl1_dma_req_block dmaw_block;
u8 preamble_len;
- u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */
- u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
- u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */
- u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */
+ u8 max_retry; /* Retransmission maximum, after which the
+ * packet will be discarded */
+ u8 jam_ipg; /* IPG to start JAM for collision based flow
+ * control in half-duplex mode. In units of
+ * 8-bit time */
+ u8 ipgt; /* Desired back to back inter-packet gap.
+ * The default is 96-bit time */
+ u8 min_ifg; /* Minimum number of IFG to enforce in between
+ * receive frames. Frame gap below such IFG
+ * is dropped */
u8 ipgr1; /* 64bit Carrier-Sense window */
u8 ipgr2; /* 96-bit IPG window */
- u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
- u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
+ u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
+ * burst. Each TPD is 16 bytes long */
+ u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
+ * burst. Each RFD is 12 bytes long */
u8 rfd_fetch_gap;
- u8 rrd_burst; /* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
+ u8 rrd_burst; /* Threshold number of RRDs that can be retired
+ * in a burst. Each RRD is 16 bytes long */
u8 tpd_fetch_th;
u8 tpd_fetch_gap;
u16 tx_jumbo_task_th;
- u16 txf_burst; /* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
- 8 bytes long */
- u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
+ u16 txf_burst; /* Number of data bytes to read in a cache-
+ * aligned burst. Each SRAM entry is 8 bytes */
+ u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
+ * packets should add 4 bytes */
u16 rx_jumbo_lkah;
- u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after every 512ns passes. */
+ u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after
+ * every 512ns passes. */
u16 lcol; /* Collision Window */
u16 cmb_tpd;
@@ -194,49 +222,35 @@ struct atl1_hw {
u32 smb_timer;
u16 media_type;
u16 autoneg_advertised;
- u16 pci_cmd_word;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- u32 mem_rang;
- u32 txcw;
u32 max_frame_size;
u32 min_frame_size;
- u32 mc_filter_type;
- u32 num_mc_addrs;
- u32 collision_delta;
- u32 tx_packet_delta;
- u16 phy_spd_default;
u16 dev_rev;
- u8 revision_id;
/* spi flash */
u8 flash_vendor;
- u8 dma_fairness;
u8 mac_addr[ETH_ALEN];
u8 perm_mac_addr[ETH_ALEN];
- /* bool phy_preamble_sup; */
bool phy_configured;
};
struct atl1_adapter {
- /* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
struct atl1_sft_stats soft_stats;
-
struct vlan_group *vlgrp;
u32 rx_buffer_len;
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t lock;
- atomic_t irq_sem;
struct work_struct tx_timeout_task;
struct work_struct link_chg_task;
struct work_struct pcie_dma_to_rst_task;
@@ -244,9 +258,7 @@ struct atl1_adapter {
struct timer_list phy_config_timer;
bool phy_timer_pending;
- bool mac_disabled;
-
- /* All descriptor rings' memory */
+ /* all descriptor rings' memory */
struct atl1_ring_header ring_header;
/* TX */
@@ -259,25 +271,16 @@ struct atl1_adapter {
u64 hw_csum_err;
u64 hw_csum_good;
- u32 gorcl;
- u64 gorcl_old;
-
- /* Interrupt Moderator timer ( 2us resolution) */
- u16 imt;
- /* Interrupt Clear timer (2us resolution) */
- u16 ict;
-
- /* MII interface info */
- struct mii_if_info mii;
+ u16 imt; /* interrupt moderator timer (2us resolution) */
+ u16 ict; /* interrupt clear timer (2us resolution) */
+ struct mii_if_info mii; /* MII interface info */
/* structs defined in atl1_hw.h */
- u32 bd_number; /* board number */
+ u32 bd_number; /* board number */
bool pci_using_64;
struct atl1_hw hw;
struct atl1_smb smb;
struct atl1_cmb cmb;
-
- u32 pci_state[16];
};
#endif /* _ATL1_H_ */
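
The new atl1_ring_header above describes one contiguous DMA allocation that is carved into the tpd, rfd and rrd rings plus the cmb and smb blocks; the setup code later in this patch rounds each sub-block up to an 8-byte boundary, which is why 40 extra bytes (8 for each of the five blocks) are added to the total size. A standalone sketch of that alignment step, assuming illustrative names:

#include <stdint.h>

/* Hypothetical helper showing the 8-byte round-up applied to each
 * sub-block's DMA address inside the shared ring area. */
static uint64_t align_to_8(uint64_t dma_addr)
{
	uint64_t offset = (dma_addr & 0x7) ? (8 - (dma_addr & 0x7)) : 0;

	return dma_addr + offset;	/* e.g. 0x1003 becomes 0x1008 */
}
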
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 3bb40dd4a410..fd1e156f1747 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -38,7 +38,7 @@
* TODO:
* Fix TSO; tx performance is horrible with TSO enabled.
* Wake on LAN.
- * Add more ethtool functions, including set ring parameters.
+ * Add more ethtool functions.
* Fix abstruse irq enable/disable condition described here:
* http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
*
@@ -75,6 +75,7 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/mii.h>
+#include <linux/interrupt.h>
#include <net/checksum.h>
#include <asm/atomic.h>
@@ -118,10 +119,6 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
{
struct atl1_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -162,13 +159,70 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
hw->cmb_tx_timer = 1; /* about 2us */
hw->smb_timer = 100000; /* about 200ms */
- atomic_set(&adapter->irq_sem, 0);
spin_lock_init(&adapter->lock);
spin_lock_init(&adapter->mb_lock);
return 0;
}
+static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ u16 result;
+
+ atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
+
+ return result;
+}
+
+static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
+ int val)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_write_phy_reg(&adapter->hw, reg_num, val);
+}
+
+/*
+ * atl1_mii_ioctl - handle an MII register ioctl
+ * @netdev: network interface device structure
+ * @ifr: ifreq carrying the MII register request
+ * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
+ */
+static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ int retval;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ return retval;
+}
+
+/*
+ * atl1_ioctl - ioctl entry point; dispatches MII requests
+ * @netdev: network interface device structure
+ * @ifr: ifreq holding the request data
+ * @cmd: ioctl command; only the MII commands are supported
+ */
+static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atl1_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/*
* atl1_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
@@ -192,19 +246,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
rfd_ring->buffer_info =
- (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+ (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
- /* real ring DMA buffer */
- ring_header->size = size = sizeof(struct tx_packet_desc) *
- tpd_ring->count
- + sizeof(struct rx_free_desc) * rfd_ring->count
- + sizeof(struct rx_return_desc) * rrd_ring->count
- + sizeof(struct coals_msg_block)
- + sizeof(struct stats_msg_block)
- + 40; /* "40: for 8 bytes align" huh? -- CHS */
+ /* real ring DMA buffer
+ * each ring/block may need up to 8 bytes for alignment, hence the
+ * additional 40 bytes tacked onto the end.
+ */
+ ring_header->size = size =
+ sizeof(struct tx_packet_desc) * tpd_ring->count
+ + sizeof(struct rx_free_desc) * rfd_ring->count
+ + sizeof(struct rx_return_desc) * rrd_ring->count
+ + sizeof(struct coals_msg_block)
+ + sizeof(struct stats_msg_block)
+ + 40;
ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
- &ring_header->dma);
+ &ring_header->dma);
if (unlikely(!ring_header->desc)) {
dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
goto err_nomem;
@@ -218,8 +275,6 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
tpd_ring->dma += offset;
tpd_ring->desc = (u8 *) ring_header->desc + offset;
tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
- atomic_set(&tpd_ring->next_to_use, 0);
- atomic_set(&tpd_ring->next_to_clean, 0);
/* init RFD ring */
rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
@@ -227,9 +282,7 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
rfd_ring->dma += offset;
rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
- rfd_ring->next_to_clean = 0;
- /* rfd_ring->next_to_use = rfd_ring->count - 1; */
- atomic_set(&rfd_ring->next_to_use, 0);
+
/* init RRD ring */
rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
@@ -237,23 +290,22 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
rrd_ring->dma += offset;
rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
- rrd_ring->next_to_use = 0;
- atomic_set(&rrd_ring->next_to_clean, 0);
+
/* init CMB */
adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
adapter->cmb.dma += offset;
- adapter->cmb.cmb =
- (struct coals_msg_block *) ((u8 *) rrd_ring->desc +
- (rrd_ring->size + offset));
+ adapter->cmb.cmb = (struct coals_msg_block *)
+ ((u8 *) rrd_ring->desc + (rrd_ring->size + offset));
/* init SMB */
adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
adapter->smb.dma += offset;
adapter->smb.smb = (struct stats_msg_block *)
- ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset));
+ ((u8 *) adapter->cmb.cmb +
+ (sizeof(struct coals_msg_block) + offset));
return ATL1_SUCCESS;
@@ -262,559 +314,133 @@ err_nomem:
return -ENOMEM;
}
-/*
- * atl1_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- */
-static void atl1_irq_enable(struct atl1_adapter *adapter)
-{
- if (likely(!atomic_dec_and_test(&adapter->irq_sem)))
- iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
-}
-
-static void atl1_clear_phy_int(struct atl1_adapter *adapter)
-{
- u16 phy_data;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->lock, flags);
- atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
- spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-static void atl1_inc_smb(struct atl1_adapter *adapter)
-{
- struct stats_msg_block *smb = adapter->smb.smb;
-
- /* Fill out the OS statistics structure */
- adapter->soft_stats.rx_packets += smb->rx_ok;
- adapter->soft_stats.tx_packets += smb->tx_ok;
- adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
- adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
- adapter->soft_stats.multicast += smb->rx_mcast;
- adapter->soft_stats.collisions += (smb->tx_1_col +
- smb->tx_2_col * 2 +
- smb->tx_late_col +
- smb->tx_abort_col *
- adapter->hw.max_retry);
-
- /* Rx Errors */
- adapter->soft_stats.rx_errors += (smb->rx_frag +
- smb->rx_fcs_err +
- smb->rx_len_err +
- smb->rx_sz_ov +
- smb->rx_rxf_ov +
- smb->rx_rrd_ov + smb->rx_align_err);
- adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
- adapter->soft_stats.rx_length_errors += smb->rx_len_err;
- adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
- adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
- adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
- smb->rx_rxf_ov);
-
- adapter->soft_stats.rx_pause += smb->rx_pause;
- adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
- adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
-
- /* Tx Errors */
- adapter->soft_stats.tx_errors += (smb->tx_late_col +
- smb->tx_abort_col +
- smb->tx_underrun + smb->tx_trunc);
- adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
- adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
- adapter->soft_stats.tx_window_errors += smb->tx_late_col;
-
- adapter->soft_stats.excecol += smb->tx_abort_col;
- adapter->soft_stats.deffer += smb->tx_defer;
- adapter->soft_stats.scc += smb->tx_1_col;
- adapter->soft_stats.mcc += smb->tx_2_col;
- adapter->soft_stats.latecol += smb->tx_late_col;
- adapter->soft_stats.tx_underun += smb->tx_underrun;
- adapter->soft_stats.tx_trunc += smb->tx_trunc;
- adapter->soft_stats.tx_pause += smb->tx_pause;
-
- adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
- adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
- adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
- adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
- adapter->net_stats.multicast = adapter->soft_stats.multicast;
- adapter->net_stats.collisions = adapter->soft_stats.collisions;
- adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
- adapter->net_stats.rx_over_errors =
- adapter->soft_stats.rx_missed_errors;
- adapter->net_stats.rx_length_errors =
- adapter->soft_stats.rx_length_errors;
- adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
- adapter->net_stats.rx_frame_errors =
- adapter->soft_stats.rx_frame_errors;
- adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
- adapter->net_stats.rx_missed_errors =
- adapter->soft_stats.rx_missed_errors;
- adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
- adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
- adapter->net_stats.tx_aborted_errors =
- adapter->soft_stats.tx_aborted_errors;
- adapter->net_stats.tx_window_errors =
- adapter->soft_stats.tx_window_errors;
- adapter->net_stats.tx_carrier_errors =
- adapter->soft_stats.tx_carrier_errors;
-}
-
-static void atl1_rx_checksum(struct atl1_adapter *adapter,
- struct rx_return_desc *rrd,
- struct sk_buff *skb)
+void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
{
- skb->ip_summed = CHECKSUM_NONE;
-
- if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
- if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
- ERR_FLAG_CODE | ERR_FLAG_OV)) {
- adapter->hw_csum_err++;
- dev_dbg(&adapter->pdev->dev, "rx checksum error\n");
- return;
- }
- }
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
- /* not IPv4 */
- if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
- /* checksum is invalid, but it's not an IPv4 pkt, so ok */
- return;
+ atomic_set(&tpd_ring->next_to_use, 0);
+ atomic_set(&tpd_ring->next_to_clean, 0);
- /* IPv4 packet */
- if (likely(!(rrd->err_flg &
- (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_good++;
- return;
- }
+ rfd_ring->next_to_clean = 0;
+ atomic_set(&rfd_ring->next_to_use, 0);
- /* IPv4, but hardware thinks its checksum is wrong */
- dev_dbg(&adapter->pdev->dev,
- "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
- rrd->pkt_flg, rrd->err_flg);
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
- adapter->hw_csum_err++;
- return;
+ rrd_ring->next_to_use = 0;
+ atomic_set(&rrd_ring->next_to_clean, 0);
}
/*
- * atl1_alloc_rx_buffers - Replace used receive buffers
- * @adapter: address of board private structure
+ * atl1_clean_rx_ring - Free RFD Buffers
+ * @adapter: board private structure
*/
-static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
-{
- struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct pci_dev *pdev = adapter->pdev;
- struct page *page;
- unsigned long offset;
- struct atl1_buffer *buffer_info, *next_info;
- struct sk_buff *skb;
- u16 num_alloc = 0;
- u16 rfd_next_to_use, next_next;
- struct rx_free_desc *rfd_desc;
-
- next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
- if (++next_next == rfd_ring->count)
- next_next = 0;
- buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
- next_info = &rfd_ring->buffer_info[next_next];
-
- while (!buffer_info->alloced && !next_info->alloced) {
- if (buffer_info->skb) {
- buffer_info->alloced = 1;
- goto next;
- }
-
- rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
-
- skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
- if (unlikely(!skb)) { /* Better luck next round */
- adapter->net_stats.rx_dropped++;
- break;
- }
-
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
- buffer_info->alloced = 1;
- buffer_info->skb = skb;
- buffer_info->length = (u16) adapter->rx_buffer_len;
- page = virt_to_page(skb->data);
- offset = (unsigned long)skb->data & ~PAGE_MASK;
- buffer_info->dma = pci_map_page(pdev, page, offset,
- adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
- rfd_desc->coalese = 0;
-
-next:
- rfd_next_to_use = next_next;
- if (unlikely(++next_next == rfd_ring->count))
- next_next = 0;
-
- buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
- next_info = &rfd_ring->buffer_info[next_next];
- num_alloc++;
- }
-
- if (num_alloc) {
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
- }
- return num_alloc;
-}
-
-static void atl1_intr_rx(struct atl1_adapter *adapter)
+static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
{
- int i, count;
- u16 length;
- u16 rrd_next_to_clean;
- u32 value;
struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
struct atl1_buffer *buffer_info;
- struct rx_return_desc *rrd;
- struct sk_buff *skb;
-
- count = 0;
-
- rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
-
- while (1) {
- rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
- i = 1;
- if (likely(rrd->xsz.valid)) { /* packet valid */
-chk_rrd:
- /* check rrd status */
- if (likely(rrd->num_buf == 1))
- goto rrd_ok;
-
- /* rrd seems to be bad */
- if (unlikely(i-- > 0)) {
- /* rrd may not be DMAed completely */
- dev_dbg(&adapter->pdev->dev,
- "incomplete RRD DMA transfer\n");
- udelay(1);
- goto chk_rrd;
- }
- /* bad rrd */
- dev_dbg(&adapter->pdev->dev, "bad RRD\n");
- /* see if update RFD index */
- if (rrd->num_buf > 1) {
- u16 num_buf;
- num_buf =
- (rrd->xsz.xsum_sz.pkt_size +
- adapter->rx_buffer_len -
- 1) / adapter->rx_buffer_len;
- if (rrd->num_buf == num_buf) {
- /* clean alloc flag for bad rrd */
- while (rfd_ring->next_to_clean !=
- (rrd->buf_indx + num_buf)) {
- rfd_ring->buffer_info[rfd_ring->
- next_to_clean].alloced = 0;
- if (++rfd_ring->next_to_clean ==
- rfd_ring->count) {
- rfd_ring->
- next_to_clean = 0;
- }
- }
- }
- }
-
- /* update rrd */
- rrd->xsz.valid = 0;
- if (++rrd_next_to_clean == rrd_ring->count)
- rrd_next_to_clean = 0;
- count++;
- continue;
- } else { /* current rrd still not be updated */
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
- break;
- }
-rrd_ok:
- /* clean alloc flag for bad rrd */
- while (rfd_ring->next_to_clean != rrd->buf_indx) {
- rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced =
- 0;
- if (++rfd_ring->next_to_clean == rfd_ring->count)
- rfd_ring->next_to_clean = 0;
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rfd_ring->count; i++) {
+ buffer_info = &rfd_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ pci_unmap_page(pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
}
-
- buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
- if (++rfd_ring->next_to_clean == rfd_ring->count)
- rfd_ring->next_to_clean = 0;
-
- /* update rrd next to clean */
- if (++rrd_next_to_clean == rrd_ring->count)
- rrd_next_to_clean = 0;
- count++;
-
- if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
- if (!(rrd->err_flg &
- (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
- | ERR_FLAG_LEN))) {
- /* packet error, don't need upstream */
- buffer_info->alloced = 0;
- rrd->xsz.valid = 0;
- continue;
- }
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
}
-
- /* Good Receive */
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
- skb = buffer_info->skb;
- length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
-
- skb_put(skb, length - ETHERNET_FCS_SIZE);
-
- /* Receive Checksum Offload */
- atl1_rx_checksum(adapter, rrd, skb);
- skb->protocol = eth_type_trans(skb, adapter->netdev);
-
- if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
- u16 vlan_tag = (rrd->vlan_tag >> 4) |
- ((rrd->vlan_tag & 7) << 13) |
- ((rrd->vlan_tag & 8) << 9);
- vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
- } else
- netif_rx(skb);
-
- /* let protocol layer free skb */
- buffer_info->skb = NULL;
- buffer_info->alloced = 0;
- rrd->xsz.valid = 0;
-
- adapter->netdev->last_rx = jiffies;
}
- atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
-
- atl1_alloc_rx_buffers(adapter);
+ size = sizeof(struct atl1_buffer) * rfd_ring->count;
+ memset(rfd_ring->buffer_info, 0, size);
- /* update mailbox ? */
- if (count) {
- u32 tpd_next_to_use;
- u32 rfd_next_to_use;
- u32 rrd_next_to_clean;
+ /* Zero out the descriptor ring */
+ memset(rfd_ring->desc, 0, rfd_ring->size);
- spin_lock(&adapter->mb_lock);
+ rfd_ring->next_to_clean = 0;
+ atomic_set(&rfd_ring->next_to_use, 0);
- tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
- rfd_next_to_use =
- atomic_read(&adapter->rfd_ring.next_to_use);
- rrd_next_to_clean =
- atomic_read(&adapter->rrd_ring.next_to_clean);
- value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
- MB_RFD_PROD_INDX_SHIFT) |
- ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
- MB_RRD_CONS_INDX_SHIFT) |
- ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
- MB_TPD_PROD_INDX_SHIFT);
- iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
- spin_unlock(&adapter->mb_lock);
- }
+ rrd_ring->next_to_use = 0;
+ atomic_set(&rrd_ring->next_to_clean, 0);
}
-static void atl1_intr_tx(struct atl1_adapter *adapter)
+/*
+ * atl1_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
{
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
struct atl1_buffer *buffer_info;
- u16 sw_tpd_next_to_clean;
- u16 cmb_tpd_next_to_clean;
-
- sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
- cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
-
- while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
- struct tx_packet_desc *tpd;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
- tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
- buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tpd_ring->count; i++) {
+ buffer_info = &tpd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ pci_unmap_page(pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_TODEVICE);
buffer_info->dma = 0;
}
+ }
+ for (i = 0; i < tpd_ring->count; i++) {
+ buffer_info = &tpd_ring->buffer_info[i];
if (buffer_info->skb) {
- dev_kfree_skb_irq(buffer_info->skb);
+ dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
- tpd->buffer_addr = 0;
- tpd->desc.data = 0;
-
- if (++sw_tpd_next_to_clean == tpd_ring->count)
- sw_tpd_next_to_clean = 0;
- }
- atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
-
- if (netif_queue_stopped(adapter->netdev)
- && netif_carrier_ok(adapter->netdev))
- netif_wake_queue(adapter->netdev);
-}
-
-static void atl1_check_for_link(struct atl1_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- u16 phy_data = 0;
-
- spin_lock(&adapter->lock);
- adapter->phy_timer_pending = false;
- atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
- atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
- spin_unlock(&adapter->lock);
-
- /* notify upper layer link down ASAP */
- if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
- if (netif_carrier_ok(netdev)) { /* old link state: Up */
- dev_info(&adapter->pdev->dev, "%s link is down\n",
- netdev->name);
- adapter->link_speed = SPEED_0;
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- }
}
- schedule_work(&adapter->link_chg_task);
-}
-/*
- * atl1_intr - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
- */
-static irqreturn_t atl1_intr(int irq, void *data)
-{
- /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/
- struct atl1_adapter *adapter = netdev_priv(data);
- u32 status;
- u8 update_rx;
- int max_ints = 10;
-
- status = adapter->cmb.cmb->int_stats;
- if (!status)
- return IRQ_NONE;
-
- update_rx = 0;
-
- do {
- /* clear CMB interrupt status at once */
- adapter->cmb.cmb->int_stats = 0;
-
- if (status & ISR_GPHY) /* clear phy status */
- atl1_clear_phy_int(adapter);
-
- /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
- iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
-
- /* check if SMB intr */
- if (status & ISR_SMB)
- atl1_inc_smb(adapter);
-
- /* check if PCIE PHY Link down */
- if (status & ISR_PHY_LINKDOWN) {
- dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n",
- status);
- if (netif_running(adapter->netdev)) { /* reset MAC */
- iowrite32(0, adapter->hw.hw_addr + REG_IMR);
- schedule_work(&adapter->pcie_dma_to_rst_task);
- return IRQ_HANDLED;
- }
- }
-
- /* check if DMA read/write error ? */
- if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
- dev_dbg(&adapter->pdev->dev,
- "pcie DMA r/w error (status = 0x%x)\n",
- status);
- iowrite32(0, adapter->hw.hw_addr + REG_IMR);
- schedule_work(&adapter->pcie_dma_to_rst_task);
- return IRQ_HANDLED;
- }
-
- /* link event */
- if (status & ISR_GPHY) {
- adapter->soft_stats.tx_carrier_errors++;
- atl1_check_for_link(adapter);
- }
-
- /* transmit event */
- if (status & ISR_CMB_TX)
- atl1_intr_tx(adapter);
-
- /* rx exception */
- if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
- ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
- ISR_HOST_RRD_OV | ISR_CMB_RX))) {
- if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
- ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
- ISR_HOST_RRD_OV))
- dev_dbg(&adapter->pdev->dev,
- "rx exception, ISR = 0x%x\n", status);
- atl1_intr_rx(adapter);
- }
-
- if (--max_ints < 0)
- break;
+ size = sizeof(struct atl1_buffer) * tpd_ring->count;
+ memset(tpd_ring->buffer_info, 0, size);
- } while ((status = adapter->cmb.cmb->int_stats));
+ /* Zero out the descriptor ring */
+ memset(tpd_ring->desc, 0, tpd_ring->size);
- /* re-enable Interrupt */
- iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
- return IRQ_HANDLED;
+ atomic_set(&tpd_ring->next_to_use, 0);
+ atomic_set(&tpd_ring->next_to_clean, 0);
}
/*
- * atl1_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
+ * atl1_free_ring_resources - Free Tx/Rx descriptor resources
+ * @adapter: board private structure
*
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
+ * Free all transmit and receive software resources
*/
-static void atl1_set_multi(struct net_device *netdev)
+void atl1_free_ring_resources(struct atl1_adapter *adapter)
{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- struct atl1_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
- u32 rctl;
- u32 hash_value;
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_ring_header *ring_header = &adapter->ring_header;
- /* Check for Promiscuous and All Multicast modes */
- rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
- if (netdev->flags & IFF_PROMISC)
- rctl |= MAC_CTRL_PROMIS_EN;
- else if (netdev->flags & IFF_ALLMULTI) {
- rctl |= MAC_CTRL_MC_ALL_EN;
- rctl &= ~MAC_CTRL_PROMIS_EN;
- } else
- rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+ atl1_clean_tx_ring(adapter);
+ atl1_clean_rx_ring(adapter);
- iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
+ kfree(tpd_ring->buffer_info);
+ pci_free_consistent(pdev, ring_header->size, ring_header->desc,
+ ring_header->dma);
- /* clear the old settings from the multicast hash table */
- iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
- iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+ tpd_ring->buffer_info = NULL;
+ tpd_ring->desc = NULL;
+ tpd_ring->dma = 0;
- /* compute mc addresses' hash value ,and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
- hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
- atl1_hash_set(hw, hash_value);
- }
+ rfd_ring->buffer_info = NULL;
+ rfd_ring->desc = NULL;
+ rfd_ring->dma = 0;
+
+ rrd_ring->desc = NULL;
+ rrd_ring->dma = 0;
}
static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -855,6 +481,31 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
}
+/*
+ * atl1_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_set_mac(struct net_device *netdev, void *p)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl1_set_mac_addr(&adapter->hw);
+ return 0;
+}
+
static u32 atl1_check_link(struct atl1_adapter *adapter)
{
struct atl1_hw *hw = &adapter->hw;
@@ -962,6 +613,103 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
return ATL1_SUCCESS;
}
+static void atl1_check_for_link(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 phy_data = 0;
+
+ spin_lock(&adapter->lock);
+ adapter->phy_timer_pending = false;
+ atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->lock);
+
+ /* notify upper layer link down ASAP */
+ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ dev_info(&adapter->pdev->dev, "%s link is down\n",
+ netdev->name);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ }
+ schedule_work(&adapter->link_chg_task);
+}
+
+/*
+ * atl1_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1_set_multi(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ struct dev_mc_list *mc_ptr;
+ u32 rctl;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+ if (netdev->flags & IFF_PROMISC)
+ rctl |= MAC_CTRL_PROMIS_EN;
+ else if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= MAC_CTRL_MC_ALL_EN;
+ rctl &= ~MAC_CTRL_PROMIS_EN;
+ } else
+ rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+ iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
+
+ /* clear the old settings from the multicast hash table */
+ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+	/* compute mc addresses' hash value, and put it into hash table */
+ for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ atl1_hash_set(hw, hash_value);
+ }
+}
+
+/*
+ * atl1_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int old_mtu = netdev->mtu;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+ if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ adapter->hw.max_frame_size = max_frame;
+ adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
+ adapter->rx_buffer_len = (max_frame + 7) & ~7;
+ adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
+
+ netdev->mtu = new_mtu;
+ if ((old_mtu != new_mtu) && netif_running(netdev)) {
+ atl1_down(adapter);
+ atl1_up(adapter);
+ }
+
+ return 0;
+}
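
For reference, the arithmetic in atl1_change_mtu() rounds the maximum frame size up to a multiple of 8 bytes for the receive buffer length and expresses the jumbo task threshold in 8-byte units. A small stand-alone sketch with an assumed 1500-byte MTU (the numbers are chosen only to illustrate the rounding):

#include <stdio.h>

int main(void)
{
	/* assumed: 1500-byte MTU, 14-byte Ethernet header, 4-byte FCS */
	int max_frame = 1500 + 14 + 4;			/* 1518 */
	int rx_buffer_len = (max_frame + 7) & ~7;	/* 1520, next multiple of 8 */
	int tx_jumbo_task_th = (max_frame + 7) >> 3;	/* 190, i.e. 1520 / 8 */

	printf("%d %d\n", rx_buffer_len, tx_jumbo_task_th);
	return 0;
}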
+
static void set_flow_ctrl_old(struct atl1_adapter *adapter)
{
u32 hi, lo, value;
@@ -974,7 +722,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter)
lo = value * 7 / 8;
value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
- ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+ ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
/* RRD Flow Control */
@@ -984,7 +732,7 @@ static void set_flow_ctrl_old(struct atl1_adapter *adapter)
if (lo < 2)
lo = 2;
value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
- ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+ ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
@@ -1001,7 +749,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
if (hi < lo)
hi = lo + 16;
value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
- ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+ ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
/* RRD Flow Control */
@@ -1013,7 +761,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
if (hi < lo)
hi = lo + 3;
value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
- ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+ ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
@@ -1062,7 +810,8 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
value <<= 16;
value += adapter->rfd_ring.count;
iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
- iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE);
+ iowrite32(adapter->tpd_ring.count, hw->hw_addr +
+ REG_DESC_TPD_RING_SIZE);
/* Load Ptr */
iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
@@ -1070,31 +819,31 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
/* config Mailbox */
value = ((atomic_read(&adapter->tpd_ring.next_to_use)
& MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
- ((atomic_read(&adapter->rrd_ring.next_to_clean)
- & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
- ((atomic_read(&adapter->rfd_ring.next_to_use)
- & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
+ ((atomic_read(&adapter->rrd_ring.next_to_clean)
+ & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
+ ((atomic_read(&adapter->rfd_ring.next_to_use)
+ & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
iowrite32(value, hw->hw_addr + REG_MAILBOX);
/* config IPG/IFG */
value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
<< MAC_IPG_IFG_IPGT_SHIFT) |
- (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
- << MAC_IPG_IFG_MIFG_SHIFT) |
- (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
- << MAC_IPG_IFG_IPGR1_SHIFT) |
- (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
- << MAC_IPG_IFG_IPGR2_SHIFT);
+ (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
+ << MAC_IPG_IFG_MIFG_SHIFT) |
+ (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
+ << MAC_IPG_IFG_IPGR1_SHIFT) |
+ (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
+ << MAC_IPG_IFG_IPGR2_SHIFT);
iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
/* config Half-Duplex Control */
value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
- (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
- << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
- MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
- (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
- (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
- << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+ (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
+ << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+ MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+ (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+ (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
+ << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
/* set Interrupt Moderator Timer */
@@ -1110,10 +859,10 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
/* jumbo size & rrd retirement timer */
value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
<< RXQ_JMBOSZ_TH_SHIFT) |
- (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
- << RXQ_JMBO_LKAH_SHIFT) |
- (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
- << RXQ_RRD_TIMER_SHIFT);
+ (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
+ << RXQ_JMBO_LKAH_SHIFT) |
+ (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
+ << RXQ_RRD_TIMER_SHIFT);
iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
/* Flow Control */
@@ -1132,35 +881,36 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
/* config TXQ */
value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
<< TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
- (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
- << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
- (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
- << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN;
+ (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
+ << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
+ (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
+ << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE |
+ TXQ_CTRL_EN;
iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
/* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */
value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
- << TX_JUMBO_TASK_TH_SHIFT) |
- (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
- << TX_TPD_MIN_IPG_SHIFT);
+ << TX_JUMBO_TASK_TH_SHIFT) |
+ (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
+ << TX_TPD_MIN_IPG_SHIFT);
iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
/* config RXQ */
value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
- << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
- (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
- << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
- (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
- << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) |
- RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
+ << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
+ (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
+ << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
+ (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
+ << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN |
+ RXQ_CTRL_EN;
iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
/* config DMA Engine */
value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
- << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
- ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
- << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
- DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN;
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
+ ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN |
+ DMA_CTRL_DMAW_EN;
value |= (u32) hw->dma_ord;
if (atl1_rcb_128 == hw->rcb_value)
value |= DMA_CTRL_RCB_VALUE;
@@ -1190,56 +940,495 @@ static u32 atl1_configure(struct atl1_adapter *adapter)
}
/*
+ * atl1_pcie_patch - Patch for PCIE module
+ */
+static void atl1_pcie_patch(struct atl1_adapter *adapter)
+{
+ u32 value;
+
+ /* much vendor magic here */
+ value = 0x6500;
+ iowrite32(value, adapter->hw.hw_addr + 0x12FC);
+ /* pcie flow control mode change */
+ value = ioread32(adapter->hw.hw_addr + 0x1008);
+ value |= 0x8000;
+ iowrite32(value, adapter->hw.hw_addr + 0x1008);
+}
+
+/*
+ * When resuming from ACPI sleep on some VIA motherboards, the Interrupt
+ * Disable bit (0x400) in the PCI Command register is left set, so INTx
+ * interrupts stay disabled. This function clears that bit.
+ * Brackett, 2006/03/15
+ */
+static void atl1_via_workaround(struct atl1_adapter *adapter)
+{
+ unsigned long value;
+
+ value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
+ if (value & PCI_COMMAND_INTX_DISABLE)
+ value &= ~PCI_COMMAND_INTX_DISABLE;
+ iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
+}
+
+/*
+ * atl1_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void atl1_irq_enable(struct atl1_adapter *adapter)
+{
+ iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
+ ioread32(adapter->hw.hw_addr + REG_IMR);
+}
+
+/*
* atl1_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
static void atl1_irq_disable(struct atl1_adapter *adapter)
{
- atomic_inc(&adapter->irq_sem);
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
ioread32(adapter->hw.hw_addr + REG_IMR);
synchronize_irq(adapter->pdev->irq);
}
-static void atl1_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static void atl1_clear_phy_int(struct atl1_adapter *adapter)
{
- struct atl1_adapter *adapter = netdev_priv(netdev);
+ u16 phy_data;
unsigned long flags;
- u32 ctrl;
spin_lock_irqsave(&adapter->lock, flags);
- /* atl1_irq_disable(adapter); */
- adapter->vlgrp = grp;
+ atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
- ctrl |= MAC_CTRL_RMV_VLAN;
- iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
- } else {
- /* disable VLAN tag insert/strip */
- ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
- ctrl &= ~MAC_CTRL_RMV_VLAN;
- iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+static void atl1_inc_smb(struct atl1_adapter *adapter)
+{
+ struct stats_msg_block *smb = adapter->smb.smb;
+
+ /* Fill out the OS statistics structure */
+ adapter->soft_stats.rx_packets += smb->rx_ok;
+ adapter->soft_stats.tx_packets += smb->tx_ok;
+ adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
+ adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
+ adapter->soft_stats.multicast += smb->rx_mcast;
+ adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 +
+ smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry);
+
+ /* Rx Errors */
+ adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err +
+ smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov +
+ smb->rx_rrd_ov + smb->rx_align_err);
+ adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
+ adapter->soft_stats.rx_length_errors += smb->rx_len_err;
+ adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
+ adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
+ adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
+ smb->rx_rxf_ov);
+
+ adapter->soft_stats.rx_pause += smb->rx_pause;
+ adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
+ adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
+
+ /* Tx Errors */
+ adapter->soft_stats.tx_errors += (smb->tx_late_col +
+ smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc);
+ adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
+ adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
+ adapter->soft_stats.tx_window_errors += smb->tx_late_col;
+
+ adapter->soft_stats.excecol += smb->tx_abort_col;
+ adapter->soft_stats.deffer += smb->tx_defer;
+ adapter->soft_stats.scc += smb->tx_1_col;
+ adapter->soft_stats.mcc += smb->tx_2_col;
+ adapter->soft_stats.latecol += smb->tx_late_col;
+ adapter->soft_stats.tx_underun += smb->tx_underrun;
+ adapter->soft_stats.tx_trunc += smb->tx_trunc;
+ adapter->soft_stats.tx_pause += smb->tx_pause;
+
+ adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
+ adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
+ adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
+ adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
+ adapter->net_stats.multicast = adapter->soft_stats.multicast;
+ adapter->net_stats.collisions = adapter->soft_stats.collisions;
+ adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
+ adapter->net_stats.rx_over_errors =
+ adapter->soft_stats.rx_missed_errors;
+ adapter->net_stats.rx_length_errors =
+ adapter->soft_stats.rx_length_errors;
+ adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
+ adapter->net_stats.rx_frame_errors =
+ adapter->soft_stats.rx_frame_errors;
+ adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
+ adapter->net_stats.rx_missed_errors =
+ adapter->soft_stats.rx_missed_errors;
+ adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
+ adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
+ adapter->net_stats.tx_aborted_errors =
+ adapter->soft_stats.tx_aborted_errors;
+ adapter->net_stats.tx_window_errors =
+ adapter->soft_stats.tx_window_errors;
+ adapter->net_stats.tx_carrier_errors =
+ adapter->soft_stats.tx_carrier_errors;
+}
+
+/*
+ * atl1_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ return &adapter->net_stats;
+}
+
+static void atl1_update_mailbox(struct atl1_adapter *adapter)
+{
+ unsigned long flags;
+ u32 tpd_next_to_use;
+ u32 rfd_next_to_use;
+ u32 rrd_next_to_clean;
+ u32 value;
+
+ spin_lock_irqsave(&adapter->mb_lock, flags);
+
+ tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+ rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
+ rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
+
+ value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+ MB_RFD_PROD_INDX_SHIFT) |
+ ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+ MB_RRD_CONS_INDX_SHIFT) |
+ ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+ MB_TPD_PROD_INDX_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+
+ spin_unlock_irqrestore(&adapter->mb_lock, flags);
+}
+
+static void atl1_clean_alloc_flag(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd, u16 offset)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+
+ while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) {
+ rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0;
+ if (++rfd_ring->next_to_clean == rfd_ring->count) {
+ rfd_ring->next_to_clean = 0;
+ }
}
+}
- /* atl1_irq_enable(adapter); */
- spin_unlock_irqrestore(&adapter->lock, flags);
+static void atl1_update_rfd_index(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd)
+{
+ u16 num_buf;
+
+ num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) /
+ adapter->rx_buffer_len;
+ if (rrd->num_buf == num_buf)
+ /* clean alloc flag for bad rrd */
+ atl1_clean_alloc_flag(adapter, rrd, num_buf);
}
-static void atl1_restore_vlan(struct atl1_adapter *adapter)
+static void atl1_rx_checksum(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd, struct sk_buff *skb)
{
- atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ struct pci_dev *pdev = adapter->pdev;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
+ if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
+ ERR_FLAG_CODE | ERR_FLAG_OV)) {
+ adapter->hw_csum_err++;
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "rx checksum error\n");
+ return;
+ }
+ }
+
+ /* not IPv4 */
+ if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
+ /* checksum is invalid, but it's not an IPv4 pkt, so ok */
+ return;
+
+ /* IPv4 packet */
+ if (likely(!(rrd->err_flg &
+ (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+ return;
+ }
+
+ /* IPv4, but hardware thinks its checksum is wrong */
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
+ rrd->pkt_flg, rrd->err_flg);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
+ adapter->hw_csum_err++;
+ return;
+}
+
+/*
+ * atl1_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ */
+static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct pci_dev *pdev = adapter->pdev;
+ struct page *page;
+ unsigned long offset;
+ struct atl1_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+ u16 num_alloc = 0;
+ u16 rfd_next_to_use, next_next;
+ struct rx_free_desc *rfd_desc;
+
+ next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
+ if (++next_next == rfd_ring->count)
+ next_next = 0;
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+
+ while (!buffer_info->alloced && !next_info->alloced) {
+ if (buffer_info->skb) {
+ buffer_info->alloced = 1;
+ goto next;
+ }
+
+ rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+ skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+ if (unlikely(!skb)) { /* Better luck next round */
+ adapter->net_stats.rx_dropped++;
+ break;
+ }
+
+ /*
+		 * Make buffer alignment 2 beyond a 16 byte boundary;
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed.
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16) adapter->rx_buffer_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(pdev, page, offset,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
+ rfd_desc->coalese = 0;
+
+next:
+ rfd_next_to_use = next_next;
+ if (unlikely(++next_next == rfd_ring->count))
+ next_next = 0;
+
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+ num_alloc++;
+ }
+
+ if (num_alloc) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
+ }
+ return num_alloc;
+}
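
The NET_IP_ALIGN reservation in atl1_alloc_rx_buffers() exists so that, once the 14-byte Ethernet header is stripped, the IP header lands on a 16-byte boundary. A tiny stand-alone illustration of that arithmetic (the starting address is assumed, purely for the sake of the example):

#include <stdio.h>

int main(void)
{
	/* assume skb->data begins 16-byte aligned */
	unsigned long data = 0x1000;
	unsigned long ip_hdr = data + 2 /* NET_IP_ALIGN */ + 14 /* MAC header */;

	printf("IP header at %#lx, 16-byte aligned: %s\n",
	       ip_hdr, (ip_hdr % 16 == 0) ? "yes" : "no");
	return 0;
}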
+
+static void atl1_intr_rx(struct atl1_adapter *adapter)
+{
+ int i, count;
+ u16 length;
+ u16 rrd_next_to_clean;
+ u32 value;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_buffer *buffer_info;
+ struct rx_return_desc *rrd;
+ struct sk_buff *skb;
+
+ count = 0;
+
+ rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
+
+ while (1) {
+ rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
+ i = 1;
+ if (likely(rrd->xsz.valid)) { /* packet valid */
+chk_rrd:
+ /* check rrd status */
+ if (likely(rrd->num_buf == 1))
+ goto rrd_ok;
+
+ /* rrd seems to be bad */
+ if (unlikely(i-- > 0)) {
+ /* rrd may not be DMAed completely */
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "incomplete RRD DMA transfer\n");
+ udelay(1);
+ goto chk_rrd;
+ }
+ /* bad rrd */
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "bad RRD\n");
+			/* see if the RFD index needs updating */
+ if (rrd->num_buf > 1)
+ atl1_update_rfd_index(adapter, rrd);
+
+ /* update rrd */
+ rrd->xsz.valid = 0;
+ if (++rrd_next_to_clean == rrd_ring->count)
+ rrd_next_to_clean = 0;
+ count++;
+ continue;
+		} else { /* current rrd has not been updated yet */
+
+ break;
+ }
+rrd_ok:
+ /* clean alloc flag for bad rrd */
+ atl1_clean_alloc_flag(adapter, rrd, 0);
+
+ buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
+ if (++rfd_ring->next_to_clean == rfd_ring->count)
+ rfd_ring->next_to_clean = 0;
+
+ /* update rrd next to clean */
+ if (++rrd_next_to_clean == rrd_ring->count)
+ rrd_next_to_clean = 0;
+ count++;
+
+ if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
+ if (!(rrd->err_flg &
+ (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
+ | ERR_FLAG_LEN))) {
+ /* packet error, don't need upstream */
+ buffer_info->alloced = 0;
+ rrd->xsz.valid = 0;
+ continue;
+ }
+ }
+
+ /* Good Receive */
+ pci_unmap_page(adapter->pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_FROMDEVICE);
+ skb = buffer_info->skb;
+ length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
+
+ skb_put(skb, length - ETHERNET_FCS_SIZE);
+
+ /* Receive Checksum Offload */
+ atl1_rx_checksum(adapter, rrd, skb);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
+ u16 vlan_tag = (rrd->vlan_tag >> 4) |
+ ((rrd->vlan_tag & 7) << 13) |
+ ((rrd->vlan_tag & 8) << 9);
+ vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
+ } else
+ netif_rx(skb);
+
+ /* let protocol layer free skb */
+ buffer_info->skb = NULL;
+ buffer_info->alloced = 0;
+ rrd->xsz.valid = 0;
+
+ adapter->netdev->last_rx = jiffies;
+ }
+
+ atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
+
+ atl1_alloc_rx_buffers(adapter);
+
+ /* update mailbox ? */
+ if (count) {
+ u32 tpd_next_to_use;
+ u32 rfd_next_to_use;
+ u32 rrd_next_to_clean;
+
+ spin_lock(&adapter->mb_lock);
+
+ tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+ rfd_next_to_use =
+ atomic_read(&adapter->rfd_ring.next_to_use);
+ rrd_next_to_clean =
+ atomic_read(&adapter->rrd_ring.next_to_clean);
+ value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+ MB_RFD_PROD_INDX_SHIFT) |
+ ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+ MB_RRD_CONS_INDX_SHIFT) |
+ ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+ MB_TPD_PROD_INDX_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+ spin_unlock(&adapter->mb_lock);
+ }
+}
+
+static void atl1_intr_tx(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ u16 sw_tpd_next_to_clean;
+ u16 cmb_tpd_next_to_clean;
+
+ sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
+
+ while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
+ struct tx_packet_desc *tpd;
+
+ tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
+ buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
+ if (buffer_info->dma) {
+ pci_unmap_page(adapter->pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb_irq(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ tpd->buffer_addr = 0;
+ tpd->desc.data = 0;
+
+ if (++sw_tpd_next_to_clean == tpd_ring->count)
+ sw_tpd_next_to_clean = 0;
+ }
+ atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
+
+ if (netif_queue_stopped(adapter->netdev)
+ && netif_carrier_ok(adapter->netdev))
+ netif_wake_queue(adapter->netdev);
}
static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
- return ((next_to_clean >
- next_to_use) ? next_to_clean - next_to_use -
- 1 : tpd_ring->count + next_to_clean - next_to_use - 1);
+ return ((next_to_clean > next_to_use) ?
+ next_to_clean - next_to_use - 1 :
+ tpd_ring->count + next_to_clean - next_to_use - 1);
}
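
tpd_avail() above is the standard free-slot count for a circular descriptor ring that always keeps one entry unused so a full ring can be distinguished from an empty one. A stand-alone sketch of the same calculation (the function name and sample numbers are illustrative only):

static unsigned int ring_space(unsigned int next_to_clean,
			       unsigned int next_to_use, unsigned int count)
{
	/* one slot is always left empty so full != empty */
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}
/* e.g. count = 8, next_to_clean = 2, next_to_use = 6 -> 3 free slots */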
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
@@ -1262,9 +1451,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ iph->daddr, 0, IPPROTO_TCP, 0);
ipofst = skb_network_offset(skb);
if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
@@ -1272,7 +1459,8 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
tso->tsopl |= (iph->ihl &
CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
tso->tsopl |= (tcp_hdrlen(skb) &
- TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
+ TSO_PARAM_TCPHDRLEN_MASK) <<
+ TSO_PARAM_TCPHDRLEN_SHIFT;
tso->tsopl |= (skb_shinfo(skb)->gso_size &
TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
@@ -1285,7 +1473,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
}
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
- struct csum_param *csum)
+ struct csum_param *csum)
{
u8 css, cso;
@@ -1293,7 +1481,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
cso = skb_transport_offset(skb);
css = cso + skb->csum_offset;
if (unlikely(cso & 0x1)) {
- dev_dbg(&adapter->pdev->dev,
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
"payload offset not an even number\n");
return -1;
}
@@ -1308,8 +1496,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
return true;
}
-static void atl1_tx_map(struct atl1_adapter *adapter,
- struct sk_buff *skb, bool tcp_seg)
+static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
+ bool tcp_seg)
{
/* We enter this function holding a spinlock. */
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
@@ -1346,26 +1534,25 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
if (first_buf_len > proto_hdr_len) {
len12 = first_buf_len - proto_hdr_len;
- m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
for (i = 0; i < m; i++) {
buffer_info =
&tpd_ring->buffer_info[tpd_next_to_use];
buffer_info->skb = NULL;
buffer_info->length =
- (MAX_TX_BUF_LEN >=
- len12) ? MAX_TX_BUF_LEN : len12;
+ (ATL1_MAX_TX_BUF_LEN >=
+ len12) ? ATL1_MAX_TX_BUF_LEN : len12;
len12 -= buffer_info->length;
page = virt_to_page(skb->data +
- (proto_hdr_len +
- i * MAX_TX_BUF_LEN));
+ (proto_hdr_len +
+ i * ATL1_MAX_TX_BUF_LEN));
offset = (unsigned long)(skb->data +
- (proto_hdr_len +
- i * MAX_TX_BUF_LEN)) &
- ~PAGE_MASK;
- buffer_info->dma =
- pci_map_page(adapter->pdev, page, offset,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ (proto_hdr_len +
+ i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(adapter->pdev,
+ page, offset, buffer_info->length,
+ PCI_DMA_TODEVICE);
if (++tpd_next_to_use == tpd_ring->count)
tpd_next_to_use = 0;
}
@@ -1376,8 +1563,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
page = virt_to_page(skb->data);
offset = (unsigned long)skb->data & ~PAGE_MASK;
buffer_info->dma = pci_map_page(adapter->pdev, page,
- offset, first_buf_len,
- PCI_DMA_TODEVICE);
+ offset, first_buf_len, PCI_DMA_TODEVICE);
if (++tpd_next_to_use == tpd_ring->count)
tpd_next_to_use = 0;
}
@@ -1389,19 +1575,19 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
lenf = frag->size;
- m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN;
for (i = 0; i < m; i++) {
buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
if (unlikely(buffer_info->skb))
BUG();
buffer_info->skb = NULL;
- buffer_info->length =
- (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf;
+ buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ?
+ ATL1_MAX_TX_BUF_LEN : lenf;
lenf -= buffer_info->length;
- buffer_info->dma =
- pci_map_page(adapter->pdev, frag->page,
- frag->page_offset + i * MAX_TX_BUF_LEN,
- buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
+ buffer_info->length, PCI_DMA_TODEVICE);
if (++tpd_next_to_use == tpd_ring->count)
tpd_next_to_use = 0;
@@ -1413,7 +1599,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter,
}
static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
- union tpd_descr *descr)
+ union tpd_descr *descr)
{
/* We enter this function holding a spinlock. */
struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
@@ -1457,31 +1643,6 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
}
-static void atl1_update_mailbox(struct atl1_adapter *adapter)
-{
- unsigned long flags;
- u32 tpd_next_to_use;
- u32 rfd_next_to_use;
- u32 rrd_next_to_clean;
- u32 value;
-
- spin_lock_irqsave(&adapter->mb_lock, flags);
-
- tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
- rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
- rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
-
- value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
- MB_RFD_PROD_INDX_SHIFT) |
- ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
- MB_RRD_CONS_INDX_SHIFT) |
- ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
- MB_TPD_PROD_INDX_SHIFT);
- iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
-
- spin_unlock_irqrestore(&adapter->mb_lock, flags);
-}
-
static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1517,8 +1678,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
for (f = 0; f < nr_frags; f++) {
frag_size = skb_shinfo(skb)->frags[f].size;
if (frag_size)
- count +=
- (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
}
/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
@@ -1534,7 +1695,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* need additional TPD ? */
if (proto_hdr_len != len)
count += (len - proto_hdr_len +
- MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ ATL1_MAX_TX_BUF_LEN - 1) /
+ ATL1_MAX_TX_BUF_LEN;
}
}
@@ -1542,7 +1704,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (!spin_trylock(&adapter->lock)) {
/* Can't get lock - tell upper layer to requeue */
local_irq_restore(flags);
- dev_dbg(&adapter->pdev->dev, "tx locked\n");
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
return NETDEV_TX_LOCKED;
}
@@ -1550,7 +1712,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* not enough descriptors */
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->lock, flags);
- dev_dbg(&adapter->pdev->dev, "tx busy\n");
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
return NETDEV_TX_BUSY;
}
@@ -1592,131 +1754,208 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
/*
- * atl1_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
+ * atl1_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
*/
-static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
+static irqreturn_t atl1_intr(int irq, void *data)
{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- return &adapter->net_stats;
-}
+ struct atl1_adapter *adapter = netdev_priv(data);
+ u32 status;
+ u8 update_rx;
+ int max_ints = 10;
-/*
- * atl1_clean_rx_ring - Free RFD Buffers
- * @adapter: board private structure
- */
-static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
-{
- struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
- struct atl1_buffer *buffer_info;
- struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
- unsigned int i;
+ status = adapter->cmb.cmb->int_stats;
+ if (!status)
+ return IRQ_NONE;
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rfd_ring->count; i++) {
- buffer_info = &rfd_ring->buffer_info[i];
- if (buffer_info->dma) {
- pci_unmap_page(pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- buffer_info->dma = 0;
+ update_rx = 0;
+
+ do {
+ /* clear CMB interrupt status at once */
+ adapter->cmb.cmb->int_stats = 0;
+
+ if (status & ISR_GPHY) /* clear phy status */
+ atl1_clear_phy_int(adapter);
+
+ /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
+ iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
+
+ /* check if SMB intr */
+ if (status & ISR_SMB)
+ atl1_inc_smb(adapter);
+
+ /* check if PCIE PHY Link down */
+ if (status & ISR_PHY_LINKDOWN) {
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "pcie phy link down %x\n", status);
+ if (netif_running(adapter->netdev)) { /* reset MAC */
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ schedule_work(&adapter->pcie_dma_to_rst_task);
+ return IRQ_HANDLED;
+ }
}
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
+
+ /* check if DMA read/write error ? */
+ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "pcie DMA r/w error (status = 0x%x)\n",
+ status);
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ schedule_work(&adapter->pcie_dma_to_rst_task);
+ return IRQ_HANDLED;
}
- }
- size = sizeof(struct atl1_buffer) * rfd_ring->count;
- memset(rfd_ring->buffer_info, 0, size);
+ /* link event */
+ if (status & ISR_GPHY) {
+ adapter->soft_stats.tx_carrier_errors++;
+ atl1_check_for_link(adapter);
+ }
- /* Zero out the descriptor ring */
- memset(rfd_ring->desc, 0, rfd_ring->size);
+ /* transmit event */
+ if (status & ISR_CMB_TX)
+ atl1_intr_tx(adapter);
- rfd_ring->next_to_clean = 0;
- atomic_set(&rfd_ring->next_to_use, 0);
+ /* rx exception */
+ if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV | ISR_CMB_RX))) {
+ if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV))
+ dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+ "rx exception, ISR = 0x%x\n", status);
+ atl1_intr_rx(adapter);
+ }
- rrd_ring->next_to_use = 0;
- atomic_set(&rrd_ring->next_to_clean, 0);
+ if (--max_ints < 0)
+ break;
+
+ } while ((status = adapter->cmb.cmb->int_stats));
+
+ /* re-enable Interrupt */
+ iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
+ return IRQ_HANDLED;
}
/*
- * atl1_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
+ * atl1_watchdog - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
*/
-static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
+static void atl1_watchdog(unsigned long data)
{
- struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
- struct atl1_buffer *buffer_info;
- struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
- unsigned int i;
+ struct atl1_adapter *adapter = (struct atl1_adapter *)data;
- /* Free all the Tx ring sk_buffs */
- for (i = 0; i < tpd_ring->count; i++) {
- buffer_info = &tpd_ring->buffer_info[i];
- if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
- }
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
- for (i = 0; i < tpd_ring->count; i++) {
- buffer_info = &tpd_ring->buffer_info[i];
- if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- }
+/*
+ * atl1_phy_config - Timer Call-back
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl1_phy_config(unsigned long data)
+{
+ struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_hw *hw = &adapter->hw;
+ unsigned long flags;
- size = sizeof(struct atl1_buffer) * tpd_ring->count;
- memset(tpd_ring->buffer_info, 0, size);
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->phy_timer_pending = false;
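+	/* rewrite the advertisement registers, then reset the PHY with autonegotiation enabled */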
+ atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+ atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
+ atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
- /* Zero out the descriptor ring */
- memset(tpd_ring->desc, 0, tpd_ring->size);
+/*
+ * atl1_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1_tx_timeout(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->tx_timeout_task);
+}
- atomic_set(&tpd_ring->next_to_use, 0);
- atomic_set(&tpd_ring->next_to_clean, 0);
+/*
+ * If the TPD buffer size equals 0, PCIE DMAR_TO_INT will assert.
+ * We do a soft reset <0x1400=1> per the spec, but it seems the
+ * PCIE/DMA state machine is not reset, so DMAR_TO_INT asserts
+ * again and again.
+ */
+static void atl1_tx_timeout_task(struct work_struct *work)
+{
+ struct atl1_adapter *adapter =
+ container_of(work, struct atl1_adapter, tx_timeout_task);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
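+	/* bounce the interface (down, then up) to recover from the hang */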
+ atl1_down(adapter);
+ atl1_up(adapter);
+ netif_device_attach(netdev);
}
/*
- * atl1_free_ring_resources - Free Tx / RX descriptor Resources
- * @adapter: board private structure
- *
- * Free all transmit software resources
+ * atl1_link_chg_task - deal with a link change event outside interrupt context
*/
-void atl1_free_ring_resources(struct atl1_adapter *adapter)
+static void atl1_link_chg_task(struct work_struct *work)
{
- struct pci_dev *pdev = adapter->pdev;
- struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
- struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
- struct atl1_ring_header *ring_header = &adapter->ring_header;
+ struct atl1_adapter *adapter =
+ container_of(work, struct atl1_adapter, link_chg_task);
+ unsigned long flags;
- atl1_clean_tx_ring(adapter);
- atl1_clean_rx_ring(adapter);
+ spin_lock_irqsave(&adapter->lock, flags);
+ atl1_check_link(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
- kfree(tpd_ring->buffer_info);
- pci_free_consistent(pdev, ring_header->size, ring_header->desc,
- ring_header->dma);
+static void atl1_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u32 ctrl;
- tpd_ring->buffer_info = NULL;
- tpd_ring->desc = NULL;
- tpd_ring->dma = 0;
+ spin_lock_irqsave(&adapter->lock, flags);
+ /* atl1_irq_disable(adapter); */
+ adapter->vlgrp = grp;
- rfd_ring->buffer_info = NULL;
- rfd_ring->desc = NULL;
- rfd_ring->dma = 0;
+ if (grp) {
+ /* enable VLAN tag insert/strip */
+ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+ ctrl |= MAC_CTRL_RMV_VLAN;
+ iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+ ctrl &= ~MAC_CTRL_RMV_VLAN;
+ iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+ }
- rrd_ring->desc = NULL;
- rrd_ring->dma = 0;
+ /* atl1_irq_enable(adapter); */
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static void atl1_restore_vlan(struct atl1_adapter *adapter)
+{
+ atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+}
+
+int atl1_reset(struct atl1_adapter *adapter)
+{
+ int ret;
+
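+	/* perform a full hardware reset, then reinitialize the MAC */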
+ ret = atl1_reset_hw(&adapter->hw);
+ if (ret != ATL1_SUCCESS)
+ return ret;
+ return atl1_init_hw(&adapter->hw);
}
s32 atl1_up(struct atl1_adapter *adapter)
@@ -1727,6 +1966,7 @@ s32 atl1_up(struct atl1_adapter *adapter)
/* hardware has been reset, we need to reload some things */
atl1_set_multi(netdev);
+ atl1_init_ring_ptrs(adapter);
atl1_restore_vlan(adapter);
err = atl1_alloc_rx_buffers(adapter);
if (unlikely(!err)) /* no RX BUFFER allocated */
@@ -1754,11 +1994,6 @@ s32 atl1_up(struct atl1_adapter *adapter)
atl1_check_link(adapter);
return 0;
- /* FIXME: unreachable code! -- CHS */
- /* free irq disable any interrupt */
- iowrite32(0, adapter->hw.hw_addr + REG_IMR);
- free_irq(adapter->pdev->irq, netdev);
-
err_up:
pci_disable_msi(adapter->pdev);
/* free rx_buffers */
@@ -1790,172 +2025,6 @@ void atl1_down(struct atl1_adapter *adapter)
}
/*
- * atl1_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- */
-static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- int old_mtu = netdev->mtu;
- int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-
- if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
- return -EINVAL;
- }
-
- adapter->hw.max_frame_size = max_frame;
- adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
- adapter->rx_buffer_len = (max_frame + 7) & ~7;
- adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
-
- netdev->mtu = new_mtu;
- if ((old_mtu != new_mtu) && netif_running(netdev)) {
- atl1_down(adapter);
- atl1_up(adapter);
- }
-
- return 0;
-}
-
-/*
- * atl1_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- */
-static int atl1_set_mac(struct net_device *netdev, void *p)
-{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
-
- atl1_set_mac_addr(&adapter->hw);
- return 0;
-}
-
-/*
- * atl1_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
- */
-static void atl1_watchdog(unsigned long data)
-{
- struct atl1_adapter *adapter = (struct atl1_adapter *)data;
-
- /* Reset the timer */
- mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
-}
-
-static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
-{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- u16 result;
-
- atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
-
- return result;
-}
-
-static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val)
-{
- struct atl1_adapter *adapter = netdev_priv(netdev);
-
- atl1_write_phy_reg(&adapter->hw, reg_num, val);
-}
-
-/*
- * atl1_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
-static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
- int retval;
-
- if (!netif_running(netdev))
- return -EINVAL;
-
- spin_lock_irqsave(&adapter->lock, flags);
- retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
- spin_unlock_irqrestore(&adapter->lock, flags);
-
- return retval;
-}
-
-/*
- * atl1_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
-static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return atl1_mii_ioctl(netdev, ifr, cmd);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/*
- * atl1_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- */
-static void atl1_tx_timeout(struct net_device *netdev)
-{
- struct atl1_adapter *adapter = netdev_priv(netdev);
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->tx_timeout_task);
-}
-
-/*
- * atl1_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
- */
-static void atl1_phy_config(unsigned long data)
-{
- struct atl1_adapter *adapter = (struct atl1_adapter *)data;
- struct atl1_hw *hw = &adapter->hw;
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->lock, flags);
- adapter->phy_timer_pending = false;
- atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
- atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
- atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
- spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-int atl1_reset(struct atl1_adapter *adapter)
-{
- int ret;
-
- ret = atl1_reset_hw(&adapter->hw);
- if (ret != ATL1_SUCCESS)
- return ret;
- return atl1_init_hw(&adapter->hw);
-}
-
-/*
* atl1_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2007,77 +2076,113 @@ static int atl1_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void atl1_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- atl1_intr(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif
-
-/*
- * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
- * will assert. We do soft reset <0x1400=1> according
- * with the SPEC. BUT, it seemes that PCIE or DMA
- * state-machine will not be reset. DMAR_TO_INT will
- * assert again and again.
- */
-static void atl1_tx_timeout_task(struct work_struct *work)
+#ifdef CONFIG_PM
+static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct atl1_adapter *adapter =
- container_of(work, struct atl1_adapter, tx_timeout_task);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ u32 ctrl = 0;
+ u32 wufc = adapter->wol;
netif_device_detach(netdev);
- atl1_down(adapter);
- atl1_up(adapter);
- netif_device_attach(netdev);
-}
+ if (netif_running(netdev))
+ atl1_down(adapter);
-/*
- * atl1_link_chg_task - deal with link change event Out of interrupt context
- */
-static void atl1_link_chg_task(struct work_struct *work)
-{
- struct atl1_adapter *adapter =
- container_of(work, struct atl1_adapter, link_chg_task);
- unsigned long flags;
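+	/* the BMSR link bit is latched; read it twice to get the current link state */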
+ atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
+ atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
+ if (ctrl & BMSR_LSTATUS)
+ wufc &= ~ATL1_WUFC_LNKC;
- spin_lock_irqsave(&adapter->lock, flags);
- atl1_check_link(adapter);
- spin_unlock_irqrestore(&adapter->lock, flags);
+ /* reduce speed to 10/100M */
+ if (wufc) {
+ atl1_phy_enter_power_saving(hw);
+		/* on resume, let the driver set up the link again */
+ hw->phy_configured = false;
+ atl1_set_mac_addr(hw);
+ atl1_set_multi(netdev);
+
+ ctrl = 0;
+ /* turn on magic packet wol */
+ if (wufc & ATL1_WUFC_MAG)
+ ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+ /* turn on Link change WOL */
+ if (wufc & ATL1_WUFC_LNKC)
+ ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+ iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+ ctrl &= ~MAC_CTRL_DBG;
+ ctrl &= ~MAC_CTRL_PROMIS_EN;
+ if (wufc & ATL1_WUFC_MC)
+ ctrl |= MAC_CTRL_MC_ALL_EN;
+ else
+ ctrl &= ~MAC_CTRL_MC_ALL_EN;
+
+		/* turn on broadcast mode if wake-on-broadcast is enabled */
+ if (wufc & ATL1_WUFC_BC)
+ ctrl |= MAC_CTRL_BC_EN;
+ else
+ ctrl &= ~MAC_CTRL_BC_EN;
+
+ /* enable RX */
+ ctrl |= MAC_CTRL_RX_EN;
+ iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ } else {
+ iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+ }
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
-/*
- * atl1_pcie_patch - Patch for PCIE module
- */
-static void atl1_pcie_patch(struct atl1_adapter *adapter)
+static int atl1_resume(struct pci_dev *pdev)
{
- u32 value;
- value = 0x6500;
- iowrite32(value, adapter->hw.hw_addr + 0x12FC);
- /* pcie flow control mode change */
- value = ioread32(adapter->hw.hw_addr + 0x1008);
- value |= 0x8000;
- iowrite32(value, adapter->hw.hw_addr + 0x1008);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ u32 ret_val;
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
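+	/* re-enable the device and make sure wake-up is disarmed */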
+ ret_val = pci_enable_device(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
+ atl1_reset(adapter);
+
+ if (netif_running(netdev))
+ atl1_up(adapter);
+ netif_device_attach(netdev);
+
+ atl1_via_workaround(adapter);
+
+ return 0;
}
+#else
+#define atl1_suspend NULL
+#define atl1_resume NULL
+#endif
-/*
- * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400
- * on PCI Command register is disable.
- * The function enable this bit.
- * Brackett, 2006/03/15
- */
-static void atl1_via_workaround(struct atl1_adapter *adapter)
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void atl1_poll_controller(struct net_device *netdev)
{
- unsigned long value;
-
- value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
- if (value & PCI_COMMAND_INTX_DISABLE)
- value &= ~PCI_COMMAND_INTX_DISABLE;
- iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
+ disable_irq(netdev->irq);
+ atl1_intr(netdev->irq, netdev);
+ enable_irq(netdev->irq);
}
+#endif
/*
* atl1_probe - Device Initialization Routine
@@ -2091,7 +2196,7 @@ static void atl1_via_workaround(struct atl1_adapter *adapter)
* and a hardware reset occur.
*/
static int __devinit atl1_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl1_adapter *adapter;
@@ -2145,7 +2250,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
}
/* get device revision number */
adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
- (REG_MASTER_CTRL + 2));
+ (REG_MASTER_CTRL + 2));
dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
/* set default ring resource counts */
@@ -2298,7 +2403,8 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
* address, we need to save the permanent one.
*/
if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) {
- memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN);
+ memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
+ ETH_ALEN);
atl1_set_mac_addr(&adapter->hw);
}
@@ -2310,112 +2416,11 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct atl1_adapter *adapter = netdev_priv(netdev);
- struct atl1_hw *hw = &adapter->hw;
- u32 ctrl = 0;
- u32 wufc = adapter->wol;
-
- netif_device_detach(netdev);
- if (netif_running(netdev))
- atl1_down(adapter);
-
- atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
- atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
- if (ctrl & BMSR_LSTATUS)
- wufc &= ~ATL1_WUFC_LNKC;
-
- /* reduce speed to 10/100M */
- if (wufc) {
- atl1_phy_enter_power_saving(hw);
- /* if resume, let driver to re- setup link */
- hw->phy_configured = false;
- atl1_set_mac_addr(hw);
- atl1_set_multi(netdev);
-
- ctrl = 0;
- /* turn on magic packet wol */
- if (wufc & ATL1_WUFC_MAG)
- ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
-
- /* turn on Link change WOL */
- if (wufc & ATL1_WUFC_LNKC)
- ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
- iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
-
- /* turn on all-multi mode if wake on multicast is enabled */
- ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
- ctrl &= ~MAC_CTRL_DBG;
- ctrl &= ~MAC_CTRL_PROMIS_EN;
- if (wufc & ATL1_WUFC_MC)
- ctrl |= MAC_CTRL_MC_ALL_EN;
- else
- ctrl &= ~MAC_CTRL_MC_ALL_EN;
-
- /* turn on broadcast mode if wake on-BC is enabled */
- if (wufc & ATL1_WUFC_BC)
- ctrl |= MAC_CTRL_BC_EN;
- else
- ctrl &= ~MAC_CTRL_BC_EN;
-
- /* enable RX */
- ctrl |= MAC_CTRL_RX_EN;
- iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */
- } else {
- iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */
- }
-
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int atl1_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct atl1_adapter *adapter = netdev_priv(netdev);
- u32 ret_val;
-
- pci_set_power_state(pdev, 0);
- pci_restore_state(pdev);
-
- ret_val = pci_enable_device(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
- atl1_reset(adapter);
-
- if (netif_running(netdev))
- atl1_up(adapter);
- netif_device_attach(netdev);
-
- atl1_via_workaround(adapter);
-
- return 0;
-}
-#else
-#define atl1_suspend NULL
-#define atl1_resume NULL
-#endif
-
static struct pci_driver atl1_driver = {
.name = atl1_driver_name,
.id_table = atl1_pci_tbl,
.probe = atl1_probe,
.remove = __devexit_p(atl1_remove),
- /* Power Managment Hooks */
- /* probably broken right now -- CHS */
.suspend = atl1_suspend,
.resume = atl1_resume
};
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index c27cfcef45fa..e86b3691765b 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1205,8 +1205,8 @@ static int au1000_rx(struct net_device *dev)
continue;
}
skb_reserve(skb, 2); /* 16 byte IP header align */
- eth_copy_and_sum(skb,
- (unsigned char *)pDB->vaddr, frmlen, 0);
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index d19874bf0706..1d882360b34d 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -459,7 +459,7 @@ static int ax_open(struct net_device *dev)
struct ei_device *ei_local = netdev_priv(dev);
int ret;
- dev_dbg(ax->dev, "%s: open\n", dev->name);
+ dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
ret = request_irq(dev->irq, ax_ei_interrupt, 0, dev->name, dev);
if (ret)
@@ -492,7 +492,7 @@ static int ax_close(struct net_device *dev)
struct ax_device *ax = to_ax_dev(dev);
struct ei_device *ei_local = netdev_priv(dev);
- dev_dbg(ax->dev, "%s: close\n", dev->name);
+ dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
/* turn the phy off */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 96fb0ec905a7..37f1b6ff5c12 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1519,14 +1519,13 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
u8 *pwol_pattern;
u8 pwol_mask[B44_PMASK_SIZE];
- pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+ pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
if (!pwol_pattern) {
printk(KERN_ERR PFX "Memory not available for WOL\n");
return;
}
/* Ipv4 magic packet pattern - pattern 0.*/
- memset(pwol_pattern, 0, B44_PATTERN_SIZE);
memset(pwol_mask, 0, B44_PMASK_SIZE);
plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
B44_ETHIPV4UDP_HLEN);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
new file mode 100644
index 000000000000..9a08d656f1ce
--- /dev/null
+++ b/drivers/net/bfin_mac.c
@@ -0,0 +1,1009 @@
+/*
+ * File: drivers/net/bfin_mac.c
+ * Based on:
+ * Maintainer:
+ * Bryan Wu <bryan.wu@analog.com>
+ *
+ * Original author:
+ * Luke Yang <luke.yang@analog.com>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program ; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/platform_device.h>
+
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/blackfin.h>
+#include <asm/cacheflush.h>
+#include <asm/portmux.h>
+
+#include "bfin_mac.h"
+
+#define DRV_NAME "bfin_mac"
+#define DRV_VERSION "1.1"
+#define DRV_AUTHOR "Bryan Wu, Luke Yang"
+#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
+
+#if defined(CONFIG_BFIN_MAC_USE_L1)
+# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size)
+# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr)
+#else
+# define bfin_mac_alloc(dma_handle, size) \
+ dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL)
+# define bfin_mac_free(dma_handle, ptr) \
+ dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle)
+#endif
+
+#define PKT_BUF_SZ 1580
+
+#define MAX_TIMEOUT_CNT 500
+
+/* pointers to maintain transmit list */
+static struct net_dma_desc_tx *tx_list_head;
+static struct net_dma_desc_tx *tx_list_tail;
+static struct net_dma_desc_rx *rx_list_head;
+static struct net_dma_desc_rx *rx_list_tail;
+static struct net_dma_desc_rx *current_rx_ptr;
+static struct net_dma_desc_tx *current_tx_ptr;
+static struct net_dma_desc_tx *tx_desc;
+static struct net_dma_desc_rx *rx_desc;
+
+static void desc_list_free(void)
+{
+ struct net_dma_desc_rx *r;
+ struct net_dma_desc_tx *t;
+ int i;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+ dma_addr_t dma_handle = 0;
+#endif
+
+ if (tx_desc) {
+ t = tx_list_head;
+ for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+ if (t) {
+ if (t->skb) {
+ dev_kfree_skb(t->skb);
+ t->skb = NULL;
+ }
+ t = t->next;
+ }
+ }
+ bfin_mac_free(dma_handle, tx_desc);
+ }
+
+ if (rx_desc) {
+ r = rx_list_head;
+ for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+ if (r) {
+ if (r->skb) {
+ dev_kfree_skb(r->skb);
+ r->skb = NULL;
+ }
+ r = r->next;
+ }
+ }
+ bfin_mac_free(dma_handle, rx_desc);
+ }
+}
+
+static int desc_list_init(void)
+{
+ int i;
+ struct sk_buff *new_skb;
+#if !defined(CONFIG_BFIN_MAC_USE_L1)
+ /*
+ * This dma_handle is useless in Blackfin dma_alloc_coherent().
+	 * The real dma handle is the return value of dma_alloc_coherent().
+ */
+ dma_addr_t dma_handle;
+#endif
+
+ tx_desc = bfin_mac_alloc(&dma_handle,
+ sizeof(struct net_dma_desc_tx) *
+ CONFIG_BFIN_TX_DESC_NUM);
+ if (tx_desc == NULL)
+ goto init_error;
+
+ rx_desc = bfin_mac_alloc(&dma_handle,
+ sizeof(struct net_dma_desc_rx) *
+ CONFIG_BFIN_RX_DESC_NUM);
+ if (rx_desc == NULL)
+ goto init_error;
+
+ /* init tx_list */
+ tx_list_head = tx_list_tail = tx_desc;
+
+ for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
+ struct net_dma_desc_tx *t = tx_desc + i;
+ struct dma_descriptor *a = &(t->desc_a);
+ struct dma_descriptor *b = &(t->desc_b);
+
+ /*
+ * disable DMA
+ * read from memory WNR = 0
+ * wordsize is 32 bits
+ * 6 half words is desc size
+ * large desc flow
+ */
+ a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ a->start_addr = (unsigned long)t->packet;
+ a->x_count = 0;
+ a->next_dma_desc = b;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * disable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ b->start_addr = (unsigned long)(&(t->status));
+ b->x_count = 0;
+
+ t->skb = NULL;
+ tx_list_tail->desc_b.next_dma_desc = a;
+ tx_list_tail->next = t;
+ tx_list_tail = t;
+ }
+ tx_list_tail->next = tx_list_head; /* tx_list is a circle */
+ tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
+ current_tx_ptr = tx_list_head;
+
+ /* init rx_list */
+ rx_list_head = rx_list_tail = rx_desc;
+
+ for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
+ struct net_dma_desc_rx *r = rx_desc + i;
+ struct dma_descriptor *a = &(r->desc_a);
+ struct dma_descriptor *b = &(r->desc_b);
+
+ /* allocate a new skb for next time receive */
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ if (!new_skb) {
+ printk(KERN_NOTICE DRV_NAME
+ ": init: low on mem - packet dropped\n");
+ goto init_error;
+ }
+ skb_reserve(new_skb, 2);
+ r->skb = new_skb;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * disable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
+ /* since RXDWA is enabled */
+ a->start_addr = (unsigned long)new_skb->data - 2;
+ a->x_count = 0;
+ a->next_dma_desc = b;
+
+ /*
+ * enabled DMA
+ * write to memory WNR = 1
+ * wordsize is 32 bits
+ * enable interrupt
+ * 6 half words is desc size
+ * large desc flow
+ */
+ b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
+ NDSIZE_6 | DMAFLOW_LARGE;
+ b->start_addr = (unsigned long)(&(r->status));
+ b->x_count = 0;
+
+ rx_list_tail->desc_b.next_dma_desc = a;
+ rx_list_tail->next = r;
+ rx_list_tail = r;
+ }
+ rx_list_tail->next = rx_list_head; /* rx_list is a circle */
+ rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
+ current_rx_ptr = rx_list_head;
+
+ return 0;
+
+init_error:
+ desc_list_free();
+ printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+ return -ENOMEM;
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+
+/* Set FER regs to MUX in Ethernet pins */
+static int setup_pin_mux(int action)
+{
+#if defined(CONFIG_BFIN_MAC_RMII)
+ u16 pin_req[] = P_RMII0;
+#else
+ u16 pin_req[] = P_MII0;
+#endif
+
+ if (action) {
+ if (peripheral_request_list(pin_req, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME
+ ": Requesting Peripherals failed\n");
+ return -EFAULT;
+ }
+ } else
+ peripheral_free_list(pin_req);
+
+ return 0;
+}
+
+/* Wait until the previous MDC/MDIO transaction has completed */
+static void poll_mdc_done(void)
+{
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
+ /* poll the STABUSY bit */
+ while ((bfin_read_EMAC_STAADD()) & STABUSY) {
+ mdelay(10);
+ if (timeout_cnt-- < 0) {
+ printk(KERN_ERR DRV_NAME
+			       ": timed out waiting for the MDC/MDIO transaction to complete\n");
+ break;
+ }
+ }
+}
+
+/* Read an off-chip register in a PHY through the MDC/MDIO port */
+static u16 read_phy_reg(u16 PHYAddr, u16 RegAddr)
+{
+ poll_mdc_done();
+ /* read mode */
+ bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) |
+ SET_REGAD(RegAddr) |
+ STABUSY);
+ poll_mdc_done();
+
+ return (u16) bfin_read_EMAC_STADAT();
+}
+
+/* Write an off-chip register in a PHY through the MDC/MDIO port */
+static void raw_write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data)
+{
+ bfin_write_EMAC_STADAT(Data);
+
+ /* write mode */
+ bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) |
+ SET_REGAD(RegAddr) |
+ STAOP |
+ STABUSY);
+
+ poll_mdc_done();
+}
+
+static void write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data)
+{
+ poll_mdc_done();
+ raw_write_phy_reg(PHYAddr, RegAddr, Data);
+}
+
+/* set up the phy */
+static void bf537mac_setphy(struct net_device *dev)
+{
+ u16 phydat;
+ struct bf537mac_local *lp = netdev_priv(dev);
+
+ /* Program PHY registers */
+ pr_debug("start setting up phy\n");
+
+ /* issue a reset */
+ raw_write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, 0x8000);
+
+ /* wait half a second */
+ msleep(500);
+
+ phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL);
+
+ /* advertise flow control supported */
+ phydat = read_phy_reg(lp->PhyAddr, PHYREG_ANAR);
+ phydat |= (1 << 10);
+ write_phy_reg(lp->PhyAddr, PHYREG_ANAR, phydat);
+
+ phydat = 0;
+ if (lp->Negotiate)
+ phydat |= 0x1000; /* enable auto negotiation */
+ else {
+ if (lp->FullDuplex)
+ phydat |= (1 << 8); /* full duplex */
+ else
+ phydat &= (~(1 << 8)); /* half duplex */
+
+ if (!lp->Port10)
+ phydat |= (1 << 13); /* 100 Mbps */
+ else
+ phydat &= (~(1 << 13)); /* 10 Mbps */
+ }
+
+ if (lp->Loopback)
+ phydat |= (1 << 14); /* enable TX->RX loopback */
+
+ write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, phydat);
+ msleep(500);
+
+ phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL);
+ /* check for SMSC PHY */
+ if ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID1) == 0x7) &&
+ ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID2) & 0xfff0) == 0xC0A0)) {
+ /*
+		 * we have an SMSC PHY, so request an interrupt
+		 * on the link-down condition
+ */
+
+ /* enable interrupts */
+ write_phy_reg(lp->PhyAddr, 30, 0x0ff);
+ }
+}
+
+/**************************************************************************/
+void setup_system_regs(struct net_device *dev)
+{
+ int phyaddr;
+ unsigned short sysctl, phydat;
+ u32 opmode;
+ struct bf537mac_local *lp = netdev_priv(dev);
+ int count = 0;
+
+ phyaddr = lp->PhyAddr;
+
+ /* Enable PHY output */
+ if (!(bfin_read_VR_CTL() & PHYCLKOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+
+ /* MDC = 2.5 MHz */
+ sysctl = SET_MDCDIV(24);
+ /* Odd word alignment for Receive Frame DMA word */
+ /* Configure checksum support and rcve frame word alignment */
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ sysctl |= RXDWA | RXCKS;
+#else
+ sysctl |= RXDWA;
+#endif
+ bfin_write_EMAC_SYSCTL(sysctl);
+ /* auto negotiation on */
+ /* full duplex */
+ /* 100 Mbps */
+ phydat = PHY_ANEG_EN | PHY_DUPLEX | PHY_SPD_SET;
+ write_phy_reg(phyaddr, PHYREG_MODECTL, phydat);
+
+ /* test if full duplex supported */
+ do {
+ msleep(100);
+ phydat = read_phy_reg(phyaddr, PHYREG_MODESTAT);
+ if (count > 30) {
+ printk(KERN_NOTICE DRV_NAME ": Link is down\n");
+ printk(KERN_NOTICE DRV_NAME
+			       ": please check your network connection\n");
+ break;
+ }
+ count++;
+ } while (!(phydat & 0x0004));
+
+ phydat = read_phy_reg(phyaddr, PHYREG_ANLPAR);
+
+ if ((phydat & 0x0100) || (phydat & 0x0040)) {
+ opmode = FDMODE;
+ } else {
+ opmode = 0;
+ printk(KERN_INFO DRV_NAME
+ ": Network is set to half duplex\n");
+ }
+
+#if defined(CONFIG_BFIN_MAC_RMII)
+	opmode |= RMII;	/* for now, only 100 Mbit/s is supported */
+#endif
+
+ bfin_write_EMAC_OPMODE(opmode);
+
+ bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
+
+ /* Initialize the TX DMA channel registers */
+ bfin_write_DMA2_X_COUNT(0);
+ bfin_write_DMA2_X_MODIFY(4);
+ bfin_write_DMA2_Y_COUNT(0);
+ bfin_write_DMA2_Y_MODIFY(0);
+
+ /* Initialize the RX DMA channel registers */
+ bfin_write_DMA1_X_COUNT(0);
+ bfin_write_DMA1_X_MODIFY(4);
+ bfin_write_DMA1_Y_COUNT(0);
+ bfin_write_DMA1_Y_MODIFY(0);
+}
+
+void setup_mac_addr(u8 * mac_addr)
+{
+ u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
+ u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
+
+ /* this depends on a little-endian machine */
+ bfin_write_EMAC_ADDRLO(addr_low);
+ bfin_write_EMAC_ADDRHI(addr_hi);
+}
+
+static void adjust_tx_list(void)
+{
+ int timeout_cnt = MAX_TIMEOUT_CNT;
+
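+	/* reclaim TX descriptors whose status word the MAC has already written back */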
+ if (tx_list_head->status.status_word != 0
+ && current_tx_ptr != tx_list_head) {
+		goto adjust_head;	/* something was released, go reclaim it */
+ }
+
+ /*
+ * if nothing released, check wait condition
+ * current's next can not be the head,
+ * otherwise the dma will not stop as we want
+ */
+ if (current_tx_ptr->next->next == tx_list_head) {
+ while (tx_list_head->status.status_word == 0) {
+ mdelay(10);
+ if (tx_list_head->status.status_word != 0
+ || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
+ goto adjust_head;
+ }
+ if (timeout_cnt-- < 0) {
+ printk(KERN_ERR DRV_NAME
+				": timed out waiting to adjust the tx list head\n");
+ break;
+ }
+ }
+ if (tx_list_head->status.status_word != 0) {
+ goto adjust_head;
+ }
+ }
+
+ return;
+
+adjust_head:
+ do {
+ tx_list_head->desc_a.config &= ~DMAEN;
+ tx_list_head->status.status_word = 0;
+ if (tx_list_head->skb) {
+ dev_kfree_skb(tx_list_head->skb);
+ tx_list_head->skb = NULL;
+ } else {
+ printk(KERN_ERR DRV_NAME
+ ": no sk_buff in a transmitted frame!\n");
+ }
+ tx_list_head = tx_list_head->next;
+ } while (tx_list_head->status.status_word != 0
+ && current_tx_ptr != tx_list_head);
+ return;
+
+}
+
+static int bf537mac_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bf537mac_local *lp = netdev_priv(dev);
+ unsigned int data;
+
+ current_tx_ptr->skb = skb;
+
+ /*
+ * Is skb->data always 16-bit aligned?
+ * Do we need to memcpy((char *)(tail->packet + 2), skb->data, len)?
+ */
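+	/* the MAC expects a 16-bit frame length word right before the payload */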
+ if ((((unsigned int)(skb->data)) & 0x02) == 2) {
+ /* move skb->data to current_tx_ptr payload */
+ data = (unsigned int)(skb->data) - 2;
+ *((unsigned short *)data) = (unsigned short)(skb->len);
+ current_tx_ptr->desc_a.start_addr = (unsigned long)data;
+ /* this is important! */
+ blackfin_dcache_flush_range(data, (data + (skb->len)) + 2);
+
+ } else {
+ *((unsigned short *)(current_tx_ptr->packet)) =
+ (unsigned short)(skb->len);
+ memcpy((char *)(current_tx_ptr->packet + 2), skb->data,
+ (skb->len));
+ current_tx_ptr->desc_a.start_addr =
+ (unsigned long)current_tx_ptr->packet;
+ if (current_tx_ptr->status.status_word != 0)
+ current_tx_ptr->status.status_word = 0;
+ blackfin_dcache_flush_range((unsigned int)current_tx_ptr->
+ packet,
+ (unsigned int)(current_tx_ptr->
+ packet + skb->len) +
+ 2);
+ }
+
+ /* enable this packet's dma */
+ current_tx_ptr->desc_a.config |= DMAEN;
+
+ /* tx dma is running, just return */
+ if (bfin_read_DMA2_IRQ_STATUS() & 0x08)
+ goto out;
+
+ /* tx dma is not running */
+ bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
+ /* dma enabled, read from memory, size is 6 */
+ bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
+ /* Turn on the EMAC tx */
+ bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
+
+out:
+ adjust_tx_list();
+ current_tx_ptr = current_tx_ptr->next;
+ dev->trans_start = jiffies;
+ lp->stats.tx_packets++;
+ lp->stats.tx_bytes += (skb->len);
+ return 0;
+}
+
+static void bf537mac_rx(struct net_device *dev)
+{
+ struct sk_buff *skb, *new_skb;
+ struct bf537mac_local *lp = netdev_priv(dev);
+ unsigned short len;
+
+ /* allocate a new skb for next time receive */
+ skb = current_rx_ptr->skb;
+ new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
+ if (!new_skb) {
+ printk(KERN_NOTICE DRV_NAME
+ ": rx: low on mem - packet dropped\n");
+ lp->stats.rx_dropped++;
+ goto out;
+ }
+ /* reserve 2 bytes for RXDWA padding */
+ skb_reserve(new_skb, 2);
+ current_rx_ptr->skb = new_skb;
+ current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
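+	/* the received frame length is carried in the low bits of the DMA status word */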
+ len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+ skb_put(skb, len);
+ blackfin_dcache_invalidate_range((unsigned long)skb->head,
+ (unsigned long)skb->tail);
+
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ skb->csum = current_rx_ptr->status.ip_payload_csum;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+#endif
+
+ netif_rx(skb);
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += len;
+ current_rx_ptr->status.status_word = 0x00000000;
+ current_rx_ptr = current_rx_ptr->next;
+
+out:
+ return;
+}
+
+/* interrupt routine to handle rx and error signal */
+static irqreturn_t bf537mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ int number = 0;
+
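+	/* keep pulling completed RX descriptors, then acknowledge the DMA interrupt */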
+get_one_packet:
+ if (current_rx_ptr->status.status_word == 0) {
+ /* no more new packet received */
+ if (number == 0) {
+ if (current_rx_ptr->next->status.status_word != 0) {
+ current_rx_ptr = current_rx_ptr->next;
+ goto real_rx;
+ }
+ }
+ bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
+ DMA_DONE | DMA_ERR);
+ return IRQ_HANDLED;
+ }
+
+real_rx:
+ bf537mac_rx(dev);
+ number++;
+ goto get_one_packet;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bf537mac_poll(struct net_device *dev)
+{
+ disable_irq(IRQ_MAC_RX);
+ bf537mac_interrupt(IRQ_MAC_RX, dev);
+ enable_irq(IRQ_MAC_RX);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static void bf537mac_reset(void)
+{
+ unsigned int opmode;
+
+ opmode = bfin_read_EMAC_OPMODE();
+ opmode &= (~RE);
+ opmode &= (~TE);
+ /* Turn off the EMAC */
+ bfin_write_EMAC_OPMODE(opmode);
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static int bf537mac_enable(struct net_device *dev)
+{
+ u32 opmode;
+
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ /* Set RX DMA */
+ bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
+ bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
+
+ /* Wait MII done */
+ poll_mdc_done();
+
+ /* We enable only RX here */
+ /* ASTP : Enable Automatic Pad Stripping
+ PR : Promiscuous Mode for test
+ PSF : Receive frames with total length less than 64 bytes.
+ FDMODE : Full Duplex Mode
+ LB : Internal Loopback for test
+ RE : Receiver Enable */
+ opmode = bfin_read_EMAC_OPMODE();
+ if (opmode & FDMODE)
+ opmode |= PSF;
+ else
+ opmode |= DRO | DC | PSF;
+ opmode |= RE;
+
+#if defined(CONFIG_BFIN_MAC_RMII)
+	opmode |= RMII;		/* for now, only 100 Mbit/s is supported */
+#ifdef CONFIG_BF_REV_0_2
+ opmode |= TE;
+#endif
+#endif
+ /* Turn on the EMAC rx */
+ bfin_write_EMAC_OPMODE(opmode);
+
+ return 0;
+}
+
+/* Our watchdog timed out. Called by the networking layer */
+static void bf537mac_timeout(struct net_device *dev)
+{
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ bf537mac_reset();
+
+ /* reset tx queue */
+ tx_list_tail = tx_list_head->next;
+
+ bf537mac_enable(dev);
+
+ /* We can accept TX packets again */
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *bf537mac_query_statistics(struct net_device
+ *dev)
+{
+ struct bf537mac_local *lp = netdev_priv(dev);
+
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ return &lp->stats;
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void bf537mac_set_multicast_list(struct net_device *dev)
+{
+ u32 sysctl;
+
+ if (dev->flags & IFF_PROMISC) {
+ printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= RAF;
+ bfin_write_EMAC_OPMODE(sysctl);
+ } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ /* accept all multicast */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= PAM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ } else {
+ /* clear promisc or multicast mode */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl &= ~(RAF | PAM);
+ bfin_write_EMAC_OPMODE(sysctl);
+ }
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void bf537mac_shutdown(struct net_device *dev)
+{
+ /* Turn off the EMAC */
+ bfin_write_EMAC_OPMODE(0x00000000);
+ /* Turn off the EMAC RX DMA */
+ bfin_write_DMA1_CONFIG(0x0000);
+ bfin_write_DMA2_CONFIG(0x0000);
+}
+
+/*
+ * Open and Initialize the interface
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int bf537mac_open(struct net_device *dev)
+{
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ /*
+	 * Check that the address is valid. If it's not, refuse
+ * to bring the device up. The user must specify an
+ * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
+ */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+ return -EINVAL;
+ }
+
+ /* initial rx and tx list */
+ desc_list_init();
+
+ bf537mac_setphy(dev);
+ setup_system_regs(dev);
+ bf537mac_reset();
+ bf537mac_enable(dev);
+
+ pr_debug("hardware init finished\n");
+ netif_start_queue(dev);
+ netif_carrier_on(dev);
+
+ return 0;
+}
+
+/*
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world. Caused by
+ * an 'ifconfig ethX down'
+ */
+static int bf537mac_close(struct net_device *dev)
+{
+ pr_debug("%s: %s\n", dev->name, __FUNCTION__);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ /* clear everything */
+ bf537mac_shutdown(dev);
+
+ /* free the rx/tx buffers */
+ desc_list_free();
+
+ return 0;
+}
+
+static int __init bf537mac_probe(struct net_device *dev)
+{
+ struct bf537mac_local *lp = netdev_priv(dev);
+ int retval;
+
+ /* Grab the MAC address in the MAC */
+ *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
+ *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
+
+ /* probe mac */
+	/* todo: how should we probe? which register is the revision register? */
+ bfin_write_EMAC_ADDRLO(0x12345678);
+ if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
+ pr_debug("can't detect bf537 mac!\n");
+ retval = -ENODEV;
+ goto err_out;
+ }
+
+ /* set the GPIO pins to Ethernet mode */
+ retval = setup_pin_mux(1);
+
+ if (retval)
+ return retval;
+
+ /*Is it valid? (Did bootloader initialize it?) */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ /* Grab the MAC from the board somehow - this is done in the
+ arch/blackfin/mach-bf537/boards/eth_mac.c */
+ get_bf537_ether_addr(dev->dev_addr);
+ }
+
+ /* If still not valid, get a random one */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ }
+
+ setup_mac_addr(dev->dev_addr);
+
+ /* Fill in the fields of the device structure with ethernet values. */
+ ether_setup(dev);
+
+ dev->open = bf537mac_open;
+ dev->stop = bf537mac_close;
+ dev->hard_start_xmit = bf537mac_hard_start_xmit;
+ dev->tx_timeout = bf537mac_timeout;
+ dev->get_stats = bf537mac_query_statistics;
+ dev->set_multicast_list = bf537mac_set_multicast_list;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = bf537mac_poll;
+#endif
+
+ /* fill in some of the fields */
+ lp->version = 1;
+ lp->PhyAddr = 0x01;
+ lp->CLKIN = 25;
+ lp->FullDuplex = 0;
+ lp->Negotiate = 1;
+ lp->FlowControl = 0;
+ spin_lock_init(&lp->lock);
+
+ /* now, enable interrupts */
+ /* register irq handler */
+ if (request_irq
+ (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ "BFIN537_MAC_RX", dev)) {
+ printk(KERN_WARNING DRV_NAME
+ ": Unable to attach BlackFin MAC RX interrupt\n");
+ return -EBUSY;
+ }
+
+ /* Enable PHY output early */
+ if (!(bfin_read_VR_CTL() & PHYCLKOE))
+ bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+
+ retval = register_netdev(dev);
+ if (retval == 0) {
+ /* now, print out the card info, in a short format.. */
+ printk(KERN_INFO "%s: Version %s, %s\n",
+ DRV_NAME, DRV_VERSION, DRV_DESC);
+ }
+
+err_out:
+ return retval;
+}
+
+static int bfin_mac_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+
+ ndev = alloc_etherdev(sizeof(struct bf537mac_local));
+ if (!ndev) {
+ printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
+ return -ENOMEM;
+ }
+
+ SET_MODULE_OWNER(ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (bf537mac_probe(ndev) != 0) {
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(ndev);
+ printk(KERN_WARNING DRV_NAME ": not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int bfin_mac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(ndev);
+
+ free_irq(IRQ_MAC_RX, ndev);
+
+ free_netdev(ndev);
+
+ setup_pin_mux(0);
+
+ return 0;
+}
+
+static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+static int bfin_mac_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver bfin_mac_driver = {
+ .probe = bfin_mac_probe,
+ .remove = bfin_mac_remove,
+ .resume = bfin_mac_resume,
+ .suspend = bfin_mac_suspend,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init bfin_mac_init(void)
+{
+ return platform_driver_register(&bfin_mac_driver);
+}
+
+module_init(bfin_mac_init);
+
+static void __exit bfin_mac_cleanup(void)
+{
+ platform_driver_unregister(&bfin_mac_driver);
+}
+
+module_exit(bfin_mac_cleanup);
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
new file mode 100644
index 000000000000..af87189b85fa
--- /dev/null
+++ b/drivers/net/bfin_mac.h
@@ -0,0 +1,132 @@
+/*
+ * File: drivers/net/bfin_mac.c
+ * Based on:
+ * Maintainer:
+ * Bryan Wu <bryan.wu@analog.com>
+ *
+ * Original author:
+ * Luke Yang <luke.yang@analog.com>
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program ; see the file COPYING.
+ * If not, write to the Free Software Foundation,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * PHY REGISTER NAMES
+ */
+#define PHYREG_MODECTL 0x0000
+#define PHYREG_MODESTAT 0x0001
+#define PHYREG_PHYID1 0x0002
+#define PHYREG_PHYID2 0x0003
+#define PHYREG_ANAR 0x0004
+#define PHYREG_ANLPAR 0x0005
+#define PHYREG_ANER 0x0006
+#define PHYREG_NSR 0x0010
+#define PHYREG_LBREMR 0x0011
+#define PHYREG_REC 0x0012
+#define PHYREG_10CFG 0x0013
+#define PHYREG_PHY1_1 0x0014
+#define PHYREG_PHY1_2 0x0015
+#define PHYREG_PHY2 0x0016
+#define PHYREG_TW_1 0x0017
+#define PHYREG_TW_2 0x0018
+#define PHYREG_TEST 0x0019
+
+#define PHY_RESET 0x8000
+#define PHY_ANEG_EN 0x1000
+#define PHY_DUPLEX 0x0100
+#define PHY_SPD_SET 0x2000
+
+#define BFIN_MAC_CSUM_OFFLOAD
+
+struct dma_descriptor {
+ struct dma_descriptor *next_dma_desc;
+ unsigned long start_addr;
+ unsigned short config;
+ unsigned short x_count;
+};
+
+struct status_area_rx {
+#if defined(BFIN_MAC_CSUM_OFFLOAD)
+ unsigned short ip_hdr_csum; /* ip header checksum */
+ /* ip payload(udp or tcp or others) checksum */
+ unsigned short ip_payload_csum;
+#endif
+ unsigned long status_word; /* the frame status word */
+};
+
+struct status_area_tx {
+ unsigned long status_word; /* the frame status word */
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_rx {
+ struct net_dma_desc_rx *next;
+ struct sk_buff *skb;
+ struct dma_descriptor desc_a;
+ struct dma_descriptor desc_b;
+ struct status_area_rx status;
+};
+
+/* use two descriptors for a packet */
+struct net_dma_desc_tx {
+ struct net_dma_desc_tx *next;
+ struct sk_buff *skb;
+ struct dma_descriptor desc_a;
+ struct dma_descriptor desc_b;
+ unsigned char packet[1560];
+ struct status_area_tx status;
+};
+
+struct bf537mac_local {
+ /*
+ * these are things that the kernel wants me to keep, so users
+ * can find out semi-useless statistics of how well the card is
+ * performing
+ */
+ struct net_device_stats stats;
+
+ int version;
+
+ int FlowEnabled; /* record if data flow is active */
+ int EtherIntIVG; /* IVG for the ethernet interrupt */
+ int RXIVG; /* IVG for the RX completion */
+ int TXIVG; /* IVG for the TX completion */
+ int PhyAddr; /* PHY address */
+	int OpMode;		/* set these bits in the OPMODE regs */
+ int Port10; /* set port speed to 10 Mbit/s */
+ int GenChksums; /* IP checksums to be calculated */
+	int NoRcveLnth;		/* don't insert recv length at start of buffer */
+ int StripPads; /* remove trailing pad bytes */
+ int FullDuplex; /* set full duplex mode */
+ int Negotiate; /* enable auto negotiation */
+ int Loopback; /* loopback at the PHY */
+ int Cache; /* Buffers may be cached */
+ int FlowControl; /* flow control active */
+ int CLKIN; /* clock in value in MHZ */
+ unsigned short IntMask; /* interrupt mask */
+ unsigned char Mac[6]; /* MAC address of the board */
+ spinlock_t lock;
+};
+
+extern void get_bf537_ether_addr(char *addr);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ce3ed67a878e..a729da061bbb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.5.11"
-#define DRV_MODULE_RELDATE "June 4, 2007"
+#define DRV_MODULE_VERSION "1.6.3"
+#define DRV_MODULE_RELDATE "July 16, 2007"
#define RUN_AT(x) (jiffies + (x))
@@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = {
static struct flash_spec flash_table[] =
{
+#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
/* Slow EEPROM */
{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
- 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - slow"},
/* Expansion entry 0001 */
{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0001"},
/* Saifun SA25F010 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
"Non-buffered flash (128kB)"},
/* Saifun SA25F020 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
"Non-buffered flash (256kB)"},
/* Expansion entry 0100 */
{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0100"},
/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
- 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-bufferred)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
- 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
"Entry 0110: ST M45PE20 (256kB non-bufferred)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
"Non-buffered flash (64kB)"},
/* Fast EEPROM */
{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
- 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - fast"},
/* Expansion entry 1001 */
{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1001"},
/* Expansion entry 1010 */
{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1010"},
/* ATMEL AT45DB011B (buffered flash) */
{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
"Buffered flash (128kB)"},
/* Expansion entry 1100 */
{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1100"},
/* Expansion entry 1101 */
{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1101"},
/* Ateml Expansion entry 1110 */
{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1110 (Atmel)"},
/* ATMEL AT45DB021B (buffered flash) */
{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
"Buffered flash (256kB)"},
};
+static struct flash_spec flash_5709 = {
+ .flags = BNX2_NV_BUFFERED,
+ .page_bits = BCM5709_FLASH_PAGE_BITS,
+ .page_size = BCM5709_FLASH_PAGE_SIZE,
+ .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
+ .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
+ .name = "5709 Buffered flash (256kB)",
+};
+
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
@@ -550,6 +561,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
{
u32 fw_link_status = 0;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return;
+
if (bp->link_up) {
u32 bmsr;
@@ -601,12 +615,21 @@ bnx2_report_fw_link(struct bnx2 *bp)
REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
+static char *
+bnx2_xceiver_str(struct bnx2 *bp)
+{
+ return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
+ ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
+ "Copper"));
+}
+
static void
bnx2_report_link(struct bnx2 *bp)
{
if (bp->link_up) {
netif_carrier_on(bp->dev);
- printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+ printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
+ bnx2_xceiver_str(bp));
printk("%d Mbps ", bp->line_speed);
@@ -630,7 +653,8 @@ bnx2_report_link(struct bnx2 *bp)
}
else {
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+ printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
+ bnx2_xceiver_str(bp));
}
bnx2_report_fw_link(bp);
@@ -1100,6 +1124,9 @@ bnx2_set_link(struct bnx2 *bp)
return 0;
}
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return 0;
+
link_up = bp->link_up;
bnx2_enable_bmsr1(bp);
@@ -1210,12 +1237,74 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
return adv;
}
+static int bnx2_fw_sync(struct bnx2 *, u32, int);
+
static int
-bnx2_setup_serdes_phy(struct bnx2 *bp)
+bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+{
+ u32 speed_arg = 0, pause_adv;
+
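+	/* encode the requested speed/duplex/pause settings for the firmware link command */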
+ pause_adv = bnx2_phy_get_pause_adv(bp);
+
+ if (bp->autoneg & AUTONEG_SPEED) {
+ speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
+ if (bp->advertising & ADVERTISED_10baseT_Half)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+ if (bp->advertising & ADVERTISED_10baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+ if (bp->advertising & ADVERTISED_100baseT_Half)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+ if (bp->advertising & ADVERTISED_100baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+ if (bp->advertising & ADVERTISED_1000baseT_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+ if (bp->advertising & ADVERTISED_2500baseX_Full)
+ speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+ } else {
+ if (bp->req_line_speed == SPEED_2500)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+ else if (bp->req_line_speed == SPEED_1000)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+ else if (bp->req_line_speed == SPEED_100) {
+ if (bp->req_duplex == DUPLEX_FULL)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+ else
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+ } else if (bp->req_line_speed == SPEED_10) {
+ if (bp->req_duplex == DUPLEX_FULL)
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+ else
+ speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+ }
+ }
+
+ if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
+ speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
+ if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
+ speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
+
+ if (port == PORT_TP)
+ speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
+ BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
+
+ REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
+
+ spin_unlock_bh(&bp->phy_lock);
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
+ spin_lock_bh(&bp->phy_lock);
+
+ return 0;
+}
+
+static int
+bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
u32 adv, bmcr;
u32 new_adv = 0;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return (bnx2_setup_remote_phy(bp, port));
+
if (!(bp->autoneg & AUTONEG_SPEED)) {
u32 new_bmcr;
int force_link_down = 0;
@@ -1323,7 +1412,9 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
}
#define ETHTOOL_ALL_FIBRE_SPEED \
- (ADVERTISED_1000baseT_Full)
+ (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
+ (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
+ (ADVERTISED_1000baseT_Full)
#define ETHTOOL_ALL_COPPER_SPEED \
(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
@@ -1335,6 +1426,188 @@ bnx2_setup_serdes_phy(struct bnx2 *bp)
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
+static void
+bnx2_set_default_remote_link(struct bnx2 *bp)
+{
+ u32 link;
+
+ if (bp->phy_port == PORT_TP)
+ link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
+ else
+ link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
+
+ if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
+ bp->req_line_speed = 0;
+ bp->autoneg |= AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_Autoneg;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+ bp->advertising |= ADVERTISED_10baseT_Half;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+ bp->advertising |= ADVERTISED_10baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+ bp->advertising |= ADVERTISED_100baseT_Half;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+ bp->advertising |= ADVERTISED_100baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+ bp->advertising |= ADVERTISED_1000baseT_Full;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+ bp->advertising |= ADVERTISED_2500baseX_Full;
+ } else {
+ bp->autoneg = 0;
+ bp->advertising = 0;
+ bp->req_duplex = DUPLEX_FULL;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
+ bp->req_line_speed = SPEED_10;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+ bp->req_duplex = DUPLEX_HALF;
+ }
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
+ bp->req_line_speed = SPEED_100;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+ bp->req_duplex = DUPLEX_HALF;
+ }
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+ bp->req_line_speed = SPEED_1000;
+ if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+ bp->req_line_speed = SPEED_2500;
+ }
+}
+
+static void
+bnx2_set_default_link(struct bnx2 *bp)
+{
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return bnx2_set_default_remote_link(bp);
+
+ bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
+ bp->req_line_speed = 0;
+ if (bp->phy_flags & PHY_SERDES_FLAG) {
+ u32 reg;
+
+ bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
+ reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+ if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+ bp->autoneg = 0;
+ bp->req_line_speed = bp->line_speed = SPEED_1000;
+ bp->req_duplex = DUPLEX_FULL;
+ }
+ } else
+ bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
+}
+
+static void
+bnx2_send_heart_beat(struct bnx2 *bp)
+{
+ u32 msg;
+ u32 addr;
+
+ spin_lock(&bp->indirect_lock);
+ msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
+ addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+ REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+ spin_unlock(&bp->indirect_lock);
+}
+
+static void
+bnx2_remote_phy_event(struct bnx2 *bp)
+{
+ u32 msg;
+ u8 link_up = bp->link_up;
+ u8 old_port;
+
+ msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
+
+ if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
+ bnx2_send_heart_beat(bp);
+
+ msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
+
+ if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
+ bp->link_up = 0;
+ else {
+ u32 speed;
+
+ bp->link_up = 1;
+ speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
+ bp->duplex = DUPLEX_FULL;
+ switch (speed) {
+ case BNX2_LINK_STATUS_10HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_10FULL:
+ bp->line_speed = SPEED_10;
+ break;
+ case BNX2_LINK_STATUS_100HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_100BASE_T4:
+ case BNX2_LINK_STATUS_100FULL:
+ bp->line_speed = SPEED_100;
+ break;
+ case BNX2_LINK_STATUS_1000HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_1000FULL:
+ bp->line_speed = SPEED_1000;
+ break;
+ case BNX2_LINK_STATUS_2500HALF:
+ bp->duplex = DUPLEX_HALF;
+ case BNX2_LINK_STATUS_2500FULL:
+ bp->line_speed = SPEED_2500;
+ break;
+ default:
+ bp->line_speed = 0;
+ break;
+ }
+
+ spin_lock(&bp->phy_lock);
+ bp->flow_ctrl = 0;
+ if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+ (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+ if (bp->duplex == DUPLEX_FULL)
+ bp->flow_ctrl = bp->req_flow_ctrl;
+ } else {
+ if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
+ bp->flow_ctrl |= FLOW_CTRL_TX;
+ if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
+ bp->flow_ctrl |= FLOW_CTRL_RX;
+ }
+
+ old_port = bp->phy_port;
+ if (msg & BNX2_LINK_STATUS_SERDES_LINK)
+ bp->phy_port = PORT_FIBRE;
+ else
+ bp->phy_port = PORT_TP;
+
+ if (old_port != bp->phy_port)
+ bnx2_set_default_link(bp);
+
+ spin_unlock(&bp->phy_lock);
+ }
+ if (bp->link_up != link_up)
+ bnx2_report_link(bp);
+
+ bnx2_set_mac_link(bp);
+}
+
+static int
+bnx2_set_remote_link(struct bnx2 *bp)
+{
+ u32 evt_code;
+
+ evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
+ switch (evt_code) {
+ case BNX2_FW_EVT_CODE_LINK_EVENT:
+ bnx2_remote_phy_event(bp);
+ break;
+ case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
+ default:
+ bnx2_send_heart_beat(bp);
+ break;
+ }
+ return 0;
+}
+
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
@@ -1433,13 +1706,13 @@ bnx2_setup_copper_phy(struct bnx2 *bp)
}
static int
-bnx2_setup_phy(struct bnx2 *bp)
+bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
if (bp->loopback == MAC_LOOPBACK)
return 0;
if (bp->phy_flags & PHY_SERDES_FLAG) {
- return (bnx2_setup_serdes_phy(bp));
+ return (bnx2_setup_serdes_phy(bp, port));
}
else {
return (bnx2_setup_copper_phy(bp));
@@ -1659,6 +1932,9 @@ bnx2_init_phy(struct bnx2 *bp)
REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ goto setup_phy;
+
bnx2_read_phy(bp, MII_PHYSID1, &val);
bp->phy_id = val << 16;
bnx2_read_phy(bp, MII_PHYSID2, &val);
@@ -1676,7 +1952,9 @@ bnx2_init_phy(struct bnx2 *bp)
rc = bnx2_init_copper_phy(bp);
}
- bnx2_setup_phy(bp);
+setup_phy:
+ if (!rc)
+ rc = bnx2_setup_phy(bp, bp->phy_port);
return rc;
}
@@ -1984,6 +2262,9 @@ bnx2_phy_int(struct bnx2 *bp)
bnx2_set_link(bp);
spin_unlock(&bp->phy_lock);
}
+ if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
+ bnx2_set_remote_link(bp);
+
}
static void
@@ -2297,6 +2578,7 @@ bnx2_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct bnx2 *bp = netdev_priv(dev);
+ struct status_block *sblk = bp->status_blk;
/* When using INTx, it is possible for the interrupt to arrive
* at the CPU before the status block posted prior to the
@@ -2304,7 +2586,7 @@ bnx2_interrupt(int irq, void *dev_instance)
* When using MSI, the MSI message will always complete after
* the status block write.
*/
- if ((bp->status_blk->status_idx == bp->last_status_idx) &&
+ if ((sblk->status_idx == bp->last_status_idx) &&
(REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
return IRQ_NONE;
@@ -2313,16 +2595,25 @@ bnx2_interrupt(int irq, void *dev_instance)
BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+ /* Read back to deassert IRQ immediately to avoid too many
+ * spurious interrupts.
+ */
+ REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
- netif_rx_schedule(dev);
+ if (netif_rx_schedule_prep(dev)) {
+ bp->last_status_idx = sblk->status_idx;
+ __netif_rx_schedule(dev);
+ }
return IRQ_HANDLED;
}
-#define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
+#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
+ STATUS_ATTN_BITS_TIMER_ABORT)
static inline int
bnx2_has_work(struct bnx2 *bp)
@@ -3009,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
val = REG_RD(bp, BNX2_MISC_CFG);
REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
- if (!bp->flash_info->buffered) {
+ if (bp->flash_info->flags & BNX2_NV_WREN) {
int j;
REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
@@ -3069,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
u32 cmd;
int j;
- if (bp->flash_info->buffered)
+ if (bp->flash_info->flags & BNX2_NV_BUFFERED)
/* Buffered flash, no erase needed */
return 0;
@@ -3112,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
/* Build the command word. */
cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
- /* Calculate an offset of a buffered flash. */
- if (bp->flash_info->buffered) {
+ /* Calculate an offset of a buffered flash, not needed for 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
offset = ((offset / bp->flash_info->page_size) <<
bp->flash_info->page_bits) +
(offset % bp->flash_info->page_size);
@@ -3159,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
/* Build the command word. */
cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
- /* Calculate an offset of a buffered flash. */
- if (bp->flash_info->buffered) {
+ /* Calculate an offset of a buffered flash, not needed for 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
offset = ((offset / bp->flash_info->page_size) <<
bp->flash_info->page_bits) +
(offset % bp->flash_info->page_size);
@@ -3198,15 +3489,19 @@ static int
bnx2_init_nvram(struct bnx2 *bp)
{
u32 val;
- int j, entry_count, rc;
+ int j, entry_count, rc = 0;
struct flash_spec *flash;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flash_info = &flash_5709;
+ goto get_flash_size;
+ }
+
/* Determine the selected interface. */
val = REG_RD(bp, BNX2_NVM_CFG1);
entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
- rc = 0;
if (val & 0x40000000) {
/* Flash interface has been reconfigured */
@@ -3262,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp)
return -ENODEV;
}
+get_flash_size:
val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
if (val)
@@ -3426,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
buf = align_buf;
}
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
flash_buffer = kmalloc(264, GFP_KERNEL);
if (flash_buffer == NULL) {
rc = -ENOMEM;
@@ -3459,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
bnx2_enable_nvram_access(bp);
cmd_flags = BNX2_NVM_COMMAND_FIRST;
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
int j;
/* Read the whole page into the buffer
@@ -3487,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write back the buffer data from page_start to
* data_start */
i = 0;
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
/* Erase the page */
if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
goto nvram_write_end;
@@ -3511,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write the new data from data_start to data_end */
for (addr = data_start; addr < data_end; addr += 4, i += 4) {
if ((addr == page_end - 4) ||
- ((bp->flash_info->buffered) &&
+ ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
(addr == data_end - 4))) {
cmd_flags |= BNX2_NVM_COMMAND_LAST;
@@ -3528,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write back the buffer data from data_end
* to page_end */
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
for (addr = data_end; addr < page_end;
addr += 4, i += 4) {
@@ -3562,6 +3858,36 @@ nvram_write_end:
return rc;
}
+static void
+bnx2_init_remote_phy(struct bnx2 *bp)
+{
+ u32 val;
+
+ bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
+ if (!(bp->phy_flags & PHY_SERDES_FLAG))
+ return;
+
+ val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
+ if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
+ return;
+
+ if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
+ if (netif_running(bp->dev)) {
+ val = BNX2_DRV_ACK_CAP_SIGNATURE |
+ BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
+ REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
+ val);
+ }
+ bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
+
+ val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
+ if (val & BNX2_LINK_STATUS_SERDES_LINK)
+ bp->phy_port = PORT_FIBRE;
+ else
+ bp->phy_port = PORT_TP;
+ }
+}
+
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
@@ -3642,6 +3968,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
if (rc)
return rc;
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_init_remote_phy(bp);
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ bnx2_set_default_remote_link(bp);
+ spin_unlock_bh(&bp->phy_lock);
+
if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
/* Adjust the voltage regular to two steps lower. The default
* of this register is 0x0000000e. */
@@ -3791,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp)
if (CHIP_NUM(bp) == CHIP_NUM_5708)
REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
else
- REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
+ REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
if (CHIP_ID(bp) == CHIP_ID_5706_A1)
@@ -3811,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
- if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
- BNX2_PORT_FEATURE_ASF_ENABLED)
- bp->flags |= ASF_ENABLE_FLAG;
-
/* Initialize the receive filter. */
bnx2_set_rx_mode(bp->dev);
@@ -3826,7 +4154,7 @@ bnx2_init_chip(struct bnx2 *bp)
rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
0);
- REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
+ REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
udelay(20);
@@ -4069,8 +4397,8 @@ bnx2_init_nic(struct bnx2 *bp)
spin_lock_bh(&bp->phy_lock);
bnx2_init_phy(bp);
- spin_unlock_bh(&bp->phy_lock);
bnx2_set_link(bp);
+ spin_unlock_bh(&bp->phy_lock);
return 0;
}
@@ -4600,6 +4928,9 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return;
+
if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
bp->serdes_an_pending = 0;
return;
@@ -4631,7 +4962,6 @@ static void
bnx2_timer(unsigned long data)
{
struct bnx2 *bp = (struct bnx2 *) data;
- u32 msg;
if (!netif_running(bp->dev))
return;
@@ -4639,8 +4969,7 @@ bnx2_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnx2_restart_timer;
- msg = (u32) ++bp->fw_drv_pulse_wr_seq;
- REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
+ bnx2_send_heart_beat(bp);
bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
@@ -5083,17 +5412,25 @@ static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2 *bp = netdev_priv(dev);
+ int support_serdes = 0, support_copper = 0;
cmd->supported = SUPPORTED_Autoneg;
- if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
+ support_serdes = 1;
+ support_copper = 1;
+ } else if (bp->phy_port == PORT_FIBRE)
+ support_serdes = 1;
+ else
+ support_copper = 1;
+
+ if (support_serdes) {
cmd->supported |= SUPPORTED_1000baseT_Full |
SUPPORTED_FIBRE;
if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
cmd->supported |= SUPPORTED_2500baseX_Full;
- cmd->port = PORT_FIBRE;
}
- else {
+ if (support_copper) {
cmd->supported |= SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -5101,9 +5438,10 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
SUPPORTED_1000baseT_Full |
SUPPORTED_TP;
- cmd->port = PORT_TP;
}
+ spin_lock_bh(&bp->phy_lock);
+ cmd->port = bp->phy_port;
cmd->advertising = bp->advertising;
if (bp->autoneg & AUTONEG_SPEED) {
@@ -5121,6 +5459,7 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = -1;
cmd->duplex = -1;
}
+ spin_unlock_bh(&bp->phy_lock);
cmd->transceiver = XCVR_INTERNAL;
cmd->phy_address = bp->phy_addr;
@@ -5136,6 +5475,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
u8 req_duplex = bp->req_duplex;
u16 req_line_speed = bp->req_line_speed;
u32 advertising = bp->advertising;
+ int err = -EINVAL;
+
+ spin_lock_bh(&bp->phy_lock);
+
+ if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
+ goto err_out_unlock;
+
+ if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
+ goto err_out_unlock;
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
@@ -5148,44 +5496,41 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(cmd->advertising == ADVERTISED_100baseT_Half) ||
(cmd->advertising == ADVERTISED_100baseT_Full)) {
- if (bp->phy_flags & PHY_SERDES_FLAG)
- return -EINVAL;
+ if (cmd->port == PORT_FIBRE)
+ goto err_out_unlock;
advertising = cmd->advertising;
} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
- if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
- return -EINVAL;
- } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
+ if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
+ (cmd->port == PORT_TP))
+ goto err_out_unlock;
+ } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
advertising = cmd->advertising;
- }
- else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
- return -EINVAL;
- }
+ else if (cmd->advertising == ADVERTISED_1000baseT_Half)
+ goto err_out_unlock;
else {
- if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (cmd->port == PORT_FIBRE)
advertising = ETHTOOL_ALL_FIBRE_SPEED;
- }
- else {
+ else
advertising = ETHTOOL_ALL_COPPER_SPEED;
- }
}
advertising |= ADVERTISED_Autoneg;
}
else {
- if (bp->phy_flags & PHY_SERDES_FLAG) {
+ if (cmd->port == PORT_FIBRE) {
if ((cmd->speed != SPEED_1000 &&
cmd->speed != SPEED_2500) ||
(cmd->duplex != DUPLEX_FULL))
- return -EINVAL;
+ goto err_out_unlock;
if (cmd->speed == SPEED_2500 &&
!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
- return -EINVAL;
- }
- else if (cmd->speed == SPEED_1000) {
- return -EINVAL;
+ goto err_out_unlock;
}
+ else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
+ goto err_out_unlock;
+
autoneg &= ~AUTONEG_SPEED;
req_line_speed = cmd->speed;
req_duplex = cmd->duplex;
@@ -5197,13 +5542,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->req_line_speed = req_line_speed;
bp->req_duplex = req_duplex;
- spin_lock_bh(&bp->phy_lock);
-
- bnx2_setup_phy(bp);
+ err = bnx2_setup_phy(bp, cmd->port);
+err_out_unlock:
spin_unlock_bh(&bp->phy_lock);
- return 0;
+ return err;
}
static void
@@ -5214,11 +5558,7 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
strcpy(info->bus_info, pci_name(bp->pdev));
- info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
- info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
- info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
- info->fw_version[1] = info->fw_version[3] = '.';
- info->fw_version[5] = 0;
+ strcpy(info->fw_version, bp->fw_version);
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -5330,6 +5670,14 @@ bnx2_nway_reset(struct net_device *dev)
spin_lock_bh(&bp->phy_lock);
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
+ int rc;
+
+ rc = bnx2_setup_remote_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
+ return rc;
+ }
+
/* Force a link down visible on the other side */
if (bp->phy_flags & PHY_SERDES_FLAG) {
bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
@@ -5450,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
bp->stats_ticks = USEC_PER_SEC;
}
- if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
- bp->stats_ticks &= 0xffff00;
+ if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+ bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+ bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
@@ -5543,7 +5892,7 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
spin_lock_bh(&bp->phy_lock);
- bnx2_setup_phy(bp);
+ bnx2_setup_phy(bp, bp->phy_port);
spin_unlock_bh(&bp->phy_lock);
@@ -5882,7 +6231,7 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
struct bnx2 *bp = netdev_priv(dev);
if (CHIP_NUM(bp) == CHIP_NUM_5709)
- return (ethtool_op_set_tx_hw_csum(dev, data));
+ return (ethtool_op_set_tx_ipv6_csum(dev, data));
else
return (ethtool_op_set_tx_csum(dev, data));
}
@@ -5939,6 +6288,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG: {
u32 mii_regval;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return -EOPNOTSUPP;
+
if (!netif_running(dev))
return -EAGAIN;
@@ -5955,6 +6307,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+ return -EOPNOTSUPP;
+
if (!netif_running(dev))
return -EAGAIN;
@@ -6116,7 +6471,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
struct bnx2 *bp;
unsigned long mem_len;
- int rc;
+ int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
@@ -6273,7 +6628,47 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_unmap;
}
- bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
+ for (i = 0, j = 0; i < 3; i++) {
+ u8 num, k, skip0;
+
+ num = (u8) (reg >> (24 - (i * 8)));
+ for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
+ if (num >= k || !skip0 || k == 1) {
+ bp->fw_version[j++] = (num / k) + '0';
+ skip0 = 0;
+ }
+ }
+ if (i != 2)
+ bp->fw_version[j++] = '.';
+ }
+ if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
+ BNX2_PORT_FEATURE_ASF_ENABLED) {
+ bp->flags |= ASF_ENABLE_FLAG;
+
+ for (i = 0; i < 30; i++) {
+ reg = REG_RD_IND(bp, bp->shmem_base +
+ BNX2_BC_STATE_CONDITION);
+ if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+ break;
+ msleep(10);
+ }
+ }
+ reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
+ reg &= BNX2_CONDITION_MFW_RUN_MASK;
+ if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
+ reg != BNX2_CONDITION_MFW_RUN_NONE) {
+ int i;
+ u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
+
+ bp->fw_version[j++] = ' ';
+ for (i = 0; i < 3; i++) {
+ reg = REG_RD_IND(bp, addr + i * 4);
+ reg = swab32(reg);
+ memcpy(&bp->fw_version[j], &reg, 4);
+ j += 4;
+ }
+ }
reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
bp->mac_addr[0] = (u8) (reg >> 8);
@@ -6302,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ticks_int = 18;
bp->rx_ticks = 18;
- bp->stats_ticks = 1000000 & 0xffff00;
+ bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
bp->timer_interval = HZ;
bp->current_interval = HZ;
@@ -6315,7 +6710,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
bp->phy_flags |= PHY_SERDES_FLAG;
+ bp->phy_port = PORT_TP;
if (bp->phy_flags & PHY_SERDES_FLAG) {
+ bp->phy_port = PORT_FIBRE;
bp->flags |= NO_WOL_FLAG;
if (CHIP_NUM(bp) != CHIP_NUM_5706) {
bp->phy_addr = 2;
@@ -6324,6 +6721,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
}
+ bnx2_init_remote_phy(bp);
+
} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
CHIP_NUM(bp) == CHIP_NUM_5708)
bp->phy_flags |= PHY_CRC_FIX_FLAG;
@@ -6363,10 +6762,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_8132_BRIDGE,
amd_8132))) {
- u8 rev;
- pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
- if (rev >= 0x10 && rev <= 0x13) {
+ if (amd_8132->revision >= 0x10 &&
+ amd_8132->revision <= 0x13) {
disable_msi = 1;
pci_dev_put(amd_8132);
break;
@@ -6374,23 +6772,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
}
}
- bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
- bp->req_line_speed = 0;
- if (bp->phy_flags & PHY_SERDES_FLAG) {
- bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
-
- reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
- reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
- if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
- bp->autoneg = 0;
- bp->req_line_speed = bp->line_speed = SPEED_1000;
- bp->req_duplex = DUPLEX_FULL;
- }
- }
- else {
- bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
- }
-
+ bnx2_set_default_link(bp);
bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
init_timer(&bp->timer);
@@ -6490,10 +6872,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->perm_addr, bp->mac_addr, 6);
bp->name = board_info[ent->driver_data].name;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (CHIP_NUM(bp) == CHIP_NUM_5709)
- dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
- else
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ dev->features |= NETIF_F_IPV6_CSUM;
+
#ifdef BCM_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
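/* A minimal standalone sketch of the version-string formatting the new
 * bnx2 probe code above performs: the top three bytes of the
 * BNX2_DEV_INFO_BC_REV word are printed in decimal with leading zeros
 * suppressed and joined with dots into bp->fw_version.  The register
 * value in main() is made up for illustration.
 */
#include <stdio.h>

static void bc_rev_to_string(unsigned int reg, char *buf)
{
	int i, j = 0;

	for (i = 0; i < 3; i++) {
		unsigned char num = (unsigned char) (reg >> (24 - (i * 8)));
		unsigned char k, skip0;

		/* print one byte in decimal, suppressing leading zeros */
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				buf[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			buf[j++] = '.';
	}
	buf[j] = '\0';
}

int main(void)
{
	char ver[16];

	bc_rev_to_string(0x040a0200, ver);	/* hypothetical register value */
	printf("%s\n", ver);			/* prints 4.10.2 */
	return 0;
}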
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 49a5de253b17..102adfe1e923 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6338,6 +6338,8 @@ struct l2_fhdr {
#define RX_COPY_THRESH 92
+#define BNX2_MISC_ENABLE_DEFAULT 0x7ffffff
+
#define DMA_READ_CHANS 5
#define DMA_WRITE_CHANS 3
@@ -6431,6 +6433,11 @@ struct sw_bd {
#define ST_MICRO_FLASH_PAGE_SIZE 256
#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536
+#define BCM5709_FLASH_PAGE_BITS 8
+#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE 256
+
#define NVRAM_TIMEOUT_COUNT 30000
@@ -6447,7 +6454,10 @@ struct flash_spec {
u32 config2;
u32 config3;
u32 write1;
- u32 buffered;
+ u32 flags;
+#define BNX2_NV_BUFFERED 0x00000001
+#define BNX2_NV_TRANSLATE 0x00000002
+#define BNX2_NV_WREN 0x00000004
u32 page_bits;
u32 page_size;
u32 addr_mask;
@@ -6537,6 +6547,7 @@ struct bnx2 {
#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100
#define PHY_INT_MODE_LINK_READY_FLAG 0x200
#define PHY_DIS_EARLY_DAC_FLAG 0x400
+#define REMOTE_PHY_CAP_FLAG 0x800
u32 mii_bmcr;
u32 mii_bmsr;
@@ -6625,6 +6636,7 @@ struct bnx2 {
u16 req_line_speed;
u8 req_duplex;
+ u8 phy_port;
u8 link_up;
u16 line_speed;
@@ -6656,7 +6668,7 @@ struct bnx2 {
u32 shmem_base;
- u32 fw_ver;
+ char fw_version[32];
int pm_cap;
int pcix_cap;
@@ -6770,7 +6782,7 @@ struct fw_info {
* the firmware has timed out, the driver will assume there is no firmware
* running and there won't be any firmware-driver synchronization during a
* driver reset. */
-#define FW_ACK_TIME_OUT_MS 100
+#define FW_ACK_TIME_OUT_MS 1000
#define BNX2_DRV_RESET_SIGNATURE 0x00000000
@@ -6788,6 +6800,7 @@ struct fw_info {
#define BNX2_DRV_MSG_CODE_DIAG 0x07000000
#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000
#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000
+#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000
#define BNX2_DRV_MSG_DATA 0x00ff0000
#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
@@ -6836,6 +6849,7 @@ struct fw_info {
#define BNX2_LINK_STATUS_SERDES_LINK (1<<20)
#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21)
#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22)
+#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31)
#define BNX2_DRV_PULSE_MB 0x00000010
#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff
@@ -6845,6 +6859,30 @@ struct fw_info {
* This is used for debugging. */
#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000
+#define BNX2_DRV_MB_ARG0 0x00000014
+#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0)
+#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1)
+#define BNX2_NETLINK_SET_LINK_SPEED_10 \
+ (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \
+ BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2)
+#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3)
+#define BNX2_NETLINK_SET_LINK_SPEED_100 \
+ (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \
+ BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9)
+#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10)
+#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11)
+#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12)
+#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13)
+#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14)
+#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15)
+
#define BNX2_DEV_INFO_SIGNATURE 0x00000020
#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900
#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00
@@ -7006,6 +7044,8 @@ struct fw_info {
#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff
#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000
+#define BNX2_MFW_VER_PTR 0x00000014c
+
#define BNX2_BC_STATE_RESET_TYPE 0x000001c0
#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254
#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff
@@ -7059,12 +7099,42 @@ struct fw_info {
#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600)
#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700)
+#define BNX2_BC_STATE_CONDITION 0x000001c8
+#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000
+#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000
+#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000
+#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
+#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
+#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000
#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff
#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff
+#define BNX2_FW_EVT_CODE_MB 0x354
+#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
+#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001
+
+#define BNX2_DRV_ACK_CAP_MB 0x364
+#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000
+#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000
+
+#define BNX2_FW_CAP_MB 0x368
+#define BNX2_FW_CAP_SIGNATURE 0xaa550000
+#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000
+#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000
+#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001
+#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002
+
+#define BNX2_RPHY_SIGNATURE 0x36c
+#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a
+
+#define BNX2_RPHY_FLAGS 0x370
+#define BNX2_RPHY_SERDES_LINK 0x374
+#define BNX2_RPHY_COPPER_LINK 0x378
+
#define HOST_VIEW_SHMEM_BASE 0x167c00
#endif
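/* A minimal sketch of the flash_spec "flags" refactor above: a single
 * bitmask replaces the old "buffered" field so buffering, page-address
 * translation and write-enable handling can be tested independently.
 * The new 5709 entry is buffered but needs no translation, which is why
 * bnx2_nvram_read_dword() now tests BNX2_NV_TRANSLATE.  The Atmel page
 * geometry and the assumption that BUFFERED_FLAGS combines BUFFERED and
 * TRANSLATE are illustrative.
 */
#include <stdio.h>

#define NV_BUFFERED	0x00000001	/* mirrors BNX2_NV_BUFFERED */
#define NV_TRANSLATE	0x00000002	/* mirrors BNX2_NV_TRANSLATE */
#define NV_WREN		0x00000004	/* mirrors BNX2_NV_WREN */

struct flash_spec_sketch {
	unsigned int flags;
	unsigned int page_bits;
	unsigned int page_size;
	const char *name;
};

static const struct flash_spec_sketch atmel_buffered = {
	.flags = NV_BUFFERED | NV_TRANSLATE,
	.page_bits = 9, .page_size = 264,
	.name = "Buffered flash",
};

static const struct flash_spec_sketch bcm5709 = {
	.flags = NV_BUFFERED,
	.page_bits = 8, .page_size = 256,	/* BCM5709_FLASH_* values */
	.name = "5709 Buffered flash",
};

/* Same page-address calculation the driver performs before an NVRAM
 * command, skipped entirely when NV_TRANSLATE is not set.
 */
static unsigned int nvram_offset(const struct flash_spec_sketch *f,
				 unsigned int offset)
{
	if (f->flags & NV_TRANSLATE)
		offset = ((offset / f->page_size) << f->page_bits) +
			 (offset % f->page_size);
	return offset;
}

int main(void)
{
	printf("%s: 0x%x\n", atmel_buffered.name,
	       nvram_offset(&atmel_buffered, 1000));
	printf("%s: 0x%x\n", bcm5709.name, nvram_offset(&bcm5709, 1000));
	return 0;
}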
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 7845eaf6f29f..202d4a4ef751 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -395,14 +395,13 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
* Allocate the main control structure for this instance.
*/
maxmaxcode = MAXCODE(bits);
- db = kmalloc(sizeof (struct bsd_db),
+ db = kzalloc(sizeof (struct bsd_db),
GFP_KERNEL);
if (!db)
{
return NULL;
}
- memset (db, 0, sizeof(struct bsd_db));
/*
* Allocate space for the dictionary. This may be more than one page in
* length.
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 59b9943b077d..f6e4030c73d1 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3422,21 +3422,19 @@ done:
static void cas_check_pci_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
- u8 rev;
cp->cas_flags = 0;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
(pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
- if (rev >= CAS_ID_REVPLUS)
+ if (pdev->revision >= CAS_ID_REVPLUS)
cp->cas_flags |= CAS_FLAG_REG_PLUS;
- if (rev < CAS_ID_REVPLUS02u)
+ if (pdev->revision < CAS_ID_REVPLUS02u)
cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
/* Original Cassini supports HW CSUM, but it's not
* enabled by default as it can trigger TX hangs.
*/
- if (rev < CAS_ID_REV2)
+ if (pdev->revision < CAS_ID_REV2)
cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
} else {
/* Only sun has original cassini chips. */
@@ -4919,13 +4917,13 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_cmd &= ~PCI_COMMAND_SERR;
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- if (pci_set_mwi(pdev))
+ if (pci_try_set_mwi(pdev))
printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
pci_name(pdev));
/*
* On some architectures, the default cache line size set
- * by pci_set_mwi reduces performance. We have to increase
+ * by pci_try_set_mwi reduces performance. We have to increase
* it for this case. To start, we'll print some configuration
* data.
*/
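/* A sketch of the PCI idioms the cassini hunks above switch to: the PCI
 * core caches the revision ID at enumeration time, so pdev->revision
 * replaces the explicit config-space read, and pci_try_set_mwi() is the
 * variant whose failure is advisory rather than fatal.  The probe body,
 * revision range and messages are illustrative only.
 */
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* Old style:
	 *	u8 rev;
	 *	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	 * New style: use the copy cached in struct pci_dev.
	 */
	if (pdev->revision >= 0x10 && pdev->revision <= 0x13)
		dev_info(&pdev->dev, "revision %#x needs a workaround\n",
			 pdev->revision);

	/* Memory-Write-Invalidate is a performance hint; not being able
	 * to enable it is worth a warning but not an error.
	 */
	if (pci_try_set_mwi(pdev))
		dev_warn(&pdev->dev, "could not enable MWI\n");

	return 0;
}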
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index 8eddd23a3a51..eb508bf8022a 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
/* Firmware version */
#define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 1
+#define FW_VERSION_MINOR 3
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 74ec64a1625d..04e3710c9082 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -250,7 +250,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->an_enable = 1;
mii_set_media (dev);
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
err = register_netdev (dev);
if (err)
@@ -866,9 +865,9 @@ receive_packet (struct net_device *dev)
PCI_DMA_FROMDEVICE);
/* 16 byte align the IP header */
skb_reserve (skb, 2);
- eth_copy_and_sum (skb,
+ skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
- pkt_len, 0);
+ pkt_len);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc->fraginfo &
@@ -879,7 +878,7 @@ receive_packet (struct net_device *dev)
skb->protocol = eth_type_trans (skb, dev);
#if 0
/* Checksum done by hw, but csum value unavailable. */
- if (np->pci_rev_id >= 0x0c &&
+ if (np->pdev->pci_rev_id >= 0x0c &&
!(frame_status & (TCPError | UDPError | IPError))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
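/* A sketch of the copy-break receive path the dl2k hunk above touches:
 * skb_copy_to_linear_data() is a plain copy into the new skb's linear
 * area; the old eth_copy_and_sum() name was misleading because it never
 * computed a checksum.  The helper below and its parameters are
 * illustrative.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void copy_break_rx_sketch(struct net_device *dev, const void *rx_buf,
				 unsigned int pkt_len)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

	if (!skb)
		return;
	skb_reserve(skb, 2);		/* 16-byte align the IP header */
	skb_copy_to_linear_data(skb, rx_buf, pkt_len);
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}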
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 814c449c359f..e443065a452e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -668,7 +668,6 @@ struct netdev_private {
unsigned int rx_flow:1; /* Rx flow control enable */
unsigned int phy_media:1; /* 1: fiber, 0: copper */
unsigned int link_status:1; /* Current link status */
- unsigned char pci_rev_id; /* PCI revision ID */
struct netdev_desc *last_tx; /* Last Tx descriptor used. */
unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
unsigned long cur_tx, old_tx;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 264fa0e2e075..c3de81bf090a 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -104,6 +104,18 @@
#define PRINTK(args...) printk(KERN_DEBUG args)
#endif
+#ifdef CONFIG_BLACKFIN
+#define readsb insb
+#define readsw insw
+#define readsl insl
+#define writesb outsb
+#define writesw outsw
+#define writesl outsl
+#define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQF_TRIGGER_HIGH)
+#else
+#define DM9000_IRQ_FLAGS IRQF_SHARED
+#endif
+
/*
* Transmit timeout, default 5 seconds.
*/
@@ -431,6 +443,9 @@ dm9000_probe(struct platform_device *pdev)
db->io_addr = (void __iomem *)base;
db->io_data = (void __iomem *)(base + 4);
+ /* ensure at least we have a default set of IO routines */
+ dm9000_set_io(db, 2);
+
} else {
db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -614,7 +629,7 @@ dm9000_open(struct net_device *dev)
PRINTK2("entering dm9000_open\n");
- if (request_irq(dev->irq, &dm9000_interrupt, IRQF_SHARED, dev->name, dev))
+ if (request_irq(dev->irq, &dm9000_interrupt, DM9000_IRQ_FLAGS, dev->name, dev))
return -EAGAIN;
/* Initialize DM9000 board */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 60673bc292c0..756a6bcb038d 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -34,11 +34,12 @@
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
static int numdummies = 1;
static int dummy_xmit(struct sk_buff *skb, struct net_device *dev);
-static struct net_device_stats *dummy_get_stats(struct net_device *dev);
static int dummy_set_address(struct net_device *dev, void *p)
{
@@ -56,13 +57,13 @@ static void set_multicast_list(struct net_device *dev)
{
}
-static void __init dummy_setup(struct net_device *dev)
+static void dummy_setup(struct net_device *dev)
{
/* Initialize the device structure. */
- dev->get_stats = dummy_get_stats;
dev->hard_start_xmit = dummy_xmit;
dev->set_multicast_list = set_multicast_list;
dev->set_mac_address = dummy_set_address;
+ dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
@@ -76,77 +77,80 @@ static void __init dummy_setup(struct net_device *dev)
static int dummy_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct net_device_stats *stats = netdev_priv(dev);
-
- stats->tx_packets++;
- stats->tx_bytes+=skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
return 0;
}
-static struct net_device_stats *dummy_get_stats(struct net_device *dev)
+static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
{
- return netdev_priv(dev);
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
}
-static struct net_device **dummies;
+static struct rtnl_link_ops dummy_link_ops __read_mostly = {
+ .kind = "dummy",
+ .setup = dummy_setup,
+ .validate = dummy_validate,
+};
/* Number of dummy devices to be set up by this module. */
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
-static int __init dummy_init_one(int index)
+static int __init dummy_init_one(void)
{
struct net_device *dev_dummy;
int err;
- dev_dummy = alloc_netdev(sizeof(struct net_device_stats),
- "dummy%d", dummy_setup);
-
+ dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup);
if (!dev_dummy)
return -ENOMEM;
- if ((err = register_netdev(dev_dummy))) {
- free_netdev(dev_dummy);
- dev_dummy = NULL;
- } else {
- dummies[index] = dev_dummy;
- }
+ err = dev_alloc_name(dev_dummy, dev_dummy->name);
+ if (err < 0)
+ goto err;
- return err;
-}
+ dev_dummy->rtnl_link_ops = &dummy_link_ops;
+ err = register_netdevice(dev_dummy);
+ if (err < 0)
+ goto err;
+ return 0;
-static void dummy_free_one(int index)
-{
- unregister_netdev(dummies[index]);
- free_netdev(dummies[index]);
+err:
+ free_netdev(dev_dummy);
+ return err;
}
static int __init dummy_init_module(void)
{
int i, err = 0;
- dummies = kmalloc(numdummies * sizeof(void *), GFP_KERNEL);
- if (!dummies)
- return -ENOMEM;
+
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
+
for (i = 0; i < numdummies && !err; i++)
- err = dummy_init_one(i);
- if (err) {
- i--;
- while (--i >= 0)
- dummy_free_one(i);
- }
+ err = dummy_init_one();
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
+ rtnl_unlock();
+
return err;
}
static void __exit dummy_cleanup_module(void)
{
- int i;
- for (i = 0; i < numdummies; i++)
- dummy_free_one(i);
- kfree(dummies);
+ rtnl_link_unregister(&dummy_link_ops);
}
module_init(dummy_init_module);
module_exit(dummy_cleanup_module);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("dummy");
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 74ea6373c7cd..6b6401e9304e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -583,7 +583,6 @@ struct nic {
u32 rx_tco_frames;
u32 rx_over_length_errors;
- u8 rev_id;
u16 leds;
u16 eeprom_wc;
u16 eeprom[256];
@@ -937,9 +936,8 @@ static void e100_get_defaults(struct nic *nic)
struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
- pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
- nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
+ nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
if(nic->mac == mac_unknown)
nic->mac = mac_82557_D100_A;
@@ -1279,7 +1277,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb
if (nic->flags & ich)
goto noloaducode;
- /* Search for ucode match against h/w rev_id */
+ /* Search for ucode match against h/w revision */
for (opts = ucode_opts; opts->mac; opts++) {
int i;
u32 *ucode = opts->ucode;
@@ -2238,7 +2236,7 @@ static void e100_get_regs(struct net_device *netdev,
u32 *buff = p;
int i;
- regs->version = (1 << 24) | nic->rev_id;
+ regs->version = (1 << 24) | nic->pdev->revision;
buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
ioread8(&nic->csr->scb.cmd_lo) << 16 |
ioread16(&nic->csr->scb.status);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index cf8af928a69c..f48b659e0c2b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1266,8 +1266,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->revision_id = pdev->revision;
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9800341956a2..3c54014acece 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1801,7 +1801,7 @@ speedo_rx(struct net_device *dev)
#if 1 || USE_IP_CSUM
/* Packet is in one chunk -- we can copy + cksum. */
- eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
skb_copy_from_linear_data(sp->rx_skbuff[entry],
@@ -2292,10 +2292,15 @@ static int eepro100_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata (pdev);
struct speedo_private *sp = netdev_priv(dev);
void __iomem *ioaddr = sp->regs;
+ int rc;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
pci_set_master(pdev);
if (!netif_running(dev))
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f03f070451de..489c8b260dd8 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,13 +39,13 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0067"
+#define DRV_VERSION "EHEA_0071"
-/* EHEA capability flags */
+/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
-#define DLPAR_MEM_ADD 2
-#define DLPAR_MEM_REM 4
-#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
+#define DLPAR_MEM_ADD 2
+#define DLPAR_MEM_REM 4
+#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -113,6 +113,8 @@
/* Memory Regions */
#define EHEA_MR_ACC_CTRL 0x00800000
+#define EHEA_BUSMAP_START 0x8000000000000000ULL
+
#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
/* utility functions */
@@ -186,6 +188,12 @@ struct h_epas {
set to 0 if unused */
};
+struct ehea_busmap {
+ unsigned int entries; /* total number of entries */
+ unsigned int valid_sections; /* number of valid sections */
+ u64 *vaddr;
+};
+
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
@@ -382,6 +390,8 @@ struct ehea_adapter {
struct ehea_mr mr;
u32 pd; /* protection domain */
u64 max_mc_mac; /* max number of multicast mac addresses */
+ int active_ports;
+ struct list_head list;
};
@@ -431,6 +441,9 @@ struct port_res_cfg {
int max_entries_rq3;
};
+enum ehea_flag_bits {
+ __EHEA_STOP_XFER
+};
void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 383144db4d18..4c70a9301c1b 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -79,6 +79,11 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
static int port_name_cnt = 0;
+static LIST_HEAD(adapter_list);
+u64 ehea_driver_flags = 0;
+struct workqueue_struct *ehea_driver_wq;
+struct work_struct ehea_rereg_mr_task;
+
static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
const struct of_device_id *id);
@@ -238,13 +243,17 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
- rwqe->sg_list[0].vaddr = (u64)skb->data;
+ rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
rwqe->sg_list[0].len = packet_size;
rwqe->data_segments = 1;
index++;
index &= max_index_mask;
+
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+ goto out;
}
+
q_skba->index = index;
/* Ring doorbell */
@@ -253,7 +262,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
ehea_update_rq2a(pr->qp, i);
else
ehea_update_rq3a(pr->qp, i);
-
+out:
return ret;
}
@@ -457,6 +466,8 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
cqe->vlan_tag);
else
netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -1321,7 +1332,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
sg1entry->len = skb_data_size - headersize;
tmp_addr = (u64)(skb->data + headersize);
- sg1entry->vaddr = tmp_addr;
+ sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
}
} else
@@ -1352,7 +1363,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
sg1entry->l_key = lkey;
sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
- sg1entry->vaddr = tmp_addr;
+ sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
}
} else {
@@ -1391,7 +1402,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sg1entry->len = frag->size;
tmp_addr = (u64)(page_address(frag->page)
+ frag->page_offset);
- sg1entry->vaddr = tmp_addr;
+ sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
sg1entry_contains_frag_data = 1;
}
@@ -1406,7 +1417,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
tmp_addr = (u64)(page_address(frag->page)
+ frag->page_offset);
- sgentry->vaddr = tmp_addr;
+ sgentry->vaddr = ehea_map_vaddr(tmp_addr);
swqe->descriptors++;
}
}
@@ -1424,7 +1435,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("reg_dereg_bcmc failed (tagged)");
+ ehea_error("%sregistering bc address failed (tagged)",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
@@ -1435,7 +1447,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("reg_dereg_bcmc failed (vlan)");
+ ehea_error("%sregistering bc address failed (vlan)",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
}
out_herr:
@@ -1878,6 +1891,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
ehea_dump(swqe, 512, "swqe");
}
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+ goto out;
+
ehea_post_swqe(pr->qp, swqe);
pr->tx_packets++;
@@ -1892,7 +1908,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
spin_unlock(&pr->xmit_lock);
-
+out:
return NETDEV_TX_OK;
}
@@ -2158,7 +2174,6 @@ static int ehea_up(struct net_device *dev)
{
int ret, i;
struct ehea_port *port = netdev_priv(dev);
- u64 mac_addr = 0;
if (port->state == EHEA_PORT_UP)
return 0;
@@ -2177,18 +2192,10 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
- if (ret) {
- ret = -EIO;
- ehea_error("out_clean_pr");
- goto out_clean_pr;
- }
- mac_addr = (*(u64*)dev->dev_addr) >> 16;
-
ret = ehea_reg_interrupts(dev);
if (ret) {
- ehea_error("out_dereg_bc");
- goto out_dereg_bc;
+ ehea_error("reg_interrupts failed. ret:%d", ret);
+ goto out_clean_pr;
}
for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -2214,12 +2221,12 @@ static int ehea_up(struct net_device *dev)
out_free_irqs:
ehea_free_interrupts(dev);
-out_dereg_bc:
- ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-
out_clean_pr:
ehea_clean_all_portres(port);
out:
+ if (ret)
+ ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+
return ret;
}
@@ -2258,9 +2265,13 @@ static int ehea_down(struct net_device *dev)
&port->port_res[i].d_netdev->state))
msleep(1);
- ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
- ret = ehea_clean_all_portres(port);
port->state = EHEA_PORT_DOWN;
+
+ ret = ehea_clean_all_portres(port);
+ if (ret)
+ ehea_info("Failed freeing resources for %s. ret=%i",
+ dev->name, ret);
+
return ret;
}
@@ -2292,15 +2303,11 @@ static void ehea_reset_port(struct work_struct *work)
netif_stop_queue(dev);
netif_poll_disable(dev);
- ret = ehea_down(dev);
- if (ret)
- ehea_error("ehea_down failed. not all resources are freed");
+ ehea_down(dev);
ret = ehea_up(dev);
- if (ret) {
- ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
+ if (ret)
goto out;
- }
if (netif_msg_timer(port))
ehea_info("Device %s reset successfully", dev->name);
@@ -2312,6 +2319,88 @@ out:
return;
}
+static void ehea_rereg_mrs(struct work_struct *work)
+{
+ int ret, i;
+ struct ehea_adapter *adapter;
+
+ ehea_info("LPAR memory enlarged - re-initializing driver");
+
+ list_for_each_entry(adapter, &adapter_list, list)
+ if (adapter->active_ports) {
+ /* Shutdown all ports */
+ for (i = 0; i < EHEA_MAX_PORTS; i++) {
+ struct ehea_port *port = adapter->port[i];
+
+ if (port) {
+ struct net_device *dev = port->netdev;
+
+ if (dev->flags & IFF_UP) {
+ ehea_info("stopping %s",
+ dev->name);
+ down(&port->port_lock);
+ netif_stop_queue(dev);
+ netif_poll_disable(dev);
+ ehea_down(dev);
+ up(&port->port_lock);
+ }
+ }
+ }
+
+ /* Unregister old memory region */
+ ret = ehea_rem_mr(&adapter->mr);
+ if (ret) {
+ ehea_error("unregister MR failed - driver"
+ " inoperable!");
+ goto out;
+ }
+ }
+
+ ehea_destroy_busmap();
+
+ ret = ehea_create_busmap();
+ if (ret)
+ goto out;
+
+ clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+
+ list_for_each_entry(adapter, &adapter_list, list)
+ if (adapter->active_ports) {
+ /* Register new memory region */
+ ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
+ if (ret) {
+ ehea_error("register MR failed - driver"
+ " inoperable!");
+ goto out;
+ }
+
+ /* Restart all ports */
+ for (i = 0; i < EHEA_MAX_PORTS; i++) {
+ struct ehea_port *port = adapter->port[i];
+
+ if (port) {
+ struct net_device *dev = port->netdev;
+
+ if (dev->flags & IFF_UP) {
+ ehea_info("restarting %s",
+ dev->name);
+ down(&port->port_lock);
+
+ ret = ehea_up(dev);
+ if (!ret) {
+ netif_poll_enable(dev);
+ netif_wake_queue(dev);
+ }
+
+ up(&port->port_lock);
+ }
+ }
+ }
+ }
+out:
+ return;
+}
+
static void ehea_tx_watchdog(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
@@ -2557,12 +2646,18 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
INIT_WORK(&port->reset_task, ehea_reset_port);
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret) {
+ ret = -EIO;
+ goto out_unreg_port;
+ }
+
ehea_set_ethtool_ops(dev);
ret = register_netdev(dev);
if (ret) {
ehea_error("register_netdev failed. ret=%d", ret);
- goto out_unreg_port;
+ goto out_dereg_bc;
}
ret = ehea_get_jumboframe_status(port, &jumbo);
@@ -2573,8 +2668,13 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ehea_info("%s: Jumbo frames are %sabled", dev->name,
jumbo == 1 ? "en" : "dis");
+ adapter->active_ports++;
+
return port;
+out_dereg_bc:
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
out_unreg_port:
ehea_unregister_port(port);
@@ -2594,8 +2694,10 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
{
unregister_netdev(port->netdev);
ehea_unregister_port(port);
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
kfree(port->mc_list);
free_netdev(port->netdev);
+ port->adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
@@ -2788,6 +2890,8 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
goto out;
}
+ list_add(&adapter->list, &adapter_list);
+
adapter->ebus_dev = dev;
adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
@@ -2891,7 +2995,10 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
+ list_del(&adapter->list);
+
kfree(adapter);
+
return 0;
}
@@ -2939,9 +3046,18 @@ int __init ehea_module_init(void)
printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
DRV_VERSION);
+ ehea_driver_wq = create_workqueue("ehea_driver_wq");
+
+ INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+
ret = check_module_parm();
if (ret)
goto out;
+
+ ret = ehea_create_busmap();
+ if (ret)
+ goto out;
+
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
ehea_error("failed registering eHEA device driver on ebus");
@@ -2965,6 +3081,7 @@ static void __exit ehea_module_exit(void)
{
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
+ ehea_destroy_busmap();
}
module_init(ehea_module_init);
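/* A minimal sketch of the stop-transfer flag these ehea_main.c changes
 * introduce: hot paths test a shared bit and bail out while LPAR memory
 * is being re-registered, and the first context that detects a failed
 * translation sets the bit atomically so the recovery work is queued
 * exactly once.  Identifiers ending in _sketch are illustrative; the
 * real driver creates its workqueue in ehea_module_init().
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

enum { __STOP_XFER_SKETCH };

static unsigned long driver_flags_sketch;
static struct workqueue_struct *wq_sketch;
static struct work_struct rereg_work_sketch;

static int xmit_sketch(void)
{
	/* Fast path: silently drop work while recovery is pending. */
	if (unlikely(test_bit(__STOP_XFER_SKETCH, &driver_flags_sketch)))
		return 0;
	/* ... post the send WQE to the adapter ... */
	return 1;
}

static void translation_failed_sketch(void)
{
	/* Only the first failure schedules the re-registration task. */
	if (!test_and_set_bit(__STOP_XFER_SKETCH, &driver_flags_sketch))
		queue_work(wq_sketch, &rereg_work_sketch);
}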
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index d17a45a7e717..89b63531ff26 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -60,6 +60,9 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
}
}
+/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
+#define EHEA_MAX_RPAGE 512
+
/* Notification Event Queue (NEQ) Entry bit masks */
#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 29eaa46948b0..a36fa6c23fdf 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,6 +31,13 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
+
+struct ehea_busmap ehea_bmap = { 0, 0, NULL };
+extern u64 ehea_driver_flags;
+extern struct workqueue_struct *ehea_driver_wq;
+extern struct work_struct ehea_rereg_mr_task;
+
+
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
@@ -547,18 +554,84 @@ int ehea_destroy_qp(struct ehea_qp *qp)
return 0;
}
+int ehea_create_busmap( void )
+{
+ u64 vaddr = EHEA_BUSMAP_START;
+ unsigned long abs_max_pfn = 0;
+ unsigned long sec_max_pfn;
+ int i;
+
+ /*
+ * Sections are not in ascending order -> Loop over all sections and
+ * find the highest PFN to compute the required map size.
+ */
+ ehea_bmap.valid_sections = 0;
+
+ for (i = 0; i < NR_MEM_SECTIONS; i++)
+ if (valid_section_nr(i)) {
+ sec_max_pfn = section_nr_to_pfn(i);
+ if (sec_max_pfn > abs_max_pfn)
+ abs_max_pfn = sec_max_pfn;
+ ehea_bmap.valid_sections++;
+ }
+
+ ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+ ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+
+ if (!ehea_bmap.vaddr)
+ return -ENOMEM;
+
+ for (i = 0 ; i < ehea_bmap.entries; i++) {
+ unsigned long pfn = section_nr_to_pfn(i);
+
+ if (pfn_valid(pfn)) {
+ ehea_bmap.vaddr[i] = vaddr;
+ vaddr += EHEA_SECTSIZE;
+ } else
+ ehea_bmap.vaddr[i] = 0;
+ }
+
+ return 0;
+}
+
+void ehea_destroy_busmap( void )
+{
+ vfree(ehea_bmap.vaddr);
+}
+
+u64 ehea_map_vaddr(void *caddr)
+{
+ u64 mapped_addr;
+ unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
+
+ if (likely(index < ehea_bmap.entries)) {
+ mapped_addr = ehea_bmap.vaddr[index];
+ if (likely(mapped_addr))
+ mapped_addr |= (((unsigned long)caddr)
+ & (EHEA_SECTSIZE - 1));
+ else
+ mapped_addr = -1;
+ } else
+ mapped_addr = -1;
+
+ if (unlikely(mapped_addr == -1))
+ if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+ queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+
+ return mapped_addr;
+}
+
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
- int i, k, ret;
- u64 hret, pt_abs, start, end, nr_pages;
- u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+ int ret;
u64 *pt;
+ void *pg;
+ u64 hret, pt_abs, i, j, m, mr_len;
+ u32 acc_ctrl = EHEA_MR_ACC_CTRL;
- start = KERNELBASE;
- end = (u64)high_memory;
- nr_pages = (end - start) / EHEA_PAGESIZE;
+ mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
- pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
if (!pt) {
ehea_error("no mem");
ret = -ENOMEM;
@@ -566,7 +639,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
}
pt_abs = virt_to_abs(pt);
- hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+ hret = ehea_h_alloc_resource_mr(adapter->handle,
+ EHEA_BUSMAP_START, mr_len,
acc_ctrl, adapter->pd,
&mr->handle, &mr->lkey);
if (hret != H_SUCCESS) {
@@ -575,49 +649,43 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
goto out;
}
- mr->vaddr = KERNELBASE;
- k = 0;
-
- while (nr_pages > 0) {
- if (nr_pages > 1) {
- u64 num_pages = min(nr_pages, (u64)512);
- for (i = 0; i < num_pages; i++)
- pt[i] = virt_to_abs((void*)(((u64)start) +
- ((k++) *
- EHEA_PAGESIZE)));
-
- hret = ehea_h_register_rpage_mr(adapter->handle,
- mr->handle, 0,
- 0, (u64)pt_abs,
- num_pages);
- nr_pages -= num_pages;
- } else {
- u64 abs_adr = virt_to_abs((void*)(((u64)start) +
- (k * EHEA_PAGESIZE)));
-
- hret = ehea_h_register_rpage_mr(adapter->handle,
- mr->handle, 0,
- 0, abs_adr,1);
- nr_pages--;
- }
-
- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
- ehea_h_free_resource(adapter->handle,
- mr->handle, FORCE_FREE);
- ehea_error("register_rpage_mr failed");
- ret = -EIO;
- goto out;
+ for (i = 0 ; i < ehea_bmap.entries; i++)
+ if (ehea_bmap.vaddr[i]) {
+ void *sectbase = __va(i << SECTION_SIZE_BITS);
+ unsigned long k = 0;
+
+ for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
+ j++) {
+
+ for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+ pg = sectbase + ((k++) * EHEA_PAGESIZE);
+ pt[m] = virt_to_abs(pg);
+ }
+
+ hret = ehea_h_register_rpage_mr(adapter->handle,
+ mr->handle,
+ 0, 0, pt_abs,
+ EHEA_MAX_RPAGE);
+ if ((hret != H_SUCCESS)
+ && (hret != H_PAGE_REGISTERED)) {
+ ehea_h_free_resource(adapter->handle,
+ mr->handle,
+ FORCE_FREE);
+ ehea_error("register_rpage_mr failed");
+ ret = -EIO;
+ goto out;
+ }
+ }
}
- }
if (hret != H_SUCCESS) {
- ehea_h_free_resource(adapter->handle, mr->handle,
- FORCE_FREE);
- ehea_error("register_rpage failed for last page");
+ ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+ ehea_error("registering mr failed");
ret = -EIO;
goto out;
}
+ mr->vaddr = EHEA_BUSMAP_START;
mr->adapter = adapter;
ret = 0;
out:
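
[Editor's sketch] The busmap above replaces the old flat KERNELBASE..high_memory registration: every valid 16 MB memory section gets one table slot, and ehea_map_vaddr() translates a kernel address into the flat bus address handed to the adapter, scheduling the MR re-registration work once if a lookup misses. A standalone userspace sketch of that translation follows; it takes a physical address directly (the kernel code goes through __pa()), and the table contents, start address and input value are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define SECT_SHIFT 24                           /* 16 MB sections, as EHEA_SECTSIZE */
#define SECT_SIZE  (1UL << SECT_SHIFT)

static uint64_t map_vaddr(uint64_t phys, const uint64_t *bmap, size_t entries)
{
	size_t idx = phys >> SECT_SHIFT;        /* which memory section */

	if (idx >= entries || !bmap[idx])
		return (uint64_t)-1;            /* section not registered */
	return bmap[idx] | (phys & (SECT_SIZE - 1));
}

int main(void)
{
	/* two registered sections laid out back-to-back in bus space */
	uint64_t bmap[4] = { 0x8000000000000000ULL,
			     0x8000000000000000ULL + SECT_SIZE, 0, 0 };

	printf("0x%llx\n", (unsigned long long)map_vaddr(0x1234567, bmap, 4));
	return 0;
}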
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index c0eb3e03a102..b71f8452a5e3 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -36,8 +36,14 @@
* page size of ehea hardware queues
*/
-#define EHEA_PAGESHIFT 12
-#define EHEA_PAGESIZE 4096UL
+#define EHEA_PAGESHIFT 12
+#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
+#define EHEA_SECTSIZE (1UL << 24)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+
+#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
+#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#endif
/* Some abbreviations used here:
*
@@ -372,4 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+int ehea_create_busmap( void );
+void ehea_destroy_busmap( void );
+u64 ehea_map_vaddr(void *caddr);
+
#endif /* __EHEA_QMR_H__ */
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 5e517946f46a..119778401e48 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1201,7 +1201,7 @@ static int epic_rx(struct net_device *dev, int budget)
ep->rx_ring[entry].bufaddr,
ep->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(ep->pci_dev,
ep->rx_ring[entry].bufaddr,
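
[Editor's note] The same substitution appears below in fealnx, fec, hamachi, ixp2000 and lance. The removed eth_copy_and_sum() never actually folded a checksum on these receive paths; the replacement makes the plain copy explicit. For reference, the new helper is essentially a memcpy into the skb's linear area:

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}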
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index abe9b089c610..ff9f177d7157 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1727,8 +1727,8 @@ static int netdev_rx(struct net_device *dev)
/* Call copy + cksum if available. */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb,
- np->cur_rx->skbuff->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ np->cur_rx->skbuff->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 255b09124e11..03023dd17829 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -648,7 +648,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
fep->stats.rx_dropped++;
} else {
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb, data, pkt_len-4, 0);
+ skb_copy_to_linear_data(skb, data, pkt_len-4);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 42ba1c012ee2..6d1d50a19783 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -550,6 +550,8 @@ union ring_type {
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
+#define PHY_OUI_VITESSE 0x01c1
+#define PHY_OUI_REALTEK 0x0732
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
@@ -557,12 +559,36 @@ union ring_type {
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_MARVELL_E3016 0x220
#define PHY_MARVELL_E3016_INITMASK 0x0300
-#define PHY_INIT1 0x0f000
-#define PHY_INIT2 0x0e00
-#define PHY_INIT3 0x01000
-#define PHY_INIT4 0x0200
-#define PHY_INIT5 0x0004
-#define PHY_INIT6 0x02000
+#define PHY_CICADA_INIT1 0x0f000
+#define PHY_CICADA_INIT2 0x0e00
+#define PHY_CICADA_INIT3 0x01000
+#define PHY_CICADA_INIT4 0x0200
+#define PHY_CICADA_INIT5 0x0004
+#define PHY_CICADA_INIT6 0x02000
+#define PHY_VITESSE_INIT_REG1 0x1f
+#define PHY_VITESSE_INIT_REG2 0x10
+#define PHY_VITESSE_INIT_REG3 0x11
+#define PHY_VITESSE_INIT_REG4 0x12
+#define PHY_VITESSE_INIT_MSK1 0xc
+#define PHY_VITESSE_INIT_MSK2 0x0180
+#define PHY_VITESSE_INIT1 0x52b5
+#define PHY_VITESSE_INIT2 0xaf8a
+#define PHY_VITESSE_INIT3 0x8
+#define PHY_VITESSE_INIT4 0x8f8a
+#define PHY_VITESSE_INIT5 0xaf86
+#define PHY_VITESSE_INIT6 0x8f86
+#define PHY_VITESSE_INIT7 0xaf82
+#define PHY_VITESSE_INIT8 0x0100
+#define PHY_VITESSE_INIT9 0x8f82
+#define PHY_VITESSE_INIT10 0x0
+#define PHY_REALTEK_INIT_REG1 0x1f
+#define PHY_REALTEK_INIT_REG2 0x19
+#define PHY_REALTEK_INIT_REG3 0x13
+#define PHY_REALTEK_INIT1 0x0000
+#define PHY_REALTEK_INIT2 0x8e00
+#define PHY_REALTEK_INIT3 0x0001
+#define PHY_REALTEK_INIT4 0xad17
+
#define PHY_GIGABIT 0x0100
#define PHY_TIMEOUT 0x1
@@ -1096,6 +1122,28 @@ static int phy_init(struct net_device *dev)
return PHY_ERROR;
}
}
+ if (np->phy_oui == PHY_OUI_REALTEK) {
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
/* set advertise register */
reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
@@ -1141,14 +1189,14 @@ static int phy_init(struct net_device *dev)
/* phy vendor specific configuration */
if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
- phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
- phy_reserved |= (PHY_INIT3 | PHY_INIT4);
+ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
- phy_reserved |= PHY_INIT5;
+ phy_reserved |= PHY_CICADA_INIT5;
if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
@@ -1156,12 +1204,106 @@ static int phy_init(struct net_device *dev)
}
if (np->phy_oui == PHY_OUI_CICADA) {
phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
- phy_reserved |= PHY_INIT6;
+ phy_reserved |= PHY_CICADA_INIT6;
if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
return PHY_ERROR;
}
}
+ if (np->phy_oui == PHY_OUI_VITESSE) {
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+ phy_reserved |= PHY_VITESSE_INIT3;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
+ phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+ phy_reserved |= PHY_VITESSE_INIT8;
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+ if (np->phy_oui == PHY_OUI_REALTEK) {
+ /* reset could have cleared these out, set them back */
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
+ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+ return PHY_ERROR;
+ }
+ }
+
/* some phys clear out pause advertisment on reset, set it back */
mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
@@ -4995,12 +5137,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
}
- np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
- np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
+ np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
+ np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
if (!np->rx_skb || !np->tx_skb)
goto out_freering;
- memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
- memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
dev->open = nv_open;
dev->stop = nv_close;
@@ -5084,15 +5224,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->wolenabled = 0;
if (id->driver_data & DEV_HAS_POWER_CNTRL) {
- u8 revision_id;
- pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
/* take phy and nic out of low power mode */
powerstate = readl(base + NvRegPowerState2);
powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
- revision_id >= 0xA3)
+ pci_dev->revision >= 0xA3)
powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
writel(powerstate, base + NvRegPowerState2);
}
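
[Editor's note] The ring-map allocation above is one instance of a conversion repeated in several drivers in this series (dmascc, irport, irtty-sir, iseries_veth, lance): kmalloc() followed by memset() becomes kzalloc(), and kmalloc(n * size) becomes kcalloc(), which additionally checks the multiplication for overflow. A generic sketch of the pattern, where buf and n are placeholders:

	/* before: unchecked multiplication, separate clear */
	buf = kmalloc(n * sizeof(*buf), GFP_KERNEL);
	if (buf)
		memset(buf, 0, n * sizeof(*buf));

	/* after: zeroed allocation with overflow-checked size */
	buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);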
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d7a1a58de766..f92690555dd9 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -420,8 +420,18 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
if (ecntrl & ECNTRL_REDUCED_MODE) {
if (ecntrl & ECNTRL_REDUCED_MII_MODE)
return PHY_INTERFACE_MODE_RMII;
- else
+ else {
+ phy_interface_t interface = priv->einfo->interface;
+
+ /*
+ * This isn't autodetected right now, so it must
+ * be set by the device tree or platform code.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+
return PHY_INTERFACE_MODE_RGMII;
+ }
}
if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 5dd34a1a7b89..ac3596f45dd8 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -31,7 +31,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <asm/ocp.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 2521b111b3a5..15254dc7876a 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1575,8 +1575,8 @@ static int hamachi_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
/* Call copy + cksum if available. */
#if 1 || USE_IP_COPYSUM
- eth_copy_and_sum(skb,
- hmp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 84aa2117c0ee..355c6cf3d112 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc)
sprintf(portarg, "%ld", bc->pdev->port->base);
printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
- return call_usermodehelper(eppconfig_path, argv, envp, 1);
+ return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC);
}
/* ---------------------------------------------------------------------- */
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 3be8c5047599..205f09672492 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -453,8 +453,8 @@ static int __init setup_adapter(int card_base, int type, int n)
int scc_base = card_base + hw[type].scc_offset;
char *chipnames[] = CHIPNAMES;
- /* Allocate memory */
- info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
+ /* Initialize what is necessary for write_scc and write_scc_data */
+ info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
if (!info) {
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
@@ -462,8 +462,6 @@ static int __init setup_adapter(int card_base, int type, int n)
goto out;
}
- /* Initialize what is necessary for write_scc and write_scc_data */
- memset(info, 0, sizeof(struct scc_info));
info->dev[0] = alloc_netdev(0, "", dev_setup);
if (!info->dev[0]) {
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 6ec3d500f334..d96eb7229548 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1337,7 +1337,7 @@ const char * buf, size_t count)
#define ATTR(_name, _mode) \
struct attribute veth_##_name##_attr = { \
- .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
+ .name = __stringify(_name), .mode = _mode, \
};
static ATTR(active, 0644);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 07b4c0d7a75c..f5c3598e59af 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -136,13 +136,14 @@ resched:
}
-static void __init ifb_setup(struct net_device *dev)
+static void ifb_setup(struct net_device *dev)
{
/* Initialize the device structure. */
dev->get_stats = ifb_get_stats;
dev->hard_start_xmit = ifb_xmit;
dev->open = &ifb_open;
dev->stop = &ifb_close;
+ dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
ether_setup(dev);
@@ -197,12 +198,6 @@ static struct net_device_stats *ifb_get_stats(struct net_device *dev)
return stats;
}
-static struct net_device **ifbs;
-
-/* Number of ifb devices to be set up by this module. */
-module_param(numifbs, int, 0);
-MODULE_PARM_DESC(numifbs, "Number of ifb devices");
-
static int ifb_close(struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
@@ -226,6 +221,28 @@ static int ifb_open(struct net_device *dev)
return 0;
}
+static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops ifb_link_ops __read_mostly = {
+ .kind = "ifb",
+ .priv_size = sizeof(struct ifb_private),
+ .setup = ifb_setup,
+ .validate = ifb_validate,
+};
+
+/* Number of ifb devices to be set up by this module. */
+module_param(numifbs, int, 0);
+MODULE_PARM_DESC(numifbs, "Number of ifb devices");
+
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
@@ -237,49 +254,44 @@ static int __init ifb_init_one(int index)
if (!dev_ifb)
return -ENOMEM;
- if ((err = register_netdev(dev_ifb))) {
- free_netdev(dev_ifb);
- dev_ifb = NULL;
- } else {
- ifbs[index] = dev_ifb;
- }
+ err = dev_alloc_name(dev_ifb, dev_ifb->name);
+ if (err < 0)
+ goto err;
- return err;
-}
+ dev_ifb->rtnl_link_ops = &ifb_link_ops;
+ err = register_netdevice(dev_ifb);
+ if (err < 0)
+ goto err;
+ return 0;
-static void ifb_free_one(int index)
-{
- unregister_netdev(ifbs[index]);
- free_netdev(ifbs[index]);
+err:
+ free_netdev(dev_ifb);
+ return err;
}
static int __init ifb_init_module(void)
{
- int i, err = 0;
- ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
- if (!ifbs)
- return -ENOMEM;
+ int i, err;
+
+ rtnl_lock();
+ err = __rtnl_link_register(&ifb_link_ops);
+
for (i = 0; i < numifbs && !err; i++)
err = ifb_init_one(i);
- if (err) {
- i--;
- while (--i >= 0)
- ifb_free_one(i);
- }
+ if (err)
+ __rtnl_link_unregister(&ifb_link_ops);
+ rtnl_unlock();
return err;
}
static void __exit ifb_cleanup_module(void)
{
- int i;
-
- for (i = 0; i < numifbs; i++)
- ifb_free_one(i);
- kfree(ifbs);
+ rtnl_link_unregister(&ifb_link_ops);
}
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
+MODULE_ALIAS_RTNL_LINK("ifb");
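
[Editor's note] With the rtnl_link_ops conversion above, ifb instances are no longer fixed at module load: the numifbs devices are still created for compatibility, but further ones can be added and removed over netlink, for example with a sufficiently recent iproute2 via "ip link add ifb2 type ifb", and MODULE_ALIAS_RTNL_LINK("ifb") lets such a request autoload the module. The validate() hook rejects a malformed IFLA_ADDRESS, and setting dev->destructor = free_netdev lets the core free each device as it is unregistered.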
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 829da9a1d113..2098d0af8ff5 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -155,6 +155,15 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
+config EP7211_DONGLE
+ tristate "EP7211 I/R support"
+ depends on IRTTY_SIR && ARCH_EP7211 && IRDA && EXPERIMENTAL
+ help
+ Say Y here if you want to build support for the Cirrus Logic
+ EP7211 chipset's infrared module.
+
+
+
comment "Old SIR device drivers"
config IRPORT_SIR
@@ -355,7 +364,7 @@ config WINBOND_FIR
config TOSHIBA_FIR
tristate "Toshiba Type-O IR Port"
- depends on IRDA && PCI && !64BIT
+ depends on IRDA && PCI && !64BIT && VIRT_TO_BUS
help
Say Y here if you want to build support for the Toshiba Type-O IR
and Donau oboe chipsets. These chipsets are used by the Toshiba
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 233a2f923730..2808ef5c7b79 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
+obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
# The SIR helper module
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
new file mode 100644
index 000000000000..831572429bb9
--- /dev/null
+++ b/drivers/net/irda/ep7211-sir.c
@@ -0,0 +1,89 @@
+/*
+ * IR port driver for the Cirrus Logic EP7211 processor.
+ *
+ * Copyright 2001, Blue Mug Inc. All rights reserved.
+ * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#include "sir-dev.h"
+
+#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
+#define MAX_DELAY 10000 /* 1 ms */
+
+static int ep7211_open(struct sir_dev *dev);
+static int ep7211_close(struct sir_dev *dev);
+static int ep7211_change_speed(struct sir_dev *dev, unsigned speed);
+static int ep7211_reset(struct sir_dev *dev);
+
+static struct dongle_driver ep7211 = {
+ .owner = THIS_MODULE,
+ .driver_name = "EP7211 IR driver",
+ .type = IRDA_EP7211_DONGLE,
+ .open = ep7211_open,
+ .close = ep7211_close,
+ .reset = ep7211_reset,
+ .set_speed = ep7211_change_speed,
+};
+
+static int __init ep7211_sir_init(void)
+{
+ return irda_register_dongle(&ep7211);
+}
+
+static void __exit ep7211_sir_cleanup(void)
+{
+ irda_unregister_dongle(&ep7211);
+}
+
+static int ep7211_open(struct sir_dev *dev)
+{
+ unsigned int syscon;
+
+ /* Turn on the SIR encoder. */
+ syscon = clps_readl(SYSCON1);
+ syscon |= SYSCON1_SIREN;
+ clps_writel(syscon, SYSCON1);
+
+ return 0;
+}
+
+static int ep7211_close(struct sir_dev *dev)
+{
+ unsigned int syscon;
+
+ /* Turn off the SIR encoder. */
+ syscon = clps_readl(SYSCON1);
+ syscon &= ~SYSCON1_SIREN;
+ clps_writel(syscon, SYSCON1);
+
+ return 0;
+}
+
+static int ep7211_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ return 0;
+}
+
+static int ep7211_reset(struct sir_dev *dev)
+{
+ return 0;
+}
+
+MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
+MODULE_DESCRIPTION("EP7211 IR dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
+
+module_init(ep7211_sir_init);
+module_exit(ep7211_sir_cleanup);
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3078c419cb02..20732458f5ac 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -164,14 +164,13 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
/* Allocate memory if needed */
if (self->tx_buff.truesize > 0) {
- self->tx_buff.head = kmalloc(self->tx_buff.truesize,
+ self->tx_buff.head = kzalloc(self->tx_buff.truesize,
GFP_KERNEL);
if (self->tx_buff.head == NULL) {
IRDA_ERROR("%s(), can't allocate memory for "
"transmit buffer!\n", __FUNCTION__);
goto err_out4;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
}
self->tx_buff.data = self->tx_buff.head;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index ad1857364d51..6f5f697ec9f8 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -505,10 +505,9 @@ static int irtty_open(struct tty_struct *tty)
}
/* allocate private device info block */
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto out_put;
- memset(priv, 0, sizeof(*priv));
priv->magic = IRTTY_MAGIC;
priv->tty = tty;
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 217429122e79..bdd5c979bead 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -4,7 +4,7 @@
* Version: 0.1.1
* Description: Irda KingSun/DonShine USB Dongle
* Status: Experimental
-* Author: Alex Villac�s Lasso <a_villacis@palosanto.com>
+* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
*
* Based on stir4200 and mcs7780 drivers, with (strange?) differences
*
@@ -652,6 +652,6 @@ static void __exit kingsun_cleanup(void)
}
module_exit(kingsun_cleanup);
-MODULE_AUTHOR("Alex Villac�s Lasso <a_villacis@palosanto.com>");
+MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bf78ef1120ad..0538ca9ce058 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -44,6 +44,7 @@ MODULE_LICENSE("GPL");
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -1660,8 +1661,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idev = ndev->priv;
spin_lock_init(&idev->lock);
- init_MUTEX(&idev->sem);
- down(&idev->sem);
+ mutex_init(&idev->mtx);
+ mutex_lock(&idev->mtx);
idev->pdev = pdev;
if (vlsi_irda_init(ndev) < 0)
@@ -1689,12 +1690,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
pci_set_drvdata(pdev, ndev);
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
out_freedev:
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
free_netdev(ndev);
out_disable:
pci_disable_device(pdev);
@@ -1716,12 +1717,12 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
idev = ndev->priv;
- down(&idev->sem);
+ mutex_lock(&idev->mtx);
if (idev->proc_entry) {
remove_proc_entry(ndev->name, vlsi_proc_root);
idev->proc_entry = NULL;
}
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
free_netdev(ndev);
@@ -1751,7 +1752,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
idev = ndev->priv;
- down(&idev->sem);
+ mutex_lock(&idev->mtx);
if (pdev->current_state != 0) { /* already suspended */
if (state.event > pdev->current_state) { /* simply go deeper */
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1759,7 +1760,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
}
else
IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, pci_name(pdev), pdev->current_state, state.event);
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
@@ -1775,7 +1776,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
pci_set_power_state(pdev, pci_choose_state(pdev, state));
pdev->current_state = state.event;
idev->resume_ok = 1;
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
@@ -1790,9 +1791,9 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
return 0;
}
idev = ndev->priv;
- down(&idev->sem);
+ mutex_lock(&idev->mtx);
if (pdev->current_state == 0) {
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
IRDA_WARNING("%s - %s: already resumed\n",
__FUNCTION__, pci_name(pdev));
return 0;
@@ -1814,7 +1815,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
* device and independently resume_ok should catch any garbage config.
*/
IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
@@ -1824,7 +1825,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
netif_device_attach(ndev);
}
idev->resume_ok = 0;
- up(&idev->sem);
+ mutex_unlock(&idev->mtx);
return 0;
}
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 2d3b773d8e35..ca12a6096419 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -728,7 +728,7 @@ typedef struct vlsi_irda_dev {
struct timeval last_rx;
spinlock_t lock;
- struct semaphore sem;
+ struct mutex mtx;
u8 resume_ok;
struct proc_dir_entry *proc_entry;
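
[Editor's note] The vlsi_ir change is a mechanical semaphore-to-mutex conversion: the semaphore only ever provided mutual exclusion around probe/remove/suspend/resume, so it maps one-to-one onto the mutex API. The correspondence, using the names from the diff:

	struct mutex mtx;          /* was: struct semaphore sem; */

	mutex_init(&idev->mtx);    /* was: init_MUTEX(&idev->sem); */
	mutex_lock(&idev->mtx);    /* was: down(&idev->sem);       */
	mutex_unlock(&idev->mtx);  /* was: up(&idev->sem);         */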
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 347d50cd77d4..0433c41f9029 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -822,10 +822,9 @@ static int veth_init_connection(u8 rlp)
|| ! HvLpConfig_doLpsCommunicateOnVirtualLan(this_lp, rlp) )
return 0;
- cnx = kmalloc(sizeof(*cnx), GFP_KERNEL);
+ cnx = kzalloc(sizeof(*cnx), GFP_KERNEL);
if (! cnx)
return -ENOMEM;
- memset(cnx, 0, sizeof(*cnx));
cnx->remote_lp = rlp;
spin_lock_init(&cnx->lock);
@@ -852,14 +851,13 @@ static int veth_init_connection(u8 rlp)
if (rc != 0)
return rc;
- msgs = kmalloc(VETH_NUMBUFFERS * sizeof(struct veth_msg), GFP_KERNEL);
+ msgs = kcalloc(VETH_NUMBUFFERS, sizeof(struct veth_msg), GFP_KERNEL);
if (! msgs) {
veth_error("Can't allocate buffers for LPAR %d.\n", rlp);
return -ENOMEM;
}
cnx->msgs = msgs;
- memset(msgs, 0, VETH_NUMBUFFERS * sizeof(struct veth_msg));
for (i = 0; i < VETH_NUMBUFFERS; i++) {
msgs[i].token = i;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5f694fc4a21..d9ce1aef148a 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -111,7 +111,7 @@ static int ixpdev_rx(struct net_device *dev, int *budget)
skb = dev_alloc_skb(desc->pkt_length + 2);
if (likely(skb != NULL)) {
skb_reserve(skb, 2);
- eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
+ skb_copy_to_linear_data(skb, buf, desc->pkt_length);
skb_put(skb, desc->pkt_length);
skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 0fe96c85828b..a4e5fab12628 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -533,11 +533,10 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
dev->base_addr = ioaddr;
/* Make certain the data structures used by the LANCE are aligned and DMAble. */
- lp = kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+ lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
if(lp==NULL)
return -ENODEV;
if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
- memset(lp, 0, sizeof(*lp));
dev->priv = lp;
lp->name = chipname;
lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
@@ -1186,9 +1185,9 @@ lance_rx(struct net_device *dev)
}
skb_reserve(skb,2); /* 16 byte align */
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
- pkt_len,0);
+ pkt_len);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c
new file mode 100644
index 000000000000..112778652f7d
--- /dev/null
+++ b/drivers/net/lguest_net.c
@@ -0,0 +1,354 @@
+/* A simple network driver for lguest.
+ *
+ * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+//#define DEBUG
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/mm_types.h>
+#include <linux/io.h>
+#include <linux/lguest_bus.h>
+
+#define SHARED_SIZE PAGE_SIZE
+#define MAX_LANS 4
+#define NUM_SKBS 8
+
+struct lguestnet_info
+{
+ /* The shared page(s). */
+ struct lguest_net *peer;
+ unsigned long peer_phys;
+ unsigned long mapsize;
+
+ /* The lguest_device I come from */
+ struct lguest_device *lgdev;
+
+ /* My peerid. */
+ unsigned int me;
+
+ /* Receive queue. */
+ struct sk_buff *skb[NUM_SKBS];
+ struct lguest_dma dma[NUM_SKBS];
+};
+
+/* How many bytes left in this page. */
+static unsigned int rest_of_page(void *data)
+{
+ return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
+}
+
+/* Simple convention: offset 4 * peernum. */
+static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum)
+{
+ return info->peer_phys + 4 * peernum;
+}
+
+static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen,
+ struct lguest_dma *dma)
+{
+ unsigned int i, seg;
+
+ for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) {
+ dma->addr[seg] = virt_to_phys(skb->data + i);
+ dma->len[seg] = min((unsigned)(headlen - i),
+ rest_of_page(skb->data + i));
+ }
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) {
+ const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */
+ if (seg == LGUEST_MAX_DMA_SECTIONS) {
+ printk("Woah dude! Megapacket!\n");
+ break;
+ }
+ dma->addr[seg] = page_to_phys(f->page) + f->page_offset;
+ dma->len[seg] = f->size;
+ }
+ if (seg < LGUEST_MAX_DMA_SECTIONS)
+ dma->len[seg] = 0;
+}
+
+/* We overload multicast bit to show promiscuous mode. */
+#define PROMISC_BIT 0x01
+
+static void lguestnet_set_multicast(struct net_device *dev)
+{
+ struct lguestnet_info *info = netdev_priv(dev);
+
+ if ((dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) || dev->mc_count)
+ info->peer[info->me].mac[0] |= PROMISC_BIT;
+ else
+ info->peer[info->me].mac[0] &= ~PROMISC_BIT;
+}
+
+static int promisc(struct lguestnet_info *info, unsigned int peer)
+{
+ return info->peer[peer].mac[0] & PROMISC_BIT;
+}
+
+static int mac_eq(const unsigned char mac[ETH_ALEN],
+ struct lguestnet_info *info, unsigned int peer)
+{
+ /* Ignore multicast bit, which peer turns on to mean promisc. */
+ if ((info->peer[peer].mac[0] & (~PROMISC_BIT)) != mac[0])
+ return 0;
+ return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0;
+}
+
+static void transfer_packet(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned int peernum)
+{
+ struct lguestnet_info *info = netdev_priv(dev);
+ struct lguest_dma dma;
+
+ skb_to_dma(skb, skb_headlen(skb), &dma);
+ pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len);
+
+ lguest_send_dma(peer_key(info, peernum), &dma);
+ if (dma.used_len != skb->len) {
+ dev->stats.tx_carrier_errors++;
+ pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n",
+ peernum, dma.used_len, skb->len,
+ (void *)dma.addr[0], dma.len[0]);
+ } else {
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ }
+}
+
+static int unused_peer(const struct lguest_net peer[], unsigned int num)
+{
+ return peer[num].mac[0] == 0;
+}
+
+static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int i;
+ int broadcast;
+ struct lguestnet_info *info = netdev_priv(dev);
+ const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+
+ pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]);
+
+ broadcast = is_multicast_ether_addr(dest);
+ for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) {
+ if (i == info->me || unused_peer(info->peer, i))
+ continue;
+
+ if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i))
+ continue;
+
+ pr_debug("lguestnet %s: sending from %i to %i\n",
+ dev->name, info->me, i);
+ transfer_packet(dev, skb, i);
+ }
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+/* Find a new skb to put in this slot in shared mem. */
+static int fill_slot(struct net_device *dev, unsigned int slot)
+{
+ struct lguestnet_info *info = netdev_priv(dev);
+ /* Try to create and register a new one. */
+ info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN);
+ if (!info->skb[slot]) {
+ printk("%s: could not fill slot %i\n", dev->name, slot);
+ return -ENOMEM;
+ }
+
+ skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]);
+ wmb();
+ /* Now we tell hypervisor it can use the slot. */
+ info->dma[slot].used_len = 0;
+ return 0;
+}
+
+static irqreturn_t lguestnet_rcv(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lguestnet_info *info = netdev_priv(dev);
+ unsigned int i, done = 0;
+
+ for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
+ unsigned int length;
+ struct sk_buff *skb;
+
+ length = info->dma[i].used_len;
+ if (length == 0)
+ continue;
+
+ done++;
+ skb = info->skb[i];
+ fill_slot(dev, i);
+
+ if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) {
+ pr_debug(KERN_WARNING "%s: unbelievable skb len: %i\n",
+ dev->name, length);
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, dev);
+ /* This is a reliable transport. */
+ if (dev->features & NETIF_F_NO_CSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
+ ntohs(skb->protocol), skb->len, skb->pkt_type);
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ netif_rx(skb);
+ }
+ return done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int lguestnet_open(struct net_device *dev)
+{
+ int i;
+ struct lguestnet_info *info = netdev_priv(dev);
+
+ /* Set up our MAC address */
+ memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN);
+
+ /* Turn on promisc mode if needed */
+ lguestnet_set_multicast(dev);
+
+ for (i = 0; i < ARRAY_SIZE(info->dma); i++) {
+ if (fill_slot(dev, i) != 0)
+ goto cleanup;
+ }
+ if (lguest_bind_dma(peer_key(info,info->me), info->dma,
+ NUM_SKBS, lgdev_irq(info->lgdev)) != 0)
+ goto cleanup;
+ return 0;
+
+cleanup:
+ while (--i >= 0)
+ dev_kfree_skb(info->skb[i]);
+ return -ENOMEM;
+}
+
+static int lguestnet_close(struct net_device *dev)
+{
+ unsigned int i;
+ struct lguestnet_info *info = netdev_priv(dev);
+
+ /* Clear all trace: others might deliver packets, we'll ignore it. */
+ memset(&info->peer[info->me], 0, sizeof(info->peer[info->me]));
+
+ /* Deregister sg lists. */
+ lguest_unbind_dma(peer_key(info, info->me), info->dma);
+ for (i = 0; i < ARRAY_SIZE(info->dma); i++)
+ dev_kfree_skb(info->skb[i]);
+ return 0;
+}
+
+static int lguestnet_probe(struct lguest_device *lgdev)
+{
+ int err, irqf = IRQF_SHARED;
+ struct net_device *dev;
+ struct lguestnet_info *info;
+ struct lguest_device_desc *desc = &lguest_devices[lgdev->index];
+
+ pr_debug("lguest_net: probing for device %i\n", lgdev->index);
+
+ dev = alloc_etherdev(sizeof(struct lguestnet_info));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_MODULE_OWNER(dev);
+
+ /* Ethernet defaults with some changes */
+ ether_setup(dev);
+ dev->set_mac_address = NULL;
+
+ dev->dev_addr[0] = 0x02; /* set local assignment bit (IEEE802) */
+ dev->dev_addr[1] = 0x00;
+ memcpy(&dev->dev_addr[2], &lguest_data.guestid, 2);
+ dev->dev_addr[4] = 0x00;
+ dev->dev_addr[5] = 0x00;
+
+ dev->open = lguestnet_open;
+ dev->stop = lguestnet_close;
+ dev->hard_start_xmit = lguestnet_start_xmit;
+
+ /* Turning on/off promisc will call dev->set_multicast_list.
+ * We don't actually support multicast yet */
+ dev->set_multicast_list = lguestnet_set_multicast;
+ SET_NETDEV_DEV(dev, &lgdev->dev);
+ if (desc->features & LGUEST_NET_F_NOCSUM)
+ dev->features = NETIF_F_SG|NETIF_F_NO_CSUM;
+
+ info = netdev_priv(dev);
+ info->mapsize = PAGE_SIZE * desc->num_pages;
+ info->peer_phys = ((unsigned long)desc->pfn << PAGE_SHIFT);
+ info->lgdev = lgdev;
+ info->peer = lguest_map(info->peer_phys, desc->num_pages);
+ if (!info->peer) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ /* This stores our peerid (upper bits reserved for future). */
+ info->me = (desc->features & (info->mapsize-1));
+
+ err = register_netdev(dev);
+ if (err) {
+ pr_debug("lguestnet: registering device failed\n");
+ goto unmap;
+ }
+
+ if (lguest_devices[lgdev->index].features & LGUEST_DEVICE_F_RANDOMNESS)
+ irqf |= IRQF_SAMPLE_RANDOM;
+ if (request_irq(lgdev_irq(lgdev), lguestnet_rcv, irqf, "lguestnet",
+ dev) != 0) {
+ pr_debug("lguestnet: cannot get irq %i\n", lgdev_irq(lgdev));
+ goto unregister;
+ }
+
+ pr_debug("lguestnet: registered device %s\n", dev->name);
+ lgdev->private = dev;
+ return 0;
+
+unregister:
+ unregister_netdev(dev);
+unmap:
+ lguest_unmap(info->peer);
+free:
+ free_netdev(dev);
+ return err;
+}
+
+static struct lguest_driver lguestnet_drv = {
+ .name = "lguestnet",
+ .owner = THIS_MODULE,
+ .device_type = LGUEST_DEVICE_T_NET,
+ .probe = lguestnet_probe,
+};
+
+static __init int lguestnet_init(void)
+{
+ return register_lguest_driver(&lguestnet_drv);
+}
+module_init(lguestnet_init);
+
+MODULE_DESCRIPTION("Lguest network driver");
+MODULE_LICENSE("GPL");
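
[Editor's sketch] skb_to_dma() above must describe a virtually contiguous buffer as per-page chunks, because the host side of the lguest DMA interface consumes physical addresses and a buffer that crosses a page boundary need not be physically contiguous. A standalone sketch of that splitting; the start address, length and 4 KB page size are made up for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long rest_of_page(unsigned long addr)
{
	return PAGE_SIZE - (addr % PAGE_SIZE);
}

int main(void)
{
	unsigned long addr = 0x12f80UL;   /* pretend buffer start  */
	unsigned long len  = 9000;        /* pretend buffer length */
	unsigned long i;
	unsigned int seg = 0;

	for (i = 0; i < len; i += rest_of_page(addr + i), seg++) {
		unsigned long chunk = rest_of_page(addr + i);

		if (chunk > len - i)
			chunk = len - i;
		printf("seg %u: addr 0x%lx len %lu\n", seg, addr + i, chunk);
	}
	return 0;
}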
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 26a3b45a4a34..62c1c6262feb 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -608,7 +608,7 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
MODULE_LICENSE("GPL");
-int
+int __init
init_module(void)
{
net_debug = debug;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 0e04f7ac3f2e..a4bb0264180a 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -17,13 +17,12 @@
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/mii.h>
-#include <linux/mutex.h>
#include <linux/dma-mapping.h>
-#include <linux/ethtool.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <asm/arch/board.h>
+#include <asm/arch/cpu.h>
#include "macb.h"
@@ -85,172 +84,202 @@ static void __init macb_get_hwaddr(struct macb *bp)
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}
-static void macb_enable_mdio(struct macb *bp)
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&bp->lock, flags);
- reg = macb_readl(bp, NCR);
- reg |= MACB_BIT(MPE);
- macb_writel(bp, NCR, reg);
- macb_writel(bp, IER, MACB_BIT(MFD));
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static void macb_disable_mdio(struct macb *bp)
-{
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&bp->lock, flags);
- reg = macb_readl(bp, NCR);
- reg &= ~MACB_BIT(MPE);
- macb_writel(bp, NCR, reg);
- macb_writel(bp, IDR, MACB_BIT(MFD));
- spin_unlock_irqrestore(&bp->lock, flags);
-}
-
-static int macb_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- struct macb *bp = netdev_priv(dev);
+ struct macb *bp = bus->priv;
int value;
- mutex_lock(&bp->mdio_mutex);
-
- macb_enable_mdio(bp);
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
| MACB_BF(RW, MACB_MAN_READ)
- | MACB_BF(PHYA, phy_id)
- | MACB_BF(REGA, location)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
| MACB_BF(CODE, MACB_MAN_CODE)));
- wait_for_completion(&bp->mdio_complete);
+ /* wait for end of transfer */
+ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+ cpu_relax();
value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
- macb_disable_mdio(bp);
- mutex_unlock(&bp->mdio_mutex);
return value;
}
-static void macb_mdio_write(struct net_device *dev, int phy_id,
- int location, int val)
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
- struct macb *bp = netdev_priv(dev);
-
- dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n",
- phy_id, location, val);
-
- mutex_lock(&bp->mdio_mutex);
- macb_enable_mdio(bp);
+ struct macb *bp = bus->priv;
macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
| MACB_BF(RW, MACB_MAN_WRITE)
- | MACB_BF(PHYA, phy_id)
- | MACB_BF(REGA, location)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
| MACB_BF(CODE, MACB_MAN_CODE)
- | MACB_BF(DATA, val)));
+ | MACB_BF(DATA, value)));
- wait_for_completion(&bp->mdio_complete);
+ /* wait for end of transfer */
+ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+ cpu_relax();
- macb_disable_mdio(bp);
- mutex_unlock(&bp->mdio_mutex);
+ return 0;
}
-static int macb_phy_probe(struct macb *bp)
+static int macb_mdio_reset(struct mii_bus *bus)
{
- int phy_address;
- u16 phyid1, phyid2;
+ return 0;
+}
- for (phy_address = 0; phy_address < 32; phy_address++) {
- phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1);
- phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2);
+static void macb_handle_link_change(struct net_device *dev)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+ unsigned long flags;
- if (phyid1 != 0xffff && phyid1 != 0x0000
- && phyid2 != 0xffff && phyid2 != 0x0000)
- break;
+ int status_change = 0;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ if (phydev->link) {
+ if ((bp->speed != phydev->speed) ||
+ (bp->duplex != phydev->duplex)) {
+ u32 reg;
+
+ reg = macb_readl(bp, NCFGR);
+ reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+ if (phydev->duplex)
+ reg |= MACB_BIT(FD);
+ if (phydev->speed)
+ reg |= MACB_BIT(SPD);
+
+ macb_writel(bp, NCFGR, reg);
+
+ bp->speed = phydev->speed;
+ bp->duplex = phydev->duplex;
+ status_change = 1;
+ }
}
- if (phy_address == 32)
- return -ENODEV;
+ if (phydev->link != bp->link) {
+ if (phydev->link)
+ netif_schedule(dev);
+ else {
+ bp->speed = 0;
+ bp->duplex = -1;
+ }
+ bp->link = phydev->link;
- dev_info(&bp->pdev->dev,
- "detected PHY at address %d (ID %04x:%04x)\n",
- phy_address, phyid1, phyid2);
+ status_change = 1;
+ }
- bp->mii.phy_id = phy_address;
- return 0;
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ printk(KERN_INFO "%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full":"Half");
+ else
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
}
-static void macb_set_media(struct macb *bp, int media)
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct net_device *dev)
{
- u32 reg;
+ struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ struct eth_platform_data *pdata;
+ int phy_addr;
- spin_lock_irq(&bp->lock);
- reg = macb_readl(bp, NCFGR);
- reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
- if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL))
- reg |= MACB_BIT(SPD);
- if (media & ADVERTISE_FULL)
- reg |= MACB_BIT(FD);
- macb_writel(bp, NCFGR, reg);
- spin_unlock_irq(&bp->lock);
+ /* find the first phy */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (bp->mii_bus.phy_map[phy_addr]) {
+ phydev = bp->mii_bus.phy_map[phy_addr];
+ break;
+ }
+ }
+
+ if (!phydev) {
+ printk (KERN_ERR "%s: no PHY found\n", dev->name);
+ return -1;
+ }
+
+ pdata = bp->pdev->dev.platform_data;
+ /* TODO : add pin_irq */
+
+ /* attach the mac to the phy */
+ if (pdata && pdata->is_rmii) {
+ phydev = phy_connect(dev, phydev->dev.bus_id,
+ &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
+ } else {
+ phydev = phy_connect(dev, phydev->dev.bus_id,
+ &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
+ }
+
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= PHY_BASIC_FEATURES;
+
+ phydev->advertising = phydev->supported;
+
+ bp->link = 0;
+ bp->speed = 0;
+ bp->duplex = -1;
+ bp->phy_dev = phydev;
+
+ return 0;
}
-static void macb_check_media(struct macb *bp, int ok_to_print, int init_media)
+static int macb_mii_init(struct macb *bp)
{
- struct mii_if_info *mii = &bp->mii;
- unsigned int old_carrier, new_carrier;
- int advertise, lpa, media, duplex;
+ struct eth_platform_data *pdata;
+ int err = -ENXIO, i;
- /* if forced media, go no further */
- if (mii->force_media)
- return;
+ /* Enable management port */
+ macb_writel(bp, NCR, MACB_BIT(MPE));
- /* check current and old link status */
- old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
- new_carrier = (unsigned int) mii_link_ok(mii);
+ bp->mii_bus.name = "MACB_mii_bus",
+ bp->mii_bus.read = &macb_mdio_read,
+ bp->mii_bus.write = &macb_mdio_write,
+ bp->mii_bus.reset = &macb_mdio_reset,
+ bp->mii_bus.id = bp->pdev->id,
+ bp->mii_bus.priv = bp,
+ bp->mii_bus.dev = &bp->dev->dev;
+ pdata = bp->pdev->dev.platform_data;
- /* if carrier state did not change, assume nothing else did */
- if (!init_media && old_carrier == new_carrier)
- return;
+ if (pdata)
+ bp->mii_bus.phy_mask = pdata->phy_mask;
- /* no carrier, nothing much to do */
- if (!new_carrier) {
- netif_carrier_off(mii->dev);
- printk(KERN_INFO "%s: link down\n", mii->dev->name);
- return;
+ bp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bp->mii_bus.irq) {
+ err = -ENOMEM;
+ goto err_out;
}
- /*
- * we have carrier, see who's on the other end
- */
- netif_carrier_on(mii->dev);
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus.irq[i] = PHY_POLL;
- /* get MII advertise and LPA values */
- if (!init_media && mii->advertising) {
- advertise = mii->advertising;
- } else {
- advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
- mii->advertising = advertise;
- }
- lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+ platform_set_drvdata(bp->dev, &bp->mii_bus);
- /* figure out media and duplex from advertise and LPA values */
- media = mii_nway_result(lpa & advertise);
- duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+ if (mdiobus_register(&bp->mii_bus))
+ goto err_out_free_mdio_irq;
- if (ok_to_print)
- printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n",
- mii->dev->name,
- media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10",
- duplex ? "full" : "half", lpa);
+ if (macb_mii_probe(bp->dev) != 0) {
+ goto err_out_unregister_bus;
+ }
- mii->full_duplex = duplex;
+ return 0;
- /* Let the MAC know about the new link state */
- macb_set_media(bp, media);
+err_out_unregister_bus:
+ mdiobus_unregister(&bp->mii_bus);
+err_out_free_mdio_irq:
+ kfree(bp->mii_bus.irq);
+err_out:
+ return err;
}
static void macb_update_stats(struct macb *bp)
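
[Editor's note] The first macb hunk drops the driver's private mii_if_info and MDIO-completion machinery and hands PHY handling to phylib: MDIO accesses become synchronous mii_bus read/write callbacks that poll the NSR IDLE bit, and link changes arrive through macb_handle_link_change() once the PHY is attached. A condensed sketch of the attach sequence, using names taken from the diff with error handling trimmed:

	phydev = phy_connect(dev, phydev->dev.bus_id, &macb_handle_link_change,
			     0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	phydev->supported &= PHY_BASIC_FEATURES;  /* limit to what the MAC can do */
	phydev->advertising = phydev->supported;
	...
	phy_start(bp->phy_dev);                   /* in macb_open(): start autonegotiation */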
@@ -265,16 +294,6 @@ static void macb_update_stats(struct macb *bp)
*p += __raw_readl(reg);
}
-static void macb_periodic_task(struct work_struct *work)
-{
- struct macb *bp = container_of(work, struct macb, periodic_task.work);
-
- macb_update_stats(bp);
- macb_check_media(bp, 1, 0);
-
- schedule_delayed_work(&bp->periodic_task, HZ);
-}
-
static void macb_tx(struct macb *bp)
{
unsigned int tail;
@@ -519,9 +538,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
spin_lock(&bp->lock);
while (status) {
- if (status & MACB_BIT(MFD))
- complete(&bp->mdio_complete);
-
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
macb_writel(bp, IDR, ~0UL);
@@ -535,7 +551,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* until we have processed the buffers
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
- dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n");
+ dev_dbg(&bp->pdev->dev,
+ "scheduling RX softirq\n");
__netif_rx_schedule(dev);
}
}
@@ -765,7 +782,7 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, TBQP, bp->tx_ring_dma);
/* Enable TX and RX */
- macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
/* Enable interrupts */
macb_writel(bp, IER, (MACB_BIT(RCOMP)
@@ -776,18 +793,126 @@ static void macb_init_hw(struct macb *bp)
| MACB_BIT(TCOMP)
| MACB_BIT(ISR_ROVR)
| MACB_BIT(HRESP)));
+
}
-static void macb_init_phy(struct net_device *dev)
+/*
+ * The hash address register is 64 bits long and takes up two
+ * locations in the memory map. The least significant bits are stored
+ * in EMAC_HSL and the most significant bits in EMAC_HSH.
+ *
+ * The unicast hash enable and the multicast hash enable bits in the
+ * network configuration register enable the reception of hash matched
+ * frames. The destination address is reduced to a 6 bit index into
+ * the 64 bit hash register using the following hash function. The
+ * hash function is an exclusive or of every sixth bit of the
+ * destination address.
+ *
+ * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
+ * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
+ * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
+ * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
+ * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
+ * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
+ *
+ * da[0] represents the least significant bit of the first byte
+ * received, that is, the multicast/unicast indicator, and da[47]
+ * represents the most significant bit of the last byte received. If
+ * the hash index, hi[n], points to a bit that is set in the hash
+ * register then the frame will be matched according to whether the
+ * frame is multicast or unicast. A multicast match will be signalled
+ * if the multicast hash enable bit is set, da[0] is 1 and the hash
+ * index points to a bit set in the hash register. A unicast match
+ * will be signalled if the unicast hash enable bit is set, da[0] is 0
+ * and the hash index points to a bit set in the hash register. To
+ * receive all multicast frames, the hash register should be set with
+ * all ones and the multicast hash enable bit should be set in the
+ * network configuration register.
+ */
+
+static inline int hash_bit_value(int bitnr, __u8 *addr)
{
+ if (addr[bitnr / 8] & (1 << (bitnr % 8)))
+ return 1;
+ return 0;
+}
+
+/*
+ * Return the hash index value for the specified address.
+ */
+static int hash_get_index(__u8 *addr)
+{
+ int i, j, bitval;
+ int hash_index = 0;
+
+ for (j = 0; j < 6; j++) {
+ for (i = 0, bitval = 0; i < 8; i++)
+ bitval ^= hash_bit_value(i*6 + j, addr);
+
+ hash_index |= (bitval << j);
+ }
+
+ return hash_index;
+}
+
+/*
+ * Add multicast addresses to the internal multicast-hash table.
+ */
+static void macb_sethashtable(struct net_device *dev)
+{
+ struct dev_mc_list *curr;
+ unsigned long mc_filter[2];
+ unsigned int i, bitnr;
+ struct macb *bp = netdev_priv(dev);
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ curr = dev->mc_list;
+ for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ if (!curr) break; /* unexpected end of list */
+
+ bitnr = hash_get_index(curr->dmi_addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ macb_writel(bp, HRB, mc_filter[0]);
+ macb_writel(bp, HRT, mc_filter[1]);
+}
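To make the hash scheme described above concrete, here is a minimal standalone sketch (ordinary userspace C, not driver code; the multicast address is just an arbitrary example) that derives the index the same way hash_get_index() does and shows which half of the HRB/HRT pair macb_sethashtable() would touch:

#include <stdio.h>

/* Same bit extraction as hash_bit_value(): da[0] is the LSB of the
 * first byte of the destination address. */
static int bit_value(int bitnr, const unsigned char *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

/* Same fold as hash_get_index(): XOR every sixth address bit. */
static int get_index(const unsigned char *addr)
{
	int i, j, bitval, index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= bit_value(i * 6 + j, addr);
		index |= bitval << j;
	}
	return index;
}

int main(void)
{
	/* Arbitrary example: the IPv4 all-hosts multicast MAC address. */
	const unsigned char da[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int idx = get_index(da);

	/* Indexes 0-31 select a bit in HRB, 32-63 a bit in HRT,
	 * exactly as macb_sethashtable() splits mc_filter[]. */
	printf("hash index %d -> %s bit %d\n",
	       idx, idx < 32 ? "HRB" : "HRT", idx & 31);
	return 0;
}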
+
+/*
+ * Enable/Disable promiscuous and multicast modes.
+ */
+static void macb_set_rx_mode(struct net_device *dev)
+{
+ unsigned long cfg;
struct macb *bp = netdev_priv(dev);
- /* Set some reasonable default settings */
- macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE,
- ADVERTISE_CSMA | ADVERTISE_ALL);
- macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR,
- (BMCR_SPEED100 | BMCR_ANENABLE
- | BMCR_ANRESTART | BMCR_FULLDPLX));
+ cfg = macb_readl(bp, NCFGR);
+
+ if (dev->flags & IFF_PROMISC)
+ /* Enable promiscuous mode */
+ cfg |= MACB_BIT(CAF);
+	else
+ /* Disable promiscuous mode */
+ cfg &= ~MACB_BIT(CAF);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Enable all multicast mode */
+ macb_writel(bp, HRB, -1);
+ macb_writel(bp, HRT, -1);
+ cfg |= MACB_BIT(NCFGR_MTI);
+ } else if (dev->mc_count > 0) {
+ /* Enable specific multicasts */
+ macb_sethashtable(dev);
+ cfg |= MACB_BIT(NCFGR_MTI);
+ } else if (dev->flags & (~IFF_ALLMULTI)) {
+ /* Disable all multicast mode */
+ macb_writel(bp, HRB, 0);
+ macb_writel(bp, HRT, 0);
+ cfg &= ~MACB_BIT(NCFGR_MTI);
+ }
+
+ macb_writel(bp, NCFGR, cfg);
}
static int macb_open(struct net_device *dev)
@@ -797,6 +922,10 @@ static int macb_open(struct net_device *dev)
dev_dbg(&bp->pdev->dev, "open\n");
+	/* if the PHY is not yet registered, retry later */
+ if (!bp->phy_dev)
+ return -EAGAIN;
+
if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
@@ -810,12 +939,11 @@ static int macb_open(struct net_device *dev)
macb_init_rings(bp);
macb_init_hw(bp);
- macb_init_phy(dev);
- macb_check_media(bp, 1, 1);
- netif_start_queue(dev);
+ /* schedule a link state check */
+ phy_start(bp->phy_dev);
- schedule_delayed_work(&bp->periodic_task, HZ);
+ netif_start_queue(dev);
return 0;
}
@@ -825,10 +953,11 @@ static int macb_close(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
unsigned long flags;
- cancel_rearming_delayed_work(&bp->periodic_task);
-
netif_stop_queue(dev);
+ if (bp->phy_dev)
+ phy_stop(bp->phy_dev);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -845,6 +974,9 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
struct net_device_stats *nstat = &bp->stats;
struct macb_stats *hwstat = &bp->hw_stats;
+ /* read stats from hardware */
+ macb_update_stats(bp);
+
/* Convert HW stats into netdevice stats */
nstat->rx_errors = (hwstat->rx_fcs_errors +
hwstat->rx_align_errors +
@@ -882,18 +1014,27 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
- return mii_ethtool_gset(&bp->mii, cmd);
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
}
static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
- return mii_ethtool_sset(&bp->mii, cmd);
+ return phy_ethtool_sset(phydev, cmd);
}
-static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void macb_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct macb *bp = netdev_priv(dev);
@@ -902,104 +1043,34 @@ static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *inf
strcpy(info->bus_info, bp->pdev->dev.bus_id);
}
-static int macb_nway_reset(struct net_device *dev)
-{
- struct macb *bp = netdev_priv(dev);
- return mii_nway_restart(&bp->mii);
-}
-
static struct ethtool_ops macb_ethtool_ops = {
.get_settings = macb_get_settings,
.set_settings = macb_set_settings,
.get_drvinfo = macb_get_drvinfo,
- .nway_reset = macb_nway_reset,
.get_link = ethtool_op_get_link,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct macb *bp = netdev_priv(dev);
+ struct phy_device *phydev = bp->phy_dev;
if (!netif_running(dev))
return -EINVAL;
- return generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL);
-}
-
-static ssize_t macb_mii_show(const struct device *_dev, char *buf,
- unsigned long addr)
-{
- struct net_device *dev = to_net_dev(_dev);
- struct macb *bp = netdev_priv(dev);
- ssize_t ret = -EINVAL;
-
- if (netif_running(dev)) {
- int value;
- value = macb_mdio_read(dev, bp->mii.phy_id, addr);
- ret = sprintf(buf, "0x%04x\n", (uint16_t)value);
- }
-
- return ret;
-}
-
-#define MII_ENTRY(name, addr) \
-static ssize_t show_##name(struct device *_dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- return macb_mii_show(_dev, buf, addr); \
-} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-
-MII_ENTRY(bmcr, MII_BMCR);
-MII_ENTRY(bmsr, MII_BMSR);
-MII_ENTRY(physid1, MII_PHYSID1);
-MII_ENTRY(physid2, MII_PHYSID2);
-MII_ENTRY(advertise, MII_ADVERTISE);
-MII_ENTRY(lpa, MII_LPA);
-MII_ENTRY(expansion, MII_EXPANSION);
-
-static struct attribute *macb_mii_attrs[] = {
- &dev_attr_bmcr.attr,
- &dev_attr_bmsr.attr,
- &dev_attr_physid1.attr,
- &dev_attr_physid2.attr,
- &dev_attr_advertise.attr,
- &dev_attr_lpa.attr,
- &dev_attr_expansion.attr,
- NULL,
-};
-
-static struct attribute_group macb_mii_group = {
- .name = "mii",
- .attrs = macb_mii_attrs,
-};
-
-static void macb_unregister_sysfs(struct net_device *net)
-{
- struct device *_dev = &net->dev;
+ if (!phydev)
+ return -ENODEV;
- sysfs_remove_group(&_dev->kobj, &macb_mii_group);
+ return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-static int macb_register_sysfs(struct net_device *net)
-{
- struct device *_dev = &net->dev;
- int ret;
-
- ret = sysfs_create_group(&_dev->kobj, &macb_mii_group);
- if (ret)
- printk(KERN_WARNING
- "%s: sysfs mii attribute registration failed: %d\n",
- net->name, ret);
- return ret;
-}
static int __devinit macb_probe(struct platform_device *pdev)
{
struct eth_platform_data *pdata;
struct resource *regs;
struct net_device *dev;
struct macb *bp;
+ struct phy_device *phydev;
unsigned long pclk_hz;
u32 config;
int err = -ENXIO;
@@ -1073,6 +1144,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
dev->stop = macb_close;
dev->hard_start_xmit = macb_start_xmit;
dev->get_stats = macb_get_stats;
+ dev->set_multicast_list = macb_set_rx_mode;
dev->do_ioctl = macb_ioctl;
dev->poll = macb_poll;
dev->weight = 64;
@@ -1080,10 +1152,6 @@ static int __devinit macb_probe(struct platform_device *pdev)
dev->base_addr = regs->start;
- INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
- mutex_init(&bp->mdio_mutex);
- init_completion(&bp->mdio_complete);
-
/* Set MII management clock divider */
pclk_hz = clk_get_rate(bp->pclk);
if (pclk_hz <= 20000000)
@@ -1096,20 +1164,9 @@ static int __devinit macb_probe(struct platform_device *pdev)
config = MACB_BF(CLK, MACB_CLK_DIV64);
macb_writel(bp, NCFGR, config);
- bp->mii.dev = dev;
- bp->mii.mdio_read = macb_mdio_read;
- bp->mii.mdio_write = macb_mdio_write;
- bp->mii.phy_id_mask = 0x1f;
- bp->mii.reg_num_mask = 0x1f;
-
macb_get_hwaddr(bp);
- err = macb_phy_probe(bp);
- if (err) {
- dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n");
- goto err_out_free_irq;
- }
-
pdata = pdev->dev.platform_data;
+
if (pdata && pdata->is_rmii)
#if defined(CONFIG_ARCH_AT91)
macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
@@ -1131,9 +1188,11 @@ static int __devinit macb_probe(struct platform_device *pdev)
goto err_out_free_irq;
}
- platform_set_drvdata(pdev, dev);
+ if (macb_mii_init(bp) != 0) {
+ goto err_out_unregister_netdev;
+ }
- macb_register_sysfs(dev);
+ platform_set_drvdata(pdev, dev);
printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d "
"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
@@ -1141,8 +1200,15 @@ static int __devinit macb_probe(struct platform_device *pdev)
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ phydev = bp->phy_dev;
+ printk(KERN_INFO "%s: attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
+ dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
+
return 0;
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_out_free_irq:
free_irq(dev->irq, dev);
err_out_iounmap:
@@ -1153,7 +1219,9 @@ err_out_disable_clocks:
clk_put(bp->hclk);
#endif
clk_disable(bp->pclk);
+#ifndef CONFIG_ARCH_AT91
err_out_put_pclk:
+#endif
clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
@@ -1171,7 +1239,8 @@ static int __devexit macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- macb_unregister_sysfs(dev);
+ mdiobus_unregister(&bp->mii_bus);
+ kfree(bp->mii_bus.irq);
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index b3bb2182edd1..4e3283ebd97c 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -383,11 +383,11 @@ struct macb {
unsigned int rx_pending, tx_pending;
- struct delayed_work periodic_task;
-
- struct mutex mdio_mutex;
- struct completion mdio_complete;
- struct mii_if_info mii;
+ struct mii_bus mii_bus;
+ struct phy_device *phy_dev;
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
};
#endif /* _MACB_H */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
new file mode 100644
index 000000000000..dc74d006e01f
--- /dev/null
+++ b/drivers/net/macvlan.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * The code this is based on carried the following copyright notice:
+ * ---
+ * (C) Copyright 2001-2006
+ * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
+ * Re-worked by Ben Greear <greearb@candelatech.com>
+ * ---
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_arp.h>
+#include <linux/if_link.h>
+#include <linux/if_macvlan.h>
+#include <net/rtnetlink.h>
+
+#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct macvlan_port {
+ struct net_device *dev;
+ struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
+ struct list_head vlans;
+};
+
+struct macvlan_dev {
+ struct net_device *dev;
+ struct list_head list;
+ struct hlist_node hlist;
+ struct macvlan_port *port;
+ struct net_device *lowerdev;
+};
+
+
+static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
+ const unsigned char *addr)
+{
+ struct macvlan_dev *vlan;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
+ if (!compare_ether_addr(vlan->dev->dev_addr, addr))
+ return vlan;
+ }
+ return NULL;
+}
+
+static void macvlan_broadcast(struct sk_buff *skb,
+ const struct macvlan_port *port)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+ const struct macvlan_dev *vlan;
+ struct hlist_node *n;
+ struct net_device *dev;
+ struct sk_buff *nskb;
+ unsigned int i;
+
+ for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
+ dev = vlan->dev;
+ if (unlikely(!(dev->flags & IFF_UP)))
+ continue;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (nskb == NULL) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ dev->stats.rx_bytes += skb->len + ETH_HLEN;
+ dev->stats.rx_packets++;
+ dev->stats.multicast++;
+ dev->last_rx = jiffies;
+
+ nskb->dev = dev;
+ if (!compare_ether_addr(eth->h_dest, dev->broadcast))
+ nskb->pkt_type = PACKET_BROADCAST;
+ else
+ nskb->pkt_type = PACKET_MULTICAST;
+
+ netif_rx(nskb);
+ }
+ }
+}
+
+/* called under rcu_read_lock() from netif_receive_skb */
+static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+ const struct macvlan_port *port;
+ const struct macvlan_dev *vlan;
+ struct net_device *dev;
+
+ port = rcu_dereference(skb->dev->macvlan_port);
+ if (port == NULL)
+ return skb;
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ macvlan_broadcast(skb, port);
+ return skb;
+ }
+
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (vlan == NULL)
+ return skb;
+
+ dev = vlan->dev;
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == NULL) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+ return NULL;
+ }
+
+ dev->stats.rx_bytes += skb->len + ETH_HLEN;
+ dev->stats.rx_packets++;
+ dev->last_rx = jiffies;
+
+ skb->dev = dev;
+ skb->pkt_type = PACKET_HOST;
+
+ netif_rx(skb);
+ return NULL;
+}
+
+static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct macvlan_dev *vlan = netdev_priv(dev);
+ unsigned int len = skb->len;
+ int ret;
+
+ skb->dev = vlan->lowerdev;
+ ret = dev_queue_xmit(skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+ } else {
+ dev->stats.tx_errors++;
+ dev->stats.tx_aborted_errors++;
+ }
+ return NETDEV_TX_OK;
+}
+
+static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, void *daddr, void *saddr,
+ unsigned len)
+{
+ const struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+ return lowerdev->hard_header(skb, lowerdev, type, daddr,
+ saddr ? : dev->dev_addr, len);
+}
+
+static int macvlan_open(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_port *port = vlan->port;
+ struct net_device *lowerdev = vlan->lowerdev;
+ int err;
+
+ err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
+ if (err < 0)
+ return err;
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, 1);
+
+ hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[dev->dev_addr[5]]);
+ return 0;
+}
+
+static int macvlan_stop(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+ dev_mc_unsync(lowerdev, dev);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, -1);
+
+ dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);
+
+ hlist_del_rcu(&vlan->hlist);
+ synchronize_rcu();
+ return 0;
+}
+
+static void macvlan_change_rx_flags(struct net_device *dev, int change)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+}
+
+static void macvlan_set_multicast_list(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ dev_mc_sync(vlan->lowerdev, dev);
+}
+
+static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * macvlan network devices have devices nesting below them and are a special
+ * "super class" of normal network devices; split their locks off into a
+ * separate class since they always nest.
+ */
+static struct lock_class_key macvlan_netdev_xmit_lock_key;
+
+#define MACVLAN_FEATURES \
+ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
+#define MACVLAN_STATE_MASK \
+ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+
+static int macvlan_init(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ const struct net_device *lowerdev = vlan->lowerdev;
+
+ dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
+ (lowerdev->state & MACVLAN_STATE_MASK);
+ dev->features = lowerdev->features & MACVLAN_FEATURES;
+ dev->iflink = lowerdev->ifindex;
+
+ lockdep_set_class(&dev->_xmit_lock, &macvlan_netdev_xmit_lock_key);
+ return 0;
+}
+
+static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ snprintf(drvinfo->driver, 32, "macvlan");
+ snprintf(drvinfo->version, 32, "0.1");
+}
+
+static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
+{
+ const struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+ if (lowerdev->ethtool_ops->get_rx_csum == NULL)
+ return 0;
+ return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
+}
+
+static const struct ethtool_ops macvlan_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = macvlan_ethtool_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_tso = ethtool_op_get_tso,
+ .get_ufo = ethtool_op_get_ufo,
+ .get_sg = ethtool_op_get_sg,
+ .get_drvinfo = macvlan_ethtool_get_drvinfo,
+};
+
+static void macvlan_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->init = macvlan_init;
+ dev->open = macvlan_open;
+ dev->stop = macvlan_stop;
+ dev->change_mtu = macvlan_change_mtu;
+ dev->change_rx_flags = macvlan_change_rx_flags;
+ dev->set_multicast_list = macvlan_set_multicast_list;
+ dev->hard_header = macvlan_hard_header;
+ dev->hard_start_xmit = macvlan_hard_start_xmit;
+ dev->destructor = free_netdev;
+ dev->ethtool_ops = &macvlan_ethtool_ops;
+ dev->tx_queue_len = 0;
+}
+
+static int macvlan_port_create(struct net_device *dev)
+{
+ struct macvlan_port *port;
+ unsigned int i;
+
+ if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
+ return -EINVAL;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (port == NULL)
+ return -ENOMEM;
+
+ port->dev = dev;
+ INIT_LIST_HEAD(&port->vlans);
+ for (i = 0; i < MACVLAN_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&port->vlan_hash[i]);
+ rcu_assign_pointer(dev->macvlan_port, port);
+ return 0;
+}
+
+static void macvlan_port_destroy(struct net_device *dev)
+{
+ struct macvlan_port *port = dev->macvlan_port;
+
+ rcu_assign_pointer(dev->macvlan_port, NULL);
+ synchronize_rcu();
+ kfree(port);
+}
+
+static void macvlan_transfer_operstate(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ const struct net_device *lowerdev = vlan->lowerdev;
+
+ if (lowerdev->operstate == IF_OPER_DORMANT)
+ netif_dormant_on(dev);
+ else
+ netif_dormant_off(dev);
+
+ if (netif_carrier_ok(lowerdev)) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else {
+		if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ }
+}
+
+static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static int macvlan_newlink(struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_port *port;
+ struct net_device *lowerdev;
+ int err;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ lowerdev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK]));
+ if (lowerdev == NULL)
+ return -ENODEV;
+
+ if (!tb[IFLA_MTU])
+ dev->mtu = lowerdev->mtu;
+ else if (dev->mtu > lowerdev->mtu)
+ return -EINVAL;
+
+ if (!tb[IFLA_ADDRESS])
+ random_ether_addr(dev->dev_addr);
+
+ if (lowerdev->macvlan_port == NULL) {
+ err = macvlan_port_create(lowerdev);
+ if (err < 0)
+ return err;
+ }
+ port = lowerdev->macvlan_port;
+
+ vlan->lowerdev = lowerdev;
+ vlan->dev = dev;
+ vlan->port = port;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ return err;
+
+ list_add_tail(&vlan->list, &port->vlans);
+ macvlan_transfer_operstate(dev);
+ return 0;
+}
+
+static void macvlan_dellink(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_port *port = vlan->port;
+
+ list_del(&vlan->list);
+ unregister_netdevice(dev);
+
+ if (list_empty(&port->vlans))
+		macvlan_port_destroy(port->dev);
+}
+
+static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
+ .kind = "macvlan",
+ .priv_size = sizeof(struct macvlan_dev),
+ .setup = macvlan_setup,
+ .validate = macvlan_validate,
+ .newlink = macvlan_newlink,
+ .dellink = macvlan_dellink,
+};
+
+static int macvlan_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct macvlan_dev *vlan, *next;
+ struct macvlan_port *port;
+
+ port = dev->macvlan_port;
+ if (port == NULL)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGE:
+ list_for_each_entry(vlan, &port->vlans, list)
+ macvlan_transfer_operstate(vlan->dev);
+ break;
+ case NETDEV_FEAT_CHANGE:
+ list_for_each_entry(vlan, &port->vlans, list) {
+ vlan->dev->features = dev->features & MACVLAN_FEATURES;
+ netdev_features_change(vlan->dev);
+ }
+ break;
+ case NETDEV_UNREGISTER:
+ list_for_each_entry_safe(vlan, next, &port->vlans, list)
+ macvlan_dellink(vlan->dev);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block macvlan_notifier_block __read_mostly = {
+ .notifier_call = macvlan_device_event,
+};
+
+static int __init macvlan_init_module(void)
+{
+ int err;
+
+ register_netdevice_notifier(&macvlan_notifier_block);
+ macvlan_handle_frame_hook = macvlan_handle_frame;
+
+ err = rtnl_link_register(&macvlan_link_ops);
+ if (err < 0)
+ goto err1;
+ return 0;
+err1:
+	macvlan_handle_frame_hook = NULL;
+ unregister_netdevice_notifier(&macvlan_notifier_block);
+ return err;
+}
+
+static void __exit macvlan_cleanup_module(void)
+{
+ rtnl_link_unregister(&macvlan_link_ops);
+ macvlan_handle_frame_hook = NULL;
+ unregister_netdevice_notifier(&macvlan_notifier_block);
+}
+
+module_init(macvlan_init_module);
+module_exit(macvlan_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Driver for MAC address based VLANs");
+MODULE_ALIAS_RTNL_LINK("macvlan");
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 1bb088aeaf71..6b32ec94b3a8 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -30,41 +30,133 @@
* SOFTWARE.
*/
+#include <linux/workqueue.h>
+
#include "mlx4.h"
-void mlx4_handle_catas_err(struct mlx4_dev *dev)
+enum {
+ MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct workqueue_struct *catas_wq;
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+ "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- mlx4_err(dev, "Catastrophic error detected:\n");
+ mlx4_err(dev, "Internal error detected:\n");
for (i = 0; i < priv->fw.catas_size; ++i)
mlx4_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(priv->catas_err.map + i)));
+}
- mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+static void poll_catas(unsigned long dev_ptr)
+{
+ struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (readl(priv->catas_err.map)) {
+ dump_err_buf(dev);
+
+ mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+
+ if (internal_err_reset) {
+ spin_lock(&catas_lock);
+ list_add(&priv->catas_err.list, &catas_list);
+ spin_unlock(&catas_lock);
+
+ queue_work(catas_wq, &catas_work);
+ }
+ } else
+ mod_timer(&priv->catas_err.timer,
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
}
-void mlx4_map_catas_buf(struct mlx4_dev *dev)
+static void catas_reset(struct work_struct *work)
+{
+ struct mlx4_priv *priv, *tmppriv;
+ struct mlx4_dev *dev;
+
+ LIST_HEAD(tlist);
+ int ret;
+
+ spin_lock_irq(&catas_lock);
+ list_splice_init(&catas_list, &tlist);
+ spin_unlock_irq(&catas_lock);
+
+ list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+ ret = mlx4_restart_one(priv->dev.pdev);
+ dev = &priv->dev;
+ if (ret)
+ mlx4_err(dev, "Reset failed (%d)\n", ret);
+ else
+ mlx4_dbg(dev, "Reset succeeded\n");
+ }
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long addr;
+ INIT_LIST_HEAD(&priv->catas_err.list);
+ init_timer(&priv->catas_err.timer);
+ priv->catas_err.map = NULL;
+
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
priv->fw.catas_offset;
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
- if (!priv->catas_err.map)
- mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
+ if (!priv->catas_err.map) {
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
addr);
+ return;
+ }
+ priv->catas_err.timer.data = (unsigned long) dev;
+ priv->catas_err.timer.function = poll_catas;
+ priv->catas_err.timer.expires =
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+ add_timer(&priv->catas_err.timer);
}
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ del_timer_sync(&priv->catas_err.timer);
+
if (priv->catas_err.map)
iounmap(priv->catas_err.map);
+
+ spin_lock_irq(&catas_lock);
+ list_del(&priv->catas_err.list);
+ spin_unlock_irq(&catas_lock);
+}
+
+int __init mlx4_catas_init(void)
+{
+ INIT_WORK(&catas_work, catas_reset);
+
+ catas_wq = create_singlethread_workqueue("mlx4_err");
+ if (!catas_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_catas_cleanup(void)
+{
+ destroy_workqueue(catas_wq);
}
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index c1f81a993f5d..a9f31753661a 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -246,8 +246,6 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
- context->token += priv->cmd.token_mask + 1;
-
complete(&context->done);
}
@@ -264,6 +262,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
spin_lock(&cmd->context_lock);
BUG_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 27a82cecd693..2095c843fa15 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -89,14 +89,12 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
- (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD))
-#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
struct mlx4_eqe {
u8 reserved1;
@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
-static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
-{
- mlx4_handle_catas_err(dev_ptr);
-
- /* MSI-X vectors always belong to us */
- return IRQ_HANDLED;
-}
-
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
if (eq_table->eq[i].have_irq)
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
- if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
- free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
}
static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
if (dev->flags & MLX4_FLAG_MSI_X) {
static const char *eq_name[] = {
[MLX4_EQ_COMP] = DRV_NAME " (comp)",
- [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
- [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+ [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
};
- err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
- &priv->eq_table.eq[MLX4_EQ_CATAS]);
- if (err)
- goto err_out_async;
-
- for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+ for (i = 0; i < MLX4_NUM_EQ; ++i) {
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt,
0, eq_name[i], priv->eq_table.eq + i);
if (err)
- goto err_out_catas;
+ goto err_out_async;
priv->eq_table.eq[i].have_irq = 1;
}
- err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
- mlx4_catas_interrupt, 0,
- eq_name[MLX4_EQ_CATAS], dev);
- if (err)
- goto err_out_catas;
-
- priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
} else {
err = request_irq(dev->pdev->irq, mlx4_interrupt,
IRQF_SHARED, DRV_NAME, dev);
@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
eq_set_ci(&priv->eq_table.eq[i], 1);
- if (dev->flags & MLX4_FLAG_MSI_X) {
- err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
- if (err)
- mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
- }
-
return 0;
-err_out_catas:
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
-
err_out_async:
mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- if (dev->flags & MLX4_FLAG_MSI_X)
- mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
mlx4_free_irqs(dev);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- if (dev->flags & MLX4_FLAG_MSI_X)
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
mlx4_unmap_clr_int(dev);
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index d2b065351e45..c45cbe43a0c4 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -138,6 +138,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
+#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
@@ -220,6 +221,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->local_ca_ack_delay = field & 0x1f;
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->num_ports = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
+ dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 296254ac27c1..7e1dd9e25cfb 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -60,6 +60,7 @@ struct mlx4_dev_cap {
int max_rdma_global;
int local_ca_ack_delay;
int num_ports;
+ u32 max_msg_sz;
int max_mtu[MLX4_MAX_PORTS + 1];
int max_port_width[MLX4_MAX_PORTS + 1];
int max_vl[MLX4_MAX_PORTS + 1];
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 9ae951bf6aa6..be5d9e90ccf2 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c3da2a2f5431..4dc9dc19b716 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata =
static struct mlx4_profile default_profile = {
.num_qp = 1 << 16,
.num_srq = 1 << 16,
- .rdmarc_per_qp = 4,
+ .rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 17,
@@ -154,6 +154,7 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev
dev->caps.reserved_uars = dev_cap->reserved_uars;
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+ dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
@@ -582,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
goto err_pd_table_free;
}
- mlx4_map_catas_buf(dev);
-
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
- goto err_catas_buf;
+ goto err_mr_table_free;
}
err = mlx4_cmd_use_events(dev);
@@ -658,8 +657,7 @@ err_cmd_poll:
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
-err_catas_buf:
- mlx4_unmap_catas_buf(dev);
+err_mr_table_free:
mlx4_cleanup_mr_table(dev);
err_pd_table_free:
@@ -835,9 +833,6 @@ err_cleanup:
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
-
- mlx4_unmap_catas_buf(dev);
-
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
@@ -884,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
-
- mlx4_unmap_catas_buf(dev);
-
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
@@ -907,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
}
}
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+ mlx4_remove_one(pdev);
+ return mlx4_init_one(pdev, NULL);
+}
+
static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
@@ -929,6 +927,10 @@ static int __init mlx4_init(void)
{
int ret;
+ ret = mlx4_catas_init();
+ if (ret)
+ return ret;
+
ret = pci_register_driver(&mlx4_driver);
return ret < 0 ? ret : 0;
}
@@ -936,6 +938,7 @@ static int __init mlx4_init(void)
static void __exit mlx4_cleanup(void)
{
pci_unregister_driver(&mlx4_driver);
+ mlx4_catas_cleanup();
}
module_init(mlx4_init);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 3d3b6d24d8d3..be304a7c2c91 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -37,7 +37,9 @@
#ifndef MLX4_H
#define MLX4_H
+#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/timer.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -66,7 +68,6 @@ enum {
enum {
MLX4_EQ_ASYNC,
MLX4_EQ_COMP,
- MLX4_EQ_CATAS,
MLX4_NUM_EQ
};
@@ -247,7 +248,8 @@ struct mlx4_mcg_table {
struct mlx4_catas_err {
u32 __iomem *map;
- int size;
+ struct timer_list timer;
+ struct list_head list;
};
struct mlx4_priv {
@@ -310,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-void mlx4_map_catas_buf(struct mlx4_dev *dev);
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
-
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+int mlx4_catas_init(void);
+void mlx4_catas_cleanup(void);
+int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 492cfaaaa75c..19b48c71cf7f 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -277,3 +277,24 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
+
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ struct mlx4_qp_context *context)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ if (!err)
+ memcpy(context, mailbox->buf + 8, sizeof *context);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_query);
+
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 2134f83aed87..b061c86d6839 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -102,6 +102,13 @@ static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
MLX4_CMD_TIME_CLASS_B);
}
+static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
u64 db_rec, struct mlx4_srq *srq)
{
@@ -205,6 +212,29 @@ int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);
+int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+
+ err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
+ if (err)
+ goto err_out;
+ *limit_watermark = srq_context->limit_watermark;
+
+err_out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_query);
+
int __devinit mlx4_init_srq_table(struct mlx4_dev *dev)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index e1732c164a40..deca65330b0f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1060,7 +1060,6 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
struct myri10ge_tx_buf *tx = &mgp->tx;
struct sk_buff *skb;
int idx, len;
- int limit = 0;
while (tx->pkt_done != mcp_index) {
idx = tx->done & tx->mask;
@@ -1091,11 +1090,6 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
bus), len,
PCI_DMA_TODEVICE);
}
-
- /* limit potential for livelock by only handling
- * 2 full tx rings per call */
- if (unlikely(++limit > 2 * tx->mask))
- break;
}
/* start the queue if we've stopped it */
if (netif_queue_stopped(mgp->dev)
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 460a08718c69..6bb48ba80964 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
#define NATSEMI_CREATE_FILE(_dev, _name) \
device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
- device_create_file(&_dev->dev, &dev_attr_##_name)
+ device_remove_file(&_dev->dev, &dev_attr_##_name)
NATSEMI_ATTR(dspcfg_workaround);
@@ -2357,8 +2357,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
np->rx_dma[entry],
buflen,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
- np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
np->rx_dma[entry],
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 995c0a5d4066..cfdeaf7aa163 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -669,10 +669,15 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
static int ne2k_pci_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
+ int rc;
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
NS8390_init(dev, 1);
netif_device_attach(dev);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 56f8197b953b..b703ccfe040b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -54,8 +54,6 @@ static char netxen_nic_driver_string[] = "NetXen Network Driver version "
#define NETXEN_ADAPTER_UP_MAGIC 777
#define NETXEN_NIC_PEG_TUNE 0
-u8 nx_p2_id = NX_P2_C0;
-
#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define DMA_35BIT_MASK 0x00000007ffffffffULL
@@ -307,8 +305,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
pci_set_master(pdev);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id);
- if (nx_p2_id == NX_P2_C1 &&
+ if (pdev->revision == NX_P2_C1 &&
(pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
(pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
pci_using_dac = 1;
@@ -552,7 +549,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
adapter->ahw.pdev = pdev;
adapter->proc_cmd_buf_counter = 0;
- adapter->ahw.revision_id = nx_p2_id;
+ adapter->ahw.revision_id = pdev->revision;
/* make sure Window == 1 */
netxen_nic_pci_change_crbwindow(adapter, 1);
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3d5b4232f65f..22a3b3dc7d89 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -670,14 +670,10 @@ static void ni5010_set_multicast_list(struct net_device *dev)
PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
- if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI) {
+ if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
dev->flags |= IFF_PROMISC;
outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
- } else if (dev->mc_list) {
- /* Sorry, multicast not supported */
- PRINTK((KERN_DEBUG "%s: No multicast, entering broadcast mode\n", dev->name));
- outb(RMD_BROADCAST, EDLC_RMODE);
} else {
PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 8dbd6d1900b5..5e7999db2096 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -936,7 +936,7 @@ static void ni52_rcv_int(struct net_device *dev)
{
skb_reserve(skb,2);
skb_put(skb,totlen);
- eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 3818edf0ac18..4ef5fe345191 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1096,7 +1096,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
#ifdef RCV_VIA_SKB
if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
skb_put(skb,len);
- eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+ skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
}
else {
struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
@@ -1108,7 +1108,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
}
#else
skb_put(skb,len);
- eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+ skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
p->stats.rx_packets++;
p->stats.rx_bytes += len;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 104aab3c957f..ea80e6cb3dec 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1582,7 +1582,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI)
+ if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index df8998b4f37e..3cdbe118200b 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1567,7 +1567,7 @@ static void netdrv_rx_interrupt (struct net_device *dev,
if (skb) {
skb_reserve (skb, 2); /* 16 byte align the IP fields. */
- eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
skb_put (skb, pkt_size);
skb->protocol = eth_type_trans (skb, dev);
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 0d1c7a41c9c6..ea9414c4d900 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -147,7 +147,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
DEBUG(0, "com20020_attach()\n");
/* Create new network device */
- info = kmalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
+ info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
if (!info)
goto fail_alloc_info;
@@ -155,7 +155,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
if (!dev)
goto fail_alloc_dev;
- memset(info, 0, sizeof(struct com20020_dev_t));
lp = dev->priv;
lp->timeout = timeout;
lp->backplane = backplane;
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 4ecb8ca5a992..4eafa4f42cff 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -146,9 +146,8 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
DEBUG(0, "ibmtr_attach()\n");
/* Create new token-ring device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
- memset(info,0,sizeof(*info));
dev = alloc_trdev(sizeof(struct tok_info));
if (!dev) {
kfree(info);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 9c171a7390e2..465485a3fbc6 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1235,9 +1235,9 @@ static void pcnet32_rx_entry(struct net_device *dev,
lp->rx_dma_addr[entry],
pkt_len,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
- pkt_len, 0);
+ pkt_len);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
pkt_len,
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 596222b260d6..6a5385647911 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -21,6 +21,10 @@
/* Vitesse Extended Control Register 1 */
#define MII_VSC8244_EXT_CON1 0x17
#define MII_VSC8244_EXTCON1_INIT 0x0000
+#define MII_VSC8244_EXTCON1_TX_SKEW_MASK 0x0c00
+#define MII_VSC8244_EXTCON1_RX_SKEW_MASK 0x0300
+#define MII_VSC8244_EXTCON1_TX_SKEW 0x0800
+#define MII_VSC8244_EXTCON1_RX_SKEW 0x0200
/* Vitesse Interrupt Mask Register */
#define MII_VSC8244_IMASK 0x19
@@ -39,7 +43,7 @@
/* Vitesse Auxiliary Control/Status Register */
#define MII_VSC8244_AUX_CONSTAT 0x1c
-#define MII_VSC8244_AUXCONSTAT_INIT 0x0004
+#define MII_VSC8244_AUXCONSTAT_INIT 0x0000
#define MII_VSC8244_AUXCONSTAT_DUPLEX 0x0020
#define MII_VSC8244_AUXCONSTAT_SPEED 0x0018
#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
@@ -51,6 +55,7 @@ MODULE_LICENSE("GPL");
static int vsc824x_config_init(struct phy_device *phydev)
{
+ int extcon;
int err;
err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
@@ -58,14 +63,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_VSC8244_EXT_CON1,
- MII_VSC8244_EXTCON1_INIT);
+ extcon = phy_read(phydev, MII_VSC8244_EXT_CON1);
+
+ if (extcon < 0)
+		return extcon;
+
+ extcon &= ~(MII_VSC8244_EXTCON1_TX_SKEW_MASK |
+ MII_VSC8244_EXTCON1_RX_SKEW_MASK);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ extcon |= (MII_VSC8244_EXTCON1_TX_SKEW |
+ MII_VSC8244_EXTCON1_RX_SKEW);
+
+ err = phy_write(phydev, MII_VSC8244_EXT_CON1, extcon);
+
return err;
}
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
- int err = phy_read(phydev, MII_VSC8244_ISTAT);
+ int err = 0;
+
+ /*
+ * Don't bother to ACK the interrupts if interrupts
+ * are disabled. The 824x cannot clear the interrupts
+ * if they are disabled.
+ */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_read(phydev, MII_VSC8244_ISTAT);
return (err < 0) ? err : 0;
}
@@ -77,8 +102,19 @@ static int vsc824x_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_VSC8244_IMASK,
MII_VSC8244_IMASK_MASK);
- else
+ else {
+ /*
+ * The Vitesse PHY cannot clear the interrupt
+ * once it has disabled them, so we clear them first
+ */
+ err = phy_read(phydev, MII_VSC8244_ISTAT);
+
+		if (err < 0)
+ return err;
+
err = phy_write(phydev, MII_VSC8244_IMASK, 0);
+ }
+
return err;
}
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index caabbc408c34..27f5b904f48e 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -159,12 +159,11 @@ ppp_asynctty_open(struct tty_struct *tty)
int err;
err = -ENOMEM;
- ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (ap == 0)
goto out;
/* initialize the asyncppp structure */
- memset(ap, 0, sizeof(*ap));
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 72c8d6628f58..eb98b661efba 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -121,12 +121,11 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
- state = kmalloc(sizeof(*state),
+ state = kzalloc(sizeof(*state),
GFP_KERNEL);
if (state == NULL)
return NULL;
- memset (state, 0, sizeof (struct ppp_deflate_state));
state->strm.next_in = NULL;
state->w_size = w_size;
state->strm.workspace = vmalloc(zlib_deflate_workspacesize());
@@ -341,11 +340,10 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
- state = kmalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return NULL;
- memset (state, 0, sizeof (struct ppp_deflate_state));
state->w_size = w_size;
state->strm.next_out = NULL;
state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 3ef0092dc09c..ef3325b69233 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2684,8 +2684,7 @@ static void __exit ppp_cleanup(void)
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
printk(KERN_ERR "PPP: removing module but units remain!\n");
cardmap_destroy(&all_ppp_units);
- if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
- printk(KERN_ERR "PPP: failed to unregister PPP device\n");
+ unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
}
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index d5bdd2574659..f79cf87a2bff 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -200,11 +200,10 @@ static void *mppe_alloc(unsigned char *options, int optlen)
|| options[0] != CI_MPPE || options[1] != CILEN_MPPE)
goto out;
- state = kmalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
goto out;
- memset(state, 0, sizeof(*state));
state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(state->arc4)) {
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 5918fab38349..ce64032a465a 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -207,13 +207,12 @@ ppp_sync_open(struct tty_struct *tty)
struct syncppp *ap;
int err;
- ap = kmalloc(sizeof(*ap), GFP_KERNEL);
+ ap = kzalloc(sizeof(*ap), GFP_KERNEL);
err = -ENOMEM;
if (ap == 0)
goto out;
/* initialize the syncppp structure */
- memset(ap, 0, sizeof(*ap));
ap->tty = tty;
ap->mru = PPP_MRU;
spin_lock_init(&ap->xmit_lock);
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
new file mode 100644
index 000000000000..f87176055d0e
--- /dev/null
+++ b/drivers/net/pppol2tp.c
@@ -0,0 +1,2496 @@
+/*****************************************************************************
+ * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
+ *
+ * PPPoX --- Generic PPP encapsulation socket family
+ * PPPoL2TP --- PPP over L2TP (RFC 2661)
+ *
+ * Version: 1.0.0
+ *
+ * Authors: Martijn van Oosterhout <kleptog@svana.org>
+ * James Chapman (jchapman@katalix.com)
+ * Contributors:
+ * Michal Ostrowski <mostrows@speakeasy.net>
+ * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
+ * David S. Miller (davem@redhat.com)
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This driver handles only L2TP data frames; control frames are handled by a
+ * userspace application.
+ *
+ * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
+ * attaches it to a bound UDP socket with local tunnel_id / session_id and
+ * peer tunnel_id / session_id set. Data can then be sent or received using
+ * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
+ * can be read or modified using ioctl() or [gs]etsockopt() calls.
+ *
+ * When a PPPoL2TP socket is connected with local and peer session_id values
+ * zero, the socket is treated as a special tunnel management socket.
+ *
+ * Here's example userspace code to create a socket for sending/receiving data
+ * over an L2TP session:-
+ *
+ * struct sockaddr_pppol2tp sax;
+ * int fd;
+ * int session_fd;
+ *
+ * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
+ *
+ * sax.sa_family = AF_PPPOX;
+ * sax.sa_protocol = PX_PROTO_OL2TP;
+ * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
+ * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
+ * sax.pppol2tp.addr.sin_port = addr->sin_port;
+ * sax.pppol2tp.addr.sin_family = AF_INET;
+ * sax.pppol2tp.s_tunnel = tunnel_id;
+ * sax.pppol2tp.s_session = session_id;
+ * sax.pppol2tp.d_tunnel = peer_tunnel_id;
+ * sax.pppol2tp.d_session = peer_session_id;
+ *
+ * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
+ *
+ * A pppd plugin that allows PPP traffic to be carried over L2TP using
+ * this driver is available from the OpenL2TP project at
+ * http://openl2tp.sourceforge.net.
+ */
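To make the fragment in the comment above self-contained, here is a rough userspace sketch only (not part of the driver; tunnel_fd, the id values and the peer address are placeholders that the caller's own L2TP control-channel code must supply):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

/* Open a PPPoL2TP data socket for an already-established tunnel.
 * tunnel_fd must be the bound UDP socket carrying the L2TP control
 * connection; returns the session fd, or -1 on error. */
int pppol2tp_session_open(int tunnel_fd, const struct sockaddr_in *peer,
			  int tunnel_id, int session_id,
			  int peer_tunnel_id, int peer_session_id)
{
	struct sockaddr_pppol2tp sax;
	int fd;

	fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
	if (fd < 0)
		return -1;

	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = tunnel_fd;
	sax.pppol2tp.addr.sin_family = AF_INET;
	sax.pppol2tp.addr.sin_addr.s_addr = peer->sin_addr.s_addr;
	sax.pppol2tp.addr.sin_port = peer->sin_port;
	sax.pppol2tp.s_tunnel = tunnel_id;
	sax.pppol2tp.s_session = session_id;
	sax.pppol2tp.d_tunnel = peer_tunnel_id;
	sax.pppol2tp.d_session = peer_session_id;

	if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0) {
		perror("connect(PPPoL2TP)");
		close(fd);
		return -1;
	}

	/* Data now flows over fd with ordinary sendmsg()/recvmsg(). */
	return fd;
}

Whether the returned fd is then handed to pppd's channel code or used directly is up to the application.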
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <asm/uaccess.h>
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+#include <linux/if_pppol2tp.h>
+#include <net/sock.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/file.h>
+#include <linux/hash.h>
+#include <linux/sort.h>
+#include <linux/proc_fs.h>
+#include <net/dst.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/xfrm.h>
+
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+
+
+#define PPPOL2TP_DRV_VERSION "V1.0"
+
+/* L2TP header constants */
+#define L2TP_HDRFLAG_T 0x8000
+#define L2TP_HDRFLAG_L 0x4000
+#define L2TP_HDRFLAG_S 0x0800
+#define L2TP_HDRFLAG_O 0x0200
+#define L2TP_HDRFLAG_P 0x0100
+
+#define L2TP_HDR_VER_MASK 0x000F
+#define L2TP_HDR_VER 0x0002
+
+/* Space for UDP, L2TP and PPP headers */
+#define PPPOL2TP_HEADER_OVERHEAD 40
+
+/* Just some random numbers */
+#define L2TP_TUNNEL_MAGIC 0x42114DDA
+#define L2TP_SESSION_MAGIC 0x0C04EB7D
+
+#define PPPOL2TP_HASH_BITS 4
+#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
+
+/* Default trace flags */
+#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
+
+#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
+ do { \
+ if ((_mask) & (_type)) \
+ printk(_lvl "PPPOL2TP: " _fmt, ##args); \
+ } while(0)
+
+/* Number of bytes to build transmit L2TP headers.
+ * Unfortunately the size is different depending on whether sequence numbers
+ * are enabled.
+ */
+#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
+#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
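+/* The layout written by pppol2tp_build_l2tp_header() below accounts for
+ * these sizes (the L and O bits are never set on transmit):
+ *
+ * flags/ver (2) + tunnel_id (2) + session_id (2) = 6
+ * flags/ver (2) + tunnel_id (2) + session_id (2) + Ns (2) + Nr (2) = 10
+ */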
+
+struct pppol2tp_tunnel;
+
+/* Describes a session. It is stored in the sk_user_data field of the PPPoL2TP
+ * socket and holds the information needed to handle received packets for the
+ * session and to build and transmit outgoing ones.
+ */
+struct pppol2tp_session
+{
+ int magic; /* should be
+ * L2TP_SESSION_MAGIC */
+ int owner; /* pid that opened the socket */
+
+ struct sock *sock; /* Pointer to the session
+ * PPPoX socket */
+ struct sock *tunnel_sock; /* Pointer to the tunnel UDP
+ * socket */
+
+ struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
+
+ struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
+ * context */
+
+ char name[20]; /* "sess xxxxx/yyyyy", where
+ * x=tunnel_id, y=session_id */
+ int mtu;
+ int mru;
+ int flags; /* accessed by PPPIOCGFLAGS.
+ * Unused. */
+ unsigned recv_seq:1; /* expect receive packets with
+ * sequence numbers? */
+ unsigned send_seq:1; /* send packets with sequence
+ * numbers? */
+ unsigned lns_mode:1; /* behave as LNS? LAC enables
+ * sequence numbers under
+ * control of LNS. */
+ int debug; /* bitmask of debug message
+ * categories */
+ int reorder_timeout; /* configured reorder timeout
+ * (in jiffies) */
+ u16 nr; /* session NR state (receive) */
+ u16 ns; /* session NS state (send) */
+ struct sk_buff_head reorder_q; /* receive reorder queue */
+ struct pppol2tp_ioc_stats stats;
+ struct hlist_node hlist; /* Hash list node */
+};
+
+/* Describes a tunnel. It is stored in the sk_user_data field of the tunnel's
+ * UDP socket and tracks all the associated sessions so that incoming packets
+ * can be demultiplexed to the right one.
+ */
+struct pppol2tp_tunnel
+{
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
+ /* hashed list of sessions,
+ * hashed by id */
+ int debug; /* bitmask of debug message
+ * categories */
+ char name[12]; /* "tunl xxxxx" */
+ struct pppol2tp_ioc_stats stats;
+
+ void (*old_sk_destruct)(struct sock *);
+
+ struct sock *sock; /* Parent socket */
+ struct list_head list; /* Keep a list of all open
+ * prepared sockets */
+
+ atomic_t ref_count;
+};
+
+/* Private data stored for received packets in the skb.
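+ * It is placed past the struct inet_skb_parm area of skb->cb[], which the
+ * IP layer uses for its own control data (see PPPOL2TP_SKB_CB() below).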
+ */
+struct pppol2tp_skb_cb {
+ u16 ns;
+ u16 nr;
+ u16 has_seq;
+ u16 length;
+ unsigned long expires;
+};
+
+#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
+
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
+
+static atomic_t pppol2tp_tunnel_count;
+static atomic_t pppol2tp_session_count;
+static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
+static struct proto_ops pppol2tp_ops;
+static LIST_HEAD(pppol2tp_tunnel_list);
+static DEFINE_RWLOCK(pppol2tp_tunnel_list_lock);
+
+/* Helpers to obtain tunnel/session contexts from sockets.
+ */
+static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
+{
+ struct pppol2tp_session *session;
+
+ if (sk == NULL)
+ return NULL;
+
+ session = (struct pppol2tp_session *)(sk->sk_user_data);
+ if (session == NULL)
+ return NULL;
+
+ BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+ return session;
+}
+
+static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
+{
+ struct pppol2tp_tunnel *tunnel;
+
+ if (sk == NULL)
+ return NULL;
+
+ tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
+ if (tunnel == NULL)
+ return NULL;
+
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ return tunnel;
+}
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ pppol2tp_tunnel_free(tunnel);
+}
+
+/* Session hash list.
+ * The session_id SHOULD be random according to RFC2661, but several
+ * L2TP implementations (Cisco and Microsoft) use incrementing
+ * session_ids. So we do a real hash on the session_id, rather than a
+ * simple bitmask.
+ */
+static inline struct hlist_head *
+pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
+{
+ unsigned long hash_val = (unsigned long) session_id;
+ return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
+}
+
+/* Lookup a session by id
+ */
+static struct pppol2tp_session *
+pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
+{
+ struct hlist_head *session_list =
+ pppol2tp_session_id_hash(tunnel, session_id);
+ struct pppol2tp_session *session;
+ struct hlist_node *walk;
+
+ read_lock(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, walk, session_list, hlist) {
+ if (session->tunnel_addr.s_session == session_id) {
+ read_unlock(&tunnel->hlist_lock);
+ return session;
+ }
+ }
+ read_unlock(&tunnel->hlist_lock);
+
+ return NULL;
+}
+
+/* Lookup a tunnel by id
+ */
+static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
+{
+ struct pppol2tp_tunnel *tunnel = NULL;
+
+ read_lock(&pppol2tp_tunnel_list_lock);
+ list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
+ if (tunnel->stats.tunnel_id == tunnel_id) {
+ read_unlock(&pppol2tp_tunnel_list_lock);
+ return tunnel;
+ }
+ }
+ read_unlock(&pppol2tp_tunnel_list_lock);
+
+ return NULL;
+}
+
+/*****************************************************************************
+ * Receive data handling
+ *****************************************************************************/
+
+/* Queue a skb in order. We come here only if the skb has an L2TP sequence
+ * number.
+ */
+static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
+{
+ struct sk_buff *skbp;
+ u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
+
+ spin_lock(&session->reorder_q.lock);
+ skb_queue_walk(&session->reorder_q, skbp) {
+ if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
+ __skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
+ session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
+ skb_queue_len(&session->reorder_q));
+ session->stats.rx_oos_packets++;
+ goto out;
+ }
+ }
+
+ __skb_queue_tail(&session->reorder_q, skb);
+
+out:
+ spin_unlock(&session->reorder_q.lock);
+}
+
+/* Dequeue a single skb.
+ */
+static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
+{
+ struct pppol2tp_tunnel *tunnel = session->tunnel;
+ int length = PPPOL2TP_SKB_CB(skb)->length;
+ struct sock *session_sock = NULL;
+
+ /* We're about to requeue the skb, so unlink it and return resources
+ * to its current owner (a socket receive buffer).
+ */
+ skb_unlink(skb, &session->reorder_q);
+ skb_orphan(skb);
+
+ tunnel->stats.rx_packets++;
+ tunnel->stats.rx_bytes += length;
+ session->stats.rx_packets++;
+ session->stats.rx_bytes += length;
+
+ if (PPPOL2TP_SKB_CB(skb)->has_seq) {
+ /* Bump our Nr */
+ session->nr++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated nr to %hu\n", session->name, session->nr);
+ }
+
+ /* If the socket is bound, send it in to PPP's input queue. Otherwise
+ * queue it on the session socket.
+ */
+ session_sock = session->sock;
+ if (session_sock->sk_state & PPPOX_BOUND) {
+ struct pppox_sock *po;
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv %d byte data frame, passing to ppp\n",
+ session->name, length);
+
+ /* We need to forget all info related to the L2TP packet
+ * gathered in the skb as we are going to reuse the same
+ * skb for the inner packet.
+ * Namely we need to:
+ * - reset xfrm (IPSec) information as it applies to
+ * the outer L2TP packet and not to the inner one
+ * - release the dst to force a route lookup on the inner
+ * IP packet since skb->dst currently points to the dst
+ * of the UDP tunnel
+ * - reset netfilter information as it doesn't apply
+ * to the inner packet either
+ */
+ secpath_reset(skb);
+ dst_release(skb->dst);
+ skb->dst = NULL;
+ nf_reset(skb);
+
+ po = pppox_sk(session_sock);
+ ppp_input(&po->chan, skb);
+ } else {
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: socket not bound\n", session->name);
+
+ /* Not bound. Nothing we can do, so discard. */
+ session->stats.rx_errors++;
+ kfree_skb(skb);
+ }
+
+ sock_put(session->sock);
+}
+
+/* Dequeue skbs from the session's reorder_q, subject to packet order.
+ * Skbs that have been in the queue for too long are simply discarded.
+ */
+static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
+{
+ struct sk_buff *skb;
+ struct sk_buff *tmp;
+
+ /* If the pkt at the head of the queue has the nr that we
+ * expect to send up next, dequeue it and any other
+ * in-sequence packets behind it.
+ */
+ spin_lock(&session->reorder_q.lock);
+ skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
+ if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %hu len %d discarded (too old), "
+ "waiting for %hu, reorder_q_len=%d\n",
+ session->name, PPPOL2TP_SKB_CB(skb)->ns,
+ PPPOL2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ __skb_unlink(skb, &session->reorder_q);
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (PPPOL2TP_SKB_CB(skb)->has_seq) {
+ if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: holding oos pkt %hu len %d, "
+ "waiting for %hu, reorder_q_len=%d\n",
+ session->name, PPPOL2TP_SKB_CB(skb)->ns,
+ PPPOL2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto out;
+ }
+ }
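+ /* Drop the queue lock before handing the skb on:
+ * pppol2tp_recv_dequeue_skb() unlinks it with skb_unlink(), which
+ * takes the queue lock itself.
+ */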
+ spin_unlock(&session->reorder_q.lock);
+ pppol2tp_recv_dequeue_skb(session, skb);
+ spin_lock(&session->reorder_q.lock);
+ }
+
+out:
+ spin_unlock(&session->reorder_q.lock);
+}
+
+/* Internal receive frame. Do the real work of receiving an L2TP data frame
+ * here. The skb is not on a list when we get here.
+ * Returns 0 if the packet was a data packet and was successfully passed on.
+ * Returns 1 if the packet was not a good data packet and could not be
+ * forwarded. All such packets are passed up to userspace to deal with.
+ */
+static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
+{
+ struct pppol2tp_session *session = NULL;
+ struct pppol2tp_tunnel *tunnel;
+ unsigned char *ptr;
+ u16 hdrflags;
+ u16 tunnel_id, session_id;
+ int length;
+ struct udphdr *uh;
+
+ tunnel = pppol2tp_sock_to_tunnel(sock);
+ if (tunnel == NULL)
+ goto error;
+
+ /* Short packet? */
+ if (skb->len < sizeof(struct udphdr)) {
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
+ goto error;
+ }
+
+ /* Point to L2TP header */
+ ptr = skb->data + sizeof(struct udphdr);
+
+ /* Get L2TP header flags */
+ hdrflags = ntohs(*(__be16*)ptr);
+
+ /* Trace packet contents, if enabled */
+ if (tunnel->debug & PPPOL2TP_MSG_DATA) {
+ printk(KERN_DEBUG "%s: recv: ", tunnel->name);
+
+ for (length = 0; length < 16; length++)
+ printk(" %02X", ptr[length]);
+ printk("\n");
+ }
+
+ /* Get length of L2TP packet */
+ uh = (struct udphdr *) skb_transport_header(skb);
+ length = ntohs(uh->len) - sizeof(struct udphdr);
+
+ /* Too short? */
+ if (length < 12) {
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length);
+ goto error;
+ }
+
+ /* If type is control packet, it is handled by userspace. */
+ if (hdrflags & L2TP_HDRFLAG_T) {
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: recv control packet, len=%d\n", tunnel->name, length);
+ goto error;
+ }
+
+ /* Skip flags */
+ ptr += 2;
+
+ /* If length is present, skip it */
+ if (hdrflags & L2TP_HDRFLAG_L)
+ ptr += 2;
+
+ /* Extract tunnel and session ID */
+ tunnel_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ session_id = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+
+ /* Find the session context */
+ session = pppol2tp_session_find(tunnel, session_id);
+ if (!session) {
+ /* Not found? Pass to userspace to deal with */
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
+ "%s: no socket found (%hu/%hu). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
+ goto error;
+ }
+ sock_hold(session->sock);
+
+ /* The ref count on the socket was increased by the above call since
+ * we now hold a pointer to the session. Take care to do sock_put()
+ * when exiting this function from now on...
+ */
+
+ /* Handle the optional sequence numbers. If we are the LAC,
+ * enable/disable sequence numbers under the control of the LNS. If
+ * no sequence numbers present but we were expecting them, discard
+ * frame.
+ */
+ if (hdrflags & L2TP_HDRFLAG_S) {
+ u16 ns, nr;
+ ns = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+ nr = ntohs(*(__be16 *) ptr);
+ ptr += 2;
+
+ /* Received a packet with sequence numbers. If we're the LAC,
+ * check whether we're sending sequence numbers too and, if not,
+ * start doing so: the LNS controls this when we operate as a
+ * LAC.
+ */
+ if ((!session->lns_mode) && (!session->send_seq)) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to enable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = -1;
+ }
+
+ /* Store L2TP info in the skb */
+ PPPOL2TP_SKB_CB(skb)->ns = ns;
+ PPPOL2TP_SKB_CB(skb)->nr = nr;
+ PPPOL2TP_SKB_CB(skb)->has_seq = 1;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
+ session->name, ns, nr, session->nr);
+ } else {
+ /* No sequence numbers.
+ * If user has configured mandatory sequence numbers, discard.
+ */
+ if (session->recv_seq) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ goto discard;
+ }
+
+ /* If we're the LAC and we're sending sequence numbers, the
+ * LNS has requested that we no longer send sequence numbers.
+ * If we're the LNS and we're sending sequence numbers, the
+ * LAC is broken. Discard the frame.
+ */
+ if ((!session->lns_mode) && (session->send_seq)) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
+ "%s: requested to disable seq numbers by LNS\n",
+ session->name);
+ session->send_seq = 0;
+ } else if (session->send_seq) {
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
+ "%s: recv data has no seq numbers when required. "
+ "Discarding\n", session->name);
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ goto discard;
+ }
+
+ /* Store L2TP info in the skb */
+ PPPOL2TP_SKB_CB(skb)->has_seq = 0;
+ }
+
+ /* If offset bit set, skip it. */
+ if (hdrflags & L2TP_HDRFLAG_O)
+ ptr += 2 + ntohs(*(__be16 *) ptr);
+
+ skb_pull(skb, ptr - skb->data);
+
+ /* Skip PPP header, if present. In testing, Microsoft L2TP clients
+ * don't send the PPP header (PPP header compression enabled), but
+ * other clients can include the header. So we cope with both cases
+ * here. The PPP header is always FF03 when using L2TP.
+ *
+ * Note that skb->data[] isn't dereferenced from a u16 ptr here since
+ * the field may be unaligned.
+ */
+ if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
+ skb_pull(skb, 2);
+
+ /* Prepare skb for adding to the session's reorder_q. Hold
+ * packets for max reorder_timeout or 1 second if not
+ * reordering.
+ */
+ PPPOL2TP_SKB_CB(skb)->length = length;
+ PPPOL2TP_SKB_CB(skb)->expires = jiffies +
+ (session->reorder_timeout ? session->reorder_timeout : HZ);
+
+ /* Add packet to the session's receive queue. Reordering is done here, if
+ * enabled. Saved L2TP protocol info is stored in skb->cb[].
+ */
+ if (PPPOL2TP_SKB_CB(skb)->has_seq) {
+ if (session->reorder_timeout != 0) {
+ /* Packet reordering enabled. Add skb to session's
+ * reorder queue, in order of ns.
+ */
+ pppol2tp_recv_queue_skb(session, skb);
+ } else {
+ /* Packet reordering disabled. Discard out-of-sequence
+ * packets
+ */
+ if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
+ session->stats.rx_seq_discards++;
+ session->stats.rx_errors++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: oos pkt %hu len %d discarded, "
+ "waiting for %hu, reorder_q_len=%d\n",
+ session->name, PPPOL2TP_SKB_CB(skb)->ns,
+ PPPOL2TP_SKB_CB(skb)->length, session->nr,
+ skb_queue_len(&session->reorder_q));
+ goto discard;
+ }
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+ } else {
+ /* No sequence numbers. Add the skb to the tail of the
+ * reorder queue. This ensures that it will be
+ * delivered after all previous sequenced skbs.
+ */
+ skb_queue_tail(&session->reorder_q, skb);
+ }
+
+ /* Try to dequeue as many skbs from reorder_q as we can. */
+ pppol2tp_recv_dequeue(session);
+
+ return 0;
+
+discard:
+ kfree_skb(skb);
+ sock_put(session->sock);
+
+ return 0;
+
+error:
+ return 1;
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes:
+ * 0 : success.
+ * <0: error
+ * >0: skb should be passed up to userspace as UDP.
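+ * This handler is installed on the tunnel UDP socket as
+ * (udp_sk(sk))->encap_rcv by pppol2tp_prepare_tunnel_socket().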
+ */
+static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppol2tp_tunnel *tunnel;
+
+ tunnel = pppol2tp_sock_to_tunnel(sk);
+ if (tunnel == NULL)
+ goto pass_up;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: received %d bytes\n", tunnel->name, skb->len);
+
+ if (pppol2tp_recv_core(sk, skb))
+ goto pass_up;
+
+ return 0;
+
+pass_up:
+ return 1;
+}
+
+/* Receive message. This is the recvmsg for the PPPoL2TP socket.
+ */
+static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len,
+ int flags)
+{
+ int err;
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+
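+ /* Once the socket is bound to a PPP channel, received frames are
+ * passed to ppp_input() in pppol2tp_recv_dequeue_skb(), so refuse
+ * recvmsg() here.
+ */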
+ err = -EIO;
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+ msg->msg_namelen = 0;
+
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (skb) {
+ err = memcpy_toiovec(msg->msg_iov, (unsigned char *) skb->data,
+ skb->len);
+ if (err < 0)
+ goto do_skb_free;
+ err = skb->len;
+ }
+do_skb_free:
+ kfree_skb(skb);
+end:
+ return err;
+}
+
+/************************************************************************
+ * Transmit handling
+ ***********************************************************************/
+
+/* Tell how big L2TP headers are for a particular session. This
+ * depends on whether sequence numbers are being used.
+ */
+static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
+{
+ if (session->send_seq)
+ return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
+
+ return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+}
+
+/* Build an L2TP header for the session into the buffer provided.
+ */
+static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
+ void *buf)
+{
+ __be16 *bufp = buf;
+ u16 flags = L2TP_HDR_VER;
+
+ if (session->send_seq)
+ flags |= L2TP_HDRFLAG_S;
+
+ /* Setup L2TP header.
+ * FIXME: Can this ever be unaligned? Is direct dereferencing of
+ * 16-bit header fields safe here for all architectures?
+ */
+ *bufp++ = htons(flags);
+ *bufp++ = htons(session->tunnel_addr.d_tunnel);
+ *bufp++ = htons(session->tunnel_addr.d_session);
+ if (session->send_seq) {
+ *bufp++ = htons(session->ns);
+ *bufp++ = 0;
+ session->ns++;
+ PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
+ "%s: updated ns to %hu\n", session->name, session->ns);
+ }
+}
+
+/* This is the sendmsg for the PPPoL2TP session socket. We come here
+ * when a user application does a sendmsg() on the session socket. L2TP and
+ * PPP headers must be inserted into the user's data.
+ */
+static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ static const unsigned char ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = sock->sk;
+ struct inet_sock *inet;
+ __wsum csum = 0;
+ struct sk_buff *skb;
+ int error;
+ int hdr_len;
+ struct pppol2tp_session *session;
+ struct pppol2tp_tunnel *tunnel;
+ struct udphdr *uh;
+ unsigned int len;
+
+ error = -ENOTCONN;
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto error;
+
+ /* Get session and tunnel contexts */
+ error = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto error;
+
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto error;
+
+ /* What header length is configured for this session? */
+ hdr_len = pppol2tp_l2tp_header_len(session);
+
+ /* Allocate a socket buffer */
+ error = -ENOMEM;
+ skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + hdr_len +
+ sizeof(ppph) + total_len,
+ 0, GFP_KERNEL);
+ if (!skb)
+ goto error;
+
+ /* Reserve space for headers. */
+ skb_reserve(skb, NET_SKB_PAD);
+ skb_reset_network_header(skb);
+ skb_reserve(skb, sizeof(struct iphdr));
+ skb_reset_transport_header(skb);
+
+ /* Build UDP header */
+ inet = inet_sk(session->tunnel_sock);
+ uh = (struct udphdr *) skb->data;
+ uh->source = inet->sport;
+ uh->dest = inet->dport;
+ uh->len = htons(hdr_len + sizeof(ppph) + total_len);
+ uh->check = 0;
+ skb_put(skb, sizeof(struct udphdr));
+
+ /* Build L2TP header */
+ pppol2tp_build_l2tp_header(session, skb->data);
+ skb_put(skb, hdr_len);
+
+ /* Add PPP header */
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+ error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error;
+ }
+ skb_put(skb, total_len);
+
+ /* Calculate UDP checksum if configured to do so */
+ if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT)
+ csum = udp_csum_outgoing(sk, skb);
+
+ /* Debug */
+ if (session->send_seq)
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes, ns=%hu\n", session->name,
+ total_len, session->ns - 1);
+ else
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %Zd bytes\n", session->name, total_len);
+
+ if (session->debug & PPPOL2TP_MSG_DATA) {
+ int i;
+ unsigned char *datap = skb->data;
+
+ printk(KERN_DEBUG "%s: xmit:", session->name);
+ for (i = 0; i < total_len; i++) {
+ printk(" %02X", *datap++);
+ if (i == 15) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+ }
+
+ /* Queue the packet to IP for output */
+ len = skb->len;
+ error = ip_queue_xmit(skb, 1);
+
+ /* Update stats */
+ if (error >= 0) {
+ tunnel->stats.tx_packets++;
+ tunnel->stats.tx_bytes += len;
+ session->stats.tx_packets++;
+ session->stats.tx_bytes += len;
+ } else {
+ tunnel->stats.tx_errors++;
+ session->stats.tx_errors++;
+ }
+
+error:
+ return error;
+}
+
+/* Transmit function called by generic PPP driver. Sends PPP frame
+ * over PPPoL2TP socket.
+ *
+ * This is almost the same as pppol2tp_sendmsg(), but rather than
+ * being called with a msghdr from userspace, it is called with a skb
+ * from the kernel.
+ *
+ * The supplied skb from ppp may not have enough headroom for the
+ * insertion of L2TP, UDP and IP headers, so we may need to allocate a
+ * new skb with more headroom. We must be careful in the error case
+ * because the caller expects to free the skb it supplied, not our new
+ * skb. So we take care to always leave the original skb unfreed if we
+ * return an error.
+ */
+static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+ static const u8 ppph[2] = { 0xff, 0x03 };
+ struct sock *sk = (struct sock *) chan->private;
+ struct sock *sk_tun;
+ int hdr_len;
+ struct pppol2tp_session *session;
+ struct pppol2tp_tunnel *tunnel;
+ int rc;
+ int headroom;
+ int data_len = skb->len;
+ struct inet_sock *inet;
+ __wsum csum = 0;
+ struct sk_buff *skb2 = NULL;
+ struct udphdr *uh;
+ unsigned int len;
+
+ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
+ goto abort;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto abort;
+
+ sk_tun = session->tunnel_sock;
+ if (sk_tun == NULL)
+ goto abort;
+ tunnel = pppol2tp_sock_to_tunnel(sk_tun);
+ if (tunnel == NULL)
+ goto abort;
+
+ /* What header length is configured for this session? */
+ hdr_len = pppol2tp_l2tp_header_len(session);
+
+ /* Check that there's enough headroom in the skb to insert IP,
+ * UDP and L2TP and PPP headers. If not enough, expand it to
+ * make room. Note that a new skb (or a clone) is
+ * allocated. If we return an error from this point on, make
+ * sure we free the new skb but do not free the original skb
+ * since that is done by the caller for the error case.
+ */
+ headroom = NET_SKB_PAD + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + hdr_len + sizeof(ppph);
+ if (skb_headroom(skb) < headroom) {
+ skb2 = skb_realloc_headroom(skb, headroom);
+ if (skb2 == NULL)
+ goto abort;
+ } else
+ skb2 = skb;
+
+ /* Check that the socket has room */
+ if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf)
+ skb_set_owner_w(skb2, sk_tun);
+ else
+ goto discard;
+
+ /* Setup PPP header */
+ skb_push(skb2, sizeof(ppph));
+ skb2->data[0] = ppph[0];
+ skb2->data[1] = ppph[1];
+
+ /* Setup L2TP header */
+ skb_push(skb2, hdr_len);
+ pppol2tp_build_l2tp_header(session, skb2->data);
+
+ /* Setup UDP header */
+ inet = inet_sk(sk_tun);
+ skb_push(skb2, sizeof(struct udphdr));
+ skb_reset_transport_header(skb2);
+ uh = (struct udphdr *) skb2->data;
+ uh->source = inet->sport;
+ uh->dest = inet->dport;
+ uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
+ uh->check = 0;
+
+ /* Calculate UDP checksum if configured to do so */
+ if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
+ csum = udp_csum_outgoing(sk_tun, skb2);
+
+ /* Debug */
+ if (session->send_seq)
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %d bytes, ns=%hu\n", session->name,
+ data_len, session->ns - 1);
+ else
+ PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
+ "%s: send %d bytes\n", session->name, data_len);
+
+ if (session->debug & PPPOL2TP_MSG_DATA) {
+ int i;
+ unsigned char *datap = skb2->data;
+
+ printk(KERN_DEBUG "%s: xmit:", session->name);
+ for (i = 0; i < data_len; i++) {
+ printk(" %02X", *datap++);
+ if (i == 31) {
+ printk(" ...");
+ break;
+ }
+ }
+ printk("\n");
+ }
+
+ memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
+ IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ nf_reset(skb2);
+
+ /* Get routing info from the tunnel socket */
+ dst_release(skb2->dst);
+ skb2->dst = sk_dst_get(sk_tun);
+
+ /* Queue the packet to IP for output */
+ len = skb2->len;
+ rc = ip_queue_xmit(skb2, 1);
+
+ /* Update stats */
+ if (rc >= 0) {
+ tunnel->stats.tx_packets++;
+ tunnel->stats.tx_bytes += len;
+ session->stats.tx_packets++;
+ session->stats.tx_bytes += len;
+ } else {
+ tunnel->stats.tx_errors++;
+ session->stats.tx_errors++;
+ }
+
+ /* Free the original skb */
+ kfree_skb(skb);
+
+ return 1;
+
+discard:
+ /* Free the new skb. Caller will free original skb. */
+ if (skb2 != skb)
+ kfree_skb(skb2);
+abort:
+ return 0;
+}
+
+/*****************************************************************************
+ * Session (and tunnel control) socket create/destroy.
+ *****************************************************************************/
+
+/* When the tunnel UDP socket is closed, all the attached sockets need to go
+ * too.
+ */
+static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
+{
+ int hash;
+ struct hlist_node *walk;
+ struct hlist_node *tmp;
+ struct pppol2tp_session *session;
+ struct sock *sk;
+
+ BUG_ON(tunnel == NULL);
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing all sessions...\n", tunnel->name);
+
+ write_lock(&tunnel->hlist_lock);
+ for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
+again:
+ hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
+ session = hlist_entry(walk, struct pppol2tp_session, hlist);
+
+ sk = session->sock;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing session\n", session->name);
+
+ hlist_del_init(&session->hlist);
+
+ /* Since we should hold the sock lock while
+ * doing any unbinding, we need to release the
+ * lock we're holding before taking that lock.
+ * Hold a reference to the sock so it doesn't
+ * disappear as we're jumping between locks.
+ */
+ sock_hold(sk);
+ write_unlock(&tunnel->hlist_lock);
+ lock_sock(sk);
+
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ pppox_unbind_sock(sk);
+ sk->sk_state = PPPOX_DEAD;
+ sk->sk_state_change(sk);
+ }
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+ skb_queue_purge(&session->reorder_q);
+
+ release_sock(sk);
+ sock_put(sk);
+
+ /* Now restart from the beginning of this hash
+ * chain. We always remove a session from the
+ * list so we are guaranteed to make forward
+ * progress.
+ */
+ write_lock(&tunnel->hlist_lock);
+ goto again;
+ }
+ }
+ write_unlock(&tunnel->hlist_lock);
+}
+
+/* Really kill the tunnel.
+ * Come here only when all sessions have been cleared from the tunnel.
+ */
+static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
+{
+ /* Remove from socket list */
+ write_lock(&pppol2tp_tunnel_list_lock);
+ list_del_init(&tunnel->list);
+ write_unlock(&pppol2tp_tunnel_list_lock);
+
+ atomic_dec(&pppol2tp_tunnel_count);
+ kfree(tunnel);
+}
+
+/* Tunnel UDP socket destruct hook.
+ * The tunnel context is deleted only when all session sockets have been
+ * closed.
+ */
+static void pppol2tp_tunnel_destruct(struct sock *sk)
+{
+ struct pppol2tp_tunnel *tunnel;
+
+ tunnel = pppol2tp_sock_to_tunnel(sk);
+ if (tunnel == NULL)
+ goto end;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: closing...\n", tunnel->name);
+
+ /* Close all sessions */
+ pppol2tp_tunnel_closeall(tunnel);
+
+ /* No longer an encapsulation socket. See net/ipv4/udp.c */
+ (udp_sk(sk))->encap_type = 0;
+ (udp_sk(sk))->encap_rcv = NULL;
+
+ /* Remove hooks into tunnel socket */
+ tunnel->sock = NULL;
+ sk->sk_destruct = tunnel->old_sk_destruct;
+ sk->sk_user_data = NULL;
+
+ /* Call original (UDP) socket destructor */
+ if (sk->sk_destruct != NULL)
+ (*sk->sk_destruct)(sk);
+
+ pppol2tp_tunnel_dec_refcount(tunnel);
+
+end:
+ return;
+}
+
+/* Really kill the session socket. (Called from sock_put() if
+ * refcnt == 0.)
+ */
+static void pppol2tp_session_destruct(struct sock *sk)
+{
+ struct pppol2tp_session *session = NULL;
+
+ if (sk->sk_user_data != NULL) {
+ struct pppol2tp_tunnel *tunnel;
+
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto out;
+
+ /* Don't use pppol2tp_sock_to_tunnel() here to
+ * get the tunnel context because the tunnel
+ * socket might have already been closed (its
+ * sk->sk_user_data will be NULL) so use the
+ * session's private tunnel ptr instead.
+ */
+ tunnel = session->tunnel;
+ if (tunnel != NULL) {
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ /* If session_id is zero, this is a null
+ * session context, which was created for a
+ * socket that is being used only to manage
+ * tunnels.
+ */
+ if (session->tunnel_addr.s_session != 0) {
+ /* Delete the session socket from the
+ * hash
+ */
+ write_lock(&tunnel->hlist_lock);
+ hlist_del_init(&session->hlist);
+ write_unlock(&tunnel->hlist_lock);
+
+ atomic_dec(&pppol2tp_session_count);
+ }
+
+ /* This will delete the tunnel context if this
+ * is the last session on the tunnel.
+ */
+ session->tunnel = NULL;
+ session->tunnel_sock = NULL;
+ pppol2tp_tunnel_dec_refcount(tunnel);
+ }
+ }
+
+ kfree(session);
+out:
+ return;
+}
+
+/* Called when the PPPoX socket (session) is closed.
+ */
+static int pppol2tp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int error;
+
+ if (!sk)
+ return 0;
+
+ error = -EBADF;
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto error;
+
+ pppox_unbind_sock(sk);
+
+ /* Signal the death of the socket. */
+ sk->sk_state = PPPOX_DEAD;
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ /* Purge any queued data */
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
+ release_sock(sk);
+
+ /* This will delete the session context via
+ * pppol2tp_session_destruct() if the socket's refcnt drops to
+ * zero.
+ */
+ sock_put(sk);
+
+ return 0;
+
+error:
+ release_sock(sk);
+ return error;
+}
+
+/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
+ * sockets attached to it.
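+ * Called from pppol2tp_connect() for the tunnel-management case, i.e. when
+ * a socket is connected with both session IDs zero.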
+ */
+static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
+ int *error)
+{
+ int err;
+ struct socket *sock = NULL;
+ struct sock *sk;
+ struct pppol2tp_tunnel *tunnel;
+ struct sock *ret = NULL;
+
+ /* Get the tunnel UDP socket from the fd, which was opened by
+ * the userspace L2TP daemon.
+ */
+ err = -EBADF;
+ sock = sockfd_lookup(fd, &err);
+ if (!sock) {
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
+ "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ tunnel_id, fd, err);
+ goto err;
+ }
+
+ /* Quick sanity checks */
+ err = -ESOCKTNOSUPPORT;
+ if (sock->type != SOCK_DGRAM) {
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
+ "tunl %hu: fd %d wrong type, got %d, expected %d\n",
+ tunnel_id, fd, sock->type, SOCK_DGRAM);
+ goto err;
+ }
+ err = -EAFNOSUPPORT;
+ if (sock->ops->family != AF_INET) {
+ PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
+ "tunl %hu: fd %d wrong family, got %d, expected %d\n",
+ tunnel_id, fd, sock->ops->family, AF_INET);
+ goto err;
+ }
+
+ err = -ENOTCONN;
+ sk = sock->sk;
+
+ /* Check if this socket has already been prepped */
+ tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
+ if (tunnel != NULL) {
+ /* User-data field already set */
+ err = -EBUSY;
+ BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+
+ /* This socket has already been prepped */
+ ret = tunnel->sock;
+ goto out;
+ }
+
+ /* This socket is available and needs prepping. Create a new tunnel
+ * context and init it.
+ */
+ sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
+ if (sk->sk_user_data == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ tunnel->magic = L2TP_TUNNEL_MAGIC;
+ sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
+
+ tunnel->stats.tunnel_id = tunnel_id;
+ tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
+
+ /* Hook on the tunnel socket destructor so that we can cleanup
+ * if the tunnel socket goes away.
+ */
+ tunnel->old_sk_destruct = sk->sk_destruct;
+ sk->sk_destruct = &pppol2tp_tunnel_destruct;
+
+ tunnel->sock = sk;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ /* Misc init */
+ rwlock_init(&tunnel->hlist_lock);
+
+ /* Add tunnel to our list */
+ INIT_LIST_HEAD(&tunnel->list);
+ write_lock(&pppol2tp_tunnel_list_lock);
+ list_add(&tunnel->list, &pppol2tp_tunnel_list);
+ write_unlock(&pppol2tp_tunnel_list_lock);
+ atomic_inc(&pppol2tp_tunnel_count);
+
+ /* Bump the reference count. The tunnel context is deleted
+ * only when this drops to zero.
+ */
+ pppol2tp_tunnel_inc_refcount(tunnel);
+
+ /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
+ (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
+
+ ret = tunnel->sock;
+
+ *error = 0;
+out:
+ if (sock)
+ sockfd_put(sock);
+
+ return ret;
+
+err:
+ *error = err;
+ goto out;
+}
+
+static struct proto pppol2tp_sk_proto = {
+ .name = "PPPOL2TP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct pppox_sock),
+};
+
+/* socket() handler. Initialize a new struct sock.
+ */
+static int pppol2tp_create(struct socket *sock)
+{
+ int error = -ENOMEM;
+ struct sock *sk;
+
+ sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1);
+ if (!sk)
+ goto out;
+
+ sock_init_data(sock, sk);
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &pppol2tp_ops;
+
+ sk->sk_backlog_rcv = pppol2tp_recv_core;
+ sk->sk_protocol = PX_PROTO_OL2TP;
+ sk->sk_family = PF_PPPOX;
+ sk->sk_state = PPPOX_NONE;
+ sk->sk_type = SOCK_STREAM;
+ sk->sk_destruct = pppol2tp_session_destruct;
+
+ error = 0;
+
+out:
+ return error;
+}
+
+/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+ */
+static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct sock *tunnel_sock = NULL;
+ struct pppol2tp_session *session = NULL;
+ struct pppol2tp_tunnel *tunnel;
+ struct dst_entry *dst;
+ int error = 0;
+
+ lock_sock(sk);
+
+ error = -EINVAL;
+ if (sp->sa_protocol != PX_PROTO_OL2TP)
+ goto end;
+
+ /* Check for already bound sockets */
+ error = -EBUSY;
+ if (sk->sk_state & PPPOX_CONNECTED)
+ goto end;
+
+ /* We don't support rebinding anyway */
+ error = -EALREADY;
+ if (sk->sk_user_data)
+ goto end; /* socket is already attached */
+
+ /* Don't bind if s_tunnel is 0 */
+ error = -EINVAL;
+ if (sp->pppol2tp.s_tunnel == 0)
+ goto end;
+
+ /* Special case: prepare tunnel socket if s_session and
+ * d_session is 0. Otherwise look up tunnel using supplied
+ * tunnel id.
+ */
+ if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
+ tunnel_sock = pppol2tp_prepare_tunnel_socket(sp->pppol2tp.fd,
+ sp->pppol2tp.s_tunnel,
+ &error);
+ if (tunnel_sock == NULL)
+ goto end;
+
+ tunnel = tunnel_sock->sk_user_data;
+ } else {
+ tunnel = pppol2tp_tunnel_find(sp->pppol2tp.s_tunnel);
+
+ /* Error if we can't find the tunnel */
+ error = -ENOENT;
+ if (tunnel == NULL)
+ goto end;
+
+ tunnel_sock = tunnel->sock;
+ }
+
+ /* Check that this session doesn't already exist */
+ error = -EEXIST;
+ session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
+ if (session != NULL)
+ goto end;
+
+ /* Allocate and initialize a new session context. */
+ session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
+ if (session == NULL) {
+ error = -ENOMEM;
+ goto end;
+ }
+
+ skb_queue_head_init(&session->reorder_q);
+
+ session->magic = L2TP_SESSION_MAGIC;
+ session->owner = current->pid;
+ session->sock = sk;
+ session->tunnel = tunnel;
+ session->tunnel_sock = tunnel_sock;
+ session->tunnel_addr = sp->pppol2tp;
+ sprintf(&session->name[0], "sess %hu/%hu",
+ session->tunnel_addr.s_tunnel,
+ session->tunnel_addr.s_session);
+
+ session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
+ session->stats.session_id = session->tunnel_addr.s_session;
+
+ INIT_HLIST_NODE(&session->hlist);
+
+ /* Inherit debug options from tunnel */
+ session->debug = tunnel->debug;
+
+ /* Default MTU must allow space for UDP/L2TP/PPP
+ * headers.
+ */
+ session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+ dst = sk_dst_get(sk);
+ if (dst != NULL) {
+ u32 pmtu = dst_mtu(__sk_dst_get(sk));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+ dst_release(dst);
+ }
+
+ /* Special case: if source & dest session_id == 0x0000, this socket is
+ * being created to manage the tunnel. Don't add the session to the
+ * session hash list, just set up the internal context for use by
+ * ioctl() and sockopt() handlers.
+ */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ error = 0;
+ sk->sk_user_data = session;
+ goto out_no_ppp;
+ }
+
+ /* Get tunnel context from the tunnel socket */
+ tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
+ if (tunnel == NULL) {
+ error = -EBADF;
+ goto end;
+ }
+
+ /* Right now, because we don't have a way to push the incoming skb's
+ * straight through the UDP layer, the only header we need to worry
+ * about is the L2TP header. This size is different depending on
+ * whether sequence numbers are enabled for the data channel.
+ */
+ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+
+ po->chan.private = sk;
+ po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.mtu = session->mtu;
+
+ error = ppp_register_channel(&po->chan);
+ if (error)
+ goto end;
+
+ /* This is how we get the session context from the socket. */
+ sk->sk_user_data = session;
+
+ /* Add session to the tunnel's hash list */
+ write_lock(&tunnel->hlist_lock);
+ hlist_add_head(&session->hlist,
+ pppol2tp_session_id_hash(tunnel,
+ session->tunnel_addr.s_session));
+ write_unlock(&tunnel->hlist_lock);
+
+ atomic_inc(&pppol2tp_session_count);
+
+out_no_ppp:
+ pppol2tp_tunnel_inc_refcount(tunnel);
+ sk->sk_state = PPPOX_CONNECTED;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: created\n", session->name);
+
+end:
+ release_sock(sk);
+
+ if (error != 0)
+ PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
+ "%s: connect failed: %d\n",
+ session ? session->name : "sess", error);
+
+ return error;
+}
+
+/* getname() support.
+ */
+static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *usockaddr_len, int peer)
+{
+ int len = sizeof(struct sockaddr_pppol2tp);
+ struct sockaddr_pppol2tp sp;
+ int error = 0;
+ struct pppol2tp_session *session;
+
+ error = -ENOTCONN;
+ if (sock->sk->sk_state != PPPOX_CONNECTED)
+ goto end;
+
+ session = pppol2tp_sock_to_session(sock->sk);
+ if (session == NULL) {
+ error = -EBADF;
+ goto end;
+ }
+
+ sp.sa_family = AF_PPPOX;
+ sp.sa_protocol = PX_PROTO_OL2TP;
+ memcpy(&sp.pppol2tp, &session->tunnel_addr,
+ sizeof(struct pppol2tp_addr));
+
+ memcpy(uaddr, &sp, len);
+
+ *usockaddr_len = len;
+
+ error = 0;
+
+end:
+ return error;
+}
+
+/****************************************************************************
+ * ioctl() handlers.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. However, in order to control kernel tunnel features, we allow
+ * userspace to create a special "tunnel" PPPoX socket which is used for
+ * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
+ * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
+ * calls.
+ ****************************************************************************/
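+/* Example (userspace): fetch per-session statistics. A minimal sketch using
+ * the request and structure handled below, where "fd" is a connected
+ * PPPoL2TP session socket:
+ *
+ * struct pppol2tp_ioc_stats stats;
+ *
+ * memset(&stats, 0, sizeof(stats));
+ * if (ioctl(fd, PPPIOCGL2TPSTATS, &stats) < 0)
+ * perror("ioctl");
+ */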
+
+/* Session ioctl helper.
+ */
+static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ifreq ifr;
+ int err = 0;
+ struct sock *sk = session->sock;
+ int val = (int) arg;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
+ session->name, cmd, arg);
+
+ sock_hold(sk);
+
+ switch (cmd) {
+ case SIOCGIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+ ifr.ifr_mtu = session->mtu;
+ if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case SIOCSIFMTU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
+ break;
+
+ session->mtu = ifr.ifr_mtu;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mtu=%d\n", session->name, session->mtu);
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (put_user(session->mru, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ err = -EFAULT;
+ if (get_user(val,(int __user *) arg))
+ break;
+
+ session->mru = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set mru=%d\n", session->name, session->mru);
+ err = 0;
+ break;
+
+ case PPPIOCGFLAGS:
+ err = -EFAULT;
+ if (put_user(session->flags, (int __user *) arg))
+ break;
+
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get flags=%d\n", session->name, session->flags);
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ err = -EFAULT;
+ if (get_user(val, (int __user *) arg))
+ break;
+ session->flags = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set flags=%d\n", session->name, session->flags);
+ err = 0;
+ break;
+
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ if (copy_to_user((void __user *) arg, &session->stats,
+ sizeof(session->stats)))
+ break;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", session->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Tunnel ioctl helper.
+ *
+ * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
+ * specifies a session_id, the session ioctl handler is called. This allows an
+ * application to retrieve session stats via a tunnel socket.
+ */
+static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct sock *sk = tunnel->sock;
+ struct pppol2tp_ioc_stats stats_req;
+
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
+ "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
+ cmd, arg);
+
+ sock_hold(sk);
+
+ switch (cmd) {
+ case PPPIOCGL2TPSTATS:
+ err = -ENXIO;
+ if (!(sk->sk_state & PPPOX_CONNECTED))
+ break;
+
+ if (copy_from_user(&stats_req, (void __user *) arg,
+ sizeof(stats_req))) {
+ err = -EFAULT;
+ break;
+ }
+ if (stats_req.session_id != 0) {
+ /* resend to session ioctl handler */
+ struct pppol2tp_session *session =
+ pppol2tp_session_find(tunnel, stats_req.session_id);
+ if (session != NULL)
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+ else
+ err = -EBADR;
+ break;
+ }
+#ifdef CONFIG_XFRM
+ tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
+#endif
+ if (copy_to_user((void __user *) arg, &tunnel->stats,
+ sizeof(tunnel->stats))) {
+ err = -EFAULT;
+ break;
+ }
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get L2TP stats\n", tunnel->name);
+ err = 0;
+ break;
+
+ default:
+ err = -ENOSYS;
+ break;
+ }
+
+ sock_put(sk);
+
+ return err;
+}
+
+/* Main ioctl() handler.
+ * Dispatch to tunnel or session helpers depending on the socket.
+ */
+static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct pppol2tp_session *session;
+ struct pppol2tp_tunnel *tunnel;
+ int err;
+
+ if (!sk)
+ return 0;
+
+ err = -EBADF;
+ if (sock_flag(sk, SOCK_DEAD) != 0)
+ goto end;
+
+ err = -ENOTCONN;
+ if ((sk->sk_user_data == NULL) ||
+ (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session's session_id is zero, treat ioctl as a
+ * tunnel ioctl
+ */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ err = -EBADF;
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto end;
+
+ err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
+ goto end;
+ }
+
+ err = pppol2tp_session_ioctl(session, cmd, arg);
+
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * setsockopt() / getsockopt() support.
+ *
+ * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
+ * sockets. In order to control kernel tunnel features, we allow userspace to
+ * create a special "tunnel" PPPoX socket which is used for control only.
+ * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
+ * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
+ *****************************************************************************/
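+/* Example (userspace): a minimal sketch using the option names handled below,
+ * where "fd" is a connected PPPoL2TP session socket:
+ *
+ * int on = 1;
+ * int debug;
+ * socklen_t len = sizeof(debug);
+ *
+ * if (setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ, &on, sizeof(on)) < 0)
+ * perror("setsockopt");
+ * if (getsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG, &debug, &len) < 0)
+ * perror("getsockopt");
+ */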
+
+/* Tunnel setsockopt() helper.
+ */
+static int pppol2tp_tunnel_setsockopt(struct sock *sk,
+ struct pppol2tp_tunnel *tunnel,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ tunnel->debug = val;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session setsockopt helper.
+ */
+static int pppol2tp_session_setsockopt(struct sock *sk,
+ struct pppol2tp_session *session,
+ int optname, int val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->recv_seq = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set recv_seq=%d\n", session->name,
+ session->recv_seq);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->send_seq = val ? -1 : 0;
+ {
+ struct sock *ssk = session->sock;
+ struct pppox_sock *po = pppox_sk(ssk);
+ po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
+ PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+ }
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set send_seq=%d\n", session->name, session->send_seq);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ if ((val != 0) && (val != 1)) {
+ err = -EINVAL;
+ break;
+ }
+ session->lns_mode = val ? -1 : 0;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set lns_mode=%d\n", session->name,
+ session->lns_mode);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ session->debug = val;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set debug=%x\n", session->name, session->debug);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ session->reorder_timeout = msecs_to_jiffies(val);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: set reorder_timeout=%d\n", session->name,
+ session->reorder_timeout);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Main setsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session setsockopt
+ * handler, according to whether the PPPoL2TP socket is for a regular
+ * session or the special tunnel type.
+ */
+static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct pppol2tp_session *session = sk->sk_user_data;
+ struct pppol2tp_tunnel *tunnel;
+ int val;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get session context from the socket */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel
+ */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ err = -EBADF;
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto end;
+
+ err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
+ } else
+ err = pppol2tp_session_setsockopt(sk, session, optname, val);
+
+end:
+ return err;
+}
+
+/* Tunnel getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_tunnel_getsockopt(struct sock *sk,
+ struct pppol2tp_tunnel *tunnel,
+ int optname, int __user *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_DEBUG:
+ *val = tunnel->debug;
+ PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%x\n", tunnel->name, tunnel->debug);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ return err;
+}
+
+/* Session getsockopt helper. Called with sock locked.
+ */
+static int pppol2tp_session_getsockopt(struct sock *sk,
+ struct pppol2tp_session *session,
+ int optname, int __user *val)
+{
+ int err = 0;
+
+ switch (optname) {
+ case PPPOL2TP_SO_RECVSEQ:
+ *val = session->recv_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get recv_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_SENDSEQ:
+ *val = session->send_seq;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get send_seq=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_LNSMODE:
+ *val = session->lns_mode;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get lns_mode=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_DEBUG:
+ *val = session->debug;
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get debug=%d\n", session->name, *val);
+ break;
+
+ case PPPOL2TP_SO_REORDERTO:
+ *val = (int) jiffies_to_msecs(session->reorder_timeout);
+ PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
+ "%s: get reorder_timeout=%d\n", session->name, *val);
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ }
+
+ return err;
+}
+
+/* Main getsockopt() entry point.
+ * Does API checks, then calls either the tunnel or session getsockopt
+ * handler, according to whether the PPPoX socket is for a regular session
+ * or the special tunnel type.
+ */
+static int pppol2tp_getsockopt(struct socket *sock, int level,
+ int optname, char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct pppol2tp_session *session = sk->sk_user_data;
+ struct pppol2tp_tunnel *tunnel;
+ int val, len;
+ int err;
+
+ if (level != SOL_PPPOL2TP)
+ return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+
+ if (get_user(len, (int __user *) optlen))
+ return -EFAULT;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ if (len < 0)
+ return -EINVAL;
+
+ err = -ENOTCONN;
+ if (sk->sk_user_data == NULL)
+ goto end;
+
+ /* Get the session context */
+ err = -EBADF;
+ session = pppol2tp_sock_to_session(sk);
+ if (session == NULL)
+ goto end;
+
+ /* Special case: if session_id == 0x0000, treat as operation on tunnel */
+ if ((session->tunnel_addr.s_session == 0) &&
+ (session->tunnel_addr.d_session == 0)) {
+ err = -EBADF;
+ tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
+ if (tunnel == NULL)
+ goto end;
+
+ err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
+ } else
+ err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+
+ if (err < 0)
+ goto end;
+
+ err = -EFAULT;
+ if (put_user(len, (int __user *) optlen))
+ goto end;
+
+ if (copy_to_user((void __user *) optval, &val, len))
+ goto end;
+
+ err = 0;
+end:
+ return err;
+}
+
+/*****************************************************************************
+ * /proc filesystem for debug
+ *****************************************************************************/
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+
+struct pppol2tp_seq_data {
+ struct pppol2tp_tunnel *tunnel; /* current tunnel */
+ struct pppol2tp_session *session; /* NULL means get first session in tunnel */
+};
+
+static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
+{
+ struct pppol2tp_session *session = NULL;
+ struct hlist_node *walk;
+ int found = 0;
+ int next = 0;
+ int i;
+
+ read_lock(&tunnel->hlist_lock);
+ for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
+ hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
+ if (curr == NULL) {
+ found = 1;
+ goto out;
+ }
+ if (session == curr) {
+ next = 1;
+ continue;
+ }
+ if (next) {
+ found = 1;
+ goto out;
+ }
+ }
+ }
+out:
+ read_unlock(&tunnel->hlist_lock);
+ if (!found)
+ session = NULL;
+
+ return session;
+}
+
+static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
+{
+ struct pppol2tp_tunnel *tunnel = NULL;
+
+ read_lock(&pppol2tp_tunnel_list_lock);
+ if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
+ goto out;
+ }
+ tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
+out:
+ read_unlock(&pppol2tp_tunnel_list_lock);
+
+ return tunnel;
+}
+
+static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
+{
+ struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
+ loff_t pos = *offs;
+
+ if (!pos)
+ goto out;
+
+ BUG_ON(m->private == NULL);
+ pd = m->private;
+
+ if (pd->tunnel == NULL) {
+ if (!list_empty(&pppol2tp_tunnel_list))
+ pd->tunnel = list_entry(pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
+ } else {
+ pd->session = next_session(pd->tunnel, pd->session);
+ if (pd->session == NULL) {
+ pd->tunnel = next_tunnel(pd->tunnel);
+ }
+ }
+
+ /* NULL tunnel and session indicates end of list */
+ if ((pd->tunnel == NULL) && (pd->session == NULL))
+ pd = NULL;
+
+out:
+ return pd;
+}
+
+static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return NULL;
+}
+
+static void pppol2tp_seq_stop(struct seq_file *p, void *v)
+{
+ /* nothing to do */
+}
+
+static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_tunnel *tunnel = v;
+
+ seq_printf(m, "\nTUNNEL '%s', %c %d\n",
+ tunnel->name,
+ (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
+ atomic_read(&tunnel->ref_count) - 1);
+ seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+ tunnel->debug,
+ tunnel->stats.tx_packets, tunnel->stats.tx_bytes,
+ tunnel->stats.tx_errors,
+ tunnel->stats.rx_packets, tunnel->stats.rx_bytes,
+ tunnel->stats.rx_errors);
+}
+
+static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_session *session = v;
+
+ seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
+ "%04X/%04X %d %c\n",
+ session->name,
+ ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
+ ntohs(session->tunnel_addr.addr.sin_port),
+ session->tunnel_addr.s_tunnel,
+ session->tunnel_addr.s_session,
+ session->tunnel_addr.d_tunnel,
+ session->tunnel_addr.d_session,
+ session->sock->sk_state,
+ (session == session->sock->sk_user_data) ?
+ 'Y' : 'N');
+ seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
+ session->mtu, session->mru,
+ session->recv_seq ? 'R' : '-',
+ session->send_seq ? 'S' : '-',
+ session->lns_mode ? "LNS" : "LAC",
+ session->debug,
+ jiffies_to_msecs(session->reorder_timeout));
+ seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+ session->nr, session->ns,
+ session->stats.tx_packets,
+ session->stats.tx_bytes,
+ session->stats.tx_errors,
+ session->stats.rx_packets,
+ session->stats.rx_bytes,
+ session->stats.rx_errors);
+}
+
+static int pppol2tp_seq_show(struct seq_file *m, void *v)
+{
+ struct pppol2tp_seq_data *pd = v;
+
+ /* display header on line 1 */
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
+ seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
+ seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ seq_puts(m, " SESSION name, addr/port src-tid/sid "
+ "dest-tid/sid state user-data-ok\n");
+ seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
+ seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
+ goto out;
+ }
+
+ /* Show the tunnel or session context.
+ */
+ if (pd->session == NULL)
+ pppol2tp_seq_tunnel_show(m, pd->tunnel);
+ else
+ pppol2tp_seq_session_show(m, pd->session);
+
+out:
+ return 0;
+}
+
+static struct seq_operations pppol2tp_seq_ops = {
+ .start = pppol2tp_seq_start,
+ .next = pppol2tp_seq_next,
+ .stop = pppol2tp_seq_stop,
+ .show = pppol2tp_seq_show,
+};
+
+/* Called when our /proc file is opened. We allocate data for use when
+ * iterating our tunnel / session contexts and store it in the private
+ * data of the seq_file.
+ */
+static int pppol2tp_proc_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ struct pppol2tp_seq_data *pd;
+ int ret = 0;
+
+ ret = seq_open(file, &pppol2tp_seq_ops);
+ if (ret < 0)
+ goto out;
+
+ m = file->private_data;
+
+ /* Allocate and fill our proc_data for access later */
+ ret = -ENOMEM;
+ m->private = kzalloc(sizeof(struct pppol2tp_seq_data), GFP_KERNEL);
+ if (m->private == NULL)
+ goto out;
+
+ pd = m->private;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/* Called when /proc file access completes.
+ */
+static int pppol2tp_proc_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = (struct seq_file *)file->private_data;
+
+ kfree(m->private);
+ m->private = NULL;
+
+ return seq_release(inode, file);
+}
+
+static struct file_operations pppol2tp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pppol2tp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = pppol2tp_proc_release,
+};
+
+static struct proc_dir_entry *pppol2tp_proc;
+
+#endif /* CONFIG_PROC_FS */
+
+/*****************************************************************************
+ * Init and cleanup
+ *****************************************************************************/
+
+static struct proto_ops pppol2tp_ops = {
+ .family = AF_PPPOX,
+ .owner = THIS_MODULE,
+ .release = pppol2tp_release,
+ .bind = sock_no_bind,
+ .connect = pppol2tp_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pppol2tp_getname,
+ .poll = datagram_poll,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = pppol2tp_setsockopt,
+ .getsockopt = pppol2tp_getsockopt,
+ .sendmsg = pppol2tp_sendmsg,
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+ .ioctl = pppol2tp_ioctl
+};
+
+static int __init pppol2tp_init(void)
+{
+ int err;
+
+ err = proto_register(&pppol2tp_sk_proto, 0);
+ if (err)
+ goto out;
+ err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
+ if (err)
+ goto out_unregister_pppol2tp_proto;
+
+#ifdef CONFIG_PROC_FS
+ pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net);
+ if (!pppol2tp_proc) {
+ err = -ENOMEM;
+ goto out_unregister_pppox_proto;
+ }
+ pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
+#endif /* CONFIG_PROC_FS */
+ printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
+ PPPOL2TP_DRV_VERSION);
+
+out:
+ return err;
+
+out_unregister_pppox_proto:
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+out_unregister_pppol2tp_proto:
+ proto_unregister(&pppol2tp_sk_proto);
+ goto out;
+}
+
+static void __exit pppol2tp_exit(void)
+{
+ unregister_pppox_proto(PX_PROTO_OL2TP);
+
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("pppol2tp", proc_net);
+#endif
+ proto_unregister(&pppol2tp_sk_proto);
+}
+
+module_init(pppol2tp_init);
+module_exit(pppol2tp_exit);
+
+MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>,"
+ "James Chapman <jchapman@katalix.com>");
+MODULE_DESCRIPTION("PPP over L2TP over UDP");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 982a9010c7a9..bb6896ae3151 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2338,7 +2338,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
{
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag, entry;
- struct TxDesc *txd;
+ struct TxDesc * uninitialized_var(txd);
entry = tp->cur_tx;
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index fa29a403a247..afef6c0c59fe 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -796,12 +796,14 @@ static void free_shared_mem(struct s2io_nic *nic)
struct mac_info *mac_control;
struct config_param *config;
int lst_size, lst_per_page;
- struct net_device *dev = nic->dev;
+ struct net_device *dev;
int page_num = 0;
if (!nic)
return;
+ dev = nic->dev;
+
mac_control = &nic->mac_control;
config = &nic->config;
@@ -1135,7 +1137,7 @@ static int init_nic(struct s2io_nic *nic)
* SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
*/
if ((nic->device_type == XFRAME_I_DEVICE) &&
- (get_xena_rev_id(nic->pdev) < 4))
+ (nic->pdev->revision < 4))
writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
val64 = readq(&bar0->tx_fifo_partition_0);
@@ -1873,7 +1875,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
herc = (sp->device_type == XFRAME_II_DEVICE);
if (flag == FALSE) {
- if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
+ if ((!herc && (sp->pdev->revision >= 4)) || herc) {
if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
ret = 1;
} else {
@@ -1881,7 +1883,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
ret = 1;
}
} else {
- if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
+ if ((!herc && (sp->pdev->revision >= 4)) || herc) {
if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
ADAPTER_STATUS_RMAC_PCC_IDLE))
ret = 1;
@@ -7076,23 +7078,6 @@ static void s2io_link(struct s2io_nic * sp, int link)
}
/**
- * get_xena_rev_id - to identify revision ID of xena.
- * @pdev : PCI Dev structure
- * Description:
- * Function to identify the Revision ID of xena.
- * Return value:
- * returns the revision ID of the device.
- */
-
-static int get_xena_rev_id(struct pci_dev *pdev)
-{
- u8 id = 0;
- int ret;
- ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
- return id;
-}
-
-/**
* s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
@@ -7550,7 +7535,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
s2io_vpd_read(sp);
DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
- sp->product_name, get_xena_rev_id(sp->pdev));
+ sp->product_name, pdev->revision);
DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
s2io_driver_version);
DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 58592780f519..3887fe63a908 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1033,7 +1033,6 @@ static void s2io_set_link(struct work_struct *work);
static int s2io_set_swapper(struct s2io_nic * sp);
static void s2io_card_down(struct s2io_nic *nic);
static int s2io_card_up(struct s2io_nic *nic);
-static int get_xena_rev_id(struct pci_dev *pdev);
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
int bit_state);
static int s2io_add_isr(struct s2io_nic * sp);
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index ad94358ece89..7dae4d404978 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -690,9 +690,9 @@ static int lan_saa9730_rx(struct net_device *dev)
lp->stats.rx_packets++;
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *) pData,
- len, 0);
+ len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
@@ -940,15 +940,14 @@ static void lan_saa9730_set_multicast(struct net_device *dev)
CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
&lp->lan_saa9730_regs->CamCtl);
} else {
- if (dev->flags & IFF_ALLMULTI) {
+ if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
/* accept all multicast packets */
- writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
- CAM_CONTROL_BROAD_ACC,
- &lp->lan_saa9730_regs->CamCtl);
- } else {
/*
* Will handle the multicast stuff later. -carstenl
*/
+ writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
+ CAM_CONTROL_BROAD_ACC,
+ &lp->lan_saa9730_regs->CamCtl);
}
}
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 2106becf6990..384b4685e977 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -320,7 +320,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
skb_put(skb, len);
/* Copy out of kseg1 to avoid silly cache flush. */
- eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
+ skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
skb->protocol = eth_type_trans(skb, dev);
/* We don't want to receive our own packets */
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index e886e8d7cfdf..4c3d98ff4cd4 100644
--- a/drivers/net/shaper.c
+++ b/drivers/net/shaper.c
@@ -600,10 +600,9 @@ static int __init shaper_init(void)
return -ENODEV;
alloc_size = sizeof(*dev) * shapers;
- devs = kmalloc(alloc_size, GFP_KERNEL);
+ devs = kzalloc(alloc_size, GFP_KERNEL);
if (!devs)
return -ENOMEM;
- memset(devs, 0, alloc_size);
for (i = 0; i < shapers; i++) {
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index bc8de48da313..ec2ad9f0efa2 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -548,7 +548,7 @@ static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
if (skb) {
skb_reserve(skb, NET_IP_ALIGN);
- eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+ skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
*sk_buff = skb;
sis190_give_to_asic(desc, rx_buf_sz);
ret = 0;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index a2f32151559e..13f08a390e1f 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -692,6 +692,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
u16 reg;
+ u32 rx_reg;
int i;
const u8 *addr = hw->dev[port]->dev_addr;
@@ -768,11 +769,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
/* Configure Rx MAC FIFO */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
- reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
+ rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
if (hw->chip_id == CHIP_ID_YUKON_EX)
- reg |= GMF_RX_OVER_ON;
+ rx_reg |= GMF_RX_OVER_ON;
- sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
/* Flush Rx MAC FIFO on any flow control or error */
sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 786d4b9c07ec..8b6478663a56 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -740,7 +740,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
pci_set_master(pdev);
/* enable MWI -- it vastly improves Rx performance on sparc64 */
- pci_set_mwi(pdev);
+ pci_try_set_mwi(pdev);
#ifdef ZEROCOPY
/* Starfire can do TCP/UDP checksumming */
@@ -1456,7 +1456,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
pci_dma_sync_single_for_cpu(np->pci_dev,
np->rx_info[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
np->rx_info[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index a123ea87893b..b77ab6e8fd35 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -777,7 +777,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
{
skb_reserve(skb,2);
skb_put(skb,totlen);
- eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
+ skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
p->stats.rx_packets++;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 791e081fdc15..f1548c033327 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -853,10 +853,9 @@ static int lance_rx( struct net_device *dev )
skb_reserve( skb, 2 ); /* 16 byte align */
skb_put( skb, pkt_len ); /* Make room */
-// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
PKTBUF_ADDR(head),
- pkt_len, 0);
+ pkt_len);
skb->protocol = eth_type_trans( skb, dev );
netif_rx( skb );
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 2ad8d58dee3b..b3e0158def4f 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -860,7 +860,7 @@ static void bigmac_rx(struct bigmac *bp)
sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
- eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);
+ skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
sbus_dma_sync_single_for_device(bp->bigmac_sdev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e1f912d04043..af0c9831074c 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -397,7 +397,6 @@ struct netdev_private {
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
struct pci_dev *pci_dev;
void __iomem *base;
- unsigned char pci_rev_id;
};
/* The station address location in the EEPROM. */
@@ -544,8 +543,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
dev->change_mtu = &change_mtu;
pci_set_drvdata(pdev, dev);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
-
i = register_netdev(dev);
if (i)
goto err_out_unmap_rx;
@@ -828,7 +825,7 @@ static int netdev_open(struct net_device *dev)
iowrite8(100, ioaddr + RxDMAPollPeriod);
iowrite8(127, ioaddr + TxDMAPollPeriod);
/* Fix DFE-580TX packet drop issue */
- if (np->pci_rev_id >= 0x14)
+ if (np->pci_dev->revision >= 0x14)
iowrite8(0x01, ioaddr + DebugCtrl1);
netif_start_queue(dev);
@@ -1194,7 +1191,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
hw_frame_id = ioread8(ioaddr + TxFrameId);
}
- if (np->pci_rev_id >= 0x14) {
+ if (np->pci_dev->revision >= 0x14) {
spin_lock(&np->lock);
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
@@ -1313,7 +1310,7 @@ static void rx_poll(unsigned long data)
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
desc->frag[0].addr,
np->rx_buf_sz,
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 15146a119230..8b35f13318ea 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3095,12 +3095,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
#ifdef CONFIG_SPARC
hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
- if (hp->hm_revision == 0xff) {
- unsigned char prev;
-
- pci_read_config_byte(pdev, PCI_REVISION_ID, &prev);
- hp->hm_revision = 0xc0 | (prev & 0x0f);
- }
+ if (hp->hm_revision == 0xff)
+ hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
#else
/* works with this on non-sparc hosts */
hp->hm_revision = 0x20;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 42722530ab24..053b7cb0d944 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -549,9 +549,9 @@ static void lance_rx_dvma(struct net_device *dev)
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [entry][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index fa70e0b78af7..1b65ae8a1c7c 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -439,8 +439,8 @@ static void qe_rx(struct sunqe *qep)
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
- len, 0);
+ skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
qep->dev->last_rx = jiffies;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
new file mode 100644
index 000000000000..61f98251feab
--- /dev/null
+++ b/drivers/net/sunvnet.c
@@ -0,0 +1,1295 @@
+/* sunvnet.c: Sun LDOM Virtual Network Driver.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+#include "sunvnet.h"
+
+#define DRV_MODULE_NAME "sunvnet"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "June 25, 2007"
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual network driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Ordered from largest major to lowest */
+static struct vio_version vnet_versions[] = {
+ { .major = 1, .minor = 0 },
+};
+
+static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
+{
+ return vio_dring_avail(dr, VNET_TX_RING_SIZE);
+}
+
+static int vnet_handle_unknown(struct vnet_port *port, void *arg)
+{
+ struct vio_msg_tag *pkt = arg;
+
+ printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
+ printk(KERN_ERR PFX "Resetting connection.\n");
+
+ ldc_disconnect(port->vio.lp);
+
+ return -ECONNRESET;
+}
+
+static int vnet_send_attr(struct vio_driver_state *vio)
+{
+ struct vnet_port *port = to_vnet_port(vio);
+ struct net_device *dev = port->vp->dev;
+ struct vio_net_attr_info pkt;
+ int i;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.tag.type = VIO_TYPE_CTRL;
+ pkt.tag.stype = VIO_SUBTYPE_INFO;
+ pkt.tag.stype_env = VIO_ATTR_INFO;
+ pkt.tag.sid = vio_send_sid(vio);
+ pkt.xfer_mode = VIO_DRING_MODE;
+ pkt.addr_type = VNET_ADDR_ETHERMAC;
+ pkt.ack_freq = 0;
+ for (i = 0; i < 6; i++)
+ pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
+ pkt.mtu = ETH_FRAME_LEN;
+
+ viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] mtu[%llu]\n",
+ pkt.xfer_mode, pkt.addr_type,
+ (unsigned long long) pkt.addr,
+ pkt.ack_freq,
+ (unsigned long long) pkt.mtu);
+
+ return vio_ldc_send(vio, &pkt, sizeof(pkt));
+}
+
+static int handle_attr_info(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] mtu[%llu]\n",
+ pkt->xfer_mode, pkt->addr_type,
+ (unsigned long long) pkt->addr,
+ pkt->ack_freq,
+ (unsigned long long) pkt->mtu);
+
+ pkt->tag.sid = vio_send_sid(vio);
+
+ if (pkt->xfer_mode != VIO_DRING_MODE ||
+ pkt->addr_type != VNET_ADDR_ETHERMAC ||
+ pkt->mtu != ETH_FRAME_LEN) {
+ viodbg(HS, "SEND NET ATTR NACK\n");
+
+ pkt->tag.stype = VIO_SUBTYPE_NACK;
+
+ (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
+
+ return -ECONNRESET;
+ } else {
+ viodbg(HS, "SEND NET ATTR ACK\n");
+
+ pkt->tag.stype = VIO_SUBTYPE_ACK;
+
+ return vio_ldc_send(vio, pkt, sizeof(*pkt));
+ }
+
+}
+
+static int handle_attr_ack(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR ACK\n");
+
+ return 0;
+}
+
+static int handle_attr_nack(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR NACK\n");
+
+ return -ECONNRESET;
+}
+
+static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
+{
+ struct vio_net_attr_info *pkt = arg;
+
+ switch (pkt->tag.stype) {
+ case VIO_SUBTYPE_INFO:
+ return handle_attr_info(vio, pkt);
+
+ case VIO_SUBTYPE_ACK:
+ return handle_attr_ack(vio, pkt);
+
+ case VIO_SUBTYPE_NACK:
+ return handle_attr_nack(vio, pkt);
+
+ default:
+ return -ECONNRESET;
+ }
+}
+
+static void vnet_handshake_complete(struct vio_driver_state *vio)
+{
+ struct vio_dring_state *dr;
+
+ dr = &vio->drings[VIO_DRIVER_RX_RING];
+ dr->snd_nxt = dr->rcv_nxt = 1;
+
+ dr = &vio->drings[VIO_DRIVER_TX_RING];
+ dr->snd_nxt = dr->rcv_nxt = 1;
+}
+
+/* The hypervisor interface that implements copying to/from imported
+ * memory from another domain requires that copies are done to 8-byte
+ * aligned buffers, and that the lengths of such copies are also 8-byte
+ * multiples.
+ *
+ * So we align skb->data to an 8-byte multiple and pad-out the data
+ * area so we can round the copy length up to the next multiple of
+ * 8 for the copy.
+ *
+ * The transmitter puts the actual start of the packet 6 bytes into
+ * the buffer it sends over, so that the IP headers after the ethernet
+ * header are aligned properly. These 6 bytes are not in the descriptor
+ * length; they are simply implied. This offset is represented using
+ * the VNET_PACKET_SKIP macro.
+ */
+static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
+ unsigned int len)
+{
+ struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
+ unsigned long addr, off;
+
+ if (unlikely(!skb))
+ return NULL;
+
+ addr = (unsigned long) skb->data;
+ off = ((addr + 7UL) & ~7UL) - addr;
+ if (off)
+ skb_reserve(skb, off);
+
+ return skb;
+}
+
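+/* Worked example of the rounding described above: for a 60-byte frame,
+ * vnet_rx_one() copies (60 + VNET_PACKET_SKIP + 7) & ~7 = 72 bytes; the
+ * skb is over-allocated by VNET_PACKET_SKIP + 8 + 8, so the aligned,
+ * rounded-up copy always fits before the 6 skip bytes are pulled and the
+ * skb is trimmed back to the real length.
+ */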
+static int vnet_rx_one(struct vnet_port *port, unsigned int len,
+ struct ldc_trans_cookie *cookies, int ncookies)
+{
+ struct net_device *dev = port->vp->dev;
+ unsigned int copy_len;
+ struct sk_buff *skb;
+ int err;
+
+ err = -EMSGSIZE;
+ if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
+ dev->stats.rx_length_errors++;
+ goto out_dropped;
+ }
+
+ skb = alloc_and_align_skb(dev, len);
+ err = -ENOMEM;
+ if (unlikely(!skb)) {
+ dev->stats.rx_missed_errors++;
+ goto out_dropped;
+ }
+
+ copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
+ skb_put(skb, copy_len);
+ err = ldc_copy(port->vio.lp, LDC_COPY_IN,
+ skb->data, copy_len, 0,
+ cookies, ncookies);
+ if (unlikely(err < 0)) {
+ dev->stats.rx_frame_errors++;
+ goto out_free_skb;
+ }
+
+ skb_pull(skb, VNET_PACKET_SKIP);
+ skb_trim(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+
+ netif_rx(skb);
+
+ return 0;
+
+out_free_skb:
+ kfree_skb(skb);
+
+out_dropped:
+ dev->stats.rx_dropped++;
+ return err;
+}
+
+static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
+ u32 start, u32 end, u8 vio_dring_state)
+{
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_ACK,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = start,
+ .end_idx = end,
+ .state = vio_dring_state,
+ };
+ int err, delay;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ } while (err == -EAGAIN);
+
+ return err;
+}
+
+static u32 next_idx(u32 idx, struct vio_dring_state *dr)
+{
+ if (++idx == dr->num_entries)
+ idx = 0;
+ return idx;
+}
+
+static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
+{
+ if (idx == 0)
+ idx = dr->num_entries - 1;
+ else
+ idx--;
+
+ return idx;
+}
+
+static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ u32 index)
+{
+ struct vio_net_desc *desc = port->vio.desc_buf;
+ int err;
+
+ err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
+ (index * dr->entry_size),
+ dr->cookies, dr->ncookies);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return desc;
+}
+
+static int put_rx_desc(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ struct vio_net_desc *desc,
+ u32 index)
+{
+ int err;
+
+ err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
+ (index * dr->entry_size),
+ dr->cookies, dr->ncookies);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int vnet_walk_rx_one(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ u32 index, int *needs_ack)
+{
+ struct vio_net_desc *desc = get_rx_desc(port, dr, index);
+ struct vio_driver_state *vio = &port->vio;
+ int err;
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%lx:%lx]\n",
+ desc->hdr.state, desc->hdr.ack,
+ desc->size, desc->ncookies,
+ desc->cookies[0].cookie_addr,
+ desc->cookies[0].cookie_size);
+
+ if (desc->hdr.state != VIO_DESC_READY)
+ return 1;
+ err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+ if (err == -ECONNRESET)
+ return err;
+ desc->hdr.state = VIO_DESC_DONE;
+ err = put_rx_desc(port, dr, desc, index);
+ if (err < 0)
+ return err;
+ *needs_ack = desc->hdr.ack;
+ return 0;
+}
+
+static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
+ u32 start, u32 end)
+{
+ struct vio_driver_state *vio = &port->vio;
+ int ack_start = -1, ack_end = -1;
+
+ end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+
+ viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
+
+ while (start != end) {
+ int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
+ if (err == -ECONNRESET)
+ return err;
+ if (err != 0)
+ break;
+ if (ack_start == -1)
+ ack_start = start;
+ ack_end = start;
+ start = next_idx(start, dr);
+ if (ack && start != end) {
+ err = vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_ACTIVE);
+ if (err == -ECONNRESET)
+ return err;
+ ack_start = -1;
+ }
+ }
+ if (unlikely(ack_start == -1))
+ ack_start = ack_end = prev_idx(start, dr);
+ return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+}
+
+static int vnet_rx(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_dring_data *pkt = msgbuf;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
+ struct vio_driver_state *vio = &port->vio;
+
+ viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016lx] rcv_nxt[%016lx]\n",
+ pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
+
+ if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+ return 0;
+ if (unlikely(pkt->seq != dr->rcv_nxt)) {
+ printk(KERN_ERR PFX "RX out of sequence seq[0x%lx] "
+ "rcv_nxt[0x%lx]\n", pkt->seq, dr->rcv_nxt);
+ return 0;
+ }
+
+ dr->rcv_nxt++;
+
+ /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
+
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+}
+
+static int idx_is_pending(struct vio_dring_state *dr, u32 end)
+{
+ u32 idx = dr->cons;
+ int found = 0;
+
+ while (idx != dr->prod) {
+ if (idx == end) {
+ found = 1;
+ break;
+ }
+ idx = next_idx(idx, dr);
+ }
+ return found;
+}
+
+static int vnet_ack(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data *pkt = msgbuf;
+ struct net_device *dev;
+ struct vnet *vp;
+ u32 end;
+
+ if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+ return 0;
+
+ end = pkt->end_idx;
+ if (unlikely(!idx_is_pending(dr, end)))
+ return 0;
+
+ dr->cons = next_idx(end, dr);
+
+ vp = port->vp;
+ dev = vp->dev;
+ if (unlikely(netif_queue_stopped(dev) &&
+ vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
+ return 1;
+
+ return 0;
+}
+
+static int vnet_nack(struct vnet_port *port, void *msgbuf)
+{
+ /* XXX just reset or similar XXX */
+ return 0;
+}
+
+static int handle_mcast(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_net_mcast_info *pkt = msgbuf;
+
+ if (pkt->tag.stype != VIO_SUBTYPE_ACK)
+ printk(KERN_ERR PFX "%s: Got unexpected MCAST reply "
+ "[%02x:%02x:%04x:%08x]\n",
+ port->vp->dev->name,
+ pkt->tag.type,
+ pkt->tag.stype,
+ pkt->tag.stype_env,
+ pkt->tag.sid);
+
+ return 0;
+}
+
+static void maybe_tx_wakeup(struct vnet *vp)
+{
+ struct net_device *dev = vp->dev;
+
+ netif_tx_lock(dev);
+ if (likely(netif_queue_stopped(dev))) {
+ struct vnet_port *port;
+ int wake = 1;
+
+ list_for_each_entry(port, &vp->port_list, list) {
+ struct vio_dring_state *dr;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (vnet_tx_dring_avail(dr) <
+ VNET_TX_WAKEUP_THRESH(dr)) {
+ wake = 0;
+ break;
+ }
+ }
+ if (wake)
+ netif_wake_queue(dev);
+ }
+ netif_tx_unlock(dev);
+}
+
+static void vnet_event(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+ unsigned long flags;
+ int tx_wakeup, err;
+
+ spin_lock_irqsave(&vio->lock, flags);
+
+ if (unlikely(event == LDC_EVENT_RESET ||
+ event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+
+ if (event == LDC_EVENT_RESET)
+ vio_port_up(vio);
+ return;
+ }
+
+ if (unlikely(event != LDC_EVENT_DATA_READY)) {
+ printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
+ spin_unlock_irqrestore(&vio->lock, flags);
+ return;
+ }
+
+ tx_wakeup = err = 0;
+ while (1) {
+ union {
+ struct vio_msg_tag tag;
+ u64 raw[8];
+ } msgbuf;
+
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
+
+ if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
+ if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
+ err = vnet_rx(port, &msgbuf);
+ } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
+ err = vnet_ack(port, &msgbuf);
+ if (err > 0)
+ tx_wakeup |= err;
+ } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
+ err = vnet_nack(port, &msgbuf);
+ }
+ } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
+ if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
+ err = handle_mcast(port, &msgbuf);
+ else
+ err = vio_control_pkt_engine(vio, &msgbuf);
+ if (err)
+ break;
+ } else {
+ err = vnet_handle_unknown(port, &msgbuf);
+ }
+ if (err == -ECONNRESET)
+ break;
+ }
+ spin_unlock(&vio->lock);
+ if (unlikely(tx_wakeup && err != -ECONNRESET))
+ maybe_tx_wakeup(port->vp);
+ local_irq_restore(flags);
+}
+
+static int __vnet_tx_trigger(struct vnet_port *port)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_INFO,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = dr->prod,
+ .end_idx = (u32) -1,
+ };
+ int err, delay;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ } while (err == -EAGAIN);
+
+ return err;
+}
+
+struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+ unsigned int hash = vnet_hashfn(skb->data);
+ struct hlist_head *hp = &vp->port_hash[hash];
+ struct hlist_node *n;
+ struct vnet_port *port;
+
+ hlist_for_each_entry(port, n, hp, hash) {
+ if (!compare_ether_addr(port->raddr, skb->data))
+ return port;
+ }
+ port = NULL;
+ if (!list_empty(&vp->port_list))
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+ return port;
+}
+
+struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+ struct vnet_port *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ ret = __tx_port_find(vp, skb);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ return ret;
+}
+
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port = tx_port_find(vp, skb);
+ struct vio_dring_state *dr;
+ struct vio_net_desc *d;
+ unsigned long flags;
+ unsigned int len;
+ void *tx_buf;
+ int i, err;
+
+ if (unlikely(!port))
+ goto out_dropped;
+
+ spin_lock_irqsave(&port->vio.lock, flags);
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+
+ /* This is a hard error, log it. */
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+ "queue awake!\n", dev->name);
+ dev->stats.tx_errors++;
+ }
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ d = vio_dring_cur(dr);
+
+ tx_buf = port->tx_bufs[dr->prod].buf;
+ skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
+
+ len = skb->len;
+ if (len < ETH_ZLEN) {
+ len = ETH_ZLEN;
+ memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
+ }
+
+ d->hdr.ack = VIO_ACK_ENABLE;
+ d->size = len;
+ d->ncookies = port->tx_bufs[dr->prod].ncookies;
+ for (i = 0; i < d->ncookies; i++)
+ d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ wmb();
+
+ d->hdr.state = VIO_DESC_READY;
+
+ err = __vnet_tx_trigger(port);
+ if (unlikely(err < 0)) {
+ printk(KERN_INFO PFX "%s: TX trigger error %d\n",
+ dev->name, err);
+ d->hdr.state = VIO_DESC_FREE;
+ dev->stats.tx_carrier_errors++;
+ goto out_dropped_unlock;
+ }
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
+ if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+ netif_stop_queue(dev);
+ if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+ dev_kfree_skb(skb);
+
+ dev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+
+out_dropped_unlock:
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+
+out_dropped:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static void vnet_tx_timeout(struct net_device *dev)
+{
+ /* XXX Implement me XXX */
+}
+
+static int vnet_open(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int vnet_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
+{
+ struct vnet_mcast_entry *m;
+
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (!memcmp(m->addr, addr, ETH_ALEN))
+ return m;
+ }
+ return NULL;
+}
+
+static void __update_mc_list(struct vnet *vp, struct net_device *dev)
+{
+ struct dev_addr_list *p;
+
+ for (p = dev->mc_list; p; p = p->next) {
+ struct vnet_mcast_entry *m;
+
+ m = __vnet_mc_find(vp, p->dmi_addr);
+ if (m) {
+ m->hit = 1;
+ continue;
+ }
+
+ if (!m) {
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ continue;
+ memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+ m->hit = 1;
+
+ m->next = vp->mcast_list;
+ vp->mcast_list = m;
+ }
+ }
+}
+
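+/* Push the multicast list to the switch port in two passes: first, INFO
+ * messages with set = 1 carry addresses not yet sent, batched up to
+ * VNET_NUM_MCAST per message; then set = 0 messages carry entries whose
+ * hit flag was not refreshed by __update_mc_list(), which are unlinked
+ * and freed as they are reported.
+ */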
+static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
+{
+ struct vio_net_mcast_info info;
+ struct vnet_mcast_entry *m, **pp;
+ int n_addrs;
+
+ memset(&info, 0, sizeof(info));
+
+ info.tag.type = VIO_TYPE_CTRL;
+ info.tag.stype = VIO_SUBTYPE_INFO;
+ info.tag.stype_env = VNET_MCAST_INFO;
+ info.tag.sid = vio_send_sid(&port->vio);
+ info.set = 1;
+
+ n_addrs = 0;
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (m->sent)
+ continue;
+ m->sent = 1;
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+
+ (void) vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+
+ info.set = 0;
+
+ n_addrs = 0;
+ pp = &vp->mcast_list;
+ while ((m = *pp) != NULL) {
+ if (m->hit) {
+ m->hit = 0;
+ pp = &m->next;
+ continue;
+ }
+
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+
+ *pp = m->next;
+ kfree(m);
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+}
+
+static void vnet_set_rx_mode(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+ if (port->switch_port) {
+ __update_mc_list(vp, dev);
+ __send_mc_list(vp, port);
+ }
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+
+static int vnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu != ETH_DATA_LEN)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int vnet_set_mac_addr(struct net_device *dev, void *p)
+{
+ return -EINVAL;
+}
+
+static void vnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static u32 vnet_get_msglevel(struct net_device *dev)
+{
+ struct vnet *vp = netdev_priv(dev);
+ return vp->msg_enable;
+}
+
+static void vnet_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct vnet *vp = netdev_priv(dev);
+ vp->msg_enable = value;
+}
+
+static const struct ethtool_ops vnet_ethtool_ops = {
+ .get_drvinfo = vnet_get_drvinfo,
+ .get_msglevel = vnet_get_msglevel,
+ .set_msglevel = vnet_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_perm_addr = ethtool_op_get_perm_addr,
+};
+
+static void vnet_port_free_tx_bufs(struct vnet_port *port)
+{
+ struct vio_dring_state *dr;
+ int i;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ if (dr->base) {
+ ldc_free_exp_dring(port->vio.lp, dr->base,
+ (dr->entry_size * dr->num_entries),
+ dr->cookies, dr->ncookies);
+ dr->base = NULL;
+ dr->entry_size = 0;
+ dr->num_entries = 0;
+ dr->pending = 0;
+ dr->ncookies = 0;
+ }
+
+ for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+ void *buf = port->tx_bufs[i].buf;
+
+ if (!buf)
+ continue;
+
+ ldc_unmap(port->vio.lp,
+ port->tx_bufs[i].cookies,
+ port->tx_bufs[i].ncookies);
+
+ kfree(buf);
+ port->tx_bufs[i].buf = NULL;
+ }
+}
+
+static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
+{
+ struct vio_dring_state *dr;
+ unsigned long len;
+ int i, err, ncookies;
+ void *dring;
+
+ for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+ void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
+ int map_len = (ETH_FRAME_LEN + 7) & ~7;
+
+ err = -ENOMEM;
+ if (!buf) {
+ printk(KERN_ERR "TX buffer allocation failure\n");
+ goto err_out;
+ }
+ err = -EFAULT;
+ if ((unsigned long)buf & (8UL - 1)) {
+ printk(KERN_ERR "TX buffer misaligned\n");
+ kfree(buf);
+ goto err_out;
+ }
+
+ err = ldc_map_single(port->vio.lp, buf, map_len,
+ port->tx_bufs[i].cookies, 2,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (err < 0) {
+ kfree(buf);
+ goto err_out;
+ }
+ port->tx_bufs[i].buf = buf;
+ port->tx_bufs[i].ncookies = err;
+ }
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ len = (VNET_TX_RING_SIZE *
+ (sizeof(struct vio_net_desc) +
+ (sizeof(struct ldc_trans_cookie) * 2)));
+
+ ncookies = VIO_MAX_RING_COOKIES;
+ dring = ldc_alloc_exp_dring(port->vio.lp, len,
+ dr->cookies, &ncookies,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (IS_ERR(dring)) {
+ err = PTR_ERR(dring);
+ goto err_out;
+ }
+
+ dr->base = dring;
+ dr->entry_size = (sizeof(struct vio_net_desc) +
+ (sizeof(struct ldc_trans_cookie) * 2));
+ dr->num_entries = VNET_TX_RING_SIZE;
+ dr->prod = dr->cons = 0;
+ dr->pending = VNET_TX_RING_SIZE;
+ dr->ncookies = ncookies;
+
+ return 0;
+
+err_out:
+ vnet_port_free_tx_bufs(port);
+
+ return err;
+}
+
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+ struct net_device *dev;
+ struct vnet *vp;
+ int err, i;
+
+ dev = alloc_etherdev(sizeof(*vp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ vp = netdev_priv(dev);
+
+ spin_lock_init(&vp->lock);
+ vp->dev = dev;
+
+ INIT_LIST_HEAD(&vp->port_list);
+ for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&vp->port_hash[i]);
+ INIT_LIST_HEAD(&vp->list);
+ vp->local_mac = *local_mac;
+
+ dev->open = vnet_open;
+ dev->stop = vnet_close;
+ dev->set_multicast_list = vnet_set_rx_mode;
+ dev->set_mac_address = vnet_set_mac_addr;
+ dev->tx_timeout = vnet_tx_timeout;
+ dev->ethtool_ops = &vnet_ethtool_ops;
+ dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->change_mtu = vnet_change_mtu;
+ dev->hard_start_xmit = vnet_start_xmit;
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ goto err_out_free_dev;
+ }
+
+ printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ list_add(&vp->list, &vnet_list);
+
+ return vp;
+
+err_out_free_dev:
+ free_netdev(dev);
+
+ return ERR_PTR(err);
+}
+
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+ struct vnet *iter, *vp;
+
+ mutex_lock(&vnet_list_mutex);
+ vp = NULL;
+ list_for_each_entry(iter, &vnet_list, list) {
+ if (iter->local_mac == *local_mac) {
+ vp = iter;
+ break;
+ }
+ }
+ if (!vp)
+ vp = vnet_new(local_mac);
+ mutex_unlock(&vnet_list_mutex);
+
+ return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+ u64 port_node)
+{
+ const u64 *local_mac = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+ u64 target = mdesc_arc_target(hp, a);
+ const char *name;
+
+ name = mdesc_get_property(hp, target, "name", NULL);
+ if (!name || strcmp(name, "network"))
+ continue;
+
+ local_mac = mdesc_get_property(hp, target,
+ local_mac_prop, NULL);
+ if (local_mac)
+ break;
+ }
+ if (!local_mac)
+ return ERR_PTR(-ENODEV);
+
+ return vnet_find_or_create(local_mac);
+}
+
+static struct ldc_channel_config vnet_ldc_cfg = {
+ .event = vnet_event,
+ .mtu = 64,
+ .mode = LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vnet_vio_ops = {
+ .send_attr = vnet_send_attr,
+ .handle_attr = vnet_handle_attr,
+ .handshake_complete = vnet_handshake_complete,
+};
+
+static void print_version(void)
+{
+ static int version_printed;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
+const char *remote_macaddr_prop = "remote-mac-address";
+
+static int __devinit vnet_port_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct mdesc_handle *hp;
+ struct vnet_port *port;
+ unsigned long flags;
+ struct vnet *vp;
+ const u64 *rmac;
+ int len, i, err, switch_port;
+
+ print_version();
+
+ hp = mdesc_grab();
+
+ vp = vnet_find_parent(hp, vdev->mp);
+ if (IS_ERR(vp)) {
+ printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+ err = PTR_ERR(vp);
+ goto err_out_put_mdesc;
+ }
+
+ rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+ err = -ENODEV;
+ if (!rmac) {
+ printk(KERN_ERR PFX "Port lacks %s property.\n",
+ remote_macaddr_prop);
+ goto err_out_put_mdesc;
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!port) {
+ printk(KERN_ERR PFX "Cannot allocate vnet_port.\n");
+ goto err_out_put_mdesc;
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
+
+ port->vp = vp;
+
+ err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
+ vnet_versions, ARRAY_SIZE(vnet_versions),
+ &vnet_vio_ops, vp->dev->name);
+ if (err)
+ goto err_out_free_port;
+
+ err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
+ if (err)
+ goto err_out_free_port;
+
+ err = vnet_port_alloc_tx_bufs(port);
+ if (err)
+ goto err_out_free_ldc;
+
+ INIT_HLIST_NODE(&port->hash);
+ INIT_LIST_HEAD(&port->list);
+
+ switch_port = 0;
+ if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
+ switch_port = 1;
+ port->switch_port = switch_port;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (switch_port)
+ list_add(&port->list, &vp->port_list);
+ else
+ list_add_tail(&port->list, &vp->port_list);
+ hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ dev_set_drvdata(&vdev->dev, port);
+
+ printk(KERN_INFO "%s: PORT ( remote-mac ", vp->dev->name);
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", port->raddr[i], i == 5 ? ' ' : ':');
+ if (switch_port)
+ printk("switch-port ");
+ printk(")\n");
+
+ vio_port_up(&port->vio);
+
+ mdesc_release(hp);
+
+ return 0;
+
+err_out_free_ldc:
+ vio_ldc_free(&port->vio);
+
+err_out_free_port:
+ kfree(port);
+
+err_out_put_mdesc:
+ mdesc_release(hp);
+ return err;
+}
+
+static int vnet_port_remove(struct vio_dev *vdev)
+{
+ struct vnet_port *port = dev_get_drvdata(&vdev->dev);
+
+ if (port) {
+ struct vnet *vp = port->vp;
+ unsigned long flags;
+
+ del_timer_sync(&port->vio.timer);
+
+ spin_lock_irqsave(&vp->lock, flags);
+ list_del(&port->list);
+ hlist_del(&port->hash);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ vnet_port_free_tx_bufs(port);
+ vio_ldc_free(&port->vio);
+
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
+ }
+ return 0;
+}
+
+static struct vio_device_id vnet_port_match[] = {
+ {
+ .type = "vnet-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(vio, vnet_port_match);
+
+static struct vio_driver vnet_port_driver = {
+ .id_table = vnet_port_match,
+ .probe = vnet_port_probe,
+ .remove = vnet_port_remove,
+ .driver = {
+ .name = "vnet_port",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init vnet_init(void)
+{
+ return vio_register_driver(&vnet_port_driver);
+}
+
+static void __exit vnet_exit(void)
+{
+ vio_unregister_driver(&vnet_port_driver);
+}
+
+module_init(vnet_init);
+module_exit(vnet_exit);
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
new file mode 100644
index 000000000000..d347a5bf24b0
--- /dev/null
+++ b/drivers/net/sunvnet.h
@@ -0,0 +1,83 @@
+#ifndef _SUNVNET_H
+#define _SUNVNET_H
+
+#define DESC_NCOOKIES(entry_size) \
+ ((entry_size) - sizeof(struct vio_net_desc))
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define VNET_TX_TIMEOUT (5 * HZ)
+
+#define VNET_TX_RING_SIZE 512
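+/* Wake a stopped Tx queue once at least a quarter of the ring is free
+ * again (dr->pending is initialised to the ring size); see vnet_ack()
+ * and vnet_start_xmit() in sunvnet.c.
+ */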
+#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
+
+/* VNET packets are sent in buffers with the first 6 bytes skipped
+ * so that after the ethernet header the IPv4/IPv6 headers are aligned
+ * properly.
+ */
+#define VNET_PACKET_SKIP 6
+
+struct vnet_tx_entry {
+ void *buf;
+ unsigned int ncookies;
+ struct ldc_trans_cookie cookies[2];
+};
+
+struct vnet;
+struct vnet_port {
+ struct vio_driver_state vio;
+
+ struct hlist_node hash;
+ u8 raddr[ETH_ALEN];
+ u8 switch_port;
+ u8 __pad;
+
+ struct vnet *vp;
+
+ struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
+
+ struct list_head list;
+};
+
+static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
+{
+ return container_of(vio, struct vnet_port, vio);
+}
+
+#define VNET_PORT_HASH_SIZE 16
+#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1)
+
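+/* Hash ports by the low two bytes of the remote MAC address, e.g. a MAC
+ * ending in ...:a3:1b maps to bucket (0xa3 ^ 0x1b) & 0x0f = 8.
+ */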
+static inline unsigned int vnet_hashfn(u8 *mac)
+{
+ unsigned int val = mac[4] ^ mac[5];
+
+ return val & (VNET_PORT_HASH_MASK);
+}
+
+struct vnet_mcast_entry {
+ u8 addr[ETH_ALEN];
+ u8 sent;
+ u8 hit;
+ struct vnet_mcast_entry *next;
+};
+
+struct vnet {
+ /* Protects port_list and port_hash. */
+ spinlock_t lock;
+
+ struct net_device *dev;
+
+ u32 msg_enable;
+
+ struct list_head port_list;
+
+ struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
+
+ struct vnet_mcast_entry *mcast_list;
+
+ struct list_head list;
+ u64 local_mac;
+};
+
+#endif /* _SUNVNET_H */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75655add3f34..7f94ca930988 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -626,7 +626,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
return -ENODEV;
}
#else
-static int __devinit tc35815_read_plat_dev_addr(struct device *dev)
+static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
{
return -ENODEV;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 2f3184184ad9..887b9a5cfe48 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -64,8 +64,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.77"
-#define DRV_MODULE_RELDATE "May 31, 2007"
+#define DRV_MODULE_VERSION "3.79"
+#define DRV_MODULE_RELDATE "July 18, 2007"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -721,6 +721,44 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+{
+ u32 phy;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 ephy;
+
+ if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
+ tg3_writephy(tp, MII_TG3_EPHY_TEST,
+ ephy | MII_TG3_EPHY_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
+ if (enable)
+ phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ else
+ phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
+ }
+ tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
+ }
+ } else {
+ phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
+ MII_TG3_AUXCTL_SHDWSEL_MISC;
+ if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
+ if (enable)
+ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ else
+ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ phy |= MII_TG3_AUXCTL_MISC_WREN;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+ }
+ }
+}
+
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
u32 val;
@@ -1045,23 +1083,11 @@ out:
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 phy_reg;
-
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
-
- if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
- u32 phy_reg2;
-
- tg3_writephy(tp, MII_TG3_EPHY_TEST,
- phy_reg | MII_TG3_EPHY_SHADOW_EN);
- /* Enable auto-MDIX */
- if (!tg3_readphy(tp, 0x10, &phy_reg2))
- tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
- tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
- }
}
+ tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
return 0;
}
@@ -1162,6 +1188,19 @@ static void tg3_frob_aux_power(struct tg3 *tp)
}
}
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+ return 1;
+ else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
+ if (speed != SPEED_10)
+ return 1;
+ } else if (speed == SPEED_10)
+ return 1;
+
+ return 0;
+}
+
static int tg3_setup_phy(struct tg3 *, int);
#define RESET_KIND_SHUTDOWN 0
@@ -1320,9 +1359,17 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
else
mac_mode = MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
- !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
- mac_mode |= MAC_MODE_LINK_POLARITY;
+ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700) {
+ u32 speed = (tp->tg3_flags &
+ TG3_FLAG_WOL_SPEED_100MB) ?
+ SPEED_100 : SPEED_10;
+ if (tg3_5700_link_polarity(tp, speed))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
} else {
mac_mode = MAC_MODE_PORT_MODE_TBI;
}
@@ -1990,15 +2037,12 @@ relink:
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
- if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
- (current_link_up == 1 &&
- tp->link_config.active_speed == SPEED_10))
- tp->mac_mode |= MAC_MODE_LINK_POLARITY;
- } else {
- if (current_link_up == 1)
+ if (current_link_up == 1 &&
+ tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
/* ??? Without this setting Netgear GA302T PHY does not
@@ -2639,6 +2683,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
}
out:
@@ -2698,10 +2745,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
else
current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
- tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
- tw32_f(MAC_MODE, tp->mac_mode);
- udelay(40);
-
tp->hw_status->status =
(SD_STATUS_UPDATED |
(tp->hw_status->status & ~SD_STATUS_LINK_CHG));
@@ -3512,9 +3555,9 @@ static inline int tg3_irq_sync(struct tg3 *tp)
*/
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
+ spin_lock_bh(&tp->lock);
if (irq_sync)
tg3_irq_quiesce(tp);
- spin_lock_bh(&tp->lock);
}
static inline void tg3_full_unlock(struct tg3 *tp)
@@ -4804,6 +4847,59 @@ static int tg3_poll_fw(struct tg3 *tp)
return 0;
}
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ pci_read_config_dword(tp->pdev, TG3PCI_COMMAND, &val);
+ tp->pci_cmd = val;
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ /* Re-enable indirect register accesses. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ /* Set MAX PCI retry to zero. */
+ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+ val |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+ pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd);
+
+ /* Make sure PCI-X relaxed ordering bit is clear. */
+ pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
+ val &= ~PCIX_CAPS_RELAXED_ORDERING;
+ pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
+
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+ u32 val;
+
+ /* Chip reset on 5780 will reset MSI enable bit,
+ * so need to restore it.
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ u16 ctrl;
+
+ pci_read_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ &ctrl);
+ pci_write_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ ctrl | PCI_MSI_FLAGS_ENABLE);
+ val = tr32(MSGINT_MODE);
+ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+ }
+ }
+}
+
static void tg3_stop_fw(struct tg3 *);
/* tp->lock is held. */
@@ -4820,6 +4916,12 @@ static int tg3_chip_reset(struct tg3 *tp)
*/
tp->nvram_lock_cnt = 0;
+ /* GRC_MISC_CFG core clock reset will clear the memory
+ * enable bit in PCI register 4 and the MSI enable bit
+ * on some chips, so we save relevant registers here.
+ */
+ tg3_save_pci_state(tp);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -4918,50 +5020,14 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
}
- /* Re-enable indirect register accesses. */
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
-
- /* Set MAX PCI retry to zero. */
- val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
- val |= PCISTATE_RETRY_SAME_DMA;
- pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
-
- pci_restore_state(tp->pdev);
+ tg3_restore_pci_state(tp);
tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
- /* Make sure PCI-X relaxed ordering bit is clear. */
- pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
- val &= ~PCIX_CAPS_RELAXED_ORDERING;
- pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
-
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
- u32 val;
-
- /* Chip reset on 5780 will reset MSI enable bit,
- * so need to restore it.
- */
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- u16 ctrl;
-
- pci_read_config_word(tp->pdev,
- tp->msi_cap + PCI_MSI_FLAGS,
- &ctrl);
- pci_write_config_word(tp->pdev,
- tp->msi_cap + PCI_MSI_FLAGS,
- ctrl | PCI_MSI_FLAGS_ENABLE);
- val = tr32(MSGINT_MODE);
- tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
- }
-
+ val = 0;
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
val = tr32(MEMARB_MODE);
- tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
-
- } else
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
@@ -6444,6 +6510,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
@@ -8271,7 +8341,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
- ethtool_op_set_tx_hw_csum(dev, data);
+ ethtool_op_set_tx_ipv6_csum(dev, data);
else
ethtool_op_set_tx_csum(dev, data);
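ethtool_op_set_tx_ipv6_csum() is a net-core helper, so its body is not part of this diff; the intent of the swap is that 5755/5787 stop claiming the catch-all NETIF_F_HW_CSUM and instead toggle the two protocol-specific offload flags. A sketch of what the helper is expected to do (an assumption based on its name and on the feature-flag change later in this patch):

/* Assumed behaviour of the net-core helper, not code from this diff. */
int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	else
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	return 0;
}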
@@ -8805,7 +8875,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
return 0;
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
+ MAC_MODE_PORT_INT_LPBACK;
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else
@@ -8824,19 +8896,18 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
phytest | MII_TG3_EPHY_SHADOW_EN);
if (!tg3_readphy(tp, 0x1b, &phy))
tg3_writephy(tp, 0x1b, phy & ~0x20);
- if (!tg3_readphy(tp, 0x10, &phy))
- tg3_writephy(tp, 0x10, phy & ~0x4000);
tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
}
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
} else
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ tg3_phy_toggle_automdix(tp, 0);
+
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_LINK_POLARITY;
+ mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -8849,8 +8920,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
udelay(10);
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
- mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
+ mac_mode |= MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
@@ -9116,10 +9190,10 @@ static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
__tg3_set_rx_mode(dev);
- tg3_full_unlock(tp);
-
if (netif_running(dev))
tg3_netif_start(tp);
+
+ tg3_full_unlock(tp);
}
#endif
@@ -9410,11 +9484,13 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
case FLASH_5755VENDOR_ATMEL_FLASH_1:
case FLASH_5755VENDOR_ATMEL_FLASH_2:
case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ case FLASH_5755VENDOR_ATMEL_FLASH_5:
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
tp->tg3_flags2 |= TG3_FLG2_FLASH;
tp->nvram_pagesize = 264;
- if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1)
+ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
tp->nvram_size = (protect ? 0x3e200 : 0x80000);
else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
tp->nvram_size = (protect ? 0x1f200 : 0x40000);
@@ -10498,11 +10574,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
continue;
}
if (pci_id->rev != PCI_ANY_ID) {
- u8 rev;
-
- pci_read_config_byte(bridge, PCI_REVISION_ID,
- &rev);
- if (rev > pci_id->rev)
+ if (bridge->revision > pci_id->rev)
continue;
}
if (bridge->subordinate &&
@@ -11929,7 +12001,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
*/
if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- pci_save_state(tp->pdev);
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
}
@@ -11944,12 +12015,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
* checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
- dev->features |= NETIF_F_HW_CSUM;
- else
- dev->features |= NETIF_F_IP_CSUM;
- dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_IPV6_CSUM;
+
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
@@ -11959,12 +12029,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tg3_init_coal(tp);
- /* Now that we have fully setup the chip, save away a snapshot
- * of the PCI config space. We need to restore this after
- * GRC_MISC_CFG core clock resets and some resume events.
- */
- pci_save_state(tp->pdev);
-
pci_set_drvdata(pdev, dev);
err = register_netdev(dev);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bd9f4f428e5b..5c21f49026c9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1467,6 +1467,7 @@
#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003
+#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003
#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003
#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002
#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
@@ -1642,6 +1643,11 @@
#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
+#define MII_TG3_AUXCTL_MISC_WREN 0x8000
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+
#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
#define MII_TG3_AUX_STAT_LPASS 0x0004
#define MII_TG3_AUX_STAT_SPDMASK 0x0700
@@ -1667,6 +1673,9 @@
#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
#define MII_TG3_EPHY_SHADOW_EN 0x80
+#define MII_TG3_EPHYTST_MISCCTRL 0x10 /* 5906 EPHY misc ctrl shadow register */
+#define MII_TG3_EPHYTST_MISCCTRL_MDIX 0x4000
+
#define MII_TG3_TEST1 0x1e
#define MII_TG3_TEST1_TRIM_EN 0x0010
#define MII_TG3_TEST1_CRC_EN 0x8000
@@ -2336,6 +2345,7 @@ struct tg3 {
#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
u32 led_ctrl;
+ u32 pci_cmd;
char board_part_number[24];
char fw_ver[16];
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 106dc1ef0acb..74eb12107e68 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -533,7 +533,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
struct net_device *dev;
TLanPrivateInfo *priv;
- u8 pci_rev;
u16 device_id;
int reg, rc = -ENODEV;
@@ -577,8 +576,6 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
goto err_out_free_dev;
}
- pci_read_config_byte ( pdev, PCI_REVISION_ID, &pci_rev);
-
for ( reg= 0; reg <= 5; reg ++ ) {
if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
pci_io_base = pci_resource_start(pdev, reg);
@@ -595,7 +592,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
dev->base_addr = pci_io_base;
dev->irq = pdev->irq;
- priv->adapterRev = pci_rev;
+ priv->adapterRev = pdev->revision;
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
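tlan is the first of many drivers in this series (de4x5, dmfe, tulip, xircom, via-rhine, via-velocity, pc300, bcm43xx) converted from an explicit config-space read of PCI_REVISION_ID to the revision value that the PCI core now caches in struct pci_dev at enumeration time. The change is mechanical; a sketch of the before/after, with priv->adapterRev standing in for each driver's own field:

/* before: one more config read during probe */
u8 rev;
pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
priv->adapterRev = rev;

/* after: the PCI core already read it once at enumeration time */
priv->adapterRev = pdev->revision;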
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 58d7e5d452fa..f83bb5cb0d3d 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3692,7 +3692,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
__u16 rcode, correlator;
int err = 0;
__u8 xframe = 1;
- __u16 tx_fstatus;
rmf->vl = SWAP_BYTES(rmf->vl);
if(rx_status & FCB_RX_STATUS_DA_MATCHED)
@@ -3783,7 +3782,9 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
}
break;
- case TX_FORWARD:
+ case TX_FORWARD: {
+ __u16 uninitialized_var(tx_fstatus);
+
if((rcode = smctr_rcv_tx_forward(dev, rmf))
!= POSITIVE_ACK)
{
@@ -3811,6 +3812,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
}
}
break;
+ }
/* Received MAC Frames Processed by CRS/REM/RPS. */
case RSP:
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 42fca26afc50..09902891a6e6 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -2134,7 +2134,7 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
u_short vendor, status;
u_int irq = 0, device;
u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int i, j, cfrv;
+ int i, j;
struct de4x5_private *lp = netdev_priv(dev);
struct list_head *walk;
@@ -2150,7 +2150,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
/* Get the chip configuration revision register */
pb = this_dev->bus->number;
- pci_read_config_dword(this_dev, PCI_REVISION_ID, &cfrv);
/* Set the device number information */
lp->device = PCI_SLOT(this_dev->devfn);
@@ -2158,7 +2157,8 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
/* Set the chipset information */
if (is_DC2114x) {
- device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
+ device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
+ ? DC21142 : DC21143);
}
lp->chipset = device;
@@ -2254,7 +2254,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
}
/* Get the chip configuration revision register */
- pci_read_config_dword(pdev, PCI_REVISION_ID, &lp->cfrv);
+ lp->cfrv = pdev->revision;
/* Set the device number information */
lp->device = dev_num;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 4ed67ff0e81e..dab74feb44bc 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -181,11 +181,12 @@
udelay(5);
#define __CHK_IO_SIZE(pci_id, dev_rev) \
- (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
+ (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
DM9102A_IO_SIZE: DM9102_IO_SIZE)
-#define CHK_IO_SIZE(pci_dev, dev_rev) \
- (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
+#define CHK_IO_SIZE(pci_dev) \
+ (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
+ (pci_dev)->revision))
/* Sten Check */
#define DEVICE net_device
@@ -205,7 +206,7 @@ struct rx_desc {
struct dmfe_board_info {
u32 chip_id; /* Chip vendor/Device ID */
- u32 chip_revision; /* Chip revision */
+ u8 chip_revision; /* Chip revision */
struct DEVICE *next_dev; /* next device */
struct pci_dev *pdev; /* PCI device */
spinlock_t lock;
@@ -359,7 +360,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
{
struct dmfe_board_info *db; /* board information structure */
struct net_device *dev;
- u32 dev_rev, pci_pmr;
+ u32 pci_pmr;
int i, err;
DMFE_DBUG(0, "dmfe_init_one()", 0);
@@ -392,10 +393,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
goto err_out_disable;
}
- /* Read Chip revision */
- pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
-
- if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
+ if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
@@ -433,7 +431,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
db->chip_id = ent->driver_data;
db->ioaddr = pci_resource_start(pdev, 0);
- db->chip_revision = dev_rev;
+ db->chip_revision = pdev->revision;
db->wol_mode = 0;
db->pdev = pdev;
@@ -455,7 +453,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
pci_read_config_dword(pdev, 0x50, &pci_pmr);
pci_pmr &= 0x70000;
- if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
+ if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
db->chip_type = 1; /* DM9102A E3 */
else
db->chip_type = 0;
@@ -553,7 +551,7 @@ static int dmfe_open(struct DEVICE *dev)
/* CR6 operation mode decision */
if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
- (db->chip_revision >= 0x02000030) ) {
+ (db->chip_revision >= 0x30) ) {
db->cr6_data |= DMFE_TXTH_256;
db->cr0_data = CR0_DEFAULT;
db->dm910x_chk_mode=4; /* Enter the normal mode */
@@ -1199,9 +1197,9 @@ static void dmfe_timer(unsigned long data)
tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
if ( ((db->chip_id == PCI_DM9102_ID) &&
- (db->chip_revision == 0x02000030)) ||
+ (db->chip_revision == 0x30)) ||
((db->chip_id == PCI_DM9132_ID) &&
- (db->chip_revision == 0x02000010)) ) {
+ (db->chip_revision == 0x10)) ) {
/* DM9102A Chip */
if (tmp_cr12 & 2)
link_ok = 0;
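The odd-looking constant change in dmfe falls out of the same conversion: the old code fetched PCI_REVISION_ID with a 32-bit config read, so the value it compared also contained the class code in the upper bytes (0x020000 for an Ethernet controller), turning revision 0x30 into 0x02000030. With the cached 8-bit pdev->revision only the low byte is left, hence the 0x30, 0x31 and 0x10 in the new tests. A small sketch of the equivalence:

/* Config dword at offset 0x08 is [class code : 24 bits][revision : 8 bits]. */
u32 dword_rev;
u8 rev;
int old_match, new_match;

pci_read_config_dword(pdev, PCI_REVISION_ID, &dword_rev); /* e.g. 0x02000030 */
rev = pdev->revision;                                     /* e.g. 0x30       */

/* the two tests therefore match the same hardware */
old_match = (dword_rev >= 0x02000030);
new_match = (rev >= 0x30);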
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index ea896777bcaf..53efd6694e75 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -197,8 +197,8 @@ int tulip_poll(struct net_device *dev, int *budget)
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
@@ -420,8 +420,8 @@ static int tulip_rx(struct net_device *dev)
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
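These two hunks are instances of a tree-wide substitution repeated below in winbond-840, xircom, typhoon, catc, kaweth and via-rhine: eth_copy_and_sum() had long since stopped folding a checksum into the copy, so the call is replaced by the plainly named skb_copy_to_linear_data() and the now-meaningless trailing 0 is dropped. A rough sketch of the copy-into-fresh-skb receive path being touched, with rx_buf and pkt_len as stand-ins for each driver's ring state:

/* Sketch of the small-packet copy path; only the copy helper changes. */
struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);
if (skb != NULL) {
	skb_reserve(skb, 2);		/* 16-byte align the IP header */
	/* was: eth_copy_and_sum(skb, rx_buf, pkt_len, 0); */
	skb_copy_to_linear_data(skb, rx_buf, pkt_len);
	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}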
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 041af63f2811..f87d76981ab7 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1155,7 +1155,7 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
/* set or disable MWI in the standard PCI command bit.
* Check for the case where mwi is desired but not available
*/
- if (csr0 & MWI) pci_set_mwi(pdev);
+ if (csr0 & MWI) pci_try_set_mwi(pdev);
else pci_clear_mwi(pdev);
/* read result from hardware (in case bit refused to enable) */
@@ -1238,7 +1238,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
};
static int last_irq;
static int multiport_cnt; /* For four-port boards w/one EEPROM */
- u8 chip_rev;
int i, irq;
unsigned short sum;
unsigned char *ee_data;
@@ -1274,10 +1273,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
{
- u32 dev_rev;
/* Read Chip revision */
- pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
- if(dev_rev < 0x02000030)
+ if (pdev->revision < 0x30)
{
printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
return -ENODEV;
@@ -1360,8 +1357,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (!ioaddr)
goto err_out_free_res;
- pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
-
/*
* initialize private data structure 'tp'
* it is zeroed and aligned in alloc_etherdev
@@ -1382,7 +1377,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tp->flags = tulip_tbl[chip_idx].flags;
tp->pdev = pdev;
tp->base_addr = ioaddr;
- tp->revision = chip_rev;
+ tp->revision = pdev->revision;
tp->csr0 = csr0;
spin_lock_init(&tp->lock);
spin_lock_init(&tp->mii_lock);
@@ -1399,7 +1394,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
tulip_mwi_config (pdev, dev);
#else
/* MWI is broken for DC21143 rev 65... */
- if (chip_idx == DC21143 && chip_rev == 65)
+ if (chip_idx == DC21143 && pdev->revision == 65)
tp->csr0 &= ~MWI;
#endif
@@ -1640,7 +1635,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
#else
"Port"
#endif
- " %#llx,", dev->name, chip_name, chip_rev,
+ " %#llx,", dev->name, chip_name, pdev->revision,
(unsigned long long) pci_resource_start(pdev, TULIP_BAR));
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 38f3b99716b8..5824f6a35495 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1232,7 +1232,7 @@ static int netdev_rx(struct net_device *dev)
pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 2470b1ee33c0..16a54e6b8d4f 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -205,7 +205,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
{
struct net_device *dev = NULL;
struct xircom_private *private;
- unsigned char chip_rev;
unsigned long flags;
unsigned short tmp16;
enter("xircom_probe");
@@ -224,8 +223,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_read_config_word (pdev,PCI_STATUS, &tmp16);
pci_write_config_word (pdev, PCI_STATUS,tmp16);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
-
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
return -ENODEV;
@@ -286,7 +283,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
goto reg_fail;
}
- printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
+ printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
@@ -1208,7 +1205,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
goto out;
}
skb_reserve(skb, 2);
- eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
+ skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index f64172927377..fc439f333350 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -524,7 +524,6 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi
int chip_idx = id->driver_data;
long ioaddr;
int i;
- u8 chip_rev;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -620,9 +619,8 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi
if (register_netdev(dev))
goto err_out_cleardev;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
printk(KERN_INFO "%s: %s rev %d at %#3lx,",
- dev->name, xircom_tbl[chip_idx].chip_name, chip_rev, ioaddr);
+ dev->name, xircom_tbl[chip_idx].chip_name, pdev->revision, ioaddr);
for (i = 0; i < 6; i++)
printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
printk(", IRQ %d.\n", dev->irq);
@@ -1242,8 +1240,8 @@ xircom_rx(struct net_device *dev)
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a2c6caaaae93..62b2b3005019 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -432,6 +432,7 @@ static void tun_setup(struct net_device *dev)
init_waitqueue_head(&tun->read_wait);
tun->owner = -1;
+ tun->group = -1;
SET_MODULE_OWNER(dev);
dev->open = tun_net_open;
@@ -467,8 +468,11 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
return -EBUSY;
/* Check permissions */
- if (tun->owner != -1 &&
- current->euid != tun->owner && !capable(CAP_NET_ADMIN))
+ if (((tun->owner != -1 &&
+ current->euid != tun->owner) ||
+ (tun->group != -1 &&
+ current->egid != tun->group)) &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
}
else if (__dev_get_by_name(ifr->ifr_name))
@@ -610,6 +614,13 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
break;
+ case TUNSETGROUP:
+ /* Set group of the device */
+ tun->group= (gid_t) arg;
+
+ DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
+ break;
+
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
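The tun change adds group-based access control next to the existing owner check: a persistent device can now be reattached by a process whose effective gid matches tun->group, set through the new TUNSETGROUP ioctl (the ioctl number itself comes from the matching linux/if_tun.h change, which is not part of this hunk). A minimal userspace sketch, assuming uid/gid 1000 should own a persistent tap0:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)	/* create or attach tap0 */
		return 1;
	ioctl(fd, TUNSETOWNER, 1000);		/* uid allowed to reattach */
	ioctl(fd, TUNSETGROUP, 1000);		/* new: gid allowed to reattach */
	ioctl(fd, TUNSETPERSIST, 1);		/* keep the device after close */
	close(fd);
	return 0;
}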
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 15b2fb8aa492..03587205546e 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1703,7 +1703,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
@@ -2267,12 +2267,6 @@ need_resume:
typhoon_resume(pdev);
return -EBUSY;
}
-
-static int
-typhoon_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable)
-{
- return pci_enable_wake(pdev, state, enable);
-}
#endif
static int __devinit
@@ -2636,7 +2630,6 @@ static struct pci_driver typhoon_driver = {
#ifdef CONFIG_PM
.suspend = typhoon_suspend,
.resume = typhoon_resume,
- .enable_wake = typhoon_enable_wake,
#endif
};
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 86e90c59d551..76752d84a30f 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -255,7 +255,7 @@ static void catc_rx_done(struct urb *urb)
if (!(skb = dev_alloc_skb(pkt_len)))
return;
- eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
+ skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, catc->netdev);
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index bc62b012602b..943988ed01d8 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -305,6 +305,9 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x8086, 0x07d3), // "blob" bootloader
.driver_info = (unsigned long) &blob_info,
}, {
+ USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
+ .driver_info = (unsigned long) &blob_info,
+}, {
// Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
// e.g. Gumstix, current OpenZaurus, ...
USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 60d29440f316..524dc5f5e46d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -635,7 +635,7 @@ static void kaweth_usb_receive(struct urb *urb)
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
+ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
skb_put(skb, pkt_len);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index adea290a9d5e..f51c2c138f10 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -622,7 +622,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
struct net_device *dev;
struct rhine_private *rp;
int i, rc;
- u8 pci_rev;
u32 quirks;
long pioaddr;
long memaddr;
@@ -642,27 +641,25 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
printk(version);
#endif
- pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
-
io_size = 256;
phy_id = 0;
quirks = 0;
name = "Rhine";
- if (pci_rev < VTunknown0) {
+ if (pdev->revision < VTunknown0) {
quirks = rqRhineI;
io_size = 128;
}
- else if (pci_rev >= VT6102) {
+ else if (pdev->revision >= VT6102) {
quirks = rqWOL | rqForceReset;
- if (pci_rev < VT6105) {
+ if (pdev->revision < VT6105) {
name = "Rhine II";
quirks |= rqStatusWBRace; /* Rhine-II exclusive */
}
else {
phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
- if (pci_rev >= VT6105_B0)
+ if (pdev->revision >= VT6105_B0)
quirks |= rq6patterns;
- if (pci_rev < VT6105M)
+ if (pdev->revision < VT6105M)
name = "Rhine III";
else
name = "Rhine III (Management Adapter)";
@@ -1492,9 +1489,9 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
- pkt_len, 0);
+ pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(rp->pdev,
rp->rx_skbuff_dma[entry],
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b670b97bcfde..f331843d1102 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -890,8 +890,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
{
- if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
- return -EIO;
+ vptr->rev_id = pdev->revision;
pci_set_master(pdev);
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4fc8681bc110..a3df09ee729f 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -61,7 +61,7 @@ config COSA
#
config LANMEDIA
tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
- depends on PCI
+ depends on PCI && VIRT_TO_BUS
---help---
Driver for the following Lan Media family of serial boards:
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 6b63b350cd52..8ead774d14c8 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -315,12 +315,11 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
return -ENODEV;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "c101: unable to allocate memory\n");
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
card->dev = alloc_hdlcdev(card);
if (!card->dev) {
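c101 opens a long run of WAN drivers below (cosa, cycx, dscc4, farsync, hostess_sv11, n2, pc300, pc300too, pci200syn, sdla, sealevel, wanxl, x25_asy) where a kmalloc()+memset() pair becomes a single call to a zeroing allocator. The transformation is mechanical; a sketch:

/* before: allocate, then clear by hand */
card = kmalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL)
	return -ENOBUFS;
memset(card, 0, sizeof(card_t));

/* after: kzalloc() hands back already-zeroed memory */
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL)
	return -ENOBUFS;

/* arrays use kcalloc(), which also checks the count * size
 * multiplication for overflow */
chan = kcalloc(nchannels, sizeof(struct channel_data), GFP_KERNEL);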
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 9ef49ce148b2..26058b4f8f36 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -572,13 +572,11 @@ static int cosa_probe(int base, int irq, int dma)
sprintf(cosa->name, "cosa%d", cosa->num);
/* Initialize the per-channel data */
- cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels,
- GFP_KERNEL);
+ cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL);
if (!cosa->chan) {
err = -ENOMEM;
goto err_out3;
}
- memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels);
for (i=0; i<cosa->nchannels; i++) {
cosa->chan[i].cosa = cosa;
cosa->chan[i].num = i;
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 6e5f1c898517..a0e8611ad8e8 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -113,12 +113,10 @@ static int __init cycx_init(void)
/* Verify number of cards and allocate adapter data space */
cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
cycx_ncards = max_t(int, cycx_ncards, 1);
- cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards,
- GFP_KERNEL);
+ cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device), GFP_KERNEL);
if (!cycx_card_array)
goto out;
- memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards);
/* Register adapters with WAN router */
for (cnt = 0; cnt < cycx_ncards; ++cnt) {
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 016b3ff3ea5e..a8af28b273d3 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -376,11 +376,10 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
}
/* allocate and initialize private data */
- chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
+ chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
if (!chan)
return -ENOMEM;
- memset(chan, 0, sizeof(*chan));
strcpy(chan->name, conf->name);
chan->card = card;
chan->link = conf->port;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index dca024471455..50d2f9108dca 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -890,12 +890,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
struct dscc4_dev_priv *root;
int i, ret = -ENOMEM;
- root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
+ root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
if (!root) {
printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
goto err_out;
}
- memset(root, 0, dev_per_card*sizeof(*root));
for (i = 0; i < dev_per_card; i++) {
root[i].dev = alloc_hdlcdev(root + i);
@@ -903,12 +902,11 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
goto err_free_dev;
}
- ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL);
+ ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
if (!ppriv) {
printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
goto err_free_dev;
}
- memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
ppriv->root = root;
spin_lock_init(&ppriv->lock);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 58a53b6d9b42..12dae8e24844 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2476,13 +2476,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Allocate driver private data */
- card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL);
+ card = kzalloc(sizeof (struct fst_card_info), GFP_KERNEL);
if (card == NULL) {
printk_err("FarSync card found but insufficient memory for"
" driver storage\n");
return -ENOMEM;
}
- memset(card, 0, sizeof (struct fst_card_info));
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 9ba3e4ee6ec7..bf5f8d9b5c83 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -231,11 +231,10 @@ static struct sv11_device *sv11_init(int iobase, int irq)
return NULL;
}
- sv = kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
+ sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL);
if(!sv)
goto fail3;
- memset(sv, 0, sizeof(*sv));
sv->if_ptr=&sv->netdev;
sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5c322dfb79f6..cbdf0b748bde 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -351,12 +351,11 @@ static int __init n2_run(unsigned long io, unsigned long irq,
return -ENODEV;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "n2: unable to allocate memory\n");
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 999bf71937ca..99fee2f1d019 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -2833,6 +2833,8 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
int br, tc;
int br_pwr, error;
+ *br_io = 0;
+
if (rate == 0)
return (0);
@@ -3439,7 +3441,6 @@ static int __devinit
cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int first_time = 1;
- ucchar cpc_rev_id;
int err, eeprom_outdated = 0;
ucshort device_id;
pc300_t *card;
@@ -3455,7 +3456,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_enable_device(pdev)) < 0)
return err;
- card = kmalloc(sizeof(pc300_t), GFP_KERNEL);
+ card = kzalloc(sizeof(pc300_t), GFP_KERNEL);
if (card == NULL) {
printk("PC300 found at RAM 0x%016llx, "
"but could not allocate card structure.\n",
@@ -3463,7 +3464,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto err_disable_dev;
}
- memset(card, 0, sizeof(pc300_t));
err = -ENODEV;
@@ -3480,7 +3480,6 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->hw.falcsize = pci_resource_len(pdev, 4);
card->hw.plxphys = pci_resource_start(pdev, 5);
card->hw.plxsize = pci_resource_len(pdev, 5);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id);
switch (device_id) {
case PCI_DEVICE_ID_PC300_RX_1:
@@ -3498,7 +3497,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#ifdef PC300_DEBUG_PCI
printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn);
- printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq);
+ printk("rev_id=%d) IRQ%d\n", pdev->revision, card->hw.irq);
printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx "
"ctladdr=0x%08lx falcaddr=0x%08lx\n",
card->hw.ramphys, card->hw.plxphys, card->hw.scaphys,
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index aff05dba720a..6353cb5c658d 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -311,7 +311,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
card_t *card;
- u8 rev_id;
u32 __iomem *p;
int i;
u32 ramsize;
@@ -335,14 +334,13 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
return i;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "pc300: unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
pci_set_drvdata(pdev, card);
if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
@@ -366,7 +364,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
return -ENOMEM;
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ca06a00d9d86..092e51d89036 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -289,7 +289,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
card_t *card;
- u8 rev_id;
u32 __iomem *p;
int i;
u32 ramsize;
@@ -313,14 +312,13 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
return i;
}
- card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "pci200syn: unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
- memset(card, 0, sizeof(card_t));
pci_set_drvdata(pdev, card);
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
@@ -330,7 +328,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
return -ENOMEM;
}
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 35eded7ffb2d..1cc18e787a65 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -595,8 +595,8 @@ recv_frame( struct net_device *dev )
u32 crc = CRC32_INITIAL;
- unsigned framelen, frameno, ack;
- unsigned is_first, frame_ok;
+ unsigned framelen = 0, frameno, ack;
+ unsigned is_first, frame_ok = 0;
if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
frame_ok = framelen > 4
@@ -604,8 +604,7 @@ recv_frame( struct net_device *dev )
: skip_tail( ioaddr, framelen, crc );
if( frame_ok )
interpret_ack( dev, ack );
- } else
- frame_ok = 0;
+ }
outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
if( frame_ok ) {
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 6a485f0556f4..792e588d7d61 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1196,10 +1196,9 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
if (read)
{
- temp = kmalloc(mem.len, GFP_KERNEL);
+ temp = kzalloc(mem.len, GFP_KERNEL);
if (!temp)
return(-ENOMEM);
- memset(temp, 0, mem.len);
sdla_read(dev, mem.addr, temp, mem.len);
if(copy_to_user(mem.data, temp, mem.len))
{
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 131358108c5a..11276bf3149f 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -270,11 +270,10 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
return NULL;
}
- b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL);
+ b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
if(!b)
goto fail3;
- memset(b, 0, sizeof(*b));
if (!(b->dev[0]= slvl_alloc(iobase, irq)))
goto fail2;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index c73601574334..3c78f9856380 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -599,7 +599,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
}
alloc_size = sizeof(card_t) + ports * sizeof(port_t);
- card = kmalloc(alloc_size, GFP_KERNEL);
+ card = kzalloc(alloc_size, GFP_KERNEL);
if (card == NULL) {
printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
pci_name(pdev));
@@ -607,7 +607,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
pci_disable_device(pdev);
return -ENOBUFS;
}
- memset(card, 0, alloc_size);
pci_set_drvdata(pdev, card);
card->pdev = pdev;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 1c9edd97accd..c48b1cc63fd5 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -786,14 +786,12 @@ static int __init init_x25_asy(void)
printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
"(dynamic channels, max=%d).\n", x25_asy_maxdev );
- x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev,
- GFP_KERNEL);
+ x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL);
if (!x25_asy_devs) {
printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
"array! Uaargh! (-> No X.25 available)\n");
return -ENOMEM;
}
- memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev);
return tty_register_ldisc(N_X25, &x25_ldisc);
}
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2d3a180dada0..ee1cc14db389 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -52,6 +52,8 @@
#include "airo.h"
+#define DRV_NAME "airo"
+
#ifdef CONFIG_PCI
static struct pci_device_id card_ids[] = {
{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
@@ -71,7 +73,7 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state);
static int airo_pci_resume(struct pci_dev *pdev);
static struct pci_driver airo_driver = {
- .name = "airo",
+ .name = DRV_NAME,
.id_table = card_ids,
.probe = airo_pci_probe,
.remove = __devexit_p(airo_pci_remove),
@@ -1092,7 +1094,7 @@ static int get_dec_u16( char *buffer, int *start, int limit );
static void OUT4500( struct airo_info *, u16 register, u16 value );
static unsigned short IN4500( struct airo_info *, u16 register );
static u16 setup_card(struct airo_info*, u8 *mac, int lock);
-static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock );
+static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
static void enable_interrupts(struct airo_info*);
static void disable_interrupts(struct airo_info*);
@@ -1250,7 +1252,7 @@ static int flashputbuf(struct airo_info *ai);
static int flashrestart(struct airo_info *ai,struct net_device *dev);
#define airo_print(type, name, fmt, args...) \
- { printk(type "airo(%s): " fmt "\n", name, ##args); }
+ printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)
#define airo_print_info(name, fmt, args...) \
airo_print(KERN_INFO, name, fmt, ##args)
@@ -1926,28 +1928,54 @@ static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock) {
return rc;
}
+static void try_auto_wep(struct airo_info *ai)
+{
+ if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) {
+ ai->expires = RUN_AT(3*HZ);
+ wake_up_interruptible(&ai->thr_wait);
+ }
+}
+
static int airo_open(struct net_device *dev) {
- struct airo_info *info = dev->priv;
- Resp rsp;
+ struct airo_info *ai = dev->priv;
+ int rc = 0;
- if (test_bit(FLAG_FLASHING, &info->flags))
+ if (test_bit(FLAG_FLASHING, &ai->flags))
return -EIO;
/* Make sure the card is configured.
* Wireless Extensions may postpone config changes until the card
* is open (to pipeline changes and speed-up card setup). If
* those changes are not yet committed, do it now - Jean II */
- if (test_bit (FLAG_COMMIT, &info->flags)) {
- disable_MAC(info, 1);
- writeConfigRid(info, 1);
+ if (test_bit(FLAG_COMMIT, &ai->flags)) {
+ disable_MAC(ai, 1);
+ writeConfigRid(ai, 1);
}
- if (info->wifidev != dev) {
+ if (ai->wifidev != dev) {
+ clear_bit(JOB_DIE, &ai->jobs);
+ ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
+ if (IS_ERR(ai->airo_thread_task))
+ return (int)PTR_ERR(ai->airo_thread_task);
+
+ rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (rc) {
+ airo_print_err(dev->name,
+ "register interrupt %d failed, rc %d",
+ dev->irq, rc);
+ set_bit(JOB_DIE, &ai->jobs);
+ kthread_stop(ai->airo_thread_task);
+ return rc;
+ }
+
/* Power on the MAC controller (which may have been disabled) */
- clear_bit(FLAG_RADIO_DOWN, &info->flags);
- enable_interrupts(info);
+ clear_bit(FLAG_RADIO_DOWN, &ai->flags);
+ enable_interrupts(ai);
+
+ try_auto_wep(ai);
}
- enable_MAC(info, &rsp, 1);
+ enable_MAC(ai, 1);
netif_start_queue(dev);
return 0;
@@ -2338,14 +2366,13 @@ static int airo_set_mac_address(struct net_device *dev, void *p)
{
struct airo_info *ai = dev->priv;
struct sockaddr *addr = p;
- Resp rsp;
readConfigRid(ai, 1);
memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
set_bit (FLAG_COMMIT, &ai->flags);
disable_MAC(ai, 1);
writeConfigRid (ai, 1);
- enable_MAC(ai, &rsp, 1);
+ enable_MAC(ai, 1);
memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
if (ai->wifidev)
memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
@@ -2392,6 +2419,11 @@ static int airo_close(struct net_device *dev) {
disable_MAC(ai, 1);
#endif
disable_interrupts( ai );
+
+ free_irq(dev->irq, dev);
+
+ set_bit(JOB_DIE, &ai->jobs);
+ kthread_stop(ai->airo_thread_task);
}
return 0;
}
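Taken together, the airo_open()/airo_close() hunks above and the init/teardown hunks further down move the IRQ handler and the airo_thread worker from card-init time into the net_device open/close pair, so both are only held while the interface is up. In outline (a condensed sketch of the ordering, not a literal copy of the driver code):

/* open(): start the worker, then grab the IRQ; unwind on failure */
ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
	set_bit(JOB_DIE, &ai->jobs);
	kthread_stop(ai->airo_thread_task);
	return rc;
}

/* close(): release in the opposite order */
free_irq(dev->irq, dev);
set_bit(JOB_DIE, &ai->jobs);
kthread_stop(ai->airo_thread_task);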
@@ -2403,7 +2435,6 @@ void stop_airo_card( struct net_device *dev, int freeres )
set_bit(FLAG_RADIO_DOWN, &ai->flags);
disable_MAC(ai, 1);
disable_interrupts(ai);
- free_irq( dev->irq, dev );
takedown_proc_entry( dev, ai );
if (test_bit(FLAG_REGISTERED, &ai->flags)) {
unregister_netdev( dev );
@@ -2414,9 +2445,6 @@ void stop_airo_card( struct net_device *dev, int freeres )
}
clear_bit(FLAG_REGISTERED, &ai->flags);
}
- set_bit(JOB_DIE, &ai->jobs);
- kthread_stop(ai->airo_thread_task);
-
/*
* Clean out tx queue
*/
@@ -2554,8 +2582,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
* 2) Map PCI memory for issuing commands.
* 3) Allocate memory (shared) to send and receive ethernet frames.
*/
-static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
- const char *name)
+static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
{
unsigned long mem_start, mem_len, aux_start, aux_len;
int rc = -1;
@@ -2569,35 +2596,35 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
aux_start = pci_resource_start(pci, 2);
aux_len = AUXMEMSIZE;
- if (!request_mem_region(mem_start, mem_len, name)) {
- airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s",
- (int)mem_start, (int)mem_len, name);
+ if (!request_mem_region(mem_start, mem_len, DRV_NAME)) {
+ airo_print_err("", "Couldn't get region %x[%x]",
+ (int)mem_start, (int)mem_len);
goto out;
}
- if (!request_mem_region(aux_start, aux_len, name)) {
- airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s",
- (int)aux_start, (int)aux_len, name);
+ if (!request_mem_region(aux_start, aux_len, DRV_NAME)) {
+ airo_print_err("", "Couldn't get region %x[%x]",
+ (int)aux_start, (int)aux_len);
goto free_region1;
}
ai->pcimem = ioremap(mem_start, mem_len);
if (!ai->pcimem) {
- airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s",
- (int)mem_start, (int)mem_len, name);
+ airo_print_err("", "Couldn't map region %x[%x]",
+ (int)mem_start, (int)mem_len);
goto free_region2;
}
ai->pciaux = ioremap(aux_start, aux_len);
if (!ai->pciaux) {
- airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s",
- (int)aux_start, (int)aux_len, name);
+ airo_print_err("", "Couldn't map region %x[%x]",
+ (int)aux_start, (int)aux_len);
goto free_memmap;
}
/* Reserve PKTSIZE for each fid and 2K for the Rids */
ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
if (!ai->shared) {
- airo_print_err(ai->dev->name, "Couldn't alloc_consistent %d",
- PCI_SHARED_LEN);
+ airo_print_err("", "Couldn't alloc_consistent %d",
+ PCI_SHARED_LEN);
goto free_auxmap;
}
@@ -2742,7 +2769,7 @@ static int airo_networks_allocate(struct airo_info *ai)
kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
GFP_KERNEL);
if (!ai->networks) {
- airo_print_warn(ai->dev->name, "Out of memory allocating beacons");
+ airo_print_warn("", "Out of memory allocating beacons");
return -ENOMEM;
}
@@ -2770,7 +2797,6 @@ static int airo_test_wpa_capable(struct airo_info *ai)
{
int status;
CapabilityRid cap_rid;
- const char *name = ai->dev->name;
status = readCapabilityRid(ai, &cap_rid, 1);
if (status != SUCCESS) return 0;
@@ -2778,12 +2804,12 @@ static int airo_test_wpa_capable(struct airo_info *ai)
/* Only firmware versions 5.30.17 or better can do WPA */
if ((cap_rid.softVer > 0x530)
|| ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) {
- airo_print_info(name, "WPA is supported.");
+ airo_print_info("", "WPA is supported.");
return 1;
}
/* No WPA support */
- airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17"
+ airo_print_info("", "WPA unsupported (only firmware versions 5.30.17"
" and greater support WPA. Detected %s)", cap_rid.prodVer);
return 0;
}
@@ -2797,23 +2823,19 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
int i, rc;
/* Create the network device object. */
- dev = alloc_etherdev(sizeof(*ai));
- if (!dev) {
+ dev = alloc_netdev(sizeof(*ai), "", ether_setup);
+ if (!dev) {
airo_print_err("", "Couldn't alloc_etherdev");
return NULL;
- }
- if (dev_alloc_name(dev, dev->name) < 0) {
- airo_print_err("", "Couldn't get name!");
- goto err_out_free;
}
ai = dev->priv;
ai->wifidev = NULL;
- ai->flags = 0;
+ ai->flags = 1 << FLAG_RADIO_DOWN;
ai->jobs = 0;
ai->dev = dev;
if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
- airo_print_dbg(dev->name, "Found an MPI350 card");
+ airo_print_dbg("", "Found an MPI350 card");
set_bit(FLAG_MPI, &ai->flags);
}
spin_lock_init(&ai->aux_lock);
@@ -2821,14 +2843,11 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->config.len = 0;
ai->pci = pci;
init_waitqueue_head (&ai->thr_wait);
- ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
- if (IS_ERR(ai->airo_thread_task))
- goto err_out_free;
ai->tfm = NULL;
add_airo_dev(ai);
if (airo_networks_allocate (ai))
- goto err_out_thr;
+ goto err_out_free;
airo_networks_initialize (ai);
/* The Airo-specific entries in the device structure. */
@@ -2851,27 +2870,22 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
dev->base_addr = port;
SET_NETDEV_DEV(dev, dmdev);
+ SET_MODULE_OWNER(dev);
reset_card (dev, 1);
msleep(400);
- rc = request_irq( dev->irq, airo_interrupt, IRQF_SHARED, dev->name, dev );
- if (rc) {
- airo_print_err(dev->name, "register interrupt %d failed, rc %d",
- irq, rc);
- goto err_out_nets;
- }
if (!is_pcmcia) {
- if (!request_region( dev->base_addr, 64, dev->name )) {
+ if (!request_region(dev->base_addr, 64, DRV_NAME)) {
rc = -EBUSY;
airo_print_err(dev->name, "Couldn't request region");
- goto err_out_irq;
+ goto err_out_nets;
}
}
if (test_bit(FLAG_MPI,&ai->flags)) {
- if (mpi_map_card(ai, pci, dev->name)) {
- airo_print_err(dev->name, "Could not map memory");
+ if (mpi_map_card(ai, pci)) {
+ airo_print_err("", "Could not map memory");
goto err_out_res;
}
}
@@ -2899,6 +2913,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
}
+ strcpy(dev->name, "eth%d");
rc = register_netdev(dev);
if (rc) {
airo_print_err(dev->name, "Couldn't register_netdev");
@@ -2921,8 +2936,6 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
if (setup_proc_entry(dev, dev->priv) < 0)
goto err_out_wifi;
- netif_start_queue(dev);
- SET_MODULE_OWNER(dev);
return dev;
err_out_wifi:
@@ -2940,14 +2953,9 @@ err_out_map:
err_out_res:
if (!is_pcmcia)
release_region( dev->base_addr, 64 );
-err_out_irq:
- free_irq(dev->irq, dev);
err_out_nets:
airo_networks_free(ai);
-err_out_thr:
del_airo_dev(ai);
- set_bit(JOB_DIE, &ai->jobs);
- kthread_stop(ai->airo_thread_task);
err_out_free:
free_netdev(dev);
return NULL;
@@ -3078,7 +3086,8 @@ static int airo_thread(void *data) {
struct net_device *dev = data;
struct airo_info *ai = dev->priv;
int locked;
-
+
+ set_freezable();
while(1) {
/* make swsusp happy with our thread */
try_to_freeze();
@@ -3529,9 +3538,11 @@ static u16 IN4500( struct airo_info *ai, u16 reg ) {
return rc;
}
-static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) {
+static int enable_MAC(struct airo_info *ai, int lock)
+{
int rc;
- Cmd cmd;
+ Cmd cmd;
+ Resp rsp;
/* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions
* FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down"
@@ -3547,7 +3558,7 @@ static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) {
if (!test_bit(FLAG_ENABLED, &ai->flags)) {
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = MAC_ENABLE;
- rc = issuecommand(ai, &cmd, rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc == SUCCESS)
set_bit(FLAG_ENABLED, &ai->flags);
} else
@@ -3557,8 +3568,12 @@ static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) {
up(&ai->sem);
if (rc)
- airo_print_err(ai->dev->name, "%s: Cannot enable MAC, err=%d",
- __FUNCTION__, rc);
+ airo_print_err(ai->dev->name, "Cannot enable MAC");
+ else if ((rsp.status & 0xFF00) != 0) {
+ airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, "
+ "rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2);
+ rc = ERROR;
+ }
return rc;
}
@@ -3902,12 +3917,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
if ( status != SUCCESS ) return ERROR;
}
- status = enable_MAC(ai, &rsp, lock);
- if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) {
- airo_print_err(ai->dev->name, "Bad MAC enable reason = %x, rid = %x,"
- " offset = %d", rsp.rsp0, rsp.rsp1, rsp.rsp2 );
+ status = enable_MAC(ai, lock);
+ if (status != SUCCESS)
return ERROR;
- }
/* Grab the initial wep key, we gotta save it for auto_wep */
rc = readWepKeyRid(ai, &wkr, 1, lock);
@@ -3919,10 +3931,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
rc = readWepKeyRid(ai, &wkr, 0, lock);
} while(lastindex != wkr.kindex);
- if (auto_wep) {
- ai->expires = RUN_AT(3*HZ);
- wake_up_interruptible(&ai->thr_wait);
- }
+ try_auto_wep(ai);
return SUCCESS;
}
@@ -4004,7 +4013,7 @@ static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
}
if ( !(max_tries--) ) {
airo_print_err(ai->dev->name,
- "airo: BAP setup error too many retries\n");
+ "BAP setup error too many retries\n");
return ERROR;
}
// -- PC4500 missed it, try again
@@ -5152,7 +5161,6 @@ static void proc_SSID_on_close( struct inode *inode, struct file *file ) {
struct net_device *dev = dp->data;
struct airo_info *ai = dev->priv;
SsidRid SSID_rid;
- Resp rsp;
int i;
int offset = 0;
@@ -5177,7 +5185,7 @@ static void proc_SSID_on_close( struct inode *inode, struct file *file ) {
SSID_rid.len = sizeof(SSID_rid);
disable_MAC(ai, 1);
writeSsidRid(ai, &SSID_rid, 1);
- enable_MAC(ai, &rsp, 1);
+ enable_MAC(ai, 1);
}
static inline u8 hexVal(char c) {
@@ -5193,7 +5201,6 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
struct net_device *dev = dp->data;
struct airo_info *ai = dev->priv;
APListRid APList_rid;
- Resp rsp;
int i;
if ( !data->writelen ) return;
@@ -5218,18 +5225,17 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
}
disable_MAC(ai, 1);
writeAPListRid(ai, &APList_rid, 1);
- enable_MAC(ai, &rsp, 1);
+ enable_MAC(ai, 1);
}
/* This function wraps PC4500_writerid with a MAC disable */
static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
int len, int dummy ) {
int rc;
- Resp rsp;
disable_MAC(ai, 1);
rc = PC4500_writerid(ai, rid, rid_data, len, 1);
- enable_MAC(ai, &rsp, 1);
+ enable_MAC(ai, 1);
return rc;
}
@@ -5260,7 +5266,6 @@ static int set_wep_key(struct airo_info *ai, u16 index,
const char *key, u16 keylen, int perm, int lock ) {
static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
WepKeyRid wkr;
- Resp rsp;
memset(&wkr, 0, sizeof(wkr));
if (keylen == 0) {
@@ -5280,7 +5285,7 @@ static int set_wep_key(struct airo_info *ai, u16 index,
if (perm) disable_MAC(ai, lock);
writeWepKeyRid(ai, &wkr, perm, lock);
- if (perm) enable_MAC(ai, &rsp, lock);
+ if (perm) enable_MAC(ai, lock);
return 0;
}
@@ -5548,7 +5553,6 @@ static int proc_close( struct inode *inode, struct file *file )
changed. */
static void timer_func( struct net_device *dev ) {
struct airo_info *apriv = dev->priv;
- Resp rsp;
/* We don't have a link so try changing the authtype */
readConfigRid(apriv, 0);
@@ -5575,7 +5579,7 @@ static void timer_func( struct net_device *dev ) {
}
set_bit (FLAG_COMMIT, &apriv->flags);
writeConfigRid(apriv, 0);
- enable_MAC(apriv, &rsp, 0);
+ enable_MAC(apriv, 0);
up(&apriv->sem);
/* Schedule check to see if the change worked */
@@ -5597,8 +5601,10 @@ static int __devinit airo_pci_probe(struct pci_dev *pdev,
dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
else
dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
- if (!dev)
+ if (!dev) {
+ pci_disable_device(pdev);
return -ENODEV;
+ }
pci_set_drvdata(pdev, dev);
return 0;
@@ -5610,6 +5616,8 @@ static void __devexit airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -5646,7 +5654,6 @@ static int airo_pci_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct airo_info *ai = dev->priv;
- Resp rsp;
pci_power_t prev_state = pdev->current_state;
pci_set_power_state(pdev, PCI_D0);
@@ -5679,7 +5686,7 @@ static int airo_pci_resume(struct pci_dev *pdev)
ai->APList = NULL;
}
writeConfigRid(ai, 0);
- enable_MAC(ai, &rsp, 0);
+ enable_MAC(ai, 0);
ai->power = PMSG_ON;
netif_device_attach(dev);
netif_wake_queue(dev);
@@ -5903,7 +5910,6 @@ static int airo_set_essid(struct net_device *dev,
char *extra)
{
struct airo_info *local = dev->priv;
- Resp rsp;
SsidRid SSID_rid; /* SSIDs */
/* Reload the list of current SSID */
@@ -5935,7 +5941,7 @@ static int airo_set_essid(struct net_device *dev,
/* Write it to the card */
disable_MAC(local, 1);
writeSsidRid(local, &SSID_rid, 1);
- enable_MAC(local, &rsp, 1);
+ enable_MAC(local, 1);
return 0;
}
@@ -6000,7 +6006,7 @@ static int airo_set_wap(struct net_device *dev,
memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN);
disable_MAC(local, 1);
writeAPListRid(local, &APList_rid, 1);
- enable_MAC(local, &rsp, 1);
+ enable_MAC(local, 1);
}
return 0;
}
@@ -7454,7 +7460,6 @@ static int airo_config_commit(struct net_device *dev,
char *extra) /* NULL */
{
struct airo_info *local = dev->priv;
- Resp rsp;
if (!test_bit (FLAG_COMMIT, &local->flags))
return 0;
@@ -7479,7 +7484,7 @@ static int airo_config_commit(struct net_device *dev,
if (down_interruptible(&local->sem))
return -ERESTARTSYS;
writeConfigRid(local, 0);
- enable_MAC(local, &rsp, 0);
+ enable_MAC(local, 0);
if (test_bit (FLAG_RESET, &local->flags))
airo_set_promisc(local);
else
@@ -7746,7 +7751,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
unsigned char *iobuf;
int len;
struct airo_info *ai = dev->priv;
- Resp rsp;
if (test_bit(FLAG_FLASHING, &ai->flags))
return -EIO;
@@ -7758,7 +7762,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
if (test_bit(FLAG_COMMIT, &ai->flags)) {
disable_MAC (ai, 1);
writeConfigRid (ai, 1);
- enable_MAC (ai, &rsp, 1);
+ enable_MAC(ai, 1);
}
break;
case AIROGSLIST: ridcode = RID_SSID; break;
@@ -7815,7 +7819,6 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
struct airo_info *ai = dev->priv;
int ridcode;
int enabled;
- Resp rsp;
static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
unsigned char *iobuf;
@@ -7849,7 +7852,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
* same with MAC off
*/
case AIROPMACON:
- if (enable_MAC(ai, &rsp, 1) != 0)
+ if (enable_MAC(ai, 1) != 0)
return -EIO;
return 0;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index ef6b253a92ce..c5d6753a55ea 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3741,10 +3741,8 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
&bcm->board_type);
if (err)
goto err_iounmap;
- err = bcm43xx_pci_read_config16(bcm, PCI_REVISION_ID,
- &bcm->board_revision);
- if (err)
- goto err_iounmap;
+
+ bcm->board_revision = bcm->pci_dev->revision;
err = bcm43xx_chipset_attach(bcm);
if (err)
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 0cd48d151f5e..7da3664b8515 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -453,8 +453,6 @@ static struct pci_driver prism2_pci_drv_id = {
.suspend = prism2_pci_suspend,
.resume = prism2_pci_resume,
#endif /* CONFIG_PM */
- /* Linux 2.4.6 added save_state and enable_wake that are not used here
- */
};
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0183df757b3e..040dc3e36410 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -613,9 +613,6 @@ static struct pci_driver prism2_plx_drv_id = {
.id_table = prism2_plx_id_table,
.probe = prism2_plx_probe,
.remove = prism2_plx_remove,
- .suspend = NULL,
- .resume = NULL,
- .enable_wake = NULL
};
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index d51daf87450f..8990585bd228 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -1768,7 +1768,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
if (priv->stop_rf_kill) {
priv->stop_rf_kill = 0;
- queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+ queue_delayed_work(priv->workqueue, &priv->rf_kill,
+ round_jiffies(HZ));
}
deferred = 1;
@@ -2098,7 +2099,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+ queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ));
}
static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
@@ -4233,7 +4234,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
/* Make sure the RF_KILL check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+ queue_delayed_work(priv->workqueue, &priv->rf_kill,
+ round_jiffies(HZ));
} else
schedule_reset(priv);
}
@@ -5969,7 +5971,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
if (!priv->stop_rf_kill)
- queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+ queue_delayed_work(priv->workqueue, &priv->rf_kill,
+ round_jiffies(HZ));
goto exit_unlock;
}
@@ -7865,10 +7868,10 @@ static int ipw2100_wx_set_powermode(struct net_device *dev,
goto done;
}
- if ((mode < 1) || (mode > POWER_MODES))
+ if ((mode < 0) || (mode > POWER_MODES))
mode = IPW_POWER_AUTO;
- if (priv->power_mode != mode)
+ if (IPW_POWER_LEVEL(priv->power_mode) != mode)
err = ipw2100_set_power_mode(priv, mode);
done:
mutex_unlock(&priv->action_mutex);
@@ -7899,7 +7902,7 @@ static int ipw2100_wx_get_powermode(struct net_device *dev,
break;
case IPW_POWER_AUTO:
snprintf(extra, MAX_POWER_STRING,
- "Power save level: %d (Auto)", 0);
+ "Power save level: %d (Auto)", level);
break;
default:
timeout = timeout_duration[level - 1] / 1000;
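The ipw2100 hunks above replace bare one-second delays with round_jiffies(HZ) when re-arming the RF-kill poll, so the expiry lands on a whole-second boundary and can share a wakeup with other rounded timers instead of waking the CPU on its own. A minimal hedged sketch of the idea; round_jiffies_relative() is assumed here to be the companion helper for relative delays and is not what this patch itself uses:

	/* Illustrative only: re-arm a roughly one-second poll aligned to a
	 * whole second so it can coalesce with other rounded timers. */
	queue_delayed_work(priv->workqueue, &priv->rf_kill,
			   round_jiffies_relative(HZ));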
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 7cb2052a55a5..61497c467467 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -70,7 +70,7 @@
#define VQ
#endif
-#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
+#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION IPW2200_VERSION
@@ -1751,7 +1751,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
/* Make sure the RF_KILL check timer is running */
cancel_delayed_work(&priv->rf_kill);
queue_delayed_work(priv->workqueue, &priv->rf_kill,
- 2 * HZ);
+ round_jiffies(2 * HZ));
} else
queue_work(priv->workqueue, &priv->up);
}
@@ -2506,7 +2506,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
break;
}
- param = cpu_to_le32(mode);
+ param = cpu_to_le32(param);
return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
&param);
}
@@ -4690,7 +4690,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
queue_delayed_work(priv->workqueue,
- &priv->request_scan, HZ);
+ &priv->request_scan,
+ round_jiffies(HZ));
/* Send an empty event to user space.
* We don't send the received data on the event because
@@ -9567,6 +9568,7 @@ static int ipw_wx_set_power(struct net_device *dev,
priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
else
priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+
err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
if (err) {
IPW_DEBUG_WX("failed setting power mode.\n");
@@ -9603,22 +9605,19 @@ static int ipw_wx_set_powermode(struct net_device *dev,
struct ipw_priv *priv = ieee80211_priv(dev);
int mode = *(int *)extra;
int err;
+
mutex_lock(&priv->mutex);
- if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
+ if ((mode < 1) || (mode > IPW_POWER_LIMIT))
mode = IPW_POWER_AC;
- priv->power_mode = mode;
- } else {
- priv->power_mode = IPW_POWER_ENABLED | mode;
- }
- if (priv->power_mode != mode) {
+ if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
err = ipw_send_power_mode(priv, mode);
-
if (err) {
IPW_DEBUG_WX("failed setting power mode.\n");
mutex_unlock(&priv->mutex);
return err;
}
+ priv->power_mode = IPW_POWER_ENABLED | mode;
}
mutex_unlock(&priv->mutex);
return 0;
@@ -10554,7 +10553,7 @@ static irqreturn_t ipw_isr(int irq, void *data)
spin_lock(&priv->irq_lock);
if (!(priv->status & STATUS_INT_ENABLED)) {
- /* Shared IRQ */
+ /* IRQ is disabled */
goto none;
}
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 13f6528abb00..4a8f5dc70239 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -240,7 +240,7 @@ static int wlan_cmd_802_11_enable_rsn(wlan_private * priv,
if (*enable)
penableRSN->enable = cpu_to_le16(cmd_enable_rsn);
else
- penableRSN->enable = cpu_to_le16(cmd_enable_rsn);
+ penableRSN->enable = cpu_to_le16(cmd_disable_rsn);
}
lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4a59306a3f05..9f366242c392 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -613,6 +613,7 @@ static int wlan_service_main_thread(void *data)
init_waitqueue_entry(&wait, current);
+ set_freezable();
for (;;) {
lbs_deb_thread( "main-thread 111: intcounter=%d "
"currenttxskb=%p dnld_sent=%d\n",
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 88d9d2d787d5..769c86fb9509 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -439,7 +439,6 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb)
ret = 0;
done:
- skb->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/libertas/version.h b/drivers/net/wireless/libertas/version.h
deleted file mode 100644
index 8b137891791f..000000000000
--- a/drivers/net/wireless/libertas/version.h
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index f42b796b5e47..2fcc3bf21081 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1719,9 +1719,6 @@ static int wlan_set_encodeext(struct net_device *dev,
pkey->type = KEY_TYPE_ID_TKIP;
} else if (alg == IW_ENCODE_ALG_CCMP) {
pkey->type = KEY_TYPE_ID_AES;
- } else {
- ret = -EINVAL;
- goto out;
}
/* If WPA isn't enabled yet, do that now */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 283be4a70524..585f5996d292 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -1853,7 +1853,6 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
struct mac_entry *entry;
- struct list_head *ptr;
struct sockaddr *addr = (struct sockaddr *) extra;
if (addr->sa_family != ARPHRD_ETHER)
@@ -1861,11 +1860,9 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
if (down_interruptible(&acl->sem))
return -ERESTARTSYS;
- for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
- entry = list_entry(ptr, struct mac_entry, _list);
-
+ list_for_each_entry(entry, &acl->mac_list, _list) {
if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
- list_del(ptr);
+ list_del(&entry->_list);
acl->size--;
kfree(entry);
up(&acl->sem);
@@ -1883,7 +1880,6 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
struct mac_entry *entry;
- struct list_head *ptr;
struct sockaddr *dst = (struct sockaddr *) extra;
dwrq->length = 0;
@@ -1891,9 +1887,7 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
if (down_interruptible(&acl->sem))
return -ERESTARTSYS;
- for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
- entry = list_entry(ptr, struct mac_entry, _list);
-
+ list_for_each_entry(entry, &acl->mac_list, _list) {
memcpy(dst->sa_data, entry->addr, ETH_ALEN);
dst->sa_family = ARPHRD_ETHER;
dwrq->length++;
@@ -1960,7 +1954,6 @@ prism54_get_policy(struct net_device *ndev, struct iw_request_info *info,
static int
prism54_mac_accept(struct islpci_acl *acl, char *mac)
{
- struct list_head *ptr;
struct mac_entry *entry;
int res = 0;
@@ -1972,8 +1965,7 @@ prism54_mac_accept(struct islpci_acl *acl, char *mac)
return 1;
}
- for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) {
- entry = list_entry(ptr, struct mac_entry, _list);
+ list_for_each_entry(entry, &acl->mac_list, _list) {
if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
res = 1;
break;
@@ -2216,11 +2208,9 @@ prism54_wpa_bss_ie_init(islpci_private *priv)
void
prism54_wpa_bss_ie_clean(islpci_private *priv)
{
- struct list_head *ptr, *n;
+ struct islpci_bss_wpa_ie *bss, *n;
- list_for_each_safe(ptr, n, &priv->bss_wpa_list) {
- struct islpci_bss_wpa_ie *bss;
- bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
+ list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) {
kfree(bss);
}
}
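The prism54 hunks above convert open-coded list walks into the list_for_each_entry() helpers, which hide the list_entry() bookkeeping behind a typed cursor. The _safe variant used in prism54_wpa_bss_ie_clean() fetches the next node before the loop body runs, which is what makes freeing the current entry legal. A small illustrative sketch of that deletion pattern, reusing the mac_entry type from the driver above (not part of the patch):

	struct mac_entry *entry, *next;

	/* _safe: 'next' is fetched before 'entry' may be freed below. */
	list_for_each_entry_safe(entry, next, &acl->mac_list, _list) {
		list_del(&entry->_list);
		kfree(entry);
	}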
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 3dcb13bb7d57..af2e4f2405f2 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -87,7 +87,6 @@ static struct pci_driver prism54_driver = {
.remove = prism54_remove,
.suspend = prism54_suspend,
.resume = prism54_resume,
- /* .enable_wake ; we don't support this yet */
};
/******************************************************************************
@@ -167,8 +166,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
/* enable MWI */
- if (!pci_set_mwi(pdev))
- printk(KERN_INFO "%s: pci_set_mwi(pdev) succeeded\n", DRV_NAME);
+ pci_try_set_mwi(pdev);
/* setup the network device interface and its structure */
if (!(ndev = islpci_setup(pdev))) {
diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl8187_rtl8225.c
index e25a09f1b068..efc41207780e 100644
--- a/drivers/net/wireless/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl8187_rtl8225.c
@@ -67,7 +67,7 @@ static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
msleep(2);
}
-static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, u16 data)
+static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data)
{
struct rtl8187_priv *priv = dev->priv;
u16 reg80, reg82, reg84;
@@ -106,7 +106,7 @@ void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
struct rtl8187_priv *priv = dev->priv;
if (priv->asic_rev)
- rtl8225_write_8051(dev, addr, data);
+ rtl8225_write_8051(dev, addr, cpu_to_le16(data));
else
rtl8225_write_bitbang(dev, addr, data);
}
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ce9230b2f630..c8b5c2271938 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1011,7 +1011,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
} else {
skb->dev = dev;
skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
- eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0);
+ skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
wl3501_receive(this, skb->data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 5b624bfc01a6..c39f1984b84d 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -49,8 +49,9 @@ void zd_chip_clear(struct zd_chip *chip)
ZD_MEMCLEAR(chip, sizeof(*chip));
}
-static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size)
+static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size)
{
+ u8 *addr = zd_usb_to_netdev(&chip->usb)->dev_addr;
return scnprintf(buffer, size, "%02x-%02x-%02x",
addr[0], addr[1], addr[2]);
}
@@ -61,10 +62,10 @@ static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size)
int i = 0;
i = scnprintf(buffer, size, "zd1211%s chip ",
- chip->is_zd1211b ? "b" : "");
+ zd_chip_is_zd1211b(chip) ? "b" : "");
i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i);
i += scnprintf(buffer+i, size-i, " ");
- i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i);
+ i += scnprint_mac_oui(chip, buffer+i, size-i);
i += scnprintf(buffer+i, size-i, " ");
i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c%c", chip->pa_type,
@@ -366,64 +367,9 @@ error:
return r;
}
-static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr,
- const zd_addr_t *addr)
-{
- int r;
- u32 parts[2];
-
- r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2);
- if (r) {
- dev_dbg_f(zd_chip_dev(chip),
- "error: couldn't read e2p macs. Error number %d\n", r);
- return r;
- }
-
- mac_addr[0] = parts[0];
- mac_addr[1] = parts[0] >> 8;
- mac_addr[2] = parts[0] >> 16;
- mac_addr[3] = parts[0] >> 24;
- mac_addr[4] = parts[1];
- mac_addr[5] = parts[1] >> 8;
-
- return 0;
-}
-
-static int read_e2p_mac_addr(struct zd_chip *chip)
-{
- static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 };
-
- ZD_ASSERT(mutex_is_locked(&chip->mutex));
- return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr);
-}
-
/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
* CR_MAC_ADDR_P2 must be overwritten
*/
-void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr)
-{
- mutex_lock(&chip->mutex);
- memcpy(mac_addr, chip->e2p_mac, ETH_ALEN);
- mutex_unlock(&chip->mutex);
-}
-
-static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
-{
- static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 };
- return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr);
-}
-
-int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr)
-{
- int r;
-
- dev_dbg_f(zd_chip_dev(chip), "\n");
- mutex_lock(&chip->mutex);
- r = read_mac_addr(chip, mac_addr);
- mutex_unlock(&chip->mutex);
- return r;
-}
-
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
{
int r;
@@ -444,12 +390,6 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
mutex_lock(&chip->mutex);
r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
-#ifdef DEBUG
- {
- u8 tmp[ETH_ALEN];
- read_mac_addr(chip, tmp);
- }
-#endif /* DEBUG */
mutex_unlock(&chip->mutex);
return r;
}
@@ -809,7 +749,7 @@ out:
static int hw_reset_phy(struct zd_chip *chip)
{
- return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) :
+ return zd_chip_is_zd1211b(chip) ? zd1211b_hw_reset_phy(chip) :
zd1211_hw_reset_phy(chip);
}
@@ -874,7 +814,7 @@ static int hw_init_hmac(struct zd_chip *chip)
if (r)
return r;
- return chip->is_zd1211b ?
+ return zd_chip_is_zd1211b(chip) ?
zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
}
@@ -1136,8 +1076,15 @@ static int read_fw_regs_offset(struct zd_chip *chip)
return 0;
}
+/* Read mac address using pre-firmware interface */
+int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr)
+{
+ dev_dbg_f(zd_chip_dev(chip), "\n");
+ return zd_usb_read_fw(&chip->usb, E2P_MAC_ADDR_P1, addr,
+ ETH_ALEN);
+}
-int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
+int zd_chip_init_hw(struct zd_chip *chip)
{
int r;
u8 rf_type;
@@ -1145,7 +1092,6 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
dev_dbg_f(zd_chip_dev(chip), "\n");
mutex_lock(&chip->mutex);
- chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0;
#ifdef DEBUG
r = test_init(chip);
@@ -1201,10 +1147,6 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
goto out;
#endif /* DEBUG */
- r = read_e2p_mac_addr(chip);
- if (r)
- goto out;
-
r = read_cal_int_tables(chip);
if (r)
goto out;
@@ -1259,7 +1201,7 @@ static int update_channel_integration_and_calibration(struct zd_chip *chip,
r = update_pwr_int(chip, channel);
if (r)
return r;
- if (chip->is_zd1211b) {
+ if (zd_chip_is_zd1211b(chip)) {
static const struct zd_ioreq16 ioreqs[] = {
{ CR69, 0x28 },
{},
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 79d0288c193a..f4698576ab71 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -704,7 +704,6 @@ struct zd_chip {
struct mutex mutex;
/* Base address of FW_REG_ registers */
zd_addr_t fw_regs_base;
- u8 e2p_mac[ETH_ALEN];
/* EepSetPoint in the vendor driver */
u8 pwr_cal_values[E2P_CHANNEL_COUNT];
/* integration values in the vendor driver */
@@ -715,7 +714,7 @@ struct zd_chip {
unsigned int pa_type:4,
patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
new_phy_layout:1, al2230s_bit:1,
- is_zd1211b:1, supports_tx_led:1;
+ supports_tx_led:1;
};
static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
@@ -734,9 +733,15 @@ void zd_chip_init(struct zd_chip *chip,
struct net_device *netdev,
struct usb_interface *intf);
void zd_chip_clear(struct zd_chip *chip);
-int zd_chip_init_hw(struct zd_chip *chip, u8 device_type);
+int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr);
+int zd_chip_init_hw(struct zd_chip *chip);
int zd_chip_reset(struct zd_chip *chip);
+static inline int zd_chip_is_zd1211b(struct zd_chip *chip)
+{
+ return chip->usb.is_zd1211b;
+}
+
static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values,
const zd_addr_t *addresses,
unsigned int count)
@@ -825,8 +830,6 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
}
u8 zd_chip_get_channel(struct zd_chip *chip);
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
-void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr);
-int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr);
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
int zd_chip_switch_radio_on(struct zd_chip *chip);
int zd_chip_switch_radio_off(struct zd_chip *chip);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6753d240c168..f6c487aa8246 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -86,38 +86,46 @@ out:
return r;
}
-int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
+int zd_mac_preinit_hw(struct zd_mac *mac)
{
int r;
- struct zd_chip *chip = &mac->chip;
u8 addr[ETH_ALEN];
+
+ r = zd_chip_read_mac_addr_fw(&mac->chip, addr);
+ if (r)
+ return r;
+
+ memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
+ return 0;
+}
+
+int zd_mac_init_hw(struct zd_mac *mac)
+{
+ int r;
+ struct zd_chip *chip = &mac->chip;
u8 default_regdomain;
r = zd_chip_enable_int(chip);
if (r)
goto out;
- r = zd_chip_init_hw(chip, device_type);
+ r = zd_chip_init_hw(chip);
if (r)
goto disable_int;
- zd_get_e2p_mac_addr(chip, addr);
- r = zd_write_mac_addr(chip, addr);
- if (r)
- goto disable_int;
ZD_ASSERT(!irqs_disabled());
- spin_lock_irq(&mac->lock);
- memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
- spin_unlock_irq(&mac->lock);
r = zd_read_regdomain(chip, &default_regdomain);
if (r)
goto disable_int;
if (!zd_regdomain_supported(default_regdomain)) {
- dev_dbg_f(zd_mac_dev(mac),
- "Regulatory Domain %#04x is not supported.\n",
- default_regdomain);
- r = -EINVAL;
- goto disable_int;
+ /* The vendor driver overrides the regulatory domain and
+ * allowed channel registers and unconditionally restricts
+ * available channels to 1-11 everywhere. Match their
+ * questionable behaviour only for regdomains which we don't
+ * recognise. */
+ dev_warn(zd_mac_dev(mac), "Unrecognised regulatory domain: "
+ "%#04x. Defaulting to FCC.\n", default_regdomain);
+ default_regdomain = ZD_REGDOMAIN_FCC;
}
spin_lock_irq(&mac->lock);
mac->regdomain = mac->default_regdomain = default_regdomain;
@@ -164,14 +172,25 @@ int zd_mac_open(struct net_device *netdev)
{
struct zd_mac *mac = zd_netdev_mac(netdev);
struct zd_chip *chip = &mac->chip;
+ struct zd_usb *usb = &chip->usb;
int r;
+ if (!usb->initialized) {
+ r = zd_usb_init_hw(usb);
+ if (r)
+ goto out;
+ }
+
tasklet_enable(&mac->rx_tasklet);
r = zd_chip_enable_int(chip);
if (r < 0)
goto out;
+ r = zd_write_mac_addr(chip, netdev->dev_addr);
+ if (r)
+ goto disable_int;
+
r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G);
if (r < 0)
goto disable_int;
@@ -251,9 +270,11 @@ int zd_mac_set_mac_address(struct net_device *netdev, void *p)
dev_dbg_f(zd_mac_dev(mac),
"Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data));
- r = zd_write_mac_addr(chip, addr->sa_data);
- if (r)
- return r;
+ if (netdev->flags & IFF_UP) {
+ r = zd_write_mac_addr(chip, addr->sa_data);
+ if (r)
+ return r;
+ }
spin_lock_irqsave(&mac->lock, flags);
memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -855,7 +876,7 @@ static int fill_ctrlset(struct zd_mac *mac,
/* ZD1211B: Computing the length difference this way, gives us
* flexibility to compute the packet length.
*/
- cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ?
+ cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ?
packet_length - frag_len : packet_length);
/*
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index faf4c7828d4e..9f9344eb50f9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -189,7 +189,8 @@ int zd_mac_init(struct zd_mac *mac,
struct usb_interface *intf);
void zd_mac_clear(struct zd_mac *mac);
-int zd_mac_init_hw(struct zd_mac *mac, u8 device_type);
+int zd_mac_preinit_hw(struct zd_mac *mac);
+int zd_mac_init_hw(struct zd_mac *mac);
int zd_mac_open(struct net_device *netdev);
int zd_mac_stop(struct net_device *netdev);
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index 7407409b60b1..abe5d38f7f4d 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -34,7 +34,7 @@ static const char * const rfs[] = {
[AL2210_RF] = "AL2210_RF",
[MAXIM_NEW_RF] = "MAXIM_NEW_RF",
[UW2453_RF] = "UW2453_RF",
- [UNKNOWN_A_RF] = "UNKNOWN_A_RF",
+ [AL2230S_RF] = "AL2230S_RF",
[RALINK_RF] = "RALINK_RF",
[INTERSIL_RF] = "INTERSIL_RF",
[RF2959_RF] = "RF2959_RF",
@@ -77,6 +77,7 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type)
r = zd_rf_init_rf2959(rf);
break;
case AL2230_RF:
+ case AL2230S_RF:
r = zd_rf_init_al2230(rf);
break;
case AL7230B_RF:
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index c6dfd8227f6e..30502f26b71c 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -26,7 +26,7 @@
#define AL2210_RF 0x7
#define MAXIM_NEW_RF 0x8
#define UW2453_RF 0x9
-#define UNKNOWN_A_RF 0xa
+#define AL2230S_RF 0xa
#define RALINK_RF 0xb
#define INTERSIL_RF 0xc
#define RF2959_RF 0xd
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index e7a4ecf7b6e2..006774de3202 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -21,6 +21,8 @@
#include "zd_usb.h"
#include "zd_chip.h"
+#define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF)
+
static const u32 zd1211_al2230_table[][3] = {
RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, },
RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, },
@@ -176,7 +178,7 @@ static int zd1211_al2230_init_hw(struct zd_rf *rf)
if (r)
return r;
- if (chip->al2230s_bit) {
+ if (IS_AL2230S(chip)) {
r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s,
ARRAY_SIZE(ioreqs_init_al2230s));
if (r)
@@ -188,7 +190,7 @@ static int zd1211_al2230_init_hw(struct zd_rf *rf)
return r;
/* improve band edge for AL2230S */
- if (chip->al2230s_bit)
+ if (IS_AL2230S(chip))
r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS);
else
r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS);
@@ -314,7 +316,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
if (r)
return r;
- if (chip->al2230s_bit) {
+ if (IS_AL2230S(chip)) {
r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s,
ARRAY_SIZE(ioreqs_init_al2230s));
if (r)
@@ -328,7 +330,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
if (r)
return r;
- if (chip->al2230s_bit)
+ if (IS_AL2230S(chip))
r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS);
else
r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS);
@@ -422,7 +424,7 @@ int zd_rf_init_al2230(struct zd_rf *rf)
struct zd_chip *chip = zd_rf_to_chip(rf);
rf->switch_radio_off = al2230_switch_radio_off;
- if (chip->is_zd1211b) {
+ if (zd_chip_is_zd1211b(chip)) {
rf->init_hw = zd1211b_al2230_init_hw;
rf->set_channel = zd1211b_al2230_set_channel;
rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index f4e8b6ada854..73d0bb26f810 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -473,7 +473,7 @@ int zd_rf_init_al7230b(struct zd_rf *rf)
{
struct zd_chip *chip = zd_rf_to_chip(rf);
- if (chip->is_zd1211b) {
+ if (zd_chip_is_zd1211b(chip)) {
rf->init_hw = zd1211b_al7230b_init_hw;
rf->switch_radio_on = zd1211b_al7230b_switch_radio_on;
rf->set_channel = zd1211b_al7230b_set_channel;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index 2d736bdf707c..cc70d40684ea 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -265,7 +265,7 @@ int zd_rf_init_rf2959(struct zd_rf *rf)
{
struct zd_chip *chip = zd_rf_to_chip(rf);
- if (chip->is_zd1211b) {
+ if (zd_chip_is_zd1211b(chip)) {
dev_err(zd_chip_dev(chip),
"RF2959 is currently not supported for ZD1211B"
" devices\n");
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index 414e40d571ab..857dcf3eae61 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -486,7 +486,7 @@ static int uw2453_switch_radio_on(struct zd_rf *rf)
if (r)
return r;
- if (chip->is_zd1211b)
+ if (zd_chip_is_zd1211b(chip))
ioreqs[1].value = 0x7f;
return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 740a2194fdde..a9c339ef116a 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -15,7 +15,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -26,6 +25,7 @@
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <net/ieee80211.h>
+#include <asm/unaligned.h>
#include "zd_def.h"
#include "zd_netdev.h"
@@ -71,6 +71,9 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
@@ -195,26 +198,27 @@ static u16 get_word(const void *data, u16 offset)
return le16_to_cpu(p[offset]);
}
-static char *get_fw_name(char *buffer, size_t size, u8 device_type,
+static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size,
const char* postfix)
{
scnprintf(buffer, size, "%s%s",
- device_type == DEVICE_ZD1211B ?
+ usb->is_zd1211b ?
FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX,
postfix);
return buffer;
}
-static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
+static int handle_version_mismatch(struct zd_usb *usb,
const struct firmware *ub_fw)
{
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
const struct firmware *ur_fw = NULL;
int offset;
int r = 0;
char fw_name[128];
r = request_fw_file(&ur_fw,
- get_fw_name(fw_name, sizeof(fw_name), device_type, "ur"),
+ get_fw_name(usb, fw_name, sizeof(fw_name), "ur"),
&udev->dev);
if (r)
goto error;
@@ -237,11 +241,12 @@ error:
return r;
}
-static int upload_firmware(struct usb_device *udev, u8 device_type)
+static int upload_firmware(struct zd_usb *usb)
{
int r;
u16 fw_bcdDevice;
u16 bcdDevice;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
const struct firmware *ub_fw = NULL;
const struct firmware *uph_fw = NULL;
char fw_name[128];
@@ -249,7 +254,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
bcdDevice = get_bcdDevice(udev);
r = request_fw_file(&ub_fw,
- get_fw_name(fw_name, sizeof(fw_name), device_type, "ub"),
+ get_fw_name(usb, fw_name, sizeof(fw_name), "ub"),
&udev->dev);
if (r)
goto error;
@@ -264,7 +269,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
dev_warn(&udev->dev, "device has old bootcode, please "
"report success or failure\n");
- r = handle_version_mismatch(udev, device_type, ub_fw);
+ r = handle_version_mismatch(usb, ub_fw);
if (r)
goto error;
} else {
@@ -275,7 +280,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
r = request_fw_file(&uph_fw,
- get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"),
+ get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"),
&udev->dev);
if (r)
goto error;
@@ -294,6 +299,30 @@ error:
return r;
}
+/* Read data from device address space using "firmware interface" which does
+ * not require firmware to be loaded. */
+int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
+{
+ int r;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
+
+ r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0,
+ data, len, 5000);
+ if (r < 0) {
+ dev_err(&udev->dev,
+ "read over firmware interface failed: %d\n", r);
+ return r;
+ } else if (r != len) {
+ dev_err(&udev->dev,
+ "incomplete read over firmware interface: %d/%d\n",
+ r, len);
+ return -EIO;
+ }
+
+ return 0;
+}
+
#define urb_dev(urb) (&(urb)->dev->dev)
static inline void handle_regs_int(struct urb *urb)
@@ -920,9 +949,42 @@ static int eject_installer(struct usb_interface *intf)
return 0;
}
+int zd_usb_init_hw(struct zd_usb *usb)
+{
+ int r;
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ r = upload_firmware(usb);
+ if (r) {
+ dev_err(zd_usb_dev(usb),
+ "couldn't load firmware. Error number %d\n", r);
+ return r;
+ }
+
+ r = usb_reset_configuration(zd_usb_to_usbdev(usb));
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "couldn't reset configuration. Error number %d\n", r);
+ return r;
+ }
+
+ r = zd_mac_init_hw(mac);
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb),
+ "couldn't initialize mac. Error number %d\n", r);
+ return r;
+ }
+
+ usb->initialized = 1;
+ return 0;
+}
+
static int probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int r;
+ struct zd_usb *usb;
struct usb_device *udev = interface_to_usbdev(intf);
struct net_device *netdev = NULL;
@@ -950,26 +1012,10 @@ static int probe(struct usb_interface *intf, const struct usb_device_id *id)
goto error;
}
- r = upload_firmware(udev, id->driver_info);
- if (r) {
- dev_err(&intf->dev,
- "couldn't load firmware. Error number %d\n", r);
- goto error;
- }
+ usb = &zd_netdev_mac(netdev)->chip.usb;
+ usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0;
- r = usb_reset_configuration(udev);
- if (r) {
- dev_dbg_f(&intf->dev,
- "couldn't reset configuration. Error number %d\n", r);
- goto error;
- }
-
- /* At this point the interrupt endpoint is not generally enabled. We
- * save the USB bandwidth until the network device is opened. But
- * notify that the initialization of the MAC will require the
- * interrupts to be temporary enabled.
- */
- r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info);
+ r = zd_mac_preinit_hw(zd_netdev_mac(netdev));
if (r) {
dev_dbg_f(&intf->dev,
"couldn't initialize mac. Error number %d\n", r);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 506ea6a74393..961a7a12ad68 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -188,6 +188,7 @@ struct zd_usb {
struct zd_usb_rx rx;
struct zd_usb_tx tx;
struct usb_interface *intf;
+ u8 is_zd1211b:1, initialized:1;
};
#define zd_usb_dev(usb) (&usb->intf->dev)
@@ -236,6 +237,8 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits);
+int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len);
+
extern struct workqueue_struct *zd_workqueue;
#endif /* _ZD_USB_H */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
new file mode 100644
index 000000000000..489f69c5d6ca
--- /dev/null
+++ b/drivers/net/xen-netfront.c
@@ -0,0 +1,1863 @@
+/*
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <net/ip.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <xen/interface/io/netif.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/grant_table.h>
+
+static struct ethtool_ops xennet_ethtool_ops;
+
+struct netfront_cb {
+ struct page *page;
+ unsigned offset;
+};
+
+#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
+
+#define RX_COPY_THRESHOLD 256
+
+#define GRANT_INVALID_REF 0
+
+#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+
+struct netfront_info {
+ struct list_head list;
+ struct net_device *netdev;
+
+ struct net_device_stats stats;
+
+ struct xen_netif_tx_front_ring tx;
+ struct xen_netif_rx_front_ring rx;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ unsigned int evtchn;
+
+ /* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_DFL_MIN_TARGET 64
+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+ unsigned rx_min_target, rx_max_target, rx_target;
+ struct sk_buff_head rx_batch;
+
+ struct timer_list rx_refill_timer;
+
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
+ * are linked from tx_skb_freelist through skb_entry.link.
+ *
+ * NB. Freelist index entries are always going to be less than
+ * PAGE_OFFSET, whereas pointers to skbs will always be equal or
+ * greater than PAGE_OFFSET: we use this property to distinguish
+ * them.
+ */
+ union skb_entry {
+ struct sk_buff *skb;
+ unsigned link;
+ } tx_skbs[NET_TX_RING_SIZE];
+ grant_ref_t gref_tx_head;
+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+ unsigned tx_skb_freelist;
+
+ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
+ grant_ref_t gref_rx_head;
+ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+ struct xenbus_device *xbdev;
+ int tx_ring_ref;
+ int rx_ring_ref;
+
+ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
+ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
+ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_rx_info {
+ struct xen_netif_rx_response rx;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+};
+
+/*
+ * Access macros for acquiring and freeing slots in tx_skbs[].
+ */
+
+static void add_id_to_freelist(unsigned *head, union skb_entry *list,
+ unsigned short id)
+{
+ list[id].link = *head;
+ *head = id;
+}
+
+static unsigned short get_id_from_freelist(unsigned *head,
+ union skb_entry *list)
+{
+ unsigned int id = *head;
+ *head = list[id].link;
+ return id;
+}
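The skb_entry union in struct netfront_info overlays a free-list index on the slot that otherwise holds an sk_buff pointer; as the structure comment notes, free-list indices are always below PAGE_OFFSET while real skb pointers are not. A hedged sketch of how that property could be tested (this helper is illustrative and not part of the patch):

	/* Illustrative only: a tx_skbs[] slot holds a free-list link rather
	 * than a live skb whenever the stored value is below PAGE_OFFSET. */
	static int skb_entry_is_link(const union skb_entry *entry)
	{
		return (unsigned long)entry->skb < PAGE_OFFSET;
	}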
+
+static int xennet_rxidx(RING_IDX idx)
+{
+ return idx & (NET_RX_RING_SIZE - 1);
+}
+
+static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+ RING_IDX ri)
+{
+ int i = xennet_rxidx(ri);
+ struct sk_buff *skb = np->rx_skbs[i];
+ np->rx_skbs[i] = NULL;
+ return skb;
+}
+
+static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+ RING_IDX ri)
+{
+ int i = xennet_rxidx(ri);
+ grant_ref_t ref = np->grant_rx_ref[i];
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ return ref;
+}
+
+#ifdef CONFIG_SYSFS
+static int xennet_sysfs_addif(struct net_device *netdev);
+static void xennet_sysfs_delif(struct net_device *netdev);
+#else /* !CONFIG_SYSFS */
+#define xennet_sysfs_addif(dev) (0)
+#define xennet_sysfs_delif(dev) do { } while (0)
+#endif
+
+static int xennet_can_sg(struct net_device *dev)
+{
+ return dev->features & NETIF_F_SG;
+}
+
+
+static void rx_refill_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ netif_rx_schedule(dev);
+}
+
+static int netfront_tx_slot_available(struct netfront_info *np)
+{
+ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+static void xennet_maybe_wake_tx(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ if (unlikely(netif_queue_stopped(dev)) &&
+ netfront_tx_slot_available(np) &&
+ likely(netif_running(dev)))
+ netif_wake_queue(dev);
+}
+
+static void xennet_alloc_rx_buffers(struct net_device *dev)
+{
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ int i, batch_target, notify;
+ RING_IDX req_prod = np->rx.req_prod_pvt;
+ struct xen_memory_reservation reservation;
+ grant_ref_t ref;
+ unsigned long pfn;
+ void *vaddr;
+ int nr_flips;
+ struct xen_netif_rx_request *req;
+
+ if (unlikely(!netif_carrier_ok(dev)))
+ return;
+
+ /*
+ * Allocate skbuffs greedily, even though we batch updates to the
+ * receive ring. This creates a less bursty demand on the memory
+ * allocator, so should reduce the chance of failed allocation requests
+ * both for ourselves and for other kernel subsystems.
+ */
+ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ goto no_skb;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ kfree_skb(skb);
+no_skb:
+ /* Any skbuffs queued for refill? Force them out. */
+ if (i != 0)
+ goto refill;
+ /* Could not allocate any skbuffs. Try again later. */
+ mod_timer(&np->rx_refill_timer,
+ jiffies + (HZ/10));
+ break;
+ }
+
+ skb_shinfo(skb)->frags[0].page = page;
+ skb_shinfo(skb)->nr_frags = 1;
+ __skb_queue_tail(&np->rx_batch, skb);
+ }
+
+ /* Is the batch large enough to be worthwhile? */
+ if (i < (np->rx_target/2)) {
+ if (req_prod > np->rx.sring->req_prod)
+ goto push;
+ return;
+ }
+
+ /* Adjust our fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > np->rx_max_target))
+ np->rx_target = np->rx_max_target;
+
+ refill:
+ for (nr_flips = i = 0; ; i++) {
+ skb = __skb_dequeue(&np->rx_batch);
+ if (skb == NULL)
+ break;
+
+ skb->dev = dev;
+
+ id = xennet_rxidx(req_prod + i);
+
+ BUG_ON(np->rx_skbs[id]);
+ np->rx_skbs[id] = skb;
+
+ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+ BUG_ON((signed short)ref < 0);
+ np->grant_rx_ref[id] = ref;
+
+ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
+ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
+
+ req = RING_GET_REQUEST(&np->rx, req_prod + i);
+ gnttab_grant_foreign_access_ref(ref,
+ np->xbdev->otherend_id,
+ pfn_to_mfn(pfn),
+ 0);
+
+ req->id = id;
+ req->gref = ref;
+ }
+
+ if (nr_flips != 0) {
+ reservation.extent_start = np->rx_pfn_array;
+ reservation.nr_extents = nr_flips;
+ reservation.extent_order = 0;
+ reservation.address_bits = 0;
+ reservation.domid = DOMID_SELF;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* After all PTEs have been zapped, flush the TLB. */
+ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
+ UVMF_TLB_FLUSH|UVMF_ALL;
+
+ /* Give away a batch of pages. */
+ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
+ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+ /* Zap PTEs and give away pages in one big
+ * multicall. */
+ (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
+
+ /* Check return status of HYPERVISOR_memory_op(). */
+ if (unlikely(np->rx_mcl[i].result != i))
+ panic("Unable to reduce memory reservation\n");
+ } else {
+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation) != i)
+ panic("Unable to reduce memory reservation\n");
+ }
+ } else {
+		wmb(); /* barrier so backend sees requests */
+ }
+
+ /* Above is a suitable barrier to ensure backend will see requests. */
+ np->rx.req_prod_pvt = req_prod + i;
+ push:
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+ if (notify)
+ notify_remote_via_irq(np->netdev->irq);
+}
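The fill-target adjustment in xennet_alloc_rx_buffers() above grows rx_target multiplicatively whenever the ring came close to running out of posted buffers, clamped at rx_max_target. A short worked example using the defaults defined earlier (illustrative only):

	/* Illustrative only: with rx_target == 64 and rx_max_target == 256,
	 * observing fewer than 64/4 == 16 outstanding requests doubles the
	 * fill target to 128, then to 256 on a later shortfall; the clamp
	 * keeps it from ever exceeding rx_max_target. */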
+
+static int xennet_open(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ spin_lock_bh(&np->rx_lock);
+ if (netif_carrier_ok(dev)) {
+ xennet_alloc_rx_buffers(dev);
+ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ netif_rx_schedule(dev);
+ }
+ spin_unlock_bh(&np->rx_lock);
+
+ xennet_maybe_wake_tx(dev);
+
+ return 0;
+}
+
+static void xennet_tx_buf_gc(struct net_device *dev)
+{
+ RING_IDX cons, prod;
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ BUG_ON(!netif_carrier_ok(dev));
+
+ do {
+ prod = np->tx.sring->rsp_prod;
+ rmb(); /* Ensure we see responses up to 'rp'. */
+
+ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+ struct xen_netif_tx_response *txrsp;
+
+ txrsp = RING_GET_RESPONSE(&np->tx, cons);
+ if (txrsp->status == NETIF_RSP_NULL)
+ continue;
+
+ id = txrsp->id;
+ skb = np->tx_skbs[id].skb;
+ if (unlikely(gnttab_query_foreign_access(
+ np->grant_tx_ref[id]) != 0)) {
+ printk(KERN_ALERT "xennet_tx_buf_gc: warning "
+ "-- grant still in use by backend "
+ "domain.\n");
+ BUG();
+ }
+ gnttab_end_foreign_access_ref(
+ np->grant_tx_ref[id], GNTMAP_readonly);
+ gnttab_release_grant_reference(
+ &np->gref_tx_head, np->grant_tx_ref[id]);
+ np->grant_tx_ref[id] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+
+ np->tx.rsp_cons = prod;
+
+ /*
+ * Set a new event, then check for race with update of tx_cons.
+ * Note that it is essential to schedule a callback, no matter
+ * how few buffers are pending. Even if there is space in the
+ * transmit ring, higher layers may be blocked because too much
+ * data is outstanding: in such cases notification from Xen is
+ * likely to be the only kick that we'll get.
+ */
+ np->tx.sring->rsp_event =
+ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+ mb(); /* update shared area */
+ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+
+ xennet_maybe_wake_tx(dev);
+}
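The rsp_event formula at the end of xennet_tx_buf_gc() asks the backend for its next notification only once roughly half of the still-outstanding requests have completed, rather than after every response. A worked example with hypothetical ring indices (illustrative only):

	/* Illustrative only: with rsp_prod == 10 and req_prod == 18 there
	 * are 8 requests outstanding, so
	 *	rsp_event = 10 + ((18 - 10) >> 1) + 1 = 15
	 * and the backend need not raise another event until it has
	 * produced 5 more responses. */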
+
+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ struct xen_netif_tx_request *tx)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ char *data = skb->data;
+ unsigned long mfn;
+ RING_IDX prod = np->tx.req_prod_pvt;
+ int frags = skb_shinfo(skb)->nr_frags;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ unsigned int id;
+ grant_ref_t ref;
+ int i;
+
+ /* While the header overlaps a page boundary (including being
+	   larger than a page), split it into page-sized chunks. */
+ while (len > PAGE_SIZE - offset) {
+ tx->size = PAGE_SIZE - offset;
+ tx->flags |= NETTXF_more_data;
+ len -= tx->size;
+ data += tx->size;
+ offset = 0;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+ tx->flags = 0;
+ }
+
+ /* Grant backend access to each skb fragment page. */
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+ tx->flags |= NETTXF_more_data;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = pfn_to_mfn(page_to_pfn(frag->page));
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = frag->page_offset;
+ tx->size = frag->size;
+ tx->flags = 0;
+ }
+
+ np->tx.req_prod_pvt = prod;
+}
+
+static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct xen_netif_tx_request *tx;
+ struct xen_netif_extra_info *extra;
+ char *data = skb->data;
+ RING_IDX i;
+ grant_ref_t ref;
+ unsigned long mfn;
+ int notify;
+ int frags = skb_shinfo(skb)->nr_frags;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+
+ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+ frags);
+ dump_stack();
+ goto drop;
+ }
+
+ spin_lock_irq(&np->tx_lock);
+
+ if (unlikely(!netif_carrier_ok(dev) ||
+ (frags > 1 && !xennet_can_sg(dev)) ||
+ netif_needs_gso(dev, skb))) {
+ spin_unlock_irq(&np->tx_lock);
+ goto drop;
+ }
+
+ i = np->tx.req_prod_pvt;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb;
+
+ tx = RING_GET_REQUEST(&np->tx, i);
+
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(
+ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+ extra = NULL;
+
+ tx->flags = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ /* local packet? */
+ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* remote but checksummed. */
+ tx->flags |= NETTXF_data_validated;
+
+ if (skb_shinfo(skb)->gso_size) {
+ struct xen_netif_extra_info *gso;
+
+ gso = (struct xen_netif_extra_info *)
+ RING_GET_REQUEST(&np->tx, ++i);
+
+ if (extra)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ tx->flags |= NETTXF_extra_info;
+
+ gso->u.gso.size = skb_shinfo(skb)->gso_size;
+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.pad = 0;
+ gso->u.gso.features = 0;
+
+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ gso->flags = 0;
+ extra = gso;
+ }
+
+ np->tx.req_prod_pvt = i + 1;
+
+ xennet_make_frags(skb, dev, tx);
+ tx->size = skb->len;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+ if (notify)
+ notify_remote_via_irq(np->netdev->irq);
+
+ xennet_tx_buf_gc(dev);
+
+ if (!netfront_tx_slot_available(np))
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&np->tx_lock);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ return 0;
+
+ drop:
+ np->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
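To make the interplay between xennet_make_frags() and xennet_start_xmit() above concrete, here is an illustrative trace of the linear-header split; the numbers are hypothetical and assume PAGE_SIZE is 4096:

	/* Illustrative trace only, not part of the patch:
	 *   skb_headlen() == 5000, offset_in_page(data) == 3000
	 *   pass 1: the caller's first request is trimmed to
	 *	     4096 - 3000 == 1096 bytes and NETTXF_more_data is set;
	 *	     len becomes 3904, offset becomes 0, and a second
	 *	     request is claimed for those 3904 bytes at offset 0
	 *   loop exits since 3904 <= 4096
	 * xennet_start_xmit() then overwrites the first request's size
	 * field with the total skb->len, as seen in the hunk above. */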
+
+static int xennet_close(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ netif_stop_queue(np->netdev);
+ return 0;
+}
+
+static struct net_device_stats *xennet_get_stats(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ return &np->stats;
+}
+
+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+ grant_ref_t ref)
+{
+ int new = xennet_rxidx(np->rx.req_prod_pvt);
+
+ BUG_ON(np->rx_skbs[new]);
+ np->rx_skbs[new] = skb;
+ np->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
+ np->rx.req_prod_pvt++;
+}
+
+static int xennet_get_extras(struct netfront_info *np,
+ struct xen_netif_extra_info *extras,
+ RING_IDX rp)
+
+{
+ struct xen_netif_extra_info *extra;
+ struct device *dev = &np->netdev->dev;
+ RING_IDX cons = np->rx.rsp_cons;
+ int err = 0;
+
+ do {
+ struct sk_buff *skb;
+ grant_ref_t ref;
+
+ if (unlikely(cons + 1 == rp)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Missing extra info\n");
+ err = -EBADR;
+ break;
+ }
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&np->rx, ++cons);
+
+ if (unlikely(!extra->type ||
+ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Invalid extra type: %d\n",
+ extra->type);
+ err = -EINVAL;
+ } else {
+ memcpy(&extras[extra->type - 1], extra,
+ sizeof(*extra));
+ }
+
+ skb = xennet_get_rx_skb(np, cons);
+ ref = xennet_get_rx_ref(np, cons);
+ xennet_move_rx_slot(np, skb, ref);
+ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
+ np->rx.rsp_cons = cons;
+ return err;
+}
+
+static int xennet_get_responses(struct netfront_info *np,
+ struct netfront_rx_info *rinfo, RING_IDX rp,
+ struct sk_buff_head *list)
+{
+ struct xen_netif_rx_response *rx = &rinfo->rx;
+ struct xen_netif_extra_info *extras = rinfo->extras;
+ struct device *dev = &np->netdev->dev;
+ RING_IDX cons = np->rx.rsp_cons;
+ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
+ grant_ref_t ref = xennet_get_rx_ref(np, cons);
+ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+ int frags = 1;
+ int err = 0;
+ unsigned long ret;
+
+ if (rx->flags & NETRXF_extra_info) {
+ err = xennet_get_extras(np, extras, rp);
+ cons = np->rx.rsp_cons;
+ }
+
+ for (;;) {
+ if (unlikely(rx->status < 0 ||
+ rx->offset + rx->status > PAGE_SIZE)) {
+ if (net_ratelimit())
+ dev_warn(dev, "rx->offset: %x, size: %u\n",
+ rx->offset, rx->status);
+ xennet_move_rx_slot(np, skb, ref);
+ err = -EINVAL;
+ goto next;
+ }
+
+ /*
+ * This definitely indicates a bug, either in this driver or in
+ * the backend driver. In future this should flag the bad
+	 * situation to the system controller to reboot the backend.
+ */
+ if (ref == GRANT_INVALID_REF) {
+ if (net_ratelimit())
+ dev_warn(dev, "Bad rx response id %d.\n",
+ rx->id);
+ err = -EINVAL;
+ goto next;
+ }
+
+ ret = gnttab_end_foreign_access_ref(ref, 0);
+ BUG_ON(!ret);
+
+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
+
+ __skb_queue_tail(list, skb);
+
+next:
+ if (!(rx->flags & NETRXF_more_data))
+ break;
+
+ if (cons + frags == rp) {
+ if (net_ratelimit())
+ dev_warn(dev, "Need more frags\n");
+ err = -ENOENT;
+ break;
+ }
+
+ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
+ skb = xennet_get_rx_skb(np, cons + frags);
+ ref = xennet_get_rx_ref(np, cons + frags);
+ frags++;
+ }
+
+ if (unlikely(frags > max)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Too many frags\n");
+ err = -E2BIG;
+ }
+
+ if (unlikely(err))
+ np->rx.rsp_cons = cons + frags;
+
+ return err;
+}
+
+static int xennet_set_skb_gso(struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
+{
+ if (!gso->u.gso.size) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "GSO size must not be zero.\n");
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
+ return -EINVAL;
+ }
+
+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+
+ return 0;
+}
+
+static RING_IDX xennet_fill_frags(struct netfront_info *np,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ RING_IDX cons = np->rx.rsp_cons;
+ skb_frag_t *frag = shinfo->frags + nr_frags;
+ struct sk_buff *nskb;
+
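+ /* Each queued skb carries one backend page; graft it onto the head skb as a fragment and free the carrier skb. */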
+ while ((nskb = __skb_dequeue(list))) {
+ struct xen_netif_rx_response *rx =
+ RING_GET_RESPONSE(&np->rx, ++cons);
+
+ frag->page = skb_shinfo(nskb)->frags[0].page;
+ frag->page_offset = rx->offset;
+ frag->size = rx->status;
+
+ skb->data_len += rx->status;
+
+ skb_shinfo(nskb)->nr_frags = 0;
+ kfree_skb(nskb);
+
+ frag++;
+ nr_frags++;
+ }
+
+ shinfo->nr_frags = nr_frags;
+ return cons;
+}
+
+static int skb_checksum_setup(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
+
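+ /* Locate the transport header and record csum_start/csum_offset so the partial checksum can be completed later. */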
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+
+ iph = (void *)skb->data;
+ th = skb->data + 4 * iph->ihl;
+ if (th >= skb_tail_pointer(skb))
+ goto out;
+
+ skb->csum_start = th - skb->head;
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ break;
+ case IPPROTO_UDP:
+ skb->csum_offset = offsetof(struct udphdr, check);
+ break;
+ default:
+ if (net_ratelimit())
+ printk(KERN_ERR "Attempting to checksum a non-"
+ "TCP/UDP packet, dropping a protocol"
+ " %d packet", iph->protocol);
+ goto out;
+ }
+
+ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
+ goto out;
+
+ err = 0;
+
+out:
+ return err;
+}
+
+static int handle_incoming_queue(struct net_device *dev,
+ struct sk_buff_head *rxq)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int packets_dropped = 0;
+ struct sk_buff *skb;
+
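+ /* Pull the header into the linear area, then pass each skb up the stack, dropping any whose checksum setup fails. */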
+ while ((skb = __skb_dequeue(rxq)) != NULL) {
+ struct page *page = NETFRONT_SKB_CB(skb)->page;
+ void *vaddr = page_address(page);
+ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
+
+ memcpy(skb->data, vaddr + offset,
+ skb_headlen(skb));
+
+ if (page != skb_shinfo(skb)->frags[0].page)
+ __free_page(page);
+
+ /* Ethernet work: Delayed to here as it peeks the header. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_setup(skb)) {
+ kfree_skb(skb);
+ packets_dropped++;
+ np->stats.rx_errors++;
+ continue;
+ }
+ }
+
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += skb->len;
+
+ /* Pass it up. */
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ return packets_dropped;
+}
+
+static int xennet_poll(struct net_device *dev, int *pbudget)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct netfront_rx_info rinfo;
+ struct xen_netif_rx_response *rx = &rinfo.rx;
+ struct xen_netif_extra_info *extras = rinfo.extras;
+ RING_IDX i, rp;
+ int work_done, budget, more_to_do = 1;
+ struct sk_buff_head rxq;
+ struct sk_buff_head errq;
+ struct sk_buff_head tmpq;
+ unsigned long flags;
+ unsigned int len;
+ int err;
+
+ spin_lock(&np->rx_lock);
+
+ if (unlikely(!netif_carrier_ok(dev))) {
+ spin_unlock(&np->rx_lock);
+ return 0;
+ }
+
+ skb_queue_head_init(&rxq);
+ skb_queue_head_init(&errq);
+ skb_queue_head_init(&tmpq);
+
+ budget = *pbudget;
+ if (budget > dev->quota)
+ budget = dev->quota;
+ rp = np->rx.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ i = np->rx.rsp_cons;
+ work_done = 0;
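+ /* Consume responses from the shared ring until we reach 'rp' or exhaust the budget. */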
+ while ((i != rp) && (work_done < budget)) {
+ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memset(extras, 0, sizeof(rinfo.extras));
+
+ err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+
+ if (unlikely(err)) {
+err:
+ while ((skb = __skb_dequeue(&tmpq)))
+ __skb_queue_tail(&errq, skb);
+ np->stats.rx_errors++;
+ i = np->rx.rsp_cons;
+ continue;
+ }
+
+ skb = __skb_dequeue(&tmpq);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+ struct xen_netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (unlikely(xennet_set_skb_gso(skb, gso))) {
+ __skb_queue_head(&tmpq, skb);
+ np->rx.rsp_cons += skb_queue_len(&tmpq);
+ goto err;
+ }
+ }
+
+ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
+ NETFRONT_SKB_CB(skb)->offset = rx->offset;
+
+ len = rx->status;
+ if (len > RX_COPY_THRESHOLD)
+ len = RX_COPY_THRESHOLD;
+ skb_put(skb, len);
+
+ if (rx->status > len) {
+ skb_shinfo(skb)->frags[0].page_offset =
+ rx->offset + len;
+ skb_shinfo(skb)->frags[0].size = rx->status - len;
+ skb->data_len = rx->status - len;
+ } else {
+ skb_shinfo(skb)->frags[0].page = NULL;
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+
+ i = xennet_fill_frags(np, skb, &tmpq);
+
+ /*
+ * Truesize approximates the size of true data plus
+ * any supervisor overheads. Adding hypervisor
+ * overheads has been shown to significantly reduce
+ * achievable bandwidth with the default receive
+ * buffer size. It is therefore not wise to account
+ * for it here.
+ *
+ * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
+ * to RX_COPY_THRESHOLD + the supervisor
+ * overheads. Here, we add the size of the data pulled
+ * in xennet_fill_frags().
+ *
+ * We also adjust for any unused space in the main
+ * data area by subtracting (RX_COPY_THRESHOLD -
+ * len). This is especially important with drivers
+ * which split incoming packets into header and data,
+ * using only 66 bytes of the main data area (see the
+ * e1000 driver for example.) On such systems,
+ * without this last adjustment, our achievable
+ * receive throughput using the standard receive
+ * buffer size was cut by 25%(!!!).
+ */
+ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+ skb->len += skb->data_len;
+
+ if (rx->flags & NETRXF_csum_blank)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ else if (rx->flags & NETRXF_data_validated)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ __skb_queue_tail(&rxq, skb);
+
+ np->rx.rsp_cons = ++i;
+ work_done++;
+ }
+
+ while ((skb = __skb_dequeue(&errq)))
+ kfree_skb(skb);
+
+ work_done -= handle_incoming_queue(dev, &rxq);
+
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+ ((3*np->rx_target) / 4)) &&
+ (--np->rx_target < np->rx_min_target))
+ np->rx_target = np->rx_min_target;
+
+ xennet_alloc_rx_buffers(dev);
+
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done < budget) {
+ local_irq_save(flags);
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+ if (!more_to_do)
+ __netif_rx_complete(dev);
+
+ local_irq_restore(flags);
+ }
+
+ spin_unlock(&np->rx_lock);
+
+ return more_to_do;
+}
+
+static int xennet_change_mtu(struct net_device *dev, int mtu)
+{
+ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static void xennet_release_tx_bufs(struct netfront_info *np)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ /* Skip over entries which are actually freelist references */
+ if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
+ continue;
+
+ skb = np->tx_skbs[i].skb;
+ gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
+ GNTMAP_readonly);
+ gnttab_release_grant_reference(&np->gref_tx_head,
+ np->grant_tx_ref[i]);
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+ dev_kfree_skb_irq(skb);
+ }
+}
+
+static void xennet_release_rx_bufs(struct netfront_info *np)
+{
+ struct mmu_update *mmu = np->rx_mmu;
+ struct multicall_entry *mcl = np->rx_mcl;
+ struct sk_buff_head free_list;
+ struct sk_buff *skb;
+ unsigned long mfn;
+ int xfer = 0, noxfer = 0, unused = 0;
+ int id, ref;
+
+ dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
+ __func__);
+ return;
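+ /* The code below still assumes the old page-flipping receive path, hence the early return above. */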
+
+ skb_queue_head_init(&free_list);
+
+ spin_lock_bh(&np->rx_lock);
+
+ for (id = 0; id < NET_RX_RING_SIZE; id++) {
+ ref = np->grant_rx_ref[id];
+ if (ref == GRANT_INVALID_REF) {
+ unused++;
+ continue;
+ }
+
+ skb = np->rx_skbs[id];
+ mfn = gnttab_end_foreign_transfer_ref(ref);
+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+ if (0 == mfn) {
+ skb_shinfo(skb)->nr_frags = 0;
+ dev_kfree_skb(skb);
+ noxfer++;
+ continue;
+ }
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Remap the page. */
+ struct page *page = skb_shinfo(skb)->frags[0].page;
+ unsigned long pfn = page_to_pfn(page);
+ void *vaddr = page_address(page);
+
+ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
+ mfn_pte(mfn, PAGE_KERNEL),
+ 0);
+ mcl++;
+ mmu->ptr = ((u64)mfn << PAGE_SHIFT)
+ | MMU_MACHPHYS_UPDATE;
+ mmu->val = pfn;
+ mmu++;
+
+ set_phys_to_machine(pfn, mfn);
+ }
+ __skb_queue_tail(&free_list, skb);
+ xfer++;
+ }
+
+ dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
+ __func__, xfer, noxfer, unused);
+
+ if (xfer) {
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Do all the remapping work and M2P updates. */
+ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
+ 0, DOMID_SELF);
+ mcl++;
+ HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+ }
+ }
+
+ while ((skb = __skb_dequeue(&free_list)) != NULL)
+ dev_kfree_skb(skb);
+
+ spin_unlock_bh(&np->rx_lock);
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_release_tx_bufs(np);
+ xennet_release_rx_bufs(np);
+ gnttab_free_grant_references(np->gref_tx_head);
+ gnttab_free_grant_references(np->gref_rx_head);
+}
+
+static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
+{
+ int i, err;
+ struct net_device *netdev;
+ struct netfront_info *np;
+
+ netdev = alloc_etherdev(sizeof(struct netfront_info));
+ if (!netdev) {
+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np = netdev_priv(netdev);
+ np->xbdev = dev;
+
+ spin_lock_init(&np->tx_lock);
+ spin_lock_init(&np->rx_lock);
+
+ skb_queue_head_init(&np->rx_batch);
+ np->rx_target = RX_DFL_MIN_TARGET;
+ np->rx_min_target = RX_DFL_MIN_TARGET;
+ np->rx_max_target = RX_MAX_TARGET;
+
+ init_timer(&np->rx_refill_timer);
+ np->rx_refill_timer.data = (unsigned long)netdev;
+ np->rx_refill_timer.function = rx_refill_timeout;
+
+ /* Initialise tx_skbs as a free chain containing every entry. */
+ np->tx_skb_freelist = 0;
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ np->tx_skbs[i].link = i+1;
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* Clear out rx_skbs */
+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
+ np->rx_skbs[i] = NULL;
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* A grant for every tx ring slot */
+ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ &np->gref_tx_head) < 0) {
+ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ /* A grant for every rx ring slot */
+ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ &np->gref_rx_head) < 0) {
+ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+ err = -ENOMEM;
+ goto exit_free_tx;
+ }
+
+ netdev->open = xennet_open;
+ netdev->hard_start_xmit = xennet_start_xmit;
+ netdev->stop = xennet_close;
+ netdev->get_stats = xennet_get_stats;
+ netdev->poll = xennet_poll;
+ netdev->uninit = xennet_uninit;
+ netdev->change_mtu = xennet_change_mtu;
+ netdev->weight = 64;
+ netdev->features = NETIF_F_IP_CSUM;
+
+ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
+ np->netdev = netdev;
+
+ netif_carrier_off(netdev);
+
+ return netdev;
+
+ exit_free_tx:
+ gnttab_free_grant_references(np->gref_tx_head);
+ exit:
+ free_netdev(netdev);
+ return ERR_PTR(err);
+}
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffers for communication with the backend, and
+ * inform the backend of the appropriate details for those.
+ */
+static int __devinit netfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err;
+ struct net_device *netdev;
+ struct netfront_info *info;
+
+ netdev = xennet_create_dev(dev);
+ if (IS_ERR(netdev)) {
+ err = PTR_ERR(netdev);
+ xenbus_dev_fatal(dev, err, "creating netdev");
+ return err;
+ }
+
+ info = netdev_priv(netdev);
+ dev->dev.driver_data = info;
+
+ err = register_netdev(info->netdev);
+ if (err) {
+ printk(KERN_WARNING "%s: register_netdev err=%d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ err = xennet_sysfs_addif(info->netdev);
+ if (err) {
+ unregister_netdev(info->netdev);
+ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ return 0;
+
+ fail:
+ free_netdev(netdev);
+ dev->dev.driver_data = NULL;
+ return err;
+}
+
+static void xennet_end_access(int ref, void *page)
+{
+ /* This frees the page as a side-effect */
+ if (ref != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+}
+
+static void xennet_disconnect_backend(struct netfront_info *info)
+{
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_bh(&info->rx_lock);
+ spin_lock_irq(&info->tx_lock);
+ netif_carrier_off(info->netdev);
+ spin_unlock_irq(&info->tx_lock);
+ spin_unlock_bh(&info->rx_lock);
+
+ if (info->netdev->irq)
+ unbind_from_irqhandler(info->netdev->irq, info->netdev);
+ info->evtchn = info->netdev->irq = 0;
+
+ /* End access and free the pages */
+ xennet_end_access(info->tx_ring_ref, info->tx.sring);
+ xennet_end_access(info->rx_ring_ref, info->rx.sring);
+
+ info->tx_ring_ref = GRANT_INVALID_REF;
+ info->rx_ring_ref = GRANT_INVALID_REF;
+ info->tx.sring = NULL;
+ info->rx.sring = NULL;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart. We tear down our netif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int netfront_resume(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ xennet_disconnect_backend(info);
+ return 0;
+}
+
+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+{
+ char *s, *e, *macstr;
+ int i;
+
+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
+ if (IS_ERR(macstr))
+ return PTR_ERR(macstr);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac[i] = simple_strtoul(s, &e, 16);
+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
+ kfree(macstr);
+ return -ENOENT;
+ }
+ s = e+1;
+ }
+
+ kfree(macstr);
+ return 0;
+}
+
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct netfront_info *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+
+ if (likely(netif_carrier_ok(dev))) {
+ xennet_tx_buf_gc(dev);
+ /* Under tx_lock: protects access to rx shared-ring indexes. */
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ netif_rx_schedule(dev);
+ }
+
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+{
+ struct xen_netif_tx_sring *txs;
+ struct xen_netif_rx_sring *rxs;
+ int err;
+ struct net_device *netdev = info->netdev;
+
+ info->tx_ring_ref = GRANT_INVALID_REF;
+ info->rx_ring_ref = GRANT_INVALID_REF;
+ info->rx.sring = NULL;
+ info->tx.sring = NULL;
+ netdev->irq = 0;
+
+ err = xen_net_read_mac(dev, netdev->dev_addr);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+ goto fail;
+ }
+
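+ /* Allocate and initialise the shared TX ring, then grant the backend access to it. */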
+ txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
+ if (!txs) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err, "allocating tx ring page");
+ goto fail;
+ }
+ SHARED_RING_INIT(txs);
+ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
+ if (err < 0) {
+ free_page((unsigned long)txs);
+ goto fail;
+ }
+
+ info->tx_ring_ref = err;
+ rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
+ if (!rxs) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err, "allocating rx ring page");
+ goto fail;
+ }
+ SHARED_RING_INIT(rxs);
+ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
+ if (err < 0) {
+ free_page((unsigned long)rxs);
+ goto fail;
+ }
+ info->rx_ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err)
+ goto fail;
+
+ err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
+ IRQF_SAMPLE_RANDOM, netdev->name,
+ netdev);
+ if (err < 0)
+ goto fail;
+ netdev->irq = err;
+ return 0;
+
+ fail:
+ return err;
+}
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+ struct netfront_info *info)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+
+ /* Create shared ring, alloc event channel. */
+ err = setup_netfront(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_ring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
+ info->tx_ring_ref);
+ if (err) {
+ message = "writing tx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
+ info->rx_ring_ref);
+ if (err) {
+ message = "writing rx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
+ 1);
+ if (err) {
+ message = "writing request-rx-copy";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-notify";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_ring;
+ }
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_ring:
+ xennet_disconnect_backend(info);
+ out:
+ return err;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+ "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ } else if (dev->mtu > ETH_DATA_LEN)
+ dev->mtu = ETH_DATA_LEN;
+
+ return ethtool_op_set_sg(dev, data);
+}
+
+static int xennet_set_tso(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv4", "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ }
+
+ return ethtool_op_set_tso(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+ /* Turn off all GSO bits except ROBUST. */
+ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+ dev->features |= NETIF_F_GSO_ROBUST;
+ xennet_set_sg(dev, 0);
+
+ /* We need checksum offload to enable scatter/gather and TSO. */
+ if (!(dev->features & NETIF_F_IP_CSUM))
+ return;
+
+ if (!xennet_set_sg(dev, 1))
+ xennet_set_tso(dev, 1);
+}
+
+static int xennet_connect(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int i, requeue_idx, err;
+ struct sk_buff *skb;
+ grant_ref_t ref;
+ struct xen_netif_rx_request *req;
+ unsigned int feature_rx_copy;
+
+ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-rx-copy", "%u", &feature_rx_copy);
+ if (err != 1)
+ feature_rx_copy = 0;
+
+ if (!feature_rx_copy) {
+ dev_info(&dev->dev,
+ "backend does not support copying recieve path");
+ return -ENODEV;
+ }
+
+ err = talk_to_backend(np->xbdev, np);
+ if (err)
+ return err;
+
+ xennet_set_features(dev);
+
+ spin_lock_bh(&np->rx_lock);
+ spin_lock_irq(&np->tx_lock);
+
+ /* Step 1: Discard all pending TX packet fragments. */
+ xennet_release_tx_bufs(np);
+
+ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ if (!np->rx_skbs[i])
+ continue;
+
+ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
+ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
+ req = RING_GET_REQUEST(&np->rx, requeue_idx);
+
+ gnttab_grant_foreign_access_ref(
+ ref, np->xbdev->otherend_id,
+ pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
+ frags->page)),
+ 0);
+ req->gref = ref;
+ req->id = requeue_idx;
+
+ requeue_idx++;
+ }
+
+ np->rx.req_prod_pvt = requeue_idx;
+
+ /*
+ * Step 3: All public and private state should now be sane. Get
+ * ready to start sending and receiving packets and give the driver
+ * domain a kick because we've probably just requeued some
+ * packets.
+ */
+ netif_carrier_on(np->netdev);
+ notify_remote_via_irq(np->netdev->irq);
+ xennet_tx_buf_gc(dev);
+ xennet_alloc_rx_buffers(dev);
+
+ spin_unlock_irq(&np->tx_lock);
+ spin_unlock_bh(&np->rx_lock);
+
+ return 0;
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct netfront_info *np = dev->dev.driver_data;
+ struct net_device *netdev = np->netdev;
+
+ dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+
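+ /* Only InitWait (bring the device up) and Closing (tear it down) need action; the other states are informational here. */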
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateInitWait:
+ if (dev->state != XenbusStateInitialising)
+ break;
+ if (xennet_connect(netdev) != 0)
+ break;
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ xenbus_frontend_closed(dev);
+ break;
+ }
+}
+
+static struct ethtool_ops xennet_ethtool_ops =
+{
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = xennet_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = xennet_set_tso,
+ .get_link = ethtool_op_get_link,
+};
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_rxbuf_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_min_target);
+}
+
+static ssize_t store_rxbuf_min(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
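+ /* Clamp the requested minimum to the static ring limits before applying it. */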
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target > np->rx_max_target)
+ np->rx_max_target = target;
+ np->rx_min_target = target;
+ if (target > np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_max_target);
+}
+
+static ssize_t store_rxbuf_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target < np->rx_min_target)
+ np->rx_min_target = target;
+ np->rx_max_target = target;
+ if (target < np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_cur(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_target);
+}
+
+static struct device_attribute xennet_attrs[] = {
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+};
+
+static int xennet_sysfs_addif(struct net_device *netdev)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+ err = device_create_file(&netdev->dev,
+ &xennet_attrs[i]);
+ if (err)
+ goto fail;
+ }
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ return err;
+}
+
+static void xennet_sysfs_delif(struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+}
+
+#endif /* CONFIG_SYSFS */
+
+static struct xenbus_device_id netfront_ids[] = {
+ { "vif" },
+ { "" }
+};
+
+
+static int __devexit xennet_remove(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ unregister_netdev(info->netdev);
+
+ xennet_disconnect_backend(info);
+
+ del_timer_sync(&info->rx_refill_timer);
+
+ xennet_sysfs_delif(info->netdev);
+
+ free_netdev(info->netdev);
+
+ return 0;
+}
+
+static struct xenbus_driver netfront = {
+ .name = "vif",
+ .owner = THIS_MODULE,
+ .ids = netfront_ids,
+ .probe = netfront_probe,
+ .remove = __devexit_p(xennet_remove),
+ .resume = netfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init netif_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (is_initial_xendomain())
+ return 0;
+
+ printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+
+ return xenbus_register_frontend(&netfront);
+}
+module_init(netif_init);
+
+
+static void __exit netif_exit(void)
+{
+ if (is_initial_xendomain())
+ return;
+
+ return xenbus_unregister_driver(&netfront);
+}
+module_exit(netif_exit);
+
+MODULE_DESCRIPTION("Xen virtual network device frontend");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index f2a90a7fa2d6..870c5393c21a 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1137,7 +1137,7 @@ static int yellowfin_rx(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header */
- eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
yp->rx_buf_sz,
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 3a0a3a734933..e503c9c98032 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -466,9 +466,8 @@ static struct nubus_dev* __init
parent->base, dir.base);
/* Actually we should probably panic if this fails */
- if ((dev = kmalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+ if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
return NULL;
- memset(dev, 0, sizeof(*dev));
dev->resid = parent->type;
dev->directory = dir.base;
dev->board = board;
@@ -800,9 +799,8 @@ static struct nubus_board* __init nubus_add_board(int slot, int bytelanes)
nubus_rewind(&rp, FORMAT_BLOCK_SIZE, bytelanes);
/* Actually we should probably panic if this fails */
- if ((board = kmalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
+ if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
return NULL;
- memset(board, 0, sizeof(*board));
board->fblock = rp;
/* Dump the format block for debugging purposes */
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
new file mode 100644
index 000000000000..c03072b12f42
--- /dev/null
+++ b/drivers/of/Kconfig
@@ -0,0 +1,3 @@
+config OF_DEVICE
+ def_bool y
+ depends on OF && (SPARC || PPC_OF)
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
new file mode 100644
index 000000000000..ab9be5d5255b
--- /dev/null
+++ b/drivers/of/Makefile
@@ -0,0 +1,2 @@
+obj-y = base.o
+obj-$(CONFIG_OF_DEVICE) += device.o platform.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
new file mode 100644
index 000000000000..9377f3bc410a
--- /dev/null
+++ b/drivers/of/base.c
@@ -0,0 +1,275 @@
+/*
+ * Procedures for creating, accessing and interpreting the device tree.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
+ *
+ * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+
+struct device_node *allnodes;
+
+/* Use this lock when traversing the tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
+ */
+DEFINE_RWLOCK(devtree_lock);
+
+int of_n_addr_cells(struct device_node *np)
+{
+ const int *ip;
+
+ do {
+ if (np->parent)
+ np = np->parent;
+ ip = of_get_property(np, "#address-cells", NULL);
+ if (ip)
+ return *ip;
+ } while (np->parent);
+ /* No #address-cells property for the root node */
+ return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+}
+EXPORT_SYMBOL(of_n_addr_cells);
+
+int of_n_size_cells(struct device_node *np)
+{
+ const int *ip;
+
+ do {
+ if (np->parent)
+ np = np->parent;
+ ip = of_get_property(np, "#size-cells", NULL);
+ if (ip)
+ return *ip;
+ } while (np->parent);
+ /* No #size-cells property for the root node */
+ return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+}
+EXPORT_SYMBOL(of_n_size_cells);
+
+struct property *of_find_property(const struct device_node *np,
+ const char *name,
+ int *lenp)
+{
+ struct property *pp;
+
+ read_lock(&devtree_lock);
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+ if (of_prop_cmp(pp->name, name) == 0) {
+ if (lenp != 0)
+ *lenp = pp->length;
+ break;
+ }
+ }
+ read_unlock(&devtree_lock);
+
+ return pp;
+}
+EXPORT_SYMBOL(of_find_property);
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
+const void *of_get_property(const struct device_node *np, const char *name,
+ int *lenp)
+{
+ struct property *pp = of_find_property(np, name, lenp);
+
+ return pp ? pp->value : NULL;
+}
+EXPORT_SYMBOL(of_get_property);
+
+/** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
+ */
+int of_device_is_compatible(const struct device_node *device,
+ const char *compat)
+{
+ const char* cp;
+ int cplen, l;
+
+ cp = of_get_property(device, "compatible", &cplen);
+ if (cp == NULL)
+ return 0;
+ while (cplen > 0) {
+ if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
+ return 1;
+ l = strlen(cp) + 1;
+ cp += l;
+ cplen -= l;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(of_device_is_compatible);
+
+/**
+ * of_get_parent - Get a node's parent if any
+ * @node: Node to get parent
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_get_parent(const struct device_node *node)
+{
+ struct device_node *np;
+
+ if (!node)
+ return NULL;
+
+ read_lock(&devtree_lock);
+ np = of_node_get(node->parent);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_get_parent);
+
+/**
+ * of_get_next_child - Iterate over a node's children
+ * @node: parent node
+ * @prev: previous child of the parent node, or NULL to get first
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_get_next_child(const struct device_node *node,
+ struct device_node *prev)
+{
+ struct device_node *next;
+
+ read_lock(&devtree_lock);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling)
+ if (of_node_get(next))
+ break;
+ of_node_put(prev);
+ read_unlock(&devtree_lock);
+ return next;
+}
+EXPORT_SYMBOL(of_get_next_child);
+
+/**
+ * of_find_node_by_path - Find a node matching a full OF path
+ * @path: The full path to match
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_path(const char *path)
+{
+ struct device_node *np = allnodes;
+
+ read_lock(&devtree_lock);
+ for (; np; np = np->allnext) {
+ if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
+ && of_node_get(np))
+ break;
+ }
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_node_by_path);
+
+/**
+ * of_find_node_by_name - Find a node by its "name" property
+ * @from: The node to start searching from or NULL, the node
+ * you pass will not be searched, only the next one
+ * will; typically, you pass what the previous call
+ * returned. of_node_put() will be called on it
+ * @name: The name string to match against
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_name(struct device_node *from,
+ const char *name)
+{
+ struct device_node *np;
+
+ read_lock(&devtree_lock);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->name && (of_node_cmp(np->name, name) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_node_by_name);
+
+/**
+ * of_find_node_by_type - Find a node by its "device_type" property
+ * @from: The node to start searching from, or NULL to start searching
+ * the entire device tree. The node you pass will not be
+ * searched, only the next one will; typically, you pass
+ * what the previous call returned. of_node_put() will be
+ * called on from for you.
+ * @type: The type string to match against
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type)
+{
+ struct device_node *np;
+
+ read_lock(&devtree_lock);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->type && (of_node_cmp(np->type, type) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_node_by_type);
+
+/**
+ * of_find_compatible_node - Find a node based on type and one of the
+ * tokens in its "compatible" property
+ * @from: The node to start searching from or NULL, the node
+ * you pass will not be searched, only the next one
+ * will; typically, you pass what the previous call
+ * returned. of_node_put() will be called on it
+ * @type: The type string to match "device_type" or NULL to ignore
+ * @compatible: The string to match to one of the tokens in the device
+ * "compatible" list.
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_compatible_node(struct device_node *from,
+ const char *type, const char *compatible)
+{
+ struct device_node *np;
+
+ read_lock(&devtree_lock);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ if (type
+ && !(np->type && (of_node_cmp(np->type, type) == 0)))
+ continue;
+ if (of_device_is_compatible(np, compatible) && of_node_get(np))
+ break;
+ }
+ of_node_put(from);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_compatible_node);
diff --git a/drivers/of/device.c b/drivers/of/device.c
new file mode 100644
index 000000000000..6245f060fb77
--- /dev/null
+++ b/drivers/of/device.c
@@ -0,0 +1,131 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+
+#include <asm/errno.h>
+
+/**
+ * of_match_node - Tell if a device_node has a matching of_match structure
+ * @ids: array of of device match structures to search in
+ * @node: the of device structure to match against
+ *
+ * Low level utility function used by device matching.
+ */
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+ const struct device_node *node)
+{
+ while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+ int match = 1;
+ if (matches->name[0])
+ match &= node->name
+ && !strcmp(matches->name, node->name);
+ if (matches->type[0])
+ match &= node->type
+ && !strcmp(matches->type, node->type);
+ if (matches->compatible[0])
+ match &= of_device_is_compatible(node,
+ matches->compatible);
+ if (match)
+ return matches;
+ matches++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_match_node);
+
+/**
+ * of_match_device - Tell if an of_device structure has a matching
+ * of_match structure
+ * @ids: array of of device match structures to search in
+ * @dev: the of device structure to match against
+ *
+ * Used by a driver to check whether an of_device present in the
+ * system is in its list of supported devices.
+ */
+const struct of_device_id *of_match_device(const struct of_device_id *matches,
+ const struct of_device *dev)
+{
+ if (!dev->node)
+ return NULL;
+ return of_match_node(matches, dev->node);
+}
+EXPORT_SYMBOL(of_match_device);
+
+struct of_device *of_dev_get(struct of_device *dev)
+{
+ struct device *tmp;
+
+ if (!dev)
+ return NULL;
+ tmp = get_device(&dev->dev);
+ if (tmp)
+ return to_of_device(tmp);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(of_dev_get);
+
+void of_dev_put(struct of_device *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+}
+EXPORT_SYMBOL(of_dev_put);
+
+static ssize_t dev_show_devspec(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct of_device *ofdev;
+
+ ofdev = to_of_device(dev);
+ return sprintf(buf, "%s", ofdev->node->full_name);
+}
+
+static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
+
+/**
+ * of_release_dev - free an of device structure when all users of it are finished.
+ * @dev: device that's been disconnected
+ *
+ * Will be called only by the device core when all users of this of device are
+ * done.
+ */
+void of_release_dev(struct device *dev)
+{
+ struct of_device *ofdev;
+
+ ofdev = to_of_device(dev);
+ of_node_put(ofdev->node);
+ kfree(ofdev);
+}
+EXPORT_SYMBOL(of_release_dev);
+
+int of_device_register(struct of_device *ofdev)
+{
+ int rc;
+
+ BUG_ON(ofdev->node == NULL);
+
+ rc = device_register(&ofdev->dev);
+ if (rc)
+ return rc;
+
+ rc = device_create_file(&ofdev->dev, &dev_attr_devspec);
+ if (rc)
+ device_unregister(&ofdev->dev);
+
+ return rc;
+}
+EXPORT_SYMBOL(of_device_register);
+
+void of_device_unregister(struct of_device *ofdev)
+{
+ device_remove_file(&ofdev->dev, &dev_attr_devspec);
+ device_unregister(&ofdev->dev);
+}
+EXPORT_SYMBOL(of_device_unregister);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
new file mode 100644
index 000000000000..864f09fd9f86
--- /dev/null
+++ b/drivers/of/platform.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ * and Arnd Bergmann, IBM Corp.
+ * Merged from powerpc/kernel/of_platform.c and
+ * sparc{,64}/kernel/of_device.c by Stephen Rothwell
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct of_device *of_dev = to_of_device(dev);
+ struct of_platform_driver *of_drv = to_of_platform_driver(drv);
+ const struct of_device_id *matches = of_drv->match_table;
+
+ if (!matches)
+ return 0;
+
+ return of_match_device(matches, of_dev) != NULL;
+}
+
+static int of_platform_device_probe(struct device *dev)
+{
+ int error = -ENODEV;
+ struct of_platform_driver *drv;
+ struct of_device *of_dev;
+ const struct of_device_id *match;
+
+ drv = to_of_platform_driver(dev->driver);
+ of_dev = to_of_device(dev);
+
+ if (!drv->probe)
+ return error;
+
+ of_dev_get(of_dev);
+
+ match = of_match_device(drv->match_table, of_dev);
+ if (match)
+ error = drv->probe(of_dev, match);
+ if (error)
+ of_dev_put(of_dev);
+
+ return error;
+}
+
+static int of_platform_device_remove(struct device *dev)
+{
+ struct of_device *of_dev = to_of_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->remove)
+ drv->remove(of_dev);
+ return 0;
+}
+
+static int of_platform_device_suspend(struct device *dev, pm_message_t state)
+{
+ struct of_device *of_dev = to_of_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+ int error = 0;
+
+ if (dev->driver && drv->suspend)
+ error = drv->suspend(of_dev, state);
+ return error;
+}
+
+static int of_platform_device_resume(struct device * dev)
+{
+ struct of_device *of_dev = to_of_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+ int error = 0;
+
+ if (dev->driver && drv->resume)
+ error = drv->resume(of_dev);
+ return error;
+}
+
+int of_bus_type_init(struct bus_type *bus, const char *name)
+{
+ bus->name = name;
+ bus->match = of_platform_bus_match;
+ bus->probe = of_platform_device_probe;
+ bus->remove = of_platform_device_remove;
+ bus->suspend = of_platform_device_suspend;
+ bus->resume = of_platform_device_resume;
+ return bus_register(bus);
+}
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index edd6de995726..8134c7e198a5 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -26,8 +26,9 @@
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
+#include <linux/oprofile.h>
#include <linux/sched.h>
-
+
#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 9b6a4ebd03e3..5076ed1ebd8f 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -19,28 +19,10 @@ void free_event_buffer(void);
/* wake up the process sleeping on the event file */
void wake_up_buffer_waiter(void);
-
-/* Each escaped entry is prefixed by ESCAPE_CODE
- * then one of the following codes, then the
- * relevant data.
- */
-#define ESCAPE_CODE ~0UL
-#define CTX_SWITCH_CODE 1
-#define CPU_SWITCH_CODE 2
-#define COOKIE_SWITCH_CODE 3
-#define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
-#define MODULE_LOADED_CODE 6
-#define CTX_TGID_CODE 7
-#define TRACE_BEGIN_CODE 8
-#define TRACE_END_CODE 9
-
+
#define INVALID_COOKIE ~0UL
#define NO_COOKIE 0UL
-/* add data to the event buffer */
-void add_event_entry(unsigned long data);
-
extern const struct file_operations event_buffer_fops;
/* mutex between sync_cpu_buffers() and the
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index e5162a64018b..2c645170f06e 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -53,9 +53,24 @@ int oprofile_setup(void)
* us missing task deaths and eventually oopsing
* when trying to process the event buffer.
*/
+ if (oprofile_ops.sync_start) {
+ int sync_ret = oprofile_ops.sync_start();
+ switch (sync_ret) {
+ case 0:
+ goto post_sync;
+ case 1:
+ goto do_generic;
+ case -1:
+ goto out3;
+ default:
+ goto out3;
+ }
+ }
+do_generic:
if ((err = sync_start()))
goto out3;
+post_sync:
is_setup = 1;
mutex_unlock(&start_mutex);
return 0;
@@ -118,7 +133,20 @@ out:
void oprofile_shutdown(void)
{
mutex_lock(&start_mutex);
+ if (oprofile_ops.sync_stop) {
+ int sync_ret = oprofile_ops.sync_stop();
+ switch (sync_ret) {
+ case 0:
+ goto post_sync;
+ case 1:
+ goto do_generic;
+ default:
+ goto post_sync;
+ }
+ }
+do_generic:
sync_stop();
+post_sync:
if (oprofile_ops.shutdown)
oprofile_ops.shutdown();
is_setup = 0;
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a68b3b3761a2..a728a7cd2fc8 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <asm/io.h>
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 924ef0609460..fc4bde259dc7 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -121,14 +121,14 @@ struct pdcspath_entry pdcspath_entry_##_name = { \
#define PDCS_ATTR(_name, _mode, _show, _store) \
struct subsys_attribute pdcs_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
#define PATHS_ATTR(_name, _mode, _show, _store) \
struct pdcspath_attribute paths_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
+ .attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index a708c329675e..38cdf9fa36a7 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -73,6 +73,7 @@
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
#include <linux/delay.h>
#include <asm/io.h>
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index f46c69e4ed82..d449b150930e 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,11 +5,9 @@
# Parport configuration.
#
-menu "Parallel port support"
- depends on HAS_IOMEM
-
-config PARPORT
+menuconfig PARPORT
tristate "Parallel port support"
+ depends on HAS_IOMEM
---help---
If you want to use devices connected to your machine's parallel port
(the connector at the computer with 25 holes), e.g. printer, ZIP
@@ -33,9 +31,11 @@ config PARPORT
If unsure, say Y.
+if PARPORT
+
config PARPORT_PC
tristate "PC-style hardware"
- depends on PARPORT && (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV
+ depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && (!M68K || ISA)
---help---
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
@@ -85,7 +85,7 @@ config PARPORT_PC_PCMCIA
config PARPORT_IP32
tristate "SGI IP32 builtin port (EXPERIMENTAL)"
- depends on SGI_IP32 && PARPORT && EXPERIMENTAL
+ depends on SGI_IP32 && EXPERIMENTAL
select PARPORT_NOT_PC
help
Say Y here if you need support for the parallel port on
@@ -94,7 +94,7 @@ config PARPORT_IP32
config PARPORT_AMIGA
tristate "Amiga builtin port"
- depends on AMIGA && PARPORT
+ depends on AMIGA
select PARPORT_NOT_PC
help
Say Y here if you need support for the parallel port hardware on
@@ -103,7 +103,7 @@ config PARPORT_AMIGA
config PARPORT_MFC3
tristate "Multiface III parallel port"
- depends on ZORRO && PARPORT
+ depends on ZORRO
select PARPORT_NOT_PC
help
Say Y here if you need parallel port support for the MFC3 card.
@@ -112,7 +112,7 @@ config PARPORT_MFC3
config PARPORT_ATARI
tristate "Atari hardware"
- depends on ATARI && PARPORT
+ depends on ATARI
select PARPORT_NOT_PC
help
Say Y here if you need support for the parallel port hardware on
@@ -122,12 +122,11 @@ config PARPORT_ATARI
config PARPORT_GSC
tristate
default GSC
- depends on PARPORT
select PARPORT_NOT_PC
config PARPORT_SUNBPP
tristate "Sparc hardware (EXPERIMENTAL)"
- depends on SBUS && PARPORT && EXPERIMENTAL
+ depends on SBUS && EXPERIMENTAL
select PARPORT_NOT_PC
help
This driver provides support for the bidirectional parallel port
@@ -136,7 +135,6 @@ config PARPORT_SUNBPP
config PARPORT_AX88796
tristate "AX88796 Parallel Port"
- depends on PARPORT
select PARPORT_NOT_PC
help
Say Y here if you need support for the parallel port hardware on
@@ -148,7 +146,6 @@ config PARPORT_AX88796
config PARPORT_1284
bool "IEEE 1284 transfer modes"
- depends on PARPORT
help
If you have a printer that supports status readback or device ID, or
want to use a device that uses enhanced parallel port transfer modes
@@ -159,5 +156,4 @@ config PARPORT_1284
config PARPORT_NOT_PC
bool
-endmenu
-
+endif # PARPORT
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 8b7d84eca05d..802a81d47367 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -105,9 +105,8 @@ static int parport_probe(struct pcmcia_device *link)
DEBUG(0, "parport_attach()\n");
/* Create new parport device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
- memset(info, 0, sizeof(*info));
link->priv = info;
info->p_dev = link;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 7bfbad57879d..5d58ad55d85c 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2424,7 +2424,6 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
u32 ite8872set;
u32 ite8872_lpt, ite8872_lpthi;
u8 ite8872_irq, type;
- char *fake_name = "parport probe";
int irq;
int i;
@@ -2432,11 +2431,11 @@ static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
// make sure which one chip
for(i = 0; i < 5; i++) {
- base_res = request_region(inta_addr[i], 0x8, fake_name);
+ base_res = request_region(inta_addr[i], 32, "it887x");
if (base_res) {
int test;
pci_write_config_dword (pdev, 0x60,
- 0xe7000000 | inta_addr[i]);
+ 0xe5000000 | inta_addr[i]);
pci_write_config_dword (pdev, 0x78,
0x00000000 | inta_addr[i]);
test = inb (inta_addr[i]);
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index 90ea3b8b99b0..bd6ad8b38168 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -324,10 +324,9 @@ static int __devinit parport_serial_pci_probe (struct pci_dev *dev,
struct parport_serial_private *priv;
int err;
- priv = kmalloc (sizeof *priv, GFP_KERNEL);
+ priv = kzalloc (sizeof *priv, GFP_KERNEL);
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(struct parport_serial_private));
pci_set_drvdata (dev, priv);
err = pci_enable_device (dev);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index e3beb784406f..006054a40995 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -41,9 +41,7 @@ obj-$(CONFIG_ACPI) += pci-acpi.o
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
-ifndef CONFIG_X86
-obj-y += syscall.o
-endif
+obj-$(CONFIG_PCI_SYSCALL) += syscall.o
ifeq ($(CONFIG_PCI_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index ddbadd95387e..f6cc0c5b5657 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -211,6 +211,7 @@ typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
extern int acpiphp_enable_slot (struct acpiphp_slot *slot);
extern int acpiphp_disable_slot (struct acpiphp_slot *slot);
+extern int acpiphp_eject_slot (struct acpiphp_slot *slot);
extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index fa5c0197d571..a0ca63adad5a 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -156,11 +156,15 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
+ int retval;
dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
/* disable the specified slot */
- return acpiphp_disable_slot(slot->acpi_slot);
+ retval = acpiphp_disable_slot(slot->acpi_slot);
+ if (!retval)
+ retval = acpiphp_eject_slot(slot->acpi_slot);
+ return retval;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 9ef4e989afc4..1e125b56c9a9 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -1282,7 +1282,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
/**
* acpiphp_eject_slot - physically eject the slot
*/
-static int acpiphp_eject_slot(struct acpiphp_slot *slot)
+int acpiphp_eject_slot(struct acpiphp_slot *slot)
{
acpi_status status;
struct acpiphp_func *func;
@@ -1368,6 +1368,9 @@ static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+ return;
+
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
bridge->hpp.t0->cache_line_size);
pci_write_config_byte(dev, PCI_LATENCY_TIMER,
@@ -1502,6 +1505,37 @@ static void handle_bridge_insertion(acpi_handle handle, u32 type)
* ACPI event handlers
*/
+static acpi_status
+count_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ int *count = (int *)context;
+ struct acpiphp_bridge *bridge;
+
+ bridge = acpiphp_handle_to_bridge(handle);
+ if (bridge)
+ (*count)++;
+ return AE_OK ;
+}
+
+static acpi_status
+check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ struct acpiphp_bridge *bridge;
+ char objname[64];
+ struct acpi_buffer buffer = { .length = sizeof(objname),
+ .pointer = objname };
+
+ bridge = acpiphp_handle_to_bridge(handle);
+ if (bridge) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ dbg("%s: re-enumerating slots under %s\n",
+ __FUNCTION__, objname);
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ acpiphp_check_bridge(bridge);
+ }
+ return AE_OK ;
+}
+
/**
* handle_hotplug_event_bridge - handle ACPI event on bridges
*
@@ -1519,6 +1553,7 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
struct acpi_device *device;
+ int num_sub_bridges = 0;
if (acpi_bus_get_device(handle, &device)) {
/* This bridge must have just been physically inserted */
@@ -1527,7 +1562,12 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
}
bridge = acpiphp_handle_to_bridge(handle);
- if (!bridge) {
+ if (type == ACPI_NOTIFY_BUS_CHECK) {
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ACPI_UINT32_MAX,
+ count_sub_bridges, &num_sub_bridges, NULL);
+ }
+
+ if (!bridge && !num_sub_bridges) {
err("cannot get bridge info\n");
return;
}
@@ -1538,7 +1578,14 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
case ACPI_NOTIFY_BUS_CHECK:
/* bus re-enumerate */
dbg("%s: Bus check notify on %s\n", __FUNCTION__, objname);
- acpiphp_check_bridge(bridge);
+ if (bridge) {
+ dbg("%s: re-enumerating slots under %s\n",
+ __FUNCTION__, objname);
+ acpiphp_check_bridge(bridge);
+ }
+ if (num_sub_bridges)
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL);
break;
case ACPI_NOTIFY_DEVICE_CHECK:
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index e7322c25d377..70db38c0ced9 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -106,7 +106,8 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
static ssize_t ibm_read_apci_table(struct kobject *kobj,
- char *buffer, loff_t pos, size_t size);
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv);
static int __init ibm_acpiphp_init(void);
@@ -117,7 +118,6 @@ static struct notification ibm_note;
static struct bin_attribute ibm_apci_table_attr = {
.attr = {
.name = "apci_table",
- .owner = THIS_MODULE,
.mode = S_IRUGO,
},
.read = ibm_read_apci_table,
@@ -358,7 +358,8 @@ read_table_done:
* our solution is to only allow reading the table in all at once
**/
static ssize_t ibm_read_apci_table(struct kobject *kobj,
- char *buffer, loff_t pos, size_t size)
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t size)
{
int bytes_read = -EINVAL;
char *table = NULL;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 684551559d44..ed4d44e3332c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -35,6 +35,7 @@
#include <linux/smp_lock.h>
#include <asm/atomic.h>
#include <linux/delay.h>
+#include <linux/kthread.h>
#include "cpci_hotplug.h"
#define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>"
@@ -59,9 +60,8 @@ static int slots;
static atomic_t extracting;
int cpci_debug;
static struct cpci_hp_controller *controller;
-static struct semaphore event_semaphore; /* mutex for process loop (up if something to process) */
-static struct semaphore thread_exit; /* guard ensure thread has exited before calling it quits */
-static int thread_finished = 1;
+static struct task_struct *cpci_thread;
+static int thread_finished;
static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
@@ -357,9 +357,7 @@ cpci_hp_intr(int irq, void *data)
controller->ops->disable_irq();
/* Trigger processing by the event thread */
- dbg("Signal event_semaphore");
- up(&event_semaphore);
- dbg("exited cpci_hp_intr");
+ wake_up_process(cpci_thread);
return IRQ_HANDLED;
}
@@ -521,17 +519,12 @@ event_thread(void *data)
{
int rc;
- lock_kernel();
- daemonize("cpci_hp_eventd");
- unlock_kernel();
-
dbg("%s - event thread started", __FUNCTION__);
while (1) {
dbg("event thread sleeping");
- down_interruptible(&event_semaphore);
- dbg("event thread woken, thread_finished = %d",
- thread_finished);
- if (thread_finished || signal_pending(current))
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (kthread_should_stop())
break;
do {
rc = check_slots();
@@ -541,18 +534,17 @@ event_thread(void *data)
} else if (rc < 0) {
dbg("%s - error checking slots", __FUNCTION__);
thread_finished = 1;
- break;
+ goto out;
}
- } while (atomic_read(&extracting) && !thread_finished);
- if (thread_finished)
+ } while (atomic_read(&extracting) && !kthread_should_stop());
+ if (kthread_should_stop())
break;
/* Re-enable ENUM# interrupt */
dbg("%s - re-enabling irq", __FUNCTION__);
controller->ops->enable_irq();
}
- dbg("%s - event thread signals exit", __FUNCTION__);
- up(&thread_exit);
+ out:
return 0;
}
@@ -562,12 +554,8 @@ poll_thread(void *data)
{
int rc;
- lock_kernel();
- daemonize("cpci_hp_polld");
- unlock_kernel();
-
while (1) {
- if (thread_finished || signal_pending(current))
+ if (kthread_should_stop() || signal_pending(current))
break;
if (controller->ops->query_enum()) {
do {
@@ -578,48 +566,36 @@ poll_thread(void *data)
} else if (rc < 0) {
dbg("%s - error checking slots", __FUNCTION__);
thread_finished = 1;
- break;
+ goto out;
}
- } while (atomic_read(&extracting) && !thread_finished);
+ } while (atomic_read(&extracting) && !kthread_should_stop());
}
msleep(100);
}
- dbg("poll thread signals exit");
- up(&thread_exit);
+ out:
return 0;
}
static int
cpci_start_thread(void)
{
- int pid;
-
- /* initialize our semaphores */
- init_MUTEX_LOCKED(&event_semaphore);
- init_MUTEX_LOCKED(&thread_exit);
- thread_finished = 0;
-
if (controller->irq)
- pid = kernel_thread(event_thread, NULL, 0);
+ cpci_thread = kthread_run(event_thread, NULL, "cpci_hp_eventd");
else
- pid = kernel_thread(poll_thread, NULL, 0);
- if (pid < 0) {
+ cpci_thread = kthread_run(poll_thread, NULL, "cpci_hp_polld");
+ if (IS_ERR(cpci_thread)) {
err("Can't start up our thread");
- return -1;
+ return PTR_ERR(cpci_thread);
}
- dbg("Our thread pid = %d", pid);
+ thread_finished = 0;
return 0;
}
static void
cpci_stop_thread(void)
{
+ kthread_stop(cpci_thread);
thread_finished = 1;
- dbg("thread finish command given");
- if (controller->irq)
- up(&event_semaphore);
- dbg("wait for thread to exit");
- down(&thread_exit);
}
int
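The hunks above replace the daemonize()/semaphore plumbing with the kthread API. As a rough sketch of the lifecycle the new code relies on (not part of this patch; names are illustrative): kthread_run() starts the thread, the loop parks itself until wake_up_process() or kthread_stop() is called, and kthread_stop() wakes the thread, makes kthread_should_stop() return true, and waits for the thread function to return.

#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_task;

static int example_thread(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();		/* woken by wake_up_process() */
		/* ... process whatever woke us ... */
	}
	return 0;
}

/* start:  example_task = kthread_run(example_thread, NULL, "example_kthread");
 * notify: wake_up_process(example_task);
 * stop:   kthread_stop(example_task);  waits until example_thread() returns */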
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 7b1beaad2752..5e9be44817cb 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -45,8 +45,6 @@ extern int cpci_debug;
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
-#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
-
u8 cpci_get_attention_status(struct slot* slot)
{
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 5617cfdadc5c..d590a99930fa 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -796,7 +796,6 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u8 num_of_slots = 0;
u8 hp_slot = 0;
u8 device;
- u8 rev;
u8 bus_cap;
u16 temp_word;
u16 vendor_id;
@@ -823,9 +822,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
dbg("Vendor ID: %x\n", vendor_id);
- rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
- dbg("revision: %d\n", rev);
- if (rc || ((vendor_id == PCI_VENDOR_ID_COMPAQ) && (!rev))) {
+ dbg("revision: %d\n", pdev->revision);
+ if ((vendor_id == PCI_VENDOR_ID_COMPAQ) && (!pdev->revision)) {
err(msg_HPC_rev_error);
rc = -ENODEV;
goto err_disable_device;
@@ -836,7 +834,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
*/
- if ((rev > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
+ if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
// TODO: This code can be made to support non-Compaq or Intel subsystem IDs
rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
if (rc) {
@@ -870,7 +868,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (subsystem_vid) {
case PCI_VENDOR_ID_COMPAQ:
- if (rev >= 0x13) { /* CIOBX */
+ if (pdev->revision >= 0x13) { /* CIOBX */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
ctrl->push_button = 1;
@@ -1075,7 +1073,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(ctrl->pci_bus, pdev->bus, sizeof(*ctrl->pci_bus));
ctrl->bus = pdev->bus->number;
- ctrl->rev = rev;
+ ctrl->rev = pdev->revision;
dbg("bus device function rev: %d %d %d %d\n", ctrl->bus,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), ctrl->rev);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index ccc57627201e..7959c222dc24 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -103,6 +103,7 @@ struct controller {
u8 cap_base;
struct timer_list poll_timer;
volatile int cmd_busy;
+ spinlock_t lock;
};
#define INT_BUTTON_IGNORE 0
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 7f22caa70178..98e541ffef3d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -197,6 +197,12 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
__FUNCTION__);
return;
}
+ /*
+ * After turning power off, we must wait for at least
+ * 1 second before taking any action that relies on
+ * power having been removed from the slot/adapter.
+ */
+ msleep(1000);
}
}
@@ -615,6 +621,12 @@ int pciehp_disable_slot(struct slot *p_slot)
mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
+ /*
+ * After turning power off, we must wait for at least
+ * 1 second before taking any action that relies on
+ * power having been removed from the slot/adapter.
+ */
+ msleep(1000);
}
ret = remove_board(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9aac6a87eb53..016eea94a8a5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -275,11 +275,19 @@ static inline int pcie_wait_cmd(struct controller *ctrl)
return retval;
}
-static int pcie_write_cmd(struct slot *slot, u16 cmd)
+/**
+ * pcie_write_cmd - Issue controller command
+ * @slot: slot to which the command is issued
+ * @cmd: command value written to slot control register
+ * @mask: bitmask of slot control register to be modified
+ */
+static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
{
struct controller *ctrl = slot->ctrl;
int retval = 0;
u16 slot_status;
+ u16 slot_ctrl;
+ unsigned long flags;
DBG_ENTER_ROUTINE
@@ -299,17 +307,29 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd)
__FUNCTION__);
}
- ctrl->cmd_busy = 1;
- retval = pciehp_writew(ctrl, SLOTCTRL, (cmd | CMD_CMPL_INTR_ENABLE));
+ spin_lock_irqsave(&ctrl->lock, flags);
+ retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
if (retval) {
- err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
- goto out;
+ err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
+ goto out_spin_unlock;
}
+ slot_ctrl &= ~mask;
+ slot_ctrl |= ((cmd & mask) | CMD_CMPL_INTR_ENABLE);
+
+ ctrl->cmd_busy = 1;
+ retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
+ if (retval)
+ err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
+
+ out_spin_unlock:
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+
/*
* Wait for command completion.
*/
- retval = pcie_wait_cmd(ctrl);
+ if (!retval)
+ retval = pcie_wait_cmd(ctrl);
out:
mutex_unlock(&ctrl->ctrl_lock);
DBG_LEAVE_ROUTINE
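With the new @mask argument, callers no longer read SLOTCTRL themselves; they pass the value for the bits they intend to change plus a mask of those bits, and pcie_write_cmd() performs the read-modify-write under ctrl->lock. A fragment mirroring the converted callers below (assumes slot and rc are in scope):

	u16 slot_cmd = EMI_CTRL;	/* new value for the bits we own */
	u16 cmd_mask = EMI_CTRL;	/* bits we are allowed to modify */

	if (!pciehp_poll_mode) {
		slot_cmd |= HP_INTR_ENABLE;
		cmd_mask |= HP_INTR_ENABLE;
	}
	/* everything outside cmd_mask is preserved by the locked
	 * read-modify-write inside pcie_write_cmd() */
	rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);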
@@ -502,25 +522,20 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status)
static int hpc_toggle_emi(struct slot *slot)
{
- struct controller *ctrl = slot->ctrl;
- u16 slot_cmd = 0;
- u16 slot_ctrl;
- int rc = 0;
+ u16 slot_cmd;
+ u16 cmd_mask;
+ int rc;
DBG_ENTER_ROUTINE
- rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (rc) {
- err("%s : hp_register_read_word SLOT_CTRL failed\n",
- __FUNCTION__);
- return rc;
- }
-
- slot_cmd = (slot_ctrl | EMI_CTRL);
- if (!pciehp_poll_mode)
+ slot_cmd = EMI_CTRL;
+ cmd_mask = EMI_CTRL;
+ if (!pciehp_poll_mode) {
slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ cmd_mask = cmd_mask | HP_INTR_ENABLE;
+ }
- pcie_write_cmd(slot, slot_cmd);
+ rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
slot->last_emi_toggle = get_seconds();
DBG_LEAVE_ROUTINE
return rc;
@@ -529,35 +544,32 @@ static int hpc_toggle_emi(struct slot *slot)
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
- u16 slot_cmd = 0;
- u16 slot_ctrl;
- int rc = 0;
+ u16 slot_cmd;
+ u16 cmd_mask;
+ int rc;
DBG_ENTER_ROUTINE
- rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (rc) {
- err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
- return rc;
- }
-
+ cmd_mask = ATTN_LED_CTRL;
switch (value) {
case 0 : /* turn off */
- slot_cmd = (slot_ctrl & ~ATTN_LED_CTRL) | 0x00C0;
+ slot_cmd = 0x00C0;
break;
case 1: /* turn on */
- slot_cmd = (slot_ctrl & ~ATTN_LED_CTRL) | 0x0040;
+ slot_cmd = 0x0040;
break;
case 2: /* turn blink */
- slot_cmd = (slot_ctrl & ~ATTN_LED_CTRL) | 0x0080;
+ slot_cmd = 0x0080;
break;
default:
return -1;
}
- if (!pciehp_poll_mode)
- slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ if (!pciehp_poll_mode) {
+ slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ cmd_mask = cmd_mask | HP_INTR_ENABLE;
+ }
- pcie_write_cmd(slot, slot_cmd);
+ rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
dbg("%s: SLOTCTRL %x write cmd %x\n",
__FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
@@ -570,21 +582,18 @@ static void hpc_set_green_led_on(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 slot_ctrl;
- int rc = 0;
+ u16 cmd_mask;
DBG_ENTER_ROUTINE
- rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (rc) {
- err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
- return;
+ slot_cmd = 0x0100;
+ cmd_mask = PWR_LED_CTRL;
+ if (!pciehp_poll_mode) {
+ slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ cmd_mask = cmd_mask | HP_INTR_ENABLE;
}
- slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0100;
- if (!pciehp_poll_mode)
- slot_cmd = slot_cmd | HP_INTR_ENABLE;
- pcie_write_cmd(slot, slot_cmd);
+ pcie_write_cmd(slot, slot_cmd, cmd_mask);
dbg("%s: SLOTCTRL %x write cmd %x\n",
__FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
@@ -596,22 +605,18 @@ static void hpc_set_green_led_off(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 slot_ctrl;
- int rc = 0;
+ u16 cmd_mask;
DBG_ENTER_ROUTINE
- rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (rc) {
- err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
- return;
+ slot_cmd = 0x0300;
+ cmd_mask = PWR_LED_CTRL;
+ if (!pciehp_poll_mode) {
+ slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ cmd_mask = cmd_mask | HP_INTR_ENABLE;
}
- slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0300;
-
- if (!pciehp_poll_mode)
- slot_cmd = slot_cmd | HP_INTR_ENABLE;
- pcie_write_cmd(slot, slot_cmd);
+ pcie_write_cmd(slot, slot_cmd, cmd_mask);
dbg("%s: SLOTCTRL %x write cmd %x\n",
__FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
@@ -623,22 +628,18 @@ static void hpc_set_green_led_blink(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 slot_ctrl;
- int rc = 0;
+ u16 cmd_mask;
DBG_ENTER_ROUTINE
- rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (rc) {
- err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
- return;
+ slot_cmd = 0x0200;
+ cmd_mask = PWR_LED_CTRL;
+ if (!pciehp_poll_mode) {
+ slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ cmd_mask = cmd_mask | HP_INTR_ENABLE;
}
- slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0200;
-
- if (!pciehp_poll_mode)
- slot_cmd = slot_cmd | HP_INTR_ENABLE;
- pcie_write_cmd(slot, slot_cmd);
+ pcie_write_cmd(slot, slot_cmd, cmd_mask);
dbg("%s: SLOTCTRL %x write cmd %x\n",
__FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
@@ -669,7 +670,8 @@ static int hpc_power_on_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 slot_ctrl, slot_status;
+ u16 cmd_mask;
+ u16 slot_status;
int retval = 0;
DBG_ENTER_ROUTINE
@@ -692,23 +694,23 @@ static int hpc_power_on_slot(struct slot * slot)
}
}
- retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (retval) {
- err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
- return retval;
- }
-
- slot_cmd = (slot_ctrl & ~PWR_CTRL) | POWER_ON;
-
+ slot_cmd = POWER_ON;
+ cmd_mask = PWR_CTRL;
/* Enable detection that we turned off at slot power-off time */
- if (!pciehp_poll_mode)
+ if (!pciehp_poll_mode) {
slot_cmd = slot_cmd |
PWR_FAULT_DETECT_ENABLE |
MRL_DETECT_ENABLE |
PRSN_DETECT_ENABLE |
HP_INTR_ENABLE;
+ cmd_mask = cmd_mask |
+ PWR_FAULT_DETECT_ENABLE |
+ MRL_DETECT_ENABLE |
+ PRSN_DETECT_ENABLE |
+ HP_INTR_ENABLE;
+ }
- retval = pcie_write_cmd(slot, slot_cmd);
+ retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
if (retval) {
err("%s: Write %x command failed!\n", __FUNCTION__, slot_cmd);
@@ -726,21 +728,15 @@ static int hpc_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
- u16 slot_ctrl;
+ u16 cmd_mask;
int retval = 0;
DBG_ENTER_ROUTINE
dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
- retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
- if (retval) {
- err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
- return retval;
- }
-
- slot_cmd = (slot_ctrl & ~PWR_CTRL) | POWER_OFF;
-
+ slot_cmd = POWER_OFF;
+ cmd_mask = PWR_CTRL;
/*
* If we get MRL or presence detect interrupts now, the isr
* will notice the sticky power-fault bit too and issue power
@@ -748,14 +744,19 @@ static int hpc_power_off_slot(struct slot * slot)
* of command completions, since the power-fault bit remains on
* till the slot is powered on again.
*/
- if (!pciehp_poll_mode)
+ if (!pciehp_poll_mode) {
slot_cmd = (slot_cmd &
~PWR_FAULT_DETECT_ENABLE &
~MRL_DETECT_ENABLE &
~PRSN_DETECT_ENABLE) | HP_INTR_ENABLE;
+ cmd_mask = cmd_mask |
+ PWR_FAULT_DETECT_ENABLE |
+ MRL_DETECT_ENABLE |
+ PRSN_DETECT_ENABLE |
+ HP_INTR_ENABLE;
+ }
- retval = pcie_write_cmd(slot, slot_cmd);
-
+ retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
if (retval) {
err("%s: Write command failed!\n", __FUNCTION__);
return -1;
@@ -775,6 +776,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
u16 temp_word;
int hp_slot = 0; /* only 1 slot per PCI Express port */
int rc = 0;
+ unsigned long flags;
rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
if (rc) {
@@ -794,10 +796,12 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc);
/* Mask Hot-plug Interrupt Enable */
if (!pciehp_poll_mode) {
+ spin_lock_irqsave(&ctrl->lock, flags);
rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
if (rc) {
err("%s: Cannot read SLOT_CTRL register\n",
__FUNCTION__);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
return IRQ_NONE;
}
@@ -808,8 +812,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (rc) {
err("%s: Cannot write to SLOTCTRL register\n",
__FUNCTION__);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
return IRQ_NONE;
}
+ spin_unlock_irqrestore(&ctrl->lock, flags);
rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
if (rc) {
@@ -859,10 +865,12 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
}
/* Unmask Hot-plug Interrupt Enable */
if (!pciehp_poll_mode) {
+ spin_lock_irqsave(&ctrl->lock, flags);
rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
if (rc) {
err("%s: Cannot read SLOTCTRL register\n",
__FUNCTION__);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
return IRQ_NONE;
}
@@ -873,8 +881,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (rc) {
err("%s: Cannot write to SLOTCTRL register\n",
__FUNCTION__);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
return IRQ_NONE;
}
+ spin_unlock_irqrestore(&ctrl->lock, flags);
rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
if (rc) {
@@ -1237,6 +1247,7 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
mutex_init(&ctrl->crit_sect);
mutex_init(&ctrl->ctrl_lock);
+ spin_lock_init(&ctrl->lock);
/* setup wait queue */
init_waitqueue_head(&ctrl->queue);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index bb3c101c2c5a..deb6b5e35feb 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -159,8 +159,8 @@ static void dlpar_pci_add_bus(struct device_node *dn)
/* Claim new bus resources */
pcibios_claim_one_bus(dev->bus);
- /* ioremap() for child bus, which may or may not succeed */
- remap_bus_range(dev->subordinate);
+ /* Map IO space for child bus, which may or may not succeed */
+ pcibios_map_io_space(dev->subordinate);
/* Add new devices to global lists. Register in proc, sysfs. */
pci_bus_add_devices(phb->bus);
@@ -390,7 +390,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
} else
pcibios_remove_pci_devices(bus);
- if (unmap_bus_range(bus)) {
+ if (pcibios_unmap_io_space(bus)) {
printk(KERN_ERR "%s: failed to unmap bus range\n",
__FUNCTION__);
return -ERANGE;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b5ac810404c0..c8062494009f 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -55,8 +55,6 @@ acpi_query_osc (
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
if (ACPI_FAILURE (status)) {
- printk(KERN_DEBUG
- "Evaluate _OSC Set fails. Status = 0x%04x\n", status);
*ret_status = status;
return status;
}
@@ -124,11 +122,9 @@ acpi_run_osc (
in_params[3].buffer.pointer = (u8 *)context;
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
- if (ACPI_FAILURE (status)) {
- printk(KERN_DEBUG
- "Evaluate _OSC Set fails. Status = 0x%04x\n", status);
+ if (ACPI_FAILURE (status))
return status;
- }
+
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
printk(KERN_DEBUG
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 284e83a527f9..1b7b2812bf2d 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -20,7 +20,7 @@
#include <linux/stat.h>
#include <linux/topology.h>
#include <linux/mm.h>
-
+#include <linux/capability.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -213,7 +213,8 @@ struct device_attribute pci_dev_attrs[] = {
};
static ssize_t
-pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
+pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
unsigned int size = 64;
@@ -285,7 +286,8 @@ pci_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
}
static ssize_t
-pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
+pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
unsigned int size = count;
@@ -352,7 +354,8 @@ pci_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
* callback routine (pci_legacy_read).
*/
ssize_t
-pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off, size_t count)
+pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
struct class_device,
@@ -376,7 +379,8 @@ pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off, size_t count)
* callback routine (pci_legacy_write).
*/
ssize_t
-pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off, size_t count)
+pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
struct class_device,
@@ -499,7 +503,6 @@ static int pci_create_resource_files(struct pci_dev *pdev)
sprintf(res_attr_name, "resource%d", i);
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = S_IRUSR | S_IWUSR;
- res_attr->attr.owner = THIS_MODULE;
res_attr->size = pci_resource_len(pdev, i);
res_attr->mmap = pci_mmap_resource;
res_attr->private = &pdev->resource[i];
@@ -529,7 +532,8 @@ static inline void pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t
-pci_write_rom(struct kobject *kobj, char *buf, loff_t off, size_t count)
+pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -552,7 +556,8 @@ pci_write_rom(struct kobject *kobj, char *buf, loff_t off, size_t count)
* device corresponding to @kobj.
*/
static ssize_t
-pci_read_rom(struct kobject *kobj, char *buf, loff_t off, size_t count)
+pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
void __iomem *rom;
@@ -582,7 +587,6 @@ static struct bin_attribute pci_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 256,
.read = pci_read_config,
@@ -593,13 +597,17 @@ static struct bin_attribute pcie_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 4096,
.read = pci_read_config,
.write = pci_write_config,
};
+int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
+{
+ return 0;
+}
+
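The weak default above lets an architecture provide its own platform-specific sysfs attributes simply by defining a non-weak pcibios_add_platform_entries(). A hedged sketch of what such an override might look like; the "devspec" attribute and its contents are illustrative, not something this patch adds:

#include <linux/pci.h>
#include <linux/device.h>

static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");	/* placeholder contents */
}
static DEVICE_ATTR(devspec, S_IRUGO, devspec_show, NULL);

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}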
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
{
struct bin_attribute *rom_attr = NULL;
@@ -628,7 +636,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
rom_attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
rom_attr->attr.name = "rom";
rom_attr->attr.mode = S_IRUSR;
- rom_attr->attr.owner = THIS_MODULE;
rom_attr->read = pci_read_rom;
rom_attr->write = pci_write_rom;
retval = sysfs_create_bin_file(&pdev->dev.kobj, rom_attr);
@@ -640,10 +647,14 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
}
}
/* add platform-specific attributes */
- pcibios_add_platform_entries(pdev);
+ if (pcibios_add_platform_entries(pdev))
+ goto err_rom_file;
return 0;
+err_rom_file:
+ if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
+ sysfs_remove_bin_file(&pdev->dev.kobj, rom_attr);
err_rom:
kfree(rom_attr);
err_resource_files:
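All sysfs binary-attribute callbacks in this series gain a struct bin_attribute * parameter, so a read/write handler can reach its own attribute (and bin_attr->private) without global state. A minimal sketch of the updated read signature for an unrelated, hypothetical attribute backed by a static blob:

static const u8 example_blob[] = { 0x55, 0xaa };	/* hypothetical data */

static ssize_t example_bin_read(struct kobject *kobj,
				struct bin_attribute *bin_attr,
				char *buf, loff_t off, size_t count)
{
	size_t len = sizeof(example_blob);

	if (off >= len)
		return 0;
	if (count > len - off)
		count = len - off;
	memcpy(buf, example_blob + off, count);
	return count;
}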
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fd47ac0c4730..03fd59e80fef 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -406,6 +406,13 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
return 0;
+ /* find PCI PM capability in list */
+ pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+
+ /* abort if the device doesn't support PM capabilities */
+ if (!pm)
+ return -EIO;
+
/* Validate current state:
* Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state
@@ -418,13 +425,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0; /* we're already there */
- /* find PCI PM capability in list */
- pm = pci_find_capability(dev, PCI_CAP_ID_PM);
-
- /* abort if the device doesn't support PM capabilities */
- if (!pm)
- return -EIO;
-
pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
printk(KERN_DEBUG
@@ -1186,6 +1186,11 @@ int pci_set_mwi(struct pci_dev *dev)
return 0;
}
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
void pci_clear_mwi(struct pci_dev *dev)
{
}
@@ -1242,9 +1247,7 @@ pci_set_cacheline_size(struct pci_dev *dev)
* pci_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
- * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND,
- * and then calls @pcibios_set_mwi to do the needed arch specific
- * operations or a generic mwi-prep function.
+ * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
@@ -1260,7 +1263,8 @@ pci_set_mwi(struct pci_dev *dev)
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (! (cmd & PCI_COMMAND_INVALIDATE)) {
- pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
+ pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n",
+ pci_name(dev));
cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
@@ -1269,6 +1273,21 @@ pci_set_mwi(struct pci_dev *dev)
}
/**
+ * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
+ * Callers are not required to check the return value.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ int rc = pci_set_mwi(dev);
+ return rc;
+}
+
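pci_try_set_mwi() exists so that drivers which treat MWI purely as an optimization can skip the error handling that pci_set_mwi() obliges. A minimal illustration in a hypothetical driver probe (not from this patch):

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best effort; failure is not fatal */

	/* ... rest of probe ... */
	return 0;
}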
+/**
* pci_clear_mwi - disables Memory-Write-Invalidate for device dev
* @dev: the PCI device to disable
*
@@ -1375,6 +1394,164 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
#endif
/**
+ * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
+ * @dev: PCI device to query
+ *
+ * Returns mmrbc: maximum designed memory read count in bytes
+ * or appropriate error value.
+ */
+int pcix_get_max_mmrbc(struct pci_dev *dev)
+{
+ int err, cap;
+ u32 stat;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ return -EINVAL;
+
+ err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
+ if (err)
+ return -EINVAL;
+
+ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
+}
+EXPORT_SYMBOL(pcix_get_max_mmrbc);
+
+/**
+ * pcix_get_mmrbc - get PCI-X maximum memory read byte count
+ * @dev: PCI device to query
+ *
+ * Returns mmrbc: maximum memory read count in bytes
+ * or appropriate error value.
+ */
+int pcix_get_mmrbc(struct pci_dev *dev)
+{
+ int ret, cap;
+ u32 cmd;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ return -EINVAL;
+
+ ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
+ if (!ret)
+ ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+
+ return ret;
+}
+EXPORT_SYMBOL(pcix_get_mmrbc);
+
+/**
+ * pcix_set_mmrbc - set PCI-X maximum memory read byte count
+ * @dev: PCI device to query
+ * @mmrbc: maximum memory read count in bytes
+ * valid values are 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum memory read byte count; some bridges have
+ * errata that prevent this.
+ */
+int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
+{
+ int cap, err = -EINVAL;
+ u32 stat, cmd, v, o;
+
+ if (mmrbc < 512 || mmrbc > 4096 || (mmrbc & (mmrbc-1)))
+ goto out;
+
+ v = ffs(mmrbc) - 10;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ goto out;
+
+ err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
+ if (err)
+ goto out;
+
+ if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
+ return -E2BIG;
+
+ err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
+ if (err)
+ goto out;
+
+ o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
+ if (o != v) {
+ if (v > o && dev->bus &&
+ (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
+ return -EIO;
+
+ cmd &= ~PCI_X_CMD_MAX_READ;
+ cmd |= v << 2;
+ err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
+ }
+out:
+ return err;
+}
+EXPORT_SYMBOL(pcix_set_mmrbc);
+
+/**
+ * pcie_get_readrq - get PCI Express read request size
+ * @dev: PCI device to query
+ *
+ * Returns maximum memory read request in bytes
+ * or appropriate error value.
+ */
+int pcie_get_readrq(struct pci_dev *dev)
+{
+ int ret, cap;
+ u16 ctl;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!cap)
+ return -EINVAL;
+
+ ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (!ret)
+ ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+
+ return ret;
+}
+EXPORT_SYMBOL(pcie_get_readrq);
+
+/**
+ * pcie_set_readrq - set PCI Express maximum memory read request
+ * @dev: PCI device to query
+ * @count: maximum memory read count in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum memory read request size.
+ */
+int pcie_set_readrq(struct pci_dev *dev, int rq)
+{
+ int cap, err = -EINVAL;
+ u16 ctl, v;
+
+ if (rq < 128 || rq > 4096 || (rq & (rq-1)))
+ goto out;
+
+ v = (ffs(rq) - 8) << 12;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!cap)
+ goto out;
+
+ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ goto out;
+
+ if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
+ ctl &= ~PCI_EXP_DEVCTL_READRQ;
+ ctl |= v;
+ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(pcie_set_readrq);
+
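A driver that wants larger (or bounded) read transactions would typically call these helpers from its probe path. A hedged usage sketch; the 2048/512-byte values are illustrative only:

#include <linux/pci.h>

static void example_tune_reads(struct pci_dev *pdev)
{
	/* PCI-X: ask for 2048-byte reads; -E2BIG/-EIO just mean "leave it" */
	if (pcix_set_mmrbc(pdev, 2048) < 0)
		dev_dbg(&pdev->dev, "keeping default PCI-X MMRBC\n");

	/* PCIe: cap the maximum read request size at 512 bytes */
	if (pcie_set_readrq(pdev, 512) < 0)
		dev_dbg(&pdev->dev, "keeping default PCIe read request size\n");
}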
+/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
@@ -1442,6 +1619,7 @@ EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
+EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_set_dma_mask);
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index 3f37a60a6438..c3bde588aa13 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -4,7 +4,7 @@
config PCIEAER
boolean "Root Port Advanced Error Reporting support"
- depends on PCIEPORTBUS && ACPI
+ depends on PCIEPORTBUS
default y
help
This enables PCI Express Root Port Advanced Error Reporting
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 15a4f40d520b..8da3bd8455a8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_PCIEAER) += aerdriver.o
-aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o aerdrv_acpi.o
+aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
+aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index db6ad8e763ac..ad90a01b0dfc 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -148,16 +148,15 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
{
struct aer_rpc *rpc;
- if (!(rpc = kmalloc(sizeof(struct aer_rpc),
+ if (!(rpc = kzalloc(sizeof(struct aer_rpc),
GFP_KERNEL)))
return NULL;
- memset(rpc, 0, sizeof(struct aer_rpc));
/*
* Initialize Root lock access, e_lock, to Root Error Status Reg,
* Root Error ID Reg, and Root error producer/consumer index.
*/
- rpc->e_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&rpc->e_lock);
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 5cca394d5999..c7ad68b6c6d6 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -19,10 +19,6 @@
#define AER_ERROR_MASK 0x001fffff
#define AER_ERROR(d) (d & AER_ERROR_MASK)
-#define OSC_METHOD_RUN_SUCCESS 0
-#define OSC_METHOD_NOT_SUPPORTED 1
-#define OSC_METHOD_RUN_FAILURE 2
-
/* Root Error Status Register Bits */
#define ROOT_ERR_STATUS_MASKS 0x0f
@@ -121,6 +117,14 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
-extern int aer_osc_setup(struct pci_dev *dev);
+
+#ifdef CONFIG_ACPI
+extern int aer_osc_setup(struct pcie_device *pciedev);
+#else
+static inline int aer_osc_setup(struct pcie_device *pciedev)
+{
+ return 0;
+}
+#endif
#endif //_AERDRV_H_
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index fa68e89ebec9..1a1eb45a779e 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -20,19 +20,18 @@
/**
* aer_osc_setup - run ACPI _OSC method
+ * @pciedev: pcie_device which AER is being enabled on
*
- * Return:
- * Zero if success. Nonzero for otherwise.
+ * @return: Zero on success. Nonzero otherwise.
*
* Invoked when PCIE bus loads AER service driver. To avoid conflict with
* BIOS AER support requires BIOS to yield AER control to OS native driver.
**/
-int aer_osc_setup(struct pci_dev *dev)
+int aer_osc_setup(struct pcie_device *pciedev)
{
- int retval = OSC_METHOD_RUN_SUCCESS;
- acpi_status status;
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
- struct pci_dev *pdev = dev;
+ acpi_status status = AE_NOT_FOUND;
+ struct pci_dev *pdev = pciedev->port;
+ acpi_handle handle = DEVICE_ACPI_HANDLE(&pdev->dev);
struct pci_bus *parent;
while (!handle) {
@@ -50,19 +49,20 @@ int aer_osc_setup(struct pci_dev *dev)
pdev = parent->self;
}
- if (!handle)
- return OSC_METHOD_NOT_SUPPORTED;
+ if (handle) {
+ pci_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT);
+ status = pci_osc_control_set(handle,
+ OSC_PCI_EXPRESS_AER_CONTROL |
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+ }
- pci_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT);
- status = pci_osc_control_set(handle, OSC_PCI_EXPRESS_AER_CONTROL |
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_FAILURE(status)) {
- if (status == AE_SUPPORT)
- retval = OSC_METHOD_NOT_SUPPORTED;
- else
- retval = OSC_METHOD_RUN_FAILURE;
+ printk(KERN_DEBUG "AER service couldn't init device %s - %s\n",
+ pciedev->device.bus_id,
+ (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
+ "no _OSC support" : "Run ACPI _OSC fails");
+ return -1;
}
- return retval;
+ return 0;
}
-
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 08e13033ced8..92a8469b21ba 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -22,8 +22,6 @@
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
#include <linux/delay.h>
#include "aerdrv.h"
@@ -119,6 +117,21 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
return 0;
}
+int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+
+ pos = pci_find_aer_capability(dev);
+ if (!pos)
+ return -EIO;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+
+ return 0;
+}
+
static int find_device_iter(struct device *device, void *data)
{
struct pci_dev *dev;
@@ -733,20 +746,8 @@ void aer_delete_rootport(struct aer_rpc *rpc)
**/
int aer_init(struct pcie_device *dev)
{
- int status;
-
- /* Run _OSC Method */
- status = aer_osc_setup(dev->port);
-
- if(status != OSC_METHOD_RUN_SUCCESS) {
- printk(KERN_DEBUG "%s: AER service init fails - %s\n",
- __FUNCTION__,
- (status == OSC_METHOD_NOT_SUPPORTED) ?
- "No ACPI _OSC support" : "Run ACPI _OSC fails");
-
- if (!forceload)
- return status;
- }
+ if (aer_osc_setup(dev) && !forceload)
+ return -ENXIO;
return AER_SUCCESS;
}
@@ -755,4 +756,5 @@ EXPORT_SYMBOL_GPL(pci_find_aer_capability);
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+EXPORT_SYMBOL_GPL(pci_cleanup_aer_correct_error_status);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e48fcf089621..34b8dae0d90f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -22,6 +22,18 @@ EXPORT_SYMBOL(pci_root_buses);
LIST_HEAD(pci_devices);
+/*
+ * Some device drivers need to know whether PCI has been initialized.
+ * We treat PCI as uninitialized as long as the global list of PCI
+ * devices (pci_devices) is empty.
+ */
+int no_pci_devices(void)
+{
+ return list_empty(&pci_devices);
+}
+
+EXPORT_SYMBOL(no_pci_devices);
+
#ifdef HAVE_PCI_LEGACY
/**
* pci_create_legacy_files - create legacy I/O port and memory files
@@ -39,7 +51,6 @@ static void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.name = "legacy_io";
b->legacy_io->size = 0xffff;
b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
- b->legacy_io->attr.owner = THIS_MODULE;
b->legacy_io->read = pci_read_legacy_io;
b->legacy_io->write = pci_write_legacy_io;
class_device_create_bin_file(&b->class_dev, b->legacy_io);
@@ -49,7 +60,6 @@ static void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->attr.name = "legacy_mem";
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
- b->legacy_mem->attr.owner = THIS_MODULE;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
class_device_create_bin_file(&b->class_dev, b->legacy_mem);
}
@@ -656,7 +666,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
pcibios_assign_all_busses() ? " " :
" (try 'pci=assign-busses')");
printk(KERN_WARNING "Please report the result to "
- "linux-kernel to fix this permanently\n");
+ "<bk@suse.de> to fix this permanently\n");
}
bus = bus->parent;
}
@@ -702,6 +712,7 @@ static int pci_setup_device(struct pci_dev * dev)
dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+ dev->revision = class & 0xff;
class >>= 8; /* upper 3 bytes */
dev->class = class;
class >>= 8;
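With dev->revision cached at enumeration time, drivers and quirks (several are converted later in this diff) can drop their own config-space read. A one-line sketch; the helper name is hypothetical:

	/* before: pci_read_config_byte(dev, PCI_REVISION_ID, &rev); */
	if (dev->revision >= 0x02)
		apply_revision_specific_quirk(dev);	/* hypothetical helper */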
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 0425a7b7350d..90adc62d07ff 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-
+#include <linux/capability.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include "pci.h"
@@ -480,7 +480,6 @@ static int __init pci_proc_init(void)
__initcall(pci_proc_init);
#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(pci_proc_attach_device);
EXPORT_SYMBOL(pci_proc_detach_bus);
#endif
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 01d8f8a8843c..c559085c89a5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -587,10 +587,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_v
*/
static void __devinit quirk_amd_ioapic(struct pci_dev *dev)
{
- u8 rev;
-
- pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
- if (rev >= 0x02) {
+ if (dev->revision >= 0x02) {
printk(KERN_WARNING "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
printk(KERN_WARNING " : booting with the \"noapic\" option.\n");
}
@@ -610,13 +607,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw );
#define AMD8131_NIOAMODE_BIT 0
static void quirk_amd_8131_ioapic(struct pci_dev *dev)
{
- unsigned char revid, tmp;
+ unsigned char tmp;
if (nr_ioapics == 0)
return;
- pci_read_config_byte(dev, PCI_REVISION_ID, &revid);
- if (revid == AMD8131_revA0 || revid == AMD8131_revB0) {
+ if (dev->revision == AMD8131_revA0 || dev->revision == AMD8131_revB0) {
printk(KERN_INFO "Fixing up AMD8131 IOAPIC mode\n");
pci_read_config_byte( dev, AMD8131_MISC, &tmp);
tmp &= ~(1 << AMD8131_NIOAMODE_BIT);
@@ -627,6 +623,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
#endif /* CONFIG_X86_IO_APIC */
+/*
+ * Some settings of MMRBC can lead to data corruption so block changes.
+ * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
+ */
+static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev)
+{
+ unsigned char revid;
+
+ pci_read_config_byte(dev, PCI_REVISION_ID, &revid);
+ if (dev->subordinate && revid <= 0x12) {
+ printk(KERN_INFO "AMD8131 rev %x detected, disabling PCI-X "
+ "MMRBC\n", revid);
+ dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
/*
* FIXME: it is questionable that quirk_via_acpi
@@ -843,10 +855,8 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
- u8 rev;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
- if (rev != 0x04) /* Only C0 requires this */
+ if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
if (config & (1<<6)) {
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index d087e0817715..dbbcc04abd1a 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -54,6 +54,49 @@ static void pci_disable_rom(struct pci_dev *pdev)
}
/**
+ * pci_get_rom_size - obtain the actual size of the ROM image
+ * @rom: kernel virtual pointer to image of ROM
+ * @size: size of PCI window
+ * return: size of actual ROM image
+ *
+ * Determine the actual length of the ROM image.
+ * The PCI window size could be much larger than the
+ * actual image size.
+ */
+size_t pci_get_rom_size(void __iomem *rom, size_t size)
+{
+ void __iomem *image;
+ int last_image;
+
+ image = rom;
+ do {
+ void __iomem *pds;
+ /* Standard PCI ROMs start out with these bytes 55 AA */
+ if (readb(image) != 0x55)
+ break;
+ if (readb(image + 1) != 0xAA)
+ break;
+ /* get the PCI data structure and check its signature */
+ pds = image + readw(image + 24);
+ if (readb(pds) != 'P')
+ break;
+ if (readb(pds + 1) != 'C')
+ break;
+ if (readb(pds + 2) != 'I')
+ break;
+ if (readb(pds + 3) != 'R')
+ break;
+ last_image = readb(pds + 21) & 0x80;
+ /* this length is reliable */
+ image += readw(pds + 16) * 512;
+ } while (!last_image);
+
+ /* never return a size larger than the PCI resource window */
+ /* there are known ROMs that get the size wrong */
+ return min((size_t)(image - rom), size);
+}
+
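Callers that copy the expansion ROM can now work with the true image length instead of the (often much larger) BAR window. A hedged usage sketch; shadow_copy is a hypothetical destination buffer of at least rom_size bytes:

	size_t rom_size;
	void __iomem *rom = pci_map_rom(pdev, &rom_size);

	if (rom) {
		/* rom_size has already been trimmed to the real image
		 * length via pci_get_rom_size() */
		memcpy_fromio(shadow_copy, rom, rom_size);
		pci_unmap_rom(pdev, rom);
	}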
+/**
* pci_map_rom - map a PCI ROM to kernel space
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
@@ -68,8 +111,6 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
loff_t start;
void __iomem *rom;
- void __iomem *image;
- int last_image;
/*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
@@ -117,33 +158,7 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
* size is much larger than the actual size of the ROM.
* True size is important if the ROM is going to be copied.
*/
- image = rom;
- do {
- void __iomem *pds;
- /* Standard PCI ROMs start out with these bytes 55 AA */
- if (readb(image) != 0x55)
- break;
- if (readb(image + 1) != 0xAA)
- break;
- /* get the PCI data structure and check its signature */
- pds = image + readw(image + 24);
- if (readb(pds) != 'P')
- break;
- if (readb(pds + 1) != 'C')
- break;
- if (readb(pds + 2) != 'I')
- break;
- if (readb(pds + 3) != 'R')
- break;
- last_image = readb(pds + 21) & 0x80;
- /* this length is reliable */
- image += readw(pds + 16) * 512;
- } while (!last_image);
-
- /* never return a size larger than the PCI resource window */
- /* there are known ROMs that get the size wrong */
- *size = min((size_t)(image - rom), *size);
-
+ *size = pci_get_rom_size(rom, *size);
return rom;
}
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index c13232435dc0..c6e79d01ce3d 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -139,12 +139,14 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
}
/**
- * pci_get_bus_and_slot - locate PCI device from a given PCI slot
+ * pci_get_bus_and_slot - locate PCI device from a given PCI bus & slot
* @bus: number of PCI bus on which desired PCI device resides
* @devfn: encodes number of PCI slot in which the desired PCI
* device resides and the logical device number within that slot
* in case of multi-function devices.
*
+ * Note: the bus/slot search is limited to PCI domain (segment) 0.
+ *
* Given a PCI bus and slot/function number, the desired PCI device
* is located in system global list of PCI devices. If the device
* is found, a pointer to its data structure is returned. If no
@@ -157,7 +159,8 @@ struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
struct pci_dev *dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (dev->bus->number == bus && dev->devfn == devfn)
+ if (pci_domain_nr(dev->bus) == 0 &&
+ (dev->bus->number == bus && dev->devfn == devfn))
return dev;
}
return NULL;
@@ -199,7 +202,7 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
* can cause some machines to crash. So here we detect and flag that
* situation and bail out early.
*/
- if (unlikely(list_empty(&pci_devices)))
+ if (unlikely(no_pci_devices()))
return NULL;
down_read(&pci_bus_sem);
n = from ? from->global_list.next : pci_devices.next;
@@ -274,7 +277,7 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
* can cause some machines to crash. So here we detect and flag that
* situation and bail out early.
*/
- if (unlikely(list_empty(&pci_devices)))
+ if (unlikely(no_pci_devices()))
return NULL;
down_read(&pci_bus_sem);
n = from ? from->global_list.next : pci_devices.next;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 5ec297d7a5b4..5e5191ec8de6 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -34,8 +34,6 @@
#define DBG(x...)
#endif
-#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
-
static void pbus_assign_resources_sorted(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -310,7 +308,7 @@ static void pbus_size_io(struct pci_bus *bus)
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
- size = ROUND_UP(size + size1, 4096);
+ size = ALIGN(size + size1, 4096);
if (!size) {
b_res->flags = 0;
return;
@@ -378,11 +376,11 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
if (!align)
min_align = align1;
- else if (ROUND_UP(align + min_align, min_align) < align1)
+ else if (ALIGN(align + min_align, min_align) < align1)
min_align = align1 >> 1;
align += aligns[order];
}
- size = ROUND_UP(size, min_align);
+ size = ALIGN(size, min_align);
if (!size) {
b_res->flags = 0;
return 1;
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 9d37fec27f24..2ac050d7f8cf 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -23,14 +23,14 @@ sys_pciconfig_read(unsigned long bus, unsigned long dfn,
u8 byte;
u16 word;
u32 dword;
- long err, cfg_ret;
+ long err;
+ long cfg_ret;
- err = -EPERM;
if (!capable(CAP_SYS_ADMIN))
- goto error;
+ return -EPERM;
err = -ENODEV;
- dev = pci_find_slot(bus, dfn);
+ dev = pci_get_bus_and_slot(bus, dfn);
if (!dev)
goto error;
@@ -66,7 +66,8 @@ sys_pciconfig_read(unsigned long bus, unsigned long dfn,
case 4:
err = put_user(dword, (unsigned int __user *)buf);
break;
- };
+ }
+ pci_dev_put(dev);
return err;
error:
@@ -83,7 +84,8 @@ error:
case 4:
put_user(-1, (unsigned int __user *)buf);
break;
- };
+ }
+ pci_dev_put(dev);
return err;
}
@@ -101,7 +103,7 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- dev = pci_find_slot(bus, dfn);
+ dev = pci_get_bus_and_slot(bus, dfn);
if (!dev)
return -ENODEV;
@@ -137,8 +139,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
default:
err = -EINVAL;
break;
- };
+ }
unlock_kernel();
-
+ pci_dev_put(dev);
return err;
}
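Unlike pci_find_slot(), pci_get_bus_and_slot() takes a reference on the device it returns, which is why both syscalls above now call pci_dev_put() on every exit path. The general pattern a caller is expected to follow:

	struct pci_dev *dev = pci_get_bus_and_slot(bus, devfn);

	if (!dev)
		return -ENODEV;
	/* ... use dev ... */
	pci_dev_put(dev);	/* drop the reference taken by pci_get_*() */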
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 35f88649d3b7..c0c77f82d051 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -180,14 +180,15 @@ config TCIC
PCMCIA cards are plugged into. If unsure, say N.
config PCMCIA_M8XX
- tristate "MPC8xx PCMCIA support"
- depends on PCMCIA && PPC && 8xx
- select PCCARD_IODYN
- help
- Say Y here to include support for PowerPC 8xx series PCMCIA
- controller.
-
- This driver is also available as a module called m8xx_pcmcia.
+ tristate "MPC8xx PCMCIA support"
+ depends on PCMCIA && PPC && 8xx
+ select PCCARD_IODYN
+ select PCCARD_NONSTATIC
+ help
+ Say Y here to include support for PowerPC 8xx series PCMCIA
+ controller.
+
+ This driver is also available as a module called m8xx_pcmcia.
config HD64465_PCMCIA
tristate "HD64465 host bridge support"
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 50cad3a59a6c..7c93a108f9b8 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -651,6 +651,7 @@ static int pccardd(void *__skt)
add_wait_queue(&skt->thread_wait, &wait);
complete(&skt->thread_done);
+ set_freezable();
for (;;) {
unsigned long flags;
unsigned int events;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 143c6efc478a..a99607142fc8 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1127,6 +1127,34 @@ static int pcmcia_bus_uevent(struct device *dev, char **envp, int num_envp,
#endif
+/************************ runtime PM support ***************************/
+
+static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_resume(struct device *dev);
+
+static int runtime_suspend(struct device *dev)
+{
+ int rc;
+
+ down(&dev->sem);
+ rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+ up(&dev->sem);
+ if (!rc)
+ dev->power.power_state.event = PM_EVENT_SUSPEND;
+ return rc;
+}
+
+static void runtime_resume(struct device *dev)
+{
+ int rc;
+
+ down(&dev->sem);
+ rc = pcmcia_dev_resume(dev);
+ up(&dev->sem);
+ if (!rc)
+ dev->power.power_state.event = PM_EVENT_ON;
+}
+
/************************ per-device sysfs output ***************************/
#define pcmcia_device_attr(field, test, format) \
@@ -1173,9 +1201,9 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
return -EINVAL;
if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
- ret = dpm_runtime_suspend(dev, PMSG_SUSPEND);
+ ret = runtime_suspend(dev);
else if (p_dev->suspended && !strncmp(buf, "on", 2))
- dpm_runtime_resume(dev);
+ runtime_resume(dev);
return ret ? ret : count;
}
@@ -1312,10 +1340,10 @@ static int pcmcia_bus_suspend_callback(struct device *dev, void * _data)
struct pcmcia_socket *skt = _data;
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- if (p_dev->socket != skt)
+ if (p_dev->socket != skt || p_dev->suspended)
return 0;
- return dpm_runtime_suspend(dev, PMSG_SUSPEND);
+ return runtime_suspend(dev);
}
static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
@@ -1323,10 +1351,10 @@ static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
struct pcmcia_socket *skt = _data;
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- if (p_dev->socket != skt)
+ if (p_dev->socket != skt || !p_dev->suspended)
return 0;
- dpm_runtime_resume(dev);
+ runtime_resume(dev);
return 0;
}
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 9721ed7bf502..b01985498460 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -10,7 +10,7 @@
* Further fixes, v2.6 kernel port
* <marcelo.tosatti@cyclades.com>
*
- * Some fixes, additions (C) 2005 Montavista Software, Inc.
+ * Some fixes, additions (C) 2005-2007 Montavista Software, Inc.
* <vbordug@ru.mvista.com>
*
* "The ExCA standard specifies that socket controllers should provide
@@ -40,10 +40,6 @@
#include <linux/fcntl.h>
#include <linux/string.h>
-#include <asm/io.h>
-#include <asm/bitops.h>
-#include <asm/system.h>
-
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -51,11 +47,18 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/system.h>
+#include <asm/time.h>
#include <asm/mpc8xx.h>
#include <asm/8xx_immap.h>
#include <asm/irq.h>
+#include <asm/fs_pd.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
#include <pcmcia/version.h>
#include <pcmcia/cs_types.h>
@@ -110,7 +113,7 @@ MODULE_LICENSE("Dual MPL/GPL");
#define CONFIG_PCMCIA_SLOT_B
#endif
-#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
+#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
@@ -143,30 +146,20 @@ MODULE_LICENSE("Dual MPL/GPL");
/* ------------------------------------------------------------------------- */
-#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
-#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
-#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
-
-#define PCMCIA_SCHLVL PCMCIA_INTERRUPT /* Status Change Interrupt Level */
-
+#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
+#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
+#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
/* ------------------------------------------------------------------------- */
-/* 2.4.x and newer has this always in HZ */
-#define M8XX_BUSFREQ ((((bd_t *)&(__res))->bi_busfreq))
-
-static int pcmcia_schlvl = PCMCIA_SCHLVL;
+static int pcmcia_schlvl;
static DEFINE_SPINLOCK(events_lock);
-
#define PCMCIA_SOCKET_KEY_5V 1
#define PCMCIA_SOCKET_KEY_LV 2
/* look up table for pgcrx registers */
-static u32 *m8xx_pgcrx[2] = {
- &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pgcra,
- &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pgcrb
-};
+static u32 *m8xx_pgcrx[2];
/*
* This structure is used to address each window in the PCMCIA controller.
@@ -176,8 +169,8 @@ static u32 *m8xx_pgcrx[2] = {
*/
struct pcmcia_win {
- u32 br;
- u32 or;
+ u32 br;
+ u32 or;
};
/*
@@ -221,22 +214,27 @@ struct pcmcia_win {
/* we keep one lookup table per socket to check flags */
-#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
+#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
struct event_table {
u32 regbit;
u32 eventbit;
};
+static const char driver_name[] = "m8xx-pcmcia";
+
struct socket_info {
- void (*handler)(void *info, u32 events);
- void *info;
+ void (*handler) (void *info, u32 events);
+ void *info;
u32 slot;
+ pcmconf8xx_t *pcmcia;
+ u32 bus_freq;
+ int hwirq;
socket_state_t state;
struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
- struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
+ struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
struct event_table events[PCMCIA_EVENTS_MAX];
struct pcmcia_socket socket;
};
@@ -250,8 +248,7 @@ static struct socket_info socket[PCMCIA_SOCKETS_NO];
#define M8XX_SIZES_NO 32
-static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
-{
+static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = {
0x00000001, 0x00000002, 0x00000008, 0x00000004,
0x00000080, 0x00000040, 0x00000010, 0x00000020,
0x00008000, 0x00004000, 0x00001000, 0x00002000,
@@ -267,7 +264,7 @@ static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
static irqreturn_t m8xx_interrupt(int irq, void *dev);
-#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
+#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
/* ------------------------------------------------------------------------- */
/* board specific stuff: */
@@ -291,8 +288,9 @@ static int voltage_set(int slot, int vcc, int vpp)
{
u32 reg = 0;
- switch(vcc) {
- case 0: break;
+ switch (vcc) {
+ case 0:
+ break;
case 33:
reg |= BCSR1_PCVCTL4;
break;
@@ -303,11 +301,12 @@ static int voltage_set(int slot, int vcc, int vpp)
return 1;
}
- switch(vpp) {
- case 0: break;
+ switch (vpp) {
+ case 0:
+ break;
case 33:
case 50:
- if(vcc == vpp)
+ if (vcc == vpp)
reg |= BCSR1_PCVCTL6;
else
return 1;
@@ -318,25 +317,29 @@ static int voltage_set(int slot, int vcc, int vpp)
return 1;
}
- if(!((vcc == 50) || (vcc == 0)))
+ if (!((vcc == 50) || (vcc == 0)))
return 1;
/* first, turn off all power */
- out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7));
+ out_be32(((u32 *) RPX_CSR_ADDR),
+ in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
+ BCSR1_PCVCTL5 |
+ BCSR1_PCVCTL6 |
+ BCSR1_PCVCTL7));
/* enable new powersettings */
- out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) | reg);
+ out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
return 0;
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_) /* No hardware to enable */
+#define hardware_disable(_slot_) /* No hardware to disable */
-#endif /* CONFIG_RPXCLASSIC */
+#endif /* CONFIG_RPXCLASSIC */
/* FADS Boards from Motorola */
@@ -348,43 +351,45 @@ static int voltage_set(int slot, int vcc, int vpp)
{
u32 reg = 0;
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= BCSR1_PCCVCC0;
- break;
- case 50:
- reg |= BCSR1_PCCVCC1;
- break;
- default:
- return 1;
+ switch (vcc) {
+ case 0:
+ break;
+ case 33:
+ reg |= BCSR1_PCCVCC0;
+ break;
+ case 50:
+ reg |= BCSR1_PCCVCC1;
+ break;
+ default:
+ return 1;
}
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= BCSR1_PCCVPP1;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= BCSR1_PCCVPP0;
- else
- return 1;
- default:
+ switch (vpp) {
+ case 0:
+ break;
+ case 33:
+ case 50:
+ if (vcc == vpp)
+ reg |= BCSR1_PCCVPP1;
+ else
return 1;
+ break;
+ case 120:
+ if ((vcc == 33) || (vcc == 50))
+ reg |= BCSR1_PCCVPP0;
+ else
+ return 1;
+ default:
+ return 1;
}
/* first, turn off all power */
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK));
+ out_be32((u32 *) BCSR1,
+ in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK |
+ BCSR1_PCCVPP_MASK));
/* enable new powersettings */
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | reg);
+ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg);
return 0;
}
@@ -393,12 +398,12 @@ static int voltage_set(int slot, int vcc, int vpp)
static void hardware_enable(int slot)
{
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~BCSR1_PCCEN);
+ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN);
}
static void hardware_disable(int slot)
{
- out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | BCSR1_PCCEN);
+ out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN);
}
#endif
@@ -408,78 +413,21 @@ static void hardware_disable(int slot)
#if defined(CONFIG_MPC885ADS)
#define PCMCIA_BOARD_MSG "MPC885ADS"
+#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-static int voltage_set(int slot, int vcc, int vpp)
+static inline void hardware_enable(int slot)
{
- u32 reg = 0;
- unsigned *bcsr_io;
-
- bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
-
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= BCSR1_PCCVCC0;
- break;
- case 50:
- reg |= BCSR1_PCCVCC1;
- break;
- default:
- goto out_unmap;
- }
-
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= BCSR1_PCCVPP1;
- else
- goto out_unmap;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= BCSR1_PCCVPP0;
- else
- goto out_unmap;
- default:
- goto out_unmap;
- }
-
- /* first, turn off all power */
- out_be32(bcsr_io, in_be32(bcsr_io) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK));
-
- /* enable new powersettings */
- out_be32(bcsr_io, in_be32(bcsr_io) | reg);
-
- iounmap(bcsr_io);
- return 0;
-
-out_unmap:
- iounmap(bcsr_io);
- return 1;
+ m8xx_pcmcia_ops.hw_ctrl(slot, 1);
}
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-
-static void hardware_enable(int slot)
+static inline void hardware_disable(int slot)
{
- unsigned *bcsr_io;
-
- bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
- out_be32(bcsr_io, in_be32(bcsr_io) & ~BCSR1_PCCEN);
- iounmap(bcsr_io);
+ m8xx_pcmcia_ops.hw_ctrl(slot, 0);
}
-static void hardware_disable(int slot)
+static inline int voltage_set(int slot, int vcc, int vpp)
{
- unsigned *bcsr_io;
-
- bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
- out_be32(bcsr_io, in_be32(bcsr_io) | BCSR1_PCCEN);
- iounmap(bcsr_io);
+ return m8xx_pcmcia_ops.voltage_set(slot, vcc, vpp);
}
#endif
@@ -495,52 +443,53 @@ static int voltage_set(int slot, int vcc, int vpp)
{
u8 reg = 0;
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= CSR2_VCC_33;
- break;
- case 50:
- reg |= CSR2_VCC_50;
- break;
- default:
- return 1;
+ switch (vcc) {
+ case 0:
+ break;
+ case 33:
+ reg |= CSR2_VCC_33;
+ break;
+ case 50:
+ reg |= CSR2_VCC_50;
+ break;
+ default:
+ return 1;
}
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= CSR2_VPP_VCC;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= CSR2_VPP_12;
- else
- return 1;
- default:
+ switch (vpp) {
+ case 0:
+ break;
+ case 33:
+ case 50:
+ if (vcc == vpp)
+ reg |= CSR2_VPP_VCC;
+ else
return 1;
+ break;
+ case 120:
+ if ((vcc == 33) || (vcc == 50))
+ reg |= CSR2_VPP_12;
+ else
+ return 1;
+ default:
+ return 1;
}
/* first, turn off all power */
- out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
+ out_8((u8 *) MBX_CSR2_ADDR,
+ in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
/* enable new powersettings */
- out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) | reg);
+ out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
return 0;
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
+#define hardware_enable(_slot_) /* No hardware to enable */
+#define hardware_disable(_slot_) /* No hardware to disable */
-#endif /* CONFIG_MBX */
+#endif /* CONFIG_MBX */
#if defined(CONFIG_PRxK)
#include <asm/cpld.h>
@@ -554,43 +503,46 @@ static int voltage_set(int slot, int vcc, int vpp)
u8 regread;
cpld_regs *ccpld = get_cpld();
- switch(vcc) {
- case 0:
- break;
- case 33:
- reg |= PCMCIA_VCC_33;
- break;
- case 50:
- reg |= PCMCIA_VCC_50;
- break;
- default:
- return 1;
+ switch (vcc) {
+ case 0:
+ break;
+ case 33:
+ reg |= PCMCIA_VCC_33;
+ break;
+ case 50:
+ reg |= PCMCIA_VCC_50;
+ break;
+ default:
+ return 1;
}
- switch(vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if(vcc == vpp)
- reg |= PCMCIA_VPP_VCC;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= PCMCIA_VPP_12;
- else
- return 1;
- default:
+ switch (vpp) {
+ case 0:
+ break;
+ case 33:
+ case 50:
+ if (vcc == vpp)
+ reg |= PCMCIA_VPP_VCC;
+ else
return 1;
+ break;
+ case 120:
+ if ((vcc == 33) || (vcc == 50))
+ reg |= PCMCIA_VPP_12;
+ else
+ return 1;
+ default:
+ return 1;
}
reg = reg >> (slot << 2);
regread = in_8(&ccpld->fpga_pc_ctl);
- if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
+ if (reg !=
+ (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
/* enable new powersettings */
- regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2));
+ regread =
+ regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >>
+ (slot << 2));
out_8(&ccpld->fpga_pc_ctl, reg | regread);
msleep(100);
}
@@ -599,52 +551,10 @@ static int voltage_set(int slot, int vcc, int vpp)
}
#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
-
-#endif /* CONFIG_PRxK */
-
-static void m8xx_shutdown(void)
-{
- u32 m, i;
- struct pcmcia_win *w;
+#define hardware_enable(_slot_) /* No hardware to enable */
+#define hardware_disable(_slot_) /* No hardware to disable */
- for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
- w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
-
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, M8XX_PCMCIA_MASK(i));
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) & ~M8XX_PCMCIA_MASK(i));
-
- /* turn off interrupt and disable CxOE */
- out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
-
- /* turn off memory windows */
- for(m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
- out_be32(&w->or, 0); /* set to not valid */
- w++;
- }
-
- /* turn off voltage */
- voltage_set(i, 0, 0);
-
- /* disable external hardware */
- hardware_disable(i);
- }
-
- free_irq(pcmcia_schlvl, NULL);
-}
-
-static struct device_driver m8xx_driver = {
- .name = "m8xx-pcmcia",
- .bus = &platform_bus_type,
- .suspend = pcmcia_socket_dev_suspend,
- .resume = pcmcia_socket_dev_resume,
-};
-
-static struct platform_device m8xx_device = {
- .name = "m8xx-pcmcia",
- .id = 0,
-};
+#endif /* CONFIG_PRxK */
static u32 pending_events[PCMCIA_SOCKETS_NO];
static DEFINE_SPINLOCK(pending_event_lock);
@@ -654,24 +564,25 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
struct socket_info *s;
struct event_table *e;
unsigned int i, events, pscr, pipr, per;
+ pcmconf8xx_t *pcmcia = socket[0].pcmcia;
dprintk("Interrupt!\n");
/* get interrupt sources */
- pscr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr);
- pipr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr);
- per = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per);
+ pscr = in_be32(&pcmcia->pcmc_pscr);
+ pipr = in_be32(&pcmcia->pcmc_pipr);
+ per = in_be32(&pcmcia->pcmc_per);
- for(i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
s = &socket[i];
e = &s->events[0];
events = 0;
- while(e->regbit) {
- if(pscr & e->regbit)
+ while (e->regbit) {
+ if (pscr & e->regbit)
events |= e->eventbit;
- e++;
+ e++;
}
/*
@@ -679,13 +590,11 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
* not too nice done,
* we depend on that CD2 is the bit to the left of CD1...
*/
- if(events & SS_DETECT)
- if(((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
- (pipr & M8XX_PCMCIA_CD1(i)))
- {
+ if (events & SS_DETECT)
+ if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
+ (pipr & M8XX_PCMCIA_CD1(i))) {
events &= ~SS_DETECT;
}
-
#ifdef PCMCIA_GLITCHY_CD
/*
* I've experienced CD problems with my ADS board.
@@ -693,24 +602,23 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
* real change of Card detection.
*/
- if((events & SS_DETECT) &&
- ((pipr &
- (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
- (s->state.Vcc | s->state.Vpp)) {
+ if ((events & SS_DETECT) &&
+ ((pipr &
+ (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
+ (s->state.Vcc | s->state.Vpp)) {
events &= ~SS_DETECT;
/*printk( "CD glitch workaround - CD = 0x%08x!\n",
- (pipr & (M8XX_PCMCIA_CD2(i)
- | M8XX_PCMCIA_CD1(i))));*/
+ (pipr & (M8XX_PCMCIA_CD2(i)
+ | M8XX_PCMCIA_CD1(i)))); */
}
#endif
/* call the handler */
dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, "
- "pipr = 0x%08x\n",
- i, events, pscr, pipr);
+ "pipr = 0x%08x\n", i, events, pscr, pipr);
- if(events) {
+ if (events) {
spin_lock(&pending_event_lock);
pending_events[i] |= events;
spin_unlock(&pending_event_lock);
@@ -724,7 +632,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
per &= ~M8XX_PCMCIA_RDY_L(0);
per &= ~M8XX_PCMCIA_RDY_L(1);
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, per);
+ out_be32(&pcmcia->pcmc_per, per);
if (events)
pcmcia_parse_events(&socket[i].socket, events);
@@ -732,7 +640,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
}
/* clear the interrupt sources */
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, pscr);
+ out_be32(&pcmcia->pcmc_pscr, pscr);
dprintk("Interrupt done.\n");
@@ -743,21 +651,21 @@ static u32 m8xx_get_graycode(u32 size)
{
u32 k;
- for(k = 0; k < M8XX_SIZES_NO; k++)
- if(m8xx_size_to_gray[k] == size)
+ for (k = 0; k < M8XX_SIZES_NO; k++)
+ if (m8xx_size_to_gray[k] == size)
break;
- if((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
+ if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
k = -1;
return k;
}
-static u32 m8xx_get_speed(u32 ns, u32 is_io)
+static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
{
u32 reg, clocks, psst, psl, psht;
- if(!ns) {
+ if (!ns) {
/*
* We get called with IO maps setup to 0ns
@@ -765,10 +673,10 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io)
* They should be 255ns.
*/
- if(is_io)
+ if (is_io)
ns = 255;
else
- ns = 100; /* fast memory if 0 */
+ ns = 100; /* fast memory if 0 */
}
/*
@@ -779,23 +687,23 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io)
/* how we want to adjust the timing - in percent */
-#define ADJ 180 /* 80 % longer accesstime - to be sure */
+#define ADJ 180 /* 80 % longer accesstime - to be sure */
- clocks = ((M8XX_BUSFREQ / 1000) * ns) / 1000;
- clocks = (clocks * ADJ) / (100*1000);
- if(clocks >= PCMCIA_BMT_LIMIT) {
- printk( "Max access time limit reached\n");
- clocks = PCMCIA_BMT_LIMIT-1;
+ clocks = ((bus_freq / 1000) * ns) / 1000;
+ clocks = (clocks * ADJ) / (100 * 1000);
+ if (clocks >= PCMCIA_BMT_LIMIT) {
+ printk("Max access time limit reached\n");
+ clocks = PCMCIA_BMT_LIMIT - 1;
}
- psst = clocks / 7; /* setup time */
- psht = clocks / 7; /* hold time */
- psl = (clocks * 5) / 7; /* strobe length */
+ psst = clocks / 7; /* setup time */
+ psht = clocks / 7; /* hold time */
+ psl = (clocks * 5) / 7; /* strobe length */
psst += clocks - (psst + psht + psl);
- reg = psst << 12;
- reg |= psl << 7;
+ reg = psst << 12;
+ reg |= psl << 7;
reg |= psht << 16;
return reg;
@@ -806,11 +714,12 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
int lsock = container_of(sock, struct socket_info, socket)->slot;
struct socket_info *s = &socket[lsock];
unsigned int pipr, reg;
+ pcmconf8xx_t *pcmcia = s->pcmcia;
- pipr = in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pipr);
+ pipr = in_be32(&pcmcia->pcmc_pipr);
- *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
- | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
+ *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
+ | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
*value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
if (s->state.flags & SS_IOCARD)
@@ -894,16 +803,16 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
/* read out VS1 and VS2 */
reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
- >> M8XX_PCMCIA_VS_SHIFT(lsock);
+ >> M8XX_PCMCIA_VS_SHIFT(lsock);
- if(socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
- switch(reg) {
+ if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
+ switch (reg) {
case 1:
*value |= SS_3VCARD;
- break; /* GND, NC - 3.3V only */
+ break; /* GND, NC - 3.3V only */
case 2:
*value |= SS_XVCARD;
- break; /* NC. GND - x.xV only */
+ break; /* NC. GND - x.xV only */
};
}
@@ -911,27 +820,29 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
return 0;
}
-static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
+static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
{
int lsock = container_of(sock, struct socket_info, socket)->slot;
struct socket_info *s = &socket[lsock];
struct event_table *e;
unsigned int reg;
unsigned long flags;
+ pcmconf8xx_t *pcmcia = socket[0].pcmcia;
- dprintk( "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
- "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
- state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
+ dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
+ "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
+ state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
/* First, set voltage - bail out if invalid */
- if(voltage_set(lsock, state->Vcc, state->Vpp))
+ if (voltage_set(lsock, state->Vcc, state->Vpp))
return -EINVAL;
/* Take care of reset... */
- if(state->flags & SS_RESET)
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
+ if (state->flags & SS_RESET)
+ out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
else
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
/* ... and output enable. */
@@ -943,10 +854,11 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
no pullups are present -> the cards act weird.
So right now the buffers are enabled if the power is on. */
- if(state->Vcc || state->Vpp)
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
+ if (state->Vcc || state->Vpp)
+ out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
else
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
/*
* We'd better turn off interrupts before
@@ -963,17 +875,17 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
e = &s->events[0];
reg = 0;
- if(state->csc_mask & SS_DETECT) {
+ if (state->csc_mask & SS_DETECT) {
e->eventbit = SS_DETECT;
reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
| M8XX_PCMCIA_CD1(lsock));
e++;
}
- if(state->flags & SS_IOCARD) {
+ if (state->flags & SS_IOCARD) {
/*
* I/O card
*/
- if(state->csc_mask & SS_STSCHG) {
+ if (state->csc_mask & SS_STSCHG) {
e->eventbit = SS_STSCHG;
reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
e++;
@@ -981,8 +893,10 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
/*
* If io_irq is non-zero we should enable irq.
*/
- if(state->io_irq) {
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(state->io_irq) << 24);
+ if (state->io_irq) {
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) |
+ mk_int_int_mask(s->hwirq) << 24);
/*
* Strange thing here:
* The manual does not tell us which interrupt
@@ -993,33 +907,32 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
* have to be cleared in PSCR in the interrupt handler.
*/
reg |= M8XX_PCMCIA_RDY_L(lsock);
- }
- else
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
- }
- else {
+ } else
+ out_be32(M8XX_PGCRX(lsock),
+ in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
+ } else {
/*
* Memory card
*/
- if(state->csc_mask & SS_BATDEAD) {
+ if (state->csc_mask & SS_BATDEAD) {
e->eventbit = SS_BATDEAD;
reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
e++;
}
- if(state->csc_mask & SS_BATWARN) {
+ if (state->csc_mask & SS_BATWARN) {
e->eventbit = SS_BATWARN;
reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
e++;
}
/* What should I trigger on - low/high,raise,fall? */
- if(state->csc_mask & SS_READY) {
+ if (state->csc_mask & SS_READY) {
e->eventbit = SS_READY;
- reg |= e->regbit = 0; //??
+ reg |= e->regbit = 0; //??
e++;
}
}
- e->regbit = 0; /* terminate list */
+ e->regbit = 0; /* terminate list */
/*
* Clear the status changed .
@@ -1027,7 +940,7 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
* Writing ones will clear the bits.
*/
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr, reg);
+ out_be32(&pcmcia->pcmc_pscr, reg);
/*
* Write the mask.
@@ -1036,15 +949,10 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
* Ones will enable the interrupt.
*/
- /*
- reg |= ((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per
- & M8XX_PCMCIA_MASK(lsock);
- */
-
- reg |= in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) &
- (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
-
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per, reg);
+ reg |=
+ in_be32(&pcmcia->
+ pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
+ out_be32(&pcmcia->pcmc_per, reg);
spin_unlock_irqrestore(&events_lock, flags);
@@ -1062,66 +970,68 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
struct socket_info *s = &socket[lsock];
struct pcmcia_win *w;
unsigned int reg, winnr;
+ pcmconf8xx_t *pcmcia = s->pcmcia;
#define M8XX_SIZE (io->stop - io->start + 1)
#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
- dprintk( "SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
- io->speed, io->start, io->stop);
+ dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, "
+ "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
+ io->speed, io->start, io->stop);
if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
|| (io->stop > 0xffff) || (io->stop < io->start))
return -EINVAL;
- if((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
+ if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
return -EINVAL;
- if(io->flags & MAP_ACTIVE) {
+ if (io->flags & MAP_ACTIVE) {
- dprintk( "io->flags & MAP_ACTIVE\n");
+ dprintk("io->flags & MAP_ACTIVE\n");
winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
- + (lsock * PCMCIA_IO_WIN_NO) + io->map;
+ + (lsock * PCMCIA_IO_WIN_NO) + io->map;
/* setup registers */
- w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
w += winnr;
- out_be32(&w->or, 0); /* turn off window first */
+ out_be32(&w->or, 0); /* turn off window first */
out_be32(&w->br, M8XX_BASE);
reg <<= 27;
- reg |= M8XX_PCMCIA_POR_IO |(lsock << 2);
+ reg |= M8XX_PCMCIA_POR_IO | (lsock << 2);
- reg |= m8xx_get_speed(io->speed, 1);
+ reg |= m8xx_get_speed(io->speed, 1, s->bus_freq);
- if(io->flags & MAP_WRPROT)
+ if (io->flags & MAP_WRPROT)
reg |= M8XX_PCMCIA_POR_WRPROT;
- if(io->flags & (MAP_16BIT | MAP_AUTOSZ))
+ /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */
+ if (io->flags & MAP_16BIT)
reg |= M8XX_PCMCIA_POR_16BIT;
- if(io->flags & MAP_ACTIVE)
+ if (io->flags & MAP_ACTIVE)
reg |= M8XX_PCMCIA_POR_VALID;
out_be32(&w->or, reg);
dprintk("Socket %u: Mapped io window %u at %#8.8x, "
- "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
+ "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
} else {
/* shutdown IO window */
winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
- + (lsock * PCMCIA_IO_WIN_NO) + io->map;
+ + (lsock * PCMCIA_IO_WIN_NO) + io->map;
/* setup registers */
- w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
w += winnr;
- out_be32(&w->or, 0); /* turn off window */
- out_be32(&w->br, 0); /* turn off base address */
+ out_be32(&w->or, 0); /* turn off window */
+ out_be32(&w->br, 0); /* turn off base address */
dprintk("Socket %u: Unmapped io window %u at %#8.8x, "
"OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
@@ -1129,35 +1039,35 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
/* copy the struct and modify the copy */
s->io_win[io->map] = *io;
- s->io_win[io->map].flags &= (MAP_WRPROT
- | MAP_16BIT
- | MAP_ACTIVE);
+ s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
dprintk("SetIOMap exit\n");
return 0;
}
-static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
+static int m8xx_set_mem_map(struct pcmcia_socket *sock,
+ struct pccard_mem_map *mem)
{
int lsock = container_of(sock, struct socket_info, socket)->slot;
struct socket_info *s = &socket[lsock];
struct pcmcia_win *w;
struct pccard_mem_map *old;
unsigned int reg, winnr;
+ pcmconf8xx_t *pcmcia = s->pcmcia;
- dprintk( "SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
- mem->speed, mem->static_start, mem->card_start);
+ dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
+ "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
+ mem->speed, mem->static_start, mem->card_start);
if ((mem->map >= PCMCIA_MEM_WIN_NO)
-// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
+// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
|| (mem->card_start >= 0x04000000)
- || (mem->static_start & 0xfff) /* 4KByte resolution */
- || (mem->card_start & 0xfff))
+ || (mem->static_start & 0xfff) /* 4KByte resolution */
+ || (mem->card_start & 0xfff))
return -EINVAL;
- if((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
- printk( "Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
+ if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
+ printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
return -EINVAL;
}
reg <<= 27;
@@ -1166,50 +1076,47 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
/* Setup the window in the pcmcia controller */
- w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
+ w = (void *)&pcmcia->pcmc_pbr0;
w += winnr;
reg |= lsock << 2;
- reg |= m8xx_get_speed(mem->speed, 0);
+ reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq);
- if(mem->flags & MAP_ATTRIB)
- reg |= M8XX_PCMCIA_POR_ATTRMEM;
+ if (mem->flags & MAP_ATTRIB)
+ reg |= M8XX_PCMCIA_POR_ATTRMEM;
- if(mem->flags & MAP_WRPROT)
+ if (mem->flags & MAP_WRPROT)
reg |= M8XX_PCMCIA_POR_WRPROT;
- if(mem->flags & MAP_16BIT)
+ if (mem->flags & MAP_16BIT)
reg |= M8XX_PCMCIA_POR_16BIT;
- if(mem->flags & MAP_ACTIVE)
+ if (mem->flags & MAP_ACTIVE)
reg |= M8XX_PCMCIA_POR_VALID;
out_be32(&w->or, reg);
dprintk("Socket %u: Mapped memory window %u at %#8.8x, "
- "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
+ "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
- if(mem->flags & MAP_ACTIVE) {
+ if (mem->flags & MAP_ACTIVE) {
/* get the new base address */
mem->static_start = PCMCIA_MEM_WIN_BASE +
- (PCMCIA_MEM_WIN_SIZE * winnr)
- + mem->card_start;
+ (PCMCIA_MEM_WIN_SIZE * winnr)
+ + mem->card_start;
}
dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
- mem->speed, mem->static_start, mem->card_start);
+ "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
+ mem->speed, mem->static_start, mem->card_start);
/* copy the struct and modify the copy */
old = &s->mem_win[mem->map];
*old = *mem;
- old->flags &= (MAP_ATTRIB
- | MAP_WRPROT
- | MAP_16BIT
- | MAP_ACTIVE);
+ old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
return 0;
}
@@ -1220,7 +1127,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
pccard_io_map io = { 0, 0, 0, 0, 1 };
pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
- dprintk( "sock_init(%d)\n", s);
+ dprintk("sock_init(%d)\n", s);
m8xx_set_socket(sock, &dead_socket);
for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
@@ -1236,111 +1143,195 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
}
-static int m8xx_suspend(struct pcmcia_socket *sock)
+static int m8xx_sock_suspend(struct pcmcia_socket *sock)
{
return m8xx_set_socket(sock, &dead_socket);
}
static struct pccard_operations m8xx_services = {
- .init = m8xx_sock_init,
- .suspend = m8xx_suspend,
+ .init = m8xx_sock_init,
+ .suspend = m8xx_sock_suspend,
.get_status = m8xx_get_status,
.set_socket = m8xx_set_socket,
.set_io_map = m8xx_set_io_map,
.set_mem_map = m8xx_set_mem_map,
};
-static int __init m8xx_init(void)
+static int __init m8xx_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
{
struct pcmcia_win *w;
- unsigned int i,m;
+ unsigned int i, m, hwirq;
+ pcmconf8xx_t *pcmcia;
+ int status;
+ struct device_node *np = ofdev->node;
pcmcia_info("%s\n", version);
- if (driver_register(&m8xx_driver))
- return -1;
+ pcmcia = of_iomap(np, 0);
+ if (pcmcia == NULL)
+ return -EINVAL;
+
+ pcmcia_schlvl = irq_of_parse_and_map(np, 0);
+ if (pcmcia_schlvl < 0)
+ return -EINVAL;
+ hwirq = irq_map[pcmcia_schlvl].hwirq;
+
+ m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
+ m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
- " with IRQ %u.\n", pcmcia_schlvl);
+ " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq);
/* Configure Status change interrupt */
- if(request_irq(pcmcia_schlvl, m8xx_interrupt, 0,
- "m8xx_pcmcia", NULL)) {
+ if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
+ driver_name, socket)) {
pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
pcmcia_schlvl);
return -1;
}
- w = (void *) &((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pbr0;
-
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_pscr,
- M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1));
+ w = (void *)&pcmcia->pcmc_pbr0;
- out_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per,
- in_be32(&((immap_t *)IMAP_ADDR)->im_pcmcia.pcmc_per) &
- ~(M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1)));
+ out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
+ clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
-/* connect interrupt and disable CxOE */
+ /* connect interrupt and disable CxOE */
- out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(pcmcia_schlvl) << 16));
- out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(pcmcia_schlvl) << 16));
+ out_be32(M8XX_PGCRX(0),
+ M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
+ out_be32(M8XX_PGCRX(1),
+ M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
-/* intialize the fixed memory windows */
+ /* initialize the fixed memory windows */
- for(i = 0; i < PCMCIA_SOCKETS_NO; i++){
- for(m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+ for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
- (PCMCIA_MEM_WIN_SIZE
- * (m + i * PCMCIA_MEM_WIN_NO)));
+ (PCMCIA_MEM_WIN_SIZE
+ * (m + i * PCMCIA_MEM_WIN_NO)));
- out_be32(&w->or, 0); /* set to not valid */
+ out_be32(&w->or, 0); /* set to not valid */
w++;
}
}
-/* turn off voltage */
+ /* turn off voltage */
voltage_set(0, 0, 0);
voltage_set(1, 0, 0);
-/* Enable external hardware */
+ /* Enable external hardware */
hardware_enable(0);
hardware_enable(1);
- platform_device_register(&m8xx_device);
-
- for (i = 0 ; i < PCMCIA_SOCKETS_NO; i++) {
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
socket[i].slot = i;
socket[i].socket.owner = THIS_MODULE;
- socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
+ socket[i].socket.features =
+ SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
socket[i].socket.irq_mask = 0x000;
socket[i].socket.map_size = 0x1000;
socket[i].socket.io_offset = 0;
- socket[i].socket.pci_irq = i ? 7 : 9;
+ socket[i].socket.pci_irq = pcmcia_schlvl;
socket[i].socket.ops = &m8xx_services;
- socket[i].socket.resource_ops = &pccard_iodyn_ops;
+ socket[i].socket.resource_ops = &pccard_nonstatic_ops;
socket[i].socket.cb_dev = NULL;
- socket[i].socket.dev.parent = &m8xx_device.dev;
+ socket[i].socket.dev.parent = &ofdev->dev;
+ socket[i].pcmcia = pcmcia;
+ socket[i].bus_freq = ppc_proc_freq;
+ socket[i].hwirq = hwirq;
+
}
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
- pcmcia_register_socket(&socket[i].socket);
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+ status = pcmcia_register_socket(&socket[i].socket);
+ if (status < 0)
+ pcmcia_error("Socket register failed\n");
+ }
return 0;
}
-static void __exit m8xx_exit(void)
+static int m8xx_remove(struct of_device *ofdev)
{
- int i;
+ u32 m, i;
+ struct pcmcia_win *w;
+ pcmconf8xx_t *pcmcia = socket[0].pcmcia;
+
+ for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
+ w = (void *)&pcmcia->pcmc_pbr0;
+ out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i));
+ out_be32(&pcmcia->pcmc_per,
+ in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
+
+ /* turn off interrupt and disable CxOE */
+ out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
+
+ /* turn off memory windows */
+ for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
+ out_be32(&w->or, 0); /* set to not valid */
+ w++;
+ }
+
+ /* turn off voltage */
+ voltage_set(i, 0, 0);
+
+ /* disable external hardware */
+ hardware_disable(i);
+ }
for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
pcmcia_unregister_socket(&socket[i].socket);
- m8xx_shutdown();
+ free_irq(pcmcia_schlvl, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return pcmcia_socket_dev_suspend(&pdev->dev, state);
+}
+
+static int m8xx_resume(struct platform_device *pdev)
+{
+ return pcmcia_socket_dev_resume(&pdev->dev);
+}
+#else
+#define m8xx_suspend NULL
+#define m8xx_resume NULL
+#endif
- platform_device_unregister(&m8xx_device);
- driver_unregister(&m8xx_driver);
+static struct of_device_id m8xx_pcmcia_match[] = {
+ {
+ .type = "pcmcia",
+ .compatible = "fsl,pq-pcmcia",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
+
+static struct of_platform_driver m8xx_pcmcia_driver = {
+ .name = driver_name,
+ .match_table = m8xx_pcmcia_match,
+ .probe = m8xx_probe,
+ .remove = m8xx_remove,
+ .suspend = m8xx_suspend,
+ .resume = m8xx_resume,
+};
+
+static int __init m8xx_init(void)
+{
+ return of_register_platform_driver(&m8xx_pcmcia_driver);
+}
+
+static void __exit m8xx_exit(void)
+{
+ of_unregister_platform_driver(&m8xx_pcmcia_driver);
}
module_init(m8xx_init);
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index a2bb46526b56..b4409002b7f8 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -283,7 +283,9 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off
return (ret);
}
-static ssize_t pccard_show_cis(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t pccard_show_cis(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
unsigned int size = 0x200;
@@ -311,7 +313,9 @@ static ssize_t pccard_show_cis(struct kobject *kobj, char *buf, loff_t off, size
return (count);
}
-static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t pccard_store_cis(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pcmcia_socket *s = to_socket(container_of(kobj, struct device, kobj));
cisdump_t *cis;
@@ -366,7 +370,7 @@ static struct device_attribute *pccard_socket_attributes[] = {
};
static struct bin_attribute pccard_cis_attr = {
- .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR, .owner = THIS_MODULE},
+ .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
.size = 0x200,
.read = pccard_show_cis,
.write = pccard_store_cis,
diff --git a/drivers/pnp/Kconfig b/drivers/pnp/Kconfig
index 1959cef8e9de..821933f9aa57 100644
--- a/drivers/pnp/Kconfig
+++ b/drivers/pnp/Kconfig
@@ -2,11 +2,9 @@
# Plug and Play configuration
#
-menu "Plug and Play support"
- depends on HAS_IOMEM
-
-config PNP
+menuconfig PNP
bool "Plug and Play support"
+ depends on HAS_IOMEM
depends on ISA || ACPI
---help---
Plug and Play (PnP) is a standard for peripherals which allows those
@@ -22,15 +20,15 @@ config PNP
If unsure, say Y.
+if PNP
+
config PNP_DEBUG
bool "PnP Debug Messages"
- depends on PNP
help
Say Y if you want the Plug and Play Layer to print debug messages.
This is useful if you are developing a PnP driver or troubleshooting.
comment "Protocols"
- depends on PNP
source "drivers/pnp/isapnp/Kconfig"
@@ -38,5 +36,4 @@ source "drivers/pnp/pnpbios/Kconfig"
source "drivers/pnp/pnpacpi/Kconfig"
-endmenu
-
+endif # PNP
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 3e20b1cc7778..8e7b2dd38810 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -35,12 +35,11 @@ void *pnp_alloc(long size)
{
void *result;
- result = kmalloc(size, GFP_KERNEL);
+ result = kzalloc(size, GFP_KERNEL);
if (!result){
printk(KERN_ERR "pnp: Out of Memory\n");
return NULL;
}
- memset(result, 0, size);
return result;
}
diff --git a/drivers/pnp/isapnp/Kconfig b/drivers/pnp/isapnp/Kconfig
index 578651eeb4b0..f1ef36673ad4 100644
--- a/drivers/pnp/isapnp/Kconfig
+++ b/drivers/pnp/isapnp/Kconfig
@@ -3,7 +3,7 @@
#
config ISAPNP
bool "ISA Plug and Play support"
- depends on PNP && ISA
+ depends on ISA
help
Say Y here if you would like support for ISA Plug and Play devices.
Some information is in <file:Documentation/isapnp.txt>.
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index a0b158704ca1..914d00c423ad 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -370,8 +370,6 @@ static int __init isapnp_read_tag(unsigned char *type, unsigned short *size)
#if 0
printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type, *size);
#endif
- if (type == 0) /* wrong type */
- return -1;
if (*type == 0xff && *size == 0xffff) /* probably invalid data */
return -1;
return 0;
diff --git a/drivers/pnp/pnpbios/Kconfig b/drivers/pnp/pnpbios/Kconfig
index fab848cae89d..b986d9fa3b9a 100644
--- a/drivers/pnp/pnpbios/Kconfig
+++ b/drivers/pnp/pnpbios/Kconfig
@@ -3,7 +3,7 @@
#
config PNPBIOS
bool "Plug and Play BIOS support (EXPERIMENTAL)"
- depends on PNP && ISA && X86 && EXPERIMENTAL
+ depends on ISA && X86 && EXPERIMENTAL
default n
---help---
Linux uses the PNPBIOS as defined in "Plug and Play BIOS
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 3a201b77b963..ed112ee16012 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -147,7 +147,7 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
info->location_id, info->serial, info->capabilities);
envp[i] = NULL;
- value = call_usermodehelper (argv [0], argv, envp, 0);
+ value = call_usermodehelper (argv [0], argv, envp, UMH_WAIT_EXEC);
kfree (buf);
kfree (envp);
return 0;
@@ -160,6 +160,7 @@ static int pnp_dock_thread(void * unused)
{
static struct pnp_docking_station_info now;
int docked = -1, d = 0;
+ set_freezable();
while (!unloading)
{
int status;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index ab9c3e5a7c1d..3f6e176e6ea1 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -16,6 +16,7 @@ config POWER_SUPPLY_DEBUG
config PDA_POWER
tristate "Generic PDA/phone power driver"
+ depends on !S390
help
Say Y here to enable generic power driver for PDAs and phones with
one or two external power supplies (AC/USB) connected to main and
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 042bd950d036..39a90a6f0f80 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -48,8 +48,6 @@ static void find_main_battery(void)
}
if (!main_battery)
main_battery = batm;
-
- return;
}
static int calculate_time(int status)
@@ -218,7 +216,6 @@ static void apm_battery_apm_get_power_status(struct apm_power_info *info)
}
up(&power_supply_class->sem);
- return;
}
static int __init apm_battery_init(void)
@@ -232,7 +229,6 @@ static int __init apm_battery_init(void)
static void __exit apm_battery_exit(void)
{
apm_get_power_status = NULL;
- return;
}
module_init(apm_battery_init);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 00e1ea6f1de2..be7021ee3611 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -254,8 +254,6 @@ static void ds2760_battery_update_status(struct ds2760_device_info *di)
if (di->charge_status != old_charge_status)
power_supply_changed(&di->bat);
-
- return;
}
static void ds2760_battery_work(struct work_struct *work)
@@ -268,8 +266,6 @@ static void ds2760_battery_work(struct work_struct *work)
ds2760_battery_update_status(di);
queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval);
-
- return;
}
#define to_ds2760_device_info(x) container_of((x), struct ds2760_device_info, \
@@ -283,8 +279,6 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
cancel_delayed_work(&di->monitor_work);
queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
-
- return;
}
static int ds2760_battery_get_property(struct power_supply *psy,
@@ -457,7 +451,6 @@ static int __init ds2760_battery_init(void)
static void __exit ds2760_battery_exit(void)
{
platform_driver_unregister(&ds2760_battery_driver);
- return;
}
module_init(ds2760_battery_init);
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 878684df7667..c998e68d060f 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -341,7 +341,6 @@ static void __exit olpc_bat_exit(void)
power_supply_unregister(&olpc_bat);
power_supply_unregister(&olpc_ac);
platform_device_unregister(bat_pdev);
- return;
}
module_init(olpc_bat_init);
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 4e1eb040e148..c058f285be1a 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -97,36 +97,31 @@ static void update_charger(void)
dev_dbg(dev, "charger off\n");
pdata->set_charge(0);
}
-
- return;
}
-static void supply_timer_func(unsigned long irq)
+static void supply_timer_func(unsigned long power_supply_ptr)
{
- if (ac_irq && irq == ac_irq->start)
- power_supply_changed(&pda_power_supplies[0]);
- else if (usb_irq && irq == usb_irq->start)
- power_supply_changed(&pda_power_supplies[1]);
- return;
+ void *power_supply = (void *)power_supply_ptr;
+
+ power_supply_changed(power_supply);
}
-static void charger_timer_func(unsigned long irq)
+static void charger_timer_func(unsigned long power_supply_ptr)
{
update_charger();
/* Okay, charger set. Now wait a bit before notifying supplicants,
* charge power should stabilize. */
- supply_timer.data = irq;
+ supply_timer.data = power_supply_ptr;
mod_timer(&supply_timer,
jiffies + msecs_to_jiffies(pdata->wait_for_charger));
- return;
}
-static irqreturn_t power_changed_isr(int irq, void *unused)
+static irqreturn_t power_changed_isr(int irq, void *power_supply)
{
/* Wait a bit before reading ac/usb line status and setting charger,
* because ac/usb status readings may lag from irq. */
- charger_timer.data = irq;
+ charger_timer.data = (unsigned long)power_supply;
mod_timer(&charger_timer,
jiffies + msecs_to_jiffies(pdata->wait_for_status));
return IRQ_HANDLED;
@@ -252,7 +247,6 @@ static int __init pda_power_init(void)
static void __exit pda_power_exit(void)
{
platform_driver_unregister(&pda_power_pdrv);
- return;
}
module_init(pda_power_init);
diff --git a/drivers/power/pmu_battery.c b/drivers/power/pmu_battery.c
index 2fea4af0e40a..60a8cf3a0431 100644
--- a/drivers/power/pmu_battery.c
+++ b/drivers/power/pmu_battery.c
@@ -203,8 +203,6 @@ static void __exit pmu_bat_exit(void)
}
power_supply_unregister(&pmu_ac);
platform_device_unregister(bat_pdev);
-
- return;
}
module_init(pmu_bat_init);
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index e87ea5156755..a63b75cf75e2 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -46,8 +46,6 @@ static void power_supply_changed_work(struct work_struct *work)
power_supply_update_leds(psy);
kobject_uevent(&psy->dev->kobj, KOBJ_CHANGE);
-
- return;
}
void power_supply_changed(struct power_supply *psy)
@@ -55,8 +53,6 @@ void power_supply_changed(struct power_supply *psy)
dev_dbg(psy->dev, "%s\n", __FUNCTION__);
schedule_work(&psy->changed_work);
-
- return;
}
int power_supply_am_i_supplied(struct power_supply *psy)
@@ -129,7 +125,6 @@ void power_supply_unregister(struct power_supply *psy)
power_supply_remove_triggers(psy);
power_supply_remove_attrs(psy);
device_unregister(psy->dev);
- return;
}
static int __init power_supply_class_init(void)
@@ -147,7 +142,6 @@ static int __init power_supply_class_init(void)
static void __exit power_supply_class_exit(void)
{
class_destroy(power_supply_class);
- return;
}
EXPORT_SYMBOL_GPL(power_supply_changed);
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c
index 7232490bb595..7f8f3590b02e 100644
--- a/drivers/power/power_supply_leds.c
+++ b/drivers/power/power_supply_leds.c
@@ -40,8 +40,6 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->full_trig, LED_OFF);
break;
}
-
- return;
}
static int power_supply_create_bat_triggers(struct power_supply *psy)
@@ -97,7 +95,6 @@ static void power_supply_remove_bat_triggers(struct power_supply *psy)
kfree(psy->full_trig_name);
kfree(psy->charging_trig_name);
kfree(psy->charging_full_trig_name);
- return;
}
/* Generated power specific LEDs triggers. */
@@ -115,8 +112,6 @@ static void power_supply_update_gen_leds(struct power_supply *psy)
led_trigger_event(psy->online_trig, LED_FULL);
else
led_trigger_event(psy->online_trig, LED_OFF);
-
- return;
}
static int power_supply_create_gen_triggers(struct power_supply *psy)
@@ -145,7 +140,6 @@ static void power_supply_remove_gen_triggers(struct power_supply *psy)
{
led_trigger_unregister_simple(psy->online_trig);
kfree(psy->online_trig_name);
- return;
}
/* Choice what triggers to create&update. */
@@ -156,7 +150,6 @@ void power_supply_update_leds(struct power_supply *psy)
power_supply_update_bat_leds(psy);
else
power_supply_update_gen_leds(psy);
- return;
}
int power_supply_create_triggers(struct power_supply *psy)
@@ -172,5 +165,4 @@ void power_supply_remove_triggers(struct power_supply *psy)
power_supply_remove_bat_triggers(psy);
else
power_supply_remove_gen_triggers(psy);
- return;
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index c07d4258d347..c7c4574729b1 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -176,8 +176,6 @@ void power_supply_remove_attrs(struct power_supply *psy)
for (i = 0; i < psy->num_properties; i++)
device_remove_file(psy->dev,
&power_supply_attrs[psy->properties[i]]);
-
- return;
}
static char *kstruprdup(const char *str, gfp_t gfp)
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index e251d1c1171c..746031de2195 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,3 +1,6 @@
obj-$(CONFIG_PS3_VUART) += vuart.o
-obj-$(CONFIG_PS3_PS3AV) += ps3av.o ps3av_cmd.o
+obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
+ps3av_mod-objs += ps3av.o ps3av_cmd.o
+obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
obj-$(CONFIG_PS3_SYS_MANAGER) += sys-manager.o
+obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 1393e64335f9..85e21614f868 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -1,32 +1,30 @@
/*
- * Copyright (C) 2006 Sony Computer Entertainment Inc.
- * Copyright 2006, 2007 Sony Corporation
+ * PS3 AV backend support.
*
- * AV backend support for PS3
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published
- * by the Free Software Foundation; version 2 of the License.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/kernel.h>
#include <linux/ioctl.h>
#include <asm/firmware.h>
-#include <asm/lv1call.h>
#include <asm/ps3av.h>
#include <asm/ps3.h>
@@ -39,13 +37,12 @@ static int timeout = 5000; /* in msec ( 5 sec ) */
module_param(timeout, int, 0644);
static struct ps3av {
- int available;
struct mutex mutex;
struct work_struct work;
struct completion done;
struct workqueue_struct *wq;
int open_count;
- struct ps3_vuart_port_device *dev;
+ struct ps3_system_bus_device *dev;
int region;
struct ps3av_pkt_av_get_hw_conf av_hw_conf;
@@ -55,11 +52,13 @@ static struct ps3av {
u32 audio_port;
int ps3av_mode;
int ps3av_mode_old;
-} ps3av;
-
-static struct ps3_vuart_port_device ps3av_dev = {
- .match_id = PS3_MATCH_ID_AV_SETTINGS
-};
+ union {
+ struct ps3av_reply_hdr reply_hdr;
+ u8 raw[PS3AV_BUF_SIZE];
+ } recv_buf;
+ void (*flip_ctl)(int on, void *data);
+ void *flip_data;
+} *ps3av;
/* color space */
#define YUV444 PS3AV_CMD_VIDEO_CS_YUV444_8
@@ -169,7 +168,7 @@ static int ps3av_parse_event_packet(const struct ps3av_reply_hdr *hdr)
if (hdr->cid & PS3AV_EVENT_CMD_MASK) {
table = ps3av_search_cmd_table(hdr->cid, PS3AV_EVENT_CMD_MASK);
if (table)
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"recv event packet cid:%08x port:0x%x size:%d\n",
hdr->cid, ps3av_event_get_port_id(hdr->cid),
hdr->size);
@@ -182,6 +181,41 @@ static int ps3av_parse_event_packet(const struct ps3av_reply_hdr *hdr)
return 0;
}
+
+#define POLLING_INTERVAL 25 /* in msec */
+
+static int ps3av_vuart_write(struct ps3_system_bus_device *dev,
+ const void *buf, unsigned long size)
+{
+ int error;
+ dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+ error = ps3_vuart_write(dev, buf, size);
+ dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
+ return error ? error : size;
+}
+
+static int ps3av_vuart_read(struct ps3_system_bus_device *dev, void *buf,
+ unsigned long size, int timeout)
+{
+ int error;
+ int loopcnt = 0;
+
+ dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+ timeout = (timeout + POLLING_INTERVAL - 1) / POLLING_INTERVAL;
+ while (loopcnt++ <= timeout) {
+ error = ps3_vuart_read(dev, buf, size);
+ if (!error)
+ return size;
+ if (error != -EAGAIN) {
+ printk(KERN_ERR "%s: ps3_vuart_read failed %d\n",
+ __func__, error);
+ return error;
+ }
+ msleep(POLLING_INTERVAL);
+ }
+ return -EWOULDBLOCK;
+}
+
static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
struct ps3av_reply_hdr *recv_buf, int write_len,
int read_len)
@@ -190,13 +224,13 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
u32 cmd;
int event;
- if (!ps3av.available)
+ if (!ps3av)
return -ENODEV;
/* send pkt */
- res = ps3av_vuart_write(ps3av.dev, send_buf, write_len);
+ res = ps3av_vuart_write(ps3av->dev, send_buf, write_len);
if (res < 0) {
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"%s: ps3av_vuart_write() failed (result=%d)\n",
__func__, res);
return res;
@@ -206,20 +240,20 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
cmd = send_buf->cid;
do {
/* read header */
- res = ps3av_vuart_read(ps3av.dev, recv_buf, PS3AV_HDR_SIZE,
+ res = ps3av_vuart_read(ps3av->dev, recv_buf, PS3AV_HDR_SIZE,
timeout);
if (res != PS3AV_HDR_SIZE) {
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"%s: ps3av_vuart_read() failed (result=%d)\n",
__func__, res);
return res;
}
/* read body */
- res = ps3av_vuart_read(ps3av.dev, &recv_buf->cid,
+ res = ps3av_vuart_read(ps3av->dev, &recv_buf->cid,
recv_buf->size, timeout);
if (res < 0) {
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"%s: ps3av_vuart_read() failed (result=%d)\n",
__func__, res);
return res;
@@ -230,7 +264,7 @@ static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf,
} while (event);
if ((cmd | PS3AV_REPLY_BIT) != recv_buf->cid) {
- dev_dbg(&ps3av_dev.core, "%s: reply err (result=%x)\n",
+ dev_dbg(&ps3av->dev->core, "%s: reply err (result=%x)\n",
__func__, recv_buf->cid);
return -EINVAL;
}
@@ -245,7 +279,7 @@ static int ps3av_process_reply_packet(struct ps3av_send_hdr *cmd_buf,
int return_len;
if (recv_buf->version != PS3AV_VERSION) {
- dev_dbg(&ps3av_dev.core, "reply_packet invalid version:%x\n",
+ dev_dbg(&ps3av->dev->core, "reply_packet invalid version:%x\n",
recv_buf->version);
return -EFAULT;
}
@@ -267,16 +301,11 @@ int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size,
struct ps3av_send_hdr *buf)
{
int res = 0;
- static union {
- struct ps3av_reply_hdr reply_hdr;
- u8 raw[PS3AV_BUF_SIZE];
- } recv_buf;
-
u32 *table;
- BUG_ON(!ps3av.available);
+ BUG_ON(!ps3av);
- mutex_lock(&ps3av.mutex);
+ mutex_lock(&ps3av->mutex);
table = ps3av_search_cmd_table(cid, PS3AV_CID_MASK);
BUG_ON(!table);
@@ -288,7 +317,7 @@ int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size,
ps3av_set_hdr(cid, send_len, buf);
/* send packet via vuart */
- res = ps3av_send_cmd_pkt(buf, &recv_buf.reply_hdr, send_len,
+ res = ps3av_send_cmd_pkt(buf, &ps3av->recv_buf.reply_hdr, send_len,
usr_buf_size);
if (res < 0) {
printk(KERN_ERR
@@ -298,7 +327,7 @@ int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size,
}
/* process reply packet */
- res = ps3av_process_reply_packet(buf, &recv_buf.reply_hdr,
+ res = ps3av_process_reply_packet(buf, &ps3av->recv_buf.reply_hdr,
usr_buf_size);
if (res < 0) {
printk(KERN_ERR "%s: put_return_status() failed (result=%d)\n",
@@ -306,11 +335,11 @@ int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size,
goto err;
}
- mutex_unlock(&ps3av.mutex);
+ mutex_unlock(&ps3av->mutex);
return 0;
err:
- mutex_unlock(&ps3av.mutex);
+ mutex_unlock(&ps3av->mutex);
printk(KERN_ERR "%s: failed cid:%x res:%d\n", __func__, cid, res);
return res;
}
@@ -319,11 +348,11 @@ static int ps3av_set_av_video_mute(u32 mute)
{
int i, num_of_av_port, res;
- num_of_av_port = ps3av.av_hw_conf.num_of_hdmi +
- ps3av.av_hw_conf.num_of_avmulti;
+ num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
+ ps3av->av_hw_conf.num_of_avmulti;
/* video mute on */
for (i = 0; i < num_of_av_port; i++) {
- res = ps3av_cmd_av_video_mute(1, &ps3av.av_port[i], mute);
+ res = ps3av_cmd_av_video_mute(1, &ps3av->av_port[i], mute);
if (res < 0)
return -1;
}
@@ -335,13 +364,13 @@ static int ps3av_set_video_disable_sig(void)
{
int i, num_of_hdmi_port, num_of_av_port, res;
- num_of_hdmi_port = ps3av.av_hw_conf.num_of_hdmi;
- num_of_av_port = ps3av.av_hw_conf.num_of_hdmi +
- ps3av.av_hw_conf.num_of_avmulti;
+ num_of_hdmi_port = ps3av->av_hw_conf.num_of_hdmi;
+ num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
+ ps3av->av_hw_conf.num_of_avmulti;
/* tv mute */
for (i = 0; i < num_of_hdmi_port; i++) {
- res = ps3av_cmd_av_tv_mute(ps3av.av_port[i],
+ res = ps3av_cmd_av_tv_mute(ps3av->av_port[i],
PS3AV_CMD_MUTE_ON);
if (res < 0)
return -1;
@@ -350,11 +379,11 @@ static int ps3av_set_video_disable_sig(void)
/* video mute on */
for (i = 0; i < num_of_av_port; i++) {
- res = ps3av_cmd_av_video_disable_sig(ps3av.av_port[i]);
+ res = ps3av_cmd_av_video_disable_sig(ps3av->av_port[i]);
if (res < 0)
return -1;
if (i < num_of_hdmi_port) {
- res = ps3av_cmd_av_tv_mute(ps3av.av_port[i],
+ res = ps3av_cmd_av_tv_mute(ps3av->av_port[i],
PS3AV_CMD_MUTE_OFF);
if (res < 0)
return -1;
@@ -369,17 +398,17 @@ static int ps3av_set_audio_mute(u32 mute)
{
int i, num_of_av_port, num_of_opt_port, res;
- num_of_av_port = ps3av.av_hw_conf.num_of_hdmi +
- ps3av.av_hw_conf.num_of_avmulti;
- num_of_opt_port = ps3av.av_hw_conf.num_of_spdif;
+ num_of_av_port = ps3av->av_hw_conf.num_of_hdmi +
+ ps3av->av_hw_conf.num_of_avmulti;
+ num_of_opt_port = ps3av->av_hw_conf.num_of_spdif;
for (i = 0; i < num_of_av_port; i++) {
- res = ps3av_cmd_av_audio_mute(1, &ps3av.av_port[i], mute);
+ res = ps3av_cmd_av_audio_mute(1, &ps3av->av_port[i], mute);
if (res < 0)
return -1;
}
for (i = 0; i < num_of_opt_port; i++) {
- res = ps3av_cmd_audio_mute(1, &ps3av.opt_port[i], mute);
+ res = ps3av_cmd_audio_mute(1, &ps3av->opt_port[i], mute);
if (res < 0)
return -1;
}
@@ -394,40 +423,40 @@ int ps3av_set_audio_mode(u32 ch, u32 fs, u32 word_bits, u32 format, u32 source)
struct ps3av_pkt_audio_mode audio_mode;
u32 len = 0;
- num_of_audio = ps3av.av_hw_conf.num_of_hdmi +
- ps3av.av_hw_conf.num_of_avmulti +
- ps3av.av_hw_conf.num_of_spdif;
+ num_of_audio = ps3av->av_hw_conf.num_of_hdmi +
+ ps3av->av_hw_conf.num_of_avmulti +
+ ps3av->av_hw_conf.num_of_spdif;
avb_param.num_of_video_pkt = 0;
avb_param.num_of_audio_pkt = PS3AV_AVB_NUM_AUDIO; /* always 0 */
avb_param.num_of_av_video_pkt = 0;
- avb_param.num_of_av_audio_pkt = ps3av.av_hw_conf.num_of_hdmi;
+ avb_param.num_of_av_audio_pkt = ps3av->av_hw_conf.num_of_hdmi;
- vid = video_mode_table[ps3av.ps3av_mode].vid;
+ vid = video_mode_table[ps3av->ps3av_mode].vid;
/* audio mute */
ps3av_set_audio_mute(PS3AV_CMD_MUTE_ON);
/* audio inactive */
- res = ps3av_cmd_audio_active(0, ps3av.audio_port);
+ res = ps3av_cmd_audio_active(0, ps3av->audio_port);
if (res < 0)
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"ps3av_cmd_audio_active OFF failed\n");
/* audio_pkt */
for (i = 0; i < num_of_audio; i++) {
- ps3av_cmd_set_audio_mode(&audio_mode, ps3av.av_port[i], ch, fs,
- word_bits, format, source);
- if (i < ps3av.av_hw_conf.num_of_hdmi) {
+ ps3av_cmd_set_audio_mode(&audio_mode, ps3av->av_port[i], ch,
+ fs, word_bits, format, source);
+ if (i < ps3av->av_hw_conf.num_of_hdmi) {
/* hdmi only */
len += ps3av_cmd_set_av_audio_param(&avb_param.buf[len],
- ps3av.av_port[i],
+ ps3av->av_port[i],
&audio_mode, vid);
}
/* audio_mode pkt should be sent separately */
res = ps3av_cmd_audio_mode(&audio_mode);
if (res < 0)
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"ps3av_cmd_audio_mode failed, port:%x\n", i);
}
@@ -435,15 +464,16 @@ int ps3av_set_audio_mode(u32 ch, u32 fs, u32 word_bits, u32 format, u32 source)
len += offsetof(struct ps3av_pkt_avb_param, buf);
res = ps3av_cmd_avb_param(&avb_param, len);
if (res < 0)
- dev_dbg(&ps3av_dev.core, "ps3av_cmd_avb_param failed\n");
+ dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
/* audio mute */
ps3av_set_audio_mute(PS3AV_CMD_MUTE_OFF);
/* audio active */
- res = ps3av_cmd_audio_active(1, ps3av.audio_port);
+ res = ps3av_cmd_audio_active(1, ps3av->audio_port);
if (res < 0)
- dev_dbg(&ps3av_dev.core, "ps3av_cmd_audio_active ON failed\n");
+ dev_dbg(&ps3av->dev->core,
+ "ps3av_cmd_audio_active ON failed\n");
return 0;
}
@@ -456,7 +486,7 @@ static int ps3av_set_videomode(void)
ps3av_set_av_video_mute(PS3AV_CMD_MUTE_ON);
/* wake up ps3avd to do the actual video mode setting */
- queue_work(ps3av.wq, &ps3av.work);
+ queue_work(ps3av->wq, &ps3av->work);
return 0;
}
@@ -473,8 +503,8 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
avb_param.num_of_video_pkt = PS3AV_AVB_NUM_VIDEO; /* num of head */
avb_param.num_of_audio_pkt = 0;
- avb_param.num_of_av_video_pkt = ps3av.av_hw_conf.num_of_hdmi +
- ps3av.av_hw_conf.num_of_avmulti;
+ avb_param.num_of_av_video_pkt = ps3av->av_hw_conf.num_of_hdmi +
+ ps3av->av_hw_conf.num_of_avmulti;
avb_param.num_of_av_audio_pkt = 0;
/* video signal off */
@@ -484,21 +514,21 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
if (id & PS3AV_MODE_HDCP_OFF) {
res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_HDCP_OFF);
if (res == PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
- dev_dbg(&ps3av_dev.core, "Not supported\n");
+ dev_dbg(&ps3av->dev->core, "Not supported\n");
else if (res)
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"ps3av_cmd_av_hdmi_mode failed\n");
} else if (old_id & PS3AV_MODE_HDCP_OFF) {
res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_MODE_NORMAL);
if (res < 0 && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE)
- dev_dbg(&ps3av_dev.core,
+ dev_dbg(&ps3av->dev->core,
"ps3av_cmd_av_hdmi_mode failed\n");
}
/* video_pkt */
for (i = 0; i < avb_param.num_of_video_pkt; i++)
len += ps3av_cmd_set_video_mode(&avb_param.buf[len],
- ps3av.head[i], video_mode->vid,
+ ps3av->head[i], video_mode->vid,
video_mode->fmt, id);
/* av_video_pkt */
for (i = 0; i < avb_param.num_of_av_video_pkt; i++) {
@@ -507,12 +537,12 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
else
av_video_cs = video_mode->cs;
#ifndef PS3AV_HDMI_YUV
- if (ps3av.av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 ||
- ps3av.av_port[i] == PS3AV_CMD_AVPORT_HDMI_1)
+ if (ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 ||
+ ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_1)
av_video_cs = RGB8; /* use RGB for HDMI */
#endif
len += ps3av_cmd_set_av_video_cs(&avb_param.buf[len],
- ps3av.av_port[i],
+ ps3av->av_port[i],
video_mode->vid, av_video_cs,
video_mode->aspect, id);
}
@@ -524,7 +554,7 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
"%s: Command failed. Please try your request again. \n",
__func__);
else if (res)
- dev_dbg(&ps3av_dev.core, "ps3av_cmd_avb_param failed\n");
+ dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n");
msleep(1500);
/* av video mute */
@@ -533,8 +563,8 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
static void ps3avd(struct work_struct *work)
{
- ps3av_set_videomode_cont(ps3av.ps3av_mode, ps3av.ps3av_mode_old);
- complete(&ps3av.done);
+ ps3av_set_videomode_cont(ps3av->ps3av_mode, ps3av->ps3av_mode_old);
+ complete(&ps3av->done);
}
static int ps3av_vid2table_id(int vid)
@@ -601,7 +631,7 @@ static int ps3av_hdmi_get_vid(struct ps3av_info_monitor *info)
return vid;
}
- if (ps3av.region & PS3AV_REGION_60)
+ if (ps3av->region & PS3AV_REGION_60)
vid = PS3AV_DEFAULT_HDMI_VID_REG_60;
else
vid = PS3AV_DEFAULT_HDMI_VID_REG_50;
@@ -643,16 +673,16 @@ static int ps3av_auto_videomode(struct ps3av_pkt_av_get_hw_conf *av_hw_conf,
vid = PS3AV_DEFAULT_DVI_VID;
} else if (vid == -1) {
/* no HDMI interface or HDMI is off */
- if (ps3av.region & PS3AV_REGION_60)
+ if (ps3av->region & PS3AV_REGION_60)
vid = PS3AV_DEFAULT_AVMULTI_VID_REG_60;
else
vid = PS3AV_DEFAULT_AVMULTI_VID_REG_50;
- if (ps3av.region & PS3AV_REGION_RGB)
+ if (ps3av->region & PS3AV_REGION_RGB)
rgb = PS3AV_MODE_RGB;
} else if (boot) {
/* HDMI: using DEFAULT HDMI_VID while booting up */
info = &monitor_info.info;
- if (ps3av.region & PS3AV_REGION_60) {
+ if (ps3av->region & PS3AV_REGION_60) {
if (info->res_60.res_bits & PS3AV_RESBIT_720x480P)
vid = PS3AV_DEFAULT_HDMI_VID_REG_60;
else if (info->res_50.res_bits & PS3AV_RESBIT_720x576P)
@@ -715,14 +745,14 @@ int ps3av_set_video_mode(u32 id, int boot)
size = ARRAY_SIZE(video_mode_table);
if ((id & PS3AV_MODE_MASK) > size - 1 || id < 0) {
- dev_dbg(&ps3av_dev.core, "%s: error id :%d\n", __func__, id);
+ dev_dbg(&ps3av->dev->core, "%s: error id :%d\n", __func__, id);
return -EINVAL;
}
/* auto mode */
option = id & ~PS3AV_MODE_MASK;
if ((id & PS3AV_MODE_MASK) == 0) {
- id = ps3av_auto_videomode(&ps3av.av_hw_conf, boot);
+ id = ps3av_auto_videomode(&ps3av->av_hw_conf, boot);
if (id < 1) {
printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
return -EINVAL;
@@ -731,11 +761,11 @@ int ps3av_set_video_mode(u32 id, int boot)
}
/* set videomode */
- wait_for_completion(&ps3av.done);
- ps3av.ps3av_mode_old = ps3av.ps3av_mode;
- ps3av.ps3av_mode = id;
+ wait_for_completion(&ps3av->done);
+ ps3av->ps3av_mode_old = ps3av->ps3av_mode;
+ ps3av->ps3av_mode = id;
if (ps3av_set_videomode())
- ps3av.ps3av_mode = ps3av.ps3av_mode_old;
+ ps3av->ps3av_mode = ps3av->ps3av_mode_old;
return 0;
}
@@ -744,7 +774,7 @@ EXPORT_SYMBOL_GPL(ps3av_set_video_mode);
int ps3av_get_auto_mode(int boot)
{
- return ps3av_auto_videomode(&ps3av.av_hw_conf, boot);
+ return ps3av_auto_videomode(&ps3av->av_hw_conf, boot);
}
EXPORT_SYMBOL_GPL(ps3av_get_auto_mode);
@@ -772,7 +802,7 @@ EXPORT_SYMBOL_GPL(ps3av_set_mode);
int ps3av_get_mode(void)
{
- return ps3av.ps3av_mode;
+ return ps3av ? ps3av->ps3av_mode : 0;
}
EXPORT_SYMBOL_GPL(ps3av_get_mode);
@@ -842,82 +872,65 @@ int ps3av_audio_mute(int mute)
EXPORT_SYMBOL_GPL(ps3av_audio_mute);
-int ps3av_dev_open(void)
+void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
+ void *flip_data)
{
- int status = 0;
-
- mutex_lock(&ps3av.mutex);
- if (!ps3av.open_count++) {
- status = lv1_gpu_open(0);
- if (status) {
- printk(KERN_ERR "%s: lv1_gpu_open failed %d\n",
- __func__, status);
- ps3av.open_count--;
- }
- }
- mutex_unlock(&ps3av.mutex);
-
- return status;
+ mutex_lock(&ps3av->mutex);
+ ps3av->flip_ctl = flip_ctl;
+ ps3av->flip_data = flip_data;
+ mutex_unlock(&ps3av->mutex);
}
+EXPORT_SYMBOL_GPL(ps3av_register_flip_ctl);
-EXPORT_SYMBOL_GPL(ps3av_dev_open);
-
-int ps3av_dev_close(void)
+void ps3av_flip_ctl(int on)
{
- int status = 0;
-
- mutex_lock(&ps3av.mutex);
- if (ps3av.open_count <= 0) {
- printk(KERN_ERR "%s: GPU already closed\n", __func__);
- status = -1;
- } else if (!--ps3av.open_count) {
- status = lv1_gpu_close();
- if (status)
- printk(KERN_WARNING "%s: lv1_gpu_close failed %d\n",
- __func__, status);
- }
- mutex_unlock(&ps3av.mutex);
-
- return status;
+ mutex_lock(&ps3av->mutex);
+ if (ps3av->flip_ctl)
+ ps3av->flip_ctl(on, ps3av->flip_data);
+ mutex_unlock(&ps3av->mutex);
}
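The flip-control hook is now registered at run time instead of being a hard-coded call into ps3fb. Below is a minimal sketch of how a framebuffer client might use the new interface; the example_* names and the ext_flip flag are illustrative only and not part of this patch, and the prototype is assumed to live in asm/ps3av.h:

#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/ps3av.h>

struct example_fb_priv {
	atomic_t ext_flip;		/* non-zero while flips must be held off */
};

static struct example_fb_priv example_fb_priv;

/* Runs under ps3av->mutex, called from ps3av_flip_ctl(). */
static void example_fb_flip_ctl(int on, void *data)
{
	struct example_fb_priv *priv = data;

	atomic_set(&priv->ext_flip, !on);
}

static int __init example_fb_init(void)
{
	ps3av_register_flip_ctl(example_fb_flip_ctl, &example_fb_priv);
	return 0;
}

static void __exit example_fb_exit(void)
{
	/* Clear the callback before the module goes away. */
	ps3av_register_flip_ctl(NULL, NULL);
}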
-EXPORT_SYMBOL_GPL(ps3av_dev_close);
-
-static int ps3av_probe(struct ps3_vuart_port_device *dev)
+static int ps3av_probe(struct ps3_system_bus_device *dev)
{
int res;
u32 id;
- dev_dbg(&ps3av_dev.core, "init ...\n");
- dev_dbg(&ps3av_dev.core, " timeout=%d\n", timeout);
+ dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+ dev_dbg(&dev->core, " timeout=%d\n", timeout);
- memset(&ps3av, 0, sizeof(ps3av));
-
- mutex_init(&ps3av.mutex);
- ps3av.ps3av_mode = 0;
- ps3av.dev = dev;
+ if (ps3av) {
+ dev_err(&dev->core, "Only one ps3av device is supported\n");
+ return -EBUSY;
+ }
- INIT_WORK(&ps3av.work, ps3avd);
- init_completion(&ps3av.done);
- complete(&ps3av.done);
- ps3av.wq = create_singlethread_workqueue("ps3avd");
- if (!ps3av.wq)
+ ps3av = kzalloc(sizeof(*ps3av), GFP_KERNEL);
+ if (!ps3av)
return -ENOMEM;
- ps3av.available = 1;
+ mutex_init(&ps3av->mutex);
+ ps3av->ps3av_mode = 0;
+ ps3av->dev = dev;
+
+ INIT_WORK(&ps3av->work, ps3avd);
+ init_completion(&ps3av->done);
+ complete(&ps3av->done);
+ ps3av->wq = create_singlethread_workqueue("ps3avd");
+ if (!ps3av->wq)
+ goto fail;
+
switch (ps3_os_area_get_av_multi_out()) {
case PS3_PARAM_AV_MULTI_OUT_NTSC:
- ps3av.region = PS3AV_REGION_60;
+ ps3av->region = PS3AV_REGION_60;
break;
case PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR:
case PS3_PARAM_AV_MULTI_OUT_SECAM:
- ps3av.region = PS3AV_REGION_50;
+ ps3av->region = PS3AV_REGION_50;
break;
case PS3_PARAM_AV_MULTI_OUT_PAL_RGB:
- ps3av.region = PS3AV_REGION_50 | PS3AV_REGION_RGB;
+ ps3av->region = PS3AV_REGION_50 | PS3AV_REGION_RGB;
break;
default:
- ps3av.region = PS3AV_REGION_60;
+ ps3av->region = PS3AV_REGION_60;
break;
}
@@ -927,39 +940,47 @@ static int ps3av_probe(struct ps3_vuart_port_device *dev)
printk(KERN_ERR "%s: ps3av_cmd_init failed %d\n", __func__,
res);
- ps3av_get_hw_conf(&ps3av);
- id = ps3av_auto_videomode(&ps3av.av_hw_conf, 1);
- mutex_lock(&ps3av.mutex);
- ps3av.ps3av_mode = id;
- mutex_unlock(&ps3av.mutex);
+ ps3av_get_hw_conf(ps3av);
+ id = ps3av_auto_videomode(&ps3av->av_hw_conf, 1);
+ mutex_lock(&ps3av->mutex);
+ ps3av->ps3av_mode = id;
+ mutex_unlock(&ps3av->mutex);
- dev_dbg(&ps3av_dev.core, "init...done\n");
+ dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
return 0;
+
+fail:
+ kfree(ps3av);
+ ps3av = NULL;
+ return -ENOMEM;
}
-static int ps3av_remove(struct ps3_vuart_port_device *dev)
+static int ps3av_remove(struct ps3_system_bus_device *dev)
{
- if (ps3av.available) {
+ dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
+ if (ps3av) {
ps3av_cmd_fin();
- if (ps3av.wq)
- destroy_workqueue(ps3av.wq);
- ps3av.available = 0;
+ if (ps3av->wq)
+ destroy_workqueue(ps3av->wq);
+ kfree(ps3av);
+ ps3av = NULL;
}
+ dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
return 0;
}
-static void ps3av_shutdown(struct ps3_vuart_port_device *dev)
+static void ps3av_shutdown(struct ps3_system_bus_device *dev)
{
+ dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
ps3av_remove(dev);
+ dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
}
static struct ps3_vuart_port_driver ps3av_driver = {
- .match_id = PS3_MATCH_ID_AV_SETTINGS,
- .core = {
- .name = "ps3_av",
- },
+ .core.match_id = PS3_MATCH_ID_AV_SETTINGS,
+ .core.core.name = "ps3_av",
.probe = ps3av_probe,
.remove = ps3av_remove,
.shutdown = ps3av_shutdown,
@@ -972,6 +993,8 @@ static int ps3av_module_init(void)
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
error = ps3_vuart_port_driver_register(&ps3av_driver);
if (error) {
printk(KERN_ERR
@@ -980,20 +1003,21 @@ static int ps3av_module_init(void)
return error;
}
- error = ps3_vuart_port_device_register(&ps3av_dev);
- if (error)
- printk(KERN_ERR
- "%s: ps3_vuart_port_device_register failed %d\n",
- __func__, error);
-
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
return error;
}
static void __exit ps3av_module_exit(void)
{
- device_unregister(&ps3av_dev.core);
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
ps3_vuart_port_driver_unregister(&ps3av_driver);
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
subsys_initcall(ps3av_module_init);
module_exit(ps3av_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PS3 AV Settings Driver");
+MODULE_AUTHOR("Sony Computer Entertainment Inc.");
+MODULE_ALIAS(PS3_MODULE_ALIAS_AV_SETTINGS);
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index 0145ea173c42..f72f5ddf18e4 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -143,6 +143,14 @@ static u32 ps3av_vid_video2av(int vid)
return PS3AV_CMD_AV_VID_480P;
}
+static int ps3av_hdmi_range(void)
+{
+ if (ps3_compare_firmware_version(1, 8, 0) < 0)
+ return 0;
+ else
+ return 1; /* supported */
+}
+
int ps3av_cmd_init(void)
{
int res;
@@ -350,6 +358,10 @@ u32 ps3av_cmd_set_av_video_cs(void *p, u32 avport, int video_vid, int cs_out,
/* should be same as video_mode.video_cs_out */
av_video_cs->av_cs_in = ps3av_cs_video2av(PS3AV_CMD_VIDEO_CS_RGB_8);
av_video_cs->bitlen_out = ps3av_cs_video2av_bitlen(cs_out);
+ if ((id & PS3AV_MODE_WHITE) && ps3av_hdmi_range())
+ av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_ON;
+ else /* default off */
+ av_video_cs->super_white = PS3AV_CMD_AV_SUPER_WHITE_OFF;
av_video_cs->aspect = aspect;
if (id & PS3AV_MODE_DITHER) {
av_video_cs->dither = PS3AV_CMD_AV_DITHER_ON
@@ -392,6 +404,10 @@ u32 ps3av_cmd_set_video_mode(void *p, u32 head, int video_vid, int video_fmt,
video_mode->pitch = video_mode->width * 4; /* line_length */
video_mode->video_out_format = PS3AV_CMD_VIDEO_OUT_FORMAT_RGB_12BIT;
video_mode->video_format = ps3av_video_fmt_table[video_fmt].format;
+ if ((id & PS3AV_MODE_COLOR) && ps3av_hdmi_range())
+ video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_DISABLE_LUT;
+ else /* default enable */
+ video_mode->video_cl_cnv = PS3AV_CMD_VIDEO_CL_CNV_ENABLE_LUT;
video_mode->video_order = ps3av_video_fmt_table[video_fmt].order;
pr_debug("%s: video_mode:vid:%x width:%d height:%d pitch:%d out_format:%d format:%x order:%x\n",
@@ -852,7 +868,7 @@ int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len)
{
int res;
- ps3fb_flip_ctl(0); /* flip off */
+ ps3av_flip_ctl(0); /* flip off */
/* avb packet */
res = ps3av_do_pkt(PS3AV_CID_AVB_PARAM, send_len, sizeof(*avb),
@@ -866,7 +882,7 @@ int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len)
res);
out:
- ps3fb_flip_ctl(1); /* flip on */
+ ps3av_flip_ctl(1); /* flip on */
return res;
}
@@ -987,34 +1003,3 @@ void ps3av_cmd_av_monitor_info_dump(const struct ps3av_pkt_av_get_monitor_info *
| PS3AV_CMD_AV_LAYOUT_176 \
| PS3AV_CMD_AV_LAYOUT_192)
-/************************* vuart ***************************/
-
-#define POLLING_INTERVAL 25 /* in msec */
-
-int ps3av_vuart_write(struct ps3_vuart_port_device *dev, const void *buf,
- unsigned long size)
-{
- int error = ps3_vuart_write(dev, buf, size);
- return error ? error : size;
-}
-
-int ps3av_vuart_read(struct ps3_vuart_port_device *dev, void *buf,
- unsigned long size, int timeout)
-{
- int error;
- int loopcnt = 0;
-
- timeout = (timeout + POLLING_INTERVAL - 1) / POLLING_INTERVAL;
- while (loopcnt++ <= timeout) {
- error = ps3_vuart_read(dev, buf, size);
- if (!error)
- return size;
- if (error != -EAGAIN) {
- printk(KERN_ERR "%s: ps3_vuart_read failed %d\n",
- __func__, error);
- return error;
- }
- msleep(POLLING_INTERVAL);
- }
- return -EWOULDBLOCK;
-}
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
new file mode 100644
index 000000000000..3a9824e3b251
--- /dev/null
+++ b/drivers/ps3/ps3stor_lib.c
@@ -0,0 +1,302 @@
+/*
+ * PS3 Storage Library
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+static int ps3stor_probe_access(struct ps3_storage_device *dev)
+{
+ int res, error;
+ unsigned int i;
+ unsigned long n;
+
+ if (dev->sbd.match_id == PS3_MATCH_ID_STOR_ROM) {
+ /* special case: CD-ROM is assumed always accessible */
+ dev->accessible_regions = 1;
+ return 0;
+ }
+
+ error = -EPERM;
+ for (i = 0; i < dev->num_regions; i++) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: checking accessibility of region %u\n",
+ __func__, __LINE__, i);
+
+ dev->region_idx = i;
+ res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1,
+ 0);
+ if (res) {
+ dev_dbg(&dev->sbd.core, "%s:%u: read failed, "
+ "region %u is not accessible\n", __func__,
+ __LINE__, i);
+ continue;
+ }
+
+ dev_dbg(&dev->sbd.core, "%s:%u: region %u is accessible\n",
+ __func__, __LINE__, i);
+ set_bit(i, &dev->accessible_regions);
+
+ /* We can access at least one region */
+ error = 0;
+ }
+ if (error)
+ return error;
+
+ n = hweight_long(dev->accessible_regions);
+ if (n > 1)
+ dev_info(&dev->sbd.core,
+ "%s:%u: %lu accessible regions found. Only the first "
+ "one will be used",
+ __func__, __LINE__, n);
+ dev->region_idx = __ffs(dev->accessible_regions);
+ dev_info(&dev->sbd.core,
+ "First accessible region has index %u start %lu size %lu\n",
+ dev->region_idx, dev->regions[dev->region_idx].start,
+ dev->regions[dev->region_idx].size);
+
+ return 0;
+}
+
+
+/**
+ * ps3stor_setup - Setup a storage device before use
+ * @dev: Pointer to a struct ps3_storage_device
+ * @handler: Pointer to an interrupt handler
+ *
+ * Returns 0 for success, or an error code
+ */
+int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
+{
+ int error, res, alignment;
+ enum ps3_dma_page_size page_size;
+
+ error = ps3_open_hv_device(&dev->sbd);
+ if (error) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: ps3_open_hv_device failed %d\n", __func__,
+ __LINE__, error);
+ goto fail;
+ }
+
+ error = ps3_sb_event_receive_port_setup(&dev->sbd, PS3_BINDING_CPU_ANY,
+ &dev->irq);
+ if (error) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
+ __func__, __LINE__, error);
+ goto fail_close_device;
+ }
+
+ error = request_irq(dev->irq, handler, IRQF_DISABLED,
+ dev->sbd.core.driver->name, dev);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
+ __func__, __LINE__, error);
+ goto fail_sb_event_receive_port_destroy;
+ }
+
+ alignment = min(__ffs(dev->bounce_size),
+ __ffs((unsigned long)dev->bounce_buf));
+ if (alignment < 12) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: bounce buffer not aligned (%lx at 0x%p)\n",
+ __func__, __LINE__, dev->bounce_size, dev->bounce_buf);
+ error = -EINVAL;
+ goto fail_free_irq;
+ } else if (alignment < 16)
+ page_size = PS3_DMA_4K;
+ else
+ page_size = PS3_DMA_64K;
+ dev->sbd.d_region = &dev->dma_region;
+ ps3_dma_region_init(&dev->sbd, &dev->dma_region, page_size,
+ PS3_DMA_OTHER, dev->bounce_buf, dev->bounce_size);
+ res = ps3_dma_region_create(&dev->dma_region);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: cannot create DMA region\n",
+ __func__, __LINE__);
+ error = -ENOMEM;
+ goto fail_free_irq;
+ }
+
+ dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
+ dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
+ dev->bounce_size, DMA_BIDIRECTIONAL);
+ if (!dev->bounce_dma) {
+ dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
+ __func__, __LINE__);
+ error = -ENODEV;
+ goto fail_free_dma;
+ }
+
+ error = ps3stor_probe_access(dev);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: No accessible regions found\n",
+ __func__, __LINE__);
+ goto fail_unmap_dma;
+ }
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
+ DMA_BIDIRECTIONAL);
+fail_free_dma:
+ ps3_dma_region_free(&dev->dma_region);
+fail_free_irq:
+ free_irq(dev->irq, dev);
+fail_sb_event_receive_port_destroy:
+ ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
+fail_close_device:
+ ps3_close_hv_device(&dev->sbd);
+fail:
+ return error;
+}
+EXPORT_SYMBOL_GPL(ps3stor_setup);
+
+
+/**
+ * ps3stor_teardown - Tear down a storage device after use
+ * @dev: Pointer to a struct ps3_storage_device
+ */
+void ps3stor_teardown(struct ps3_storage_device *dev)
+{
+ int error;
+
+ dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
+ DMA_BIDIRECTIONAL);
+ ps3_dma_region_free(&dev->dma_region);
+
+ free_irq(dev->irq, dev);
+
+ error = ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
+ if (error)
+ dev_err(&dev->sbd.core,
+ "%s:%u: destroy event receive port failed %d\n",
+ __func__, __LINE__, error);
+
+ error = ps3_close_hv_device(&dev->sbd);
+ if (error)
+ dev_err(&dev->sbd.core,
+ "%s:%u: ps3_close_hv_device failed %d\n", __func__,
+ __LINE__, error);
+}
+EXPORT_SYMBOL_GPL(ps3stor_teardown);
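ps3stor_setup() and ps3stor_teardown() are intended to be called from the probe and remove paths of the individual storage drivers. A condensed, hypothetical sketch of such a client follows; the driver name, the bounce-buffer size and the status handling in the interrupt handler are placeholders, not taken from this patch:

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>

#define EXAMPLE_BOUNCE_SIZE	(64 * 1024)	/* placeholder size */

static irqreturn_t example_stor_interrupt(int irq, void *data)
{
	struct ps3_storage_device *dev = data;
	u64 tag, status;

	if (lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status) ||
	    tag != dev->tag)
		return IRQ_HANDLED;

	dev->lv1_status = status;	/* read back by ps3stor_read_write_sectors() */
	complete(&dev->done);
	return IRQ_HANDLED;
}

static int example_stor_probe(struct ps3_system_bus_device *_dev)
{
	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
	int error;

	dev->bounce_size = EXAMPLE_BOUNCE_SIZE;
	dev->bounce_buf = kmalloc(dev->bounce_size, GFP_DMA);
	if (!dev->bounce_buf)
		return -ENOMEM;

	error = ps3stor_setup(dev, example_stor_interrupt);
	if (error)
		kfree(dev->bounce_buf);
	return error;
}

static int example_stor_remove(struct ps3_system_bus_device *_dev)
{
	struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);

	ps3stor_teardown(dev);
	kfree(dev->bounce_buf);
	return 0;
}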
+
+
+/**
+ * ps3stor_read_write_sectors - read/write from/to a storage device
+ * @dev: Pointer to a struct ps3_storage_device
+ * @lpar: HV logical partition address
+ * @start_sector: First sector to read/write
+ * @sectors: Number of sectors to read/write
+ * @write: Flag indicating write (non-zero) or read (zero)
+ *
+ * Returns 0 for success, -1 in case of failure to submit the command, or
+ * an LV1 status value in case of other errors
+ */
+u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
+ u64 start_sector, u64 sectors, int write)
+{
+ unsigned int region_id = dev->regions[dev->region_idx].id;
+ const char *op = write ? "write" : "read";
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n",
+ __func__, __LINE__, op, sectors, start_sector);
+
+ init_completion(&dev->done);
+ res = write ? lv1_storage_write(dev->sbd.dev_id, region_id,
+ start_sector, sectors, 0, lpar,
+ &dev->tag)
+ : lv1_storage_read(dev->sbd.dev_id, region_id,
+ start_sector, sectors, 0, lpar,
+ &dev->tag);
+ if (res) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
+ __LINE__, op, res);
+ return -1;
+ }
+
+ wait_for_completion(&dev->done);
+ if (dev->lv1_status) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
+ __LINE__, op, dev->lv1_status);
+ return dev->lv1_status;
+ }
+
+ dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__, __LINE__,
+ op);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ps3stor_read_write_sectors);
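Callers pass the LPAR address of their bounce buffer and must treat any non-zero return as failure, whether it is the -1 submit error or a non-zero LV1 status. A hedged sketch of a read helper built on top of it; the helper and its error mapping are illustrative:

#include <linux/string.h>
#include <asm/ps3stor.h>

/* Illustrative helper: read @sectors sectors starting at @start into @buf.
 * Assumes sectors * dev->blk_size fits in the bounce buffer.
 */
static int example_stor_read(struct ps3_storage_device *dev, void *buf,
			     u64 start, u64 sectors)
{
	u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, start,
					     sectors, 0 /* read */);

	if (res)	/* (u64)-1 on submit failure, else LV1 status */
		return -EIO;

	memcpy(buf, dev->bounce_buf, sectors * dev->blk_size);
	return 0;
}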
+
+
+/**
+ * ps3stor_send_command - send a device command to a storage device
+ * @dev: Pointer to a struct ps3_storage_device
+ * @cmd: Command number
+ * @arg1: First command argument
+ * @arg2: Second command argument
+ * @arg3: Third command argument
+ * @arg4: Fourth command argument
+ *
+ * Returns 0 for success, -1 in case of failure to submit the command, or
+ * an LV1 status value in case of other errors
+ */
+u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd, u64 arg1,
+ u64 arg2, u64 arg3, u64 arg4)
+{
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%lx\n", __func__,
+ __LINE__, cmd);
+
+ init_completion(&dev->done);
+
+ res = lv1_storage_send_device_command(dev->sbd.dev_id, cmd, arg1,
+ arg2, arg3, arg4, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: send_device_command 0x%lx failed %d\n",
+ __func__, __LINE__, cmd, res);
+ return -1;
+ }
+
+ wait_for_completion(&dev->done);
+ if (dev->lv1_status) {
+ dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx failed 0x%lx\n",
+ __func__, __LINE__, cmd, dev->lv1_status);
+ return dev->lv1_status;
+ }
+
+ dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx completed\n", __func__,
+ __LINE__, cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ps3stor_send_command);
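Device-specific commands go through the same completion machinery. A hedged example issuing a flush-style command; the opcode here is a placeholder, real values are device-dependent and defined by the individual drivers:

#include <asm/ps3stor.h>

#define EXAMPLE_CMD_FLUSH_CACHE	0x23	/* placeholder opcode */

static int example_stor_flush(struct ps3_storage_device *dev)
{
	u64 res = ps3stor_send_command(dev, EXAMPLE_CMD_FLUSH_CACHE,
				       0, 0, 0, 0);

	return res ? -EIO : 0;
}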
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 Storage Bus Library");
+MODULE_AUTHOR("Sony Corporation");
diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c
new file mode 100644
index 000000000000..31648f7d9ae1
--- /dev/null
+++ b/drivers/ps3/sys-manager-core.c
@@ -0,0 +1,68 @@
+/*
+ * PS3 System Manager core.
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <asm/ps3.h>
+
+/**
+ * Statically linked routines that allow late binding of a loaded sys-manager
+ * module.
+ */
+
+static struct ps3_sys_manager_ops ps3_sys_manager_ops;
+
+/**
+ * ps3_register_sys_manager_ops - Bind ps3_sys_manager_ops to a module.
+ * @ops: struct ps3_sys_manager_ops.
+ *
+ * To be called from ps3_sys_manager_probe() and ps3_sys_manager_remove() to
+ * register callback ops for power control. Copies data to the static
+ * variable ps3_sys_manager_ops.
+ */
+
+void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
+{
+ BUG_ON(!ops);
+ BUG_ON(!ops->dev);
+ ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops;
+}
+EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
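Because ps3_sys_manager_register_ops() copies the structure, the caller may build the ops on the stack. A minimal sketch mirroring the ps3_sys_manager_probe() change later in this patch; the example_* handlers are stand-ins:

#include <asm/ps3.h>

static void example_final_power_off(struct ps3_system_bus_device *dev)
{
	/* shut the lpar down, never returns */
}

static void example_final_restart(struct ps3_system_bus_device *dev)
{
	/* reboot the lpar, never returns */
}

static int example_probe(struct ps3_system_bus_device *dev)
{
	struct ps3_sys_manager_ops ops = {
		.power_off = example_final_power_off,
		.restart   = example_final_restart,
		.dev       = dev,
	};

	ps3_sys_manager_register_ops(&ops);	/* ops is copied, stack is fine */
	return 0;
}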
+
+void ps3_sys_manager_power_off(void)
+{
+ if (ps3_sys_manager_ops.power_off)
+ ps3_sys_manager_ops.power_off(ps3_sys_manager_ops.dev);
+
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1)
+ (void)0;
+}
+
+void ps3_sys_manager_restart(void)
+{
+ if (ps3_sys_manager_ops.restart)
+ ps3_sys_manager_ops.restart(ps3_sys_manager_ops.dev);
+
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1)
+ (void)0;
+}
diff --git a/drivers/ps3/sys-manager.c b/drivers/ps3/sys-manager.c
index 3aa2b0dcc369..8461b08ab9fb 100644
--- a/drivers/ps3/sys-manager.c
+++ b/drivers/ps3/sys-manager.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("PS3 System Manager");
/**
* ps3_sys_manager - PS3 system manager driver.
*
- * The system manager provides an asyncronous system event notification
+ * The system manager provides an asynchronous system event notification
* mechanism for reporting events like thermal alert and button presses to
* guests. It also provides support to control system shutdown and startup.
*
@@ -52,6 +52,7 @@ MODULE_DESCRIPTION("PS3 System Manager");
* @size: Header size in bytes, curently 16.
* @payload_size: Message payload size in bytes.
* @service_id: Message type, one of enum ps3_sys_manager_service_id.
+ * @request_tag: Unique number to identify reply.
*/
struct ps3_sys_manager_header {
@@ -61,29 +62,49 @@ struct ps3_sys_manager_header {
u16 reserved_1;
u32 payload_size;
u16 service_id;
- u16 reserved_2[3];
+ u16 reserved_2;
+ u32 request_tag;
};
+#define dump_sm_header(_h) _dump_sm_header(_h, __func__, __LINE__)
+static void __maybe_unused _dump_sm_header(
+ const struct ps3_sys_manager_header *h, const char *func, int line)
+{
+ pr_debug("%s:%d: version: %xh\n", func, line, h->version);
+ pr_debug("%s:%d: size: %xh\n", func, line, h->size);
+ pr_debug("%s:%d: payload_size: %xh\n", func, line, h->payload_size);
+ pr_debug("%s:%d: service_id: %xh\n", func, line, h->service_id);
+ pr_debug("%s:%d: request_tag: %xh\n", func, line, h->request_tag);
+}
+
/**
- * @PS3_SM_RX_MSG_LEN - System manager received message length.
+ * @PS3_SM_RX_MSG_LEN_MIN - Shortest received message length.
+ * @PS3_SM_RX_MSG_LEN_MAX - Longest received message length.
*
- * Currently all messages received from the system manager are the same length
- * (16 bytes header + 16 bytes payload = 32 bytes). This knowlege is used to
- * simplify the logic.
+ * Currently all messages received from the system manager are either
+ * (16 bytes header + 8 bytes payload = 24 bytes) or (16 bytes header
+ * + 16 bytes payload = 32 bytes). This knowledge is used to simplify
+ * the logic.
*/
enum {
- PS3_SM_RX_MSG_LEN = 32,
+ PS3_SM_RX_MSG_LEN_MIN = 24,
+ PS3_SM_RX_MSG_LEN_MAX = 32,
};
/**
* enum ps3_sys_manager_service_id - Message header service_id.
- * @PS3_SM_SERVICE_ID_REQUEST: guest --> sys_manager.
- * @PS3_SM_SERVICE_ID_COMMAND: guest <-- sys_manager.
- * @PS3_SM_SERVICE_ID_RESPONSE: guest --> sys_manager.
- * @PS3_SM_SERVICE_ID_SET_ATTR: guest --> sys_manager.
- * @PS3_SM_SERVICE_ID_EXTERN_EVENT: guest <-- sys_manager.
- * @PS3_SM_SERVICE_ID_SET_NEXT_OP: guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_REQUEST: guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_REQUEST_ERROR: guest <-- sys_manager.
+ * @PS3_SM_SERVICE_ID_COMMAND: guest <-- sys_manager.
+ * @PS3_SM_SERVICE_ID_RESPONSE: guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_SET_ATTR: guest --> sys_manager.
+ * @PS3_SM_SERVICE_ID_EXTERN_EVENT: guest <-- sys_manager.
+ * @PS3_SM_SERVICE_ID_SET_NEXT_OP: guest --> sys_manager.
+ *
+ * PS3_SM_SERVICE_ID_REQUEST_ERROR is returned for invalid data values in
+ * a PS3_SM_SERVICE_ID_REQUEST message. It also seems to be returned when
+ * a REQUEST message is sent at the wrong time.
*/
enum ps3_sys_manager_service_id {
@@ -93,6 +114,7 @@ enum ps3_sys_manager_service_id {
PS3_SM_SERVICE_ID_COMMAND = 3,
PS3_SM_SERVICE_ID_EXTERN_EVENT = 4,
PS3_SM_SERVICE_ID_SET_NEXT_OP = 5,
+ PS3_SM_SERVICE_ID_REQUEST_ERROR = 6,
PS3_SM_SERVICE_ID_SET_ATTR = 8,
};
@@ -185,11 +207,21 @@ enum ps3_sys_manager_cmd {
};
/**
+ * ps3_sm_force_power_off - Poweroff helper.
+ *
+ * A global variable used to force a poweroff when the power button has
+ * been pressed irrespective of how init handles the ctrl_alt_del signal.
+ *
+ */
+
+static unsigned int ps3_sm_force_power_off;
+
+/**
* ps3_sys_manager_write - Helper to write a two part message to the vuart.
*
*/
-static int ps3_sys_manager_write(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_write(struct ps3_system_bus_device *dev,
const struct ps3_sys_manager_header *header, const void *payload)
{
int result;
@@ -213,15 +245,10 @@ static int ps3_sys_manager_write(struct ps3_vuart_port_device *dev,
*
*/
-static int ps3_sys_manager_send_attr(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_send_attr(struct ps3_system_bus_device *dev,
enum ps3_sys_manager_attr attr)
{
- static const struct ps3_sys_manager_header header = {
- .version = 1,
- .size = 16,
- .payload_size = 16,
- .service_id = PS3_SM_SERVICE_ID_SET_ATTR,
- };
+ struct ps3_sys_manager_header header;
struct {
u8 version;
u8 reserved_1[3];
@@ -232,6 +259,12 @@ static int ps3_sys_manager_send_attr(struct ps3_vuart_port_device *dev,
dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, attr);
+ memset(&header, 0, sizeof(header));
+ header.version = 1;
+ header.size = 16;
+ header.payload_size = 16;
+ header.service_id = PS3_SM_SERVICE_ID_SET_ATTR;
+
memset(&payload, 0, sizeof(payload));
payload.version = 1;
payload.attribute = attr;
@@ -245,16 +278,11 @@ static int ps3_sys_manager_send_attr(struct ps3_vuart_port_device *dev,
* Tell the system manager what to do after this lpar is destroyed.
*/
-static int ps3_sys_manager_send_next_op(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_send_next_op(struct ps3_system_bus_device *dev,
enum ps3_sys_manager_next_op op,
enum ps3_sys_manager_wake_source wake_source)
{
- static const struct ps3_sys_manager_header header = {
- .version = 1,
- .size = 16,
- .payload_size = 16,
- .service_id = PS3_SM_SERVICE_ID_SET_NEXT_OP,
- };
+ struct ps3_sys_manager_header header;
struct {
u8 version;
u8 type;
@@ -268,6 +296,12 @@ static int ps3_sys_manager_send_next_op(struct ps3_vuart_port_device *dev,
dev_dbg(&dev->core, "%s:%d: (%xh)\n", __func__, __LINE__, op);
+ memset(&header, 0, sizeof(header));
+ header.version = 1;
+ header.size = 16;
+ header.payload_size = 16;
+ header.service_id = PS3_SM_SERVICE_ID_SET_NEXT_OP;
+
memset(&payload, 0, sizeof(payload));
payload.version = 3;
payload.type = op;
@@ -286,32 +320,35 @@ static int ps3_sys_manager_send_next_op(struct ps3_vuart_port_device *dev,
* the command is then communicated back to the system manager with a response
* message.
*
- * Currently, the only supported request it the 'shutdown self' request.
+ * Currently, the only supported request is the 'shutdown self' request.
*/
-static int ps3_sys_manager_send_request_shutdown(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_send_request_shutdown(
+ struct ps3_system_bus_device *dev)
{
- static const struct ps3_sys_manager_header header = {
- .version = 1,
- .size = 16,
- .payload_size = 16,
- .service_id = PS3_SM_SERVICE_ID_REQUEST,
- };
+ struct ps3_sys_manager_header header;
struct {
u8 version;
u8 type;
u8 gos_id;
u8 reserved_1[13];
- } static const payload = {
- .version = 1,
- .type = 1, /* shutdown */
- .gos_id = 0, /* self */
- };
+ } payload;
BUILD_BUG_ON(sizeof(payload) != 16);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+ memset(&header, 0, sizeof(header));
+ header.version = 1;
+ header.size = 16;
+ header.payload_size = 16;
+ header.service_id = PS3_SM_SERVICE_ID_REQUEST;
+
+ memset(&payload, 0, sizeof(payload));
+ payload.version = 1;
+ payload.type = 1; /* shutdown */
+ payload.gos_id = 0; /* self */
+
return ps3_sys_manager_write(dev, &header, &payload);
}
@@ -323,15 +360,10 @@ static int ps3_sys_manager_send_request_shutdown(struct ps3_vuart_port_device *d
* failure of a command sent by the system manager.
*/
-static int ps3_sys_manager_send_response(struct ps3_vuart_port_device *dev,
+static int ps3_sys_manager_send_response(struct ps3_system_bus_device *dev,
u64 status)
{
- static const struct ps3_sys_manager_header header = {
- .version = 1,
- .size = 16,
- .payload_size = 16,
- .service_id = PS3_SM_SERVICE_ID_RESPONSE,
- };
+ struct ps3_sys_manager_header header;
struct {
u8 version;
u8 reserved_1[3];
@@ -344,6 +376,12 @@ static int ps3_sys_manager_send_response(struct ps3_vuart_port_device *dev,
dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
(status ? "nak" : "ack"));
+ memset(&header, 0, sizeof(header));
+ header.version = 1;
+ header.size = 16;
+ header.payload_size = 16;
+ header.service_id = PS3_SM_SERVICE_ID_RESPONSE;
+
memset(&payload, 0, sizeof(payload));
payload.version = 1;
payload.status = status;
@@ -356,7 +394,7 @@ static int ps3_sys_manager_send_response(struct ps3_vuart_port_device *dev,
*
*/
-static int ps3_sys_manager_handle_event(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
{
int result;
struct {
@@ -370,7 +408,7 @@ static int ps3_sys_manager_handle_event(struct ps3_vuart_port_device *dev)
BUILD_BUG_ON(sizeof(event) != 16);
result = ps3_vuart_read(dev, &event, sizeof(event));
- BUG_ON(result);
+ BUG_ON(result && "need to retry here");
if (event.version != 1) {
dev_dbg(&dev->core, "%s:%d: unsupported event version (%u)\n",
@@ -382,11 +420,34 @@ static int ps3_sys_manager_handle_event(struct ps3_vuart_port_device *dev)
case PS3_SM_EVENT_POWER_PRESSED:
dev_dbg(&dev->core, "%s:%d: POWER_PRESSED\n",
__func__, __LINE__);
+ ps3_sm_force_power_off = 1;
+ /*
+ * A memory barrier is used here to sync memory since
+ * ps3_sys_manager_final_restart() could be called on
+ * another cpu.
+ */
+ wmb();
+ kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */
break;
case PS3_SM_EVENT_POWER_RELEASED:
dev_dbg(&dev->core, "%s:%d: POWER_RELEASED (%u ms)\n",
__func__, __LINE__, event.value);
- kill_cad_pid(SIGINT, 1);
+ break;
+ case PS3_SM_EVENT_RESET_PRESSED:
+ dev_dbg(&dev->core, "%s:%d: RESET_PRESSED\n",
+ __func__, __LINE__);
+ ps3_sm_force_power_off = 0;
+ /*
+ * A memory barrier is used here to sync memory since
+ * ps3_sys_manager_final_restart() could be called on
+ * another cpu.
+ */
+ wmb();
+ kill_cad_pid(SIGINT, 1); /* ctrl_alt_del */
+ break;
+ case PS3_SM_EVENT_RESET_RELEASED:
+ dev_dbg(&dev->core, "%s:%d: RESET_RELEASED (%u ms)\n",
+ __func__, __LINE__, event.value);
break;
case PS3_SM_EVENT_THERMAL_ALERT:
dev_dbg(&dev->core, "%s:%d: THERMAL_ALERT (zone %u)\n",
@@ -411,7 +472,7 @@ static int ps3_sys_manager_handle_event(struct ps3_vuart_port_device *dev)
* The system manager sends this in reply to a 'request' message from the guest.
*/
-static int ps3_sys_manager_handle_cmd(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_handle_cmd(struct ps3_system_bus_device *dev)
{
int result;
struct {
@@ -425,6 +486,7 @@ static int ps3_sys_manager_handle_cmd(struct ps3_vuart_port_device *dev)
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
result = ps3_vuart_read(dev, &cmd, sizeof(cmd));
+ BUG_ON(result && "need to retry here");
if(result)
return result;
@@ -448,9 +510,10 @@ static int ps3_sys_manager_handle_cmd(struct ps3_vuart_port_device *dev)
/**
* ps3_sys_manager_handle_msg - First stage msg handler.
*
+ * Can be called directly to manually poll vuart and pump message handler.
*/
-static int ps3_sys_manager_handle_msg(struct ps3_vuart_port_device *dev)
+static int ps3_sys_manager_handle_msg(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_sys_manager_header header;
@@ -464,12 +527,17 @@ static int ps3_sys_manager_handle_msg(struct ps3_vuart_port_device *dev)
if (header.version != 1) {
dev_dbg(&dev->core, "%s:%d: unsupported header version (%u)\n",
__func__, __LINE__, header.version);
+ dump_sm_header(&header);
goto fail_header;
}
BUILD_BUG_ON(sizeof(header) != 16);
- BUG_ON(header.size != 16);
- BUG_ON(header.payload_size != 16);
+
+ if (header.size != 16 || (header.payload_size != 8
+ && header.payload_size != 16)) {
+ dump_sm_header(&header);
+ BUG();
+ }
switch (header.service_id) {
case PS3_SM_SERVICE_ID_EXTERN_EVENT:
@@ -478,6 +546,11 @@ static int ps3_sys_manager_handle_msg(struct ps3_vuart_port_device *dev)
case PS3_SM_SERVICE_ID_COMMAND:
dev_dbg(&dev->core, "%s:%d: COMMAND\n", __func__, __LINE__);
return ps3_sys_manager_handle_cmd(dev);
+ case PS3_SM_SERVICE_ID_REQUEST_ERROR:
+ dev_dbg(&dev->core, "%s:%d: REQUEST_ERROR\n", __func__,
+ __LINE__);
+ dump_sm_header(&header);
+ break;
default:
dev_dbg(&dev->core, "%s:%d: unknown service_id (%u)\n",
__func__, __LINE__, header.service_id);
@@ -494,45 +567,25 @@ fail_id:
}
/**
- * ps3_sys_manager_work - Asyncronous read handler.
- *
- * Signaled when a complete message arrives at the vuart port.
- */
-
-static void ps3_sys_manager_work(struct work_struct *work)
-{
- struct ps3_vuart_port_device *dev = ps3_vuart_work_to_port_device(work);
-
- ps3_sys_manager_handle_msg(dev);
- ps3_vuart_read_async(dev, ps3_sys_manager_work, PS3_SM_RX_MSG_LEN);
-}
-
-struct {
- struct ps3_vuart_port_device *dev;
-} static drv_priv;
-
-/**
- * ps3_sys_manager_restart - The final platform machine_restart routine.
+ * ps3_sys_manager_final_power_off - The final platform machine_power_off routine.
*
- * This routine never returns. The routine disables asyncronous vuart reads
+ * This routine never returns. The routine disables asynchronous vuart reads
* then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
* the shutdown command sent from the system manager. Soon after the
* acknowledgement is sent the lpar is destroyed by the HV. This routine
- * should only be called from ps3_restart().
+ * should only be called from ps3_power_off() through
+ * ps3_sys_manager_ops.power_off.
*/
-void ps3_sys_manager_restart(void)
+static void ps3_sys_manager_final_power_off(struct ps3_system_bus_device *dev)
{
- struct ps3_vuart_port_device *dev = drv_priv.dev;
-
- BUG_ON(!drv_priv.dev);
+ BUG_ON(!dev);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
ps3_vuart_cancel_async(dev);
- ps3_sys_manager_send_attr(dev, 0);
- ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_LPAR_REBOOT,
+ ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_SHUTDOWN,
PS3_SM_WAKE_DEFAULT);
ps3_sys_manager_send_request_shutdown(dev);
@@ -543,26 +596,33 @@ void ps3_sys_manager_restart(void)
}
/**
- * ps3_sys_manager_power_off - The final platform machine_power_off routine.
+ * ps3_sys_manager_final_restart - The final platform machine_restart routine.
*
- * This routine never returns. The routine disables asyncronous vuart reads
+ * This routine never returns. The routine disables asynchronous vuart reads
* then spins calling ps3_sys_manager_handle_msg() to receive and acknowledge
* the shutdown command sent from the system manager. Soon after the
* acknowledgement is sent the lpar is destroyed by the HV. This routine
- * should only be called from ps3_power_off().
+ * should only be called from ps3_restart() through ps3_sys_manager_ops.restart.
*/
-void ps3_sys_manager_power_off(void)
+static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev)
{
- struct ps3_vuart_port_device *dev = drv_priv.dev;
-
- BUG_ON(!drv_priv.dev);
+ BUG_ON(!dev);
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+ /* Check if we got here via a power button event. */
+
+ if (ps3_sm_force_power_off) {
+ dev_dbg(&dev->core, "%s:%d: forcing poweroff\n",
+ __func__, __LINE__);
+ ps3_sys_manager_final_power_off(dev);
+ }
+
ps3_vuart_cancel_async(dev);
- ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_SHUTDOWN,
+ ps3_sys_manager_send_attr(dev, 0);
+ ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_LPAR_REBOOT,
PS3_SM_WAKE_DEFAULT);
ps3_sys_manager_send_request_shutdown(dev);
@@ -572,31 +632,60 @@ void ps3_sys_manager_power_off(void)
ps3_sys_manager_handle_msg(dev);
}
-static int ps3_sys_manager_probe(struct ps3_vuart_port_device *dev)
+/**
+ * ps3_sys_manager_work - Asynchronous read handler.
+ *
+ * Signaled when PS3_SM_RX_MSG_LEN_MIN bytes arrive at the vuart port.
+ */
+
+static void ps3_sys_manager_work(struct ps3_system_bus_device *dev)
+{
+ ps3_sys_manager_handle_msg(dev);
+ ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
+}
+
+static int ps3_sys_manager_probe(struct ps3_system_bus_device *dev)
{
int result;
+ struct ps3_sys_manager_ops ops;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
- BUG_ON(drv_priv.dev);
- drv_priv.dev = dev;
+ ops.power_off = ps3_sys_manager_final_power_off;
+ ops.restart = ps3_sys_manager_final_restart;
+ ops.dev = dev;
+
+ /* ps3_sys_manager_register_ops copies ops. */
+
+ ps3_sys_manager_register_ops(&ops);
result = ps3_sys_manager_send_attr(dev, PS3_SM_ATTR_ALL);
BUG_ON(result);
- result = ps3_vuart_read_async(dev, ps3_sys_manager_work,
- PS3_SM_RX_MSG_LEN);
+ result = ps3_vuart_read_async(dev, PS3_SM_RX_MSG_LEN_MIN);
BUG_ON(result);
return result;
}
+static int ps3_sys_manager_remove(struct ps3_system_bus_device *dev)
+{
+ dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+ return 0;
+}
+
+static void ps3_sys_manager_shutdown(struct ps3_system_bus_device *dev)
+{
+ dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+}
+
static struct ps3_vuart_port_driver ps3_sys_manager = {
- .match_id = PS3_MATCH_ID_SYSTEM_MANAGER,
- .core = {
- .name = "ps3_sys_manager",
- },
+ .core.match_id = PS3_MATCH_ID_SYSTEM_MANAGER,
+ .core.core.name = "ps3_sys_manager",
.probe = ps3_sys_manager_probe,
+ .remove = ps3_sys_manager_remove,
+ .shutdown = ps3_sys_manager_shutdown,
+ .work = ps3_sys_manager_work,
};
static int __init ps3_sys_manager_init(void)
@@ -608,3 +697,6 @@ static int __init ps3_sys_manager_init(void)
}
module_init(ps3_sys_manager_init);
+/* Module remove not supported. */
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER);
diff --git a/drivers/ps3/vuart.c b/drivers/ps3/vuart.c
index ec2d36a1bc67..bea25a1391ee 100644
--- a/drivers/ps3/vuart.c
+++ b/drivers/ps3/vuart.c
@@ -71,6 +71,34 @@ enum vuart_interrupt_mask {
};
/**
+ * struct ps3_vuart_port_priv - private vuart device data.
+ */
+
+struct ps3_vuart_port_priv {
+ u64 interrupt_mask;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } tx_list;
+ struct {
+ struct ps3_vuart_work work;
+ unsigned long bytes_held;
+ spinlock_t lock;
+ struct list_head head;
+ } rx_list;
+ struct ps3_vuart_stats stats;
+};
+
+static struct ps3_vuart_port_priv *to_port_priv(
+ struct ps3_system_bus_device *dev)
+{
+ BUG_ON(!dev);
+ BUG_ON(!dev->driver_priv);
+ return (struct ps3_vuart_port_priv *)dev->driver_priv;
+}
+
+/**
* struct ports_bmp - bitmap indicating ports needing service.
*
* A 256 bit read only bitmap indicating ports needing service. Do not write
@@ -83,31 +111,14 @@ struct ports_bmp {
} __attribute__ ((aligned (32)));
#define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
-static void __attribute__ ((unused)) _dump_ports_bmp(
+static void __maybe_unused _dump_ports_bmp(
const struct ports_bmp* bmp, const char* func, int line)
{
pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status);
}
-static int ps3_vuart_match_id_to_port(enum ps3_match_id match_id,
- unsigned int *port_number)
-{
- switch(match_id) {
- case PS3_MATCH_ID_AV_SETTINGS:
- *port_number = 0;
- return 0;
- case PS3_MATCH_ID_SYSTEM_MANAGER:
- *port_number = 2;
- return 0;
- default:
- WARN_ON(1);
- *port_number = UINT_MAX;
- return -EINVAL;
- };
-}
-
#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
-static void __attribute__ ((unused)) _dump_port_params(unsigned int port_number,
+static void __maybe_unused _dump_port_params(unsigned int port_number,
const char* func, int line)
{
#if defined(DEBUG)
@@ -144,14 +155,14 @@ struct vuart_triggers {
unsigned long tx;
};
-int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
+int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
struct vuart_triggers *trig)
{
int result;
unsigned long size;
unsigned long val;
- result = lv1_get_virtual_uart_param(dev->priv->port_number,
+ result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_TX_TRIGGER, &trig->tx);
if (result) {
@@ -160,7 +171,7 @@ int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
return result;
}
- result = lv1_get_virtual_uart_param(dev->priv->port_number,
+ result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BUF_SIZE, &size);
if (result) {
@@ -169,7 +180,7 @@ int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
return result;
}
- result = lv1_get_virtual_uart_param(dev->priv->port_number,
+ result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_TRIGGER, &val);
if (result) {
@@ -186,13 +197,13 @@ int ps3_vuart_get_triggers(struct ps3_vuart_port_device *dev,
return result;
}
-int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
+int ps3_vuart_set_triggers(struct ps3_system_bus_device *dev, unsigned int tx,
unsigned int rx)
{
int result;
unsigned long size;
- result = lv1_set_virtual_uart_param(dev->priv->port_number,
+ result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_TX_TRIGGER, tx);
if (result) {
@@ -201,7 +212,7 @@ int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
return result;
}
- result = lv1_get_virtual_uart_param(dev->priv->port_number,
+ result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BUF_SIZE, &size);
if (result) {
@@ -210,7 +221,7 @@ int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
return result;
}
- result = lv1_set_virtual_uart_param(dev->priv->port_number,
+ result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_RX_TRIGGER, size - rx);
if (result) {
@@ -225,10 +236,12 @@ int ps3_vuart_set_triggers(struct ps3_vuart_port_device *dev, unsigned int tx,
return result;
}
-static int ps3_vuart_get_rx_bytes_waiting(struct ps3_vuart_port_device *dev,
+static int ps3_vuart_get_rx_bytes_waiting(struct ps3_system_bus_device *dev,
u64 *bytes_waiting)
{
- int result = lv1_get_virtual_uart_param(dev->priv->port_number,
+ int result;
+
+ result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BYTES, bytes_waiting);
if (result)
@@ -240,17 +253,24 @@ static int ps3_vuart_get_rx_bytes_waiting(struct ps3_vuart_port_device *dev,
return result;
}
-static int ps3_vuart_set_interrupt_mask(struct ps3_vuart_port_device *dev,
+/**
+ * ps3_vuart_set_interrupt_mask - Enable/disable the port interrupt sources.
+ * @dev: The struct ps3_system_bus_device instance.
+ * @bmp: Logical OR of enum vuart_interrupt_mask values. A zero bit disables.
+ */
+
+static int ps3_vuart_set_interrupt_mask(struct ps3_system_bus_device *dev,
unsigned long mask)
{
int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, mask);
- dev->priv->interrupt_mask = mask;
+ priv->interrupt_mask = mask;
- result = lv1_set_virtual_uart_param(dev->priv->port_number,
- PARAM_INTERRUPT_MASK, dev->priv->interrupt_mask);
+ result = lv1_set_virtual_uart_param(dev->port_number,
+ PARAM_INTERRUPT_MASK, priv->interrupt_mask);
if (result)
dev_dbg(&dev->core, "%s:%d: interrupt_mask failed: %s\n",
@@ -259,79 +279,96 @@ static int ps3_vuart_set_interrupt_mask(struct ps3_vuart_port_device *dev,
return result;
}
-static int ps3_vuart_get_interrupt_status(struct ps3_vuart_port_device *dev,
+static int ps3_vuart_get_interrupt_status(struct ps3_system_bus_device *dev,
unsigned long *status)
{
+ int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
u64 tmp;
- int result = lv1_get_virtual_uart_param(dev->priv->port_number,
+
+ result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_INTERRUPT_STATUS, &tmp);
if (result)
dev_dbg(&dev->core, "%s:%d: interrupt_status failed: %s\n",
__func__, __LINE__, ps3_result(result));
- *status = tmp & dev->priv->interrupt_mask;
+ *status = tmp & priv->interrupt_mask;
dev_dbg(&dev->core, "%s:%d: m %lxh, s %lxh, m&s %lxh\n",
- __func__, __LINE__, dev->priv->interrupt_mask, tmp, *status);
+ __func__, __LINE__, priv->interrupt_mask, tmp, *status);
return result;
}
-int ps3_vuart_enable_interrupt_tx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_enable_interrupt_tx(struct ps3_system_bus_device *dev)
{
- return (dev->priv->interrupt_mask & INTERRUPT_MASK_TX) ? 0
- : ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+ return (priv->interrupt_mask & INTERRUPT_MASK_TX) ? 0
+ : ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_TX);
}
-int ps3_vuart_enable_interrupt_rx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_enable_interrupt_rx(struct ps3_system_bus_device *dev)
{
- return (dev->priv->interrupt_mask & INTERRUPT_MASK_RX) ? 0
- : ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+ return (priv->interrupt_mask & INTERRUPT_MASK_RX) ? 0
+ : ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_RX);
}
-int ps3_vuart_enable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
+int ps3_vuart_enable_interrupt_disconnect(struct ps3_system_bus_device *dev)
{
- return (dev->priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
- : ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+ return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
+ : ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_DISCONNECT);
}
-int ps3_vuart_disable_interrupt_tx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_disable_interrupt_tx(struct ps3_system_bus_device *dev)
{
- return (dev->priv->interrupt_mask & INTERRUPT_MASK_TX)
- ? ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+ return (priv->interrupt_mask & INTERRUPT_MASK_TX)
+ ? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_TX) : 0;
}
-int ps3_vuart_disable_interrupt_rx(struct ps3_vuart_port_device *dev)
+int ps3_vuart_disable_interrupt_rx(struct ps3_system_bus_device *dev)
{
- return (dev->priv->interrupt_mask & INTERRUPT_MASK_RX)
- ? ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+ return (priv->interrupt_mask & INTERRUPT_MASK_RX)
+ ? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_RX) : 0;
}
-int ps3_vuart_disable_interrupt_disconnect(struct ps3_vuart_port_device *dev)
+int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev)
{
- return (dev->priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
- ? ps3_vuart_set_interrupt_mask(dev, dev->priv->interrupt_mask
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+
+ return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
+ ? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_DISCONNECT) : 0;
}
/**
* ps3_vuart_raw_write - Low level write helper.
+ * @dev: The struct ps3_system_bus_device instance.
*
* Do not call ps3_vuart_raw_write directly, use ps3_vuart_write.
*/
-static int ps3_vuart_raw_write(struct ps3_vuart_port_device *dev,
+static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
const void* buf, unsigned int bytes, unsigned long *bytes_written)
{
int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
- result = lv1_write_virtual_uart(dev->priv->port_number,
+ result = lv1_write_virtual_uart(dev->port_number,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);
if (result) {
@@ -340,28 +377,30 @@ static int ps3_vuart_raw_write(struct ps3_vuart_port_device *dev,
return result;
}
- dev->priv->stats.bytes_written += *bytes_written;
+ priv->stats.bytes_written += *bytes_written;
dev_dbg(&dev->core, "%s:%d: wrote %lxh/%xh=>%lxh\n", __func__, __LINE__,
- *bytes_written, bytes, dev->priv->stats.bytes_written);
+ *bytes_written, bytes, priv->stats.bytes_written);
return result;
}
/**
* ps3_vuart_raw_read - Low level read helper.
+ * @dev: The struct ps3_system_bus_device instance.
*
* Do not call ps3_vuart_raw_read directly, use ps3_vuart_read.
*/
-static int ps3_vuart_raw_read(struct ps3_vuart_port_device *dev, void* buf,
+static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes, unsigned long *bytes_read)
{
int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);
- result = lv1_read_virtual_uart(dev->priv->port_number,
+ result = lv1_read_virtual_uart(dev->port_number,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_read);
if (result) {
@@ -370,25 +409,27 @@ static int ps3_vuart_raw_read(struct ps3_vuart_port_device *dev, void* buf,
return result;
}
- dev->priv->stats.bytes_read += *bytes_read;
+ priv->stats.bytes_read += *bytes_read;
dev_dbg(&dev->core, "%s:%d: read %lxh/%xh=>%lxh\n", __func__, __LINE__,
- *bytes_read, bytes, dev->priv->stats.bytes_read);
+ *bytes_read, bytes, priv->stats.bytes_read);
return result;
}
/**
* ps3_vuart_clear_rx_bytes - Discard bytes received.
+ * @dev: The struct ps3_system_bus_device instance.
* @bytes: Max byte count to discard, zero = all pending.
*
* Used to clear pending rx interrupt source. Will not block.
*/
-void ps3_vuart_clear_rx_bytes(struct ps3_vuart_port_device *dev,
+void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
unsigned int bytes)
{
int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
u64 bytes_waiting;
void* tmp;
@@ -418,8 +459,9 @@ void ps3_vuart_clear_rx_bytes(struct ps3_vuart_port_device *dev,
/* Don't include these bytes in the stats. */
- dev->priv->stats.bytes_read -= bytes_waiting;
+ priv->stats.bytes_read -= bytes_waiting;
}
+EXPORT_SYMBOL_GPL(ps3_vuart_clear_rx_bytes);
/**
* struct list_buffer - An element for a port device fifo buffer list.
@@ -435,6 +477,7 @@ struct list_buffer {
/**
* ps3_vuart_write - the entry point for writing data to a port
+ * @dev: The struct ps3_system_bus_device instance.
*
* If the port is idle on entry as much of the incoming data is written to
* the port as the port will accept. Otherwise a list buffer is created
@@ -442,25 +485,26 @@ struct list_buffer {
* then enqueued for transmision via the transmit interrupt.
*/
-int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
+int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
unsigned int bytes)
{
static unsigned long dbg_number;
int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb;
dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
bytes, bytes);
- spin_lock_irqsave(&dev->priv->tx_list.lock, flags);
+ spin_lock_irqsave(&priv->tx_list.lock, flags);
- if (list_empty(&dev->priv->tx_list.head)) {
+ if (list_empty(&priv->tx_list.head)) {
unsigned long bytes_written;
result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
- spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+ spin_unlock_irqrestore(&priv->tx_list.lock, flags);
if (result) {
dev_dbg(&dev->core,
@@ -478,7 +522,7 @@ int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
bytes -= bytes_written;
buf += bytes_written;
} else
- spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+ spin_unlock_irqrestore(&priv->tx_list.lock, flags);
lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);
@@ -491,29 +535,86 @@ int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
lb->tail = lb->data + bytes;
lb->dbg_number = ++dbg_number;
- spin_lock_irqsave(&dev->priv->tx_list.lock, flags);
- list_add_tail(&lb->link, &dev->priv->tx_list.head);
+ spin_lock_irqsave(&priv->tx_list.lock, flags);
+ list_add_tail(&lb->link, &priv->tx_list.head);
ps3_vuart_enable_interrupt_tx(dev);
- spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+ spin_unlock_irqrestore(&priv->tx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %xh bytes\n",
__func__, __LINE__, lb->dbg_number, bytes);
return 0;
}
+EXPORT_SYMBOL_GPL(ps3_vuart_write);
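
As a usage note (not part of this patch), callers of ps3_vuart_write() never have to
handle partial writes themselves: whatever the port cannot accept immediately is queued
on the tx list and drained by the transmit interrupt. A minimal, hypothetical wrapper
might look like the sketch below; demo_send() is an invented name used only for
illustration.

	static int demo_send(struct ps3_system_bus_device *dev,
		const void *msg, unsigned int len)
	{
		/* Partial writes are queued internally and completed from the
		 * transmit interrupt, so a zero return means the whole message
		 * is on its way. */
		return ps3_vuart_write(dev, msg, len);
	}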
+
+/**
+ * ps3_vuart_queue_rx_bytes - Queue waiting bytes into the buffer list.
+ * @dev: The struct ps3_system_bus_device instance.
+ * @bytes_queued: Number of bytes queued to the buffer list.
+ *
+ * Must be called with priv->rx_list.lock held.
+ */
+
+static int ps3_vuart_queue_rx_bytes(struct ps3_system_bus_device *dev,
+ u64 *bytes_queued)
+{
+ static unsigned long dbg_number;
+ int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+ struct list_buffer *lb;
+ u64 bytes;
+
+ *bytes_queued = 0;
+
+ result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
+ BUG_ON(result);
+
+ if (result)
+ return -EIO;
+
+ if (!bytes)
+ return 0;
+
+ /* Add some extra space for recently arrived data. */
+
+ bytes += 128;
+
+ lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
+
+ if (!lb)
+ return -ENOMEM;
+
+ ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
+
+ lb->head = lb->data;
+ lb->tail = lb->data + bytes;
+ lb->dbg_number = ++dbg_number;
+
+ list_add_tail(&lb->link, &priv->rx_list.head);
+ priv->rx_list.bytes_held += bytes;
+
+ dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %lxh bytes\n",
+ __func__, __LINE__, lb->dbg_number, bytes);
+
+ *bytes_queued = bytes;
+
+ return 0;
+}
/**
- * ps3_vuart_read - the entry point for reading data from a port
+ * ps3_vuart_read - The entry point for reading data from a port.
*
- * If enough bytes to satisfy the request are held in the buffer list those
- * bytes are dequeued and copied to the caller's buffer. Emptied list buffers
- * are retiered. If the request cannot be statified by bytes held in the list
- * buffers -EAGAIN is returned.
+ * Queue data waiting at the port, and if enough bytes to satisfy the request
+ * are held in the buffer list those bytes are dequeued and copied to the
+ * caller's buffer. Emptied list buffers are retired. If the request cannot
+ * be satisfied by bytes held in the list buffers, -EAGAIN is returned.
*/
-int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+int ps3_vuart_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes)
{
+ int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb, *n;
unsigned long bytes_read;
@@ -521,30 +622,37 @@ int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
bytes, bytes);
- spin_lock_irqsave(&dev->priv->rx_list.lock, flags);
+ spin_lock_irqsave(&priv->rx_list.lock, flags);
- if (dev->priv->rx_list.bytes_held < bytes) {
- spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
- dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
- __func__, __LINE__,
- bytes - dev->priv->rx_list.bytes_held);
- return -EAGAIN;
+ /* Queue rx bytes here for polled reads. */
+
+ while (priv->rx_list.bytes_held < bytes) {
+ u64 tmp;
+
+ result = ps3_vuart_queue_rx_bytes(dev, &tmp);
+ if (result || !tmp) {
+ dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
+ __func__, __LINE__,
+ bytes - priv->rx_list.bytes_held);
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
+ return -EAGAIN;
+ }
}
- list_for_each_entry_safe(lb, n, &dev->priv->rx_list.head, link) {
+ list_for_each_entry_safe(lb, n, &priv->rx_list.head, link) {
bytes_read = min((unsigned int)(lb->tail - lb->head), bytes);
memcpy(buf, lb->head, bytes_read);
buf += bytes_read;
bytes -= bytes_read;
- dev->priv->rx_list.bytes_held -= bytes_read;
+ priv->rx_list.bytes_held -= bytes_read;
if (bytes_read < lb->tail - lb->head) {
lb->head += bytes_read;
dev_dbg(&dev->core, "%s:%d: buf_%lu: dequeued %lxh "
"bytes\n", __func__, __LINE__, lb->dbg_number,
bytes_read);
- spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
@@ -556,16 +664,32 @@ int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
kfree(lb);
}
- spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(ps3_vuart_read);
-int ps3_vuart_read_async(struct ps3_vuart_port_device *dev, work_func_t func,
- unsigned int bytes)
+/**
+ * ps3_vuart_work - Asynchronous read handler.
+ */
+
+static void ps3_vuart_work(struct work_struct *work)
+{
+ struct ps3_system_bus_device *dev =
+ ps3_vuart_work_to_system_bus_dev(work);
+ struct ps3_vuart_port_driver *drv =
+ ps3_system_bus_dev_to_vuart_drv(dev);
+
+ BUG_ON(!drv);
+ drv->work(dev);
+}
+
+int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
{
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
- if(dev->priv->work.trigger) {
+ if (priv->rx_list.work.trigger) {
dev_dbg(&dev->core, "%s:%d: warning, multiple calls\n",
__func__, __LINE__);
return -EAGAIN;
@@ -573,30 +697,32 @@ int ps3_vuart_read_async(struct ps3_vuart_port_device *dev, work_func_t func,
BUG_ON(!bytes);
- PREPARE_WORK(&dev->priv->work.work, func);
+ PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
- spin_lock_irqsave(&dev->priv->work.lock, flags);
- if(dev->priv->rx_list.bytes_held >= bytes) {
+ spin_lock_irqsave(&priv->rx_list.lock, flags);
+ if (priv->rx_list.bytes_held >= bytes) {
dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
__func__, __LINE__, bytes);
- schedule_work(&dev->priv->work.work);
- spin_unlock_irqrestore(&dev->priv->work.lock, flags);
+ schedule_work(&priv->rx_list.work.work);
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
- dev->priv->work.trigger = bytes;
- spin_unlock_irqrestore(&dev->priv->work.lock, flags);
+ priv->rx_list.work.trigger = bytes;
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d: waiting for %u(%xh) bytes\n", __func__,
__LINE__, bytes, bytes);
return 0;
}
+EXPORT_SYMBOL_GPL(ps3_vuart_read_async);
-void ps3_vuart_cancel_async(struct ps3_vuart_port_device *dev)
+void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev)
{
- dev->priv->work.trigger = 0;
+ to_port_priv(dev)->rx_list.work.trigger = 0;
}
+EXPORT_SYMBOL_GPL(ps3_vuart_cancel_async);
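
For clarity, here is a hedged sketch (not taken from the patch) of the two ways a port
driver can now consume received data: ps3_vuart_read() queues pending port bytes itself
and returns -EAGAIN until the request can be satisfied, so polled callers simply retry,
while ps3_vuart_read_async() arms the driver's work callback for a given byte count.
The demo_ helpers and the 4-byte trigger are assumptions made only for illustration.

	#include <linux/delay.h>

	/* Polled read: retry until the rx list holds enough bytes. */
	static int demo_read_polled(struct ps3_system_bus_device *dev,
		void *buf, unsigned int bytes)
	{
		int result;

		while ((result = ps3_vuart_read(dev, buf, bytes)) == -EAGAIN)
			msleep(1);

		return result;
	}

	/* Async read: request a callback (drv->work) once 4 bytes have arrived. */
	static int demo_arm_async(struct ps3_system_bus_device *dev)
	{
		return ps3_vuart_read_async(dev, 4);
	}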
/**
* ps3_vuart_handle_interrupt_tx - third stage transmit interrupt handler
@@ -606,18 +732,19 @@ void ps3_vuart_cancel_async(struct ps3_vuart_port_device *dev)
* adjusts the final list buffer state for a partial write.
*/
-static int ps3_vuart_handle_interrupt_tx(struct ps3_vuart_port_device *dev)
+static int ps3_vuart_handle_interrupt_tx(struct ps3_system_bus_device *dev)
{
int result = 0;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb, *n;
unsigned long bytes_total = 0;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
- spin_lock_irqsave(&dev->priv->tx_list.lock, flags);
+ spin_lock_irqsave(&priv->tx_list.lock, flags);
- list_for_each_entry_safe(lb, n, &dev->priv->tx_list.head, link) {
+ list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) {
unsigned long bytes_written;
@@ -651,7 +778,7 @@ static int ps3_vuart_handle_interrupt_tx(struct ps3_vuart_port_device *dev)
ps3_vuart_disable_interrupt_tx(dev);
port_full:
- spin_unlock_irqrestore(&dev->priv->tx_list.lock, flags);
+ spin_unlock_irqrestore(&priv->tx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d wrote %lxh bytes total\n",
__func__, __LINE__, bytes_total);
return result;
@@ -665,60 +792,37 @@ port_full:
* buffer list. Buffer list data is dequeued via ps3_vuart_read.
*/
-static int ps3_vuart_handle_interrupt_rx(struct ps3_vuart_port_device *dev)
+static int ps3_vuart_handle_interrupt_rx(struct ps3_system_bus_device *dev)
{
- static unsigned long dbg_number;
- int result = 0;
+ int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
- struct list_buffer *lb;
- unsigned long bytes;
+ u64 bytes;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
- result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
-
- if (result)
- return -EIO;
-
- BUG_ON(!bytes);
-
- /* Add some extra space for recently arrived data. */
-
- bytes += 128;
-
- lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
+ spin_lock_irqsave(&priv->rx_list.lock, flags);
+ result = ps3_vuart_queue_rx_bytes(dev, &bytes);
- if (!lb)
- return -ENOMEM;
-
- ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
-
- lb->head = lb->data;
- lb->tail = lb->data + bytes;
- lb->dbg_number = ++dbg_number;
-
- spin_lock_irqsave(&dev->priv->rx_list.lock, flags);
- list_add_tail(&lb->link, &dev->priv->rx_list.head);
- dev->priv->rx_list.bytes_held += bytes;
- spin_unlock_irqrestore(&dev->priv->rx_list.lock, flags);
-
- dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %lxh bytes\n",
- __func__, __LINE__, lb->dbg_number, bytes);
+ if (result) {
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
+ return result;
+ }
- spin_lock_irqsave(&dev->priv->work.lock, flags);
- if(dev->priv->work.trigger
- && dev->priv->rx_list.bytes_held >= dev->priv->work.trigger) {
+ if (priv->rx_list.work.trigger && priv->rx_list.bytes_held
+ >= priv->rx_list.work.trigger) {
dev_dbg(&dev->core, "%s:%d: schedule_work %lxh bytes\n",
- __func__, __LINE__, dev->priv->work.trigger);
- dev->priv->work.trigger = 0;
- schedule_work(&dev->priv->work.work);
+ __func__, __LINE__, priv->rx_list.work.trigger);
+ priv->rx_list.work.trigger = 0;
+ schedule_work(&priv->rx_list.work.work);
}
- spin_unlock_irqrestore(&dev->priv->work.lock, flags);
- return 0;
+
+ spin_unlock_irqrestore(&priv->rx_list.lock, flags);
+ return result;
}
static int ps3_vuart_handle_interrupt_disconnect(
- struct ps3_vuart_port_device *dev)
+ struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
BUG_ON("no support");
@@ -733,9 +837,10 @@ static int ps3_vuart_handle_interrupt_disconnect(
* stage handler after one iteration.
*/
-static int ps3_vuart_handle_port_interrupt(struct ps3_vuart_port_device *dev)
+static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
{
int result;
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long status;
result = ps3_vuart_get_interrupt_status(dev, &status);
@@ -747,21 +852,21 @@ static int ps3_vuart_handle_port_interrupt(struct ps3_vuart_port_device *dev)
status);
if (status & INTERRUPT_MASK_DISCONNECT) {
- dev->priv->stats.disconnect_interrupts++;
+ priv->stats.disconnect_interrupts++;
result = ps3_vuart_handle_interrupt_disconnect(dev);
if (result)
ps3_vuart_disable_interrupt_disconnect(dev);
}
if (status & INTERRUPT_MASK_TX) {
- dev->priv->stats.tx_interrupts++;
+ priv->stats.tx_interrupts++;
result = ps3_vuart_handle_interrupt_tx(dev);
if (result)
ps3_vuart_disable_interrupt_tx(dev);
}
if (status & INTERRUPT_MASK_RX) {
- dev->priv->stats.rx_interrupts++;
+ priv->stats.rx_interrupts++;
result = ps3_vuart_handle_interrupt_rx(dev);
if (result)
ps3_vuart_disable_interrupt_rx(dev);
@@ -771,11 +876,11 @@ static int ps3_vuart_handle_port_interrupt(struct ps3_vuart_port_device *dev)
}
struct vuart_bus_priv {
- const struct ports_bmp bmp;
+ struct ports_bmp *bmp;
unsigned int virq;
struct semaphore probe_mutex;
int use_count;
- struct ps3_vuart_port_device *devices[PORT_COUNT];
+ struct ps3_system_bus_device *devices[PORT_COUNT];
} static vuart_bus_priv;
/**
@@ -788,17 +893,16 @@ struct vuart_bus_priv {
static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
{
- struct vuart_bus_priv *bus_priv;
+ struct vuart_bus_priv *bus_priv = _private;
- BUG_ON(!_private);
- bus_priv = (struct vuart_bus_priv *)_private;
+ BUG_ON(!bus_priv);
while (1) {
unsigned int port;
- dump_ports_bmp(&bus_priv->bmp);
+ dump_ports_bmp(bus_priv->bmp);
- port = (BITS_PER_LONG - 1) - __ilog2(bus_priv->bmp.status);
+ port = (BITS_PER_LONG - 1) - __ilog2(bus_priv->bmp->status);
if (port == BITS_PER_LONG)
break;
@@ -812,100 +916,144 @@ static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
return IRQ_HANDLED;
}
-static int ps3_vuart_match(struct device *_dev, struct device_driver *_drv)
+static int ps3_vuart_bus_interrupt_get(void)
{
int result;
- struct ps3_vuart_port_driver *drv = to_ps3_vuart_port_driver(_drv);
- struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
- result = dev->match_id == drv->match_id;
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+ vuart_bus_priv.use_count++;
+
+ BUG_ON(vuart_bus_priv.use_count > 2);
+
+ if (vuart_bus_priv.use_count != 1) {
+ return 0;
+ }
+
+ BUG_ON(vuart_bus_priv.bmp);
+
+ vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL);
+
+ if (!vuart_bus_priv.bmp) {
+ pr_debug("%s:%d: kzalloc failed.\n", __func__, __LINE__);
+ result = -ENOMEM;
+ goto fail_bmp_malloc;
+ }
+
+ result = ps3_vuart_irq_setup(PS3_BINDING_CPU_ANY, vuart_bus_priv.bmp,
+ &vuart_bus_priv.virq);
+
+ if (result) {
+ pr_debug("%s:%d: ps3_vuart_irq_setup failed (%d)\n",
+ __func__, __LINE__, result);
+ result = -EPERM;
+ goto fail_alloc_irq;
+ }
+
+ result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
+ IRQF_DISABLED, "vuart", &vuart_bus_priv);
- dev_info(&dev->core, "%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__,
- __LINE__, dev->match_id, dev->core.bus_id, drv->match_id,
- drv->core.name, (result ? "match" : "miss"));
+ if (result) {
+ pr_debug("%s:%d: request_irq failed (%d)\n",
+ __func__, __LINE__, result);
+ goto fail_request_irq;
+ }
+ pr_debug(" <- %s:%d: ok\n", __func__, __LINE__);
return result;
+
+fail_request_irq:
+ ps3_vuart_irq_destroy(vuart_bus_priv.virq);
+ vuart_bus_priv.virq = NO_IRQ;
+fail_alloc_irq:
+ kfree(vuart_bus_priv.bmp);
+ vuart_bus_priv.bmp = NULL;
+fail_bmp_malloc:
+ vuart_bus_priv.use_count--;
+ pr_debug(" <- %s:%d: failed\n", __func__, __LINE__);
+ return result;
+}
+
+static int ps3_vuart_bus_interrupt_put(void)
+{
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+ vuart_bus_priv.use_count--;
+
+ BUG_ON(vuart_bus_priv.use_count < 0);
+
+ if (vuart_bus_priv.use_count != 0)
+ return 0;
+
+ free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
+
+ ps3_vuart_irq_destroy(vuart_bus_priv.virq);
+ vuart_bus_priv.virq = NO_IRQ;
+
+ kfree(vuart_bus_priv.bmp);
+ vuart_bus_priv.bmp = NULL;
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
}
-static int ps3_vuart_probe(struct device *_dev)
+static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
{
int result;
- unsigned int port_number;
- struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
- struct ps3_vuart_port_driver *drv =
- to_ps3_vuart_port_driver(_dev->driver);
+ struct ps3_vuart_port_driver *drv;
+ struct ps3_vuart_port_priv *priv = NULL;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+ drv = ps3_system_bus_dev_to_vuart_drv(dev);
+
+ dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
+ drv->core.core.name);
+
BUG_ON(!drv);
- down(&vuart_bus_priv.probe_mutex);
+ if (dev->port_number >= PORT_COUNT) {
+ BUG();
+ return -EINVAL;
+ }
- /* Setup vuart_bus_priv.devices[]. */
+ down(&vuart_bus_priv.probe_mutex);
- result = ps3_vuart_match_id_to_port(dev->match_id,
- &port_number);
+ result = ps3_vuart_bus_interrupt_get();
- if (result) {
- dev_dbg(&dev->core, "%s:%d: unknown match_id (%d)\n",
- __func__, __LINE__, dev->match_id);
- result = -EINVAL;
- goto fail_match;
- }
+ if (result)
+ goto fail_setup_interrupt;
- if (vuart_bus_priv.devices[port_number]) {
+ if (vuart_bus_priv.devices[dev->port_number]) {
dev_dbg(&dev->core, "%s:%d: port busy (%d)\n", __func__,
- __LINE__, port_number);
+ __LINE__, dev->port_number);
result = -EBUSY;
- goto fail_match;
+ goto fail_busy;
}
- vuart_bus_priv.devices[port_number] = dev;
+ vuart_bus_priv.devices[dev->port_number] = dev;
- /* Setup dev->priv. */
+ /* Setup dev->driver_priv. */
- dev->priv = kzalloc(sizeof(struct ps3_vuart_port_priv), GFP_KERNEL);
+ dev->driver_priv = kzalloc(sizeof(struct ps3_vuart_port_priv),
+ GFP_KERNEL);
- if (!dev->priv) {
+ if (!dev->driver_priv) {
result = -ENOMEM;
- goto fail_alloc;
+ goto fail_dev_malloc;
}
- dev->priv->port_number = port_number;
-
- INIT_LIST_HEAD(&dev->priv->tx_list.head);
- spin_lock_init(&dev->priv->tx_list.lock);
+ priv = to_port_priv(dev);
- INIT_LIST_HEAD(&dev->priv->rx_list.head);
- spin_lock_init(&dev->priv->rx_list.lock);
+ INIT_LIST_HEAD(&priv->tx_list.head);
+ spin_lock_init(&priv->tx_list.lock);
- INIT_WORK(&dev->priv->work.work, NULL);
- spin_lock_init(&dev->priv->work.lock);
- dev->priv->work.trigger = 0;
- dev->priv->work.dev = dev;
+ INIT_LIST_HEAD(&priv->rx_list.head);
+ spin_lock_init(&priv->rx_list.lock);
- if (++vuart_bus_priv.use_count == 1) {
-
- result = ps3_vuart_irq_setup(PS3_BINDING_CPU_ANY,
- (void*)&vuart_bus_priv.bmp.status, &vuart_bus_priv.virq);
-
- if (result) {
- dev_dbg(&dev->core,
- "%s:%d: ps3_vuart_irq_setup failed (%d)\n",
- __func__, __LINE__, result);
- result = -EPERM;
- goto fail_alloc_irq;
- }
-
- result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
- IRQF_DISABLED, "vuart", &vuart_bus_priv);
-
- if (result) {
- dev_info(&dev->core, "%s:%d: request_irq failed (%d)\n",
- __func__, __LINE__, result);
- goto fail_request_irq;
- }
- }
+ INIT_WORK(&priv->rx_list.work.work, NULL);
+ priv->rx_list.work.trigger = 0;
+ priv->rx_list.work.dev = dev;
/* clear stale pending interrupts */
@@ -936,150 +1084,158 @@ static int ps3_vuart_probe(struct device *_dev)
fail_probe:
ps3_vuart_set_interrupt_mask(dev, 0);
-fail_request_irq:
- ps3_vuart_irq_destroy(vuart_bus_priv.virq);
- vuart_bus_priv.virq = NO_IRQ;
-fail_alloc_irq:
- --vuart_bus_priv.use_count;
- kfree(dev->priv);
- dev->priv = NULL;
-fail_alloc:
- vuart_bus_priv.devices[port_number] = NULL;
-fail_match:
+ kfree(dev->driver_priv);
+ dev->driver_priv = NULL;
+fail_dev_malloc:
+ vuart_bus_priv.devices[dev->port_number] = NULL;
+fail_busy:
+ ps3_vuart_bus_interrupt_put();
+fail_setup_interrupt:
up(&vuart_bus_priv.probe_mutex);
- dev_dbg(&dev->core, "%s:%d failed\n", __func__, __LINE__);
+ dev_dbg(&dev->core, "%s:%d: failed\n", __func__, __LINE__);
return result;
}
-static int ps3_vuart_remove(struct device *_dev)
+/**
+ * ps3_vuart_cleanup - common cleanup helper.
+ * @dev: The struct ps3_system_bus_device instance.
+ *
+ * Cleans interrupts and HV resources. Must be called with
+ * vuart_bus_priv.probe_mutex held. Used by ps3_vuart_remove and
+ * ps3_vuart_shutdown. After this call, polled reading will still work.
+ */
+
+static int ps3_vuart_cleanup(struct ps3_system_bus_device *dev)
{
- struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
- struct ps3_vuart_port_driver *drv =
- to_ps3_vuart_port_driver(_dev->driver);
+ dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+
+ ps3_vuart_cancel_async(dev);
+ ps3_vuart_set_interrupt_mask(dev, 0);
+ ps3_vuart_bus_interrupt_put();
+ return 0;
+}
+
+/**
+ * ps3_vuart_remove - Completely clean the device instance.
+ * @dev: The struct ps3_system_bus_device instance.
+ *
+ * Cleans all memory, interrupts and HV resources. After this call the
+ * device can no longer be used.
+ */
+
+static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+{
+ struct ps3_vuart_port_priv *priv = to_port_priv(dev);
+ struct ps3_vuart_port_driver *drv;
+
+ BUG_ON(!dev);
down(&vuart_bus_priv.probe_mutex);
- dev_dbg(&dev->core, "%s:%d: %s\n", __func__, __LINE__,
- dev->core.bus_id);
+ dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+ dev->match_id);
- BUG_ON(vuart_bus_priv.use_count < 1);
+ if (!dev->core.driver) {
+ dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+ __LINE__);
+ up(&vuart_bus_priv.probe_mutex);
+ return 0;
+ }
- if (drv->remove)
- drv->remove(dev);
- else
- dev_dbg(&dev->core, "%s:%d: %s no remove method\n", __func__,
- __LINE__, dev->core.bus_id);
+ drv = ps3_system_bus_dev_to_vuart_drv(dev);
- vuart_bus_priv.devices[dev->priv->port_number] = NULL;
+ BUG_ON(!drv);
- if (--vuart_bus_priv.use_count == 0) {
+ if (drv->remove) {
+ drv->remove(dev);
+ } else {
+ dev_dbg(&dev->core, "%s:%d: no remove method\n", __func__,
+ __LINE__);
BUG();
- free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
- ps3_vuart_irq_destroy(vuart_bus_priv.virq);
- vuart_bus_priv.virq = NO_IRQ;
}
- kfree(dev->priv);
- dev->priv = NULL;
+ ps3_vuart_cleanup(dev);
+
+ vuart_bus_priv.devices[dev->port_number] = NULL;
+ kfree(priv);
+ priv = NULL;
+ dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
up(&vuart_bus_priv.probe_mutex);
return 0;
}
-static void ps3_vuart_shutdown(struct device *_dev)
-{
- struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
- struct ps3_vuart_port_driver *drv =
- to_ps3_vuart_port_driver(_dev->driver);
-
- dev_dbg(&dev->core, "%s:%d: %s\n", __func__, __LINE__,
- dev->core.bus_id);
-
- if (drv->shutdown)
- drv->shutdown(dev);
- else
- dev_dbg(&dev->core, "%s:%d: %s no shutdown method\n", __func__,
- __LINE__, dev->core.bus_id);
-}
-
/**
- * ps3_vuart_bus - The vuart bus instance.
+ * ps3_vuart_shutdown - Cleans interrupts and HV resources.
+ * @dev: The struct ps3_system_bus_device instance.
*
- * The vuart is managed as a bus that port devices connect to.
+ * Cleans interrupts and HV resources. After this call the
+ * device can still be used in polling mode. This behavior required
+ * by sys-manager to be able to complete the device power operation
+ * sequence.
*/
-struct bus_type ps3_vuart_bus = {
- .name = "ps3_vuart",
- .match = ps3_vuart_match,
- .probe = ps3_vuart_probe,
- .remove = ps3_vuart_remove,
- .shutdown = ps3_vuart_shutdown,
-};
-
-int __init ps3_vuart_bus_init(void)
+static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
{
- int result;
+ struct ps3_vuart_port_driver *drv;
- pr_debug("%s:%d:\n", __func__, __LINE__);
+ BUG_ON(!dev);
- if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
- return -ENODEV;
+ down(&vuart_bus_priv.probe_mutex);
- init_MUTEX(&vuart_bus_priv.probe_mutex);
- result = bus_register(&ps3_vuart_bus);
- BUG_ON(result);
+ dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
+ dev->match_id);
- return result;
-}
+ if (!dev->core.driver) {
+ dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
+ __LINE__);
+ up(&vuart_bus_priv.probe_mutex);
+ return 0;
+ }
-void __exit ps3_vuart_bus_exit(void)
-{
- pr_debug("%s:%d:\n", __func__, __LINE__);
- bus_unregister(&ps3_vuart_bus);
-}
+ drv = ps3_system_bus_dev_to_vuart_drv(dev);
-core_initcall(ps3_vuart_bus_init);
-module_exit(ps3_vuart_bus_exit);
+ BUG_ON(!drv);
-/**
- * ps3_vuart_port_release_device - Remove a vuart port device.
- */
+ if (drv->shutdown)
+ drv->shutdown(dev);
+ else if (drv->remove) {
+ dev_dbg(&dev->core, "%s:%d: no shutdown, calling remove\n",
+ __func__, __LINE__);
+ drv->remove(dev);
+ } else {
+ dev_dbg(&dev->core, "%s:%d: no shutdown method\n", __func__,
+ __LINE__);
+ BUG();
+ }
-static void ps3_vuart_port_release_device(struct device *_dev)
-{
-#if defined(DEBUG)
- struct ps3_vuart_port_device *dev = to_ps3_vuart_port_device(_dev);
+ ps3_vuart_cleanup(dev);
- dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
+ dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
- BUG_ON(dev->priv && "forgot to free");
- memset(&dev->core, 0, sizeof(dev->core));
-#endif
+ up(&vuart_bus_priv.probe_mutex);
+ return 0;
}
-/**
- * ps3_vuart_port_device_register - Add a vuart port device.
- */
-
-int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev)
+static int __init ps3_vuart_bus_init(void)
{
- static unsigned int dev_count = 1;
-
- BUG_ON(dev->priv && "forgot to free");
+ pr_debug("%s:%d:\n", __func__, __LINE__);
- dev->core.parent = NULL;
- dev->core.bus = &ps3_vuart_bus;
- dev->core.release = ps3_vuart_port_release_device;
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return -ENODEV;
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "vuart_%02x",
- dev_count++);
+ init_MUTEX(&vuart_bus_priv.probe_mutex);
- dev_dbg(&dev->core, "%s:%d register\n", __func__, __LINE__);
+ return 0;
+}
- return device_register(&dev->core);
+static void __exit ps3_vuart_bus_exit(void)
+{
+ pr_debug("%s:%d:\n", __func__, __LINE__);
}
-EXPORT_SYMBOL_GPL(ps3_vuart_port_device_register);
+core_initcall(ps3_vuart_bus_init);
+module_exit(ps3_vuart_bus_exit);
/**
* ps3_vuart_port_driver_register - Add a vuart port device driver.
@@ -1089,12 +1245,18 @@ int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv)
{
int result;
- pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.name);
- drv->core.bus = &ps3_vuart_bus;
- result = driver_register(&drv->core);
+ pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
+
+ BUG_ON(!drv->core.match_id);
+ BUG_ON(!drv->core.core.name);
+
+ drv->core.probe = ps3_vuart_probe;
+ drv->core.remove = ps3_vuart_remove;
+ drv->core.shutdown = ps3_vuart_shutdown;
+
+ result = ps3_system_bus_driver_register(&drv->core);
return result;
}
-
EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
/**
@@ -1103,8 +1265,7 @@ EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv)
{
- pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.name);
- driver_unregister(&drv->core);
+ pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
+ ps3_system_bus_driver_unregister(&drv->core);
}
-
EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_unregister);
diff --git a/drivers/ps3/vuart.h b/drivers/ps3/vuart.h
index 1be992d568c8..eb7f6d94a890 100644
--- a/drivers/ps3/vuart.h
+++ b/drivers/ps3/vuart.h
@@ -34,29 +34,7 @@ struct ps3_vuart_stats {
struct ps3_vuart_work {
struct work_struct work;
unsigned long trigger;
- spinlock_t lock;
- struct ps3_vuart_port_device* dev; /* to convert work to device */
-};
-
-/**
- * struct ps3_vuart_port_priv - private vuart device data.
- */
-
-struct ps3_vuart_port_priv {
- unsigned int port_number;
- u64 interrupt_mask;
-
- struct {
- spinlock_t lock;
- struct list_head head;
- } tx_list;
- struct {
- unsigned long bytes_held;
- spinlock_t lock;
- struct list_head head;
- } rx_list;
- struct ps3_vuart_stats stats;
- struct ps3_vuart_work work;
+ struct ps3_system_bus_device *dev; /* to convert work to device */
};
/**
@@ -64,32 +42,30 @@ struct ps3_vuart_port_priv {
*/
struct ps3_vuart_port_driver {
- enum ps3_match_id match_id;
- struct device_driver core;
- int (*probe)(struct ps3_vuart_port_device *);
- int (*remove)(struct ps3_vuart_port_device *);
- void (*shutdown)(struct ps3_vuart_port_device *);
- int (*tx_event)(struct ps3_vuart_port_device *dev);
- int (*rx_event)(struct ps3_vuart_port_device *dev);
- int (*disconnect_event)(struct ps3_vuart_port_device *dev);
- /* int (*suspend)(struct ps3_vuart_port_device *, pm_message_t); */
- /* int (*resume)(struct ps3_vuart_port_device *); */
+ struct ps3_system_bus_driver core;
+ int (*probe)(struct ps3_system_bus_device *);
+ int (*remove)(struct ps3_system_bus_device *);
+ void (*shutdown)(struct ps3_system_bus_device *);
+ void (*work)(struct ps3_system_bus_device *);
+ /* int (*tx_event)(struct ps3_system_bus_device *dev); */
+ /* int (*rx_event)(struct ps3_system_bus_device *dev); */
+ /* int (*disconnect_event)(struct ps3_system_bus_device *dev); */
+ /* int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+ /* int (*resume)(struct ps3_system_bus_device *); */
};
int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv);
void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv);
-static inline struct ps3_vuart_port_driver *to_ps3_vuart_port_driver(
- struct device_driver *_drv)
-{
- return container_of(_drv, struct ps3_vuart_port_driver, core);
-}
-static inline struct ps3_vuart_port_device *to_ps3_vuart_port_device(
- struct device *_dev)
+static inline struct ps3_vuart_port_driver *
+ ps3_system_bus_dev_to_vuart_drv(struct ps3_system_bus_device *_dev)
{
- return container_of(_dev, struct ps3_vuart_port_device, core);
+ struct ps3_system_bus_driver *sbd =
+ ps3_system_bus_dev_to_system_bus_drv(_dev);
+ BUG_ON(!sbd);
+ return container_of(sbd, struct ps3_vuart_port_driver, core);
}
-static inline struct ps3_vuart_port_device *ps3_vuart_work_to_port_device(
+static inline struct ps3_system_bus_device *ps3_vuart_work_to_system_bus_dev(
struct work_struct *_work)
{
struct ps3_vuart_work *vw = container_of(_work, struct ps3_vuart_work,
@@ -97,14 +73,13 @@ static inline struct ps3_vuart_port_device *ps3_vuart_work_to_port_device(
return vw->dev;
}
-int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
- unsigned int bytes);
-int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
+int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
unsigned int bytes);
-int ps3_vuart_read_async(struct ps3_vuart_port_device *dev, work_func_t func,
+int ps3_vuart_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes);
-void ps3_vuart_cancel_async(struct ps3_vuart_port_device *dev);
-void ps3_vuart_clear_rx_bytes(struct ps3_vuart_port_device *dev,
+int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes);
+void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev);
+void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
unsigned int bytes);
#endif
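
To make the reshaped driver interface concrete, the following is a minimal sketch of a
vuart port driver after this conversion; it is not taken from the patch. It assumes
PS3_MATCH_ID_AV_SETTINGS as an example match id, and the demo_ names, the 16-byte
message size, and the echo behaviour are invented for illustration only.

	static void demo_work(struct ps3_system_bus_device *dev)
	{
		u8 buf[16];

		/* Runs from the workqueue once 16 bytes are buffered. */
		if (!ps3_vuart_read(dev, buf, sizeof(buf)))
			ps3_vuart_write(dev, buf, sizeof(buf)); /* echo back */

		ps3_vuart_read_async(dev, sizeof(buf)); /* re-arm */
	}

	static int demo_probe(struct ps3_system_bus_device *dev)
	{
		return ps3_vuart_read_async(dev, 16);
	}

	static int demo_remove(struct ps3_system_bus_device *dev)
	{
		ps3_vuart_cancel_async(dev);
		return 0;
	}

	static struct ps3_vuart_port_driver demo_driver = {
		.core.match_id = PS3_MATCH_ID_AV_SETTINGS, /* assumed port id */
		.core.core.name = "demo_vuart",
		.probe = demo_probe,
		.remove = demo_remove,
		.work = demo_work,
	};

	static int __init demo_init(void)
	{
		return ps3_vuart_port_driver_register(&demo_driver);
	}

Since no .shutdown hook is provided here, the bus-level ps3_vuart_shutdown() would fall
back to calling demo_remove(), matching the fallback added by this patch.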
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index f935c1f71a58..44420723a359 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -297,11 +297,10 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
struct rio_switch *rswitch;
int result, rdid;
- rdev = kmalloc(sizeof(struct rio_dev), GFP_KERNEL);
+ rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
if (!rdev)
goto out;
- memset(rdev, 0, sizeof(struct rio_dev));
rdev->net = net;
rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
&result);
@@ -801,9 +800,8 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port)
{
struct rio_net *net;
- net = kmalloc(sizeof(struct rio_net), GFP_KERNEL);
+ net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
if (net) {
- memset(net, 0, sizeof(struct rio_net));
INIT_LIST_HEAD(&net->node);
INIT_LIST_HEAD(&net->devices);
INIT_LIST_HEAD(&net->mports);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index eed91434417d..659e31164cf0 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -67,7 +67,8 @@ struct device_attribute rio_dev_attrs[] = {
};
static ssize_t
-rio_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
+rio_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
to_rio_dev(container_of(kobj, struct device, kobj));
@@ -137,7 +138,8 @@ rio_read_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
}
static ssize_t
-rio_write_config(struct kobject *kobj, char *buf, loff_t off, size_t count)
+rio_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
to_rio_dev(container_of(kobj, struct device, kobj));
@@ -197,7 +199,6 @@ static struct bin_attribute rio_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 0x200000,
.read = rio_read_config,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4e4c10a7fd3a..9d8d40d5c8f7 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -10,7 +10,6 @@ config RTC_LIB
config RTC_CLASS
tristate "RTC class"
- depends on EXPERIMENTAL
default n
select RTC_LIB
help
@@ -39,6 +38,9 @@ config RTC_HCTOSYS_DEVICE
clock, usually rtc0. Initialization is done when the system
starts up, and when it resumes from a low power state.
+ The driver for this RTC device must be loaded before late_initcall
+ functions run, so it must usually be statically linked.
+
This clock should be battery-backed, so that it reads the correct
time when the system boots from a power-off state. Otherwise, your
system will need an external clock source (like an NTP server).
@@ -119,7 +121,7 @@ config RTC_DRV_TEST
will be called rtc-test.
comment "I2C RTC drivers"
- depends on RTC_CLASS
+ depends on RTC_CLASS && I2C
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00"
@@ -160,11 +162,11 @@ config RTC_DRV_MAX6900
will be called rtc-max6900.
config RTC_DRV_RS5C372
- tristate "Ricoh RS5C372A/B"
+ tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A"
depends on RTC_CLASS && I2C
help
If you say yes here you get support for the
- Ricoh RS5C372A and RS5C372B RTC chips.
+ Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
This driver can also be built as a module. If so, the module
will be called rtc-rs5c372.
@@ -213,12 +215,40 @@ config RTC_DRV_PCF8583
This driver can also be built as a module. If so, the module
will be called rtc-pcf8583.
+config RTC_DRV_M41T80
+ tristate "ST M41T80 series RTC"
+ depends on RTC_CLASS && I2C
+ help
+ If you say Y here you will get support for the
+ ST M41T80 RTC chip series. Currently the following chips are
+ supported: M41T80, M41T81, M41T82, M41T83, M41ST84, M41ST85
+ and M41ST87.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-m41t80.
+
+config RTC_DRV_M41T80_WDT
+ bool "ST M41T80 series RTC watchdog timer"
+ depends on RTC_DRV_M41T80
+ help
+ If you say Y here you will get support for the
+ watchdog timer in the ST M41T80 RTC chip series.
+
+config RTC_DRV_TWL92330
+ boolean "TI TWL92330/Menelaus"
+ depends on RTC_CLASS && I2C && MENELAUS
+ help
+ If you say yes here you get support for the RTC on the
+ TWL92330 "Menelaus" power mangement chip, used with OMAP2
+ platforms. The support is integrated with the rest of
+ the Menelaus driver; it's not separate module.
+
comment "SPI RTC drivers"
- depends on RTC_CLASS
+ depends on RTC_CLASS && SPI_MASTER
config RTC_DRV_RS5C348
tristate "Ricoh RS5C348A/B"
- depends on RTC_CLASS && SPI
+ depends on RTC_CLASS && SPI_MASTER
help
If you say yes here you get support for the
Ricoh RS5C348A and RS5C348B RTC chips.
@@ -228,7 +258,7 @@ config RTC_DRV_RS5C348
config RTC_DRV_MAX6902
tristate "Maxim 6902"
- depends on RTC_CLASS && SPI
+ depends on RTC_CLASS && SPI_MASTER
help
If you say yes here you will get support for the
Maxim MAX6902 SPI RTC chip.
@@ -246,7 +276,7 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
depends on RTC_CLASS && (X86 || ALPHA || ARM26 || ARM \
- || M32R || ATARI || POWERPC || MIPS)
+ || M32R || ATARI || PPC || MIPS)
help
Say "yes" here to get direct support for the real time clock
found in every PC or ACPI-based system, and some other boards.
@@ -262,6 +292,12 @@ config RTC_DRV_CMOS
This driver can also be built as a module. If so, the module
will be called rtc-cmos.
+config RTC_DRV_DS1216
+ tristate "Dallas DS1216"
+ depends on RTC_CLASS && SNI_RM
+ help
+ If you say yes here you get support for the Dallas DS1216 RTC chips.
+
config RTC_DRV_DS1553
tristate "Dallas DS1553"
depends on RTC_CLASS
@@ -272,6 +308,16 @@ config RTC_DRV_DS1553
This driver can also be built as a module. If so, the module
will be called rtc-ds1553.
+config RTC_DRV_STK17TA8
+ tristate "Simtek STK17TA8"
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the
+ Simtek STK17TA8 timekeeping chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-stk17ta8.
+
config RTC_DRV_DS1742
tristate "Dallas DS1742/1743"
depends on RTC_CLASS
@@ -292,6 +338,16 @@ config RTC_DRV_M48T86
This driver can also be built as a module. If so, the module
will be called rtc-m48t86.
+config RTC_DRV_M48T59
+ tristate "ST M48T59"
+ depends on RTC_CLASS
+ help
+ If you say Y here you will get support for the
+ ST M48T59 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-m48t59.
+
config RTC_DRV_V3020
tristate "EM Microelectronic V3020"
depends on RTC_CLASS
@@ -351,7 +407,7 @@ config RTC_DRV_SA1100
config RTC_DRV_SH
tristate "SuperH On-Chip RTC"
- depends on RTC_CLASS && SUPERH
+ depends on RTC_CLASS && SUPERH && (CPU_SH3 || CPU_SH4)
help
Say Y here to enable support for the on-chip RTC found in
most SuperH processors.
@@ -379,6 +435,13 @@ config RTC_DRV_PL031
To compile this driver as a module, choose M here: the
module will be called rtc-pl031.
+config RTC_DRV_AT32AP700X
+ tristate "AT32AP700X series RTC"
+ depends on RTC_CLASS && PLATFORM_AT32AP
+ help
+ Driver for the internal RTC (Realtime Clock) on Atmel AVR32
+ AT32AP700x family processors.
+
config RTC_DRV_AT91RM9200
tristate "AT91RM9200"
depends on RTC_CLASS && ARCH_AT91RM9200
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index a1afbc236073..7ede9e725360 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
+obj-$(CONFIG_RTC_DRV_AT32AP700X) += rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
@@ -28,8 +29,10 @@ obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
+obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
+obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
@@ -41,3 +44,5 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
+obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
new file mode 100644
index 000000000000..2999214ca534
--- /dev/null
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -0,0 +1,317 @@
+/*
+ * An RTC driver for the AVR32 AT32AP700x processor series.
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+
+/*
+ * This is a bare-bones RTC. It runs during most system sleep states, but has
+ * no battery backup and gets reset during system restart. It must be
+ * initialized from an external clock (network, I2C, etc) before it can be of
+ * much use.
+ *
+ * The alarm functionality is limited by the hardware, not supporting
+ * periodic interrupts.
+ */
+
+#define RTC_CTRL 0x00
+#define RTC_CTRL_EN 0
+#define RTC_CTRL_PCLR 1
+#define RTC_CTRL_TOPEN 2
+#define RTC_CTRL_PSEL 8
+
+#define RTC_VAL 0x04
+
+#define RTC_TOP 0x08
+
+#define RTC_IER 0x10
+#define RTC_IER_TOPI 0
+
+#define RTC_IDR 0x14
+#define RTC_IDR_TOPI 0
+
+#define RTC_IMR 0x18
+#define RTC_IMR_TOPI 0
+
+#define RTC_ISR 0x1c
+#define RTC_ISR_TOPI 0
+
+#define RTC_ICR 0x20
+#define RTC_ICR_TOPI 0
+
+#define RTC_BIT(name) (1 << RTC_##name)
+#define RTC_BF(name, value) ((value) << RTC_##name)
+
+#define rtc_readl(dev, reg) \
+ __raw_readl((dev)->regs + RTC_##reg)
+#define rtc_writel(dev, reg, value) \
+ __raw_writel((value), (dev)->regs + RTC_##reg)
+
+struct rtc_at32ap700x {
+ struct rtc_device *rtc;
+ void __iomem *regs;
+ unsigned long alarm_time;
+ unsigned long irq;
+ /* Protect against concurrent register access. */
+ spinlock_t lock;
+};
+
+static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ unsigned long now;
+
+ now = rtc_readl(rtc, VAL);
+ rtc_time_to_tm(now, tm);
+
+ return 0;
+}
+
+static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ unsigned long now;
+ int ret;
+
+ ret = rtc_tm_to_time(tm, &now);
+ if (ret == 0)
+ rtc_writel(rtc, VAL, now);
+
+ return ret;
+}
+
+static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+
+ rtc_time_to_tm(rtc->alarm_time, &alrm->time);
+ alrm->pending = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
+
+ return 0;
+}
+
+static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ unsigned long rtc_unix_time;
+ unsigned long alarm_unix_time;
+ int ret;
+
+ rtc_unix_time = rtc_readl(rtc, VAL);
+
+ ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
+ if (ret)
+ return ret;
+
+ if (alarm_unix_time < rtc_unix_time)
+ return -EINVAL;
+
+ spin_lock_irq(&rtc->lock);
+ rtc->alarm_time = alarm_unix_time;
+ rtc_writel(rtc, TOP, rtc->alarm_time);
+ if (alrm->pending)
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ | RTC_BIT(CTRL_TOPEN));
+ else
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ & ~RTC_BIT(CTRL_TOPEN));
+ spin_unlock_irq(&rtc->lock);
+
+ return ret;
+}
+
+static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ spin_lock_irq(&rtc->lock);
+
+ switch (cmd) {
+ case RTC_AIE_ON:
+ if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
+ ret = -EINVAL;
+ break;
+ }
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ | RTC_BIT(CTRL_TOPEN));
+ rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+ rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
+ break;
+ case RTC_AIE_OFF:
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ & ~RTC_BIT(CTRL_TOPEN));
+ rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+ rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ spin_unlock_irq(&rtc->lock);
+
+ return ret;
+}
+
+static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
+{
+ struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
+ unsigned long isr = rtc_readl(rtc, ISR);
+ unsigned long events = 0;
+ int ret = IRQ_NONE;
+
+ spin_lock(&rtc->lock);
+
+ if (isr & RTC_BIT(ISR_TOPI)) {
+ rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
+ rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+ rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
+ & ~RTC_BIT(CTRL_TOPEN));
+ rtc_writel(rtc, VAL, rtc->alarm_time);
+ events = RTC_AF | RTC_IRQF;
+ rtc_update_irq(rtc->rtc, 1, events);
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&rtc->lock);
+
+ return ret;
+}
+
+static struct rtc_class_ops at32_rtc_ops = {
+ .ioctl = at32_rtc_ioctl,
+ .read_time = at32_rtc_readtime,
+ .set_time = at32_rtc_settime,
+ .read_alarm = at32_rtc_readalarm,
+ .set_alarm = at32_rtc_setalarm,
+};
+
+static int __init at32_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct rtc_at32ap700x *rtc;
+ int irq = -1;
+ int ret;
+
+ rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL);
+ if (!rtc) {
+ dev_dbg(&pdev->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_dbg(&pdev->dev, "no mmio resource defined\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "could not get irq\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
+ goto out;
+ }
+
+ rtc->irq = irq;
+ rtc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ if (!rtc->regs) {
+ ret = -ENOMEM;
+ dev_dbg(&pdev->dev, "could not map I/O memory\n");
+ goto out_free_irq;
+ }
+ spin_lock_init(&rtc->lock);
+
+ /*
+ * Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
+ *
+ * Do not reset VAL register, as it can hold an old time
+ * from last JTAG reset.
+ */
+ if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
+ rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
+ rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
+ rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
+ | RTC_BIT(CTRL_EN));
+ }
+
+ rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &at32_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc)) {
+ dev_dbg(&pdev->dev, "could not register rtc device\n");
+ ret = PTR_ERR(rtc->rtc);
+ goto out_iounmap;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
+ (unsigned long)rtc->regs, rtc->irq);
+
+ return 0;
+
+out_iounmap:
+ iounmap(rtc->regs);
+out_free_irq:
+ free_irq(irq, rtc);
+out:
+ kfree(rtc);
+ return ret;
+}
+
+static int __exit at32_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
+
+ free_irq(rtc->irq, rtc);
+ iounmap(rtc->regs);
+ rtc_device_unregister(rtc->rtc);
+ kfree(rtc);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+MODULE_ALIAS("at32ap700x_rtc");
+
+static struct platform_driver at32_rtc_driver = {
+ .remove = __exit_p(at32_rtc_remove),
+ .driver = {
+ .name = "at32ap700x_rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at32_rtc_init(void)
+{
+ return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe);
+}
+module_init(at32_rtc_init);
+
+static void __exit at32_rtc_exit(void)
+{
+ platform_driver_unregister(&at32_rtc_driver);
+}
+module_exit(at32_rtc_exit);
+
+MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e24ea82dc35b..5d760bb6c2cd 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -235,7 +235,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
-static int cmos_set_freq(struct device *dev, int freq)
+static int cmos_irq_set_freq(struct device *dev, int freq)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
int f;
@@ -259,6 +259,34 @@ static int cmos_set_freq(struct device *dev, int freq)
return 0;
}
+static int cmos_irq_set_state(struct device *dev, int enabled)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned char rtc_control, rtc_intr;
+ unsigned long flags;
+
+ if (!is_valid_irq(cmos->irq))
+ return -ENXIO;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ rtc_control = CMOS_READ(RTC_CONTROL);
+
+ if (enabled)
+ rtc_control |= RTC_PIE;
+ else
+ rtc_control &= ~RTC_PIE;
+
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+
+ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
+ if (is_intr(rtc_intr))
+ rtc_update_irq(cmos->rtc, 1, rtc_intr);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return 0;
+}
+
#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
static int
@@ -360,7 +388,8 @@ static const struct rtc_class_ops cmos_rtc_ops = {
.read_alarm = cmos_read_alarm,
.set_alarm = cmos_set_alarm,
.proc = cmos_procfs,
- .irq_set_freq = cmos_set_freq,
+ .irq_set_freq = cmos_irq_set_freq,
+ .irq_set_state = cmos_irq_set_state,
};
/*----------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f4e5f0040ff7..304535942de2 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -341,6 +341,8 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
case RTC_IRQP_READ:
if (ops->irq_set_freq)
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
+ else
+ err = -ENOTTY;
break;
case RTC_IRQP_SET:
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
new file mode 100644
index 000000000000..83efb88f8f23
--- /dev/null
+++ b/drivers/rtc/rtc-ds1216.c
@@ -0,0 +1,226 @@
+/*
+ * Dallas DS1216 RTC driver
+ *
+ * Copyright (c) 2007 Thomas Bogendoerfer
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/bcd.h>
+
+#define DRV_VERSION "0.1"
+
+struct ds1216_regs {
+ u8 tsec;
+ u8 sec;
+ u8 min;
+ u8 hour;
+ u8 wday;
+ u8 mday;
+ u8 month;
+ u8 year;
+};
+
+#define DS1216_HOUR_1224 (1 << 7)
+#define DS1216_HOUR_AMPM (1 << 5)
+
+struct ds1216_priv {
+ struct rtc_device *rtc;
+ void __iomem *ioaddr;
+ size_t size;
+ unsigned long baseaddr;
+};
+
+static const u8 magic[] = {
+ 0xc5, 0x3a, 0xa3, 0x5c, 0xc5, 0x3a, 0xa3, 0x5c
+};
+
+/*
+ * Read the 64 bits we'd like to have - it's a series
+ * of 64 bits showing up in the LSB of the base register.
+ *
+ */
+static void ds1216_read(u8 __iomem *ioaddr, u8 *buf)
+{
+ unsigned char c;
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ c = 0;
+ for (j = 0; j < 8; j++)
+ c |= (readb(ioaddr) & 0x1) << j;
+ buf[i] = c;
+ }
+}
+
+static void ds1216_write(u8 __iomem *ioaddr, const u8 *buf)
+{
+ unsigned char c;
+ int i, j;
+
+ for (i = 0; i < 8; i++) {
+ c = buf[i];
+ for (j = 0; j < 8; j++) {
+ writeb(c, ioaddr);
+ c = c >> 1;
+ }
+ }
+}
+
+static void ds1216_switch_ds_to_clock(u8 __iomem *ioaddr)
+{
+ /* Reset magic pointer */
+ readb(ioaddr);
+ /* Write 64 bit magic to DS1216 */
+ ds1216_write(ioaddr, magic);
+}
+
+static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_regs regs;
+
+ ds1216_switch_ds_to_clock(priv->ioaddr);
+ ds1216_read(priv->ioaddr, (u8 *)&regs);
+
+ tm->tm_sec = BCD2BIN(regs.sec);
+ tm->tm_min = BCD2BIN(regs.min);
+ if (regs.hour & DS1216_HOUR_1224) {
+ /* AM/PM mode */
+ tm->tm_hour = BCD2BIN(regs.hour & 0x1f);
+ if (regs.hour & DS1216_HOUR_AMPM)
+ tm->tm_hour += 12;
+ } else
+ tm->tm_hour = BCD2BIN(regs.hour & 0x3f);
+ tm->tm_wday = (regs.wday & 7) - 1;
+ tm->tm_mday = BCD2BIN(regs.mday & 0x3f);
+ tm->tm_mon = BCD2BIN(regs.month & 0x1f);
+ tm->tm_year = BCD2BIN(regs.year);
+ if (tm->tm_year < 70)
+ tm->tm_year += 100;
+ return 0;
+}
+
+static int ds1216_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ds1216_priv *priv = platform_get_drvdata(pdev);
+ struct ds1216_regs regs;
+
+ ds1216_switch_ds_to_clock(priv->ioaddr);
+ ds1216_read(priv->ioaddr, (u8 *)&regs);
+
+ regs.tsec = 0; /* clear 0.1 and 0.01 seconds */
+ regs.sec = BIN2BCD(tm->tm_sec);
+ regs.min = BIN2BCD(tm->tm_min);
+ regs.hour &= DS1216_HOUR_1224;
+ if (regs.hour && tm->tm_hour > 12) {
+ regs.hour |= DS1216_HOUR_AMPM;
+ tm->tm_hour -= 12;
+ }
+ regs.hour |= BIN2BCD(tm->tm_hour);
+ regs.wday &= ~7;
+ regs.wday |= tm->tm_wday;
+ regs.mday = BIN2BCD(tm->tm_mday);
+ regs.month = BIN2BCD(tm->tm_mon);
+ regs.year = BIN2BCD(tm->tm_year % 100);
+
+ ds1216_switch_ds_to_clock(priv->ioaddr);
+ ds1216_write(priv->ioaddr, (u8 *)&regs);
+ return 0;
+}
+
+static const struct rtc_class_ops ds1216_rtc_ops = {
+ .read_time = ds1216_rtc_read_time,
+ .set_time = ds1216_rtc_set_time,
+};
+
+static int __devinit ds1216_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ struct ds1216_priv *priv;
+ int ret = 0;
+ u8 dummy[8];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->size = res->end - res->start + 1;
+ if (!request_mem_region(res->start, priv->size, pdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->baseaddr = res->start;
+ priv->ioaddr = ioremap(priv->baseaddr, priv->size);
+ if (!priv->ioaddr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rtc = rtc_device_register("ds1216", &pdev->dev,
+ &ds1216_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto out;
+ }
+ priv->rtc = rtc;
+ platform_set_drvdata(pdev, priv);
+
+ /* dummy read to get clock into a known state */
+ ds1216_read(priv->ioaddr, dummy);
+ return 0;
+
+out:
+ if (priv->rtc)
+ rtc_device_unregister(priv->rtc);
+ if (priv->ioaddr)
+ iounmap(priv->ioaddr);
+ if (priv->baseaddr)
+ release_mem_region(priv->baseaddr, priv->size);
+ kfree(priv);
+ return ret;
+}
+
+static int __devexit ds1216_rtc_remove(struct platform_device *pdev)
+{
+ struct ds1216_priv *priv = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(priv->rtc);
+ iounmap(priv->ioaddr);
+ release_mem_region(priv->baseaddr, priv->size);
+ kfree(priv);
+ return 0;
+}
+
+static struct platform_driver ds1216_rtc_platform_driver = {
+ .driver = {
+ .name = "rtc-ds1216",
+ .owner = THIS_MODULE,
+ },
+ .probe = ds1216_rtc_probe,
+ .remove = __devexit_p(ds1216_rtc_remove),
+};
+
+static int __init ds1216_rtc_init(void)
+{
+ return platform_driver_register(&ds1216_rtc_platform_driver);
+}
+
+static void __exit ds1216_rtc_exit(void)
+{
+ platform_driver_unregister(&ds1216_rtc_platform_driver);
+}
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
+MODULE_DESCRIPTION("DS1216 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(ds1216_rtc_init);
+module_exit(ds1216_rtc_exit);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 3f0f7b8fa813..5158a625671f 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -24,29 +24,29 @@
* setting the date and time), Linux can ignore the non-clock features.
* That's a natural job for a factory or repair bench.
*
- * If the I2C "force" mechanism is used, we assume the chip is a ds1337.
- * (Much better would be board-specific tables of I2C devices, along with
- * the platform_data drivers would use to sort such issues out.)
+ * This is currently a simple no-alarms driver. If your board has the
+ * alarm irq wired up on a ds1337 or ds1339, and you want to use that,
+ * then look at the rtc-rs5c372 driver for code to steal...
*/
enum ds_type {
- unknown = 0,
- ds_1307, /* or ds1338, ... */
- ds_1337, /* or ds1339, ... */
- ds_1340, /* or st m41t00, ... */
+ ds_1307,
+ ds_1337,
+ ds_1338,
+ ds_1339,
+ ds_1340,
+ m41t00,
// rs5c372 too? different address...
};
-static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
-
-I2C_CLIENT_INSMOD;
-
-
/* RTC registers don't differ much, except for the century flag */
#define DS1307_REG_SECS 0x00 /* 00-59 */
# define DS1307_BIT_CH 0x80
+# define DS1340_BIT_nEOSC 0x80
#define DS1307_REG_MIN 0x01 /* 00-59 */
#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
+# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
+# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
#define DS1307_REG_WDAY 0x03 /* 01-07 */
@@ -56,11 +56,12 @@ I2C_CLIENT_INSMOD;
#define DS1307_REG_YEAR 0x06 /* 00-99 */
/* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
- * start at 7, and they differ a lot. Only control and status matter for RTC;
- * be careful using them.
+ * start at 7, and they differ a LOT. Only control and status matter for
+ * basic RTC date and time functionality; be careful using them.
*/
-#define DS1307_REG_CONTROL 0x07
+#define DS1307_REG_CONTROL 0x07 /* or ds1338 */
# define DS1307_BIT_OUT 0x80
+# define DS1338_BIT_OSF 0x20
# define DS1307_BIT_SQWE 0x10
# define DS1307_BIT_RS1 0x02
# define DS1307_BIT_RS0 0x01
@@ -71,6 +72,13 @@ I2C_CLIENT_INSMOD;
# define DS1337_BIT_INTCN 0x04
# define DS1337_BIT_A2IE 0x02
# define DS1337_BIT_A1IE 0x01
+#define DS1340_REG_CONTROL 0x07
+# define DS1340_BIT_OUT 0x80
+# define DS1340_BIT_FT 0x40
+# define DS1340_BIT_CALIB_SIGN 0x20
+# define DS1340_M_CALIBRATION 0x1f
+#define DS1340_REG_FLAG 0x09
+# define DS1340_BIT_OSF 0x80
#define DS1337_REG_STATUS 0x0f
# define DS1337_BIT_OSF 0x80
# define DS1337_BIT_A2I 0x02
@@ -84,21 +92,63 @@ struct ds1307 {
u8 regs[8];
enum ds_type type;
struct i2c_msg msg[2];
- struct i2c_client client;
+ struct i2c_client *client;
+ struct i2c_client dev;
struct rtc_device *rtc;
};
+struct chip_desc {
+ char name[9];
+ unsigned nvram56:1;
+ unsigned alarm:1;
+ enum ds_type type;
+};
+
+static const struct chip_desc chips[] = { {
+ .name = "ds1307",
+ .type = ds_1307,
+ .nvram56 = 1,
+}, {
+ .name = "ds1337",
+ .type = ds_1337,
+ .alarm = 1,
+}, {
+ .name = "ds1338",
+ .type = ds_1338,
+ .nvram56 = 1,
+}, {
+ .name = "ds1339",
+ .type = ds_1339,
+ .alarm = 1,
+}, {
+ .name = "ds1340",
+ .type = ds_1340,
+}, {
+ .name = "m41t00",
+ .type = m41t00,
+}, };
+
+static inline const struct chip_desc *find_chip(const char *s)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(chips); i++)
+ if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0)
+ return &chips[i];
+ return NULL;
+}
static int ds1307_get_time(struct device *dev, struct rtc_time *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
int tmp;
- /* read the RTC registers all at once */
+ /* read the RTC date and time registers all at once */
ds1307->msg[1].flags = I2C_M_RD;
ds1307->msg[1].len = 7;
- tmp = i2c_transfer(ds1307->client.adapter, ds1307->msg, 2);
+ tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
+ ds1307->msg, 2);
if (tmp != 2) {
dev_err(dev, "%s error %d\n", "read", tmp);
return -EIO;
@@ -129,7 +179,8 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
- return 0;
+ /* initial clock setting can be undefined */
+ return rtc_valid_tm(t);
}
static int ds1307_set_time(struct device *dev, struct rtc_time *t)
@@ -157,11 +208,18 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
tmp = t->tm_year - 100;
buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
- if (ds1307->type == ds_1337)
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
- else if (ds1307->type == ds_1340)
+ break;
+ case ds_1340:
buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
| DS1340_BIT_CENTURY;
+ break;
+ default:
+ break;
+ }
ds1307->msg[1].flags = 0;
ds1307->msg[1].len = 8;
@@ -170,7 +228,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
"write", buf[0], buf[1], buf[2], buf[3],
buf[4], buf[5], buf[6]);
- result = i2c_transfer(ds1307->client.adapter, &ds1307->msg[1], 1);
+ result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
+ &ds1307->msg[1], 1);
if (result != 1) {
		dev_err(dev, "%s error %d\n", "write", result);
return -EIO;
@@ -185,25 +244,29 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
static struct i2c_driver ds1307_driver;
-static int __devinit
-ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
+static int __devinit ds1307_probe(struct i2c_client *client)
{
struct ds1307 *ds1307;
int err = -ENODEV;
- struct i2c_client *client;
int tmp;
-
- if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) {
- err = -ENOMEM;
- goto exit;
+ const struct chip_desc *chip;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+
+ chip = find_chip(client->name);
+ if (!chip) {
+ dev_err(&client->dev, "unknown chip type '%s'\n",
+ client->name);
+ return -ENODEV;
}
- client = &ds1307->client;
- client->addr = address;
- client->adapter = adapter;
- client->driver = &ds1307_driver;
- client->flags = 0;
+ if (!i2c_check_functionality(adapter,
+ I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EIO;
+
+ if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
+ return -ENOMEM;
+ ds1307->client = client;
i2c_set_clientdata(client, ds1307);
ds1307->msg[0].addr = client->addr;
@@ -216,14 +279,16 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
ds1307->msg[1].len = sizeof(ds1307->regs);
ds1307->msg[1].buf = ds1307->regs;
- /* HACK: "force" implies "needs ds1337-style-oscillator setup" */
- if (kind >= 0) {
- ds1307->type = ds_1337;
+ ds1307->type = chip->type;
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
ds1307->reg_addr = DS1337_REG_CONTROL;
ds1307->msg[1].len = 2;
- tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
+ /* get registers that the "rtc" read below won't read... */
+ tmp = i2c_transfer(adapter, ds1307->msg, 2);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
@@ -233,19 +298,26 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
ds1307->reg_addr = 0;
ds1307->msg[1].len = sizeof(ds1307->regs);
- /* oscillator is off; need to turn it on */
- if ((ds1307->regs[0] & DS1337_BIT_nEOSC)
- || (ds1307->regs[1] & DS1337_BIT_OSF)) {
- printk(KERN_ERR "no ds1337 oscillator code\n");
- goto exit_free;
+ /* oscillator off? turn it on, so clock can tick. */
+ if (ds1307->regs[0] & DS1337_BIT_nEOSC)
+ i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
+ ds1307->regs[0] & ~DS1337_BIT_nEOSC);
+
+ /* oscillator fault? clear flag, and warn */
+ if (ds1307->regs[1] & DS1337_BIT_OSF) {
+ i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
+ ds1307->regs[1] & ~DS1337_BIT_OSF);
+ dev_warn(&client->dev, "SET TIME!\n");
}
- } else
- ds1307->type = ds_1307;
+ break;
+ default:
+ break;
+ }
read_rtc:
/* read RTC registers */
- tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
+ tmp = i2c_transfer(adapter, ds1307->msg, 2);
if (tmp != 2) {
pr_debug("read error %d\n", tmp);
err = -EIO;
@@ -257,72 +329,80 @@ read_rtc:
* still a few values that are clearly out-of-range.
*/
tmp = ds1307->regs[DS1307_REG_SECS];
- if (tmp & DS1307_BIT_CH) {
- if (ds1307->type && ds1307->type != ds_1307) {
- pr_debug("not a ds1307?\n");
- goto exit_free;
- }
- ds1307->type = ds_1307;
-
- /* this partial initialization should work for ds1307,
- * ds1338, ds1340, st m41t00, and more.
+ switch (ds1307->type) {
+ case ds_1340:
+ /* FIXME read register with DS1340_BIT_OSF, use that to
+ * trigger the "set time" warning (*after* restarting the
+ * oscillator!) instead of this weaker ds1307/m41t00 test.
*/
- dev_warn(&client->dev, "oscillator started; SET TIME!\n");
- i2c_smbus_write_byte_data(client, 0, 0);
- goto read_rtc;
+ case ds_1307:
+ case m41t00:
+ /* clock halted? turn it on, so clock can tick. */
+ if (tmp & DS1307_BIT_CH) {
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+ dev_warn(&client->dev, "SET TIME!\n");
+ goto read_rtc;
+ }
+ break;
+ case ds_1338:
+ /* clock halted? turn it on, so clock can tick. */
+ if (tmp & DS1307_BIT_CH)
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+
+ /* oscillator fault? clear flag, and warn */
+ if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
+ i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
+					ds1307->regs[DS1307_REG_CONTROL]
+ & ~DS1338_BIT_OSF);
+ dev_warn(&client->dev, "SET TIME!\n");
+ goto read_rtc;
+ }
+ break;
+ case ds_1337:
+ case ds_1339:
+ break;
}
+
+ tmp = ds1307->regs[DS1307_REG_SECS];
tmp = BCD2BIN(tmp & 0x7f);
if (tmp > 60)
- goto exit_free;
+ goto exit_bad;
tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
if (tmp > 60)
- goto exit_free;
+ goto exit_bad;
tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
if (tmp == 0 || tmp > 31)
- goto exit_free;
+ goto exit_bad;
tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
if (tmp == 0 || tmp > 12)
- goto exit_free;
+ goto exit_bad;
- /* force into in 24 hour mode (most chips) or
- * disable century bit (ds1340)
- */
tmp = ds1307->regs[DS1307_REG_HOUR];
- if (tmp & (1 << 6)) {
- if (tmp & (1 << 5))
- tmp = BCD2BIN(tmp & 0x1f) + 12;
- else
- tmp = BCD2BIN(tmp);
- i2c_smbus_write_byte_data(client,
- DS1307_REG_HOUR,
- BIN2BCD(tmp));
- }
-
- /* FIXME chips like 1337 can generate alarm irqs too; those are
- * worth exposing through the API (especially when the irq is
- * wakeup-capable).
- */
-
switch (ds1307->type) {
- case unknown:
- strlcpy(client->name, "unknown", I2C_NAME_SIZE);
- break;
- case ds_1307:
- strlcpy(client->name, "ds1307", I2C_NAME_SIZE);
- break;
- case ds_1337:
- strlcpy(client->name, "ds1337", I2C_NAME_SIZE);
- break;
case ds_1340:
- strlcpy(client->name, "ds1340", I2C_NAME_SIZE);
+ case m41t00:
+ /* NOTE: ignores century bits; fix before deploying
+ * systems that will run through year 2100.
+ */
break;
- }
+ default:
+ if (!(tmp & DS1307_BIT_12HR))
+ break;
- /* Tell the I2C layer a new client has arrived */
- if ((err = i2c_attach_client(client)))
- goto exit_free;
+ /* Be sure we're in 24 hour mode. Multi-master systems
+ * take note...
+ */
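+		/* i.e. 12 AM -> 0, 1 AM -> 1, ..., 11 AM -> 11,
+		 * 12 PM -> 12, 1 PM -> 13, ..., 11 PM -> 23.
+		 */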
+ tmp = BCD2BIN(tmp & 0x1f);
+ if (tmp == 12)
+ tmp = 0;
+ if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
+ tmp += 12;
+ i2c_smbus_write_byte_data(client,
+ DS1307_REG_HOUR,
+ BIN2BCD(tmp));
+ }
ds1307->rtc = rtc_device_register(client->name, &client->dev,
&ds13xx_rtc_ops, THIS_MODULE);
@@ -330,46 +410,40 @@ read_rtc:
err = PTR_ERR(ds1307->rtc);
dev_err(&client->dev,
"unable to register the class device\n");
- goto exit_detach;
+ goto exit_free;
}
return 0;
-exit_detach:
- i2c_detach_client(client);
+exit_bad:
+ dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
+ "bogus register",
+ ds1307->regs[0], ds1307->regs[1],
+ ds1307->regs[2], ds1307->regs[3],
+ ds1307->regs[4], ds1307->regs[5],
+ ds1307->regs[6]);
+
exit_free:
kfree(ds1307);
-exit:
return err;
}
-static int __devinit
-ds1307_attach_adapter(struct i2c_adapter *adapter)
-{
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return 0;
- return i2c_probe(adapter, &addr_data, ds1307_detect);
-}
-
-static int __devexit ds1307_detach_client(struct i2c_client *client)
+static int __devexit ds1307_remove(struct i2c_client *client)
{
- int err;
struct ds1307 *ds1307 = i2c_get_clientdata(client);
rtc_device_unregister(ds1307->rtc);
- if ((err = i2c_detach_client(client)))
- return err;
kfree(ds1307);
return 0;
}
static struct i2c_driver ds1307_driver = {
.driver = {
- .name = "ds1307",
+ .name = "rtc-ds1307",
.owner = THIS_MODULE,
},
- .attach_adapter = ds1307_attach_adapter,
- .detach_client = __devexit_p(ds1307_detach_client),
+ .probe = ds1307_probe,
+ .remove = __devexit_p(ds1307_remove),
};
static int __init ds1307_init(void)
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index afa64c7fa2e2..46da5714932c 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -258,8 +258,9 @@ static const struct rtc_class_ops ds1553_rtc_ops = {
.ioctl = ds1553_rtc_ioctl,
};
-static ssize_t ds1553_nvram_read(struct kobject *kobj, char *buf,
- loff_t pos, size_t size)
+static ssize_t ds1553_nvram_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
@@ -272,8 +273,9 @@ static ssize_t ds1553_nvram_read(struct kobject *kobj, char *buf,
return count;
}
-static ssize_t ds1553_nvram_write(struct kobject *kobj, char *buf,
- loff_t pos, size_t size)
+static ssize_t ds1553_nvram_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
@@ -290,7 +292,6 @@ static struct bin_attribute ds1553_nvram_attr = {
.attr = {
.name = "nvram",
.mode = S_IRUGO | S_IWUGO,
- .owner = THIS_MODULE,
},
.size = RTC_OFFSET,
.read = ds1553_nvram_read,
@@ -406,7 +407,7 @@ static __init int ds1553_init(void)
static __exit void ds1553_exit(void)
{
- return platform_driver_unregister(&ds1553_rtc_driver);
+ platform_driver_unregister(&ds1553_rtc_driver);
}
module_init(ds1553_init);
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index d68288b389dc..b2e5481ba3b6 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -127,8 +127,9 @@ static const struct rtc_class_ops ds1742_rtc_ops = {
.set_time = ds1742_rtc_set_time,
};
-static ssize_t ds1742_nvram_read(struct kobject *kobj, char *buf,
- loff_t pos, size_t size)
+static ssize_t ds1742_nvram_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
@@ -141,8 +142,9 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj, char *buf,
return count;
}
-static ssize_t ds1742_nvram_write(struct kobject *kobj, char *buf,
- loff_t pos, size_t size)
+static ssize_t ds1742_nvram_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
{
struct platform_device *pdev =
to_platform_device(container_of(kobj, struct device, kobj));
@@ -159,7 +161,6 @@ static struct bin_attribute ds1742_nvram_attr = {
.attr = {
.name = "nvram",
.mode = S_IRUGO | S_IWUGO,
- .owner = THIS_MODULE,
},
.read = ds1742_nvram_read,
.write = ds1742_nvram_write,
@@ -262,7 +263,7 @@ static __init int ds1742_init(void)
static __exit void ds1742_exit(void)
{
- return platform_driver_unregister(&ds1742_rtc_driver);
+ platform_driver_unregister(&ds1742_rtc_driver);
}
module_init(ds1742_init);
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
new file mode 100644
index 000000000000..80c4a8463065
--- /dev/null
+++ b/drivers/rtc/rtc-m41t80.c
@@ -0,0 +1,917 @@
+/*
+ * I2C client/driver for the ST M41T80 family of i2c rtc chips.
+ *
+ * Author: Alexander Bigga <ab@mycable.de>
+ *
+ * Based on m41t00.c by Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) mycable GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/reboot.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#endif
+
+#define M41T80_REG_SSEC 0
+#define M41T80_REG_SEC 1
+#define M41T80_REG_MIN 2
+#define M41T80_REG_HOUR 3
+#define M41T80_REG_WDAY 4
+#define M41T80_REG_DAY 5
+#define M41T80_REG_MON 6
+#define M41T80_REG_YEAR 7
+#define M41T80_REG_ALARM_MON 0xa
+#define M41T80_REG_ALARM_DAY 0xb
+#define M41T80_REG_ALARM_HOUR 0xc
+#define M41T80_REG_ALARM_MIN 0xd
+#define M41T80_REG_ALARM_SEC 0xe
+#define M41T80_REG_FLAGS 0xf
+#define M41T80_REG_SQW 0x13
+
+#define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1)
+#define M41T80_ALARM_REG_SIZE \
+ (M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
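+
+/* The date/time registers (SSEC..YEAR) and the alarm registers
+ * (ALARM_MON..ALARM_SEC) each form one contiguous block, so the sizes
+ * above let either group be transferred in a single I2C burst.
+ */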
+
+#define M41T80_SEC_ST (1 << 7) /* ST: Stop Bit */
+#define M41T80_ALMON_AFE (1 << 7) /* AFE: AF Enable Bit */
+#define M41T80_ALMON_SQWE (1 << 6) /* SQWE: SQW Enable Bit */
+#define M41T80_ALHOUR_HT (1 << 6) /* HT: Halt Update Bit */
+#define M41T80_FLAGS_AF (1 << 6) /* AF: Alarm Flag Bit */
+#define M41T80_FLAGS_BATT_LOW (1 << 4) /* BL: Battery Low Bit */
+
+#define M41T80_FEATURE_HT (1 << 0)
+#define M41T80_FEATURE_BL (1 << 1)
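+
+/* HT latches the clock registers at the time power was lost until the
+ * bit is cleared again (the probe code below uses that to report the
+ * power-down time); BL means a battery-low flag can be read from the
+ * flags register.
+ */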
+
+#define DRV_VERSION "0.05"
+
+struct m41t80_chip_info {
+ const char *name;
+ u8 features;
+};
+
+static const struct m41t80_chip_info m41t80_chip_info_tbl[] = {
+ {
+ .name = "m41t80",
+ .features = 0,
+ },
+ {
+ .name = "m41t81",
+ .features = M41T80_FEATURE_HT,
+ },
+ {
+ .name = "m41t81s",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41t82",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41t83",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41st84",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41st85",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+ {
+ .name = "m41st87",
+ .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
+ },
+};
+
+struct m41t80_data {
+ const struct m41t80_chip_info *chip;
+ struct rtc_device *rtc;
+};
+
+static int m41t80_get_datetime(struct i2c_client *client,
+ struct rtc_time *tm)
+{
+ u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
+ .buf = buf + M41T80_REG_SEC,
+ },
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+
+ tm->tm_sec = BCD2BIN(buf[M41T80_REG_SEC] & 0x7f);
+ tm->tm_min = BCD2BIN(buf[M41T80_REG_MIN] & 0x7f);
+ tm->tm_hour = BCD2BIN(buf[M41T80_REG_HOUR] & 0x3f);
+ tm->tm_mday = BCD2BIN(buf[M41T80_REG_DAY] & 0x3f);
+ tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
+ tm->tm_mon = BCD2BIN(buf[M41T80_REG_MON] & 0x1f) - 1;
+
+ /* assume 20YY not 19YY, and ignore the Century Bit */
+ tm->tm_year = BCD2BIN(buf[M41T80_REG_YEAR]) + 100;
+ return 0;
+}
+
+/* Sets the given date and time to the real time clock. */
+static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ u8 wbuf[1 + M41T80_DATETIME_REG_SIZE];
+ u8 *buf = &wbuf[1];
+ u8 dt_addr[1] = { M41T80_REG_SEC };
+ struct i2c_msg msgs_in[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
+ .buf = buf + M41T80_REG_SEC,
+ },
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1 + M41T80_DATETIME_REG_SIZE,
+ .buf = wbuf,
+ },
+ };
+
+ /* Read current reg values into buf[1..7] */
+ if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+
+ wbuf[0] = 0; /* offset into rtc's regs */
+ /* Merge time-data and register flags into buf[0..7] */
+ buf[M41T80_REG_SSEC] = 0;
+ buf[M41T80_REG_SEC] =
+ BIN2BCD(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
+ buf[M41T80_REG_MIN] =
+ BIN2BCD(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
+ buf[M41T80_REG_HOUR] =
+		BIN2BCD(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f);
+ buf[M41T80_REG_WDAY] =
+ (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
+ buf[M41T80_REG_DAY] =
+ BIN2BCD(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
+ buf[M41T80_REG_MON] =
+ BIN2BCD(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
+ /* assume 20YY not 19YY */
+ buf[M41T80_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
+
+ if (i2c_transfer(client->adapter, msgs, 1) != 1) {
+ dev_err(&client->dev, "write error\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct m41t80_data *clientdata = i2c_get_clientdata(client);
+ u8 reg;
+
+ if (clientdata->chip->features & M41T80_FEATURE_BL) {
+ reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ seq_printf(seq, "battery\t\t: %s\n",
+ (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
+ }
+ return 0;
+}
+#else
+#define m41t80_rtc_proc NULL
+#endif
+
+static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ return m41t80_get_datetime(to_i2c_client(dev), tm);
+}
+
+static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ return m41t80_set_datetime(to_i2c_client(dev), tm);
+}
+
+#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
+static int
+m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int rc;
+
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ case RTC_AIE_ON:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (rc < 0)
+ goto err;
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ rc &= ~M41T80_ALMON_AFE;
+ break;
+ case RTC_AIE_ON:
+ rc |= M41T80_ALMON_AFE;
+ break;
+ }
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
+ goto err;
+ return 0;
+err:
+ return -EIO;
+}
+#else
+#define m41t80_rtc_ioctl NULL
+#endif
+
+static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 wbuf[1 + M41T80_ALARM_REG_SIZE];
+ u8 *buf = &wbuf[1];
+ u8 *reg = buf - M41T80_REG_ALARM_MON;
+ u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
+ struct i2c_msg msgs_in[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_ALARM_REG_SIZE,
+ .buf = buf,
+ },
+ };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1 + M41T80_ALARM_REG_SIZE,
+ .buf = wbuf,
+ },
+ };
+
+ if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+ reg[M41T80_REG_ALARM_MON] &= ~(0x1f | M41T80_ALMON_AFE);
+ reg[M41T80_REG_ALARM_DAY] = 0;
+ reg[M41T80_REG_ALARM_HOUR] &= ~(0x3f | 0x80);
+ reg[M41T80_REG_ALARM_MIN] = 0;
+ reg[M41T80_REG_ALARM_SEC] = 0;
+
+ wbuf[0] = M41T80_REG_ALARM_MON; /* offset into rtc's regs */
+ reg[M41T80_REG_ALARM_SEC] |= t->time.tm_sec >= 0 ?
+ BIN2BCD(t->time.tm_sec) : 0x80;
+ reg[M41T80_REG_ALARM_MIN] |= t->time.tm_min >= 0 ?
+ BIN2BCD(t->time.tm_min) : 0x80;
+ reg[M41T80_REG_ALARM_HOUR] |= t->time.tm_hour >= 0 ?
+ BIN2BCD(t->time.tm_hour) : 0x80;
+ reg[M41T80_REG_ALARM_DAY] |= t->time.tm_mday >= 0 ?
+ BIN2BCD(t->time.tm_mday) : 0x80;
+ if (t->time.tm_mon >= 0)
+ reg[M41T80_REG_ALARM_MON] |= BIN2BCD(t->time.tm_mon + 1);
+ else
+ reg[M41T80_REG_ALARM_DAY] |= 0x40;
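+	/* A tm field passed in as -1 means "don't care": it is encoded
+	 * above as the top bit of that alarm register (0x40 in the DAY
+	 * register for the month) and decoded back to -1 by
+	 * m41t80_rtc_read_alarm().
+	 */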
+
+ if (i2c_transfer(client->adapter, msgs, 1) != 1) {
+ dev_err(&client->dev, "write error\n");
+ return -EIO;
+ }
+
+ if (t->enabled) {
+ reg[M41T80_REG_ALARM_MON] |= M41T80_ALMON_AFE;
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ reg[M41T80_REG_ALARM_MON]) < 0) {
+ dev_err(&client->dev, "write error\n");
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 buf[M41T80_ALARM_REG_SIZE + 1]; /* all alarm regs and flags */
+ u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
+ u8 *reg = buf - M41T80_REG_ALARM_MON;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = dt_addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = M41T80_ALARM_REG_SIZE + 1,
+ .buf = buf,
+ },
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) < 0) {
+ dev_err(&client->dev, "read error\n");
+ return -EIO;
+ }
+ t->time.tm_sec = -1;
+ t->time.tm_min = -1;
+ t->time.tm_hour = -1;
+ t->time.tm_mday = -1;
+ t->time.tm_mon = -1;
+ if (!(reg[M41T80_REG_ALARM_SEC] & 0x80))
+ t->time.tm_sec = BCD2BIN(reg[M41T80_REG_ALARM_SEC] & 0x7f);
+ if (!(reg[M41T80_REG_ALARM_MIN] & 0x80))
+ t->time.tm_min = BCD2BIN(reg[M41T80_REG_ALARM_MIN] & 0x7f);
+ if (!(reg[M41T80_REG_ALARM_HOUR] & 0x80))
+ t->time.tm_hour = BCD2BIN(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
+ if (!(reg[M41T80_REG_ALARM_DAY] & 0x80))
+ t->time.tm_mday = BCD2BIN(reg[M41T80_REG_ALARM_DAY] & 0x3f);
+ if (!(reg[M41T80_REG_ALARM_DAY] & 0x40))
+ t->time.tm_mon = BCD2BIN(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
+ t->time.tm_year = -1;
+ t->time.tm_wday = -1;
+ t->time.tm_yday = -1;
+ t->time.tm_isdst = -1;
+ t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
+ t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
+ return 0;
+}
+
+static struct rtc_class_ops m41t80_rtc_ops = {
+ .read_time = m41t80_rtc_read_time,
+ .set_time = m41t80_rtc_set_time,
+ .read_alarm = m41t80_rtc_read_alarm,
+ .set_alarm = m41t80_rtc_set_alarm,
+ .proc = m41t80_rtc_proc,
+ .ioctl = m41t80_rtc_ioctl,
+};
+
+#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+static ssize_t m41t80_sysfs_show_flags(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (val < 0)
+ return -EIO;
+ return sprintf(buf, "%#x\n", val);
+}
+static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL);
+
+static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
+ if (val < 0)
+ return -EIO;
+ val = (val >> 4) & 0xf;
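+	/* As mapped here: code 0 = SQW disabled, code 1 = 32768 Hz,
+	 * codes 2..15 = 32768 >> code Hz (8192 Hz down to 1 Hz).
+	 */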
+ switch (val) {
+ case 0:
+ break;
+ case 1:
+ val = 32768;
+ break;
+ default:
+ val = 32768 >> val;
+ }
+ return sprintf(buf, "%d\n", val);
+}
+static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int almon, sqw;
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (val) {
+ if (!is_power_of_2(val))
+ return -EINVAL;
+ val = ilog2(val);
+ if (val == 15)
+ val = 1;
+ else if (val < 14)
+ val = 15 - val;
+ else
+ return -EINVAL;
+ }
+ /* disable SQW, set SQW frequency & re-enable */
+ almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (almon < 0)
+ return -EIO;
+ sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
+ if (sqw < 0)
+ return -EIO;
+ sqw = (sqw & 0x0f) | (val << 4);
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ almon & ~M41T80_ALMON_SQWE) < 0 ||
+ i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0)
+ return -EIO;
+ if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ almon | M41T80_ALMON_SQWE) < 0)
+ return -EIO;
+ return count;
+}
+static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR,
+ m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq);
+
+static struct attribute *attrs[] = {
+ &dev_attr_flags.attr,
+ &dev_attr_sqwfreq.attr,
+ NULL,
+};
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static int m41t80_sysfs_register(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &attr_group);
+}
+#else
+static int m41t80_sysfs_register(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+/*
+ *****************************************************************************
+ *
+ * Watchdog Driver
+ *
+ *****************************************************************************
+ */
+static struct i2c_client *save_client;
+
+/* Default margin */
+#define WD_TIMO 60 /* seconds */
+
+static int wdt_margin = WD_TIMO;
+module_param(wdt_margin, int, 0);
+MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)");
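+
+/* For example (assuming the module is built as rtc-m41t80):
+ *	modprobe rtc-m41t80 wdt_margin=120
+ * Margins above 31 are rounded down to a multiple of 4 seconds by
+ * wdt_ping() below.
+ */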
+
+static unsigned long wdt_is_open;
+static int boot_flag;
+
+/**
+ * wdt_ping:
+ *
+ * Reload the watchdog register with the configured timeout.
+ */
+static void wdt_ping(void)
+{
+ unsigned char i2c_data[2];
+ struct i2c_msg msgs1[1] = {
+ {
+ .addr = save_client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = i2c_data,
+ },
+ };
+ i2c_data[0] = 0x09; /* watchdog register */
+
+ if (wdt_margin > 31)
+ i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */
+ else
+ /*
+		 * WDS = 1 (0x80), multiplier = wdt_margin, resolution = 1s (0x02)
+ */
+ i2c_data[1] = wdt_margin<<2 | 0x82;
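+	/* Either way the timeout is multiplier * resolution; the default
+	 * 60s margin, for instance, becomes multiplier 15 at 4s steps.
+	 */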
+
+ i2c_transfer(save_client->adapter, msgs1, 1);
+}
+
+/**
+ * wdt_disable:
+ *
+ * disables watchdog.
+ */
+static void wdt_disable(void)
+{
+ unsigned char i2c_data[2], i2c_buf[0x10];
+ struct i2c_msg msgs0[2] = {
+ {
+ .addr = save_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = i2c_data,
+ },
+ {
+ .addr = save_client->addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = i2c_buf,
+ },
+ };
+ struct i2c_msg msgs1[1] = {
+ {
+ .addr = save_client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = i2c_data,
+ },
+ };
+
+ i2c_data[0] = 0x09;
+ i2c_transfer(save_client->adapter, msgs0, 2);
+
+ i2c_data[0] = 0x09;
+ i2c_data[1] = 0x00;
+ i2c_transfer(save_client->adapter, msgs1, 1);
+}
+
+/**
+ * wdt_write:
+ * @file: file handle to the watchdog
+ * @buf: buffer to write (unused, as the data does not matter here)
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ * write of data will do, as we don't define content meaning.
+ */
+static ssize_t wdt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /* Can't seek (pwrite) on this device
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+ */
+ if (count) {
+ wdt_ping();
+ return 1;
+ }
+ return 0;
+}
+
+static ssize_t wdt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return 0;
+}
+
+/**
+ * wdt_ioctl:
+ * @inode: inode of the device
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features. We only actually usefully support
+ * querying capabilities and current status.
+ */
+static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_margin, rv;
+ static struct watchdog_info ident = {
+ .options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .firmware_version = 1,
+		.identity = "M41T80 WDT"
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user((struct watchdog_info __user *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(boot_flag, (int __user *)arg);
+ case WDIOC_KEEPALIVE:
+ wdt_ping();
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, (int __user *)arg))
+ return -EFAULT;
+ /* Arbitrary, can't find the card's limits */
+ if (new_margin < 1 || new_margin > 124)
+ return -EINVAL;
+ wdt_margin = new_margin;
+ wdt_ping();
+		/* Fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt_margin, (int __user *)arg);
+
+ case WDIOC_SETOPTIONS:
+ if (copy_from_user(&rv, (int __user *)arg, sizeof(int)))
+ return -EFAULT;
+
+ if (rv & WDIOS_DISABLECARD) {
+ printk(KERN_INFO
+ "rtc-m41t80: disable watchdog\n");
+ wdt_disable();
+ }
+
+ if (rv & WDIOS_ENABLECARD) {
+ printk(KERN_INFO
+ "rtc-m41t80: enable watchdog\n");
+ wdt_ping();
+ }
+
+ return -EINVAL;
+ }
+ return -ENOTTY;
+}
+
+/**
+ * wdt_open:
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ */
+static int wdt_open(struct inode *inode, struct file *file)
+{
+ if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
+ if (test_and_set_bit(0, &wdt_is_open))
+ return -EBUSY;
+ /*
+ * Activate
+ */
+ wdt_is_open = 1;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * wdt_close:
+ * @inode: inode to board
+ * @file: file handle to board
+ *
+ */
+static int wdt_release(struct inode *inode, struct file *file)
+{
+ if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
+ clear_bit(0, &wdt_is_open);
+ return 0;
+}
+
+/**
+ * notify_sys:
+ * @this: our notifier block
+ * @code: the event being reported
+ * @unused: unused
+ *
+ * Our notifier is called on system shutdowns. We want to turn the card
+ * off at reboot otherwise the machine will reboot again during memory
+ * test or worse yet during the following fsck. This would suck, in fact
+ * trust me - if it happens it does suck.
+ */
+static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ /* Disable Watchdog */
+ wdt_disable();
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations wdt_fops = {
+ .owner = THIS_MODULE,
+ .read = wdt_read,
+ .ioctl = wdt_ioctl,
+ .write = wdt_write,
+ .open = wdt_open,
+ .release = wdt_release,
+};
+
+static struct miscdevice wdt_dev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &wdt_fops,
+};
+
+/*
+ * The WDT card needs to learn about soft shutdowns in order to
+ * turn the timebomb registers off.
+ */
+static struct notifier_block wdt_notifier = {
+ .notifier_call = wdt_notify_sys,
+};
+#endif /* CONFIG_RTC_DRV_M41T80_WDT */
+
+/*
+ *****************************************************************************
+ *
+ * Driver Interface
+ *
+ *****************************************************************************
+ */
+static int m41t80_probe(struct i2c_client *client)
+{
+ int i, rc = 0;
+ struct rtc_device *rtc = NULL;
+ struct rtc_time tm;
+ const struct m41t80_chip_info *chip;
+ struct m41t80_data *clientdata = NULL;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
+ | I2C_FUNC_SMBUS_BYTE_DATA)) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ dev_info(&client->dev,
+ "chip found, driver version " DRV_VERSION "\n");
+
+ chip = NULL;
+ for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
+ if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
+ chip = &m41t80_chip_info_tbl[i];
+ break;
+ }
+ }
+ if (!chip) {
+ dev_err(&client->dev, "%s is not supported\n", client->name);
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
+ if (!clientdata) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ rtc = rtc_device_register(client->name, &client->dev,
+ &m41t80_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ rc = PTR_ERR(rtc);
+ rtc = NULL;
+ goto exit;
+ }
+
+ clientdata->rtc = rtc;
+ clientdata->chip = chip;
+ i2c_set_clientdata(client, clientdata);
+
+ /* Make sure HT (Halt Update) bit is cleared */
+ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
+ if (rc < 0)
+ goto ht_err;
+
+ if (rc & M41T80_ALHOUR_HT) {
+ if (chip->features & M41T80_FEATURE_HT) {
+ m41t80_get_datetime(client, &tm);
+ dev_info(&client->dev, "HT bit was set!\n");
+ dev_info(&client->dev,
+ "Power Down at "
+ "%04i-%02i-%02i %02i:%02i:%02i\n",
+ tm.tm_year + 1900,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+ tm.tm_min, tm.tm_sec);
+ }
+ if (i2c_smbus_write_byte_data(client,
+ M41T80_REG_ALARM_HOUR,
+ rc & ~M41T80_ALHOUR_HT) < 0)
+ goto ht_err;
+ }
+
+ /* Make sure ST (stop) bit is cleared */
+ rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
+ if (rc < 0)
+ goto st_err;
+
+ if (rc & M41T80_SEC_ST) {
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
+ rc & ~M41T80_SEC_ST) < 0)
+ goto st_err;
+ }
+
+ rc = m41t80_sysfs_register(&client->dev);
+ if (rc)
+ goto exit;
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+ if (chip->features & M41T80_FEATURE_HT) {
+ rc = misc_register(&wdt_dev);
+ if (rc)
+ goto exit;
+ rc = register_reboot_notifier(&wdt_notifier);
+ if (rc) {
+ misc_deregister(&wdt_dev);
+ goto exit;
+ }
+ save_client = client;
+ }
+#endif
+ return 0;
+
+st_err:
+ rc = -EIO;
+ dev_err(&client->dev, "Can't clear ST bit\n");
+ goto exit;
+ht_err:
+ rc = -EIO;
+ dev_err(&client->dev, "Can't clear HT bit\n");
+ goto exit;
+
+exit:
+ if (rtc)
+ rtc_device_unregister(rtc);
+ kfree(clientdata);
+ return rc;
+}
+
+static int m41t80_remove(struct i2c_client *client)
+{
+ struct m41t80_data *clientdata = i2c_get_clientdata(client);
+ struct rtc_device *rtc = clientdata->rtc;
+
+#ifdef CONFIG_RTC_DRV_M41T80_WDT
+ if (clientdata->chip->features & M41T80_FEATURE_HT) {
+ misc_deregister(&wdt_dev);
+ unregister_reboot_notifier(&wdt_notifier);
+ }
+#endif
+ if (rtc)
+ rtc_device_unregister(rtc);
+ kfree(clientdata);
+
+ return 0;
+}
+
+static struct i2c_driver m41t80_driver = {
+ .driver = {
+ .name = "m41t80",
+ },
+ .probe = m41t80_probe,
+ .remove = m41t80_remove,
+};
+
+static int __init m41t80_rtc_init(void)
+{
+ return i2c_add_driver(&m41t80_driver);
+}
+
+static void __exit m41t80_rtc_exit(void)
+{
+ i2c_del_driver(&m41t80_driver);
+}
+
+MODULE_AUTHOR("Alexander Bigga <ab@mycable.de>");
+MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(m41t80_rtc_init);
+module_exit(m41t80_rtc_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
new file mode 100644
index 000000000000..33b752350ab5
--- /dev/null
+++ b/drivers/rtc/rtc-m48t59.c
@@ -0,0 +1,491 @@
+/*
+ * ST M48T59 RTC driver
+ *
+ * Copyright (c) 2007 Wind River Systems, Inc.
+ *
+ * Author: Mark Zhan <rongkai.zhan@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/rtc/m48t59.h>
+#include <linux/bcd.h>
+
+#ifndef NO_IRQ
+#define NO_IRQ (-1)
+#endif
+
+#define M48T59_READ(reg) pdata->read_byte(dev, reg)
+#define M48T59_WRITE(val, reg) pdata->write_byte(dev, reg, val)
+
+#define M48T59_SET_BITS(mask, reg) \
+ M48T59_WRITE((M48T59_READ(reg) | (mask)), (reg))
+#define M48T59_CLEAR_BITS(mask, reg) \
+ M48T59_WRITE((M48T59_READ(reg) & ~(mask)), (reg))
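+
+/* Multi-register accesses below are bracketed with the control
+ * register's READ/WRITE bits so that the chip presents (or takes over)
+ * a consistent snapshot of the clock while several registers are
+ * touched.
+ */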
+
+struct m48t59_private {
+ void __iomem *ioaddr;
+ unsigned int size; /* iomem size */
+	int irq;
+ struct rtc_device *rtc;
+ spinlock_t lock; /* serialize the NVRAM and RTC access */
+};
+
+/*
+ * This is the generic access method when the chip is memory-mapped
+ */
+static void
+m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+ writeb(val, m48t59->ioaddr+ofs);
+}
+
+static u8
+m48t59_mem_readb(struct device *dev, u32 ofs)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+ return readb(m48t59->ioaddr+ofs);
+}
+
+/*
+ * NOTE: M48T59 only uses BCD mode
+ */
+static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the READ command */
+ M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+
+ tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
+ /* tm_mon is 0-11 */
+ tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+ tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_MDAY));
+
+ val = M48T59_READ(M48T59_WDAY);
+ if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB)) {
+ dev_dbg(dev, "Century bit is enabled\n");
+ tm->tm_year += 100; /* one century */
+ }
+
+ tm->tm_wday = BCD2BIN(val & 0x07);
+ tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_HOUR) & 0x3F);
+ tm->tm_min = BCD2BIN(M48T59_READ(M48T59_MIN) & 0x7F);
+ tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_SEC) & 0x7F);
+
+ /* Clear the READ bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ dev_dbg(dev, "RTC read time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return 0;
+}
+
+static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u8 val = 0;
+
+ dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the WRITE command */
+ M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+
+ M48T59_WRITE((BIN2BCD(tm->tm_sec) & 0x7F), M48T59_SEC);
+ M48T59_WRITE((BIN2BCD(tm->tm_min) & 0x7F), M48T59_MIN);
+ M48T59_WRITE((BIN2BCD(tm->tm_hour) & 0x3F), M48T59_HOUR);
+ M48T59_WRITE((BIN2BCD(tm->tm_mday) & 0x3F), M48T59_MDAY);
+ /* tm_mon is 0-11 */
+ M48T59_WRITE((BIN2BCD(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
+ M48T59_WRITE(BIN2BCD(tm->tm_year % 100), M48T59_YEAR);
+
+ if (tm->tm_year/100)
+ val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
+ val |= (BIN2BCD(tm->tm_wday) & 0x07);
+ M48T59_WRITE(val, M48T59_WDAY);
+
+ /* Clear the WRITE bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+ return 0;
+}
+
+/*
+ * Read alarm time and date in RTC
+ */
+static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned long flags;
+ u8 val;
+
+ /* If no irq, we don't support ALARM */
+ if (m48t59->irq == NO_IRQ)
+ return -EIO;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the READ command */
+ M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+
+ tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
+ /* tm_mon is 0-11 */
+ tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
+
+ val = M48T59_READ(M48T59_WDAY);
+ if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB))
+ tm->tm_year += 100; /* one century */
+
+ tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_ALARM_DATE));
+ tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_ALARM_HOUR));
+ tm->tm_min = BCD2BIN(M48T59_READ(M48T59_ALARM_MIN));
+ tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_ALARM_SEC));
+
+ /* Clear the READ bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ dev_dbg(dev, "RTC read alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return 0;
+}
+
+/*
+ * Set alarm time and date in RTC
+ */
+static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+ u8 mday, hour, min, sec;
+ unsigned long flags;
+
+ /* If no irq, we don't support ALARM */
+ if (m48t59->irq == NO_IRQ)
+ return -EIO;
+
+ /*
+ * 0xff means "always match"
+ */
+ mday = tm->tm_mday;
+ mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
+ if (mday == 0xff)
+ mday = M48T59_READ(M48T59_MDAY);
+
+ hour = tm->tm_hour;
+ hour = (hour < 24) ? BIN2BCD(hour) : 0x00;
+
+ min = tm->tm_min;
+ min = (min < 60) ? BIN2BCD(min) : 0x00;
+
+ sec = tm->tm_sec;
+ sec = (sec < 60) ? BIN2BCD(sec) : 0x00;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ /* Issue the WRITE command */
+ M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+
+ M48T59_WRITE(mday, M48T59_ALARM_DATE);
+ M48T59_WRITE(hour, M48T59_ALARM_HOUR);
+ M48T59_WRITE(min, M48T59_ALARM_MIN);
+ M48T59_WRITE(sec, M48T59_ALARM_SEC);
+
+ /* Clear the WRITE bit */
+ M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
+ tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+ return 0;
+}
+
+/*
+ * Handle commands from user-space
+ */
+static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ switch (cmd) {
+ case RTC_AIE_OFF: /* alarm interrupt off */
+ M48T59_WRITE(0x00, M48T59_INTR);
+ break;
+ case RTC_AIE_ON: /* alarm interrupt on */
+ M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ return ret;
+}
+
+static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&m48t59->lock, flags);
+ val = M48T59_READ(M48T59_FLAGS);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+
+ seq_printf(seq, "battery\t\t: %s\n",
+ (val & M48T59_FLAGS_BF) ? "low" : "normal");
+ return 0;
+}
+
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
+{
+ struct device *dev = (struct device *)dev_id;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ u8 event;
+
+ spin_lock(&m48t59->lock);
+ event = M48T59_READ(M48T59_FLAGS);
+ spin_unlock(&m48t59->lock);
+
+ if (event & M48T59_FLAGS_AF) {
+ rtc_update_irq(m48t59->rtc, 1, (RTC_AF | RTC_IRQF));
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static const struct rtc_class_ops m48t59_rtc_ops = {
+ .ioctl = m48t59_rtc_ioctl,
+ .read_time = m48t59_rtc_read_time,
+ .set_time = m48t59_rtc_set_time,
+ .read_alarm = m48t59_rtc_readalarm,
+ .set_alarm = m48t59_rtc_setalarm,
+ .proc = m48t59_rtc_proc,
+};
+
+static ssize_t m48t59_nvram_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ ssize_t cnt = 0;
+ unsigned long flags;
+
+	for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--, pos++) {
+		spin_lock_irqsave(&m48t59->lock, flags);
+		*buf++ = M48T59_READ(pos);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+ }
+
+ return cnt;
+}
+
+static ssize_t m48t59_nvram_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+ ssize_t cnt = 0;
+ unsigned long flags;
+
+	for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--, pos++) {
+		spin_lock_irqsave(&m48t59->lock, flags);
+		M48T59_WRITE(*buf++, pos);
+ spin_unlock_irqrestore(&m48t59->lock, flags);
+ }
+
+ return cnt;
+}
+
+static struct bin_attribute m48t59_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUGO,
+ .owner = THIS_MODULE,
+ },
+ .read = m48t59_nvram_read,
+ .write = m48t59_nvram_write,
+};
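+
+/* The NVRAM shows up as a binary sysfs attribute named "nvram" on the
+ * platform device, e.g. (the exact path is board-specific):
+ *	hexdump /sys/devices/platform/rtc-m48t59.0/nvram
+ */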
+
+static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
+{
+ struct m48t59_plat_data *pdata = pdev->dev.platform_data;
+ struct m48t59_private *m48t59 = NULL;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ /* This chip could be memory-mapped or I/O-mapped */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -EINVAL;
+ }
+
+ if (res->flags & IORESOURCE_IO) {
+ /* If we are I/O-mapped, the platform should provide
+ * the operations accessing chip registers.
+ */
+ if (!pdata || !pdata->write_byte || !pdata->read_byte)
+ return -EINVAL;
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* we are memory-mapped */
+ if (!pdata) {
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ /* Ensure we only kmalloc platform data once */
+ pdev->dev.platform_data = pdata;
+ }
+
+ /* Try to use the generic memory read/write ops */
+ if (!pdata->write_byte)
+ pdata->write_byte = m48t59_mem_writeb;
+ if (!pdata->read_byte)
+ pdata->read_byte = m48t59_mem_readb;
+ }
+
+ m48t59 = kzalloc(sizeof(*m48t59), GFP_KERNEL);
+ if (!m48t59)
+ return -ENOMEM;
+
+ m48t59->size = res->end - res->start + 1;
+ m48t59->ioaddr = ioremap(res->start, m48t59->size);
+ if (!m48t59->ioaddr)
+ goto out;
+
+	/* Try to get the irq number; the driver also works without
+	 * an IRQ, in which case alarms are unavailable.
+ */
+ m48t59->irq = platform_get_irq(pdev, 0);
+ if (m48t59->irq < 0)
+ m48t59->irq = NO_IRQ;
+
+ if (m48t59->irq != NO_IRQ) {
+ ret = request_irq(m48t59->irq, m48t59_rtc_interrupt,
+ IRQF_SHARED, "rtc-m48t59", &pdev->dev);
+ if (ret)
+ goto out;
+ }
+
+ m48t59->rtc = rtc_device_register("m48t59", &pdev->dev,
+ &m48t59_rtc_ops, THIS_MODULE);
+ if (IS_ERR(m48t59->rtc)) {
+ ret = PTR_ERR(m48t59->rtc);
+ goto out;
+ }
+
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+ if (ret)
+ goto out;
+
+ spin_lock_init(&m48t59->lock);
+ platform_set_drvdata(pdev, m48t59);
+ return 0;
+
+out:
+	if (m48t59->rtc && !IS_ERR(m48t59->rtc))
+		rtc_device_unregister(m48t59->rtc);
+	if (m48t59->irq > 0)
+		free_irq(m48t59->irq, &pdev->dev);
+	if (m48t59->ioaddr)
+		iounmap(m48t59->ioaddr);
+	kfree(m48t59);
+ return ret;
+}
+
+static int __devexit m48t59_rtc_remove(struct platform_device *pdev)
+{
+ struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
+
+ sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
+ if (!IS_ERR(m48t59->rtc))
+ rtc_device_unregister(m48t59->rtc);
+ if (m48t59->ioaddr)
+ iounmap(m48t59->ioaddr);
+ if (m48t59->irq != NO_IRQ)
+ free_irq(m48t59->irq, &pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(m48t59);
+ return 0;
+}
+
+static struct platform_driver m48t59_rtc_platdrv = {
+ .driver = {
+ .name = "rtc-m48t59",
+ .owner = THIS_MODULE,
+ },
+ .probe = m48t59_rtc_probe,
+ .remove = __devexit_p(m48t59_rtc_remove),
+};
+
+static int __init m48t59_rtc_init(void)
+{
+ return platform_driver_register(&m48t59_rtc_platdrv);
+}
+
+static void __exit m48t59_rtc_exit(void)
+{
+ platform_driver_unregister(&m48t59_rtc_platdrv);
+}
+
+module_init(m48t59_rtc_init);
+module_exit(m48t59_rtc_exit);
+
+MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
+MODULE_DESCRIPTION("M48T59 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index eee4ee5bb75a..a1cd448639c9 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -31,17 +31,24 @@
#define MAX6900_REG_DW 5 /* day of week 1-7 */
#define MAX6900_REG_YR 6 /* year 00-99 */
#define MAX6900_REG_CT 7 /* control */
-#define MAX6900_REG_LEN 8
+ /* register 8 is undocumented */
+#define MAX6900_REG_CENTURY 9 /* century */
+#define MAX6900_REG_LEN 10
+
+#define MAX6900_BURST_LEN 8 /* can burst r/w first 8 regs */
#define MAX6900_REG_CT_WP (1 << 7) /* Write Protect */
+
/*
* register read/write commands
*/
#define MAX6900_REG_CONTROL_WRITE 0x8e
-#define MAX6900_REG_BURST_READ 0xbf
-#define MAX6900_REG_BURST_WRITE 0xbe
+#define MAX6900_REG_CENTURY_WRITE 0x92
+#define MAX6900_REG_CENTURY_READ 0x93
#define MAX6900_REG_RESERVED_READ 0x96
+#define MAX6900_REG_BURST_WRITE 0xbe
+#define MAX6900_REG_BURST_READ 0xbf
#define MAX6900_IDLE_TIME_AFTER_WRITE 3 /* specification says 2.5 mS */
@@ -58,19 +65,32 @@ static int max6900_probe(struct i2c_adapter *adapter, int addr, int kind);
static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
{
- u8 reg_addr[1] = { MAX6900_REG_BURST_READ };
- struct i2c_msg msgs[2] = {
+ u8 reg_burst_read[1] = { MAX6900_REG_BURST_READ };
+ u8 reg_century_read[1] = { MAX6900_REG_CENTURY_READ };
+ struct i2c_msg msgs[4] = {
{
.addr = client->addr,
.flags = 0, /* write */
- .len = sizeof(reg_addr),
- .buf = reg_addr
+ .len = sizeof(reg_burst_read),
+ .buf = reg_burst_read
},
{
.addr = client->addr,
.flags = I2C_M_RD,
- .len = MAX6900_REG_LEN,
+ .len = MAX6900_BURST_LEN,
.buf = buf
+ },
+ {
+ .addr = client->addr,
+ .flags = 0, /* write */
+ .len = sizeof(reg_century_read),
+ .buf = reg_century_read
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(buf[MAX6900_REG_CENTURY]),
+ .buf = &buf[MAX6900_REG_CENTURY]
}
};
int rc;
@@ -86,33 +106,58 @@ static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf)
{
- u8 i2c_buf[MAX6900_REG_LEN + 1] = { MAX6900_REG_BURST_WRITE };
- struct i2c_msg msgs[1] = {
+ u8 i2c_century_buf[1 + 1] = { MAX6900_REG_CENTURY_WRITE };
+ struct i2c_msg century_msgs[1] = {
{
.addr = client->addr,
.flags = 0, /* write */
- .len = MAX6900_REG_LEN + 1,
- .buf = i2c_buf
+ .len = sizeof(i2c_century_buf),
+ .buf = i2c_century_buf
+ }
+ };
+ u8 i2c_burst_buf[MAX6900_BURST_LEN + 1] = { MAX6900_REG_BURST_WRITE };
+ struct i2c_msg burst_msgs[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0, /* write */
+ .len = sizeof(i2c_burst_buf),
+ .buf = i2c_burst_buf
}
};
int rc;
- memcpy(&i2c_buf[1], buf, MAX6900_REG_LEN);
+ /*
+ * We have to make separate calls to i2c_transfer because of
+ * the need to delay after each write to the chip. Also,
+ * we write the century byte first, since we set the write-protect
+ * bit as part of the burst write.
+ */
+ i2c_century_buf[1] = buf[MAX6900_REG_CENTURY];
+ rc = i2c_transfer(client->adapter, century_msgs,
+ ARRAY_SIZE(century_msgs));
+ if (rc != ARRAY_SIZE(century_msgs))
+ goto write_failed;
+ msleep(MAX6900_IDLE_TIME_AFTER_WRITE);
- rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (rc != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: register write failed\n",
- __FUNCTION__);
- return -EIO;
- }
+ memcpy(&i2c_burst_buf[1], buf, MAX6900_BURST_LEN);
+
+ rc = i2c_transfer(client->adapter, burst_msgs, ARRAY_SIZE(burst_msgs));
+ if (rc != ARRAY_SIZE(burst_msgs))
+ goto write_failed;
msleep(MAX6900_IDLE_TIME_AFTER_WRITE);
+
return 0;
+
+write_failed:
+ dev_err(&client->dev, "%s: register write failed\n",
+ __FUNCTION__);
+ return -EIO;
}
static int max6900_i2c_validate_client(struct i2c_client *client)
{
u8 regs[MAX6900_REG_LEN];
- u8 zero_mask[MAX6900_REG_LEN] = {
+ u8 zero_mask[] = {
0x80, /* seconds */
0x80, /* minutes */
0x40, /* hours */
@@ -134,7 +179,7 @@ static int max6900_i2c_validate_client(struct i2c_client *client)
if (rc < 0)
return rc;
- for (i = 0; i < MAX6900_REG_LEN; ++i) {
+ for (i = 0; i < ARRAY_SIZE(zero_mask); ++i) {
if (regs[i] & zero_mask[i])
return -ENODEV;
}
@@ -156,7 +201,8 @@ static int max6900_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
tm->tm_hour = BCD2BIN(regs[MAX6900_REG_HR] & 0x3f);
tm->tm_mday = BCD2BIN(regs[MAX6900_REG_DT]);
tm->tm_mon = BCD2BIN(regs[MAX6900_REG_MO]) - 1;
- tm->tm_year = BCD2BIN(regs[MAX6900_REG_YR]) + 100;
+ tm->tm_year = BCD2BIN(regs[MAX6900_REG_YR]) +
+ BCD2BIN(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
tm->tm_wday = BCD2BIN(regs[MAX6900_REG_DW]);
return 0;
@@ -189,9 +235,11 @@ static int max6900_i2c_set_time(struct i2c_client *client,
regs[MAX6900_REG_HR] = BIN2BCD(tm->tm_hour);
regs[MAX6900_REG_DT] = BIN2BCD(tm->tm_mday);
regs[MAX6900_REG_MO] = BIN2BCD(tm->tm_mon + 1);
- regs[MAX6900_REG_YR] = BIN2BCD(tm->tm_year - 100);
regs[MAX6900_REG_DW] = BIN2BCD(tm->tm_wday);
- regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP; /* set write protect */
+ regs[MAX6900_REG_YR] = BIN2BCD(tm->tm_year % 100);
+ regs[MAX6900_REG_CENTURY] = BIN2BCD((tm->tm_year + 1900) / 100);
+ /* set write protect */
+ regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP;
rc = max6900_i2c_write_regs(client, regs);
if (rc < 0)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 09bbe575647b..6b67b5097927 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,13 +13,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
-#define DRV_VERSION "0.4"
-
-/* Addresses to scan */
-static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "0.5"
/*
@@ -88,9 +82,6 @@ struct rs5c372 {
unsigned has_irq:1;
char buf[17];
char *regs;
-
- /* on conversion to a "new style" i2c driver, this vanishes */
- struct i2c_client dev;
};
static int rs5c_get_regs(struct rs5c372 *rs5c)
@@ -483,25 +474,35 @@ static int rs5c_sysfs_register(struct device *dev)
return err;
}
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_trim);
+ device_remove_file(dev, &dev_attr_osc);
+}
+
#else
static int rs5c_sysfs_register(struct device *dev)
{
return 0;
}
+
+static void rs5c_sysfs_unregister(struct device *dev)
+{
+ /* nothing */
+}
#endif /* SYSFS */
static struct i2c_driver rs5c372_driver;
-static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
+static int rs5c372_probe(struct i2c_client *client)
{
int err = 0;
- struct i2c_client *client;
struct rs5c372 *rs5c372;
struct rtc_time tm;
- dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+ dev_dbg(&client->dev, "%s\n", __FUNCTION__);
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
goto exit;
}
@@ -514,35 +515,22 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
/* we read registers 0x0f then 0x00-0x0f; skip the first one */
rs5c372->regs=&rs5c372->buf[1];
- /* On conversion to a "new style" i2c driver, we'll be handed
- * the i2c_client (we won't create it)
- */
- client = &rs5c372->dev;
rs5c372->client = client;
-
- /* I2C client */
- client->addr = address;
- client->driver = &rs5c372_driver;
- client->adapter = adapter;
-
- strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
-
i2c_set_clientdata(client, rs5c372);
- /* Inform the i2c layer */
- if ((err = i2c_attach_client(client)))
- goto exit_kfree;
-
err = rs5c_get_regs(rs5c372);
if (err < 0)
- goto exit_detach;
+ goto exit_kfree;
- /* For "new style" drivers, irq is in i2c_client and chip type
- * info comes from i2c_client.dev.platform_data. Meanwhile:
- *
- * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE
- */
- if (rs5c372->type == rtc_undef) {
+ if (strcmp(client->name, "rs5c372a") == 0)
+ rs5c372->type = rtc_rs5c372a;
+ else if (strcmp(client->name, "rs5c372b") == 0)
+ rs5c372->type = rtc_rs5c372b;
+ else if (strcmp(client->name, "rv5c386") == 0)
+ rs5c372->type = rtc_rv5c386;
+ else if (strcmp(client->name, "rv5c387a") == 0)
+ rs5c372->type = rtc_rv5c387a;
+ else {
rs5c372->type = rtc_rs5c372b;
dev_warn(&client->dev, "assuming rs5c372b\n");
}
@@ -567,7 +555,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
break;
default:
dev_err(&client->dev, "unknown RTC type\n");
- goto exit_detach;
+ goto exit_kfree;
}
/* if the oscillator lost power and no other software (like
@@ -601,7 +589,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
if ((i2c_master_send(client, buf, 3)) != 3) {
dev_err(&client->dev, "setup error\n");
- goto exit_detach;
+ goto exit_kfree;
}
rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
@@ -621,14 +609,14 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
rs5c372->time24 ? "24hr" : "am/pm"
);
- /* FIXME when client->irq exists, use it to register alarm irq */
+ /* REVISIT use client->irq to register alarm irq ... */
rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
&client->dev, &rs5c372_rtc_ops, THIS_MODULE);
if (IS_ERR(rs5c372->rtc)) {
err = PTR_ERR(rs5c372->rtc);
- goto exit_detach;
+ goto exit_kfree;
}
err = rs5c_sysfs_register(&client->dev);
@@ -640,9 +628,6 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
exit_devreg:
rtc_device_unregister(rs5c372->rtc);
-exit_detach:
- i2c_detach_client(client);
-
exit_kfree:
kfree(rs5c372);
@@ -650,24 +635,12 @@ exit:
return err;
}
-static int rs5c372_attach(struct i2c_adapter *adapter)
+static int rs5c372_remove(struct i2c_client *client)
{
- return i2c_probe(adapter, &addr_data, rs5c372_probe);
-}
-
-static int rs5c372_detach(struct i2c_client *client)
-{
- int err;
struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
- if (rs5c372->rtc)
- rtc_device_unregister(rs5c372->rtc);
-
- /* REVISIT properly destroy the sysfs files ... */
-
- if ((err = i2c_detach_client(client)))
- return err;
-
+ rtc_device_unregister(rs5c372->rtc);
+ rs5c_sysfs_unregister(&client->dev);
kfree(rs5c372);
return 0;
}
@@ -676,8 +649,8 @@ static struct i2c_driver rs5c372_driver = {
.driver = {
.name = "rtc-rs5c372",
},
- .attach_adapter = &rs5c372_attach,
- .detach_client = &rs5c372_detach,
+ .probe = rs5c372_probe,
+ .remove = rs5c372_remove,
};
static __init int rs5c372_init(void)
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
new file mode 100644
index 000000000000..f10d3facecbe
--- /dev/null
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -0,0 +1,420 @@
+/*
+ * An RTC driver for the Simtek STK17TA8
+ *
+ * By Thomas Hommel <thomas.hommel@gefanuc.com>
+ *
+ * Based on the DS1553 driver from
+ * Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_VERSION "0.1"
+
+#define RTC_REG_SIZE 0x20000
+#define RTC_OFFSET 0x1fff0
+
+#define RTC_FLAGS (RTC_OFFSET + 0)
+#define RTC_CENTURY (RTC_OFFSET + 1)
+#define RTC_SECONDS_ALARM (RTC_OFFSET + 2)
+#define RTC_MINUTES_ALARM (RTC_OFFSET + 3)
+#define RTC_HOURS_ALARM (RTC_OFFSET + 4)
+#define RTC_DATE_ALARM (RTC_OFFSET + 5)
+#define RTC_INTERRUPTS (RTC_OFFSET + 6)
+#define RTC_WATCHDOG (RTC_OFFSET + 7)
+#define RTC_CALIBRATION (RTC_OFFSET + 8)
+#define RTC_SECONDS (RTC_OFFSET + 9)
+#define RTC_MINUTES (RTC_OFFSET + 10)
+#define RTC_HOURS (RTC_OFFSET + 11)
+#define RTC_DAY (RTC_OFFSET + 12)
+#define RTC_DATE (RTC_OFFSET + 13)
+#define RTC_MONTH (RTC_OFFSET + 14)
+#define RTC_YEAR (RTC_OFFSET + 15)
+
+#define RTC_SECONDS_MASK 0x7f
+#define RTC_DAY_MASK 0x07
+#define RTC_CAL_MASK 0x3f
+
+/* Bits in the Calibration register */
+#define RTC_STOP 0x80
+
+/* Bits in the Flags register */
+#define RTC_FLAGS_AF 0x40
+#define RTC_FLAGS_PF 0x20
+#define RTC_WRITE 0x02
+#define RTC_READ 0x01
+
+/* Bits in the Interrupts register */
+#define RTC_INTS_AIE 0x40
+
+struct rtc_plat_data {
+ struct rtc_device *rtc;
+ void __iomem *ioaddr;
+ unsigned long baseaddr;
+ unsigned long last_jiffies;
+ int irq;
+ unsigned int irqen;
+ int alrm_sec;
+ int alrm_min;
+ int alrm_hour;
+ int alrm_mday;
+};
+
+static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u8 flags;
+
+ flags = readb(pdata->ioaddr + RTC_FLAGS);
+ writeb(flags | RTC_WRITE, pdata->ioaddr + RTC_FLAGS);
+
+ writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
+ writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
+ writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
+ writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
+ writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
+ writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
+ writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
+ writeb(BIN2BCD((tm->tm_year + 1900) / 100), ioaddr + RTC_CENTURY);
+
+ writeb(flags & ~RTC_WRITE, pdata->ioaddr + RTC_FLAGS);
+ return 0;
+}
+
+static int stk17ta8_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ unsigned int year, month, day, hour, minute, second, week;
+ unsigned int century;
+ u8 flags;
+
+ /* give enough time to update RTC in case of continuous read */
+ if (pdata->last_jiffies == jiffies)
+ msleep(1);
+ pdata->last_jiffies = jiffies;
+
+ flags = readb(pdata->ioaddr + RTC_FLAGS);
+ writeb(flags | RTC_READ, ioaddr + RTC_FLAGS);
+ second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK;
+ minute = readb(ioaddr + RTC_MINUTES);
+ hour = readb(ioaddr + RTC_HOURS);
+ day = readb(ioaddr + RTC_DATE);
+ week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK;
+ month = readb(ioaddr + RTC_MONTH);
+ year = readb(ioaddr + RTC_YEAR);
+ century = readb(ioaddr + RTC_CENTURY);
+ writeb(flags & ~RTC_READ, ioaddr + RTC_FLAGS);
+ tm->tm_sec = BCD2BIN(second);
+ tm->tm_min = BCD2BIN(minute);
+ tm->tm_hour = BCD2BIN(hour);
+ tm->tm_mday = BCD2BIN(day);
+ tm->tm_wday = BCD2BIN(week);
+ tm->tm_mon = BCD2BIN(month) - 1;
+ /* year is 1900 + tm->tm_year */
+ tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
+
+ if (rtc_valid_tm(tm) < 0) {
+ dev_err(dev, "retrieved date/time is not valid.\n");
+ rtc_time_to_tm(0, tm);
+ }
+ return 0;
+}
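/*
 * Editorial sketch, not part of this patch: the year/century arithmetic used
 * above, spelled out.  For 2007 the chip holds year 0x07 and century 0x20, so
 * BCD2BIN(0x07) + BCD2BIN(0x20) * 100 - 1900 = 7 + 2000 - 1900 = 107, which is
 * the tm_year convention (years since 1900).  The helpers below only mirror
 * the <linux/bcd.h> macros for illustration.
 */
static inline unsigned int example_bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;	/* 0x07 -> 7, 0x20 -> 20 */
}

static inline unsigned char example_bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);	/* 20 -> 0x20, 7 -> 0x07 */
}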
+
+static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
+{
+ void __iomem *ioaddr = pdata->ioaddr;
+ unsigned long irqflags;
+ u8 flags;
+
+ spin_lock_irqsave(&pdata->rtc->irq_lock, irqflags);
+
+ flags = readb(ioaddr + RTC_FLAGS);
+ writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
+
+ writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_mday),
+ ioaddr + RTC_DATE_ALARM);
+ writeb(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_hour),
+ ioaddr + RTC_HOURS_ALARM);
+ writeb(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_min),
+ ioaddr + RTC_MINUTES_ALARM);
+ writeb(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_sec),
+ ioaddr + RTC_SECONDS_ALARM);
+ writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
+ readb(ioaddr + RTC_FLAGS); /* clear interrupts */
+ writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
+ spin_unlock_irqrestore(&pdata->rtc->irq_lock, irqflags);
+}
+
+static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0)
+ return -EINVAL;
+ pdata->alrm_mday = alrm->time.tm_mday;
+ pdata->alrm_hour = alrm->time.tm_hour;
+ pdata->alrm_min = alrm->time.tm_min;
+ pdata->alrm_sec = alrm->time.tm_sec;
+ if (alrm->enabled)
+ pdata->irqen |= RTC_AF;
+ stk17ta8_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int stk17ta8_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0)
+ return -EINVAL;
+ alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
+ alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
+ alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
+ alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
+ alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
+ return 0;
+}
+
+static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ unsigned long events = RTC_IRQF;
+
+ /* read and clear interrupt */
+ if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
+ return IRQ_NONE;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ rtc_update_irq(pdata->rtc, 1, events);
+ return IRQ_HANDLED;
+}
+
+static void stk17ta8_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq >= 0) {
+ pdata->irqen = 0;
+ stk17ta8_rtc_update_alarm(pdata);
+ }
+}
+
+static int stk17ta8_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0)
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ pdata->irqen &= ~RTC_AF;
+ stk17ta8_rtc_update_alarm(pdata);
+ break;
+ case RTC_AIE_ON:
+ pdata->irqen |= RTC_AF;
+ stk17ta8_rtc_update_alarm(pdata);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static const struct rtc_class_ops stk17ta8_rtc_ops = {
+ .read_time = stk17ta8_rtc_read_time,
+ .set_time = stk17ta8_rtc_set_time,
+ .read_alarm = stk17ta8_rtc_read_alarm,
+ .set_alarm = stk17ta8_rtc_set_alarm,
+ .release = stk17ta8_rtc_release,
+ .ioctl = stk17ta8_rtc_ioctl,
+};
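/*
 * Editorial sketch, not part of this patch: how user space reaches the
 * RTC_AIE_ON/RTC_AIE_OFF handling above through the generic rtc-dev layer.
 * The node name /dev/rtc0 is an assumption; the ioctls are the standard
 * <linux/rtc.h> ABI.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int example_enable_alarm(void)
{
	struct rtc_time alarm = { .tm_hour = 6, .tm_min = 30, .tm_sec = 0 };
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return -1;
	/* program the alarm time, then enable the alarm interrupt */
	if (ioctl(fd, RTC_ALM_SET, &alarm) < 0 || ioctl(fd, RTC_AIE_ON, 0) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}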
+
+static ssize_t stk17ta8_nvram_read(struct kobject *kobj, char *buf,
+ loff_t pos, size_t size)
+{
+ struct platform_device *pdev =
+ to_platform_device(container_of(kobj, struct device, kobj));
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ ssize_t count;
+
+ for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+ *buf++ = readb(ioaddr + pos++);
+ return count;
+}
+
+static ssize_t stk17ta8_nvram_write(struct kobject *kobj, char *buf,
+ loff_t pos, size_t size)
+{
+ struct platform_device *pdev =
+ to_platform_device(container_of(kobj, struct device, kobj));
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ ssize_t count;
+
+ for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+ writeb(*buf++, ioaddr + pos++);
+ return count;
+}
+
+static struct bin_attribute stk17ta8_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUGO,
+ .owner = THIS_MODULE,
+ },
+ .size = RTC_OFFSET,
+ .read = stk17ta8_nvram_read,
+ .write = stk17ta8_nvram_write,
+};
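/*
 * Editorial sketch, not part of this patch: reading the battery-backed NVRAM
 * exported by the bin_attribute above from user space.  The sysfs path is an
 * assumption (it depends on the platform device name on a given board); the
 * file exposes the first RTC_OFFSET (0x1fff0) bytes of the chip.
 */
#include <stdio.h>

int example_dump_nvram(const char *path)	/* e.g. ".../platform/stk17ta8/nvram" */
{
	unsigned char buf[64];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f)
		return -1;
	n = fread(buf, 1, sizeof(buf), f);	/* first 64 bytes of NVRAM */
	fclose(f);
	return (int)n;
}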
+
+static int __init stk17ta8_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ unsigned int cal;
+ unsigned int flags;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr = NULL;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->irq = -1;
+ if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ pdata->baseaddr = res->start;
+ ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
+ if (!ioaddr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdata->ioaddr = ioaddr;
+ pdata->irq = platform_get_irq(pdev, 0);
+
+ /* turn RTC on if it was not on */
+ cal = readb(ioaddr + RTC_CALIBRATION);
+ if (cal & RTC_STOP) {
+ cal &= RTC_CAL_MASK;
+ flags = readb(ioaddr + RTC_FLAGS);
+ writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
+ writeb(cal, ioaddr + RTC_CALIBRATION);
+ writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
+ }
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF)
+ dev_warn(&pdev->dev, "voltage-low detected.\n");
+
+ if (pdata->irq >= 0) {
+ writeb(0, ioaddr + RTC_INTERRUPTS);
+ if (request_irq(pdata->irq, stk17ta8_rtc_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ pdev->name, pdev) < 0) {
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+ }
+
+ rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &stk17ta8_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto out;
+ }
+ pdata->rtc = rtc;
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
+ if (ret)
+ goto out;
+ return 0;
+ out:
+ if (pdata->rtc)
+ rtc_device_unregister(pdata->rtc);
+ if (pdata->irq >= 0)
+ free_irq(pdata->irq, pdev);
+ if (ioaddr)
+ iounmap(ioaddr);
+ if (pdata->baseaddr)
+ release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+ kfree(pdata);
+ return ret;
+}
+
+static int __devexit stk17ta8_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
+ rtc_device_unregister(pdata->rtc);
+ if (pdata->irq >= 0) {
+ writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
+ free_irq(pdata->irq, pdev);
+ }
+ iounmap(pdata->ioaddr);
+ release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+ kfree(pdata);
+ return 0;
+}
+
+static struct platform_driver stk17ta8_rtc_driver = {
+ .probe = stk17ta8_rtc_probe,
+ .remove = __devexit_p(stk17ta8_rtc_remove),
+ .driver = {
+ .name = "stk17ta8",
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int stk17ta8_init(void)
+{
+ return platform_driver_register(&stk17ta8_rtc_driver);
+}
+
+static __exit void stk17ta8_exit(void)
+{
+	platform_driver_unregister(&stk17ta8_rtc_driver);
+}
+
+module_init(stk17ta8_init);
+module_exit(stk17ta8_exit);
+
+MODULE_AUTHOR("Thomas Hommel <thomas.hommel@gefanuc.com>");
+MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index af7596ef29e2..ce2f78de7a80 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -17,10 +17,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -30,25 +31,11 @@
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/vr41xx/irq.h>
MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
MODULE_LICENSE("GPL");
-#define RTC1_TYPE1_START 0x0b0000c0UL
-#define RTC1_TYPE1_END 0x0b0000dfUL
-#define RTC2_TYPE1_START 0x0b0001c0UL
-#define RTC2_TYPE1_END 0x0b0001dfUL
-
-#define RTC1_TYPE2_START 0x0f000100UL
-#define RTC1_TYPE2_END 0x0f00011fUL
-#define RTC2_TYPE2_START 0x0f000120UL
-#define RTC2_TYPE2_END 0x0f00013fUL
-
-#define RTC1_SIZE 0x20
-#define RTC2_SIZE 0x20
-
/* RTC 1 registers */
#define ETIMELREG 0x00
#define ETIMEMREG 0x02
@@ -98,13 +85,8 @@ static char rtc_name[] = "RTC";
static unsigned long periodic_frequency;
static unsigned long periodic_count;
static unsigned int alarm_enabled;
-
-struct resource rtc_resource[2] = {
- { .name = rtc_name,
- .flags = IORESOURCE_MEM, },
- { .name = rtc_name,
- .flags = IORESOURCE_MEM, },
-};
+static int aie_irq = -1;
+static int pie_irq = -1;
static inline unsigned long read_elapsed_second(void)
{
@@ -150,8 +132,8 @@ static void vr41xx_rtc_release(struct device *dev)
spin_unlock_irq(&rtc_lock);
- disable_irq(ELAPSEDTIME_IRQ);
- disable_irq(RTCLONG1_IRQ);
+ disable_irq(aie_irq);
+ disable_irq(pie_irq);
}
static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
@@ -209,14 +191,14 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
spin_lock_irq(&rtc_lock);
if (alarm_enabled)
- disable_irq(ELAPSEDTIME_IRQ);
+ disable_irq(aie_irq);
rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15));
rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1));
rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17));
if (wkalrm->enabled)
- enable_irq(ELAPSEDTIME_IRQ);
+ enable_irq(aie_irq);
alarm_enabled = wkalrm->enabled;
@@ -234,7 +216,7 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
spin_lock_irq(&rtc_lock);
if (!alarm_enabled) {
- enable_irq(ELAPSEDTIME_IRQ);
+ enable_irq(aie_irq);
alarm_enabled = 1;
}
@@ -244,17 +226,17 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
spin_lock_irq(&rtc_lock);
if (alarm_enabled) {
- disable_irq(ELAPSEDTIME_IRQ);
+ disable_irq(aie_irq);
alarm_enabled = 0;
}
spin_unlock_irq(&rtc_lock);
break;
case RTC_PIE_ON:
- enable_irq(RTCLONG1_IRQ);
+ enable_irq(pie_irq);
break;
case RTC_PIE_OFF:
- disable_irq(RTCLONG1_IRQ);
+ disable_irq(pie_irq);
break;
case RTC_IRQP_READ:
return put_user(periodic_frequency, (unsigned long __user *)arg);
@@ -331,31 +313,37 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
static int __devinit rtc_probe(struct platform_device *pdev)
{
+ struct resource *res;
struct rtc_device *rtc;
- unsigned int irq;
int retval;
- if (pdev->num_resources != 2)
+ if (pdev->num_resources != 4)
return -EBUSY;
- rtc1_base = ioremap(pdev->resource[0].start, RTC1_SIZE);
- if (rtc1_base == NULL)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
return -EBUSY;
- rtc2_base = ioremap(pdev->resource[1].start, RTC2_SIZE);
- if (rtc2_base == NULL) {
- iounmap(rtc1_base);
- rtc1_base = NULL;
+ rtc1_base = ioremap(res->start, res->end - res->start + 1);
+ if (!rtc1_base)
return -EBUSY;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ retval = -EBUSY;
+ goto err_rtc1_iounmap;
+ }
+
+ rtc2_base = ioremap(res->start, res->end - res->start + 1);
+ if (!rtc2_base) {
+ retval = -EBUSY;
+ goto err_rtc1_iounmap;
}
rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- iounmap(rtc1_base);
- iounmap(rtc2_base);
- rtc1_base = NULL;
- rtc2_base = NULL;
- return PTR_ERR(rtc);
+ retval = PTR_ERR(rtc);
+ goto err_iounmap_all;
}
spin_lock_irq(&rtc_lock);
@@ -368,35 +356,50 @@ static int __devinit rtc_probe(struct platform_device *pdev)
spin_unlock_irq(&rtc_lock);
- irq = ELAPSEDTIME_IRQ;
- retval = request_irq(irq, elapsedtime_interrupt, IRQF_DISABLED,
- "elapsed_time", pdev);
- if (retval == 0) {
- irq = RTCLONG1_IRQ;
- retval = request_irq(irq, rtclong1_interrupt, IRQF_DISABLED,
- "rtclong1", pdev);
+ aie_irq = platform_get_irq(pdev, 0);
+ if (aie_irq < 0 || aie_irq >= NR_IRQS) {
+ retval = -EBUSY;
+ goto err_device_unregister;
}
- if (retval < 0) {
- printk(KERN_ERR "rtc: IRQ%d is busy\n", irq);
- rtc_device_unregister(rtc);
- if (irq == RTCLONG1_IRQ)
- free_irq(ELAPSEDTIME_IRQ, NULL);
- iounmap(rtc1_base);
- iounmap(rtc2_base);
- rtc1_base = NULL;
- rtc2_base = NULL;
- return retval;
- }
+ retval = request_irq(aie_irq, elapsedtime_interrupt, IRQF_DISABLED,
+ "elapsed_time", pdev);
+ if (retval < 0)
+ goto err_device_unregister;
+
+ pie_irq = platform_get_irq(pdev, 1);
+	if (pie_irq < 0 || pie_irq >= NR_IRQS) {
+		retval = -EBUSY;
+		goto err_free_irq;
+	}
+
+ retval = request_irq(pie_irq, rtclong1_interrupt, IRQF_DISABLED,
+ "rtclong1", pdev);
+ if (retval < 0)
+ goto err_free_irq;
platform_set_drvdata(pdev, rtc);
- disable_irq(ELAPSEDTIME_IRQ);
- disable_irq(RTCLONG1_IRQ);
+ disable_irq(aie_irq);
+ disable_irq(pie_irq);
printk(KERN_INFO "rtc: Real Time Clock of NEC VR4100 series\n");
return 0;
+
+err_free_irq:
+ free_irq(aie_irq, pdev);
+
+err_device_unregister:
+ rtc_device_unregister(rtc);
+
+err_iounmap_all:
+ iounmap(rtc2_base);
+ rtc2_base = NULL;
+
+err_rtc1_iounmap:
+ iounmap(rtc1_base);
+ rtc1_base = NULL;
+
+ return retval;
}
static int __devexit rtc_remove(struct platform_device *pdev)
@@ -404,23 +407,21 @@ static int __devexit rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc;
rtc = platform_get_drvdata(pdev);
- if (rtc != NULL)
+ if (rtc)
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
- free_irq(ELAPSEDTIME_IRQ, NULL);
- free_irq(RTCLONG1_IRQ, NULL);
- if (rtc1_base != NULL)
+ free_irq(aie_irq, pdev);
+ free_irq(pie_irq, pdev);
+ if (rtc1_base)
iounmap(rtc1_base);
- if (rtc2_base != NULL)
+ if (rtc2_base)
iounmap(rtc2_base);
return 0;
}
-static struct platform_device *rtc_platform_device;
-
static struct platform_driver rtc_platform_driver = {
.probe = rtc_probe,
.remove = __devexit_p(rtc_remove),
@@ -432,55 +433,12 @@ static struct platform_driver rtc_platform_driver = {
static int __init vr41xx_rtc_init(void)
{
- int retval;
-
- switch (current_cpu_data.cputype) {
- case CPU_VR4111:
- case CPU_VR4121:
- rtc_resource[0].start = RTC1_TYPE1_START;
- rtc_resource[0].end = RTC1_TYPE1_END;
- rtc_resource[1].start = RTC2_TYPE1_START;
- rtc_resource[1].end = RTC2_TYPE1_END;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- rtc_resource[0].start = RTC1_TYPE2_START;
- rtc_resource[0].end = RTC1_TYPE2_END;
- rtc_resource[1].start = RTC2_TYPE2_START;
- rtc_resource[1].end = RTC2_TYPE2_END;
- break;
- default:
- return -ENODEV;
- break;
- }
-
- rtc_platform_device = platform_device_alloc("RTC", -1);
- if (rtc_platform_device == NULL)
- return -ENOMEM;
-
- retval = platform_device_add_resources(rtc_platform_device,
- rtc_resource, ARRAY_SIZE(rtc_resource));
-
- if (retval == 0)
- retval = platform_device_add(rtc_platform_device);
-
- if (retval < 0) {
- platform_device_put(rtc_platform_device);
- return retval;
- }
-
- retval = platform_driver_register(&rtc_platform_driver);
- if (retval < 0)
- platform_device_unregister(rtc_platform_device);
-
- return retval;
+ return platform_driver_register(&rtc_platform_driver);
}
static void __exit vr41xx_rtc_exit(void)
{
platform_driver_unregister(&rtc_platform_driver);
- platform_device_unregister(rtc_platform_device);
}
module_init(vr41xx_rtc_init);
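/*
 * Editorial sketch, not part of this patch: with the device registration
 * removed from vr41xx_rtc_init() above, board setup code is now expected to
 * provide a platform device with two memory and two IRQ resources (rtc_probe()
 * rejects anything but num_resources == 4).  The memory ranges below reuse the
 * Type-1 addresses this patch removes; the device name and the IRQ numbers are
 * placeholders that must match the real platform code for a given SoC.
 */
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

#define EXAMPLE_RTC_AIE_IRQ	17	/* placeholder for the elapsed-time IRQ */
#define EXAMPLE_RTC_PIE_IRQ	18	/* placeholder for the RTCLong1 IRQ */

static struct resource example_vr41xx_rtc_resources[] = {
	{ .start = 0x0b0000c0UL, .end = 0x0b0000dfUL, .flags = IORESOURCE_MEM },
	{ .start = 0x0b0001c0UL, .end = 0x0b0001dfUL, .flags = IORESOURCE_MEM },
	{ .start = EXAMPLE_RTC_AIE_IRQ, .end = EXAMPLE_RTC_AIE_IRQ, .flags = IORESOURCE_IRQ },
	{ .start = EXAMPLE_RTC_PIE_IRQ, .end = EXAMPLE_RTC_PIE_IRQ, .flags = IORESOURCE_IRQ },
};

static struct platform_device example_vr41xx_rtc_device = {
	.name		= "RTC",	/* must match the platform_driver name */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(example_vr41xx_rtc_resources),
	.resource	= example_vr41xx_rtc_resources,
};
/* board init would then call platform_device_register(&example_vr41xx_rtc_device); */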
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 513d1a611aab..b3fae357ca49 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -9,6 +9,9 @@
*
* based on a lot of other RTC drivers.
*
+ * Information and datasheet:
+ * http://www.intersil.com/cda/deviceinfo/0,1477,X1205,00.html
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -26,7 +29,7 @@
* Two bytes need to be written to read a single register,
* while most other chips just require one and take the second
* one as the data to be written. To prevent corrupting
- * unknown chips, the user must explicitely set the probe parameter.
+ * unknown chips, the user must explicitly set the probe parameter.
*/
static unsigned short normal_i2c[] = { I2C_CLIENT_END };
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 6a89cefe99bb..0c67258fb9ec 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -291,7 +291,7 @@ dasd_parse_keyword( char *parsestring ) {
dasd_page_cache =
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
PAGE_SIZE, SLAB_CACHE_DMA,
- NULL, NULL );
+ NULL);
if (!dasd_page_cache)
MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
"fixed buffer mode disabled.");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1340451ea408..35765f6a86e0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -747,14 +747,9 @@ dcssblk_check_params(void)
static void __exit
dcssblk_exit(void)
{
- int rc;
-
PRINT_DEBUG("DCSSBLOCK EXIT...\n");
s390_root_dev_unregister(dcssblk_root_dev);
- rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
- if (rc) {
- PRINT_ERR("unregister_blkdev() failed!\n");
- }
+ unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
PRINT_DEBUG("...finished!\n");
}
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 66102a184322..3f36cb3910ee 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -164,3 +164,10 @@ config MONWRITER
help
Character device driver for writing z/VM monitor service records
+config S390_VMUR
+ tristate "z/VM unit record device driver"
+ depends on S390
+ default "m"
+ help
+ Character device driver for z/VM reader, puncher and printer.
+
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c210784bdf46..130de19916f2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
obj-$(CONFIG_MONREADER) += monreader.o
obj-$(CONFIG_MONWRITER) += monwriter.o
+obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index e765875e8db2..80e7a537e7d2 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -131,10 +131,9 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
{
struct tape_34xx_work *p;
- if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
+ if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
return -ENOMEM;
- memset(p, 0, sizeof(*p));
INIT_WORK(&p->work, tape_34xx_work_handler);
p->device = tape_get_device_reference(device);
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 82e6a6b253eb..2f419b0ea628 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,7 +1,7 @@
/*
- * Copyright (C) 2004,2005 IBM Corporation
+ * Copyright IBM Corp. 2004,2007
* Interface implementation for communication with the z/VM control program
- * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
*
 * z/VM's CP offers the possibility to issue commands via the diagnose code 8
@@ -22,9 +22,11 @@
#include "vmcp.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Borntraeger <cborntra@de.ibm.com>");
+MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>");
MODULE_DESCRIPTION("z/VM CP interface");
+#define PRINTK_HEADER "vmcp: "
+
static debug_info_t *vmcp_debug;
static int vmcp_open(struct inode *inode, struct file *file)
@@ -40,7 +42,7 @@ static int vmcp_open(struct inode *inode, struct file *file)
session->bufsize = PAGE_SIZE;
session->response = NULL;
session->resp_size = 0;
- init_MUTEX(&session->mutex);
+ mutex_init(&session->mutex);
file->private_data = session;
return nonseekable_open(inode, file);
}
@@ -57,37 +59,37 @@ static int vmcp_release(struct inode *inode, struct file *file)
}
static ssize_t
-vmcp_read(struct file *file, char __user * buff, size_t count, loff_t * ppos)
+vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
size_t tocopy;
struct vmcp_session *session;
session = (struct vmcp_session *)file->private_data;
- if (down_interruptible(&session->mutex))
+ if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
if (!session->response) {
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return 0;
}
if (*ppos > session->resp_size) {
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return 0;
}
tocopy = min(session->resp_size - (size_t) (*ppos), count);
- tocopy = min(tocopy,session->bufsize - (size_t) (*ppos));
+ tocopy = min(tocopy, session->bufsize - (size_t) (*ppos));
if (copy_to_user(buff, session->response + (*ppos), tocopy)) {
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return -EFAULT;
}
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
*ppos += tocopy;
return tocopy;
}
static ssize_t
-vmcp_write(struct file *file, const char __user * buff, size_t count,
- loff_t * ppos)
+vmcp_write(struct file *file, const char __user *buff, size_t count,
+ loff_t *ppos)
{
char *cmd;
struct vmcp_session *session;
@@ -103,24 +105,23 @@ vmcp_write(struct file *file, const char __user * buff, size_t count,
}
cmd[count] = '\0';
session = (struct vmcp_session *)file->private_data;
- if (down_interruptible(&session->mutex)) {
+ if (mutex_lock_interruptible(&session->mutex)) {
kfree(cmd);
return -ERESTARTSYS;
}
if (!session->response)
session->response = (char *)__get_free_pages(GFP_KERNEL
- | __GFP_REPEAT | GFP_DMA,
+ | __GFP_REPEAT | GFP_DMA,
get_order(session->bufsize));
if (!session->response) {
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
kfree(cmd);
return -ENOMEM;
}
debug_text_event(vmcp_debug, 1, cmd);
- session->resp_size = cpcmd(cmd, session->response,
- session->bufsize,
- &session->resp_code);
- up(&session->mutex);
+ session->resp_size = cpcmd(cmd, session->response, session->bufsize,
+ &session->resp_code);
+ mutex_unlock(&session->mutex);
kfree(cmd);
*ppos = 0; /* reset the file pointer after a command */
return count;
@@ -145,12 +146,12 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int temp;
session = (struct vmcp_session *)file->private_data;
- if (down_interruptible(&session->mutex))
+ if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
temp = session->resp_code;
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return put_user(temp, (int __user *)arg);
case VMCP_SETBUF:
free_pages((unsigned long)session->response,
@@ -161,14 +162,14 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
session->bufsize = PAGE_SIZE;
temp = -EINVAL;
}
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return temp;
case VMCP_GETSIZE:
temp = session->resp_size;
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return put_user(temp, (int __user *)arg);
default:
- up(&session->mutex);
+ mutex_unlock(&session->mutex);
return -ENOIOCTLCMD;
}
}
@@ -180,7 +181,7 @@ static const struct file_operations vmcp_fops = {
.read = vmcp_read,
.write = vmcp_write,
.unlocked_ioctl = vmcp_ioctl,
- .compat_ioctl = vmcp_ioctl
+ .compat_ioctl = vmcp_ioctl,
};
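/*
 * Editorial sketch, not part of this patch: issuing a CP command through
 * /dev/vmcp.  A command is written, the response is read back, and
 * VMCP_GETCODE returns CP's response code.  The ioctl number mirrors the
 * definition in vmcp.h; the device path and command are illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ioctl.h>

#define EXAMPLE_VMCP_GETCODE	_IOR(0x10, 1, int)	/* same value as VMCP_GETCODE */

int example_query_users(void)
{
	char resp[4096];
	const char *cmd = "QUERY USERS";
	int code = 0;
	ssize_t n;
	int fd = open("/dev/vmcp", O_RDWR);

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) < 0) {
		close(fd);
		return -1;
	}
	n = read(fd, resp, sizeof(resp) - 1);	/* CP's response buffer */
	if (n > 0) {
		resp[n] = '\0';
		printf("%s", resp);
	}
	ioctl(fd, EXAMPLE_VMCP_GETCODE, &code);	/* CP return code of the command */
	close(fd);
	return code;
}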
static struct miscdevice vmcp_dev = {
@@ -194,26 +195,38 @@ static int __init vmcp_init(void)
int ret;
if (!MACHINE_IS_VM) {
- printk(KERN_WARNING
- "z/VM CP interface is only available under z/VM\n");
+ PRINT_WARN("z/VM CP interface is only available under z/VM\n");
return -ENODEV;
}
- ret = misc_register(&vmcp_dev);
- if (!ret)
- printk(KERN_INFO "z/VM CP interface loaded\n");
- else
- printk(KERN_WARNING
- "z/VM CP interface not loaded. Could not register misc device.\n");
vmcp_debug = debug_register("vmcp", 1, 1, 240);
- debug_register_view(vmcp_debug, &debug_hex_ascii_view);
- return ret;
+ if (!vmcp_debug) {
+ PRINT_ERR("z/VM CP interface not loaded. Could not register "
+ "debug feature\n");
+ return -ENOMEM;
+ }
+ ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
+ if (ret) {
+ PRINT_ERR("z/VM CP interface not loaded. Could not register "
+ "debug feature view. Error code: %d\n", ret);
+ debug_unregister(vmcp_debug);
+ return ret;
+ }
+ ret = misc_register(&vmcp_dev);
+ if (ret) {
+ PRINT_ERR("z/VM CP interface not loaded. Could not register "
+ "misc device. Error code: %d\n", ret);
+ debug_unregister(vmcp_debug);
+ return ret;
+ }
+ PRINT_INFO("z/VM CP interface loaded\n");
+ return 0;
}
static void __exit vmcp_exit(void)
{
- WARN_ON(misc_deregister(&vmcp_dev) != 0);
+ misc_deregister(&vmcp_dev);
debug_unregister(vmcp_debug);
- printk(KERN_INFO "z/VM CP interface unloaded.\n");
+ PRINT_INFO("z/VM CP interface unloaded.\n");
}
module_init(vmcp_init);
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 8a5975f3dad7..6a993948e188 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -12,8 +12,8 @@
* The idea of this driver is based on cpint from Neale Ferguson
*/
-#include <asm/semaphore.h>
#include <linux/ioctl.h>
+#include <linux/mutex.h>
#define VMCP_GETCODE _IOR(0x10, 1, int)
#define VMCP_SETBUF _IOW(0x10, 2, int)
@@ -26,5 +26,5 @@ struct vmcp_session {
int resp_code;
/* As we use copy_from/to_user, which might *
* sleep and cannot use a spinlock */
- struct semaphore mutex;
+ struct mutex mutex;
};
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
new file mode 100644
index 000000000000..e90b0f846195
--- /dev/null
+++ b/drivers/s390/char/vmur.c
@@ -0,0 +1,906 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ * Frank Munzert <munzert@de.ibm.com>
+ */
+
+#include <linux/cdev.h>
+
+#include <asm/uaccess.h>
+#include <asm/cio.h>
+#include <asm/ccwdev.h>
+#include <asm/debug.h>
+
+#include "vmur.h"
+
+/*
+ * Driver overview
+ *
+ * Unit record device support is implemented as a character device driver.
+ * We can fit at least 16 bits into a device minor number and use the
+ * simple method of mapping a character device number with minor abcd
+ * to the unit record device with devno abcd.
+ * I/O to virtual unit record devices is handled as follows:
+ * Reads: Diagnose code 0x14 (input spool file manipulation)
+ * is used to read spool data page-wise.
+ * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
+ * is available by reading the sysfs attribute reclen. Each write() to the
+ * device must specify an integral multiple of reclen (at most 511 records).
+ */
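/*
 * Editorial sketch, not part of this patch: minimal user-space use of the
 * write path just described.  ur_set_online() below creates nodes named
 * vmrdr-, vmpun- and vmprt-<bus id>; the exact /dev path is an assumption.
 * A 2540 punch has reclen 80 (see ur_ids), so each write() must be a whole
 * number of 80-byte records, at most 511 records per call.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int example_punch_one_record(void)
{
	char record[80];			/* exactly one reclen-sized record */
	int fd = open("/dev/vmpun-0.0.000d", O_WRONLY);

	if (fd < 0)
		return -1;
	memset(record, ' ', sizeof(record));
	memcpy(record, "HELLO FROM LINUX", 16);
	/* partial records are rejected with -EINVAL by ur_write() */
	if (write(fd, record, sizeof(record)) != sizeof(record)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}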
+
+static char ur_banner[] = "z/VM virtual unit record device driver";
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
+MODULE_LICENSE("GPL");
+
+#define PRINTK_HEADER "vmur: "
+
+static dev_t ur_first_dev_maj_min;
+static struct class *vmur_class;
+static struct debug_info *vmur_dbf;
+
+/* We put the device's record length (for writes) in the driver_info field */
+static struct ccw_device_id ur_ids[] = {
+ { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
+ { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
+ { /* end of list */ }
+};
+
+MODULE_DEVICE_TABLE(ccw, ur_ids);
+
+static int ur_probe(struct ccw_device *cdev);
+static void ur_remove(struct ccw_device *cdev);
+static int ur_set_online(struct ccw_device *cdev);
+static int ur_set_offline(struct ccw_device *cdev);
+
+static struct ccw_driver ur_driver = {
+ .name = "vmur",
+ .owner = THIS_MODULE,
+ .ids = ur_ids,
+ .probe = ur_probe,
+ .remove = ur_remove,
+ .set_online = ur_set_online,
+ .set_offline = ur_set_offline,
+};
+
+/*
+ * Allocation, freeing, getting and putting of urdev structures
+ */
+static struct urdev *urdev_alloc(struct ccw_device *cdev)
+{
+ struct urdev *urd;
+
+ urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
+ if (!urd)
+ return NULL;
+ urd->cdev = cdev;
+ urd->reclen = cdev->id.driver_info;
+ ccw_device_get_id(cdev, &urd->dev_id);
+ mutex_init(&urd->io_mutex);
+ mutex_init(&urd->open_mutex);
+ return urd;
+}
+
+static void urdev_free(struct urdev *urd)
+{
+ kfree(urd);
+}
+
+/*
+ * This is how the character device driver gets a reference to a
+ * ur device. When this call returns successfully, a reference has
+ * been taken (by get_device) on the underlying kobject. The recipient
+ * of this urdev pointer must eventually drop it with urdev_put(urd)
+ * which does the corresponding put_device().
+ */
+static struct urdev *urdev_get_from_devno(u16 devno)
+{
+ char bus_id[16];
+ struct ccw_device *cdev;
+
+ sprintf(bus_id, "0.0.%04x", devno);
+ cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
+ if (!cdev)
+ return NULL;
+
+ return cdev->dev.driver_data;
+}
+
+static void urdev_put(struct urdev *urd)
+{
+ put_device(&urd->cdev->dev);
+}
+
+/*
+ * Low-level functions to do I/O to a ur device.
+ * alloc_chan_prog
+ * do_ur_io
+ * ur_int_handler
+ *
+ * alloc_chan_prog allocates and builds the channel program
+ *
+ * do_ur_io issues the channel program to the device and blocks waiting
+ * on a completion event it publishes at urd->io_done. The function
+ * serialises itself on the device's mutex so that only one I/O
+ * is issued at a time (and that I/O is synchronous).
+ *
+ * ur_int_handler catches the "I/O done" interrupt, writes the
+ * subchannel status word into the scsw member of the urdev structure
+ * and complete()s the io_done to wake the waiting do_ur_io.
+ *
+ * The caller of do_ur_io is responsible for kfree()ing the channel program
+ * address pointer that alloc_chan_prog returned.
+ */
+
+
+/*
+ * alloc_chan_prog
+ * The channel program we use is write commands chained together
+ * with a final NOP CCW command-chained on (which ensures that CE and DE
+ * are presented together in a single interrupt instead of as separate
+ * interrupts unless an incorrect length indication kicks in first). The
+ * data length in each CCW is reclen. The caller must ensure that count
+ * is an integral multiple of reclen.
+ * The channel program pointer returned by this function must be freed
+ * with kfree. The caller is responsible for checking that
+ * count/reclen is not ridiculously large.
+ */
+static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen)
+{
+ size_t num_ccws;
+ struct ccw1 *cpa;
+ int i;
+
+ TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen);
+
+ /*
+ * We chain a NOP onto the writes to force CE+DE together.
+ * That means we allocate room for CCWs to cover count/reclen
+ * records plus a NOP.
+ */
+ num_ccws = count / reclen + 1;
+ cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ if (!cpa)
+ return NULL;
+
+ for (i = 0; count; i++) {
+ cpa[i].cmd_code = WRITE_CCW_CMD;
+ cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+ cpa[i].count = reclen;
+ cpa[i].cda = __pa(buf);
+ buf += reclen;
+ count -= reclen;
+ }
+ /* The following NOP CCW forces CE+DE to be presented together */
+ cpa[i].cmd_code = CCW_CMD_NOOP;
+ cpa[i].flags = 0;
+ cpa[i].count = 0;
+ cpa[i].cda = 0;
+
+ return cpa;
+}
+
+static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
+{
+ int rc;
+ struct ccw_device *cdev = urd->cdev;
+ DECLARE_COMPLETION(event);
+
+ TRACE("do_ur_io: cpa=%p\n", cpa);
+
+ rc = mutex_lock_interruptible(&urd->io_mutex);
+ if (rc)
+ return rc;
+
+ urd->io_done = &event;
+
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ rc = ccw_device_start(cdev, cpa, 1, 0, 0);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
+ TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
+ if (rc)
+ goto out;
+
+ wait_for_completion(&event);
+ TRACE("do_ur_io: I/O complete\n");
+ rc = 0;
+
+out:
+ mutex_unlock(&urd->io_mutex);
+ return rc;
+}
+
+/*
+ * ur interrupt handler, called from the ccw_device layer
+ */
+static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+{
+ struct urdev *urd;
+
+ TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
+ intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);
+
+ if (!intparm) {
+ TRACE("ur_int_handler: unsolicited interrupt\n");
+ return;
+ }
+ urd = cdev->dev.driver_data;
+ /* On special conditions irb is an error pointer */
+ if (IS_ERR(irb))
+ urd->io_request_rc = PTR_ERR(irb);
+ else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+ urd->io_request_rc = 0;
+ else
+ urd->io_request_rc = -EIO;
+
+ complete(urd->io_done);
+}
+
+/*
+ * reclen sysfs attribute - The record length to be used for write CCWs
+ */
+static ssize_t ur_attr_reclen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct urdev *urd = dev->driver_data;
+
+ return sprintf(buf, "%zu\n", urd->reclen);
+}
+
+static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
+
+static int ur_create_attributes(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_reclen);
+}
+
+static void ur_remove_attributes(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_reclen);
+}
+
+/*
+ * diagnose code 0x210 - retrieve device information
+ * cc=0 normal completion, we have a real device
+ * cc=1 CP paging error
+ * cc=2 The virtual device exists, but is not associated with a real device
+ * cc=3 Invalid device address, or the virtual device does not exist
+ */
+static int get_urd_class(struct urdev *urd)
+{
+ static struct diag210 ur_diag210;
+ int cc;
+
+ ur_diag210.vrdcdvno = urd->dev_id.devno;
+ ur_diag210.vrdclen = sizeof(struct diag210);
+
+ cc = diag210(&ur_diag210);
+ switch (cc) {
+ case 0:
+ return -ENOTSUPP;
+ case 2:
+ return ur_diag210.vrdcvcla; /* virtual device class */
+ case 3:
+ return -ENODEV;
+ default:
+ return -EIO;
+ }
+}
+
+/*
+ * Allocation and freeing of urfile structures
+ */
+static struct urfile *urfile_alloc(struct urdev *urd)
+{
+ struct urfile *urf;
+
+ urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
+ if (!urf)
+ return NULL;
+ urf->urd = urd;
+
+ TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
+ urf->dev_reclen);
+
+ return urf;
+}
+
+static void urfile_free(struct urfile *urf)
+{
+ TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
+ kfree(urf);
+}
+
+/*
+ * The fops implementation of the character device driver
+ */
+static ssize_t do_write(struct urdev *urd, const char __user *udata,
+ size_t count, size_t reclen, loff_t *ppos)
+{
+ struct ccw1 *cpa;
+ char *buf;
+ int rc;
+
+ /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */
+ buf = kmalloc(count, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, udata, count)) {
+ rc = -EFAULT;
+ goto fail_kfree_buf;
+ }
+
+ cpa = alloc_chan_prog(buf, count, reclen);
+ if (!cpa) {
+ rc = -ENOMEM;
+ goto fail_kfree_buf;
+ }
+
+ rc = do_ur_io(urd, cpa);
+ if (rc)
+ goto fail_kfree_cpa;
+
+ if (urd->io_request_rc) {
+ rc = urd->io_request_rc;
+ goto fail_kfree_cpa;
+ }
+ *ppos += count;
+ rc = count;
+fail_kfree_cpa:
+ kfree(cpa);
+fail_kfree_buf:
+ kfree(buf);
+ return rc;
+}
+
+static ssize_t ur_write(struct file *file, const char __user *udata,
+ size_t count, loff_t *ppos)
+{
+ struct urfile *urf = file->private_data;
+
+ TRACE("ur_write: count=%zu\n", count);
+
+ if (count == 0)
+ return 0;
+
+ if (count % urf->dev_reclen)
+ return -EINVAL; /* count must be a multiple of reclen */
+
+ if (count > urf->dev_reclen * MAX_RECS_PER_IO)
+ count = urf->dev_reclen * MAX_RECS_PER_IO;
+
+ return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
+}
+
+static int do_diag_14(unsigned long rx, unsigned long ry1,
+ unsigned long subcode)
+{
+ register unsigned long _ry1 asm("2") = ry1;
+ register unsigned long _ry2 asm("3") = subcode;
+ int rc = 0;
+
+ asm volatile(
+#ifdef CONFIG_64BIT
+ " sam31\n"
+ " diag %2,2,0x14\n"
+ " sam64\n"
+#else
+ " diag %2,2,0x14\n"
+#endif
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (rc), "+d" (_ry2)
+ : "d" (rx), "d" (_ry1)
+ : "cc");
+
+ TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc);
+ return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0028 - position spool file to designated
+ * record
+ * cc=0 normal completion
+ * cc=2 no file active on the virtual reader or device not ready
+ * cc=3 record specified is beyond EOF
+ */
+static int diag_position_to_record(int devno, int record)
+{
+ int cc;
+
+ cc = do_diag_14(record, devno, 0x28);
+ switch (cc) {
+ case 0:
+ return 0;
+ case 2:
+ return -ENOMEDIUM;
+ case 3:
+ return -ENODATA; /* position beyond end of file */
+ default:
+ return -EIO;
+ }
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
+ * cc=0 normal completion
+ * cc=1 EOF reached
+ * cc=2 no file active on the virtual reader, and no file eligible
+ * cc=3 file already active on the virtual reader or specified virtual
+ * reader does not exist or is not a reader
+ */
+static int diag_read_file(int devno, char *buf)
+{
+ int cc;
+
+ cc = do_diag_14((unsigned long) buf, devno, 0x00);
+ switch (cc) {
+ case 0:
+ return 0;
+ case 1:
+ return -ENODATA;
+ case 2:
+ return -ENOMEDIUM;
+ default:
+ return -EIO;
+ }
+}
+
+static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
+ loff_t *offs)
+{
+ size_t len, copied, res;
+ char *buf;
+ int rc;
+ u16 reclen;
+ struct urdev *urd;
+
+ urd = ((struct urfile *) file->private_data)->urd;
+ reclen = ((struct urfile *) file->private_data)->file_reclen;
+
+ rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
+ if (rc == -ENODATA)
+ return 0;
+ if (rc)
+ return rc;
+
+ len = min((size_t) PAGE_SIZE, count);
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ copied = 0;
+ res = (size_t) (*offs % PAGE_SIZE);
+ do {
+ rc = diag_read_file(urd->dev_id.devno, buf);
+ if (rc == -ENODATA) {
+ break;
+ }
+ if (rc)
+ goto fail;
+ if (reclen)
+ *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
+ len = min(count - copied, PAGE_SIZE - res);
+ if (copy_to_user(ubuf + copied, buf + res, len)) {
+ rc = -EFAULT;
+ goto fail;
+ }
+ res = 0;
+ copied += len;
+ } while (copied != count);
+
+ *offs += copied;
+ rc = copied;
+fail:
+ kfree(buf);
+ return rc;
+}
+
+static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
+ loff_t *offs)
+{
+ struct urdev *urd;
+ int rc;
+
+ TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
+
+ if (count == 0)
+ return 0;
+
+ urd = ((struct urfile *) file->private_data)->urd;
+ rc = mutex_lock_interruptible(&urd->io_mutex);
+ if (rc)
+ return rc;
+ rc = diag14_read(file, ubuf, count, offs);
+ mutex_unlock(&urd->io_mutex);
+ return rc;
+}
+
+/*
+ * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
+ * cc=0 normal completion
+ * cc=1 no files on reader queue or no subsequent file
+ * cc=2 spid specified is invalid
+ */
+static int diag_read_next_file_info(struct file_control_block *buf, int spid)
+{
+ int cc;
+
+ cc = do_diag_14((unsigned long) buf, spid, 0xfff);
+ switch (cc) {
+ case 0:
+ return 0;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int verify_device(struct urdev *urd)
+{
+ struct file_control_block fcb;
+ char *buf;
+ int rc;
+
+ switch (urd->class) {
+ case DEV_CLASS_UR_O:
+ return 0; /* no check needed here */
+ case DEV_CLASS_UR_I:
+ /* check for empty reader device (beginning of chain) */
+ rc = diag_read_next_file_info(&fcb, 0);
+ if (rc)
+ return rc;
+
+ /* open file on virtual reader */
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ rc = diag_read_file(urd->dev_id.devno, buf);
+ kfree(buf);
+
+ if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
+ return rc;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int get_file_reclen(struct urdev *urd)
+{
+ struct file_control_block fcb;
+ int rc;
+
+ switch (urd->class) {
+ case DEV_CLASS_UR_O:
+ return 0;
+ case DEV_CLASS_UR_I:
+ rc = diag_read_next_file_info(&fcb, 0);
+ if (rc)
+ return rc;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ if (fcb.file_stat & FLG_CP_DUMP)
+ return 0;
+
+ return fcb.rec_len;
+}
+
+static int ur_open(struct inode *inode, struct file *file)
+{
+ u16 devno;
+ struct urdev *urd;
+ struct urfile *urf;
+ unsigned short accmode;
+ int rc;
+
+ accmode = file->f_flags & O_ACCMODE;
+
+ if (accmode == O_RDWR)
+ return -EACCES;
+
+ /*
+ * We treat the minor number as the devno of the ur device
+ * to find in the driver tree.
+ */
+ devno = MINOR(file->f_dentry->d_inode->i_rdev);
+
+ urd = urdev_get_from_devno(devno);
+ if (!urd)
+ return -ENXIO;
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&urd->open_mutex)) {
+ rc = -EBUSY;
+ goto fail_put;
+ }
+ } else {
+ if (mutex_lock_interruptible(&urd->open_mutex)) {
+ rc = -ERESTARTSYS;
+ goto fail_put;
+ }
+ }
+
+ TRACE("ur_open\n");
+
+ if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
+ ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
+ TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
+ rc = -EACCES;
+ goto fail_unlock;
+ }
+
+ rc = verify_device(urd);
+ if (rc)
+ goto fail_unlock;
+
+ urf = urfile_alloc(urd);
+ if (!urf) {
+ rc = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ urf->dev_reclen = urd->reclen;
+ rc = get_file_reclen(urd);
+ if (rc < 0)
+ goto fail_urfile_free;
+ urf->file_reclen = rc;
+ file->private_data = urf;
+ return 0;
+
+fail_urfile_free:
+ urfile_free(urf);
+fail_unlock:
+ mutex_unlock(&urd->open_mutex);
+fail_put:
+ urdev_put(urd);
+ return rc;
+}
+
+static int ur_release(struct inode *inode, struct file *file)
+{
+ struct urfile *urf = file->private_data;
+
+ TRACE("ur_release\n");
+ mutex_unlock(&urf->urd->open_mutex);
+ urdev_put(urf->urd);
+ urfile_free(urf);
+ return 0;
+}
+
+static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t newpos;
+
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
+ return -ESPIPE; /* seek allowed only for reader */
+ if (offset % PAGE_SIZE)
+ return -ESPIPE; /* only multiples of 4K allowed */
+ switch (whence) {
+ case 0: /* SEEK_SET */
+ newpos = offset;
+ break;
+ case 1: /* SEEK_CUR */
+ newpos = file->f_pos + offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+ file->f_pos = newpos;
+ return newpos;
+}
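/*
 * Editorial sketch, not part of this patch: skipping ahead in a reader spool
 * file.  ur_llseek() above only accepts PAGE_SIZE-aligned offsets and only on
 * a device opened read-only; the node name is an assumption.
 */
#include <fcntl.h>
#include <unistd.h>

int example_skip_first_page(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/vmrdr-0.0.000c", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (lseek(fd, 4096, SEEK_SET) < 0) {	/* must be a multiple of 4KB */
		close(fd);
		return -1;
	}
	n = read(fd, buf, sizeof(buf));		/* second page of the spool file */
	close(fd);
	return n < 0 ? -1 : 0;
}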
+
+static struct file_operations ur_fops = {
+ .owner = THIS_MODULE,
+ .open = ur_open,
+ .release = ur_release,
+ .read = ur_read,
+ .write = ur_write,
+ .llseek = ur_llseek,
+};
+
+/*
+ * ccw_device infrastructure:
+ * ur_probe gets its own ref to the device (i.e. get_device),
+ * creates the struct urdev, the device attributes, sets up
+ * the interrupt handler and validates the virtual unit record device.
+ * ur_remove removes the device attributes, frees the struct urdev
+ * and drops (put_device) the ref to the device we got in ur_probe.
+ */
+static int ur_probe(struct ccw_device *cdev)
+{
+ struct urdev *urd;
+ int rc;
+
+ TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private);
+
+ if (!get_device(&cdev->dev))
+ return -ENODEV;
+
+ urd = urdev_alloc(cdev);
+ if (!urd) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ rc = ur_create_attributes(&cdev->dev);
+ if (rc) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ cdev->dev.driver_data = urd;
+ cdev->handler = ur_int_handler;
+
+ /* validate virtual unit record device */
+ urd->class = get_urd_class(urd);
+ if (urd->class < 0) {
+ rc = urd->class;
+ goto fail;
+ }
+ if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
+ rc = -ENOTSUPP;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ urdev_free(urd);
+ put_device(&cdev->dev);
+ return rc;
+}
+
+static void ur_remove(struct ccw_device *cdev)
+{
+ struct urdev *urd = cdev->dev.driver_data;
+
+ TRACE("ur_remove\n");
+ if (cdev->online)
+ ur_set_offline(cdev);
+ ur_remove_attributes(&cdev->dev);
+ urdev_free(urd);
+ put_device(&cdev->dev);
+}
+
+static int ur_set_online(struct ccw_device *cdev)
+{
+ struct urdev *urd;
+ int minor, major, rc;
+ char node_id[16];
+
+ TRACE("ur_set_online: cdev=%p state=%d\n", cdev,
+ *(int *) cdev->private);
+
+ if (!try_module_get(ur_driver.owner))
+ return -EINVAL;
+
+ urd = (struct urdev *) cdev->dev.driver_data;
+ minor = urd->dev_id.devno;
+ major = MAJOR(ur_first_dev_maj_min);
+
+ urd->char_device = cdev_alloc();
+ if (!urd->char_device) {
+ rc = -ENOMEM;
+ goto fail_module_put;
+ }
+
+ cdev_init(urd->char_device, &ur_fops);
+ urd->char_device->dev = MKDEV(major, minor);
+ urd->char_device->owner = ur_fops.owner;
+
+ rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+ if (rc)
+ goto fail_free_cdev;
+ if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
+ if (urd->class == DEV_CLASS_UR_I)
+ sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id);
+ if (urd->class == DEV_CLASS_UR_O)
+ sprintf(node_id, "vmpun-%s", cdev->dev.bus_id);
+ } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
+ sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
+ } else {
+ rc = -ENOTSUPP;
+ goto fail_free_cdev;
+ }
+
+ urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
+ "%s", node_id);
+ if (IS_ERR(urd->device)) {
+ rc = PTR_ERR(urd->device);
+ TRACE("ur_set_online: device_create rc=%d\n", rc);
+ goto fail_free_cdev;
+ }
+
+ return 0;
+
+fail_free_cdev:
+ cdev_del(urd->char_device);
+fail_module_put:
+ module_put(ur_driver.owner);
+
+ return rc;
+}
+
+static int ur_set_offline(struct ccw_device *cdev)
+{
+ struct urdev *urd;
+
+ TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n",
+ cdev, cdev->private, *(int *) cdev->private);
+ urd = (struct urdev *) cdev->dev.driver_data;
+ device_destroy(vmur_class, urd->char_device->dev);
+ cdev_del(urd->char_device);
+ module_put(ur_driver.owner);
+
+ return 0;
+}
+
+/*
+ * Module initialisation and cleanup
+ */
+static int __init ur_init(void)
+{
+ int rc;
+ dev_t dev;
+
+ if (!MACHINE_IS_VM) {
+ PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
+ return -ENODEV;
+ }
+
+ vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
+ if (!vmur_dbf)
+ return -ENOMEM;
+ rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
+ if (rc)
+ goto fail_free_dbf;
+
+ debug_set_level(vmur_dbf, 6);
+
+ rc = ccw_driver_register(&ur_driver);
+ if (rc)
+ goto fail_free_dbf;
+
+ rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
+ if (rc) {
+ PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
+ goto fail_unregister_driver;
+ }
+ ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
+
+ vmur_class = class_create(THIS_MODULE, "vmur");
+ if (IS_ERR(vmur_class)) {
+ rc = PTR_ERR(vmur_class);
+ goto fail_unregister_region;
+ }
+ PRINT_INFO("%s loaded.\n", ur_banner);
+ return 0;
+
+fail_unregister_region:
+ unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+fail_unregister_driver:
+ ccw_driver_unregister(&ur_driver);
+fail_free_dbf:
+ debug_unregister(vmur_dbf);
+ return rc;
+}
+
+static void __exit ur_exit(void)
+{
+ class_destroy(vmur_class);
+ unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
+ ccw_driver_unregister(&ur_driver);
+ debug_unregister(vmur_dbf);
+ PRINT_INFO("%s unloaded.\n", ur_banner);
+}
+
+module_init(ur_init);
+module_exit(ur_exit);
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
new file mode 100644
index 000000000000..16d0a4e38e40
--- /dev/null
+++ b/drivers/s390/char/vmur.h
@@ -0,0 +1,104 @@
+/*
+ * Linux driver for System z and s390 unit record devices
+ * (z/VM virtual punch, reader, printer)
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ * Frank Munzert <munzert@de.ibm.com>
+ */
+
+#ifndef _VMUR_H_
+#define _VMUR_H_
+
+#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
+#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
+/*
+ * we only support z/VM's default unit record devices:
+ * both in SPOOL directory control statement and in CP DEFINE statement
+ * RDR defaults to 2540 reader
+ * PUN defaults to 2540 punch
+ * PRT defaults to 1403 printer
+ */
+#define READER_PUNCH_DEVTYPE 0x2540
+#define PRINTER_DEVTYPE 0x1403
+
+/* z/VM spool file control block SFBLOK */
+struct file_control_block {
+ char reserved_1[8];
+ char user_owner[8];
+ char user_orig[8];
+ __s32 data_recs;
+ __s16 rec_len;
+ __s16 file_num;
+ __u8 file_stat;
+ __u8 dev_type;
+ char reserved_2[6];
+ char file_name[12];
+ char file_type[12];
+ char create_date[8];
+ char create_time[8];
+ char reserved_3[6];
+ __u8 file_class;
+ __u8 sfb_lok;
+ __u64 distr_code;
+ __u32 reserved_4;
+ __u8 current_starting_copy_number;
+ __u8 sfblock_cntrl_flags;
+ __u8 reserved_5;
+ __u8 more_status_flags;
+ char rest[200];
+} __attribute__ ((packed));
+
+#define FLG_CP_DUMP 0x10
+
+/*
+ * A struct urdev is created for each ur device that is made available
+ * via the ccw_device driver model.
+ */
+struct urdev {
+ struct ccw_device *cdev; /* Backpointer to ccw device */
+ struct mutex io_mutex; /* Serialises device IO */
+ struct mutex open_mutex; /* Serialises access to device */
+ struct completion *io_done; /* do_ur_io waits; irq completes */
+ struct device *device;
+ struct cdev *char_device;
+ struct ccw_dev_id dev_id; /* device id */
+ size_t reclen; /* Record length for *write* CCWs */
+ int class; /* VM device class */
+ int io_request_rc; /* return code from I/O request */
+};
+
+/*
+ * A struct urfile is allocated at open() time for each device and
+ * freed on release().
+ */
+struct urfile {
+ struct urdev *urd;
+ unsigned int flags;
+ size_t dev_reclen;
+ __u16 file_reclen;
+};
+
+/*
+ * Device major/minor definitions.
+ */
+
+#define UR_MAJOR 0 /* get dynamic major */
+/*
+ * We map minor numbers directly to device numbers (0-FFFF) for simplicity.
+ * This avoids having to allocate (and manage) slot numbers.
+ */
+#define NUM_MINORS 65536
+
+/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
+#define MAX_RECS_PER_IO 511
+#define WRITE_CCW_CMD 0x01
+
+#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
+#define CCWDEV_CU_DI(cutype, di) \
+ CCW_DEVICE(cutype, 0x00), .driver_info = (di)
+
+#define FILE_RECLEN_OFFSET 4064 /* reclen offset in spool data block */
+
+#endif /* _VMUR_H_ */
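
[Editor's note] The MAX_RECS_PER_IO comment above compresses the sizing argument into one line. A minimal userspace sketch of the arithmetic, assuming the standard 8-byte s390 CCW size (the 4 KB figure only works out under that assumption):

#include <stdio.h>

#define CCW_SIZE	8	/* bytes per channel command word (s390 format-0/1 CCW) */
#define MAX_RECS_PER_IO	511	/* one read/write CCW per record, as in vmur.h */

int main(void)
{
	/* 511 data-transfer CCWs plus one trailing NOP CCW */
	unsigned int ccws = MAX_RECS_PER_IO + 1;
	unsigned int chan_prog_bytes = ccws * CCW_SIZE;

	/* prints 4096: the channel program fits exactly into one 4 KB page */
	printf("channel program: %u CCWs = %u bytes\n", ccws, chan_prog_bytes);
	return 0;
}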
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index ac289e6eadfe..b57d93d986c0 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -141,8 +141,9 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
/*
* Channel measurement related functions
*/
-static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t chp_measurement_chars_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
unsigned int size;
@@ -165,7 +166,6 @@ static struct bin_attribute chp_measurement_chars_attr = {
.attr = {
.name = "measurement_chars",
.mode = S_IRUSR,
- .owner = THIS_MODULE,
},
.size = sizeof(struct cmg_chars),
.read = chp_measurement_chars_read,
@@ -193,8 +193,9 @@ static void chp_measurement_copy_block(struct cmg_entry *buf,
} while (reference_buf.values[0] != buf->values[0]);
}
-static ssize_t chp_measurement_read(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t chp_measurement_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct channel_subsystem *css;
@@ -217,7 +218,6 @@ static struct bin_attribute chp_measurement_attr = {
.attr = {
.name = "measurement",
.mode = S_IRUSR,
- .owner = THIS_MODULE,
},
.size = sizeof(struct cmg_entry),
.read = chp_measurement_read,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6b264bdb5bfb..001682e70f67 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -272,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
struct ccw_device_id *id = &(cdev->id);
int len;
- len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1;
+ len = snprint_alias(buf, PAGE_SIZE, id, "\n");
return len > PAGE_SIZE ? PAGE_SIZE : len;
}
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index e70aeb7a3781..ed026a1dc324 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -166,9 +166,9 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
char dbf_text[15];
- if (ccq == 0 || ccq == 32 || ccq == 96)
+ if (ccq == 0 || ccq == 32)
return 0;
- if (ccq == 97)
+ if (ccq == 96 || ccq == 97)
return 1;
/*notify devices immediately*/
sprintf(dbf_text,"%d", ccq);
@@ -2306,8 +2306,8 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
if (!ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
"SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
- irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
- CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+ irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+ CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
irq_ptr->sch_token = 0;
@@ -2328,8 +2328,8 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
"SIGAs for sch 0.%x.%x.\n", result,
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
- CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+ qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+ CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
goto out;
@@ -2340,8 +2340,8 @@ qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
"is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
ssqd_area->response.code,
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
- qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
- CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
+ qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+ CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
goto out;
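
[Editor's note] The three qdioac hunks above all fix the same slip: '||' evaluates the flag macros as booleans and collapses the result to 1, while '|' actually merges the bits. A self-contained C illustration with stand-in flag values (the real CHSC_FLAG_* constants are not shown in this patch, so the values below are hypothetical):

#include <stdio.h>

/* hypothetical stand-ins for CHSC_FLAG_SIGA_*_NECESSARY */
#define FLAG_INPUT	0x01
#define FLAG_OUTPUT	0x02
#define FLAG_SYNC	0x04

int main(void)
{
	unsigned int in = FLAG_INPUT, out = FLAG_OUTPUT, sync = FLAG_SYNC;

	unsigned int wrong = in || out || sync;	/* logical OR: always 1 */
	unsigned int right = in | out | sync;	/* bitwise OR: 0x07, all flags set */

	printf("logical OR gives 0x%x, bitwise OR gives 0x%x\n", wrong, right);
	return 0;
}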
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 348bb7b82771..023455a0b34a 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -317,8 +317,8 @@ claw_probe(struct ccwgroup_device *cgdev)
CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
return -ENOMEM;
}
- privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
- privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL);
+ privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
+ privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
probe_error(cgdev);
put_device(&cgdev->dev);
@@ -327,8 +327,6 @@ claw_probe(struct ccwgroup_device *cgdev)
CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM);
return -ENOMEM;
}
- memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE);
- memset(privptr->p_env, 0x00, sizeof(struct claw_env));
memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
@@ -3924,7 +3922,7 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id);
ccw_device_get_id(cdev, &dev_id);
p_ch->devno = dev_id.devno;
- if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
+ if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
printk(KERN_WARNING "%s Out of memory in %s for irb\n",
p_ch->id,__FUNCTION__);
#ifdef FUNCTRACE
@@ -3933,7 +3931,6 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
#endif
return -ENOMEM;
}
- memset(p_ch->irb, 0, sizeof (struct irb));
#ifdef FUNCTRACE
printk(KERN_INFO "%s:%s Exit on line %d\n",
cdev->dev.bus_id,__FUNCTION__,__LINE__);
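
[Editor's note] This claw_probe()/add_channel() change follows a pattern that recurs through the series (see also zfcp_erp.c and bbc_i2c.c below): kzalloc() returns already-zeroed memory, so the trailing memset() can be dropped. A userspace analogue of the same transformation, using calloc() in place of kzalloc() since the kernel allocators are not available here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct claw_env_like { char adapter_name[8]; char host_name[8]; };

/* before: allocate, then clear by hand (kmalloc + memset) */
static struct claw_env_like *env_alloc_old(void)
{
	struct claw_env_like *env = malloc(sizeof(*env));
	if (env)
		memset(env, 0, sizeof(*env));
	return env;
}

/* after: one zeroing allocation, as kzalloc() provides in the patch */
static struct claw_env_like *env_alloc_new(void)
{
	return calloc(1, sizeof(struct claw_env_like));
}

int main(void)
{
	struct claw_env_like *a = env_alloc_old();
	struct claw_env_like *b = env_alloc_new();

	printf("both zeroed: %d %d\n", a && a->adapter_name[0] == 0,
	       b && b->adapter_name[0] == 0);
	free(a);
	free(b);
	return 0;
}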
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index b34eb82edd98..ec18bae05df0 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -211,6 +211,10 @@ struct qeth_perf_stats {
/* initial values when measuring starts */
unsigned long initial_rx_packets;
unsigned long initial_tx_packets;
+ /* inbound scatter gather data */
+ unsigned int sg_skbs_rx;
+ unsigned int sg_frags_rx;
+ unsigned int sg_alloc_page_rx;
};
/* Routing stuff */
@@ -341,6 +345,9 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
#define QETH_IP_HEADER_SIZE 40
+/* large receive scatter gather copy break */
+#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+
struct qeth_hdr_layer3 {
__u8 id;
__u8 flags;
@@ -771,6 +778,7 @@ struct qeth_card_options {
int layer2;
enum qeth_large_send_types large_send;
int performance_stats;
+ int rx_sg_cb;
};
/*
@@ -828,6 +836,7 @@ struct qeth_card {
int (*orig_hard_header)(struct sk_buff *,struct net_device *,
unsigned short,void *,void *,unsigned);
struct qeth_osn_info osn_info;
+ atomic_t force_alloc_skb;
};
struct qeth_card_list_struct {
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 86b0c44165c1..57f69434fbf9 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1054,6 +1054,7 @@ qeth_set_intial_options(struct qeth_card *card)
else
card->options.layer2 = 0;
card->options.performance_stats = 0;
+ card->options.rx_sg_cb = QETH_RX_SG_CB;
}
/**
@@ -1934,6 +1935,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
atomic_inc(&reply->received);
wake_up(&reply->wait_q);
}
+ cpu_relax();
};
rc = reply->rc;
qeth_put_reply(reply);
@@ -2258,6 +2260,89 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
return skb;
}
+static inline int
+qeth_create_skb_frag(struct qdio_buffer_element *element,
+ struct sk_buff **pskb,
+ int offset, int *pfrag, int data_len)
+{
+ struct page *page = virt_to_page(element->addr);
+ if (*pfrag == 0) {
+ /* the upper protocol layers assume that there is data in the
+ * skb itself. Copy a small amount (64 bytes) to make them
+ * happy. */
+ *pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH);
+ if (!(*pskb))
+ return -ENOMEM;
+ skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH);
+ if (data_len <= 64) {
+ memcpy(skb_put(*pskb, data_len), element->addr + offset,
+ data_len);
+ } else {
+ get_page(page);
+ memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
+ skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
+ data_len - 64);
+ (*pskb)->data_len += data_len - 64;
+ (*pskb)->len += data_len - 64;
+ (*pskb)->truesize += data_len - 64;
+ }
+ } else {
+ get_page(page);
+ skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
+ (*pskb)->data_len += data_len;
+ (*pskb)->len += data_len;
+ (*pskb)->truesize += data_len;
+ }
+ (*pfrag)++;
+ return 0;
+}

+
+static inline struct qeth_buffer_pool_entry *
+qeth_find_free_buffer_pool_entry(struct qeth_card *card)
+{
+ struct list_head *plh;
+ struct qeth_buffer_pool_entry *entry;
+ int i, free;
+ struct page *page;
+
+ if (list_empty(&card->qdio.in_buf_pool.entry_list))
+ return NULL;
+
+ list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
+ entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
+ free = 1;
+ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+ if (page_count(virt_to_page(entry->elements[i])) > 1) {
+ free = 0;
+ break;
+ }
+ }
+ if (free) {
+ list_del_init(&entry->list);
+ return entry;
+ }
+ }
+
+ /* no free buffer in pool so take first one and swap pages */
+ entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
+ struct qeth_buffer_pool_entry, list);
+ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
+ if (page_count(virt_to_page(entry->elements[i])) > 1) {
+ page = alloc_page(GFP_ATOMIC|GFP_DMA);
+ if (!page) {
+ return NULL;
+ } else {
+ free_page((unsigned long)entry->elements[i]);
+ entry->elements[i] = page_address(page);
+ if (card->options.performance_stats)
+ card->perf_stats.sg_alloc_page_rx++;
+ }
+ }
+ }
+ list_del_init(&entry->list);
+ return entry;
+}
+
static struct sk_buff *
qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
struct qdio_buffer_element **__element, int *__offset,
@@ -2269,6 +2354,8 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
int skb_len;
void *data_ptr;
int data_len;
+ int use_rx_sg = 0;
+ int frag = 0;
QETH_DBF_TEXT(trace,6,"nextskb");
/* qeth_hdr must not cross element boundaries */
@@ -2293,23 +2380,43 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
if (!skb_len)
return NULL;
- if (card->options.fake_ll){
- if(card->dev->type == ARPHRD_IEEE802_TR){
- if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr)))
- goto no_mem;
- skb_reserve(skb,QETH_FAKE_LL_LEN_TR);
+ if ((skb_len >= card->options.rx_sg_cb) &&
+ (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+ (!atomic_read(&card->force_alloc_skb))) {
+ use_rx_sg = 1;
+ } else {
+ if (card->options.fake_ll) {
+ if (card->dev->type == ARPHRD_IEEE802_TR) {
+ if (!(skb = qeth_get_skb(skb_len +
+ QETH_FAKE_LL_LEN_TR, *hdr)))
+ goto no_mem;
+ skb_reserve(skb, QETH_FAKE_LL_LEN_TR);
+ } else {
+ if (!(skb = qeth_get_skb(skb_len +
+ QETH_FAKE_LL_LEN_ETH, *hdr)))
+ goto no_mem;
+ skb_reserve(skb, QETH_FAKE_LL_LEN_ETH);
+ }
} else {
- if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr)))
+ skb = qeth_get_skb(skb_len, *hdr);
+ if (!skb)
goto no_mem;
- skb_reserve(skb,QETH_FAKE_LL_LEN_ETH);
}
- } else if (!(skb = qeth_get_skb(skb_len, *hdr)))
- goto no_mem;
+ }
+
data_ptr = element->addr + offset;
while (skb_len) {
data_len = min(skb_len, (int)(element->length - offset));
- if (data_len)
- memcpy(skb_put(skb, data_len), data_ptr, data_len);
+ if (data_len) {
+ if (use_rx_sg) {
+ if (qeth_create_skb_frag(element, &skb, offset,
+ &frag, data_len))
+ goto no_mem;
+ } else {
+ memcpy(skb_put(skb, data_len), data_ptr,
+ data_len);
+ }
+ }
skb_len -= data_len;
if (skb_len){
if (qeth_is_last_sbale(element)){
@@ -2331,6 +2438,10 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
}
*__element = element;
*__offset = offset;
+ if (use_rx_sg && card->options.performance_stats) {
+ card->perf_stats.sg_skbs_rx++;
+ card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
+ }
return skb;
no_mem:
if (net_ratelimit()){
@@ -2608,28 +2719,15 @@ qeth_process_inbound_buffer(struct qeth_card *card,
}
}
-static struct qeth_buffer_pool_entry *
-qeth_get_buffer_pool_entry(struct qeth_card *card)
-{
- struct qeth_buffer_pool_entry *entry;
-
- QETH_DBF_TEXT(trace, 6, "gtbfplen");
- if (!list_empty(&card->qdio.in_buf_pool.entry_list)) {
- entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
- struct qeth_buffer_pool_entry, list);
- list_del_init(&entry->list);
- return entry;
- }
- return NULL;
-}
-
-static void
+static int
qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
{
struct qeth_buffer_pool_entry *pool_entry;
int i;
-
- pool_entry = qeth_get_buffer_pool_entry(card);
+
+ pool_entry = qeth_find_free_buffer_pool_entry(card);
+ if (!pool_entry)
+ return 1;
/*
* since the buffer is accessed only from the input_tasklet
* there shouldn't be a need to synchronize; also, since we use
@@ -2648,6 +2746,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
buf->buffer->element[i].flags = 0;
}
buf->state = QETH_QDIO_BUF_EMPTY;
+ return 0;
}
static void
@@ -2682,6 +2781,7 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
int count;
int i;
int rc;
+ int newcount = 0;
QETH_DBF_TEXT(trace,6,"queinbuf");
count = (index < queue->next_buf_to_init)?
@@ -2692,9 +2792,27 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
/* only requeue at a certain threshold to avoid SIGAs */
if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){
for (i = queue->next_buf_to_init;
- i < queue->next_buf_to_init + count; ++i)
- qeth_init_input_buffer(card,
- &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]);
+ i < queue->next_buf_to_init + count; ++i) {
+ if (qeth_init_input_buffer(card,
+ &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
+ break;
+ } else {
+ newcount++;
+ }
+ }
+
+ if (newcount < count) {
+ /* we are in a memory shortage, so we switch back to
+ traditional skb allocation and drop packets */
+ if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
+ printk(KERN_WARNING
+ "qeth: switch to alloc skb\n");
+ count = newcount;
+ } else {
+ if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
+ printk(KERN_WARNING "qeth: switch to sg\n");
+ }
+
/*
* according to old code it should be avoided to requeue all
* 128 buffers in order to benefit from PCI avoidance.
@@ -6494,6 +6612,7 @@ qeth_hardsetup_card(struct qeth_card *card)
QETH_DBF_TEXT(setup, 2, "hrdsetup");
+ atomic_set(&card->force_alloc_skb, 0);
retry:
if (retries < 3){
PRINT_WARN("Retrying to do IDX activates.\n");
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c
index 89d56c8ecdd2..f1ff165a5e05 100644
--- a/drivers/s390/net/qeth_proc.c
+++ b/drivers/s390/net/qeth_proc.c
@@ -212,6 +212,12 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it)
" Skb fragments sent in SG mode : %u\n\n",
card->perf_stats.sg_skbs_sent,
card->perf_stats.sg_frags_sent);
+ seq_printf(s, " Skbs received in SG mode : %u\n"
+ " Skb fragments received in SG mode : %u\n"
+ " Page allocations for rx SG mode : %u\n\n",
+ card->perf_stats.sg_skbs_rx,
+ card->perf_stats.sg_frags_rx,
+ card->perf_stats.sg_alloc_page_rx);
seq_printf(s, " large_send tx (in Kbytes) : %u\n"
" large_send count : %u\n\n",
card->perf_stats.large_send_bytes >> 10,
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 65ffc21afc37..bb0287ad1aac 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -991,7 +991,7 @@ static struct attribute_group qeth_osn_device_attr_group = {
#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \
struct device_attribute dev_attr_##_id = { \
- .attr = {.name=__stringify(_name), .mode=_mode, .owner=THIS_MODULE },\
+ .attr = {.name=__stringify(_name), .mode=_mode, },\
.show = _show, \
.store = _store, \
};
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 821cde65e369..ab5ec1feaf4e 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -259,21 +259,21 @@ zfcp_module_init(void)
size = sizeof(struct zfcp_fsf_req_qtcb);
align = calc_alignment(size);
zfcp_data.fsf_req_qtcb_cache =
- kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
if (!zfcp_data.fsf_req_qtcb_cache)
goto out;
size = sizeof(struct fsf_status_read_buffer);
align = calc_alignment(size);
zfcp_data.sr_buffer_cache =
- kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_sr", size, align, 0, NULL);
if (!zfcp_data.sr_buffer_cache)
goto out_sr_cache;
size = sizeof(struct zfcp_gid_pn_data);
align = calc_alignment(size);
zfcp_data.gid_pn_cache =
- kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
+ kmem_cache_create("zfcp_gid", size, align, 0, NULL);
if (!zfcp_data.gid_pn_cache)
goto out_gid_cache;
@@ -815,9 +815,7 @@ zfcp_get_adapter_by_busid(char *bus_id)
struct zfcp_unit *
zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
{
- struct zfcp_unit *unit, *tmp_unit;
- unsigned int scsi_lun;
- int found;
+ struct zfcp_unit *unit;
/*
* check that there is no unit with this FCP_LUN already in list
@@ -863,22 +861,10 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
}
zfcp_unit_get(unit);
+ unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
- scsi_lun = 0;
- found = 0;
write_lock_irq(&zfcp_data.config_lock);
- list_for_each_entry(tmp_unit, &port->unit_list_head, list) {
- if (tmp_unit->scsi_lun != scsi_lun) {
- found = 1;
- break;
- }
- scsi_lun++;
- }
- unit->scsi_lun = scsi_lun;
- if (found)
- list_add_tail(&unit->list, &tmp_unit->list);
- else
- list_add_tail(&unit->list, &port->unit_list_head);
+ list_add_tail(&unit->list, &port->unit_list_head);
atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
write_unlock_irq(&zfcp_data.config_lock);
@@ -1540,15 +1526,12 @@ zfcp_gid_pn_buffers_alloc(struct zfcp_gid_pn_data **gid_pn, mempool_t *pool)
* zfcp_gid_pn_buffers_free - free buffers for GID_PN nameserver request
* @gid_pn: pointer to struct zfcp_gid_pn_data which has to be freed
*/
-static void
-zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
+static void zfcp_gid_pn_buffers_free(struct zfcp_gid_pn_data *gid_pn)
{
- if ((gid_pn->ct.pool != 0))
+ if (gid_pn->ct.pool)
mempool_free(gid_pn, gid_pn->ct.pool);
else
- kfree(gid_pn);
-
- return;
+ kfree(gid_pn);
}
/**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 22649639230b..b36dfc40d9fa 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -126,6 +126,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
#define ZFCP_MIN_OUTPUT_THRESHOLD 1 /* ignored by QDIO layer */
#define QDIO_SCSI_QFMT 1 /* 1 for FSF */
+#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
/********************* FSF SPECIFIC DEFINES *********************************/
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index aef66bc2b6ca..d8cd75ce2d9a 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1626,7 +1626,7 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
{
struct zfcp_erp_add_work *p;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
ZFCP_LOG_NORMAL("error: Out of resources. Could not register "
"the FCP-LUN 0x%Lx connected to "
@@ -1639,7 +1639,6 @@ zfcp_erp_schedule_work(struct zfcp_unit *unit)
}
zfcp_unit_get(unit);
- memset(p, 0, sizeof(*p));
atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
INIT_WORK(&p->work, zfcp_erp_scsi_scan);
p->unit = unit;
@@ -1986,6 +1985,10 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
failed_openfcp:
zfcp_close_fsf(erp_action->adapter);
failed_qdio:
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+ ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
+ ZFCP_STATUS_ADAPTER_XPORT_OK,
+ &erp_action->adapter->status);
out:
return retval;
}
@@ -2167,6 +2170,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
sleep *= 2;
}
+ atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+ &adapter->status);
+
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status)) {
ZFCP_LOG_INFO("error: exchange of configuration data for "
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 0eb31e162b15..b240800b78d7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1930,7 +1930,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
skip_fsfstatus:
send_els->status = retval;
- if (send_els->handler != 0)
+ if (send_els->handler)
send_els->handler(send_els->handler_data);
return retval;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index bdf5782b8a7a..c408badd2ae9 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -47,103 +47,56 @@ static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
/*
- * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
- * array in the adapter struct.
- * Cur_buf is the pointer array and count can be any number of required
- * buffers, the page-fitting arithmetic is done entirely within this funciton.
+ * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
+ * in the adapter struct; sbuf is the pointer array.
*
- * returns: number of buffers allocated
* locks: must only be called with zfcp_data.config_sema taken
*/
-static int
-zfcp_qdio_buffers_enqueue(struct qdio_buffer **cur_buf, int count)
+static void
+zfcp_qdio_buffers_dequeue(struct qdio_buffer **sbuf)
{
- int buf_pos;
- int qdio_buffers_per_page;
- int page_pos = 0;
- struct qdio_buffer *first_in_page = NULL;
-
- qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
- ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
-
- for (buf_pos = 0; buf_pos < count; buf_pos++) {
- if (page_pos == 0) {
- cur_buf[buf_pos] = (struct qdio_buffer *)
- get_zeroed_page(GFP_KERNEL);
- if (cur_buf[buf_pos] == NULL) {
- ZFCP_LOG_INFO("error: allocation of "
- "QDIO buffer failed \n");
- goto out;
- }
- first_in_page = cur_buf[buf_pos];
- } else {
- cur_buf[buf_pos] = first_in_page + page_pos;
+ int pos;
- }
- /* was initialised to zero */
- page_pos++;
- page_pos %= qdio_buffers_per_page;
- }
- out:
- return buf_pos;
+ for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE)
+ free_page((unsigned long) sbuf[pos]);
}
/*
- * Frees BUFFER memory for each of the pointers of the struct qdio_buffer array
- * in the adapter struct cur_buf is the pointer array and count can be any
- * number of buffers in the array that should be freed starting from buffer 0
+ * Allocates BUFFER memory to each of the pointers of the qdio_buffer_t
+ * array in the adapter struct.
+ * Sbuf is the pointer array.
*
+ * returns: zero on success else -ENOMEM
* locks: must only be called with zfcp_data.config_sema taken
*/
-static void
-zfcp_qdio_buffers_dequeue(struct qdio_buffer **cur_buf, int count)
+static int
+zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbuf)
{
- int buf_pos;
- int qdio_buffers_per_page;
-
- qdio_buffers_per_page = PAGE_SIZE / sizeof (struct qdio_buffer);
- ZFCP_LOG_TRACE("buffers_per_page=%d\n", qdio_buffers_per_page);
+ int pos;
- for (buf_pos = 0; buf_pos < count; buf_pos += qdio_buffers_per_page)
- free_page((unsigned long) cur_buf[buf_pos]);
- return;
+ for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
+ sbuf[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
+ if (!sbuf[pos]) {
+ zfcp_qdio_buffers_dequeue(sbuf);
+ return -ENOMEM;
+ }
+ }
+ for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
+ if (pos % QBUFF_PER_PAGE)
+ sbuf[pos] = sbuf[pos - 1] + 1;
+ return 0;
}
/* locks: must only be called with zfcp_data.config_sema taken */
int
zfcp_qdio_allocate_queues(struct zfcp_adapter *adapter)
{
- int buffer_count;
- int retval = 0;
+ int ret;
- buffer_count =
- zfcp_qdio_buffers_enqueue(&(adapter->request_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
- if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
- ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for request "
- "queue\n", buffer_count);
- zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
- buffer_count);
- retval = -ENOMEM;
- goto out;
- }
-
- buffer_count =
- zfcp_qdio_buffers_enqueue(&(adapter->response_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
- if (buffer_count < QDIO_MAX_BUFFERS_PER_Q) {
- ZFCP_LOG_DEBUG("only %d QDIO buffers allocated for response "
- "queue", buffer_count);
- zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
- buffer_count);
- ZFCP_LOG_TRACE("freeing request_queue buffers\n");
- zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
- retval = -ENOMEM;
- goto out;
- }
- out:
- return retval;
+ ret = zfcp_qdio_buffers_enqueue(adapter->request_queue.buffer);
+ if (ret)
+ return ret;
+ return zfcp_qdio_buffers_enqueue(adapter->response_queue.buffer);
}
/* locks: must only be called with zfcp_data.config_sema taken */
@@ -151,12 +104,10 @@ void
zfcp_qdio_free_queues(struct zfcp_adapter *adapter)
{
ZFCP_LOG_TRACE("freeing request_queue buffers\n");
- zfcp_qdio_buffers_dequeue(&(adapter->request_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
+ zfcp_qdio_buffers_dequeue(adapter->request_queue.buffer);
ZFCP_LOG_TRACE("freeing response_queue buffers\n");
- zfcp_qdio_buffers_dequeue(&(adapter->response_queue.buffer[0]),
- QDIO_MAX_BUFFERS_PER_Q);
+ zfcp_qdio_buffers_dequeue(adapter->response_queue.buffer);
}
int
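
[Editor's note] The rewritten zfcp_qdio_buffers_enqueue() above relies on QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) to pack several qdio buffers into each page: one page is allocated per QBUFF_PER_PAGE slots and the remaining pointers are derived by pointer arithmetic. A minimal userspace sketch of that layout, with placeholder sizes (the real sizeof(struct qdio_buffer) and queue depth are not part of this hunk, so the 256-byte and 128-entry figures below are purely illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096
#define QDIO_MAX_BUFFERS	128	/* illustrative queue depth */
#define QDIO_BUFFER_SIZE	256	/* hypothetical sizeof(struct qdio_buffer) */
#define QBUFF_PER_PAGE		(PAGE_SIZE / QDIO_BUFFER_SIZE)

int main(void)
{
	void *sbuf[QDIO_MAX_BUFFERS];
	int pos;

	/* one zeroed page per QBUFF_PER_PAGE buffer slots */
	for (pos = 0; pos < QDIO_MAX_BUFFERS; pos += QBUFF_PER_PAGE) {
		sbuf[pos] = calloc(1, PAGE_SIZE);
		if (!sbuf[pos])
			return 1;
	}
	/* remaining slots point into the page allocated for their group */
	for (pos = 0; pos < QDIO_MAX_BUFFERS; pos++)
		if (pos % QBUFF_PER_PAGE)
			sbuf[pos] = (char *)sbuf[pos - 1] + QDIO_BUFFER_SIZE;

	printf("buffers per page: %d, pages allocated: %d\n",
	       QBUFF_PER_PAGE, QDIO_MAX_BUFFERS / QBUFF_PER_PAGE);

	/* free exactly the page-starting entries, as the dequeue helper does */
	for (pos = 0; pos < QDIO_MAX_BUFFERS; pos += QBUFF_PER_PAGE)
		free(sbuf[pos]);
	return 0;
}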
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index a54e4140683a..e821a155b658 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/kmod.h>
+#include <linux/reboot.h>
#include <asm/oplib.h>
#include <asm/ebus.h>
@@ -170,8 +171,6 @@ static void get_current_temps(struct bbc_cpu_temperature *tp)
static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
{
static int shutting_down = 0;
- static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
char *type = "???";
s8 val = -1;
@@ -195,7 +194,7 @@ static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n");
shutting_down = 1;
- if (call_usermodehelper("/sbin/shutdown", argv, envp, 0) < 0)
+ if (orderly_poweroff(true) < 0)
printk(KERN_CRIT "envctrl: shutdown execution failed\n");
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 178155bf9db6..fbadd4d761f3 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -156,10 +156,9 @@ struct bbc_i2c_client *bbc_i2c_attach(struct linux_ebus_child *echild)
if (!bp)
return NULL;
- client = kmalloc(sizeof(*client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
- memset(client, 0, sizeof(*client));
client->bp = bp;
client->echild = echild;
client->bus = echild->resource[0].start;
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index 022e869c44dd..7b5773d88212 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -24,6 +24,7 @@
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/smp_lock.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/ebus.h>
#include <asm/oplib.h>
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 8328acab47fd..dadabef116b6 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -26,6 +26,7 @@
#include <linux/ioport.h>
#include <linux/miscdevice.h>
#include <linux/kmod.h>
+#include <linux/reboot.h>
#include <asm/ebus.h>
#include <asm/uaccess.h>
@@ -966,10 +967,6 @@ static struct i2c_child_t *envctrl_get_i2c_child(unsigned char mon_type)
static void envctrl_do_shutdown(void)
{
static int inprog = 0;
- static char *envp[] = {
- "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = {
- "/sbin/shutdown", "-h", "now", NULL };
int ret;
if (inprog != 0)
@@ -977,7 +974,7 @@ static void envctrl_do_shutdown(void)
inprog = 1;
printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n");
- ret = call_usermodehelper("/sbin/shutdown", argv, envp, 0);
+ ret = orderly_poweroff(true);
if (ret < 0) {
printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n");
inprog = 0; /* unlikely to succeed, but we could try again */
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 512857a23169..5157a2abc58d 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -619,8 +619,7 @@ static void __exit jsflash_cleanup_module(void)
jsf0.busy = 0;
misc_deregister(&jsf_dev);
- if (unregister_blkdev(JSFD_MAJOR, "jsfd") != 0)
- printk("jsfd: cleanup_module failed\n");
+ unregister_blkdev(JSFD_MAJOR, "jsfd");
blk_cleanup_queue(jsf_queue);
}
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index 6afc7e5df0d4..26b1d2a17ed2 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -656,12 +656,9 @@ static int vfc_probe(void)
if (!cards)
return -ENODEV;
- vfc_dev_lst = kmalloc(sizeof(struct vfc_dev *) *
- (cards+1),
- GFP_KERNEL);
+ vfc_dev_lst = kcalloc(cards + 1, sizeof(struct vfc_dev*), GFP_KERNEL);
if (vfc_dev_lst == NULL)
return -ENOMEM;
- memset(vfc_dev_lst, 0, sizeof(struct vfc_dev *) * (cards + 1));
vfc_dev_lst[cards] = NULL;
ret = register_chrdev(VFC_MAJOR, vfcstr, &vfc_fops);
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 002643392d42..2553629ec15d 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -33,6 +33,7 @@ struct sbus_bus *sbus_root;
static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sdev)
{
+ struct dev_archdata *sd;
unsigned long base;
const void *pval;
int len, err;
@@ -67,6 +68,10 @@ static void __init fill_sbus_device(struct device_node *dp, struct sbus_dev *sde
sbus_fill_device_irq(sdev);
+ sd = &sdev->ofdev.dev.archdata;
+ sd->prom_node = dp;
+ sd->op = &sdev->ofdev;
+
sdev->ofdev.node = dp;
if (sdev->parent)
sdev->ofdev.dev.parent = &sdev->parent->ofdev.dev;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index eb766c3af1c8..efd9d8d3a890 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
- Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
+ Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -69,6 +69,8 @@
2.26.02.008 - Free irq handler in __twa_shutdown().
Serialize reset code.
Add support for 9650SE controllers.
+ 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
+ 2.26.02.010 - Add support for 9690SA controllers.
*/
#include <linux/module.h>
@@ -92,7 +94,7 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.008"
+#define TW_DRIVER_VERSION "2.26.02.010"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -124,11 +126,11 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
unsigned short *fw_on_ctlr_branch,
unsigned short *fw_on_ctlr_build,
u32 *init_connect_result);
-static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
+static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
-static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
+static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
@@ -683,7 +685,7 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
full_command_packet = &tw_ioctl->firmware_command;
/* Load request id and sglist for both command types */
- twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
+ twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
@@ -700,10 +702,10 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
/* Now we need to reset the board */
printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
- tw_dev->host->host_no, TW_DRIVER, 0xc,
+ tw_dev->host->host_no, TW_DRIVER, 0x37,
cmd);
retval = TW_IOCTL_ERROR_OS_EIO;
- twa_reset_device_extension(tw_dev, 1);
+ twa_reset_device_extension(tw_dev);
goto out3;
}
@@ -890,7 +892,9 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
}
if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
- if ((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) || (!test_bit(TW_IN_RESET, &tw_dev->flags)))
+ if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
+ (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
+ (!test_bit(TW_IN_RESET, &tw_dev->flags)))
TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
}
@@ -935,8 +939,7 @@ static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
unsigned long before;
int retval = 1;
- if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) ||
- (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE)) {
+ if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
before = jiffies;
while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
@@ -1160,13 +1163,12 @@ static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
}
/* Allocate event info space */
- tw_dev->event_queue[0] = kmalloc(sizeof(TW_Event) * TW_Q_LENGTH, GFP_KERNEL);
+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
if (!tw_dev->event_queue[0]) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
goto out;
}
- memset(tw_dev->event_queue[0], 0, sizeof(TW_Event) * TW_Q_LENGTH);
for (i = 0; i < TW_Q_LENGTH; i++) {
tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
@@ -1196,7 +1198,6 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
u32 status_reg_value;
TW_Response_Queue response_que;
TW_Command_Full *full_command_packet;
- TW_Command *command_packet;
TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
int handled = 0;
@@ -1274,7 +1275,6 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
request_id = TW_RESID_OUT(response_que.response_id);
full_command_packet = tw_dev->command_packet_virt[request_id];
error = 0;
- command_packet = &full_command_packet->command.oldcommand;
/* Check for command packet errors */
if (full_command_packet->command.newcommand.status != 0) {
if (tw_dev->srb[request_id] != 0) {
@@ -1306,22 +1306,26 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
wake_up(&tw_dev->ioctl_wqueue);
}
} else {
+ struct scsi_cmnd *cmd;
+
+ cmd = tw_dev->srb[request_id];
+
twa_scsiop_execute_scsi_complete(tw_dev, request_id);
/* If no error command was a success */
if (error == 0) {
- tw_dev->srb[request_id]->result = (DID_OK << 16);
+ cmd->result = (DID_OK << 16);
}
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
}
/* Report residual bytes for single sgl */
- if ((tw_dev->srb[request_id]->use_sg <= 1) && (full_command_packet->command.newcommand.status == 0)) {
- if (full_command_packet->command.newcommand.sg_list[0].length < tw_dev->srb[request_id]->request_bufflen)
- tw_dev->srb[request_id]->resid = tw_dev->srb[request_id]->request_bufflen - full_command_packet->command.newcommand.sg_list[0].length;
+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
}
/* Now complete the io */
@@ -1349,11 +1353,15 @@ twa_interrupt_bail:
} /* End twa_interrupt() */
/* This function will load the request id and various sgls for ioctls */
-static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
+static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
TW_Command *oldcommand;
TW_Command_Apache *newcommand;
TW_SG_Entry *sgl;
+ unsigned int pae = 0;
+
+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
+ pae = 1;
if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
newcommand = &full_command_packet->command.newcommand;
@@ -1369,12 +1377,14 @@ static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, d
if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
/* Load the sg list */
- sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
+ if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
+ sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
+ else
+ sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
sgl->length = cpu_to_le32(length);
- if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
- oldcommand->size += 1;
+ oldcommand->size += pae;
}
}
} /* End twa_load_sgl() */
@@ -1384,52 +1394,20 @@ static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
{
int use_sg;
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- struct pci_dev *pdev = tw_dev->tw_pci_dev;
- int retval = 0;
-
- if (cmd->use_sg == 0)
- goto out;
-
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
- if (use_sg == 0) {
+ use_sg = scsi_dma_map(cmd);
+ if (!use_sg)
+ return 0;
+ else if (use_sg < 0) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
- goto out;
+ return 0;
}
cmd->SCp.phase = TW_PHASE_SGLIST;
cmd->SCp.have_data_in = use_sg;
- retval = use_sg;
-out:
- return retval;
-} /* End twa_map_scsi_sg_data() */
-/* This function will perform a pci-dma map for a single buffer */
-static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id)
-{
- dma_addr_t mapping;
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- struct pci_dev *pdev = tw_dev->tw_pci_dev;
- dma_addr_t retval = 0;
-
- if (cmd->request_bufflen == 0) {
- retval = 0;
- goto out;
- }
-
- mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL);
-
- if (mapping == 0) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page");
- goto out;
- }
-
- cmd->SCp.phase = TW_PHASE_SINGLE;
- cmd->SCp.have_data_in = mapping;
- retval = mapping;
-out:
- return retval;
-} /* End twa_map_scsi_single_data() */
+ return use_sg;
+} /* End twa_map_scsi_sg_data() */
/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
@@ -1535,7 +1513,8 @@ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id,
command_que_value = tw_dev->command_packet_phys[request_id];
/* For 9650SE write low 4 bytes first */
- if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
+ if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+ (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
command_que_value += TW_COMMAND_OFFSET;
writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
}
@@ -1566,7 +1545,8 @@ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id,
TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
goto out;
} else {
- if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) {
+ if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+ (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
/* Now write upper 4 bytes */
writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
} else {
@@ -1590,7 +1570,7 @@ out:
} /* End twa_post_command_packet() */
/* This function will reset a device extension */
-static int twa_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
+static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
int i = 0;
int retval = 1;
@@ -1748,7 +1728,7 @@ static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
mutex_lock(&tw_dev->ioctl_lock);
/* Now reset the card and some of the device extension data */
- if (twa_reset_device_extension(tw_dev, 0)) {
+ if (twa_reset_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
goto out;
}
@@ -1815,15 +1795,13 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
u32 num_sectors = 0x0;
int i, sg_count;
struct scsi_cmnd *srb = NULL;
- struct scatterlist *sglist = NULL;
- dma_addr_t buffaddr = 0x0;
+ struct scatterlist *sglist = NULL, *sg;
int retval = 1;
if (tw_dev->srb[request_id]) {
- if (tw_dev->srb[request_id]->request_buffer) {
- sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
- }
srb = tw_dev->srb[request_id];
+ if (scsi_sglist(srb))
+ sglist = scsi_sglist(srb);
}
/* Initialize command packet */
@@ -1856,32 +1834,12 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (!sglistarg) {
/* Map sglist from scsi layer to cmd packet */
- if (tw_dev->srb[request_id]->use_sg == 0) {
- if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
- command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
- command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
- if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
- memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
- } else {
- buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
- if (buffaddr == 0)
- goto out;
-
- command_packet->sg_list[0].address = TW_CPU_TO_SGL(buffaddr);
- command_packet->sg_list[0].length = cpu_to_le32(tw_dev->srb[request_id]->request_bufflen);
- }
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), 1));
-
- if (command_packet->sg_list[0].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi");
- goto out;
- }
- }
- if (tw_dev->srb[request_id]->use_sg > 0) {
- if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
- if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) {
- struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
+ if (scsi_sg_count(srb)) {
+ if ((scsi_sg_count(srb) == 1) &&
+ (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+ if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ struct scatterlist *sg = scsi_sglist(srb);
char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1893,16 +1851,16 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (sg_count == 0)
goto out;
- for (i = 0; i < sg_count; i++) {
- command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(&sglist[i]));
- command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(&sglist[i]));
+ scsi_for_each_sg(srb, sg, sg_count, i) {
+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+ command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
goto out;
}
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), tw_dev->srb[request_id]->use_sg));
+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
}
} else {
/* Internal cdb post */
@@ -1932,7 +1890,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
/* Update SG statistics */
if (srb) {
- tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
+ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
tw_dev->max_sgl_entries = tw_dev->sgl_entries;
}
@@ -1951,16 +1909,13 @@ out:
/* This function completes an execute scsi operation */
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
- if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH &&
- (tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE ||
- tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) {
- if (tw_dev->srb[request_id]->use_sg == 0) {
- memcpy(tw_dev->srb[request_id]->request_buffer,
- tw_dev->generic_buffer_virt[request_id],
- tw_dev->srb[request_id]->request_bufflen);
- }
- if (tw_dev->srb[request_id]->use_sg == 1) {
- struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+ if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+ (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+ if (scsi_sg_count(cmd) == 1) {
+ struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]);
char *buf;
unsigned long flags = 0;
local_irq_save(flags);
@@ -2017,16 +1972,8 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- struct pci_dev *pdev = tw_dev->tw_pci_dev;
- switch(cmd->SCp.phase) {
- case TW_PHASE_SINGLE:
- pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
- break;
- case TW_PHASE_SGLIST:
- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
- break;
- }
+ scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */
/* scsi_host_template initializer */
@@ -2063,11 +2010,14 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
- retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK);
- if (retval) {
- TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
- goto out_disable_device;
- }
+ if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
+ || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
+ || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+ TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
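
[Editor's note] The changelog entry "2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails" corresponds to the probe hunk above: try the 64-bit streaming and consistent masks first, and fail the probe only if the 32-bit masks cannot be set either. A userspace sketch of the same control flow, with stub mask-setting functions standing in for pci_set_dma_mask()/pci_set_consistent_dma_mask() and rigged to simulate a platform that only supports 32-bit addressing:

#include <stdio.h>

#define DMA_64BIT_MASK	0xffffffffffffffffULL
#define DMA_32BIT_MASK	0x00000000ffffffffULL

/* stubs: return 0 on success; here anything above 32 bits "fails" */
static int set_dma_mask(unsigned long long mask)            { return mask > DMA_32BIT_MASK; }
static int set_consistent_dma_mask(unsigned long long mask) { return mask > DMA_32BIT_MASK; }

int main(void)
{
	/* prefer 64-bit addressing, fall back to 32-bit, give up only if both fail */
	if (set_dma_mask(DMA_64BIT_MASK) || set_consistent_dma_mask(DMA_64BIT_MASK)) {
		if (set_dma_mask(DMA_32BIT_MASK) || set_consistent_dma_mask(DMA_32BIT_MASK)) {
			fprintf(stderr, "Failed to set dma mask\n");
			return 1;
		}
		printf("using 32-bit DMA mask\n");
	} else {
		printf("using 64-bit DMA mask\n");
	}
	return 0;
}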
@@ -2115,7 +2065,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
goto out_iounmap;
/* Set host specific parameters */
- if (pdev->device == PCI_DEVICE_ID_3WARE_9650SE)
+ if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
+ (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
host->max_id = TW_MAX_UNITS_9650SE;
else
host->max_id = TW_MAX_UNITS;
@@ -2222,6 +2173,8 @@ static struct pci_device_id twa_pci_tbl[] __devinitdata = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 7901517d4513..d14a9479e389 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
Written By: Adam Radford <linuxraid@amcc.com>
Modifications By: Tom Couch <linuxraid@amcc.com>
- Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
+ Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -419,6 +419,9 @@ static twa_message_type twa_error_table[] = {
#ifndef PCI_DEVICE_ID_3WARE_9650SE
#define PCI_DEVICE_ID_3WARE_9650SE 0x1004
#endif
+#ifndef PCI_DEVICE_ID_3WARE_9690SA
+#define PCI_DEVICE_ID_3WARE_9690SA 0x1005
+#endif
/* Bitmask macros to eliminate bitfields */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 656bdb1352d8..c7995fc216e8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1273,57 +1273,24 @@ static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
int use_sg;
dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
-
- if (cmd->use_sg == 0)
- return 0;
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
-
- if (use_sg == 0) {
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0) {
printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
return 0;
}
cmd->SCp.phase = TW_PHASE_SGLIST;
cmd->SCp.have_data_in = use_sg;
-
+
return use_sg;
} /* End tw_map_scsi_sg_data() */
-static u32 tw_map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- dma_addr_t mapping;
-
- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data()\n");
-
- if (cmd->request_bufflen == 0)
- return 0;
-
- mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, DMA_BIDIRECTIONAL);
-
- if (mapping == 0) {
- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data(): pci_map_page() failed.\n");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SINGLE;
- cmd->SCp.have_data_in = mapping;
-
- return mapping;
-} /* End tw_map_scsi_single_data() */
-
static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
- switch(cmd->SCp.phase) {
- case TW_PHASE_SINGLE:
- pci_unmap_page(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
- break;
- case TW_PHASE_SGLIST:
- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
- break;
- }
+ scsi_dma_unmap(cmd);
} /* End tw_unmap_scsi_data() */
/* This function will reset a device extension */
@@ -1499,27 +1466,16 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
void *buf;
unsigned int transfer_len;
unsigned long flags = 0;
+ struct scatterlist *sg = scsi_sglist(cmd);
- if (cmd->use_sg) {
- struct scatterlist *sg =
- (struct scatterlist *)cmd->request_buffer;
- local_irq_save(flags);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- transfer_len = min(sg->length, len);
- } else {
- buf = cmd->request_buffer;
- transfer_len = min(cmd->request_bufflen, len);
- }
+ local_irq_save(flags);
+ buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ transfer_len = min(sg->length, len);
memcpy(buf, data, transfer_len);
-
- if (cmd->use_sg) {
- struct scatterlist *sg;
- sg = (struct scatterlist *)cmd->request_buffer;
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- }
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
}
/* This function is called by the isr to complete an inquiry command */
@@ -1764,19 +1720,20 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
{
TW_Command *command_packet;
unsigned long command_que_value;
- u32 lba = 0x0, num_sectors = 0x0, buffaddr = 0x0;
+ u32 lba = 0x0, num_sectors = 0x0;
int i, use_sg;
struct scsi_cmnd *srb;
- struct scatterlist *sglist;
+ struct scatterlist *sglist, *sg;
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n");
- if (tw_dev->srb[request_id]->request_buffer == NULL) {
+ srb = tw_dev->srb[request_id];
+
+ sglist = scsi_sglist(srb);
+ if (!sglist) {
printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n");
return 1;
}
- sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
- srb = tw_dev->srb[request_id];
/* Initialize command packet */
command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
@@ -1819,33 +1776,18 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
command_packet->byte8.io.lba = lba;
command_packet->byte6.block_count = num_sectors;
- /* Do this if there are no sg list entries */
- if (tw_dev->srb[request_id]->use_sg == 0) {
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): SG = 0\n");
- buffaddr = tw_map_scsi_single_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- if (buffaddr == 0)
- return 1;
+ use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+ if (!use_sg)
+ return 1;
- command_packet->byte8.io.sgl[0].address = buffaddr;
- command_packet->byte8.io.sgl[0].length = tw_dev->srb[request_id]->request_bufflen;
+ scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
+ command_packet->byte8.io.sgl[i].address = sg_dma_address(sg);
+ command_packet->byte8.io.sgl[i].length = sg_dma_len(sg);
command_packet->size+=2;
}
- /* Do this if we have multiple sg list entries */
- if (tw_dev->srb[request_id]->use_sg > 0) {
- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- if (use_sg == 0)
- return 1;
-
- for (i=0;i<use_sg; i++) {
- command_packet->byte8.io.sgl[i].address = sg_dma_address(&sglist[i]);
- command_packet->byte8.io.sgl[i].length = sg_dma_len(&sglist[i]);
- command_packet->size+=2;
- }
- }
-
/* Update SG statistics */
- tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
+ tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
tw_dev->max_sgl_entries = tw_dev->sgl_entries;
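
[Editor's note] The 3w-xxxx conversion above (like the 3w-9xxx one earlier in this series) replaces the driver-private use_sg/request_buffer handling with the midlayer accessors: scsi_dma_map() maps the command's scatterlist, scsi_for_each_sg() walks it, scsi_sg_count()/scsi_bufflen() give the counts, and scsi_dma_unmap() tears the mapping down. Those are in-kernel APIs, so the snippet below only mimics the iteration pattern in userspace with mock types; the structure names and macro are invented for illustration:

#include <stdio.h>

/* mock scatterlist entry: in the real driver these come from scsi_sglist(cmd) */
struct mock_sg { unsigned long dma_address; unsigned int length; };

/* simplified stand-in for scsi_for_each_sg(cmd, sg, nseg, i) */
#define mock_for_each_sg(sglist, sg, nseg, i) \
	for ((i) = 0, (sg) = (sglist); (i) < (nseg); (i)++, (sg)++)

int main(void)
{
	struct mock_sg sglist[] = {
		{ 0x1000, 512 }, { 0x2000, 1024 }, { 0x3000, 512 },
	};
	struct mock_sg *sg;
	unsigned int total = 0;
	int i;

	/* the driver fills command_packet->byte8.io.sgl[i] from each entry like this */
	mock_for_each_sg(sglist, sg, 3, i) {
		printf("sgl[%d]: addr=0x%lx len=%u\n", i, sg->dma_address, sg->length);
		total += sg->length;
	}
	printf("total mapped: %u bytes\n", total);
	return 0;
}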
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index cb02656eb54c..71ff3fbfce12 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -267,8 +267,6 @@ NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
offset = max_offset;
}
if(XFERP < min_xferp) {
- printk(KERN_WARNING "53c700: XFERP %d is less than minium, setting to %d\n",
- XFERP, min_xferp);
XFERP = min_xferp;
}
return (offset & 0x0f) | (XFERP & 0x07)<<4;
@@ -585,16 +583,8 @@ NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
struct NCR_700_command_slot *slot)
{
if(SCp->sc_data_direction != DMA_NONE &&
- SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
- if(SCp->use_sg) {
- dma_unmap_sg(hostdata->dev, SCp->request_buffer,
- SCp->use_sg, SCp->sc_data_direction);
- } else {
- dma_unmap_single(hostdata->dev, slot->dma_handle,
- SCp->request_bufflen,
- SCp->sc_data_direction);
- }
- }
+ SCp->sc_data_direction != DMA_BIDIRECTIONAL)
+ scsi_dma_unmap(SCp);
}
STATIC inline void
@@ -661,7 +651,6 @@ NCR_700_chip_setup(struct Scsi_Host *host)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
- __u32 dcntl_extra = 0;
__u8 min_period;
__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
@@ -686,13 +675,14 @@ NCR_700_chip_setup(struct Scsi_Host *host)
burst_disable = BURST_DISABLE;
break;
}
- dcntl_extra = COMPAT_700_MODE;
+ hostdata->dcntl_extra |= COMPAT_700_MODE;
- NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
NCR_700_writeb(burst_length | hostdata->dmode_extra,
host, DMODE_710_REG);
- NCR_700_writeb(burst_disable | (hostdata->differential ?
- DIFF : 0), host, CTEST7_REG);
+ NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
+ (hostdata->differential ? DIFF : 0),
+ host, CTEST7_REG);
NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
| AUTO_ATN, host, SCNTL0_REG);
@@ -727,13 +717,13 @@ NCR_700_chip_setup(struct Scsi_Host *host)
* of spec: sync divider 2, async divider 3 */
DEBUG(("53c700: sync 2 async 3\n"));
NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock/2;
} else if(hostdata->clock > 50 && hostdata->clock <= 75) {
/* sync divider 1.5, async divider 3 */
DEBUG(("53c700: sync 1.5 async 3\n"));
NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock*2;
hostdata->sync_clock /= 3;
@@ -741,18 +731,18 @@ NCR_700_chip_setup(struct Scsi_Host *host)
/* sync divider 1, async divider 2 */
DEBUG(("53c700: sync 1 async 2\n"));
NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock;
} else if(hostdata->clock > 25 && hostdata->clock <=37) {
/* sync divider 1, async divider 1.5 */
DEBUG(("53c700: sync 1 async 1.5\n"));
NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
hostdata->sync_clock = hostdata->clock;
} else {
DEBUG(("53c700: sync 1 async 1\n"));
NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
- NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
+ NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
/* sync divider 1, async divider 1 */
hostdata->sync_clock = hostdata->clock;
}
@@ -1263,14 +1253,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
host->host_no, pun, lun, NCR_700_condition[i],
NCR_700_phase[j], dsp - hostdata->pScript);
if(SCp != NULL) {
- scsi_print_command(SCp);
+ struct scatterlist *sg;
- if(SCp->use_sg) {
- for(i = 0; i < SCp->use_sg + 1; i++) {
- printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
- }
+ scsi_print_command(SCp);
+ scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
+ printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
}
- }
+ }
NCR_700_internal_bus_reset(host);
} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
@@ -1844,8 +1833,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
}
/* sanity check: some of the commands generated by the mid-layer
* have an eccentric idea of their sc_data_direction */
- if(!SCp->use_sg && !SCp->request_bufflen
- && SCp->sc_data_direction != DMA_NONE) {
+ if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
+ SCp->sc_data_direction != DMA_NONE) {
#ifdef NCR_700_DEBUG
printk("53c700: Command");
scsi_print_command(SCp);
@@ -1887,31 +1876,15 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
int i;
int sg_count;
dma_addr_t vPtr = 0;
+ struct scatterlist *sg;
__u32 count = 0;
- if(SCp->use_sg) {
- sg_count = dma_map_sg(hostdata->dev,
- SCp->request_buffer, SCp->use_sg,
- direction);
- } else {
- vPtr = dma_map_single(hostdata->dev,
- SCp->request_buffer,
- SCp->request_bufflen,
- direction);
- count = SCp->request_bufflen;
- slot->dma_handle = vPtr;
- sg_count = 1;
- }
-
+ sg_count = scsi_dma_map(SCp);
+ BUG_ON(sg_count < 0);
- for(i = 0; i < sg_count; i++) {
-
- if(SCp->use_sg) {
- struct scatterlist *sg = SCp->request_buffer;
-
- vPtr = sg_dma_address(&sg[i]);
- count = sg_dma_len(&sg[i]);
- }
+ scsi_for_each_sg(SCp, sg, sg_count, i) {
+ vPtr = sg_dma_address(sg);
+ count = sg_dma_len(sg);
slot->SG[i].ins = bS_to_host(move_ins | count);
DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 841e1bb27d57..e06bdfeab420 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -177,6 +177,7 @@ struct NCR_700_command_slot {
__u8 state;
#define NCR_700_FLAG_AUTOSENSE 0x01
__u8 flags;
+ __u8 pad1[2]; /* Needed for m68k where min alignment is 2 bytes */
int tag;
__u32 resume_offset;
struct scsi_cmnd *cmnd;
@@ -196,6 +197,8 @@ struct NCR_700_Host_Parameters {
void __iomem *base; /* the base for the port (copied to host) */
struct device *dev;
__u32 dmode_extra; /* adjustable bus settings */
+ __u32 dcntl_extra; /* adjustable bus settings */
+ __u32 ctest7_extra; /* adjustable bus settings */
__u32 differential:1; /* if we are differential */
#ifdef CONFIG_53C700_LE_ON_BE
/* This option is for HP only. Set it if your chip is wired for
@@ -352,6 +355,7 @@ struct NCR_700_Host_Parameters {
#define SEL_TIMEOUT_DISABLE 0x10 /* 710 only */
#define DFP 0x08
#define EVP 0x04
+#define CTEST7_TT1 0x02
#define DIFF 0x01
#define CTEST6_REG 0x1A
#define TEMP_REG 0x1C
@@ -385,6 +389,7 @@ struct NCR_700_Host_Parameters {
#define SOFTWARE_RESET 0x01
#define COMPAT_700_MODE 0x01
#define SCRPTS_16BITS 0x20
+#define EA_710 0x20
#define ASYNC_DIV_2_0 0x00
#define ASYNC_DIV_1_5 0x40
#define ASYNC_DIV_1_0 0x80
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
deleted file mode 100644
index 93b41f45638a..000000000000
--- a/drivers/scsi/53c7xx.c
+++ /dev/null
@@ -1,6102 +0,0 @@
-/*
- * 53c710 driver. Modified from Drew Eckhardt's driver
- * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
- * Check out PERM_OPTIONS and EXPECTED_CLOCK, which may be defined in the
- * relevant machine specific file (eg. mvme16x.[ch], amiga7xx.[ch]).
- * There are also currently some defines at the top of 53c7xx.scr.
- * The chip type is #defined in script_asm.pl, as well as the Makefile.
- * Host scsi ID expected to be 7 - see NCR53c7x0_init().
- *
- * I have removed the PCI code and some of the 53c8xx specific code -
- * simply to make this file smaller and easier to manage.
- *
- * MVME16x issues:
- * Problems trying to read any chip registers in NCR53c7x0_init(), as they
- * may never have been set by 16xBug (eg. If kernel has come in over tftp).
- */
-
-/*
- * Adapted for Linux/m68k Amiga platforms for the A4000T/A4091 and
- * WarpEngine SCSI controllers.
- * By Alan Hourihane <alanh@fairlite.demon.co.uk>
- * Thanks to Richard Hirst for making it possible with the MVME additions
- */
-
-/*
- * 53c710 rev 0 doesn't support add with carry. Rev 1 and 2 do. To
- * overcome this problem you can define FORCE_DSA_ALIGNMENT, which ensures
- * that the DSA address is always xxxxxx00. If disconnection is not allowed,
- * then the script only ever tries to add small (< 256) positive offsets to
- * DSA, so lack of carry isn't a problem. FORCE_DSA_ALIGNMENT can, of course,
- * be defined for all chip revisions at a small cost in memory usage.
- */
-
-#define FORCE_DSA_ALIGNMENT
-
-/*
- * Selection timer does not always work on the 53c710, depending on the
- * timing at the last disconnect, if this is a problem for you, try
- * using validids as detailed below.
- *
- * Options for the NCR7xx driver
- *
- * noasync:0 - disables sync and asynchronous negotiation
- * nosync:0 - disables synchronous negotiation (does async)
- * nodisconnect:0 - disables disconnection
- * validids:0x?? - Bitmask field selecting which IDs are allowed.
- * - e.g. 0x03 allows ID 0,1
- * - 0x1F allows ID 0,1,2,3,4
- * opthi:n - replace top word of options with 'n'
- * optlo:n - replace bottom word of options with 'n'
- * - ALWAYS SPECIFY opthi THEN optlo <<<<<<<<<<
- */
-
-/*
- * PERM_OPTIONS are driver options which will be enabled for all NCR boards
- * in the system at driver initialization time.
- *
- * Don't THINK about touching these in PERM_OPTIONS :
- * OPTION_MEMORY_MAPPED
- * 680x0 doesn't have an IO map!
- *
- * OPTION_DEBUG_TEST1
- * Test 1 does bus mastering and interrupt tests, which will help weed
- * out brain damaged main boards.
- *
- * Other PERM_OPTIONS settings are listed below. Note the actual options
- * required are set in the relevant file (mvme16x.c, amiga7xx.c, etc):
- *
- * OPTION_NO_ASYNC
- * Don't negotiate for asynchronous transfers on the first command
- * when OPTION_ALWAYS_SYNCHRONOUS is set. Useful for dain bramaged
- * devices which do something bad rather than sending a MESSAGE
- * REJECT back to us like they should if they can't cope.
- *
- * OPTION_SYNCHRONOUS
- * Enable support for synchronous transfers. Target negotiated
- * synchronous transfers will be responded to. To initiate
- * a synchronous transfer request, call
- *
- * request_synchronous (hostno, target)
- *
- * from within KGDB.
- *
- * OPTION_ALWAYS_SYNCHRONOUS
- * Negotiate for synchronous transfers with every target after
- * driver initialization or a SCSI bus reset. This is a bit dangerous,
- * since there are some dain bramaged SCSI devices which will accept
- * SDTR messages but keep talking asynchronously.
- *
- * OPTION_DISCONNECT
- * Enable support for disconnect/reconnect. To change the
- * default setting on a given host adapter, call
- *
- * request_disconnect (hostno, allow)
- *
- * where allow is non-zero to allow, 0 to disallow.
- *
- * If you really want to run 10MHz FAST SCSI-II transfers, you should
- * know that the NCR driver currently ignores parity information. Most
- * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
- * than 8MHz. To play it safe, we only request 5MHz transfers.
- *
- * If you'd rather get 10MHz transfers, edit sdtr_message and change
- * the fourth byte from 50 to 25.
- */
-
-/*
- * Sponsored by
- * iX Multiuser Multitasking Magazine
- * Hannover, Germany
- * hm@ix.de
- *
- * Copyright 1993, 1994, 1995 Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@PoohSticks.ORG
- * +1 (303) 786-7975
- *
- * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
- *
- * For more information, please consult
- *
- * NCR53C810
- * SCSI I/O Processor
- * Programmer's Guide
- *
- * NCR 53C810
- * PCI-SCSI I/O Processor
- * Data Manual
- *
- * NCR 53C810/53C820
- * PCI-SCSI I/O Processor Design In Guide
- *
- * For literature on Symbios Logic Inc. formerly NCR, SCSI,
- * and Communication products please call (800) 334-5454 or
- * (719) 536-3300.
- *
- * PCI BIOS Specification Revision
- * PCI Local Bus Specification
- * PCI System Design Guide
- *
- * PCI Special Interest Group
- * M/S HF3-15A
- * 5200 N.E. Elam Young Parkway
- * Hillsboro, Oregon 97124-6497
- * +1 (503) 696-2000
- * +1 (800) 433-5177
- */
-
-/*
- * Design issues :
- * The cumulative latency needed to propagate a read/write request
- * through the file system, buffer cache, driver stacks, SCSI host, and
- * SCSI device is ultimately the limiting factor in throughput once we
- * have a sufficiently fast host adapter.
- *
- * So, to maximize performance we want to keep the ratio of latency to data
- * transfer time to a minimum by
- * 1. Minimizing the total number of commands sent (typical command latency
- * including drive and bus mastering host overhead is as high as 4.5ms)
- * to transfer a given amount of data.
- *
- * This is accomplished by placing no arbitrary limit on the number
- * of scatter/gather buffers supported, since we can transfer 1K
- * per scatter/gather buffer without Eric's cluster patches,
- * 4K with.
- *
- * 2. Minimizing the number of fatal interrupts serviced, since
- * fatal interrupts halt the SCSI I/O processor. Basically,
- * this means offloading the practical maximum amount of processing
- * to the SCSI chip.
- *
- * On the NCR53c810/820/720, this is accomplished by using
- * interrupt-on-the-fly signals when commands complete,
- * and only handling fatal errors and SDTR / WDTR messages
- * in the host code.
- *
- * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
- * only the lack of an interrupt-on-the-fly facility complicates
- * things. Also, SCSI ID registers and commands are
- * bit fielded rather than binary encoded.
- *
- * On the NCR53c700 and NCR53c700-66, operations that are done via
- * indirect, table mode on the more advanced chips must be
- * replaced by calls through a jump table which
- * acts as a surrogate for the DSA. Unfortunately, this
- * will mean that we must service an interrupt for each
- * disconnect/reconnect.
- *
- * 3. Eliminating latency by pipelining operations at the different levels.
- *
- * This driver allows a configurable number of commands to be enqueued
- * for each target/lun combination (experimentally, I have discovered
- * that two seems to work best) and will ultimately allow for
- * SCSI-II tagged queuing.
- *
- *
- * Architecture :
- * This driver is built around a Linux queue of commands waiting to
- * be executed, and a shared Linux/NCR array of commands to start. Commands
- * are transferred to the array by the run_process_issue_queue() function
- * which is called whenever a command completes.
- *
- * As commands are completed, the interrupt routine is triggered,
- * looks for commands in the linked list of completed commands with
- * valid status, removes these commands from a list of running commands,
- * calls the done routine, and flags their target/luns as not busy.
- *
- * Due to limitations in the intelligence of the NCR chips, certain
- * concessions are made. In many cases, it is easier to dynamically
- * generate/fix-up code rather than calculate on the NCR at run time.
- * So, code is generated or fixed up for
- *
- * - Handling data transfers, using a variable number of MOVE instructions
- * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
- *
- * The DATAIN and DATAOUT routines are separate, so that an incorrect
- * direction can be trapped, and space isn't wasted.
- *
- * It may turn out that we're better off using some sort
- * of table indirect instruction in a loop with a variable
- * sized table on the NCR53c710 and newer chips.
- *
- * - Checking for reselection (NCR53c710 and better)
- *
- * - Handling the details of SCSI context switches (NCR53c710 and better),
- * such as reprogramming appropriate synchronous parameters,
- * removing the dsa structure from the NCR's queue of outstanding
- * commands, etc.
- *
- */
-
-#include <linux/module.h>
-
-
-#include <linux/types.h>
-#include <asm/setup.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <linux/delay.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/ioport.h>
-#include <linux/time.h>
-#include <linux/blkdev.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_AMIGA
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-#include <asm/irq.h>
-
-#define BIG_ENDIAN
-#define NO_IO_SPACE
-#endif
-
-#ifdef CONFIG_MVME16x
-#include <asm/mvme16xhw.h>
-
-#define BIG_ENDIAN
-#define NO_IO_SPACE
-#define VALID_IDS
-#endif
-
-#ifdef CONFIG_BVME6000
-#include <asm/bvme6000hw.h>
-
-#define BIG_ENDIAN
-#define NO_IO_SPACE
-#define VALID_IDS
-#endif
-
-#include "scsi.h"
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport_spi.h>
-#include "53c7xx.h"
-#include <linux/stat.h>
-#include <linux/stddef.h>
-
-#ifdef NO_IO_SPACE
-/*
- * The following make the definitions in 53c7xx.h (write8, etc) smaller,
- * we don't have separate i/o space anyway.
- */
-#undef inb
-#undef outb
-#undef inw
-#undef outw
-#undef inl
-#undef outl
-#define inb(x) 1
-#define inw(x) 1
-#define inl(x) 1
-#define outb(x,y) 1
-#define outw(x,y) 1
-#define outl(x,y) 1
-#endif
-
-static int check_address (unsigned long addr, int size);
-static void dump_events (struct Scsi_Host *host, int count);
-static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
- int free, int issue);
-static void hard_reset (struct Scsi_Host *host);
-static void ncr_scsi_reset (struct Scsi_Host *host);
-static void print_lots (struct Scsi_Host *host);
-static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
- int scntl3, int now_connected);
-static int datapath_residual (struct Scsi_Host *host);
-static const char * sbcl_to_phase (int sbcl);
-static void print_progress (Scsi_Cmnd *cmd);
-static void print_queues (struct Scsi_Host *host);
-static void process_issue_queue (unsigned long flags);
-static int shutdown (struct Scsi_Host *host);
-static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
-static int disable (struct Scsi_Host *host);
-static int NCR53c7xx_run_tests (struct Scsi_Host *host);
-static irqreturn_t NCR53c7x0_intr(int irq, void *dev_id);
-static void NCR53c7x0_intfly (struct Scsi_Host *host);
-static int ncr_halt (struct Scsi_Host *host);
-static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
- *cmd);
-static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
-static void print_dsa (struct Scsi_Host *host, u32 *dsa,
- const char *prefix);
-static int print_insn (struct Scsi_Host *host, const u32 *insn,
- const char *prefix, int kernel);
-
-static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
-static void NCR53c7x0_init_fixup (struct Scsi_Host *host);
-static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
- NCR53c7x0_cmd *cmd);
-static void NCR53c7x0_soft_reset (struct Scsi_Host *host);
-
-/* Size of event list (per host adapter) */
-static int track_events = 0;
-static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
-static struct scsi_host_template *the_template = NULL;
-
-/* NCR53c710 script handling code */
-
-#include "53c7xx_d.h"
-#ifdef A_int_debug_sync
-#define DEBUG_SYNC_INTR A_int_debug_sync
-#endif
-int NCR53c7xx_script_len = sizeof (SCRIPT);
-int NCR53c7xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
-#ifdef FORCE_DSA_ALIGNMENT
-int CmdPageStart = (0 - Ent_dsa_zero - sizeof(struct NCR53c7x0_cmd)) & 0xff;
-#endif
-
-static char *setup_strings[] =
- {"","","","","","","",""};
-
-#define MAX_SETUP_STRINGS ARRAY_SIZE(setup_strings)
-#define SETUP_BUFFER_SIZE 200
-static char setup_buffer[SETUP_BUFFER_SIZE];
-static char setup_used[MAX_SETUP_STRINGS];
-
-void ncr53c7xx_setup (char *str, int *ints)
-{
- int i;
- char *p1, *p2;
-
- p1 = setup_buffer;
- *p1 = '\0';
- if (str)
- strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
- setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
- p1 = setup_buffer;
- i = 0;
- while (*p1 && (i < MAX_SETUP_STRINGS)) {
- p2 = strchr(p1, ',');
- if (p2) {
- *p2 = '\0';
- if (p1 != p2)
- setup_strings[i] = p1;
- p1 = p2 + 1;
- i++;
- }
- else {
- setup_strings[i] = p1;
- break;
- }
- }
- for (i=0; i<MAX_SETUP_STRINGS; i++)
- setup_used[i] = 0;
-}
-
-
-/* check_setup_strings() returns index if key found, 0 if not
- */
-
-static int check_setup_strings(char *key, int *flags, int *val, char *buf)
-{
-int x;
-char *cp;
-
- for (x=0; x<MAX_SETUP_STRINGS; x++) {
- if (setup_used[x])
- continue;
- if (!strncmp(setup_strings[x], key, strlen(key)))
- break;
- if (!strncmp(setup_strings[x], "next", strlen("next")))
- return 0;
- }
- if (x == MAX_SETUP_STRINGS)
- return 0;
- setup_used[x] = 1;
- cp = setup_strings[x] + strlen(key);
- *val = -1;
- if (*cp != ':')
- return ++x;
- cp++;
- if ((*cp >= '0') && (*cp <= '9')) {
- *val = simple_strtoul(cp,NULL,0);
- }
- return ++x;
-}
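
check_setup_strings() above looks for a saved "key" or "key:value" token and, when a colon is present, parses the numeric value with simple_strtoul() (base 0, so hex such as 0x1f is accepted). A simplified, userspace-only sketch of that matching, purely for illustration and not the driver code itself:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Match one "key" or "key:value" token; returns 1 on match, value in *val (-1 if no value). */
static int match_option(const char *token, const char *key, long *val)
{
	size_t klen = strlen(key);

	if (strncmp(token, key, klen))
		return 0;
	*val = -1;
	if (token[klen] == ':')
		*val = strtol(token + klen + 1, NULL, 0);	/* base 0 accepts 0x... */
	return 1;
}

int main(void)
{
	long val;

	if (match_option("validids:0x1f", "validids", &val))
		printf("validids -> 0x%lx\n", val);	/* prints 0x1f */
	return 0;
}
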
-
-
-
-/*
- * KNOWN BUGS :
- * - There is some sort of conflict when the PPP driver is compiled with
- * support for 16 channels?
- *
- * - On systems which predate the 1.3.x initialization order change,
- * the NCR driver will cause Cannot get free page messages to appear.
- * These are harmless, but I don't know of an easy way to avoid them.
- *
- * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
- * we get a PHASE MISMATCH with DSA set to zero (suggesting that this
- * is occurring somewhere in the reselection code) where
- * DSP=some value DCMD|DBC=same value.
- *
- * Closer inspection suggests that we may be trying to execute
- * some portion of the DSA?
- * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
- * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
- * scsi0 : no current command : unexpected phase MSGIN.
- * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
- * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
- * scsi0 : DSP->
- * 001c46cc : 0x001c46cc 0x00000000
- * 001c46d4 : 0x001c5ea0 0x000011f8
- *
- * Changed the print code in the phase_mismatch handler so
- * that we call print_lots to try to diagnose this.
- *
- */
-
-/*
- * Possible future direction of architecture for max performance :
- *
- * We're using a single start array for the NCR chip. This is
- * sub-optimal, because we cannot add a command which would conflict with
- * an executing command to this start queue, and therefore must insert the
- * next command for a given I/T/L combination after the first has completed;
- * incurring our interrupt latency between SCSI commands.
- *
- * To allow further pipelining of the NCR and host CPU operation, we want
- * to set things up so that immediately on termination of a command destined
- * for a given LUN, we get that LUN busy again.
- *
- * To do this, we need to add a 32 bit pointer to which is jumped to
- * on completion of a command. If no new command is available, this
- * would point to the usual DSA issue queue select routine.
- *
- * If one were, it would point to a per-NCR53c7x0_cmd select routine
- * which starts execution immediately, inserting the command at the head
- * of the start queue if the NCR chip is selected or reselected.
- *
- * We would change so that we keep a list of outstanding commands
- * for each unit, rather than a single running_list. We'd insert
- * a new command into the right running list; if the NCR didn't
- * have something running for that yet, we'd put it in the
- * start queue as well. Some magic needs to happen to handle the
- * race condition between the first command terminating before the
- * new one is written.
- *
- * Potential for profiling :
- * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
- */
-
-
-/*
- * TODO :
- * 1. To support WIDE transfers, not much needs to happen. We
- * should do CHMOVE instructions instead of MOVEs when
- * we have scatter/gather segments of uneven length. When
- * we do this, we need to handle the case where we disconnect
- * between segments.
- *
- * 2. Currently, when Icky things happen we do a FATAL(). Instead,
- * we want to do an integrity check on the parts of the NCR hostdata
- * structure which were initialized at boot time; FATAL() if that
- * fails, and otherwise try to recover. Keep track of how many
- * times this has happened within a single SCSI command; if it
- * gets excessive, then FATAL().
- *
- * 3. Parity checking is currently disabled, and a few things should
- * happen here now that we support synchronous SCSI transfers :
- * 1. On soft-reset, we should set the EPC (Enable Parity Checking)
- * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
- *
- * 2. We should enable the parity interrupt in the SIEN0 register.
- *
- * 3. intr_phase_mismatch() needs to believe that message out is
- * always an "acceptable" phase to have a mismatch in. If
- * the old phase was MSG_IN, we should send a MESSAGE PARITY
- * error. If the old phase was something else, we should send
- * a INITIATOR_DETECTED_ERROR message. Note that this could
- * cause a RESTORE POINTERS message; so we should handle that
- * correctly first. Instead, we should probably do an
- * initiator_abort.
- *
- * 4. MPEE bit of CTEST4 should be set so we get interrupted if
- * we detect an error.
- *
- *
- * 5. The initial code has been tested on the NCR53c810. I don't
- * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
- * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
- * finish development on those platforms.
- *
- * NCR53c820/825/720 - need to add wide transfer support, including WDTR
- * negotiation, programming of wide transfer capabilities
- * on reselection and table indirect selection.
- *
- * NCR53c710 - need to add fatal interrupt or GEN code for
- * command completion signaling. Need to modify all
- * SDID, SCID, etc. registers, and table indirect select code
- * since these use bit fielded (ie 1<<target) instead of
- * binary encoded target ids. Need to accommodate
- * different register mappings, probably scan through
- * the SCRIPT code and change the non SFBR register operand
- * of all MOVE instructions.
- *
- * It is rather worse than this actually, the 710 corrupts
- * both TEMP and DSA when you do a MOVE MEMORY. This
- * screws you up all over the place. MOVE MEMORY 4 with a
- * destination of DSA seems to work OK, which helps some.
- * Richard Hirst richard@sleepie.demon.co.uk
- *
- * NCR53c700/700-66 - need to add code to refix addresses on
- * every nexus change, eliminate all table indirect code,
- * very messy.
- *
- * 6. The NCR53c7x0 series is very popular on other platforms that
- * could be running Linux - ie, some high performance AMIGA SCSI
- * boards use it.
- *
- * So, I should include #ifdef'd code so that it is
- * compatible with these systems.
- *
- * Specifically, the little Endian assumptions I made in my
- * bit fields need to change, and if the NCR doesn't see memory
- * the right way, we need to provide options to reverse words
- * when the scripts are relocated.
- *
- * 7. Use vremap() to access memory mapped boards.
- */
-
-/*
- * Allow for simultaneous existence of multiple SCSI scripts so we
- * can have a single driver binary for all of the family.
- *
- * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
- * - one for rest (only the NCR53c810, 815, 820, and 825 are currently
- * supported)
- *
- * So that we only need two SCSI scripts, we need to modify things so
- * that we fixup register accesses in READ/WRITE instructions, and
- * we'll also have to accommodate the bit vs. binary encoding of IDs
- * with the 7xx chips.
- */
-
-#define ROUNDUP(adr,type) \
- ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
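
ROUNDUP() rounds an address up to the next multiple of sizeof(type); the init code below uses it to align hostdata->free. A tiny worked example, assuming a 64-bit void *:

/* ROUNDUP(adr, void *) with sizeof(void *) == 8:
 *   adr = 0x1003  ->  (0x1003 + 7) & ~7  =  0x1008
 * i.e. the next 8-byte-aligned address at or above adr. */
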
-
-
-/*
- * Function: issue_to_cmd
- *
- * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
- * structure pointer.
- *
- * Inputs; issue - pointer to start of NOP or JUMP instruction
- * in issue array.
- *
- * Returns: pointer to command on success; 0 if opcode is NOP.
- */
-
-static inline struct NCR53c7x0_cmd *
-issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
- u32 *issue)
-{
- return (issue[0] != hostdata->NOP_insn) ?
- /*
- * If the IF TRUE bit is set, it's a JUMP instruction. The
- * operand is a bus pointer to the dsa_begin routine for this DSA. The
- * dsa field of the NCR53c7x0_cmd structure starts with the
- * DSA code template. By converting to a virtual address,
- * subtracting the code template size, and offset of the
- * dsa field, we end up with a pointer to the start of the
- * structure (alternatively, we could use the
- * dsa_cmnd field, an anachronism from when we weren't
- * sure what the relationship between the NCR structures
- * and host structures was going to be).
- */
- (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
- (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
- offsetof(struct NCR53c7x0_cmd, dsa))
- /* If the IF TRUE bit is not set, it's a NOP */
- : NULL;
-}
-
-
-/*
- * FIXME: we should junk these, in favor of synchronous_want and
- * wide_want in the NCR53c7x0_hostdata structure.
- */
-
-/* Template for "preferred" synchronous transfer parameters. */
-
-static const unsigned char sdtr_message[] = {
-#ifdef CONFIG_SCSI_NCR53C7xx_FAST
- EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 25 /* *4ns */, 8 /* off */
-#else
- EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
-#endif
-};
-
-/* Template to request asynchronous transfers */
-
-static const unsigned char async_message[] = {
- EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
-};
-
-/* Template for "preferred" WIDE transfer parameters */
-
-static const unsigned char wdtr_message[] = {
- EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
-};
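
For reference when reading these templates: in an SDTR extended message the fourth byte is the transfer period factor in units of 4 ns and the fifth is the REQ/ACK offset, while a WDTR message carries the width exponent. The values above therefore work out as:

/* sdtr_message period factor (4 ns units):
 *   50 * 4 ns = 200 ns period  ->  5 MHz  (default)
 *   25 * 4 ns = 100 ns period  -> 10 MHz  (CONFIG_SCSI_NCR53C7xx_FAST)
 * offset byte = 8 outstanding REQs; async_message sets period and offset to 0;
 * wdtr_message requests 2^1 = 2-byte (16-bit) wide transfers. */
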
-
-#if 0
-/*
- * Function : struct Scsi_Host *find_host (int host)
- *
- * Purpose : KGDB support function which translates a host number
- * to a host structure.
- *
- * Inputs : host - number of SCSI host
- *
- * Returns : NULL on failure, pointer to host structure on success.
- */
-
-static struct Scsi_Host *
-find_host (int host) {
- struct Scsi_Host *h;
- for (h = first_host; h && h->host_no != host; h = h->next);
- if (!h) {
- printk (KERN_ALERT "scsi%d not found\n", host);
- return NULL;
- } else if (h->hostt != the_template) {
- printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
- return NULL;
- }
- return h;
-}
-
-#if 0
-/*
- * Function : request_synchronous (int host, int target)
- *
- * Purpose : KGDB interface which will allow us to negotiate for
- * synchronous transfers. This will be replaced with a more
- * integrated function; perhaps a new entry in the scsi_host
- * structure, accessible via an ioctl() or perhaps /proc/scsi.
- *
- * Inputs : host - number of SCSI host; target - number of target.
- *
- * Returns : 0 when negotiation has been setup for next SCSI command,
- * -1 on failure.
- */
-
-static int
-request_synchronous (int host, int target) {
- struct Scsi_Host *h;
- struct NCR53c7x0_hostdata *hostdata;
- unsigned long flags;
- if (target < 0) {
- printk (KERN_ALERT "target %d is bogus\n", target);
- return -1;
- }
- if (!(h = find_host (host)))
- return -1;
- else if (h->this_id == target) {
- printk (KERN_ALERT "target %d is host ID\n", target);
- return -1;
- }
- else if (target >= h->max_id) {
- printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
- h->max_id);
- return -1;
- }
- hostdata = (struct NCR53c7x0_hostdata *)h->hostdata[0];
-
- local_irq_save(flags);
- if (hostdata->initiate_sdtr & (1 << target)) {
- local_irq_restore(flags);
- printk (KERN_ALERT "target %d already doing SDTR\n", target);
- return -1;
- }
- hostdata->initiate_sdtr |= (1 << target);
- local_irq_restore(flags);
- return 0;
-}
-#endif
-
-/*
- * Function : request_disconnect (int host, int on_or_off)
- *
- * Purpose : KGDB support function, tells us to allow or disallow
- * disconnections.
- *
- * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
- * zero to disallow.
- *
- * Returns : 0 on success, -1 on failure.
- */
-
-static int
-request_disconnect (int host, int on_or_off) {
- struct Scsi_Host *h;
- struct NCR53c7x0_hostdata *hostdata;
- if (!(h = find_host (host)))
- return -1;
- hostdata = (struct NCR53c7x0_hostdata *) h->hostdata[0];
- if (on_or_off)
- hostdata->options |= OPTION_DISCONNECT;
- else
- hostdata->options &= ~OPTION_DISCONNECT;
- return 0;
-}
-#endif
-
-/*
- * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
- *
- * Purpose : Initialize internal structures, as required on startup, or
- * after a SCSI bus reset.
- *
- * Inputs : host - pointer to this host adapter's structure
- */
-
-static void
-NCR53c7x0_driver_init (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int i, j;
- u32 *ncrcurrent;
-
- for (i = 0; i < 16; ++i) {
- hostdata->request_sense[i] = 0;
- for (j = 0; j < 8; ++j)
- hostdata->busy[i][j] = 0;
- set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
- }
- hostdata->issue_queue = NULL;
- hostdata->running_list = hostdata->finished_queue =
- hostdata->ncrcurrent = NULL;
- for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
- i < host->can_queue; ++i, ncrcurrent += 2) {
- ncrcurrent[0] = hostdata->NOP_insn;
- ncrcurrent[1] = 0xdeadbeef;
- }
- ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
- ncrcurrent[1] = (u32) virt_to_bus (hostdata->script) +
- hostdata->E_wait_reselect;
- hostdata->reconnect_dsa_head = 0;
- hostdata->addr_reconnect_dsa_head = (u32)
- virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
- hostdata->expecting_iid = 0;
- hostdata->expecting_sto = 0;
- if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
- hostdata->initiate_sdtr = 0xffff;
- else
- hostdata->initiate_sdtr = 0;
- hostdata->talked_to = 0;
- hostdata->idle = 1;
-}
-
-/*
- * Function : static int clock_to_ccf_710 (int clock)
- *
- * Purpose : Return the clock conversion factor for a given SCSI clock.
- *
- * Inputs : clock - SCSI clock expressed in Hz.
- *
- * Returns : ccf on success, -1 on failure.
- */
-
-static int
-clock_to_ccf_710 (int clock) {
- if (clock <= 16666666)
- return -1;
- if (clock <= 25000000)
- return 2; /* Divide by 1.0 */
- else if (clock <= 37500000)
- return 1; /* Divide by 1.5 */
- else if (clock <= 50000000)
- return 0; /* Divide by 2.0 */
- else if (clock <= 66000000)
- return 3; /* Divide by 3.0 */
- else
- return -1;
-}
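
clock_to_ccf_710() maps the SCSI clock, given in Hz, to the clock conversion factor programmed into the chip. A quick usage sketch of the table above:

/* Example: clock_to_ccf_710(50000000) returns 0 (divide by 2.0);
 * anything at or below 16666666 Hz, or above 66000000 Hz, returns -1. */
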
-
-/*
- * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
- *
- * Purpose : initialize the internal structures for a given SCSI host
- *
- * Inputs : host - pointer to this host adapter's structure
- *
- * Preconditions : when this function is called, the chip_type
- * field of the hostdata structure MUST have been set.
- *
- * Returns : 0 on success, -1 on failure.
- */
-
-int
-NCR53c7x0_init (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- int i, ccf;
- unsigned char revision;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- /*
- * There are some things which we need to know about in order to provide
- * a semblance of support. Print 'em if they aren't what we expect,
- * otherwise don't add to the noise.
- *
- * -1 means we don't know what to expect.
- */
- int val, flags;
- char buf[32];
- int expected_id = -1;
- int expected_clock = -1;
- int uninitialized = 0;
-#ifdef NO_IO_SPACE
- int expected_mapping = OPTION_MEMORY_MAPPED;
-#else
- int expected_mapping = OPTION_IO_MAPPED;
-#endif
- for (i=0;i<7;i++)
- hostdata->valid_ids[i] = 1; /* Default all ID's to scan */
-
- /* Parse commandline flags */
- if (check_setup_strings("noasync",&flags,&val,buf))
- {
- hostdata->options |= OPTION_NO_ASYNC;
- hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
- }
-
- if (check_setup_strings("nosync",&flags,&val,buf))
- {
- hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
- }
-
- if (check_setup_strings("nodisconnect",&flags,&val,buf))
- hostdata->options &= ~OPTION_DISCONNECT;
-
- if (check_setup_strings("validids",&flags,&val,buf))
- {
- for (i=0;i<7;i++)
- hostdata->valid_ids[i] = val & (1<<i);
- }
-
- if ((i = check_setup_strings("next",&flags,&val,buf)))
- {
- while (i)
- setup_used[--i] = 1;
- }
-
- if (check_setup_strings("opthi",&flags,&val,buf))
- hostdata->options = (long long)val << 32;
- if (check_setup_strings("optlo",&flags,&val,buf))
- hostdata->options |= val;
-
- NCR53c7x0_local_setup(host);
- switch (hostdata->chip) {
- case 710:
- case 770:
- hostdata->dstat_sir_intr = NCR53c7x0_dstat_sir_intr;
- hostdata->init_save_regs = NULL;
- hostdata->dsa_fixup = NCR53c7xx_dsa_fixup;
- hostdata->init_fixup = NCR53c7x0_init_fixup;
- hostdata->soft_reset = NCR53c7x0_soft_reset;
- hostdata->run_tests = NCR53c7xx_run_tests;
- expected_clock = hostdata->scsi_clock;
- expected_id = 7;
- break;
- default:
- printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
- host->host_no, hostdata->chip);
- scsi_unregister (host);
- return -1;
- }
-
- /* Assign constants accessed by NCR */
- hostdata->NCR53c7xx_zero = 0;
- hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
- hostdata->NCR53c7xx_msg_abort = ABORT;
- hostdata->NCR53c7xx_msg_nop = NOP;
- hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
- if (expected_mapping == -1 ||
- (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
- (expected_mapping & OPTION_MEMORY_MAPPED))
- printk ("scsi%d : using %s mapped access\n", host->host_no,
- (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
- "io");
-
- hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
- DMODE_REG_00 : DMODE_REG_10;
- hostdata->istat = ((hostdata->chip / 100) == 8) ?
- ISTAT_REG_800 : ISTAT_REG_700;
-
-/* We have to assume that this may be the first access to the chip, so
- * we must set EA in DCNTL. */
-
- NCR53c7x0_write8 (DCNTL_REG, DCNTL_10_EA|DCNTL_10_COM);
-
-
-/* Only the ISTAT register is readable when the NCR is running, so make
- sure it's halted. */
- ncr_halt(host);
-
-/*
- * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
- * as does the 710 with one bit per SCSI ID. Conversely, the NCR
- * uses a normal, 3 bit binary representation of these values.
- *
- * Get the rest of the NCR documentation, and FIND OUT where the change
- * was.
- */
-
-#if 0
- /* May not be able to do this - chip may not have been set up yet */
- tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
- for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
-#else
- host->this_id = 7;
-#endif
-
-/*
- * Note : we should never encounter a board setup for ID0. So,
- * if we see ID0, assume that it was uninitialized and set it
- * to the industry standard 7.
- */
- if (!host->this_id) {
- printk("scsi%d : initiator ID was %d, changing to 7\n",
- host->host_no, host->this_id);
- host->this_id = 7;
- hostdata->this_id_mask = 1 << 7;
- uninitialized = 1;
- };
-
- if (expected_id == -1 || host->this_id != expected_id)
- printk("scsi%d : using initiator ID %d\n", host->host_no,
- host->this_id);
-
- /*
- * Save important registers to allow a soft reset.
- */
-
- /*
- * CTEST7 controls cache snooping, burst mode, and support for
- * external differential drivers. This isn't currently used - the
- * default value may not be optimal anyway.
- * Even worse, it may never have been set up since reset.
- */
- hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
- revision = (NCR53c7x0_read8(CTEST8_REG) & 0xF0) >> 4;
- switch (revision) {
- case 1: revision = 0; break;
- case 2: revision = 1; break;
- case 4: revision = 2; break;
- case 8: revision = 3; break;
- default: revision = 255; break;
- }
- printk("scsi%d: Revision 0x%x\n",host->host_no,revision);
-
- if ((revision == 0 || revision == 255) && (hostdata->options & (OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS)))
- {
- printk ("scsi%d: Disabling sync working and disconnect/reselect\n",
- host->host_no);
- hostdata->options &= ~(OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS);
- }
-
- /*
- * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
- * on 800 series chips, it allows for a totem-pole IRQ driver.
- * NOTE saved_dcntl currently overwritten in init function.
- * The value read here may be garbage anyway, MVME16x board at least
- * does not initialise chip if kernel arrived via tftp.
- */
-
- hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
-
- /*
- * DMODE controls DMA burst length, and on 700 series chips,
- * 286 mode and bus width
- * NOTE: On MVME16x, chip may have been reset, so this could be a
- * power-on/reset default value.
- */
- hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
-
- /*
- * Now that burst length and enabled/disabled status is known,
- * clue the user in on it.
- */
-
- ccf = clock_to_ccf_710 (expected_clock);
-
- for (i = 0; i < 16; ++i)
- hostdata->cmd_allocated[i] = 0;
-
- if (hostdata->init_save_regs)
- hostdata->init_save_regs (host);
- if (hostdata->init_fixup)
- hostdata->init_fixup (host);
-
- if (!the_template) {
- the_template = host->hostt;
- first_host = host;
- }
-
- /*
- * Linux SCSI drivers have always been plagued with initialization
- * problems - some didn't work with the BIOS disabled since they expected
- * initialization from it, some didn't work when the networking code
- * was enabled and registers got scrambled, etc.
- *
- * To avoid problems like this, in the future, we will do a soft
- * reset on the SCSI chip, taking it back to a sane state.
- */
-
- hostdata->soft_reset (host);
-
-#if 1
- hostdata->debug_count_limit = -1;
-#else
- hostdata->debug_count_limit = 1;
-#endif
- hostdata->intrs = -1;
- hostdata->resets = -1;
- memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
- sizeof (hostdata->synchronous_want));
-
- NCR53c7x0_driver_init (host);
-
- if (request_irq(host->irq, NCR53c7x0_intr, IRQF_SHARED, "53c7xx", host))
- {
- printk("scsi%d : IRQ%d not free, detaching\n",
- host->host_no, host->irq);
- goto err_unregister;
- }
-
- if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
- (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
- /* XXX Should disable interrupts, etc. here */
- goto err_free_irq;
- } else {
- if (host->io_port) {
- host->n_io_port = 128;
- if (!request_region (host->io_port, host->n_io_port, "ncr53c7xx"))
- goto err_free_irq;
- }
- }
-
- if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
- printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
- hard_reset (host);
- }
- return 0;
-
- err_free_irq:
- free_irq(host->irq, NCR53c7x0_intr);
- err_unregister:
- scsi_unregister(host);
- return -1;
-}
-
-/*
- * Function : int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
- * unsigned long base, int io_port, int irq, int dma, long long options,
- * int clock);
- *
- * Purpose : initializes a NCR53c7,8x0 based on base addresses,
- * IRQ, and DMA channel.
- *
- * Inputs : tpnt - Template for this SCSI adapter, board - board level
- * product, chip - 710
- *
- * Returns : 0 on success, -1 on failure.
- *
- */
-
-int
-ncr53c7xx_init (struct scsi_host_template *tpnt, int board, int chip,
- unsigned long base, int io_port, int irq, int dma,
- long long options, int clock)
-{
- struct Scsi_Host *instance;
- struct NCR53c7x0_hostdata *hostdata;
- char chip_str[80];
- int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
- schedule_size = 0, ok = 0;
- void *tmp;
- unsigned long page;
-
- switch (chip) {
- case 710:
- case 770:
- schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
- script_len = NCR53c7xx_script_len;
- dsa_len = NCR53c7xx_dsa_len;
- options |= OPTION_INTFLY;
- sprintf (chip_str, "NCR53c%d", chip);
- break;
- default:
- printk("scsi-ncr53c7xx : unsupported SCSI chip %d\n", chip);
- return -1;
- }
-
- printk("scsi-ncr53c7xx : %s at memory 0x%lx, io 0x%x, irq %d",
- chip_str, base, io_port, irq);
- if (dma == DMA_NONE)
- printk("\n");
- else
- printk(", dma %d\n", dma);
-
- if (options & OPTION_DEBUG_PROBE_ONLY) {
- printk ("scsi-ncr53c7xx : probe only enabled, aborting initialization\n");
- return -1;
- }
-
- max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
- /* Size of dynamic part of command structure : */
- 2 * /* Worst case : we don't know if we need DATA IN or DATA out */
- ( 2 * /* Current instructions per scatter/gather segment */
- tpnt->sg_tablesize +
- 3 /* Current startup / termination required per phase */
- ) *
- 8 /* Each instruction is eight bytes */;
-
- /* Allocate fixed part of hostdata, dynamic part to hold appropriate
- SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
-
- We need a NCR53c7x0_cmd structure for scan_scsis() when we are
- not loaded as a module, and when we're loaded as a module, we
- can't use a non-dynamically allocated structure because modules
- are vmalloc()'d, which can allow structures to cross page
- boundaries and breaks our physical/virtual address assumptions
- for DMA.
-
- So, we stick it past the end of our hostdata structure.
-
- ASSUMPTION :
- Regardless of how many simultaneous SCSI commands we allow,
- the probe code only executes a _single_ instruction at a time,
- so we only need one here, and don't need to allocate NCR53c7x0_cmd
- structures for each target until we are no longer in scan_scsis
- and kmalloc() has become functional (memory_init() happens
- after all device driver initialization).
- */
-
- size = sizeof(struct NCR53c7x0_hostdata) + script_len +
- /* Note that alignment will be guaranteed, since we put the command
- allocated at probe time after the fixed-up SCSI script, which
- consists of 32 bit words, aligned on a 32 bit boundary. But
- on a 64bit machine we need 8 byte alignment for hostdata->free, so
- we add in another 4 bytes to take care of potential misalignment
- */
- (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
-
- page = __get_free_pages(GFP_ATOMIC,1);
- if(page==0)
- {
- printk(KERN_ERR "53c7xx: out of memory.\n");
- return -ENOMEM;
- }
-#ifdef FORCE_DSA_ALIGNMENT
- /*
- * 53c710 rev.0 doesn't have an add-with-carry instruction.
- * Ensure we allocate enough memory to force DSA alignment.
- */
- size += 256;
-#endif
- /* Size should be < 8K, so we can fit it in two pages. */
- if (size > 8192) {
- printk(KERN_ERR "53c7xx: hostdata > 8K\n");
- return -1;
- }
-
- instance = scsi_register (tpnt, 4);
- if (!instance)
- {
- free_page(page);
- return -1;
- }
- instance->hostdata[0] = page;
- memset((void *)instance->hostdata[0], 0, 8192);
- cache_push(virt_to_phys((void *)(instance->hostdata[0])), 8192);
- cache_clear(virt_to_phys((void *)(instance->hostdata[0])), 8192);
- kernel_set_cachemode((void *)instance->hostdata[0], 8192, IOMAP_NOCACHE_SER);
-
- /* FIXME : if we ever support an ISA NCR53c7xx based board, we
- need to check if the chip is running in a 16 bit mode, and if so
- unregister it if it is past the 16M (0x1000000) mark */
-
- hostdata = (struct NCR53c7x0_hostdata *)instance->hostdata[0];
- hostdata->size = size;
- hostdata->script_count = script_len / sizeof(u32);
- hostdata->board = board;
- hostdata->chip = chip;
-
- /*
- * Being memory mapped is more desirable, since
- *
- * - Memory accesses may be faster.
- *
- * - The destination and source address spaces are the same for
- * all instructions, meaning we don't have to twiddle dmode or
- * any other registers.
- *
- * So, we try for memory mapped, and if we don't get it,
- * we go for port mapped, and that failing we tell the user
- * it can't work.
- */
-
- if (base) {
- instance->base = base;
- /* Check for forced I/O mapping */
- if (!(options & OPTION_IO_MAPPED)) {
- options |= OPTION_MEMORY_MAPPED;
- ok = 1;
- }
- } else {
- options &= ~OPTION_MEMORY_MAPPED;
- }
-
- if (io_port) {
- instance->io_port = io_port;
- options |= OPTION_IO_MAPPED;
- ok = 1;
- } else {
- options &= ~OPTION_IO_MAPPED;
- }
-
- if (!ok) {
- printk ("scsi%d : not initializing, no I/O or memory mapping known \n",
- instance->host_no);
- scsi_unregister (instance);
- return -1;
- }
- instance->irq = irq;
- instance->dma_channel = dma;
-
- hostdata->options = options;
- hostdata->dsa_len = dsa_len;
- hostdata->max_cmd_size = max_cmd_size;
- hostdata->num_cmds = 1;
- hostdata->scsi_clock = clock;
- /* Initialize single command */
- tmp = (hostdata->script + hostdata->script_count);
-#ifdef FORCE_DSA_ALIGNMENT
- {
- void *t = ROUNDUP(tmp, void *);
- if (((u32)t & 0xff) > CmdPageStart)
- t = (void *)((u32)t + 255);
- t = (void *)(((u32)t & ~0xff) + CmdPageStart);
- hostdata->free = t;
-#if 0
- printk ("scsi: Registered size increased by 256 to %d\n", size);
- printk ("scsi: CmdPageStart = 0x%02x\n", CmdPageStart);
- printk ("scsi: tmp = 0x%08x, hostdata->free set to 0x%08x\n",
- (u32)tmp, (u32)t);
-#endif
- }
-#else
- hostdata->free = ROUNDUP(tmp, void *);
-#endif
- hostdata->free->real = tmp;
- hostdata->free->size = max_cmd_size;
- hostdata->free->free = NULL;
- hostdata->free->next = NULL;
- hostdata->extra_allocate = 0;
-
- /* Allocate command start code space */
- hostdata->schedule = (chip == 700 || chip == 70066) ?
- NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
-
-/*
- * For diagnostic purposes, we don't really care how fast things blaze.
- * For profiling, we want to access the 800ns resolution system clock,
- * using a 'C' call on the host processor.
- *
- * Therefore, there's no need for the NCR chip to directly manipulate
- * this data, and we should put it wherever is most convenient for
- * Linux.
- */
- if (track_events)
- hostdata->events = (struct NCR53c7x0_event *) (track_events ?
- vmalloc (sizeof (struct NCR53c7x0_event) * track_events) : NULL);
- else
- hostdata->events = NULL;
-
- if (hostdata->events) {
- memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
- track_events);
- hostdata->event_size = track_events;
- hostdata->event_index = 0;
- } else
- hostdata->event_size = 0;
-
- return NCR53c7x0_init(instance);
-}
-
-
-/*
- * Function : static void NCR53c7x0_init_fixup (struct Scsi_Host *host)
- *
- * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
- *
- * Inputs : host - pointer to this host adapter's structure
- *
- */
-
-static void
-NCR53c7x0_init_fixup (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned char tmp;
- int i, ncr_to_memory, memory_to_ncr;
- u32 base;
- NCR53c7x0_local_setup(host);
-
-
- /* XXX - NOTE : this code MUST be made endian aware */
- /* Copy code into buffer that was allocated at detection time. */
- memcpy ((void *) hostdata->script, (void *) SCRIPT,
- sizeof(SCRIPT));
- /* Fixup labels */
- for (i = 0; i < PATCHES; ++i)
- hostdata->script[LABELPATCHES[i]] +=
- virt_to_bus(hostdata->script);
- /* Fixup addresses of constants that used to be EXTERNAL */
-
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
- virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
- virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
- virt_to_bus(&(hostdata->NCR53c7xx_zero)));
- patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
- virt_to_bus(&(hostdata->NCR53c7xx_sink)));
- patch_abs_32 (hostdata->script, 0, NOP_insn,
- virt_to_bus(&(hostdata->NOP_insn)));
- patch_abs_32 (hostdata->script, 0, schedule,
- virt_to_bus((void *) hostdata->schedule));
-
- /* Fixup references to external variables: */
- for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
- hostdata->script[EXTERNAL_PATCHES[i].offset] +=
- virt_to_bus(EXTERNAL_PATCHES[i].address);
-
- /*
- * Fixup absolutes set at boot-time.
- *
- * All non-code absolute variables suffixed with "dsa_" and "int_"
- * are constants, and need no fixup provided the assembler has done
- * it for us (I don't know what the "real" NCR assembler does in
- * this case, my assembler does the right magic).
- */
-
- patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
- Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
- patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
- Ent_dsa_code_restore_pointers - Ent_dsa_zero);
- patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
- Ent_dsa_code_check_reselect - Ent_dsa_zero);
-
- /*
- * Just for the hell of it, preserve the settings of
- * Burst Length and Enable Read Line bits from the DMODE
- * register. Make sure SCRIPTS start automagically.
- */
-
-#if defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000)
- /* We know better what we want than 16xBug does! */
- tmp = DMODE_10_BL_8 | DMODE_10_FC2;
-#else
- tmp = NCR53c7x0_read8(DMODE_REG_10);
- tmp &= (DMODE_BL_MASK | DMODE_10_FC2 | DMODE_10_FC1 | DMODE_710_PD |
- DMODE_710_UO);
-#endif
-
- if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
- base = (u32) host->io_port;
- memory_to_ncr = tmp|DMODE_800_DIOM;
- ncr_to_memory = tmp|DMODE_800_SIOM;
- } else {
- base = virt_to_bus((void *)host->base);
- memory_to_ncr = ncr_to_memory = tmp;
- }
-
- /* SCRATCHB_REG_10 == SCRATCHA_REG_800, as it happens */
- patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
- patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
- patch_abs_32 (hostdata->script, 0, addr_dsa, base + DSA_REG);
-
- /*
- * I needed some variables in the script to be accessible to
- * both the NCR chip and the host processor. For these variables,
- * I made the arbitrary decision to store them directly in the
- * hostdata structure rather than in the RELATIVE area of the
- * SCRIPTS.
- */
-
-
- patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
- patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
- patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);
-
- patch_abs_32 (hostdata->script, 0, msg_buf,
- virt_to_bus((void *)&(hostdata->msg_buf)));
- patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
- virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
- patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
- virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
- patch_abs_32 (hostdata->script, 0, reselected_identify,
- virt_to_bus((void *)&(hostdata->reselected_identify)));
-/* reselected_tag is currently unused */
-#if 0
- patch_abs_32 (hostdata->script, 0, reselected_tag,
- virt_to_bus((void *)&(hostdata->reselected_tag)));
-#endif
-
- patch_abs_32 (hostdata->script, 0, test_dest,
- virt_to_bus((void*)&hostdata->test_dest));
- patch_abs_32 (hostdata->script, 0, test_src,
- virt_to_bus(&hostdata->test_source));
- patch_abs_32 (hostdata->script, 0, saved_dsa,
- virt_to_bus((void *)&hostdata->saved2_dsa));
- patch_abs_32 (hostdata->script, 0, emulfly,
- virt_to_bus((void *)&hostdata->emulated_intfly));
-
- patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
- (unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));
-
-/* These are for event logging; the ncr_event enum contains the
- actual interrupt numbers. */
-#ifdef A_int_EVENT_SELECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
-#endif
-#ifdef A_int_EVENT_DISCONNECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
-#endif
-#ifdef A_int_EVENT_RESELECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
-#endif
-#ifdef A_int_EVENT_COMPLETE
- patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
-#endif
-#ifdef A_int_EVENT_IDLE
- patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
-#endif
-#ifdef A_int_EVENT_SELECT_FAILED
- patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
- (u32) EVENT_SELECT_FAILED);
-#endif
-#ifdef A_int_EVENT_BEFORE_SELECT
- patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
- (u32) EVENT_BEFORE_SELECT);
-#endif
-#ifdef A_int_EVENT_RESELECT_FAILED
- patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
- (u32) EVENT_RESELECT_FAILED);
-#endif
-
- /*
- * Make sure the NCR and Linux code agree on the location of
- * certain fields.
- */
-
- hostdata->E_accept_message = Ent_accept_message;
- hostdata->E_command_complete = Ent_command_complete;
- hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
- hostdata->E_data_transfer = Ent_data_transfer;
- hostdata->E_debug_break = Ent_debug_break;
- hostdata->E_dsa_code_template = Ent_dsa_code_template;
- hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
- hostdata->E_end_data_transfer = Ent_end_data_transfer;
- hostdata->E_initiator_abort = Ent_initiator_abort;
- hostdata->E_msg_in = Ent_msg_in;
- hostdata->E_other_transfer = Ent_other_transfer;
- hostdata->E_other_in = Ent_other_in;
- hostdata->E_other_out = Ent_other_out;
- hostdata->E_reject_message = Ent_reject_message;
- hostdata->E_respond_message = Ent_respond_message;
- hostdata->E_select = Ent_select;
- hostdata->E_select_msgout = Ent_select_msgout;
- hostdata->E_target_abort = Ent_target_abort;
-#ifdef Ent_test_0
- hostdata->E_test_0 = Ent_test_0;
-#endif
- hostdata->E_test_1 = Ent_test_1;
- hostdata->E_test_2 = Ent_test_2;
-#ifdef Ent_test_3
- hostdata->E_test_3 = Ent_test_3;
-#endif
- hostdata->E_wait_reselect = Ent_wait_reselect;
- hostdata->E_dsa_code_begin = Ent_dsa_code_begin;
-
- hostdata->dsa_cmdout = A_dsa_cmdout;
- hostdata->dsa_cmnd = A_dsa_cmnd;
- hostdata->dsa_datain = A_dsa_datain;
- hostdata->dsa_dataout = A_dsa_dataout;
- hostdata->dsa_end = A_dsa_end;
- hostdata->dsa_msgin = A_dsa_msgin;
- hostdata->dsa_msgout = A_dsa_msgout;
- hostdata->dsa_msgout_other = A_dsa_msgout_other;
- hostdata->dsa_next = A_dsa_next;
- hostdata->dsa_select = A_dsa_select;
- hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
- hostdata->dsa_status = A_dsa_status;
- hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
- 8 /* destination operand */;
-
- /* sanity check */
- if (A_dsa_fields_start != Ent_dsa_code_template_end -
- Ent_dsa_zero)
- printk("scsi%d : NCR dsa_fields start is %d not %d\n",
- host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
- Ent_dsa_zero);
-
- printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
- virt_to_bus(hostdata->script), hostdata->script);
-}
-
-/*
- * Function : static int NCR53c7xx_run_tests (struct Scsi_Host *host)
- *
- * Purpose : run various verification tests on the NCR chip,
- * including interrupt generation, and proper bus mastering
- * operation.
- *
- * Inputs : host - a properly initialized Scsi_Host structure
- *
- * Preconditions : the NCR chip must be in a halted state.
- *
- * Returns : 0 if all tests were successful, -1 on error.
- *
- */
-
-static int
-NCR53c7xx_run_tests (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long timeout;
- u32 start;
- int failed, i;
- unsigned long flags;
- NCR53c7x0_local_setup(host);
-
- /* The NCR chip _must_ be idle to run the test scripts */
-
- local_irq_save(flags);
- if (!hostdata->idle) {
- printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
-
- /*
- * Check for functional interrupts, this could work as an
- * autoprobe routine.
- */
-
- if ((hostdata->options & OPTION_DEBUG_TEST1) &&
- hostdata->state != STATE_DISABLED) {
- hostdata->idle = 0;
- hostdata->test_running = 1;
- hostdata->test_completed = -1;
- hostdata->test_dest = 0;
- hostdata->test_source = 0xdeadbeef;
- start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
- hostdata->state = STATE_RUNNING;
- printk ("scsi%d : test 1", host->host_no);
- NCR53c7x0_write32 (DSP_REG, start);
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM |
- DCNTL_STD);
- printk (" started\n");
- local_irq_restore(flags);
-
- /*
- * This is currently a .5 second timeout, since (in theory) no slow
- * board will take that long. In practice, we've seen one
- * pentium which occasionally fails with this timeout, but works
- * when given 10 times as long.
- */
-
- timeout = jiffies + 5 * HZ / 10;
- while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
- barrier();
-
- failed = 1;
- if (hostdata->test_completed == -1)
- printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
- (hostdata->test_dest == 0xdeadbeef) ?
- " due to lost interrupt.\n"
- " Please verify that the correct IRQ is being used for your board,\n"
- : "");
- else if (hostdata->test_completed != 1)
- printk ("scsi%d : test 1 bad interrupt value (%d)\n",
- host->host_no, hostdata->test_completed);
- else
- failed = (hostdata->test_dest != 0xdeadbeef);
-
- if (hostdata->test_dest != 0xdeadbeef) {
- printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
- " probable cache invalidation problem. Please configure caching\n"
- " as write-through or disabled\n",
- host->host_no, hostdata->test_dest);
- }
-
- if (failed) {
- printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
- host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
- hostdata->script, start);
- printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
- NCR53c7x0_read32(DSPS_REG));
- local_irq_restore(flags);
- return -1;
- }
- hostdata->test_running = 0;
- }
-
- if ((hostdata->options & OPTION_DEBUG_TEST2) &&
- hostdata->state != STATE_DISABLED) {
- u32 dsa[48];
- unsigned char identify = IDENTIFY(0, 0);
- unsigned char cmd[6];
- unsigned char data[36];
- unsigned char status = 0xff;
- unsigned char msg = 0xff;
-
- cmd[0] = INQUIRY;
- cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
- cmd[4] = sizeof(data);
-
- dsa[2] = 1;
- dsa[3] = virt_to_bus(&identify);
- dsa[4] = 6;
- dsa[5] = virt_to_bus(&cmd);
- dsa[6] = sizeof(data);
- dsa[7] = virt_to_bus(&data);
- dsa[8] = 1;
- dsa[9] = virt_to_bus(&status);
- dsa[10] = 1;
- dsa[11] = virt_to_bus(&msg);
-
- for (i = 0; i < 6; ++i) {
-#ifdef VALID_IDS
- if (!hostdata->valid_ids[i])
- continue;
-#endif
- local_irq_disable();
- if (!hostdata->idle) {
- printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
-
- /* 710: bit mapped scsi ID, async */
- dsa[0] = (1 << i) << 16;
- hostdata->idle = 0;
- hostdata->test_running = 2;
- hostdata->test_completed = -1;
- start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
- hostdata->state = STATE_RUNNING;
- NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
- NCR53c7x0_write32 (DSP_REG, start);
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
- DCNTL_SSM | DCNTL_STD);
- local_irq_restore(flags);
-
- timeout = jiffies + 5 * HZ; /* arbitrary */
- while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
- barrier();
-
- NCR53c7x0_write32 (DSA_REG, 0);
-
- if (hostdata->test_completed == 2) {
- data[35] = 0;
- printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
- host->host_no, i, data + 8);
- printk ("scsi%d : status ", host->host_no);
- scsi_print_status (status);
- printk ("\nscsi%d : message ", host->host_no);
- spi_print_msg(&msg);
- printk ("\n");
- } else if (hostdata->test_completed == 3) {
- printk("scsi%d : test 2 no connection with target %d\n",
- host->host_no, i);
- if (!hostdata->idle) {
- printk("scsi%d : not idle\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
- } else if (hostdata->test_completed == -1) {
- printk ("scsi%d : test 2 timed out\n", host->host_no);
- local_irq_restore(flags);
- return -1;
- }
- hostdata->test_running = 0;
- }
- }
-
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * Function : static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
- * performing all necessary relocation.
- *
- * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
- * enough to hold the NCR53c8xx dsa.
- */
-
-static void
-NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
- Scsi_Cmnd *c = cmd->cmd;
- struct Scsi_Host *host = c->device->host;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int i;
-
- memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
- hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);
-
- /*
- * Note : within the NCR 'C' code, dsa points to the _start_
- * of the DSA structure, and _not_ the offset of dsa_zero within
- * that structure used to facilitate shorter signed offsets
- * for the 8 bit ALU.
- *
- * The implications of this are that
- *
- * - 32 bit A_dsa_* absolute values require an additional
- * dsa_zero added to their value to be correct, since they are
- * relative to dsa_zero which is in essentially a separate
- * space from the code symbols.
- *
- * - All other symbols require no special treatment.
- */
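-/*
- * Illustrative example (hypothetical numbers): if Ent_dsa_code_template
- * were 0x100, Ent_dsa_zero 0x180 and A_dsa_next 0x8, the bus address of
- * the dsa_next field inside this command's DSA copy would be
- * virt_to_bus(cmd->dsa) + (0x180 - 0x100) + 0x8, which is exactly the
- * expression used for the dsa_temp_next patch below.
- */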
-
- patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_lun, c->device->lun);
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
- Ent_dsa_code_template + A_dsa_next);
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->device->id].script));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_sscf_710, virt_to_bus((void *)&hostdata->sync[c->device->id].sscf_710));
- patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_target, 1 << c->device->id);
- /* XXX - new pointer stuff */
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_residual, virt_to_bus(&cmd->residual));
-
- /* XXX - new start stuff */
-
- patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
- dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
-}
-
-/*
- * Function : run_process_issue_queue (void)
- *
- * Purpose : ensure that the coroutine is running and will process our
- * request. process_issue_queue_running is checked/set here (in an
- * inline function) rather than in process_issue_queue itself to reduce
- * the chances of stack overflow.
- *
- */
-
-static volatile int process_issue_queue_running = 0;
-
-static __inline__ void
-run_process_issue_queue(void) {
- unsigned long flags;
- local_irq_save(flags);
- if (!process_issue_queue_running) {
- process_issue_queue_running = 1;
- process_issue_queue(flags);
- /*
- * process_issue_queue_running is cleared in process_issue_queue
- * once it can't do more work, and process_issue_queue exits with
- * interrupts disabled.
- */
- }
- local_irq_restore(flags);
-}
-
-/*
- * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
- * result)
- *
- * Purpose : mark SCSI command as finished, OR'ing the host portion
- * of the result word into the result field of the corresponding
- * Scsi_Cmnd structure, and removing it from the internal queues.
- *
- * Inputs : cmd - command, result - entire result field
- *
- * Preconditions : the NCR chip should be in a halted state when
- * abnormal_finished is run, since it modifies structures which
- * the NCR expects to have exclusive access to.
- */
-
-static void
-abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
- Scsi_Cmnd *c = cmd->cmd;
- struct Scsi_Host *host = c->device->host;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- int left, found;
- volatile struct NCR53c7x0_cmd * linux_search;
- volatile struct NCR53c7x0_cmd * volatile *linux_prev;
- volatile u32 *ncr_prev, *ncrcurrent, ncr_search;
-
-#if 0
- printk ("scsi%d: abnormal finished\n", host->host_no);
-#endif
-
- local_irq_save(flags);
- found = 0;
- /*
- * Traverse the NCR issue array until we find a match or run out
- * of instructions. Instructions in the NCR issue array are
- * either JUMP or NOP instructions, which are 2 words in length.
- */
-
-
- for (found = 0, left = host->can_queue, ncrcurrent = hostdata->schedule;
- left > 0; --left, ncrcurrent += 2)
- {
- if (issue_to_cmd (host, hostdata, (u32 *) ncrcurrent) == cmd)
- {
- ncrcurrent[0] = hostdata->NOP_insn;
- ncrcurrent[1] = 0xdeadbeef;
- ++found;
- break;
- }
- }
-
- /*
- * Traverse the NCR reconnect list of DSA structures until we find
- * a pointer to this dsa or have found too many command structures.
- * We let prev point at the next field of the previous element or
- * head of the list, so we don't do anything different for removing
- * the head element.
- */
-
- for (left = host->can_queue,
- ncr_search = hostdata->reconnect_dsa_head,
- ncr_prev = &hostdata->reconnect_dsa_head;
- left >= 0 && ncr_search &&
- ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
- != (char *) cmd->dsa;
- ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
- hostdata->dsa_next), ncr_search = *ncr_prev, --left);
-
- if (left < 0)
- printk("scsi%d: loop detected in ncr reconncect list\n",
- host->host_no);
- else if (ncr_search) {
- if (found)
- printk("scsi%d: scsi %ld in ncr issue array and reconnect lists\n",
- host->host_no, c->pid);
- else {
- volatile u32 * next = (u32 *)
- ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
- *ncr_prev = *next;
-/* If we're at the tail end of the issue queue, update that pointer too. */
- found = 1;
- }
- }
-
- /*
- * Traverse the host running list until we find this command or discover
- * we have too many elements, pointing linux_prev at the next field of the
- * previous element (or the head of the list), with linux_search at the
- * current element.
- */
-
- for (left = host->can_queue, linux_search = hostdata->running_list,
- linux_prev = &hostdata->running_list;
- left >= 0 && linux_search && linux_search != cmd;
- linux_prev = &(linux_search->next),
- linux_search = linux_search->next, --left);
-
- if (left < 0)
- printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
- host->host_no, c->pid);
- else if (linux_search) {
- *linux_prev = linux_search->next;
- --hostdata->busy[c->device->id][c->device->lun];
- }
-
- /* Return the NCR command structure to the free list */
- cmd->next = hostdata->free;
- hostdata->free = cmd;
- c->host_scribble = NULL;
-
- /* And return */
- c->result = result;
- c->scsi_done(c);
-
- local_irq_restore(flags);
- run_process_issue_queue();
-}
-
-/*
- * Function : static void intr_break (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : Handler for breakpoint interrupts from a SCSI script
- *
- * Inputs : host - pointer to this host adapter's structure,
- * cmd - pointer to the command (if any) dsa was pointing
- * to.
- *
- */
-
-static void
-intr_break (struct Scsi_Host *host, struct
- NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_break *bp;
-#if 0
- Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
-#endif
- u32 *dsp;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- NCR53c7x0_local_setup(host);
-
- /*
- * Find the break point corresponding to this address, and
- * dump the appropriate debugging information to standard
- * output.
- */
- local_irq_save(flags);
- dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
- for (bp = hostdata->breakpoints; bp && bp->address != dsp;
- bp = bp->next);
- if (!bp)
- panic("scsi%d : break point interrupt from %p with no breakpoint!",
- host->host_no, dsp);
-
- /*
- * Configure the NCR chip for manual start mode, so that we can
- * point the DSP register at the instruction that follows the
- * INT int_debug_break instruction.
- */
-
- NCR53c7x0_write8 (hostdata->dmode,
- NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);
-
- /*
- * And update the DSP register, using the size of the old
- * instruction in bytes.
- */
-
- local_irq_restore(flags);
-}
-/*
- * Function : static void print_synchronous (const char *prefix,
- * const unsigned char *msg)
- *
- * Purpose : print a pretty, user and machine parsable representation
- * of an SDTR message, including the "real" parameters and data
- * clock, so the transfer rate can be seen at a glance.
- *
- * Inputs : prefix - text to prepend, msg - SDTR message (5 bytes)
- */
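-
-/*
- * Example (hypothetical message bytes): msg[3] = 25 and msg[4] = 8
- * describe a 100ns period with an offset of 8; 1000000000 / 100 is
- * 10MHz, and anything under 200ns is reported as FAST SCSI-II, so the
- * output is "period 100ns offset 8 10.00MHz FAST SCSI-II".
- */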
-
-static void
-print_synchronous (const char *prefix, const unsigned char *msg) {
- if (msg[4]) {
- int Hz = 1000000000 / (msg[3] * 4);
- int integer = Hz / 1000000;
- int fraction = (Hz - (integer * 1000000)) / 10000;
- printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
- prefix, (int) msg[3] * 4, (int) msg[4], integer, fraction,
- (((msg[3] * 4) < 200) ? "FAST" : "synchronous"),
- (((msg[3] * 4) < 200) ? "-II" : ""));
- } else
- printk ("%sasynchronous SCSI\n", prefix);
-}
-
-/*
- * Function : static void set_synchronous (struct Scsi_Host *host,
- * int target, int sxfer, int scntl3, int now_connected)
- *
- * Purpose : reprogram transfers between the selected SCSI initiator and
- * target with the given register values, updating the indirect
- * select operand, the reselection script, and the chip registers.
- *
- * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
- * sxfer and scntl3 - NCR registers. now_connected - if non-zero,
- * we should reprogram the registers now too.
- *
- * NOTE: For 53c710, scntl3 is actually used for SCF bits from
- * SBCL, as we don't have a SCNTL3.
- */
-
-static void
-set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
- int now_connected) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- u32 *script;
- NCR53c7x0_local_setup(host);
-
- /* These are eight bit registers */
- sxfer &= 0xff;
- scntl3 &= 0xff;
-
- hostdata->sync[target].sxfer_sanity = sxfer;
- hostdata->sync[target].scntl3_sanity = scntl3;
-
-/*
- * HARD CODED : synchronous script is EIGHT words long. This
- * must agree with 53c7.8xx.h
- */
-
- if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
- hostdata->sync[target].select_indirect = (1 << target) << 16 |
- (sxfer << 8);
- hostdata->sync[target].sscf_710 = scntl3;
-
- script = (u32 *) hostdata->sync[target].script;
-
- /* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
- script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
- DCMD_RWRI_OP_MOVE) << 24) |
- (SBCL_REG << 16) | (scntl3 << 8);
- script[1] = 0;
- script += 2;
-
- script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
- DCMD_RWRI_OP_MOVE) << 24) |
- (SXFER_REG << 16) | (sxfer << 8);
- script[1] = 0;
- script += 2;
-
-#ifdef DEBUG_SYNC_INTR
- if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
- script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
- script[1] = DEBUG_SYNC_INTR;
- script += 2;
- }
-#endif
-
- script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
- script[1] = 0;
- script += 2;
- }
-
- if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
- printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, target, sxfer, scntl3);
-
- if (now_connected) {
- NCR53c7x0_write8(SBCL_REG, scntl3);
- NCR53c7x0_write8(SXFER_REG, sxfer);
- }
-}
-
-
-/*
- * Function : static void asynchronous (struct Scsi_Host *host, int target)
- *
- * Purpose : reprogram transfers between the selected SCSI host adapter and
- * target (assumed to be currently connected) for asynchronous transfers.
- *
- * Inputs : host - SCSI host structure, target - numeric target ID.
- *
- * Preconditions : the NCR chip should be in one of the halted states
- */
-
-static void
-asynchronous (struct Scsi_Host *host, int target) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- NCR53c7x0_local_setup(host);
- set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
- 1);
- printk ("scsi%d : setting target %d to asynchronous SCSI\n",
- host->host_no, target);
-}
-
-/*
- * XXX - do we want to go out of our way (ie, add extra code to selection
- * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
- * conversion bits, or can we be content in just setting the
- * sxfer bits? I chose to do so [richard@sleepie.demon.co.uk]
- */
-
-/* Table for NCR53c8xx synchronous values */
-
-/* This table is also correct for the 710, allowing that scf=4 is equivalent
- * to SSCF=0 (ie use DCNTL, divide by 3) for a 50.01-66.00MHz clock.
- * For any other clock values, we cannot use entries with SCF values of
- * 4. I guess that for a 66MHz clock, the slowest it will set is 2MHz,
- * and for a 50MHz clock, the slowest will be 2.27MHz. Should check
- * that a device doesn't try to negotiate sync below these limits!
- */
-
-static const struct {
- int div; /* Total clock divisor * 10 */
- unsigned char scf; /* */
- unsigned char tp; /* 4 + tp = xferp divisor */
-} syncs[] = {
-/* div scf tp div scf tp div scf tp */
- { 40, 1, 0}, { 50, 1, 1}, { 60, 1, 2},
- { 70, 1, 3}, { 75, 2, 1}, { 80, 1, 4},
- { 90, 1, 5}, { 100, 1, 6}, { 105, 2, 3},
- { 110, 1, 7}, { 120, 2, 4}, { 135, 2, 5},
- { 140, 3, 3}, { 150, 2, 6}, { 160, 3, 4},
- { 165, 2, 7}, { 180, 3, 5}, { 200, 3, 6},
- { 210, 4, 3}, { 220, 3, 7}, { 240, 4, 4},
- { 270, 4, 5}, { 300, 4, 6}, { 330, 4, 7}
-};
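-
-/*
- * Reading the table (hypothetical 50MHz SCSI clock): the { 100, 1, 6 }
- * entry means a total divisor of 10.0, so synchronous() below computes
- * msg[3] = (1000000000 / 50000000) * 100 / 10 / 4 = 50, i.e. a 200ns
- * period, programmed with SCF = 1 and a transfer period divisor of 4 + 6.
- */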
-
-/*
- * Function : static void synchronous (struct Scsi_Host *host, int target,
- * char *msg)
- *
- * Purpose : reprogram transfers between the selected SCSI initiator and
- * target for synchronous SCSI transfers such that the synchronous
- * offset is less than that requested and period at least as long
- * as that requested. Also modify *msg such that it contains
- * an appropriate response.
- *
- * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
- * msg - synchronous transfer request.
- */
-
-
-static void
-synchronous (struct Scsi_Host *host, int target, char *msg) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int desire, divisor, i, limit;
- unsigned char scntl3, sxfer;
-/* The diagnostic message fits on one line, even with max. width integers */
- char buf[80];
-
-/* Desired transfer clock in Hz */
- desire = 1000000000L / (msg[3] * 4);
-/* Scale the available SCSI clock by 10 so we get tenths */
- divisor = (hostdata->scsi_clock * 10) / desire;
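-/* Worked example (hypothetical values): msg[3] = 25 requests a 100ns
- * period, so desire = 1000000000 / 100 = 10MHz; with a 50MHz SCSI clock,
- * divisor = (50000000 * 10) / 10000000 = 50 (i.e. 5.0), and the search
- * below stops at the first syncs[] entry whose div is >= 50.
- */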
-
-/* NCR chips can handle at most an offset of 8 */
- if (msg[4] > 8)
- msg[4] = 8;
-
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
- host->host_no, divisor / 10, divisor % 10);
-
- limit = ARRAY_SIZE(syncs) - 1;
- for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);
-
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk("scsi%d : selected synchronous divisor of %d.%01d\n",
- host->host_no, syncs[i].div / 10, syncs[i].div % 10);
-
- msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);
-
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
- msg[3] * 4);
-
- scntl3 = syncs[i].scf;
- sxfer = (msg[4] << SXFER_MO_SHIFT) | (syncs[i].tp << 4);
- if (hostdata->options & OPTION_DEBUG_SDTR)
- printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
- host->host_no, (int) sxfer, (int) scntl3);
- set_synchronous (host, target, sxfer, scntl3, 1);
- sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
- print_synchronous (buf, msg);
-}
-
-/*
- * Function : static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : Handler for INT generated instructions for the
- * NCR53c810/820 SCSI SCRIPT
- *
- * Inputs : host - pointer to this host adapter's structure,
- * cmd - pointer to the command (if any) dsa was pointing
- * to.
- *
- */
-
-static int
-NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
- NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- int print;
- Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- u32 dsps,*dsp; /* Argument of the INT instruction */
-
- NCR53c7x0_local_setup(host);
- dsps = NCR53c7x0_read32(DSPS_REG);
- dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
-
- /* RGH 150597: Frig. Commands which fail with Check Condition are
- * Flagged as successful - hack dsps to indicate check condition */
-#if 0
- /* RGH 200597: Need to disable for BVME6000, as it gets Check Conditions
- * and then dies. Seems to handle Check Condition at startup, but
- * not mid kernel build. */
- if (dsps == A_int_norm_emulateintfly && cmd && cmd->result == 2)
- dsps = A_int_err_check_condition;
-#endif
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
-
- switch (dsps) {
- case A_int_msg_1:
- print = 1;
- switch (hostdata->msg_buf[0]) {
- /*
- * Unless we've initiated synchronous negotiation, I don't
- * think that this should happen.
- */
- case MESSAGE_REJECT:
- hostdata->dsp = hostdata->script + hostdata->E_accept_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
- printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
- c->device->id);
- cmd->flags &= ~CMD_FLAG_SDTR;
- asynchronous (host, c->device->id);
- print = 0;
- }
- break;
- case INITIATE_RECOVERY:
- printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
- host->host_no);
- /* Same handling as the default case below */
- hostdata->dsp = hostdata->script + hostdata->E_reject_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- break;
- default:
- printk ("scsi%d : unsupported message, rejecting\n",
- host->host_no);
- hostdata->dsp = hostdata->script + hostdata->E_reject_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- }
- if (print) {
- printk ("scsi%d : received message", host->host_no);
- if (c)
- printk (" from target %d lun %d ", c->device->id, c->device->lun);
- spi_print_msg((unsigned char *) hostdata->msg_buf);
- printk("\n");
- }
-
- return SPECIFIC_INT_NOTHING;
-
-
- case A_int_msg_sdtr:
-/*
- * At this point, hostdata->msg_buf contains
- * 0 EXTENDED MESSAGE
- * 1 length
- * 2 SDTR
- * 3 period * 4ns
- * 4 offset
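- *
- * For example (hypothetical request), a target asking for a 100ns period
- * with an offset of 8 would leave 0x01 0x03 0x01 0x19 0x08 here.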
- */
-
- if (cmd) {
- char buf[80];
- sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->device->id,
- (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
- print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
-
- /*
- * Initiator initiated, won't happen unless synchronous
- * transfers are enabled. If we get a SDTR message in
- * response to our SDTR, we should program our parameters
- * such that
- * offset <= requested offset
- * period >= requested period
- */
- if (cmd->flags & CMD_FLAG_SDTR) {
- cmd->flags &= ~CMD_FLAG_SDTR;
- if (hostdata->msg_buf[4])
- synchronous (host, c->device->id, (unsigned char *)
- hostdata->msg_buf);
- else
- asynchronous (host, c->device->id);
- hostdata->dsp = hostdata->script + hostdata->E_accept_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- } else {
- if (hostdata->options & OPTION_SYNCHRONOUS) {
- cmd->flags |= CMD_FLAG_DID_SDTR;
- synchronous (host, c->device->id, (unsigned char *)
- hostdata->msg_buf);
- } else {
- hostdata->msg_buf[4] = 0; /* 0 offset = async */
- asynchronous (host, c->device->id);
- }
- patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
- patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
- virt_to_bus ((void *)&hostdata->msg_buf));
- hostdata->dsp = hostdata->script +
- hostdata->E_respond_message / sizeof(u32);
- hostdata->dsp_changed = 1;
- }
- return SPECIFIC_INT_NOTHING;
- }
- /* Fall through to abort if we couldn't find a cmd, and
- therefore a dsa structure to twiddle */
- case A_int_msg_wdtr:
- hostdata->dsp = hostdata->script + hostdata->E_reject_message /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- case A_int_err_unexpected_phase:
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : unexpected phase\n", host->host_no);
- return SPECIFIC_INT_ABORT;
- case A_int_err_selected:
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : selected by target %d\n", host->host_no,
- (int) NCR53c7x0_read8(SDID_REG_800) &7);
- else
- printk ("scsi%d : selected by target LCRC=0x%02x\n", host->host_no,
- (int) NCR53c7x0_read8(LCRC_REG_10));
- hostdata->dsp = hostdata->script + hostdata->E_target_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- case A_int_err_unexpected_reselect:
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : unexpected reselect by target %d lun %d\n",
- host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
- hostdata->reselected_identify & 7);
- else
- printk ("scsi%d : unexpected reselect LCRC=0x%02x\n", host->host_no,
- (int) NCR53c7x0_read8(LCRC_REG_10));
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
-/*
- * Since contingent allegiance conditions are cleared by the next
- * command issued to a target, we must issue a REQUEST SENSE
- * command after receiving a CHECK CONDITION status, before
- * another command is issued.
- *
- * Since this NCR53c7x0_cmd will be freed after use, we don't
- * care if we step on the various fields, so modify a few things.
- */
- case A_int_err_check_condition:
-#if 0
- if (hostdata->options & OPTION_DEBUG_INTR)
-#endif
- printk ("scsi%d : CHECK CONDITION\n", host->host_no);
- if (!c) {
- printk("scsi%d : CHECK CONDITION with no SCSI command\n",
- host->host_no);
- return SPECIFIC_INT_PANIC;
- }
-
- /*
- * FIXME : this uses the normal one-byte selection message.
- * We may want to renegotiate for synchronous & WIDE transfers
- * since these could be the crux of our problem.
- *
- * FIXME : once SCSI-II tagged queuing is implemented, we'll
- * have to set this up so that the rest of the DSA
- * agrees with this being an untagged queued command.
- */
-
- patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
-
- /*
- * Modify the table indirect for COMMAND OUT phase, since
- * Request Sense is a six byte command.
- */
-
- patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
-
- /*
- * The CDB is now mirrored in our local non-cached
- * structure, but keep the old structure up to date as well,
- * just in case anyone looks at it.
- */
-
- /*
- * XXX Need to worry about data buffer alignment/cache state
- * XXX here, but currently never get A_int_err_check_condition,
- * XXX so ignore problem for now.
- */
- cmd->cmnd[0] = c->cmnd[0] = REQUEST_SENSE;
- cmd->cmnd[1] = c->cmnd[1] &= 0xe0; /* Zero all but LUN */
- cmd->cmnd[2] = c->cmnd[2] = 0;
- cmd->cmnd[3] = c->cmnd[3] = 0;
- cmd->cmnd[4] = c->cmnd[4] = sizeof(c->sense_buffer);
- cmd->cmnd[5] = c->cmnd[5] = 0;
-
- /*
- * Disable dataout phase, and program datain to transfer to the
- * sense buffer, and add a jump to other_transfer after the
- * command so overflow/underrun conditions are detected.
- */
-
- patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
- virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
- patch_dsa_32 (cmd->dsa, dsa_datain, 0,
- virt_to_bus(cmd->data_transfer_start));
- cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
- DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
- cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
-
- cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
- << 24) | DBC_TCI_TRUE;
- cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
- hostdata->E_other_transfer;
-
- /*
- * Currently, this command is flagged as completed, ie
- * it has valid status and message data. Reflag it as
- * incomplete. Q - need to do something so that original
- * status, etc are used.
- */
-
- cmd->result = cmd->cmd->result = 0xffff;
-
- /*
- * Restart command as a REQUEST SENSE.
- */
- hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- return SPECIFIC_INT_NOTHING;
- case A_int_debug_break:
- return SPECIFIC_INT_BREAK;
- case A_int_norm_aborted:
- hostdata->dsp = (u32 *) hostdata->schedule;
- hostdata->dsp_changed = 1;
- if (cmd)
- abnormal_finished (cmd, DID_ERROR << 16);
- return SPECIFIC_INT_NOTHING;
- case A_int_norm_emulateintfly:
- NCR53c7x0_intfly(host);
- return SPECIFIC_INT_NOTHING;
- case A_int_test_1:
- case A_int_test_2:
- hostdata->idle = 1;
- hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk("scsi%d : test%d complete\n", host->host_no,
- hostdata->test_completed);
- return SPECIFIC_INT_NOTHING;
-#ifdef A_int_debug_reselected_ok
- case A_int_debug_reselected_ok:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- /*
- * Note - this dsa is not based on location relative to
- * the command structure, but to location relative to the
- * DSA register
- */
- u32 *dsa;
- dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
-
- printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p)\n",
- host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
- printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt(cmd->saved_data_pointer));
- print_insn (host, hostdata->script + Ent_reselected_ok /
- sizeof(u32), "", 1);
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, NCR53c7x0_read8(SXFER_REG),
- NCR53c7x0_read8(SCNTL3_REG_800));
- else
- printk ("scsi%d : sxfer=0x%x, cannot read SBCL\n",
- host->host_no, NCR53c7x0_read8(SXFER_REG));
- if (c) {
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script, "", 1);
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script + 2, "", 1);
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_reselect_check
- case A_int_debug_reselect_check:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- u32 *dsa;
-#if 0
- u32 *code;
-#endif
- /*
- * Note - this dsa is not based on location relative to
- * the command structure, but to location relative to the
- * DSA register
- */
- dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
- printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
- host->host_no, virt_to_bus(dsa), dsa);
- if (dsa) {
- printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt (cmd->saved_data_pointer));
-#if 0
- printk("scsi%d : template code :\n", host->host_no);
- for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
- / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
- code += print_insn (host, code, "", 1));
-#endif
- }
- print_insn (host, hostdata->script + Ent_reselected_ok /
- sizeof(u32), "", 1);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_dsa_schedule
- case A_int_debug_dsa_schedule:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- u32 *dsa;
- /*
- * Note - this dsa is not based on location relative to
- * the command structure, but to location relative to the
- * DSA register
- */
- dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
- printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
- host->host_no, virt_to_bus(dsa), dsa);
- if (dsa)
- printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
- " (temp was 0x%x (virt 0x%p))\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt (cmd->saved_data_pointer),
- NCR53c7x0_read32 (TEMP_REG),
- bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_scheduled
- case A_int_debug_scheduled:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
- host->host_no, NCR53c7x0_read32(DSA_REG),
- bus_to_virt(NCR53c7x0_read32(DSA_REG)));
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_idle
- case A_int_debug_idle:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : idle\n", host->host_no);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_cmd
- case A_int_debug_cmd:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : command sent\n");
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_dsa_loaded
- case A_int_debug_dsa_loaded:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
- NCR53c7x0_read32(DSA_REG),
- bus_to_virt(NCR53c7x0_read32(DSA_REG)));
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_reselected
- case A_int_debug_reselected:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- if ((hostdata->chip / 100) == 8)
- printk("scsi%d : reselected by target %d lun %d\n",
- host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
- (int) hostdata->reselected_identify & 7);
- else
- printk("scsi%d : reselected by LCRC=0x%02x lun %d\n",
- host->host_no, (int) NCR53c7x0_read8(LCRC_REG_10),
- (int) hostdata->reselected_identify & 7);
- print_queues(host);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_disconnect_msg
- case A_int_debug_disconnect_msg:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
- if (c)
- printk("scsi%d : target %d lun %d disconnecting\n",
- host->host_no, c->device->id, c->device->lun);
- else
- printk("scsi%d : unknown target disconnecting\n",
- host->host_no);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_disconnected
- case A_int_debug_disconnected:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- printk ("scsi%d : disconnected, new queues are\n",
- host->host_no);
- print_queues(host);
-#if 0
- /* Not valid on ncr53c710! */
- printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, NCR53c7x0_read8(SXFER_REG),
- NCR53c7x0_read8(SCNTL3_REG_800));
-#endif
- if (c) {
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script, "", 1);
- print_insn (host, (u32 *)
- hostdata->sync[c->device->id].script + 2, "", 1);
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_panic
- case A_int_debug_panic:
- printk("scsi%d : int_debug_panic received\n", host->host_no);
- print_lots (host);
- return SPECIFIC_INT_PANIC;
-#endif
-#ifdef A_int_debug_saved
- case A_int_debug_saved:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer,
- bus_to_virt (cmd->saved_data_pointer));
- print_progress (c);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_restored
- case A_int_debug_restored:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT)) {
- if (cmd) {
- int size;
- printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
- host->host_no, cmd->saved_data_pointer, bus_to_virt (
- cmd->saved_data_pointer));
- size = print_insn (host, (u32 *)
- bus_to_virt(cmd->saved_data_pointer), "", 1);
- size = print_insn (host, (u32 *)
- bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
- print_progress (c);
- }
-#if 0
- printk ("scsi%d : datapath residual %d\n",
- host->host_no, datapath_residual (host)) ;
-#endif
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_sync
- case A_int_debug_sync:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
- unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG), scntl3;
- if ((hostdata->chip / 100) == 8) {
- scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
- if (c) {
- if (sxfer != hostdata->sync[c->device->id].sxfer_sanity ||
- scntl3 != hostdata->sync[c->device->id].scntl3_sanity) {
- printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x",
- host->host_no, sxfer, scntl3);
- NCR53c7x0_write8 (SXFER_REG, sxfer);
- NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
- }
- } else
- printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
- host->host_no, (int) sxfer, (int) scntl3);
- } else {
- if (c) {
- if (sxfer != hostdata->sync[c->device->id].sxfer_sanity) {
- printk ("scsi%d : sync sanity check failed sxfer=0x%x",
- host->host_no, sxfer);
- NCR53c7x0_write8 (SXFER_REG, sxfer);
- NCR53c7x0_write8 (SBCL_REG,
- hostdata->sync[c->device->id].sscf_710);
- }
- } else
- printk ("scsi%d : unknown command sxfer=0x%x\n",
- host->host_no, (int) sxfer);
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_datain
- case A_int_debug_datain:
- if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
- OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
- int size;
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
- " datapath residual=%d\n",
- host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
- (int) NCR53c7x0_read8(SXFER_REG),
- (int) NCR53c7x0_read8(SCNTL3_REG_800),
- datapath_residual (host)) ;
- else
- printk ("scsi%d : In do_datain (%s) sxfer=0x%x\n"
- " datapath residual=%d\n",
- host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
- (int) NCR53c7x0_read8(SXFER_REG),
- datapath_residual (host)) ;
- print_insn (host, dsp, "", 1);
- size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
- print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
- }
- return SPECIFIC_INT_RESTART;
-#endif
-#ifdef A_int_debug_check_dsa
- case A_int_debug_check_dsa:
- if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
- int sdid;
- int tmp;
- char *where;
- if (hostdata->chip / 100 == 8)
- sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
- else {
- tmp = NCR53c7x0_read8 (SDID_REG_700);
- if (!tmp)
- panic ("SDID_REG_700 = 0");
- tmp >>= 1;
- sdid = 0;
- while (tmp) {
- tmp >>= 1;
- sdid++;
- }
- }
- where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
- (DCMD_REG)) == hostdata->script +
- Ent_select_check_dsa / sizeof(u32) ?
- "selection" : "reselection";
- if (c && sdid != c->device->id) {
- printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
- host->host_no, sdid, c->device->id, where);
- print_lots(host);
- dump_events (host, 20);
- return SPECIFIC_INT_PANIC;
- }
- }
- return SPECIFIC_INT_RESTART;
-#endif
- default:
- if ((dsps & 0xff000000) == 0x03000000) {
- printk ("scsi%d : misc debug interrupt 0x%x\n",
- host->host_no, dsps);
- return SPECIFIC_INT_RESTART;
- } else if ((dsps & 0xff000000) == 0x05000000) {
- if (hostdata->events) {
- struct NCR53c7x0_event *event;
- ++hostdata->event_index;
- if (hostdata->event_index >= hostdata->event_size)
- hostdata->event_index = 0;
- event = (struct NCR53c7x0_event *) hostdata->events +
- hostdata->event_index;
- event->event = (enum ncr_event) dsps;
- event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
- if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
- if (hostdata->chip / 100 == 8)
- event->target = NCR53c7x0_read8(SSID_REG_800);
- else {
- unsigned char tmp, sdid;
- tmp = NCR53c7x0_read8 (SDID_REG_700);
- if (!tmp)
- panic ("SDID_REG_700 = 0");
- tmp >>= 1;
- sdid = 0;
- while (tmp) {
- tmp >>= 1;
- sdid++;
- }
- event->target = sdid;
- }
- }
- else
- event->target = 255;
-
- if (event->event == EVENT_RESELECT)
- event->lun = hostdata->reselected_identify & 0xf;
- else if (c)
- event->lun = c->device->lun;
- else
- event->lun = 255;
- do_gettimeofday(&(event->time));
- if (c) {
- event->pid = c->pid;
- memcpy ((void *) event->cmnd, (void *) c->cmnd,
- sizeof (event->cmnd));
- } else {
- event->pid = -1;
- }
- }
- return SPECIFIC_INT_RESTART;
- }
-
- printk ("scsi%d : unknown user interrupt 0x%x\n",
- host->host_no, (unsigned) dsps);
- return SPECIFIC_INT_PANIC;
- }
-}
-
-/*
- * XXX - the stock NCR assembler won't output the scriptu.h file,
- * which undefines all of the CPP symbols #define'd by the script.h
- * file; without it you will have problems if you use multiple scripts
- * with the same symbol names.
- *
- * If you insist on using NCR's assembler, you could generate
- * scriptu.h from script.h using something like
- *
- * grep '#define' script.h | \
- * sed 's/#define[ ][ ]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
- * > scriptu.h
- */
-
-#include "53c7xx_u.h"
-
-/* XXX - add alternate script handling code here */
-
-
-/*
- * Function : static void NCR53c7x0_soft_reset (struct Scsi_Host *host)
- *
- * Purpose : perform a soft reset of the NCR53c7xx chip
- *
- * Inputs : host - pointer to this host adapter's structure
- *
- * Preconditions : NCR53c7x0_init must have been called for this
- * host.
- *
- */
-
-static void
-NCR53c7x0_soft_reset (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- NCR53c7x0_local_setup(host);
-
- local_irq_save(flags);
-
- /* Disable scsi chip and s/w level 7 ints */
-
-#ifdef CONFIG_MVME16x
- if (MACH_IS_MVME16x)
- {
- volatile unsigned long v;
-
- v = *(volatile unsigned long *)0xfff4006c;
- v &= ~0x8000;
- *(volatile unsigned long *)0xfff4006c = v;
- v = *(volatile unsigned long *)0xfff4202c;
- v &= ~0x10;
- *(volatile unsigned long *)0xfff4202c = v;
- }
-#endif
- /* Anything specific for your hardware? */
-
- /*
- * Do a soft reset of the chip so that everything is
- * reinitialized to the power-on state.
- *
- * Basically follow the procedure outlined in the NCR53c700
- * data manual under Chapter Six, How to Use, Steps Necessary to
- * Start SCRIPTS, with the exception of actually starting the
- * script and setting up the synchronous transfer gunk.
- */
-
- /* Should we reset the scsi bus here? */
-
- NCR53c7x0_write8(ISTAT_REG_700, ISTAT_10_SRST);
- NCR53c7x0_write8(ISTAT_REG_700, 0);
-
- /*
- * saved_dcntl is set up in NCR53c7x0_init() before it is overwritten
- * here. We should have some better way of working out the CF bit
- * setting..
- */
-
- hostdata->saved_dcntl = DCNTL_10_EA|DCNTL_10_COM;
- if (hostdata->scsi_clock > 50000000)
- hostdata->saved_dcntl |= DCNTL_700_CF_3;
- else
- if (hostdata->scsi_clock > 37500000)
- hostdata->saved_dcntl |= DCNTL_700_CF_2;
-#if 0
- else
- /* Any clocks less than 37.5MHz? */
-#endif
-
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM);
- else
- NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
- /* Following disables snooping - snooping is not required, as non-
- * cached pages are used for shared data, and appropriate use is
- * made of cache_push/cache_clear. Indeed, on the 68060,
- * enabling snooping causes disk corruption of ext2fs free block
- * bitmaps and the like. If you have a 68060 with snooping hardwired
- * on, then you need to enable CONFIG_060_WRITETHROUGH.
- */
- NCR53c7x0_write8(CTEST7_REG, CTEST7_10_TT1|CTEST7_STD);
- /* Actually burst of eight, according to my 53c710 databook */
- NCR53c7x0_write8(hostdata->dmode, DMODE_10_BL_8 | DMODE_10_FC2);
- NCR53c7x0_write8(SCID_REG, 1 << host->this_id);
- NCR53c7x0_write8(SBCL_REG, 0);
- NCR53c7x0_write8(SCNTL1_REG, SCNTL1_ESR_700);
- NCR53c7x0_write8(SCNTL0_REG, ((hostdata->options & OPTION_PARITY) ?
- SCNTL0_EPC : 0) | SCNTL0_EPG_700 | SCNTL0_ARB1 | SCNTL0_ARB2);
-
- /*
- * Enable all interrupts, except parity which we only want when
- * the user requests it.
- */
-
- NCR53c7x0_write8(DIEN_REG, DIEN_700_BF |
- DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_700_OPC);
-
- NCR53c7x0_write8(SIEN_REG_700, ((hostdata->options & OPTION_PARITY) ?
- SIEN_PAR : 0) | SIEN_700_STO | SIEN_RST | SIEN_UDC |
- SIEN_SGE | SIEN_MA);
-
-#ifdef CONFIG_MVME16x
- if (MACH_IS_MVME16x)
- {
- volatile unsigned long v;
-
- /* Enable scsi chip and s/w level 7 ints */
- v = *(volatile unsigned long *)0xfff40080;
- v = (v & ~(0xf << 28)) | (4 << 28);
- *(volatile unsigned long *)0xfff40080 = v;
- v = *(volatile unsigned long *)0xfff4006c;
- v |= 0x8000;
- *(volatile unsigned long *)0xfff4006c = v;
- v = *(volatile unsigned long *)0xfff4202c;
- v = (v & ~0xff) | 0x10 | 4;
- *(volatile unsigned long *)0xfff4202c = v;
- }
-#endif
- /* Anything needed for your hardware? */
- local_irq_restore(flags);
-}
-
-
-/*
- * Function : static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
- *
- * Purpose : Return the first free NCR53c7x0_cmd structure (which are
- * reused in a LIFO manner to minimize cache thrashing).
- *
- * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
- * structures for this device, do so. Attempt to complete all scheduled
- * allocations using get_zeroed_page(), putting NCR53c7x0_cmd structures on
- * the free list. Teach programmers not to drink and hack.
- *
- * Inputs : cmd - SCSI command
- *
- * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
- * NULL on failure.
- */
-
-static void
-my_free_page (void *addr, int dummy)
-{
- /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
- * XXX may be invalid (CONFIG_060_WRITETHROUGH)
- */
- kernel_set_cachemode((void *)addr, 4096, IOMAP_FULL_CACHING);
- free_page ((u32)addr);
-}
-
-static struct NCR53c7x0_cmd *
-allocate_cmd (Scsi_Cmnd *cmd) {
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- u32 real; /* Real address */
- int size; /* Size of *tmp */
- struct NCR53c7x0_cmd *tmp;
- unsigned long flags;
-
- if (hostdata->options & OPTION_DEBUG_ALLOCATION)
- printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
- " target = %d, lun = %d, %s\n",
- host->host_no, hostdata->num_cmds, host->can_queue,
- cmd->device->id, cmd->device->lun, (hostdata->cmd_allocated[cmd->device->id] &
- (1 << cmd->device->lun)) ? "already allocated" : "not allocated");
-
-/*
- * If we have not yet reserved commands for this I_T_L nexus, and
- * the device exists (as indicated by permanent Scsi_Cmnd structures
- * being allocated under 1.3.x, or being outside of scan_scsis in
- * 1.2.x), do so now.
- */
- if (!(hostdata->cmd_allocated[cmd->device->id] & (1 << cmd->device->lun)) &&
- cmd->device && cmd->device->has_cmdblocks) {
- if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
- hostdata->extra_allocate += host->cmd_per_lun;
- hostdata->cmd_allocated[cmd->device->id] |= (1 << cmd->device->lun);
- }
-
- for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
- ++hostdata->num_cmds) {
- /* historically, kmalloc has returned unaligned addresses; pad so we
- have enough room to ROUNDUP */
- size = hostdata->max_cmd_size + sizeof (void *);
-#ifdef FORCE_DSA_ALIGNMENT
- /*
- * 53c710 rev.0 doesn't have an add-with-carry instruction.
- * Ensure we allocate enough memory to force alignment.
- */
- size += 256;
-#endif
-/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */
-
- if (size > 4096) {
- printk (KERN_ERR "53c7xx: allocate_cmd size > 4K\n");
- return NULL;
- }
- real = get_zeroed_page(GFP_ATOMIC);
- if (real == 0)
- return NULL;
- cache_push(virt_to_phys((void *)real), 4096);
- cache_clear(virt_to_phys((void *)real), 4096);
- kernel_set_cachemode((void *)real, 4096, IOMAP_NOCACHE_SER);
- tmp = ROUNDUP(real, void *);
-#ifdef FORCE_DSA_ALIGNMENT
- {
- if (((u32)tmp & 0xff) > CmdPageStart)
- tmp = (struct NCR53c7x0_cmd *)((u32)tmp + 255);
- tmp = (struct NCR53c7x0_cmd *)(((u32)tmp & ~0xff) + CmdPageStart);
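- /*
- * e.g. (hypothetical values) with CmdPageStart = 0xc0: a tmp whose low
- * byte is 0x40 is moved forward to ...c0 within the same 256 byte
- * block, while one whose low byte is 0xf4 is first bumped past the
- * block boundary and then lands on ...c0 of the next block; either way
- * the result is the first address at or above tmp whose low byte
- * equals CmdPageStart.
- */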
-#if 0
- printk ("scsi: size = %d, real = 0x%08x, tmp set to 0x%08x\n",
- size, real, (u32)tmp);
-#endif
- }
-#endif
- tmp->real = (void *)real;
- tmp->size = size;
- tmp->free = ((void (*)(void *, int)) my_free_page);
- local_irq_save(flags);
- tmp->next = hostdata->free;
- hostdata->free = tmp;
- local_irq_restore(flags);
- }
- local_irq_save(flags);
- tmp = (struct NCR53c7x0_cmd *) hostdata->free;
- if (tmp) {
- hostdata->free = tmp->next;
- }
- local_irq_restore(flags);
- if (!tmp)
- printk ("scsi%d : can't allocate command for target %d lun %d\n",
- host->host_no, cmd->device->id, cmd->device->lun);
- return tmp;
-}
-
-/*
- * Function : static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
- *
- * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
- * Scsi_Cmnd structure passed in cmd, including dsa and Linux field
- * initialization, and dsa code relocation.
- *
- * Inputs : cmd - SCSI command
- *
- * Returns : NCR53c7x0_cmd structure corresponding to cmd,
- * NULL on failure.
- */
-static struct NCR53c7x0_cmd *
-create_cmd (Scsi_Cmnd *cmd) {
- NCR53c7x0_local_declare();
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- struct NCR53c7x0_cmd *tmp; /* NCR53c7x0_cmd structure for this command */
- int datain, /* Number of instructions per phase */
- dataout;
- int data_transfer_instructions, /* Count of dynamic instructions */
- i; /* Counter */
- u32 *cmd_datain, /* Address of datain/dataout code */
- *cmd_dataout; /* Incremented as we assemble */
-#ifdef notyet
- unsigned char *msgptr; /* Current byte in select message */
- int msglen; /* Length of whole select message */
-#endif
- unsigned long flags;
- u32 exp_select_indirect; /* Used in sanity check */
- NCR53c7x0_local_setup(cmd->device->host);
-
- if (!(tmp = allocate_cmd (cmd)))
- return NULL;
-
- /*
- * Copy CDB and initialised result fields from Scsi_Cmnd to NCR53c7x0_cmd.
- * We do this because NCR53c7x0_cmd may have a special cache mode
- * selected to cope with lack of bus snooping, etc.
- */
-
- memcpy(tmp->cmnd, cmd->cmnd, 12);
- tmp->result = cmd->result;
-
- /*
- * Decide whether we need to generate commands for DATA IN,
- * DATA OUT, neither, or both based on the SCSI command
- */
-
- switch (cmd->cmnd[0]) {
- /* These commands do DATA IN */
- case INQUIRY:
- case MODE_SENSE:
- case READ_6:
- case READ_10:
- case READ_CAPACITY:
- case REQUEST_SENSE:
- case READ_BLOCK_LIMITS:
- case READ_TOC:
- datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
- dataout = 0;
- break;
- /* These commands do DATA OUT */
- case MODE_SELECT:
- case WRITE_6:
- case WRITE_10:
-#if 0
- printk("scsi%d : command is ", host->host_no);
- __scsi_print_command(cmd->cmnd);
-#endif
-#if 0
- printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
- cmd->use_sg);
-#endif
- datain = 0;
- dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
-#if 0
- hostdata->options |= OPTION_DEBUG_INTR;
-#endif
- break;
- /*
- * These commands do no data transfer, we should force an
- * interrupt if a data phase is attempted on them.
- */
- case TEST_UNIT_READY:
- case ALLOW_MEDIUM_REMOVAL:
- case START_STOP:
- datain = dataout = 0;
- break;
- /*
- * We don't know about these commands, so generate code to handle
- * both DATA IN and DATA OUT phases. More efficient to identify them
- * and add them to the above cases.
- */
- default:
- printk("scsi%d : datain+dataout for command ", host->host_no);
- __scsi_print_command(cmd->cmnd);
- datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
- }
-
- /*
- * New code : so that active pointers work correctly regardless
- * of where the saved data pointer is at, we want to immediately
- * enter the dynamic code after selection, and on a non-data
- * phase perform a CALL to the non-data phase handler, with
- * returns back to this address.
- *
- * If a phase mismatch is encountered in the middle of a
- * Block MOVE instruction, we want to _leave_ that instruction
- * unchanged as the current case is, modify a temporary buffer,
- * and point the active pointer (TEMP) at that.
- *
- * Furthermore, we want to implement a saved data pointer,
- * set by the SAVE_DATA_POINTERs message.
- *
- * So, the data transfer segments will change to
- * CALL data_transfer, WHEN NOT data phase
- * MOVE x, x, WHEN data phase
- * ( repeat )
- * JUMP other_transfer
- */
-
- data_transfer_instructions = datain + dataout;
-
- /*
- * When we perform a request sense, we overwrite various things,
- * including the data transfer code. Make sure we have enough
- * space to do that.
- */
-
- if (data_transfer_instructions < 2)
- data_transfer_instructions = 2;
-
-
- /*
- * The saved data pointer is set up so that a RESTORE POINTERS message
- * will start the data transfer over at the beginning.
- */
-
- tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
- hostdata->E_data_transfer;
-
- /*
- * Initialize Linux specific fields.
- */
-
- tmp->cmd = cmd;
- tmp->next = NULL;
- tmp->flags = 0;
- tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next -
- hostdata->dsa_start;
- tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;
-
- /*
- * Calculate addresses of dynamic code to fill in DSA
- */
-
- tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end -
- hostdata->dsa_start) / sizeof(u32);
- tmp->data_transfer_end = tmp->data_transfer_start +
- 2 * data_transfer_instructions;
-
- cmd_datain = datain ? tmp->data_transfer_start : NULL;
- cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
- data_transfer_start) : NULL;
-
- /*
- * Fill in the NCR53c7x0_cmd structure as follows
- * dsa, with fixed up DSA code
- * datain code
- * dataout code
- */
-
- /* Copy template code into dsa and perform all necessary fixups */
- if (hostdata->dsa_fixup)
- hostdata->dsa_fixup(tmp);
-
- patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
- /*
- * XXX is this giving 53c710 access to the Scsi_Cmnd in some way?
- * Do we need to change it for caching reasons?
- */
- patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));
-
- if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS) {
-
- exp_select_indirect = ((1 << cmd->device->id) << 16) |
- (hostdata->sync[cmd->device->id].sxfer_sanity << 8);
-
- if (hostdata->sync[cmd->device->id].select_indirect !=
- exp_select_indirect) {
- printk ("scsi%d : sanity check failed select_indirect=0x%x\n",
- host->host_no, hostdata->sync[cmd->device->id].select_indirect);
- FATAL(host);
-
- }
- }
-
- patch_dsa_32(tmp->dsa, dsa_select, 0,
- hostdata->sync[cmd->device->id].select_indirect);
-
- /*
- * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
- * different commands; although it should be trivial to do them
- * both at the same time.
- */
- if (hostdata->initiate_wdtr & (1 << cmd->device->id)) {
- memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
- sizeof(wdtr_message));
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
- local_irq_save(flags);
- hostdata->initiate_wdtr &= ~(1 << cmd->device->id);
- local_irq_restore(flags);
- } else if (hostdata->initiate_sdtr & (1 << cmd->device->id)) {
- memcpy ((void *) (tmp->select + 1), (void *) sdtr_message,
- sizeof(sdtr_message));
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
- tmp->flags |= CMD_FLAG_SDTR;
- local_irq_save(flags);
- hostdata->initiate_sdtr &= ~(1 << cmd->device->id);
- local_irq_restore(flags);
-
- }
-#if 1
- else if (!(hostdata->talked_to & (1 << cmd->device->id)) &&
- !(hostdata->options & OPTION_NO_ASYNC)) {
-
- memcpy ((void *) (tmp->select + 1), (void *) async_message,
- sizeof(async_message));
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
- tmp->flags |= CMD_FLAG_SDTR;
- }
-#endif
- else
- patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);
-
- hostdata->talked_to |= (1 << cmd->device->id);
- tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ?
- IDENTIFY (1, cmd->device->lun) : IDENTIFY (0, cmd->device->lun);
- patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
- patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
- patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(tmp->cmnd));
- patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ?
- virt_to_bus (cmd_dataout)
- : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
- patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ?
- virt_to_bus (cmd_datain)
- : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
- /*
- * XXX - need to make endian aware, should use separate variables
- * for both status and message bytes.
- */
- patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
-/*
- * FIXME : these only work for little-endian. We probably want to
- * provide message and status fields in the NCR53c7x0_cmd
- * structure, and assign them to cmd->result when we're done.
- */
-#ifdef BIG_ENDIAN
- patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 2);
- patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
- patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result) + 3);
-#else
- patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 1);
- patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
- patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result));
-#endif
- patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
- patch_dsa_32(tmp->dsa, dsa_msgout_other, 1,
- virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
-
- /*
- * Generate code for zero or more of the DATA IN, DATA OUT phases
- * in the format
- *
- * CALL data_transfer, WHEN NOT phase
- * MOVE first buffer length, first buffer address, WHEN phase
- * ...
- * MOVE last buffer length, last buffer address, WHEN phase
- * JUMP other_transfer
- */
-
-/*
- * See if we're getting to data transfer by generating an unconditional
- * interrupt.
- */
-#if 0
- if (datain) {
- cmd_datain[0] = 0x98080000;
- cmd_datain[1] = 0x03ffd00d;
- cmd_datain += 2;
- }
-#endif
-
-/*
- * XXX - I'm undecided whether all of this nonsense is faster
- * in the long run, or whether I should just go and implement a loop
- * on the NCR chip using table indirect mode?
- *
- * In any case, this is how it _must_ be done for 53c700/700-66 chips,
- * so this stays even when we come up with something better.
- *
- * When we're limited to 1 simultaneous command, no overlapping processing,
- * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
- * drive.
- *
- * Not bad, not good. We'll see.
- */
-
- tmp->bounce.len = 0; /* Assume aligned buffer */
-
- for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
- cmd_dataout += 4, ++i) {
- u32 vbuf = cmd->use_sg
- ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
- ((struct scatterlist *)cmd->request_buffer)[i].offset
- : (u32)(cmd->request_buffer);
- u32 bbuf = virt_to_bus((void *)vbuf);
- u32 count = cmd->use_sg ?
- ((struct scatterlist *)cmd->request_buffer)[i].length :
- cmd->request_bufflen;
-
- /*
- * If we have buffers which are not aligned with 16 byte cache
- * lines, then we just hope nothing accesses the other parts of
- * those cache lines while the transfer is in progress. That would
- * fill the cache, and subsequent reads of the dma data would pick
- * up the wrong thing.
- * XXX We need a bounce buffer to handle that correctly.
- */
-
- if (((bbuf & 15) || (count & 15)) && (datain || dataout))
- {
- /* Bounce buffer needed */
- if (cmd->use_sg)
- printk ("53c7xx: Non-aligned buffer with use_sg\n");
- else if (datain && dataout)
- printk ("53c7xx: Non-aligned buffer with datain && dataout\n");
- else if (count > 256)
- printk ("53c7xx: Non-aligned transfer > 256 bytes\n");
- else
- {
- if (datain)
- {
- tmp->bounce.len = count;
- tmp->bounce.addr = vbuf;
- bbuf = virt_to_bus(tmp->bounce.buf);
- tmp->bounce.buf[0] = 0xff;
- tmp->bounce.buf[1] = 0xfe;
- tmp->bounce.buf[2] = 0xfd;
- tmp->bounce.buf[3] = 0xfc;
- }
- if (dataout)
- {
- memcpy ((void *)tmp->bounce.buf, (void *)vbuf, count);
- bbuf = virt_to_bus(tmp->bounce.buf);
- }
- }
- }
-
- if (datain) {
- cache_clear(virt_to_phys((void *)vbuf), count);
- /* CALL other_in, WHEN NOT DATA_IN */
- cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
- DCMD_TCI_IO) << 24) |
- DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
- cmd_datain[1] = virt_to_bus (hostdata->script) +
- hostdata->E_other_in;
- /* MOVE count, buf, WHEN DATA_IN */
- cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO)
- << 24) | count;
- cmd_datain[3] = bbuf;
-#if 0
- print_insn (host, cmd_datain, "dynamic ", 1);
- print_insn (host, cmd_datain + 2, "dynamic ", 1);
-#endif
- }
- if (dataout) {
- cache_push(virt_to_phys((void *)vbuf), count);
- /* CALL other_out, WHEN NOT DATA_OUT */
- cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) |
- DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
- cmd_dataout[1] = virt_to_bus(hostdata->script) +
- hostdata->E_other_out;
- /* MOVE count, buf, WHEN DATA_OUT */
- cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24)
- | count;
- cmd_dataout[3] = bbuf;
-#if 0
- print_insn (host, cmd_dataout, "dynamic ", 1);
- print_insn (host, cmd_dataout + 2, "dynamic ", 1);
-#endif
- }
- }
-
- /*
- * Install JUMP instructions after the data transfer routines to return
- * control to the do_other_transfer routines.
- */
-
-
- if (datain) {
- cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
- DBC_TCI_TRUE;
- cmd_datain[1] = virt_to_bus(hostdata->script) +
- hostdata->E_other_transfer;
-#if 0
- print_insn (host, cmd_datain, "dynamic jump ", 1);
-#endif
- cmd_datain += 2;
- }
-#if 0
- if (datain) {
- cmd_datain[0] = 0x98080000;
- cmd_datain[1] = 0x03ffdeed;
- cmd_datain += 2;
- }
-#endif
- if (dataout) {
- cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
- DBC_TCI_TRUE;
- cmd_dataout[1] = virt_to_bus(hostdata->script) +
- hostdata->E_other_transfer;
-#if 0
- print_insn (host, cmd_dataout, "dynamic jump ", 1);
-#endif
- cmd_dataout += 2;
- }
-
- return tmp;
-}
-
-/*
- * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
- * void (*done)(Scsi_Cmnd *))
- *
- * Purpose : enqueues a SCSI command
- *
- * Inputs : cmd - SCSI command, done - function called on completion, with
- * a pointer to the command descriptor.
- *
- * Returns : 0
- *
- * Side effects :
- * cmd is added to the per instance driver issue_queue, with major
- * twiddling done to the host specific fields of cmd. If the
- * process_issue_queue coroutine isn't running, it is restarted.
- *
- * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
- * hold our own data, and pervert the ptr field of the SCp field
- * to create a linked list.
- */
-
-int
-NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- unsigned long flags;
- Scsi_Cmnd *tmp;
-
- cmd->scsi_done = done;
- cmd->host_scribble = NULL;
- cmd->SCp.ptr = NULL;
- cmd->SCp.buffer = NULL;
-
-#ifdef VALID_IDS
- /* Ignore commands on invalid IDs */
- if (!hostdata->valid_ids[cmd->device->id]) {
- printk("scsi%d : ignoring target %d lun %d\n", host->host_no,
- cmd->device->id, cmd->device->lun);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- return 0;
- }
-#endif
-
- local_irq_save(flags);
- if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
- || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
- !(hostdata->debug_lun_limit[cmd->device->id] & (1 << cmd->device->lun)))
-#ifdef LINUX_1_2
- || cmd->device->id > 7
-#else
- || cmd->device->id >= host->max_id
-#endif
- || cmd->device->id == host->this_id
- || hostdata->state == STATE_DISABLED) {
- printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
- cmd->device->id, cmd->device->lun);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- local_irq_restore(flags);
- return 0;
- }
-
- if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
- (hostdata->debug_count_limit == 0)) {
- printk("scsi%d : maximum commands exceeded\n", host->host_no);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- local_irq_restore(flags);
- return 0;
- }
-
- if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
- switch (cmd->cmnd[0]) {
- case WRITE_6:
- case WRITE_10:
- printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
- host->host_no);
- cmd->result = (DID_BAD_TARGET << 16);
- done(cmd);
- local_irq_restore(flags);
- return 0;
- }
- }
-
- if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
- hostdata->debug_count_limit != -1)
- --hostdata->debug_count_limit;
-
- cmd->result = 0xffff; /* The NCR will overwrite message
- and status with valid data */
- cmd->host_scribble = (unsigned char *) create_cmd (cmd);
-
- /*
- * REQUEST SENSE commands are inserted at the head of the queue
- * so that we do not clear the contingent allegiance condition
- * they may be looking at.
- */
-
- if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
- cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
- hostdata->issue_queue = cmd;
- } else {
- for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
- tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
- tmp->SCp.ptr = (unsigned char *) cmd;
- }
- local_irq_restore(flags);
- run_process_issue_queue();
- return 0;
-}
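-
-/*
- * A minimal sketch (wrapped in #if 0, never compiled) of how the SCp.ptr
- * linked list built by NCR53c7xx_queue_command() can be walked.  The
- * helper name and its use are purely illustrative; nothing in this
- * driver calls it.
- */
-
-#if 0
-static int
-count_issue_queue (struct NCR53c7x0_hostdata *hostdata) {
-    Scsi_Cmnd *walk;
-    int n = 0;
-
-    /* Caller is assumed to hold interrupts disabled, as the queueing
-       code above does while it touches hostdata->issue_queue. */
-    for (walk = (Scsi_Cmnd *) hostdata->issue_queue; walk;
-         walk = (Scsi_Cmnd *) walk->SCp.ptr)
-        ++n;
-    return n;
-}
-#endif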
-
-/*
- * Function : void to_schedule_list (struct Scsi_Host *host,
- * struct NCR53c7x0_hostdata *hostdata, struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : takes a SCSI command which was just removed from the
- * issue queue, and deals with it by inserting it in the first
- * free slot in the schedule list or by terminating it immediately.
- *
- * Inputs :
- * host - SCSI host adapter; hostdata - hostdata structure for
- * this adapter; cmd - a pointer to the command; should have
- * the host_scribble field of its Scsi_Cmnd initialized to point
- * to a valid NCR53c7x0_cmd structure.
- *
- * Side effects :
- * cmd is added to the per instance schedule list, with minor
- * twiddling done to the host specific fields of cmd.
- *
- */
-
-static __inline__ void
-to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
- struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- Scsi_Cmnd *tmp = cmd->cmd;
- unsigned long flags;
- /* dsa start is negative, so subtraction is used */
- volatile u32 *ncrcurrent;
-
- int i;
- NCR53c7x0_local_setup(host);
-#if 0
- printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
- virt_to_bus(hostdata->dsa), hostdata->dsa);
-#endif
-
- local_irq_save(flags);
-
- /*
- * Work around race condition : if an interrupt fired and we
- * got disabled, forget about this command.
- */
-
- if (hostdata->state == STATE_DISABLED) {
- printk("scsi%d : driver disabled\n", host->host_no);
- tmp->result = (DID_BAD_TARGET << 16);
- cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
- hostdata->free = cmd;
- tmp->scsi_done(tmp);
- local_irq_restore(flags);
- return;
- }
-
- for (i = host->can_queue, ncrcurrent = hostdata->schedule;
- i > 0 && ncrcurrent[0] != hostdata->NOP_insn;
- --i, ncrcurrent += 2 /* JUMP instructions are two words */);
-
- if (i > 0) {
- ++hostdata->busy[tmp->device->id][tmp->device->lun];
- cmd->next = hostdata->running_list;
- hostdata->running_list = cmd;
-
- /* Restore this instruction to a NOP once the command starts */
- cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
- sizeof(u32)] = (u32) virt_to_bus ((void *)ncrcurrent);
- /* Replace the current jump operand. */
- ncrcurrent[1] =
- virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
- hostdata->E_dsa_code_template;
- /* Replace the NOP instruction with a JUMP */
- ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
- DBC_TCI_TRUE;
- } else {
- printk ("scsi%d: no free slot\n", host->host_no);
- disable(host);
- tmp->result = (DID_ERROR << 16);
- cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
- hostdata->free = cmd;
- tmp->scsi_done(tmp);
- local_irq_restore(flags);
- return;
- }
-
- /*
- * If the NCR chip is in an idle state, start it running the scheduler
- * immediately. Otherwise, signal the chip to jump to schedule as
- * soon as it is idle.
- */
-
- if (hostdata->idle) {
- hostdata->idle = 0;
- hostdata->state = STATE_RUNNING;
- NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
- if (hostdata->options & OPTION_DEBUG_TRACE)
- NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
- DCNTL_SSM | DCNTL_STD);
- } else {
- NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
- }
-
- local_irq_restore(flags);
-}
-
-/*
- * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
- * *hostdata, Scsi_Cmnd *cmd)
- *
- * Purpose : decide if we can pass the given SCSI command on to the
- * device in question or not.
- *
- * Returns : non-zero when we're busy, 0 when we aren't.
- */
-
-static __inline__ int
-busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
- Scsi_Cmnd *cmd) {
- /* FIXME : in the future, this needs to accommodate SCSI-II tagged
- queuing, and we may be able to play with fairness here a bit.
- */
- return hostdata->busy[cmd->device->id][cmd->device->lun];
-}
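-
-/*
- * A minimal sketch of the tagged-queuing accommodation suggested by the
- * FIXME above : rather than treating any non-zero busy count as "busy",
- * compare it against the device queue depth.  Illustrative only (hence
- * #if 0); the scheduling code keeps using busyp() as is.
- */
-
-#if 0
-static __inline__ int
-busyp_tagged (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
-    Scsi_Cmnd *cmd) {
-    int depth = cmd->device->tagged_supported ? cmd->device->queue_depth : 1;
-
-    return hostdata->busy[cmd->device->id][cmd->device->lun] >= depth;
-}
-#endif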
-
-/*
- * Function : process_issue_queue (void)
- *
- * Purpose : transfer commands from the issue queue to NCR start queue
- * of each NCR53c7/8xx in the system, avoiding kernel stack
- * overflows when the scsi_done() function is invoked recursively.
- *
- * NOTE : process_issue_queue exits with interrupts *disabled*, so the
- * caller must reenable them if it desires.
- *
- * NOTE : process_issue_queue should be called from both
- * NCR53c7x0_queue_command() and from the interrupt handler
- * after command completion in case NCR53c7x0_queue_command()
- * isn't invoked again but we've freed up resources that are
- * needed.
- */
-
-static void
-process_issue_queue (unsigned long flags) {
- Scsi_Cmnd *tmp, *prev;
- struct Scsi_Host *host;
- struct NCR53c7x0_hostdata *hostdata;
- int done;
-
- /*
- * We run (with interrupts disabled) until we're sure that none of
- * the host adapters have anything that can be done, at which point
- * we set process_issue_queue_running to 0 and exit.
- *
- * Interrupts are enabled before doing various other internal
- * instructions, after we've decided that we need to run through
- * the loop again.
- *
- */
-
- do {
- local_irq_disable(); /* Freeze request queues */
- done = 1;
- for (host = first_host; host && host->hostt == the_template;
- host = host->next) {
- hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
- local_irq_disable();
- if (hostdata->issue_queue) {
- if (hostdata->state == STATE_DISABLED) {
- tmp = (Scsi_Cmnd *) hostdata->issue_queue;
- hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
- tmp->result = (DID_BAD_TARGET << 16);
- if (tmp->host_scribble) {
- ((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
- hostdata->free;
- hostdata->free =
- (struct NCR53c7x0_cmd *)tmp->host_scribble;
- tmp->host_scribble = NULL;
- }
- tmp->scsi_done (tmp);
- done = 0;
- } else
- for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
- prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
- tmp->SCp.ptr)
- if (!tmp->host_scribble ||
- !busyp (host, hostdata, tmp)) {
- if (prev)
- prev->SCp.ptr = tmp->SCp.ptr;
- else
- hostdata->issue_queue = (Scsi_Cmnd *)
- tmp->SCp.ptr;
- tmp->SCp.ptr = NULL;
- if (tmp->host_scribble) {
- if (hostdata->options & OPTION_DEBUG_QUEUES)
- printk ("scsi%d : moving command for target %d lun %d to start list\n",
- host->host_no, tmp->device->id, tmp->device->lun);
-
-
- to_schedule_list (host, hostdata,
- (struct NCR53c7x0_cmd *)
- tmp->host_scribble);
- } else {
- if (((tmp->result & 0xff) == 0xff) ||
- ((tmp->result & 0xff00) == 0xff00)) {
- printk ("scsi%d : danger Will Robinson!\n",
- host->host_no);
- tmp->result = DID_ERROR << 16;
- disable (host);
- }
- tmp->scsi_done(tmp);
- }
- done = 0;
- } /* if target/lun is not busy */
- } /* if hostdata->issue_queue */
- if (!done)
- local_irq_restore(flags);
- } /* for host */
- } while (!done);
- process_issue_queue_running = 0;
-}
-
-/*
- * Function : static void intr_scsi (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : handle all SCSI interrupts, indicated by the setting
- * of the SIP bit in the ISTAT register.
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- */
-
-static void
-intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- unsigned char sstat0_sist0, sist1, /* Registers */
- fatal; /* Did a fatal interrupt
- occur ? */
-
- NCR53c7x0_local_setup(host);
-
- fatal = 0;
-
- sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
- sist1 = 0;
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
- sstat0_sist0, sist1);
-
- /* 250ms selection timeout */
- if (sstat0_sist0 & SSTAT0_700_STO) {
- fatal = 1;
- if (hostdata->options & OPTION_DEBUG_INTR) {
- printk ("scsi%d : Selection Timeout\n", host->host_no);
- if (cmd) {
- printk("scsi%d : target %d, lun %d, command ",
- host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
- __scsi_print_command (cmd->cmd->cmnd);
- printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
- NCR53c7x0_read32(DSP_REG),
- bus_to_virt(NCR53c7x0_read32(DSP_REG)));
- } else {
- printk("scsi%d : no command\n", host->host_no);
- }
- }
-/*
- * XXX - question : how do we want to handle the Illegal Instruction
- * interrupt, which may occur before or after the Selection Timeout
- * interrupt?
- */
-
- if (1) {
- hostdata->idle = 1;
- hostdata->expecting_sto = 0;
-
- if (hostdata->test_running) {
- hostdata->test_running = 0;
- hostdata->test_completed = 3;
- } else if (cmd) {
- abnormal_finished(cmd, DID_BAD_TARGET << 16);
- }
-#if 0
- hostdata->intrs = 0;
-#endif
- }
- }
-
-/*
- * FIXME : in theory, we can also get a UDC when a STO occurs.
- */
- if (sstat0_sist0 & SSTAT0_UDC) {
- fatal = 1;
- if (cmd) {
- printk("scsi%d : target %d lun %d unexpected disconnect\n",
- host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
- print_lots (host);
- abnormal_finished(cmd, DID_ERROR << 16);
- } else
- printk("scsi%d : unexpected disconnect (no command)\n",
- host->host_no);
-
- hostdata->dsp = (u32 *) hostdata->schedule;
- hostdata->dsp_changed = 1;
- }
-
- /* SCSI PARITY error */
- if (sstat0_sist0 & SSTAT0_PAR) {
- fatal = 1;
- if (cmd && cmd->cmd) {
- printk("scsi%d : target %d lun %d parity error.\n",
- host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
- abnormal_finished (cmd, DID_PARITY << 16);
- } else
- printk("scsi%d : parity error\n", host->host_no);
- /* Should send message out, parity error */
-
- /* XXX - Reduce synchronous transfer rate! */
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- }
-
- /* SCSI GROSS error */
- if (sstat0_sist0 & SSTAT0_SGE) {
- fatal = 1;
- printk("scsi%d : gross error, saved2_dsa = 0x%x\n", host->host_no,
- (unsigned int)hostdata->saved2_dsa);
- print_lots (host);
-
- /*
- * A SCSI gross error may occur when we have
- *
- * - A synchronous offset which causes the SCSI FIFO to be overwritten.
- *
- * - A REQ which causes the maximum synchronous offset programmed in
- * the SXFER register to be exceeded.
- *
- * - A phase change with an outstanding synchronous offset.
- *
- * - Residual data in the synchronous data FIFO, with a transfer
- * other than a synchronous receive being started.
- */
-
-
- /* XXX Should deduce synchronous transfer rate! */
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- }
-
- /* Phase mismatch */
- if (sstat0_sist0 & SSTAT0_MA) {
- fatal = 1;
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : SSTAT0_MA\n", host->host_no);
- intr_phase_mismatch (host, cmd);
- }
-
-#if 0
- if (sstat0_sist0 & SIST0_800_RSL)
- printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
-#endif
-
-/*
- * If a fatal SCSI interrupt occurs, we must ensure that the DMA and
- * SCSI FIFOs were flushed.
- */
-
- if (fatal) {
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
-
- if (!(hostdata->dstat & DSTAT_DFE)) {
- printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
- /*
- * Really need to check this code for 710 RGH.
- * Haven't seen any problems, but maybe we should FLUSH before
- * clearing sometimes.
- */
- NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
- while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
- ;
- hostdata->dstat |= DSTAT_DFE;
- }
- }
-}
-
-#ifdef CYCLIC_TRACE
-
-/*
- * The following implements a cyclic log of instructions executed, if you turn
- * TRACE on. It will also print the log for you. Very useful when debugging
- * 53c710 support, possibly not really needed any more.
- */
-
-u32 insn_log[4096];
-u32 insn_log_index = 0;
-
-void log1 (u32 i)
-{
- insn_log[insn_log_index++] = i;
- if (insn_log_index == 4096)
- insn_log_index = 0;
-}
-
-void log_insn (u32 *ip)
-{
- log1 ((u32)ip);
- log1 (*ip);
- log1 (*(ip+1));
- if (((*ip >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
- log1 (*(ip+2));
-}
-
-void dump_log(void)
-{
- int cnt = 0;
- int i = insn_log_index;
- int size;
- struct Scsi_Host *host = first_host;
-
- while (cnt < 4096) {
- printk ("%08x (+%6x): ", insn_log[i], (insn_log[i] - (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4);
- if (++i == 4096)
- i = 0;
- cnt++;
- if (((insn_log[i] >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
- size = 3;
- else
- size = 2;
- while (size--) {
- printk ("%08x ", insn_log[i]);
- if (++i == 4096)
- i = 0;
- cnt++;
- }
- printk ("\n");
- }
-}
-#endif
-
-
-/*
- * Function : static void NCR53c7x0_intfly (struct Scsi_Host *host)
- *
- * Purpose : Scan command queue for specified host, looking for completed
- * commands.
- *
- * Inputs : Scsi_Host pointer.
- *
- * This is called from the interrupt handler, when a simulated INTFLY
- * interrupt occurs.
- */
-
-static void
-NCR53c7x0_intfly (struct Scsi_Host *host)
-{
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata; /* host->hostdata[0] */
- struct NCR53c7x0_cmd *cmd, /* command which halted */
- **cmd_prev_ptr;
- unsigned long flags;
- char search_found = 0; /* Got at least one ? */
-
- hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
- NCR53c7x0_local_setup(host);
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : INTFLY\n", host->host_no);
-
- /*
- * Traverse our list of running commands, and look
- * for those with valid (non-0xff ff) status and message
- * bytes encoded in the result which signify command
- * completion.
- */
-
- local_irq_save(flags);
-restart:
- for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)&(hostdata->running_list),
- cmd = (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
- cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
- cmd = (struct NCR53c7x0_cmd *) cmd->next)
- {
- Scsi_Cmnd *tmp;
-
- if (!cmd) {
- printk("scsi%d : very weird.\n", host->host_no);
- break;
- }
-
- if (!(tmp = cmd->cmd)) {
- printk("scsi%d : weird. NCR53c7x0_cmd has no Scsi_Cmnd\n",
- host->host_no);
- continue;
- }
- /* Copy the result over now; may not be complete,
- * but subsequent tests may as well be done on
- * cached memory.
- */
- tmp->result = cmd->result;
-
- if (((tmp->result & 0xff) == 0xff) ||
- ((tmp->result & 0xff00) == 0xff00))
- continue;
-
- search_found = 1;
-
- if (cmd->bounce.len)
- memcpy ((void *)cmd->bounce.addr,
- (void *)cmd->bounce.buf, cmd->bounce.len);
-
- /* Important - remove from list _before_ done is called */
- if (cmd_prev_ptr)
- *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;
-
- --hostdata->busy[tmp->device->id][tmp->device->lun];
- cmd->next = hostdata->free;
- hostdata->free = cmd;
-
- tmp->host_scribble = NULL;
-
- if (hostdata->options & OPTION_DEBUG_INTR) {
- printk ("scsi%d : command complete : pid %lu, id %d,lun %d result 0x%x ",
- host->host_no, tmp->pid, tmp->device->id, tmp->device->lun, tmp->result);
- __scsi_print_command (tmp->cmnd);
- }
-
- tmp->scsi_done(tmp);
- goto restart;
- }
- local_irq_restore(flags);
-
- if (!search_found) {
- printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
- host->host_no);
- } else {
- run_process_issue_queue();
- }
- return;
-}
-
-/*
- * Function : static irqreturn_t NCR53c7x0_intr (int irq, void *dev_id)
- *
- * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
- * the same IRQ line.
- *
- * Inputs : Since we're using the IRQF_DISABLED interrupt handler
- * semantics, irq indicates the interrupt which invoked
- * this handler.
- *
- * On the 710 we simulate an INTFLY with a script interrupt, and the
- * script interrupt handler will call back to this function.
- */
-
-static irqreturn_t
-NCR53c7x0_intr (int irq, void *dev_id)
-{
- NCR53c7x0_local_declare();
- struct Scsi_Host *host; /* Host we are looking at */
- unsigned char istat; /* Values of interrupt regs */
- struct NCR53c7x0_hostdata *hostdata; /* host->hostdata[0] */
- struct NCR53c7x0_cmd *cmd; /* command which halted */
- u32 *dsa; /* DSA */
- int handled = 0;
-
-#ifdef NCR_DEBUG
- char buf[80]; /* Debugging sprintf buffer */
- size_t buflen; /* Length of same */
-#endif
-
- host = (struct Scsi_Host *)dev_id;
- hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
- NCR53c7x0_local_setup(host);
-
- /*
- * Only read istat once per loop, since reading it again will unstack
- * interrupts
- */
-
- while ((istat = NCR53c7x0_read8(hostdata->istat)) & (ISTAT_SIP|ISTAT_DIP)) {
- handled = 1;
- hostdata->dsp_changed = 0;
- hostdata->dstat_valid = 0;
- hostdata->state = STATE_HALTED;
-
- if (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK)
- printk ("scsi%d : SCSI FIFO not empty\n", host->host_no);
-
- /*
- * NCR53c700 and NCR53c700-66 change the current SCSI
- * process, hostdata->ncrcurrent, in the Linux driver so
- * cmd = hostdata->ncrcurrent.
- *
- * With other chips, we must look through the commands
- * executing and find the command structure which
- * corresponds to the DSA register.
- */
-
- if (hostdata->options & OPTION_700) {
- cmd = (struct NCR53c7x0_cmd *) hostdata->ncrcurrent;
- } else {
- dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
- for (cmd = (struct NCR53c7x0_cmd *) hostdata->running_list;
- cmd && (dsa + (hostdata->dsa_start / sizeof(u32))) != cmd->dsa;
- cmd = (struct NCR53c7x0_cmd *)(cmd->next))
- ;
- }
- if (hostdata->options & OPTION_DEBUG_INTR) {
- if (cmd) {
- printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
- host->host_no, cmd->cmd->pid, (int) cmd->cmd->device->id,
- (int) cmd->cmd->device->lun);
- __scsi_print_command (cmd->cmd->cmnd);
- } else {
- printk("scsi%d : no active command\n", host->host_no);
- }
- }
-
- if (istat & ISTAT_SIP) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : ISTAT_SIP\n", host->host_no);
- intr_scsi (host, cmd);
- }
-
- if (istat & ISTAT_DIP) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : ISTAT_DIP\n", host->host_no);
- intr_dma (host, cmd);
- }
-
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
-
- if (!(hostdata->dstat & DSTAT_DFE)) {
- printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
- /* Really need to check this out for 710 RGH */
- NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
- while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
- ;
- hostdata->dstat |= DSTAT_DFE;
- }
-
- if (!hostdata->idle && hostdata->state == STATE_HALTED) {
- if (!hostdata->dsp_changed)
- hostdata->dsp = (u32 *)bus_to_virt(NCR53c7x0_read32(DSP_REG));
-#if 0
- printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
- host->host_no, virt_to_bus(hostdata->dsp), hostdata->dsp);
-#endif
-
- hostdata->state = STATE_RUNNING;
- NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
- if (hostdata->options & OPTION_DEBUG_TRACE) {
-#ifdef CYCLIC_TRACE
- log_insn (hostdata->dsp);
-#else
- print_insn (host, hostdata->dsp, "t ", 1);
-#endif
- NCR53c7x0_write8 (DCNTL_REG,
- hostdata->saved_dcntl | DCNTL_SSM | DCNTL_STD);
- }
- }
- }
- return IRQ_HANDLED;
-}
-
-
-/*
- * Function : static int abort_connected (struct Scsi_Host *host)
- *
- * Purpose : Assuming that the NCR SCSI processor is currently
- * halted, break the currently established nexus. Clean
- * up of the NCR53c7x0_cmd and Scsi_Cmnd structures should
- * be done on receipt of the abort interrupt.
- *
- * Inputs : host - SCSI host
- *
- */
-
-static int
-abort_connected (struct Scsi_Host *host) {
-#ifdef NEW_ABORT
- NCR53c7x0_local_declare();
-#endif
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
-/* FIXME : this probably should change for production kernels; at the
- least, counter should move to a per-host structure. */
- static int counter = 5;
-#ifdef NEW_ABORT
- int sstat, phase, offset;
- u32 *script;
- NCR53c7x0_local_setup(host);
-#endif
-
- if (--counter <= 0) {
- disable(host);
- return 0;
- }
-
- printk ("scsi%d : DANGER : abort_connected() called \n",
- host->host_no);
-
-#ifdef NEW_ABORT
-
-/*
- * New strategy : Rather than using a generic abort routine,
- * we'll specifically try to source or sink the appropriate
- * amount of data for the phase we're currently in (taking into
- * account the current synchronous offset)
- */
-
- sstat = NCR53c8x0_read8 (SSTAT2_REG);
- offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
- phase = sstat & SSTAT2_PHASE_MASK;
-
-/*
- * SET ATN
- * MOVE source_or_sink, WHEN CURRENT PHASE
- * < repeat for each outstanding byte >
- * JUMP send_abort_message
- */
-
- script = hostdata->abort_script = kmalloc (
- 8 /* instruction size */ * (
- 1 /* set ATN */ +
- (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
- 1 /* send abort message */),
- GFP_ATOMIC);
-
-
-#else /* def NEW_ABORT */
- hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
- sizeof(u32);
-#endif /* def NEW_ABORT */
- hostdata->dsp_changed = 1;
-
-/* XXX - need to flag the command as aborted after the abort_connected
- code runs
- */
- return 0;
-}
-
-/*
- * Function : static int datapath_residual (Scsi_Host *host)
- *
- * Purpose : return residual data count of what's in the chip.
- *
- * Inputs : host - SCSI host
- */
-
-static int
-datapath_residual (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- int count, synchronous, sstat;
- unsigned int ddir;
-
- NCR53c7x0_local_setup(host);
- /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
- count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
- (NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
- synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
- /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
- ddir = NCR53c7x0_read8 (CTEST0_REG_700) & CTEST0_700_DDIR;
-
- if (ddir) {
- /* Receive */
- if (synchronous)
- count += (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
- else
- if (NCR53c7x0_read8 (SSTAT1_REG) & SSTAT1_ILF)
- ++count;
- } else {
- /* Send */
- sstat = NCR53c7x0_read8 (SSTAT1_REG);
- if (sstat & SSTAT1_OLF)
- ++count;
- if (synchronous && (sstat & SSTAT1_ORF))
- ++count;
- }
- return count;
-}
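-
-/*
- * A worked example of the wrap-around arithmetic above, assuming
- * DFIFO_10_BO_MASK is the 7-bit byte-offset mask (0x7f) :
- *
- *	DFIFO byte offset	= 0x05
- *	DBC & 0x7f		= 0x7e
- *	(0x05 - 0x7e) & 0x7f	= 0x07
- *
- * i.e. seven bytes are still in the DMA FIFO even though the raw
- * subtraction is negative, because both counters wrap modulo 128.
- */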
-
-/*
- * Function : static const char * sbcl_to_phase (int sbcl)
- *
- * Purpose : Convert SBCL register to user-parsable phase representation
- *
- * Inputs : sbcl - value of sbcl register
- */
-
-
-static const char *
-sbcl_to_phase (int sbcl) {
- switch (sbcl & SBCL_PHASE_MASK) {
- case SBCL_PHASE_DATAIN:
- return "DATAIN";
- case SBCL_PHASE_DATAOUT:
- return "DATAOUT";
- case SBCL_PHASE_MSGIN:
- return "MSGIN";
- case SBCL_PHASE_MSGOUT:
- return "MSGOUT";
- case SBCL_PHASE_CMDOUT:
- return "CMDOUT";
- case SBCL_PHASE_STATIN:
- return "STATUSIN";
- default:
- return "unknown";
- }
-}
-
-/*
- * Function : static const char * sstat2_to_phase (int sstat)
- *
- * Purpose : Convert SSTAT2 register to user-parsable phase representation
- *
- * Inputs : sstat - value of sstat register
- */
-
-
-static const char *
-sstat2_to_phase (int sstat) {
- switch (sstat & SSTAT2_PHASE_MASK) {
- case SSTAT2_PHASE_DATAIN:
- return "DATAIN";
- case SSTAT2_PHASE_DATAOUT:
- return "DATAOUT";
- case SSTAT2_PHASE_MSGIN:
- return "MSGIN";
- case SSTAT2_PHASE_MSGOUT:
- return "MSGOUT";
- case SSTAT2_PHASE_CMDOUT:
- return "CMDOUT";
- case SSTAT2_PHASE_STATIN:
- return "STATUSIN";
- default:
- return "unknown";
- }
-}
-
-/*
- * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : Handle phase mismatch interrupts
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- *
- * Side effects : The abort_connected() routine is called or the NCR chip
- * is restarted, jumping to the command_complete entry point, or
- * patching the address and transfer count of the current instruction
- * and calling the msg_in entry point as appropriate.
- */
-
-static void
-intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- u32 dbc_dcmd, *dsp, *dsp_next;
- unsigned char dcmd, sbcl;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int residual;
- enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
- ACTION_ABORT_PRINT;
- const char *where = NULL;
-
- NCR53c7x0_local_setup(host);
-
- /*
- * Corrective action is based on where in the SCSI SCRIPT(tm) the error
- * occurred, as well as which SCSI phase we are currently in.
- */
- dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));
-
- /*
- * Fetch the current instruction, and remove the operands for easier
- * interpretation.
- */
- dbc_dcmd = NCR53c7x0_read32(DBC_REG);
- dcmd = (dbc_dcmd & 0xff000000) >> 24;
- /*
- * Like other processors, the NCR adjusts the instruction pointer before
- * instruction decode. Set the DSP address back to what it should
- * be for this instruction based on its size (2 or 3 32 bit words).
- */
- dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
-
-
- /*
- * Read new SCSI phase from the SBCL lines. Since all of our code uses
- * a WHEN conditional instead of an IF conditional, we don't need to
- * wait for a new REQ.
- */
- sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
-
- if (!cmd) {
- action = ACTION_ABORT_PRINT;
- where = "no current command";
- /*
- * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
- * mismatches should only occur where we're doing a multi-byte
- * BMI instruction. Specifically, this means
- *
- * - select messages (a SCSI-I target may ignore additional messages
- * after the IDENTIFY; any target may reject a SDTR or WDTR)
- *
- * - command out (targets may send a message to signal an error
- * condition, or go into STATUSIN after they've decided
- * they don't like the command).
- *
- * - reply_message (targets may reject a multi-byte message in the
- * middle)
- *
- * - data transfer routines (command completion with buffer space
- * left, disconnect message, or error message)
- */
- } else if (((dsp >= cmd->data_transfer_start &&
- dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
- if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
- DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
- DCMD_BMI_OP_MOVE_I)) {
- residual = datapath_residual (host);
- if (hostdata->options & OPTION_DEBUG_DISCONNECT)
- printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
- host->host_no, residual);
-
- /*
- * The first instruction is a CALL to the alternate handler for
- * this data transfer phase, so we can do calls to
- * munge_msg_restart as we would if control were passed
- * from normal dynamic code.
- */
- if (dsp != cmd->residual + 2) {
- cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
- ((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
- DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
- cmd->residual[1] = virt_to_bus(hostdata->script)
- + ((dcmd & DCMD_BMI_IO)
- ? hostdata->E_other_in : hostdata->E_other_out);
- }
-
- /*
- * The second instruction is the data transfer block
- * move instruction, reflecting the pointer and count at the
- * time of the phase mismatch.
- */
- cmd->residual[2] = dbc_dcmd + residual;
- cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
-
- /*
- * The third and final instruction is a jump to the instruction
- * which follows the instruction which had to be 'split'
- */
- if (dsp != cmd->residual + 2) {
- cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
- << 24) | DBC_TCI_TRUE;
- cmd->residual[5] = virt_to_bus(dsp_next);
- }
-
- /*
- * For the sake of simplicity, transfer control to the
- * conditional CALL at the start of the residual buffer.
- */
- hostdata->dsp = cmd->residual;
- hostdata->dsp_changed = 1;
- action = ACTION_CONTINUE;
- } else {
- where = "non-BMI dynamic DSA code";
- action = ACTION_ABORT_PRINT;
- }
- } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4 + 2)) {
- /* RGH 290697: Added +2 above, to compensate for the script
- * instruction which disables the selection timer. */
- /* Release ATN */
- NCR53c7x0_write8 (SOCL_REG, 0);
- switch (sbcl) {
- /*
- * Some devices (SQ555 come to mind) grab the IDENTIFY message
- * sent on selection, and decide to go into COMMAND OUT phase
- * rather than accepting the rest of the messages or rejecting
- * them. Handle these devices gracefully.
- */
- case SBCL_PHASE_CMDOUT:
- hostdata->dsp = dsp + 2 /* two _words_ */;
- hostdata->dsp_changed = 1;
- printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
- host->host_no, cmd->cmd->device->id);
- cmd->flags &= ~CMD_FLAG_SDTR;
- action = ACTION_CONTINUE;
- break;
- case SBCL_PHASE_MSGIN:
- hostdata->dsp = hostdata->script + hostdata->E_msg_in /
- sizeof(u32);
- hostdata->dsp_changed = 1;
- action = ACTION_CONTINUE;
- break;
- default:
- where="select message out";
- action = ACTION_ABORT_PRINT;
- }
- /*
- * Some SCSI devices will interpret a command as they read the bytes
- * off the SCSI bus, and may decide that the command is Bogus before
- * they've read the entire command off the bus.
- */
- } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
- (u32)) {
- hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
- sizeof (u32);
- hostdata->dsp_changed = 1;
- action = ACTION_CONTINUE;
- /* FIXME : we need to handle message reject, etc. within msg_respond. */
-#ifdef notyet
- } else if (dsp == hostdata->script + hostdata->E_reply_message) {
- switch (sbcl) {
- /* Any other phase mismatches abort the currently executing command. */
-#endif
- } else {
- where = "unknown location";
- action = ACTION_ABORT_PRINT;
- }
-
- /* Flush DMA FIFO */
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
- if (!(hostdata->dstat & DSTAT_DFE)) {
- /* Really need to check this out for 710 RGH */
- NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
- while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF);
- hostdata->dstat |= DSTAT_DFE;
- }
-
- switch (action) {
- case ACTION_ABORT_PRINT:
- printk("scsi%d : %s : unexpected phase %s.\n",
- host->host_no, where ? where : "unknown location",
- sbcl_to_phase(sbcl));
- print_lots (host);
- /* Fall through to ACTION_ABORT */
- case ACTION_ABORT:
- abort_connected (host);
- break;
- case ACTION_CONTINUE:
- break;
- }
-
-#if 0
- if (hostdata->dsp_changed) {
- printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
- print_insn (host, hostdata->dsp, "", 1);
- }
-#endif
-}
-
-/*
- * Function : static void intr_bf (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : handle BUS FAULT interrupts
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- */
-
-static void
-intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- u32 *dsp,
- *next_dsp, /* Current dsp */
- *dsa,
- dbc_dcmd; /* DCMD (high eight bits) + DBC */
- char *reason = NULL;
- /* Default behavior is for a silent error, with a retry until we've
- exhausted retries. */
- enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
- int report = 0;
- NCR53c7x0_local_setup(host);
-
- dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
- next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
- dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
-/* FIXME - check chip type */
- dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
-
- /*
- * Bus faults can be caused by either a Bad Address or
- * Target Abort. We should check the Received Target Abort
- * bit of the PCI status register and Master Abort Bit.
- *
- * - Master Abort bit indicates that no device claimed
- * the address with DEVSEL within five clocks
- *
- * - Target Abort bit indicates that a target claimed it,
- * but changed its mind once it saw the byte enables.
- *
- */
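-
-/*
- * Hypothetical sketch (never compiled) of how those two PCI status bits
- * could be distinguished on a PCI-hosted chip, assuming a struct pci_dev
- * *pdev were available; the 53c710 systems this driver supports have no
- * PCI configuration space to read.
- */
-#if 0
-    {
-	u16 status;
-
-	pci_read_config_word (pdev, PCI_STATUS, &status);
-	if (status & PCI_STATUS_REC_MASTER_ABORT)
-	    reason = "Master Abort";
-	else if (status & PCI_STATUS_REC_TARGET_ABORT)
-	    reason = "Target Abort";
-	/* Both bits are write-one-to-clear */
-	pci_write_config_word (pdev, PCI_STATUS, status);
-    }
-#endif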
-
- /* 53c710, not PCI system */
- report = 1;
- reason = "Unknown";
-
-#ifndef notyet
- report = 1;
-#endif
- if (report && reason)
- {
- printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
- host->host_no, reason ? reason : "unknown");
- print_lots (host);
- }
-
-#ifndef notyet
- retry = NEVER;
-#endif
-
- /*
- * TODO : we should attempt to recover from any spurious bus
- * faults. After X retries, we should figure that things are
- * sufficiently wedged, and call NCR53c7xx_reset.
- *
- * This code should only get executed once we've decided that we
- * cannot retry.
- */
-
- if (retry == NEVER) {
- printk(KERN_ALERT " mail richard@sleepie.demon.co.uk\n");
- FATAL (host);
- }
-}
-
-/*
- * Function : static void intr_dma (struct Scsi_Host *host,
- * struct NCR53c7x0_cmd *cmd)
- *
- * Purpose : handle all DMA interrupts, indicated by the setting
- * of the DIP bit in the ISTAT register.
- *
- * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
- * may be NULL.
- */
-
-static void
-intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned char dstat; /* DSTAT */
- u32 *dsp,
- *next_dsp, /* Current dsp */
- *dsa,
- dbc_dcmd; /* DCMD (high eight bits) + DBC */
- int tmp;
- unsigned long flags;
- NCR53c7x0_local_setup(host);
-
- if (!hostdata->dstat_valid) {
- hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
- hostdata->dstat_valid = 1;
- }
-
- dstat = hostdata->dstat;
-
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);
-
- dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
- next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
- dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
-/* XXX - check chip type */
- dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
-
- /*
- * DSTAT_ABRT is the aborted interrupt. This is set whenever the
- * SCSI chip is aborted.
- *
- * With NCR53c700 and NCR53c700-66 style chips, we should only
- * get this when the chip is currently running the accept
- * reselect/select code and we have set the abort bit in the
- * ISTAT register.
- *
- */
-
- if (dstat & DSTAT_ABRT) {
-#if 0
- /* XXX - add code here to deal with normal abort */
- if ((hostdata->options & OPTION_700) && (hostdata->state ==
- STATE_ABORTING)) {
- } else
-#endif
- {
- printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
- " ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "s ", 1);
- FATAL (host);
- }
- }
-
- /*
- * DSTAT_SSI is the single step interrupt. Should be generated
- * whenever we have single stepped or are tracing.
- */
-
- if (dstat & DSTAT_SSI) {
- if (hostdata->options & OPTION_DEBUG_TRACE) {
- /* Don't print instr. until we write DSP at end of intr function */
- } else if (hostdata->options & OPTION_DEBUG_SINGLE) {
- print_insn (host, dsp, "s ", 0);
- local_irq_save(flags);
-/* XXX - should we do this, or can we get away with writing dsp? */
-
- NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
- ~DCNTL_SSM) | DCNTL_STD);
- local_irq_restore(flags);
- } else {
- printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
- " ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "", 1);
- printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
- FATAL (host);
- }
- }
-
- /*
- * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
- * is different) is generated whenever an illegal instruction is
- * encountered.
- *
- * XXX - we may want to emulate INTFLY here, so we can use
- * the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
- * chips.
- */
-
- if (dstat & DSTAT_OPC) {
- /*
- * Ascertain if this IID interrupt occurred before or after a STO
- * interrupt. Since the interrupt handling code now leaves
- * DSP unmodified until _after_ all stacked interrupts have been
- * processed, reading the DSP returns the original DSP register.
- * This means that if dsp lies between the select code and the
- * message out following the selection code (where the IID interrupt
- * would have to have occurred due to the implicit wait for REQ),
- * we have an IID interrupt resulting from a STO condition and
- * can ignore it.
- */
-
- if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
- (dsp <= (hostdata->script + hostdata->E_select_msgout /
- sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
- host->host_no);
- if (hostdata->expecting_iid) {
- hostdata->expecting_iid = 0;
- hostdata->idle = 1;
- if (hostdata->test_running == 2) {
- hostdata->test_running = 0;
- hostdata->test_completed = 3;
- } else if (cmd)
- abnormal_finished (cmd, DID_BAD_TARGET << 16);
- } else {
- hostdata->expecting_sto = 1;
- }
- /*
- * We can't guarantee we'll be able to execute the WAIT DISCONNECT
- * instruction within the 3.4us of bus free and arbitration delay
- * that a target can RESELECT in and assert REQ after we've dropped
- * ACK. If this happens, we'll get an illegal instruction interrupt.
- * Doing away with the WAIT DISCONNECT instructions broke everything,
- * so instead I'll settle for moving one WAIT DISCONNECT a few
- * instructions closer to the CLEAR ACK before it to minimize the
- * chances of this happening, and handle it if it occurs anyway.
- *
- * Simply continue with what we were doing, and control should
- * be transferred to the schedule routine which will ultimately
- * pass control onto the reselection or selection (not yet)
- * code.
- */
- } else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
- SBCL_REQ)) {
- if (!(hostdata->options & OPTION_NO_PRINT_RACE))
- {
- printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
- host->host_no);
- hostdata->options |= OPTION_NO_PRINT_RACE;
- }
- } else {
- printk(KERN_ALERT "scsi%d : invalid instruction\n", host->host_no);
- print_lots (host);
- printk(KERN_ALERT " mail Richard@sleepie.demon.co.uk with ALL\n"
- " boot messages and diagnostic output\n");
- FATAL (host);
- }
- }
-
- /*
- * DSTAT_BF are bus fault errors. DSTAT_800_BF is valid for 710 also.
- */
-
- if (dstat & DSTAT_800_BF) {
- intr_bf (host, cmd);
- }
-
-
- /*
- * DSTAT_SIR interrupts are generated by the execution of
- * the INT instruction. Since the exact values available
- * are determined entirely by the SCSI script running,
- * and are local to a particular script, a unique handler
- * is called for each script.
- */
-
- if (dstat & DSTAT_SIR) {
- if (hostdata->options & OPTION_DEBUG_INTR)
- printk ("scsi%d : DSTAT_SIR\n", host->host_no);
- switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
- case SPECIFIC_INT_NOTHING:
- case SPECIFIC_INT_RESTART:
- break;
- case SPECIFIC_INT_ABORT:
- abort_connected(host);
- break;
- case SPECIFIC_INT_PANIC:
- printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "", 1);
- printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
- FATAL (host);
- break;
- case SPECIFIC_INT_BREAK:
- intr_break (host, cmd);
- break;
- default:
- printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
- print_insn (host, dsp, KERN_ALERT "", 1);
- printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
- tmp);
- FATAL (host);
- }
- }
-}
-
-/*
- * Function : static int print_insn (struct Scsi_Host *host,
- * u32 *insn, int kernel)
- *
- * Purpose : print numeric representation of the instruction pointed
- * to by insn to the debugging or kernel message buffer
- * as appropriate.
- *
- * If desired, a user level program can interpret this
- * information.
- *
- * Inputs : host, insn - host, pointer to instruction, prefix -
- * string to prepend, kernel - use printk instead of debugging buffer.
- *
- * Returns : size, in u32s, of instruction printed.
- */
-
-/*
- * FIXME: should change kernel parameter so that it takes an ENUM
- * specifying severity - either KERN_ALERT or KERN_PANIC so
- * all panic messages are output with the same severity.
- */
-
-static int
-print_insn (struct Scsi_Host *host, const u32 *insn,
- const char *prefix, int kernel) {
- char buf[160], /* Temporary buffer and pointer.  ICKY
- arbitrary length.  */
- *tmp;
- unsigned char dcmd; /* dcmd register for *insn */
- int size;
-
- /*
- * Check to see if the instruction pointer is not bogus before
- * indirecting through it; avoiding red-zone at start of
- * memory.
- *
- * FIXME: icky magic needs to happen here on non-intel boxes which
- * don't have kernel memory mapped in like this. Might be reasonable
- * to use vverify()?
- */
-
- if (virt_to_phys((void *)insn) < PAGE_SIZE ||
- virt_to_phys((void *)(insn + 8)) > virt_to_phys(high_memory) ||
- ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
- virt_to_phys((void *)(insn + 12)) > virt_to_phys(high_memory))) {
- size = 0;
- sprintf (buf, "%s%p: address out of range\n",
- prefix, insn);
- } else {
-/*
- * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
- * it should take const void * as argument.
- */
-#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
- sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
- (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
- insn[0], insn[1], bus_to_virt (insn[1]));
-#else
- /* Remove virtual addresses to reduce output, as they are the same */
- sprintf(buf, "%s0x%x (+%x) : 0x%08x 0x%08x",
- (prefix ? prefix : ""), (u32)insn, ((u32)insn -
- (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4,
- insn[0], insn[1]);
-#endif
- tmp = buf + strlen(buf);
- if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
-#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
- sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
- bus_to_virt(insn[2]));
-#else
- /* Remove virtual addr to reduce output, as it is the same */
- sprintf (tmp, " 0x%08x\n", insn[2]);
-#endif
- size = 3;
- } else {
- sprintf (tmp, "\n");
- size = 2;
- }
- }
-
- if (kernel)
- printk ("%s", buf);
-#ifdef NCR_DEBUG
- else {
- size_t len = strlen(buf);
- debugger_kernel_write(host, buf, len);
- }
-#endif
- return size;
-}
-
-/*
- * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
- *
- * Purpose : Abort an errant SCSI command, doing all necessary
- * cleanup of the issue_queue, running_list, shared Linux/NCR
- * dsa issue and reconnect queues.
- *
- * Inputs : cmd - command to abort, code - entire result field
- *
- * Returns : 0 on success, -1 on failure.
- */
-
-int
-NCR53c7xx_abort (Scsi_Cmnd *cmd) {
- NCR53c7x0_local_declare();
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
- host->hostdata[0] : NULL;
- unsigned long flags;
- struct NCR53c7x0_cmd *curr, **prev;
- Scsi_Cmnd *me, **last;
-#if 0
- static long cache_pid = -1;
-#endif
-
-
- if (!host) {
- printk ("Bogus SCSI command pid %ld; no host structure\n",
- cmd->pid);
- return SCSI_ABORT_ERROR;
- } else if (!hostdata) {
- printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
- return SCSI_ABORT_ERROR;
- }
- NCR53c7x0_local_setup(host);
-
-/*
- * CHECK : I don't think that reading ISTAT will unstack any interrupts,
- * since we need to write the INTF bit to clear it, and SCSI/DMA
- * interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
- *
- * See that this is the case. Appears to be correct on the 710, at least.
- *
- * I suspect that several of our failures may be coming from a new fatal
- * interrupt (possibly due to a phase mismatch) happening after we've left
- * the interrupt handler, but before the PIC has had the interrupt condition
- * cleared.
- */
-
- if (NCR53c7x0_read8(hostdata->istat) & (ISTAT_DIP|ISTAT_SIP)) {
- printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
- cmd->pid);
- NCR53c7x0_intr (host->irq, host);
- return SCSI_ABORT_BUSY;
- }
-
- local_irq_save(flags);
-#if 0
- if (cache_pid == cmd->pid)
- panic ("scsi%d : bloody fetus %d\n", host->host_no, cmd->pid);
- else
- cache_pid = cmd->pid;
-#endif
-
-
-/*
- * The command could be hiding in the issue_queue. This would be very
- * nice, as commands can't be moved from the high level driver's issue queue
- * into the shared queue until an interrupt routine is serviced, and this
- * moving is atomic.
- *
- * If this is the case, we don't have to worry about anything - we simply
- * pull the command out of the old queue, and call it aborted.
- */
-
- for (me = (Scsi_Cmnd *) hostdata->issue_queue,
- last = (Scsi_Cmnd **) &(hostdata->issue_queue);
- me && me != cmd; last = (Scsi_Cmnd **)&(me->SCp.ptr),
- me = (Scsi_Cmnd *)me->SCp.ptr);
-
- if (me) {
- *last = (Scsi_Cmnd *) me->SCp.ptr;
- if (me->host_scribble) {
- ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
- hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
- me->host_scribble = NULL;
- }
- cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd);
- printk ("scsi%d : found command %ld in Linux issue queue\n",
- host->host_no, me->pid);
- local_irq_restore(flags);
- run_process_issue_queue();
- return SCSI_ABORT_SUCCESS;
- }
-
-/*
- * That failing, the command could be in our list of already executing
- * commands. If this is the case, drastic measures are called for.
- */
-
- for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
- prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
- curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
- &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);
-
- if (curr) {
- if ((curr->result & 0xff) != 0xff && (curr->result & 0xff00) != 0xff00) {
- cmd->result = curr->result;
- if (prev)
- *prev = (struct NCR53c7x0_cmd *) curr->next;
- curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
- cmd->host_scribble = NULL;
- hostdata->free = curr;
- cmd->scsi_done(cmd);
- printk ("scsi%d : found finished command %ld in running list\n",
- host->host_no, cmd->pid);
- local_irq_restore(flags);
- return SCSI_ABORT_NOT_RUNNING;
- } else {
- printk ("scsi%d : DANGER : command running, can not abort.\n",
- cmd->device->host->host_no);
- local_irq_restore(flags);
- return SCSI_ABORT_BUSY;
- }
- }
-
-/*
- * And if we couldn't find it in any of our queues, it must have been
- * a dropped interrupt.
- */
-
- curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
- if (curr) {
- curr->next = hostdata->free;
- hostdata->free = curr;
- cmd->host_scribble = NULL;
- }
-
- if (curr == NULL || ((curr->result & 0xff00) == 0xff00) ||
- ((curr->result & 0xff) == 0xff)) {
- printk ("scsi%d : did this command ever run?\n", host->host_no);
- cmd->result = DID_ABORT << 16;
- } else {
- printk ("scsi%d : probably lost INTFLY, normal completion\n",
- host->host_no);
- cmd->result = curr->result;
-/*
- * FIXME : We need to add an additional flag which indicates if a
- * command was ever counted as BUSY, so if we end up here we can
- * decrement the busy count if and only if it is necessary.
- */
- --hostdata->busy[cmd->device->id][cmd->device->lun];
- }
- local_irq_restore(flags);
- cmd->scsi_done(cmd);
-
-/*
- * We may need to run process_issue_queue here, since termination of this
- * command could allow another queued command to execute.
- */
- return SCSI_ABORT_NOT_RUNNING;
-}
-
-/*
- * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd)
- *
- * Purpose : perform a hard reset of the SCSI bus and NCR
- * chip.
- *
- * Inputs : cmd - command which caused the SCSI RESET
- *
- * Returns : 0 on success.
- */
-
-int
-NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- int found = 0;
- struct NCR53c7x0_cmd * c;
- Scsi_Cmnd *tmp;
- /*
- * When we call scsi_done(), it's going to wake up anything sleeping on the
- * resources which were in use by the aborted commands, and we'll start to
- * get new commands.
- *
- * We can't let this happen until after we've re-initialized the driver
- * structures, and can't reinitialize those structures until after we've
- * dealt with their contents.
- *
- * So, we need to find all of the commands which were running, stick
- * them on a linked list of completed commands (we'll use the SCp.buffer
- * pointer, as return_outstanding_commands does), do our reinitialization,
- * and then call the done function for each command.
- */
- Scsi_Cmnd *nuke_list = NULL;
- struct Scsi_Host *host = cmd->device->host;
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
-
- NCR53c7x0_local_setup(host);
- local_irq_save(flags);
- ncr_halt (host);
- print_lots (host);
- dump_events (host, 30);
- ncr_scsi_reset (host);
- for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
- 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
- if (tmp == cmd) {
- found = 1;
- break;
- }
-
- /*
- * If we didn't find the command which caused this reset in our running
- * list, then we've lost it. See that it terminates normally anyway.
- */
- if (!found) {
- c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
- if (c) {
- cmd->host_scribble = NULL;
- c->next = hostdata->free;
- hostdata->free = c;
- } else
- printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
- cmd->SCp.buffer = (struct scatterlist *) nuke_list;
- nuke_list = cmd;
- }
-
- NCR53c7x0_driver_init (host);
- hostdata->soft_reset (host);
- if (hostdata->resets == 0)
- disable(host);
- else if (hostdata->resets != -1)
- --hostdata->resets;
- local_irq_restore(flags);
- for (; nuke_list; nuke_list = tmp) {
- tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
- nuke_list->result = DID_RESET << 16;
- nuke_list->scsi_done (nuke_list);
- }
- return SCSI_RESET_SUCCESS;
-}
-
-/*
- * The NCR SDMS bios follows Annex A of the SCSI-CAM draft, and
- * therefore shares the scsicam_bios_param function.
- */
-
-/*
- * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
- *
- * Purpose : convert instructions stored at NCR pointer into data
- * pointer offset.
- *
- * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
- * DSP, or saved data pointer.
- *
- * Returns : offset on success, -1 on failure.
- */
-
-
-static int
-insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) cmd->device->host->hostdata[0];
- struct NCR53c7x0_cmd *ncmd =
- (struct NCR53c7x0_cmd *) cmd->host_scribble;
- int offset = 0, buffers;
- struct scatterlist *segment;
- char *ptr;
- int found = 0;
-
-/*
- * With the current code implementation, if the insn is inside dynamically
- * generated code, the data pointer will be the instruction preceding
- * the next transfer segment.
- */
-
- if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
- ((insn >= ncmd->data_transfer_start &&
- insn < ncmd->data_transfer_end) ||
- (insn >= ncmd->residual &&
- insn < (ncmd->residual +
- sizeof(ncmd->residual))))) {
- ptr = bus_to_virt(insn[3]);
-
- if ((buffers = cmd->use_sg)) {
- for (offset = 0,
- segment = (struct scatterlist *) cmd->request_buffer;
- buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
- (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
- --buffers, offset += segment->length, ++segment)
-#if 0
- printk("scsi%d: comparing 0x%p to 0x%p\n",
- cmd->device->host->host_no, saved, page_address(segment->page+segment->offset));
-#else
- ;
-#endif
- offset += ptr - ((char *)page_address(segment->page)+segment->offset);
- } else {
- found = 1;
- offset = ptr - (char *) (cmd->request_buffer);
- }
- } else if ((insn >= hostdata->script +
- hostdata->E_data_transfer / sizeof(u32)) &&
- (insn <= hostdata->script +
- hostdata->E_end_data_transfer / sizeof(u32))) {
- found = 1;
- offset = 0;
- }
- return found ? offset : -1;
-}
-
-
-
-/*
- * Function : void print_progress (Scsi_Cmnd *cmd)
- *
- * Purpose : print the current location of the saved data pointer
- *
- * Inputs : cmd - command we are interested in
- *
- */
-
-static void
-print_progress (Scsi_Cmnd *cmd) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_cmd *ncmd =
- (struct NCR53c7x0_cmd *) cmd->host_scribble;
- int offset, i;
- char *where;
- u32 *ptr;
- NCR53c7x0_local_setup (cmd->device->host);
-
- if (check_address ((unsigned long) ncmd,sizeof (struct NCR53c7x0_cmd)) == 0)
- {
- printk("\nNCR53c7x0_cmd fields:\n");
- printk(" bounce.len=0x%x, addr=0x%0x, buf[]=0x%02x %02x %02x %02x\n",
- ncmd->bounce.len, ncmd->bounce.addr, ncmd->bounce.buf[0],
- ncmd->bounce.buf[1], ncmd->bounce.buf[2], ncmd->bounce.buf[3]);
- printk(" result=%04x, cdb[0]=0x%02x\n", ncmd->result, ncmd->cmnd[0]);
- }
-
- for (i = 0; i < 2; ++i) {
- if (check_address ((unsigned long) ncmd,
- sizeof (struct NCR53c7x0_cmd)) == -1)
- continue;
- if (!i) {
- where = "saved";
- ptr = bus_to_virt(ncmd->saved_data_pointer);
- } else {
- where = "active";
- ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
- NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
- sizeof(u32));
- }
- offset = insn_to_offset (cmd, ptr);
-
- if (offset != -1)
- printk ("scsi%d : %s data pointer at offset %d\n",
- cmd->device->host->host_no, where, offset);
- else {
- int size;
- printk ("scsi%d : can't determine %s data pointer offset\n",
- cmd->device->host->host_no, where);
- if (ncmd) {
- size = print_insn (cmd->device->host,
- bus_to_virt(ncmd->saved_data_pointer), "", 1);
- print_insn (cmd->device->host,
- bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
- "", 1);
- }
- }
- }
-}
-
-
-static void
-print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int i, len;
- char *ptr;
- Scsi_Cmnd *cmd;
-
- if (check_address ((unsigned long) dsa, hostdata->dsa_end -
- hostdata->dsa_start) == -1) {
- printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
- return;
- }
- printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
- " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
- prefix ? prefix : "",
- host->host_no, virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
- dsa[hostdata->dsa_msgout / sizeof(u32)],
- dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
- bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));
-
- /*
- * Only print messages if they're sane in length so we don't
- * blow the kernel printk buffer on something which won't buy us
- * anything.
- */
-
- if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
- sizeof (hostdata->free->select))
- for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
- ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
- i > 0 && !check_address ((unsigned long) ptr, 1);
- ptr += len, i -= len) {
- printk(" ");
- len = spi_print_msg(ptr);
- printk("\n");
- if (!len)
- break;
- }
-
- printk(" + %d : select_indirect = 0x%x\n",
- hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
- cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
- printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
- (u32) virt_to_bus(cmd));
- /* XXX Maybe we should access cmd->host_scribble->result here. RGH */
- if (cmd) {
- printk(" result = 0x%x, target = %d, lun = %d, cmd = ",
- cmd->result, cmd->device->id, cmd->device->lun);
- __scsi_print_command(cmd->cmnd);
- } else
- printk("\n");
- printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
- dsa[hostdata->dsa_next / sizeof(u32)]);
- if (cmd) {
- printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
- " script : ",
- host->host_no, cmd->device->id,
- hostdata->sync[cmd->device->id].sxfer_sanity,
- hostdata->sync[cmd->device->id].scntl3_sanity);
- for (i = 0; i < (sizeof(hostdata->sync[cmd->device->id].script) / 4); ++i)
- printk ("0x%x ", hostdata->sync[cmd->device->id].script[i]);
- printk ("\n");
- print_progress (cmd);
- }
-}
-/*
- * Function : void print_queues (Scsi_Host *host)
- *
- * Purpose : print the contents of the NCR issue and reconnect queues
- *
- * Inputs : host - SCSI host we are interested in
- *
- */
-
-static void
-print_queues (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- u32 *dsa, *next_dsa;
- volatile u32 *ncrcurrent;
- int left;
- Scsi_Cmnd *cmd, *next_cmd;
- unsigned long flags;
-
- printk ("scsi%d : issue queue\n", host->host_no);
-
- for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
- left >= 0 && cmd;
- cmd = next_cmd) {
- next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
- local_irq_save(flags);
- if (cmd->host_scribble) {
- if (check_address ((unsigned long) (cmd->host_scribble),
- sizeof (cmd->host_scribble)) == -1)
- printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
- host->host_no, cmd->pid);
- /* print_dsa does sanity check on address, no need to check */
- else
- print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
- -> dsa, "");
- } else
- printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
- host->host_no, cmd->pid, cmd->device->id, cmd->device->lun);
- local_irq_restore(flags);
- }
-
- if (left <= 0) {
- printk ("scsi%d : loop detected in issue queue\n",
- host->host_no);
- }
-
-    /*
-     * Traverse the NCR reconnect and start DSA structures, printing out
-     * each element until we hit the end or detect a loop.  Currently,
-     * the reconnect structure is a linked list and the start structure
-     * is an array.  Eventually, the start structure will become a
-     * list as well, since that simplifies the code.
-     */
-
- printk ("scsi%d : schedule dsa array :\n", host->host_no);
- for (left = host->can_queue, ncrcurrent = hostdata->schedule;
- left > 0; ncrcurrent += 2, --left)
- if (ncrcurrent[0] != hostdata->NOP_insn)
-/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
- print_dsa (host, bus_to_virt (ncrcurrent[1] -
- (hostdata->E_dsa_code_begin -
- hostdata->E_dsa_code_template)), "");
- printk ("scsi%d : end schedule dsa array\n", host->host_no);
-
- printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
-
- for (left = host->can_queue,
- dsa = bus_to_virt (hostdata->reconnect_dsa_head);
- left >= 0 && dsa;
- dsa = next_dsa) {
- local_irq_save(flags);
- if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
- printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
- dsa);
- next_dsa = NULL;
- }
- else
- {
- next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
- print_dsa (host, dsa, "");
- }
- local_irq_restore(flags);
- }
- printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
- if (left < 0)
- printk("scsi%d: possible loop in ncr reconnect list\n",
- host->host_no);
-}
-
-static void
-print_lots (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
- unsigned char dcmd, sbcl;
- int i, size;
- NCR53c7x0_local_setup(host);
-
- if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
- dbc_dcmd = NCR53c7x0_read32(DBC_REG);
- dcmd = (dbc_dcmd & 0xff000000) >> 24;
- dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
- dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
- sbcl = NCR53c7x0_read8 (SBCL_REG);
-
- /*
- * For the 53c710, the following will report value 0 for SCNTL3
- * and STEST0 - we don't have these registers.
- */
- printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
- " DSA=0x%lx (virt 0x%p)\n"
- " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
- " SXFER=0x%x, SCNTL3=0x%x\n"
- " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
- " SCRATCH=0x%x, saved2_dsa=0x%0lx\n",
- host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
- bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
- virt_to_bus(dsa), dsa,
- NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
- bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
- (int) NCR53c7x0_read8(hostdata->dmode),
- (int) NCR53c7x0_read8(SXFER_REG),
- ((hostdata->chip / 100) == 8) ?
- (int) NCR53c7x0_read8(SCNTL3_REG_800) : 0,
- (sbcl & SBCL_BSY) ? "BSY " : "",
- (sbcl & SBCL_SEL) ? "SEL " : "",
- (sbcl & SBCL_REQ) ? "REQ " : "",
- sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
- SSTAT1_REG : SSTAT2_REG)),
- (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
- SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
- ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (STEST0_REG_800) :
- NCR53c7x0_read32(SCRATCHA_REG_800),
- hostdata->saved2_dsa);
- printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
- virt_to_bus(dsp), dsp);
- for (i = 6; i > 0; --i, dsp += size)
- size = print_insn (host, dsp, "", 1);
- if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
- if ((hostdata->chip / 100) == 8)
- printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
- host->host_no, NCR53c7x0_read8 (SDID_REG_800),
- NCR53c7x0_read8 (SSID_REG_800));
- else
- printk ("scsi%d : connected (SDID=0x%x)\n",
- host->host_no, NCR53c7x0_read8 (SDID_REG_700));
- print_dsa (host, dsa, "");
- }
-
-#if 1
- print_queues (host);
-#endif
- }
-}
-
-/*
- * Function : static int shutdown (struct Scsi_Host *host)
- *
- * Purpose : does a clean (we hope) shutdown of the NCR SCSI
- *	chip.  Use prior to dumping core, unloading the NCR driver, etc.
- *
- * Returns : 0 on success
- */
-static int
-shutdown (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- NCR53c7x0_local_setup(host);
- local_irq_save(flags);
-/* Get in a state where we can reset the SCSI bus */
- ncr_halt (host);
- ncr_scsi_reset (host);
- hostdata->soft_reset(host);
-
- disable (host);
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * Function : void ncr_scsi_reset (struct Scsi_Host *host)
- *
- * Purpose : reset the SCSI bus.
- */
-
-static void
-ncr_scsi_reset (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- NCR53c7x0_local_setup(host);
- local_irq_save(flags);
- NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
- udelay(25); /* Minimum amount of time to assert RST */
- NCR53c7x0_write8(SCNTL1_REG, 0);
- local_irq_restore(flags);
-}
-
-/*
- * Function : void hard_reset (struct Scsi_Host *host)
- *
- * Purpose : reset the SCSI bus and chip, then re-initialize the driver
- *	structures (see the calls to NCR53c7x0_driver_init and soft_reset
- *	below).
- */
-
-static void
-hard_reset (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- local_irq_save(flags);
- ncr_scsi_reset(host);
- NCR53c7x0_driver_init (host);
- if (hostdata->soft_reset)
- hostdata->soft_reset (host);
- local_irq_restore(flags);
-}
-
-
-/*
- * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
- * int free, int issue)
- *
- * Purpose : return a linked list of Scsi_Cmnd structures which had
- *	propagated below the Linux issue queue level (we use the SCp.buffer
- *	field as the next pointer so we don't perturb hostdata, and we don't
- *	use a field of the NCR53c7x0_cmd structure since we may not have
- *	allocated one for the command causing the reset).  If free is set,
- *	free the NCR53c7x0_cmd structures which are associated with
- *	the Scsi_Cmnd structures, and clean up any internal
- *	NCR lists that the commands were on.  If issue is set,
- *	also return commands in the issue queue.
- *
- * Returns : linked list of commands
- *
- * NOTE : the caller should ensure that the NCR chip is halted
- *	if the free flag is set.
- */
-
-static Scsi_Cmnd *
-return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- struct NCR53c7x0_cmd *c;
- int i;
- u32 *ncrcurrent;
- Scsi_Cmnd *list = NULL, *tmp;
- for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
- c = (struct NCR53c7x0_cmd *) c->next) {
-	if (c->cmd->SCp.buffer) {
-	    printk ("scsi%d : loop detected in running list!\n", host->host_no);
-	    break;
-	}
-
- c->cmd->SCp.buffer = (struct scatterlist *) list;
- list = c->cmd;
- if (free) {
- c->next = hostdata->free;
- hostdata->free = c;
- }
- }
-
- if (free) {
- for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
- i < host->can_queue; ++i, ncrcurrent += 2) {
- ncrcurrent[0] = hostdata->NOP_insn;
- ncrcurrent[1] = 0xdeadbeef;
- }
- hostdata->ncrcurrent = NULL;
- }
-
- if (issue) {
-	for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp;
-		tmp = (Scsi_Cmnd *) tmp->SCp.ptr) {
- if (tmp->SCp.buffer) {
- printk ("scsi%d : loop detected in issue queue!\n",
- host->host_no);
- break;
- }
- tmp->SCp.buffer = (struct scatterlist *) list;
- list = tmp;
- }
- if (free)
- hostdata->issue_queue = NULL;
-
- }
- return list;
-}
-
-/*
- * Function : static int disable (struct Scsi_Host *host)
- *
- * Purpose : disables the given NCR host, causing all commands
- * to return a driver error. Call this so we can unload the
- * module during development and try again. Eventually,
- * we should be able to find clean workarounds for these
- * problems.
- *
- * Inputs : host - hostadapter to twiddle
- *
- * Returns : 0 on success.
- */
-
-static int
-disable (struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- unsigned long flags;
- Scsi_Cmnd *nuke_list, *tmp;
- local_irq_save(flags);
- if (hostdata->state != STATE_HALTED)
- ncr_halt (host);
- nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
- hard_reset (host);
- hostdata->state = STATE_DISABLED;
- local_irq_restore(flags);
- printk ("scsi%d : nuking commands\n", host->host_no);
- for (; nuke_list; nuke_list = tmp) {
- tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
- nuke_list->result = DID_ERROR << 16;
- nuke_list->scsi_done(nuke_list);
- }
- printk ("scsi%d : done. \n", host->host_no);
- printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
- host->host_no);
- return 0;
-}
-
-/*
- * Function : static int ncr_halt (struct Scsi_Host *host)
- *
- * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
- *
- * Inputs : host - SCSI chip to halt
- *
- * Returns : 0 on success
- */
-
-static int
-ncr_halt (struct Scsi_Host *host) {
- NCR53c7x0_local_declare();
- unsigned long flags;
- unsigned char istat, tmp;
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- int stage;
- NCR53c7x0_local_setup(host);
-
- local_irq_save(flags);
- /* Stage 0 : eat all interrupts
- Stage 1 : set ABORT
- Stage 2 : eat all but abort interrupts
- Stage 3 : eat all interrupts
- */
- for (stage = 0;;) {
- if (stage == 1) {
- NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
- ++stage;
- }
- istat = NCR53c7x0_read8 (hostdata->istat);
- if (istat & ISTAT_SIP) {
- tmp = NCR53c7x0_read8(SSTAT0_REG);
- } else if (istat & ISTAT_DIP) {
- tmp = NCR53c7x0_read8(DSTAT_REG);
- if (stage == 2) {
- if (tmp & DSTAT_ABRT) {
- NCR53c7x0_write8(hostdata->istat, 0);
- ++stage;
- } else {
- printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
- host->host_no);
- disable (host);
- }
- }
- }
- if (!(istat & (ISTAT_SIP|ISTAT_DIP))) {
- if (stage == 0)
- ++stage;
- else if (stage == 3)
- break;
- }
- }
- hostdata->state = STATE_HALTED;
- local_irq_restore(flags);
-#if 0
- print_lots (host);
-#endif
- return 0;
-}
-
-/*
- * Function: event_name (int event)
- *
- * Purpose: map event enum into user-readable strings.
- */
-
-static const char *
-event_name (int event) {
- switch (event) {
- case EVENT_NONE: return "none";
- case EVENT_ISSUE_QUEUE: return "to issue queue";
- case EVENT_START_QUEUE: return "to start queue";
- case EVENT_SELECT: return "selected";
- case EVENT_DISCONNECT: return "disconnected";
- case EVENT_RESELECT: return "reselected";
- case EVENT_COMPLETE: return "completed";
- case EVENT_IDLE: return "idle";
- case EVENT_SELECT_FAILED: return "select failed";
- case EVENT_BEFORE_SELECT: return "before select";
- case EVENT_RESELECT_FAILED: return "reselect failed";
- default: return "unknown";
- }
-}
-
-/*
- * Function : void dump_events (struct Scsi_Host *host, int count)
- *
- * Purpose : print the last count events which have occurred.
- */
-static void
-dump_events (struct Scsi_Host *host, int count) {
- struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
- host->hostdata[0];
- struct NCR53c7x0_event event;
- int i;
- unsigned long flags;
- if (hostdata->events) {
- if (count > hostdata->event_size)
- count = hostdata->event_size;
- for (i = hostdata->event_index; count > 0;
- i = (i ? i - 1 : hostdata->event_size -1), --count) {
-/*
- * By copying the event we're currently examining with interrupts
- * disabled, we can do multiple printk(), etc. operations and
- * still be guaranteed that they're happening on the same
- * event structure.
- */
- local_irq_save(flags);
-#if 0
- event = hostdata->events[i];
-#else
- memcpy ((void *) &event, (void *) &(hostdata->events[i]),
- sizeof(event));
-#endif
-
- local_irq_restore(flags);
- printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
- host->host_no, event_name (event.event), count,
- (long) event.time.tv_sec, (long) event.time.tv_usec,
- event.target, event.lun);
- if (event.dsa)
- printk (" event for dsa 0x%lx (virt 0x%p)\n",
- virt_to_bus(event.dsa), event.dsa);
- if (event.pid != -1) {
- printk (" event for pid %ld ", event.pid);
- __scsi_print_command (event.cmnd);
- }
- }
- }
-}
-
-/*
- * Function: check_address
- *
- * Purpose: Check to see if a possibly corrupt pointer will fault the
- * kernel.
- *
- * Inputs: addr - address; size - size of area
- *
- * Returns: 0 if area is OK, -1 on error.
- *
- * NOTES: should be implemented in terms of vverify on kernels
- * that have it.
- */
-
-static int
-check_address (unsigned long addr, int size) {
-    return ((virt_to_phys((void *)addr) < PAGE_SIZE ||
-	     virt_to_phys((void *)(addr + size)) > virt_to_phys(high_memory))
-	    ? -1 : 0);
-}
-
-#ifdef MODULE
-int
-NCR53c7x0_release(struct Scsi_Host *host) {
- struct NCR53c7x0_hostdata *hostdata =
- (struct NCR53c7x0_hostdata *) host->hostdata[0];
- struct NCR53c7x0_cmd *cmd, *tmp;
- shutdown (host);
- if (host->irq != SCSI_IRQ_NONE)
- {
- int irq_count;
- struct Scsi_Host *tmp;
- for (irq_count = 0, tmp = first_host; tmp; tmp = tmp->next)
- if (tmp->hostt == the_template && tmp->irq == host->irq)
- ++irq_count;
- if (irq_count == 1)
- free_irq(host->irq, NULL);
- }
- if (host->dma_channel != DMA_NONE)
- free_dma(host->dma_channel);
- if (host->io_port)
- release_region(host->io_port, host->n_io_port);
-
- for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd; cmd = tmp,
- --hostdata->num_cmds) {
- tmp = (struct NCR53c7x0_cmd *) cmd->next;
- /*
- * If we're going to loop, try to stop it to get a more accurate
- * count of the leaked commands.
- */
- cmd->next = NULL;
- if (cmd->free)
- cmd->free ((void *) cmd->real, cmd->size);
- }
- if (hostdata->num_cmds)
- printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
- host->host_no, hostdata->num_cmds);
-
- vfree(hostdata->events);
-
- /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
- * XXX may be invalid (CONFIG_060_WRITETHROUGH)
- */
- kernel_set_cachemode((void *)hostdata, 8192, IOMAP_FULL_CACHING);
-	free_pages ((unsigned long) hostdata, 1);
- return 1;
-}
-#endif /* def MODULE */
diff --git a/drivers/scsi/53c7xx.h b/drivers/scsi/53c7xx.h
deleted file mode 100644
index 218f3b901537..000000000000
--- a/drivers/scsi/53c7xx.h
+++ /dev/null
@@ -1,1608 +0,0 @@
-/*
- * 53c710 driver.  Modified from Drew Eckhardt's driver
- * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
- *
- * I have left the code for the 53c8xx family in here, because it didn't
- * seem worth removing it. The possibility of IO_MAPPED chips rather
- * than MEMORY_MAPPED remains, in case someone wants to add support for
- * 53c710 chips on Intel PCs (some older machines have them on the
- * motherboard).
- *
- * NOTE THERE MAY BE PROBLEMS WITH CASTS IN read8 AND Co.
- */
-
-/*
- * NCR 53c{7,8}0x0 driver, header file
- *
- * Sponsored by
- * iX Multiuser Multitasking Magazine
- * Hannover, Germany
- * hm@ix.de
- *
- * Copyright 1993, 1994, 1995 Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@PoohSticks.ORG
- * +1 (303) 786-7975
- *
- * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
- *
- * PRE-ALPHA
- *
- * For more information, please consult
- *
- * NCR 53C700/53C700-66
- * SCSI I/O Processor
- * Data Manual
- *
- * NCR 53C810
- * PCI-SCSI I/O Processor
- * Data Manual
- *
- * NCR Microelectronics
- * 1635 Aeroplaza Drive
- * Colorado Springs, CO 80916
- * +1 (719) 578-3400
- *
- * Toll free literature number
- * +1 (800) 334-5454
- *
- */
-
-#ifndef NCR53c710_H
-#define NCR53c710_H
-
-#ifndef HOSTS_C
-
-/* SCSI control 0 rw, default = 0xc0 */
-#define SCNTL0_REG 0x00
-#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
-#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
-#define SCNTL0_STRT 0x20 /* Start Sequence */
-#define SCNTL0_WATN 0x10 /* Select with ATN */
-#define SCNTL0_EPC 0x08 /* Enable parity checking */
-/* Bit 2 is reserved on 800 series chips */
-#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
-#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
-#define SCNTL0_TRG 0x01 /* Target mode */
-
-/* SCSI control 1 rw, default = 0x00 */
-
-#define SCNTL1_REG 0x01
-#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
-#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
-#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
- and reselection */
-#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
- target mode only */
-#define SCNTL1_CON 0x10 /* Connected */
-#define SCNTL1_RST 0x08 /* SCSI RST/ */
-#define SCNTL1_AESP 0x04 /* Force bad parity */
-#define SCNTL1_SND_700 0x02 /* Start SCSI send */
-#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
- arbitration immediately after
- busfree is detected */
-#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
-#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
-
-/* SCSI control 2 rw, */
-
-#define SCNTL2_REG_800 0x02
-#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
-
-/* SCSI control 3 rw */
-
-#define SCNTL3_REG_800 0x03
-#define SCNTL3_800_SCF_SHIFT 4
-#define SCNTL3_800_SCF_MASK 0x70
-#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
-#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
-#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
- /* 0x20 = SCLK/1.5
- 0x30 = SCLK/2
- 0x40 = SCLK/3 */
-
-#define SCNTL3_800_CCF_SHIFT 0
-#define SCNTL3_800_CCF_MASK 0x07
-#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
-#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
-#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
- 0x03 37.51 - 50
- 0x04 50.01 - 66 */
-
-/*
- * SCSI destination ID rw - the appropriate bit is set for the selected
- * target ID. This is written by the SCSI SCRIPTS processor.
- * default = 0x00
- */
-#define SDID_REG_700 0x02
-#define SDID_REG_800 0x06
-
-#define GP_REG_800 0x07 /* General purpose IO */
-#define GP_800_IO1 0x02
-#define GP_800_IO2 0x01
-
-/* SCSI interrupt enable rw, default = 0x00 */
-#define SIEN_REG_700 0x03
-#define SIEN0_REG_800 0x40
-#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
-#define SIEN_FC 0x40 /* Function complete */
-#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
-#define SIEN_800_SEL 0x20 /* Selected */
-#define SIEN_700_SEL 0x10 /* Selected or reselected */
-#define SIEN_800_RESEL 0x10 /* Reselected */
-#define SIEN_SGE 0x08 /* SCSI gross error */
-#define SIEN_UDC 0x04 /* Unexpected disconnect */
-#define SIEN_RST 0x02 /* SCSI RST/ received */
-#define SIEN_PAR 0x01 /* Parity error */
-
-/*
- * SCSI chip ID rw
- * NCR53c700 :
- * When arbitrating, the highest bit is used, when reselection or selection
- * occurs, the chip responds to all IDs for which a bit is set.
- * default = 0x00
- * NCR53c810 :
- * Uses bit mapping
- */
-#define SCID_REG 0x04
-/* Bit 7 is reserved on 800 series chips */
-#define SCID_800_RRE 0x40 /* Enable response to reselection */
-#define SCID_800_SRE 0x20 /* Enable response to selection */
-/* Bits four and three are reserved on 800 series chips */
-#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
-
-/* SCSI transfer rw, default = 0x00 */
-#define SXFER_REG 0x05
-#define SXFER_DHP 0x80 /* Disable halt on parity */
-
-#define SXFER_TP2 0x40 /* Transfer period msb */
-#define SXFER_TP1 0x20
-#define SXFER_TP0 0x10 /* lsb */
-#define SXFER_TP_MASK 0x70
-/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
-#define SXFER_TP_SHIFT 5
-#define SXFER_TP_4 0x00 /* Divisors */
-#define SXFER_TP_5 0x10<<1
-#define SXFER_TP_6 0x20<<1
-#define SXFER_TP_7 0x30<<1
-#define SXFER_TP_8 0x40<<1
-#define SXFER_TP_9 0x50<<1
-#define SXFER_TP_10 0x60<<1
-#define SXFER_TP_11 0x70<<1
-
-#define SXFER_MO3 0x08 /* Max offset msb */
-#define SXFER_MO2 0x04
-#define SXFER_MO1 0x02
-#define SXFER_MO0 0x01 /* lsb */
-#define SXFER_MO_MASK 0x0f
-#define SXFER_MO_SHIFT 0
-
-/*
- * SCSI output data latch rw
- * The contents of this register are driven onto the SCSI bus when
- * the Assert Data Bus bit of the SCNTL1 register is set and
- * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
- */
-#define SODL_REG_700 0x06
-#define SODL_REG_800 0x54
-
-
-/*
- * SCSI output control latch rw, default = 0
- * Note that when the chip is being manually programmed as an initiator,
- * the MSG, CD, and IO bits must be set correctly for the phase the target
- * is driving the bus in. Otherwise no data transfer will occur due to
- * phase mismatch.
- */
-
-#define SOCL_REG 0x07
-#define SOCL_REQ 0x80 /* REQ */
-#define SOCL_ACK 0x40 /* ACK */
-#define SOCL_BSY 0x20 /* BSY */
-#define SOCL_SEL 0x10 /* SEL */
-#define SOCL_ATN 0x08 /* ATN */
-#define SOCL_MSG 0x04 /* MSG */
-#define SOCL_CD 0x02 /* C/D */
-#define SOCL_IO 0x01 /* I/O */
-
-/*
- * SCSI first byte received latch ro
- * This register contains the first byte received during a block MOVE
- * SCSI SCRIPTS instruction, including
- *
- * Initiator mode Target mode
- * Message in Command
- * Status Message out
- * Data in Data out
- *
- * It also contains the selecting or reselecting device's ID and our
- * ID.
- *
- * Note that this is the register the various IF conditionals can
- * operate on.
- */
-#define SFBR_REG 0x08
-
-/*
- * SCSI input data latch ro
- * In initiator mode, data is latched into this register on the rising
- * edge of REQ/. In target mode, data is latched on the rising edge of
- * ACK/
- */
-#define SIDL_REG_700 0x09
-#define SIDL_REG_800 0x50
-
-/*
- * SCSI bus data lines ro
- * This register reflects the instantaneous status of the SCSI data
- * lines. Note that SCNTL0 must be set to disable parity checking,
- * otherwise reading this register will latch new parity.
- */
-#define SBDL_REG_700 0x0a
-#define SBDL_REG_800 0x58
-
-#define SSID_REG_800 0x0a
-#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
-#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
-
-
-/*
- * SCSI bus control lines rw,
- * instantaneous readout of control lines
- */
-#define SBCL_REG 0x0b
-#define SBCL_REQ 0x80 /* REQ ro */
-#define SBCL_ACK 0x40 /* ACK ro */
-#define SBCL_BSY 0x20 /* BSY ro */
-#define SBCL_SEL 0x10 /* SEL ro */
-#define SBCL_ATN 0x08 /* ATN ro */
-#define SBCL_MSG 0x04 /* MSG ro */
-#define SBCL_CD 0x02 /* C/D ro */
-#define SBCL_IO 0x01 /* I/O ro */
-#define SBCL_PHASE_CMDOUT SBCL_CD
-#define SBCL_PHASE_DATAIN SBCL_IO
-#define SBCL_PHASE_DATAOUT 0
-#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
-#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
-#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
-#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
-/*
- * Synchronous SCSI Clock Control bits
- * 0 - set by DCNTL
- * 1 - SCLK / 1.0
- * 2 - SCLK / 1.5
- * 3 - SCLK / 2.0
- */
-#define SBCL_SSCF1 0x02 /* wo, -66 only */
-#define SBCL_SSCF0 0x01 /* wo, -66 only */
-#define SBCL_SSCF_MASK 0x03
-
-/*
- * XXX note : when reading the DSTAT and STAT registers to clear interrupts,
- * insure that 10 clocks elapse between the two
- */
-/* DMA status ro */
-#define DSTAT_REG 0x0c
-#define DSTAT_DFE 0x80 /* DMA FIFO empty */
-#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
-#define DSTAT_800_BF 0x20 /* Bus Fault */
-#define DSTAT_ABRT 0x10 /* Aborted - set on error */
-#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
-#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
- set when INT instruction is
- executed */
-#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
-#define DSTAT_OPC 0x01 /* Illegal instruction */
-#define DSTAT_800_IID 0x01 /* Same thing, different name */
-
-
-/* NCR53c800 moves this stuff into SIST0 */
-#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
-#define SIST0_REG_800 0x42
-#define SSTAT0_MA 0x80 /* ini : phase mismatch,
- * tgt : ATN/ asserted
- */
-#define SSTAT0_CMP 0x40 /* function complete */
-#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
-#define SIST0_800_SEL 0x20 /* Selected */
-#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
-#define SIST0_800_RSL 0x10 /* Reselected */
-#define SSTAT0_SGE 0x08 /* SCSI gross error */
-#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
-#define SSTAT0_RST 0x02 /* SCSI RST/ received */
-#define SSTAT0_PAR 0x01 /* Parity error */
-
-/* And uses SSTAT0 for what was SSTAT1 */
-
-#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
-#define SSTAT1_ILF 0x80 /* SIDL full */
-#define SSTAT1_ORF 0x40 /* SODR full */
-#define SSTAT1_OLF 0x20 /* SODL full */
-#define SSTAT1_AIP 0x10 /* Arbitration in progress */
-#define SSTAT1_LOA 0x08 /* Lost arbitration */
-#define SSTAT1_WOA 0x04 /* Won arbitration */
-#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
-#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
-
-#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
-#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
-#define SSTAT2_FF2 0x40 /* data FIFO */
-#define SSTAT2_FF1 0x20
-#define SSTAT2_FF0 0x10
-#define SSTAT2_FF_MASK 0xf0
-#define SSTAT2_FF_SHIFT 4
-
-/*
- * Latched signals, latched on the leading edge of REQ/ for initiators,
- * ACK/ for targets.
- */
-#define SSTAT2_SDP 0x08 /* SDP */
-#define SSTAT2_MSG 0x04 /* MSG */
-#define SSTAT2_CD 0x02 /* C/D */
-#define SSTAT2_IO 0x01 /* I/O */
-#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
-#define SSTAT2_PHASE_DATAIN SSTAT2_IO
-#define SSTAT2_PHASE_DATAOUT 0
-#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
-#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
-#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
-#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
-
-
-/* NCR53c700-66 only */
-#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
-/* NCR53c710 and higher */
-#define DSA_REG 0x10 /* DATA structure address */
-
-#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
-#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
-/* 0x80 - 0x04 are reserved */
-#define CTEST0_700_RTRG 0x02 /* Real target mode */
-#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
- * SCSI bus to host, 0 =
- * host to SCSI.
- */
-
-#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
-#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
-#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
-#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
-#define CTEST1_FMT1 0x20
-#define CTEST1_FMT0 0x10
-
-#define CTEST1_FFL3 0x08 /* Identify which bytes lanes are full */
-#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
-#define CTEST1_FFL1 0x02
-#define CTEST1_FFL0 0x01
-
-#define CTEST2_REG_700 0x16 /* Chip test 2 ro */
-#define CTEST2_REG_800 0x1a /* Chip test 2 ro */
-
-#define CTEST2_800_DDIR 0x80 /* 1 = SCSI->host */
-#define CTEST2_800_SIGP 0x40 /* A copy of SIGP in ISTAT.
- Reading this register clears */
-#define CTEST2_800_CIO 0x20 /* Configured as IO */
-#define CTEST2_800_CM 0x10 /* Configured as memory */
-
-/* 0x80 - 0x40 are reserved on 700 series chips */
-#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
- * As an initiator, this bit is
- * one when the synchronous offset
- * is zero, as a target this bit
- * is one when the synchronous
- * offset is at the maximum
- * defined in SXFER
- */
-#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
- * reading CTEST3 unloads a byte
- * from the FIFO and sets this
- */
-#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
- * reading CTEST6 unloads a byte
- * from the FIFO and sets this
- */
-#define CTEST2_TEOP 0x04 /* SCSI true end of process,
- * indicates a totally finished
- * transfer
- */
-#define CTEST2_DREQ 0x02 /* Data request signal */
-/* 0x01 is reserved on 700 series chips */
-#define CTEST2_800_DACK 0x01
-
-/*
- * Chip test 3 ro
- * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
- * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
- * error results if a read is attempted on this register. Also note
- * that 16 and 32 bit reads of this register will cause corruption.
- */
-#define CTEST3_REG_700 0x17
-/* Chip test 3 rw */
-#define CTEST3_REG_800 0x1b
-#define CTEST3_800_V3 0x80 /* Chip revision */
-#define CTEST3_800_V2 0x40
-#define CTEST3_800_V1 0x20
-#define CTEST3_800_V0 0x10
-#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
-#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
-#define CTEST3_800_FM 0x02 /* Fetch mode pin */
-/* bit 0 is reserved on 800 series chips */
-
-#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
-#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
-/* 0x80 is reserved on 700 series chips */
-#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
-#define CTEST4_ZMOD 0x40 /* High impedance mode */
-#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
-#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
-#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
-#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
- * redirects writes from SODL
- * to the SCSI FIFO.
- */
-#define CTEST4_800_MPEE 0x08 /* Enable parity checking
- during master cycles on PCI
- bus */
-
-/*
- * These bits send the contents of the CTEST6 register to the appropriate
- * byte lane of the 32 bit DMA FIFO.  Normal operation is zero; when the
- * high bit is set, the low two bits select the byte lane.
- */
-#define CTEST4_FBL2 0x04
-#define CTEST4_FBL1 0x02
-#define CTEST4_FBL0 0x01
-#define CTEST4_FBL_MASK 0x07
-#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
-#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
-#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
-#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
-#define CTEST4_800_SAVE (CTEST4_800_BDIS)
-
-
-#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
-#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
-/*
- * Clock Address Incrementor. When set, it increments the
- * DNAD register to the next bus size boundary. It automatically
- * resets itself when the operation is complete.
- */
-#define CTEST5_ADCK 0x80
-/*
- * Clock Byte Counter. When set, it decrements the DBC register to
- * the next bus size boundary.
- */
-#define CTEST5_BBCK 0x40
-/*
- * Reset SCSI Offset. Setting this bit to 1 clears the current offset
- * pointer in the SCSI synchronous offset counter (SSTAT). This bit
- * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
- * be cleared when a synchronous transfer fails. When written, it is
- * automatically cleared after the SCSI synchronous offset counter is
- * reset.
- */
-/* Bit 5 is reserved on 800 series chips */
-#define CTEST5_700_ROFF 0x20
-/*
- * Master Control for Set or Reset pulses.  When 1, writing a one to any
- * of the low four bits asserts the corresponding signal; when 0, writing
- * a one clears it.
- */
-#define CTEST5_MASR 0x10
-#define CTEST5_DDIR 0x08 /* DMA direction */
-/*
- * Bits 2-0 are reserved on 800 series chips
- */
-#define CTEST5_700_EOP 0x04 /* End of process */
-#define CTEST5_700_DREQ 0x02 /* Data request */
-#define CTEST5_700_DACK 0x01 /* Data acknowledge */
-
-/*
- * Chip test 6 rw - writing to this register writes to the byte
- * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
- * register.
- */
-#define CTEST6_REG_700 0x1a
-#define CTEST6_REG_800 0x23
-
-#define CTEST7_REG 0x1b /* Chip test 7 rw */
-/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
-#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
-#define CTEST7_10_SC1 0x40 /* Snoop control bits */
-#define CTEST7_10_SC0 0x20
-#define CTEST7_10_SC_MASK 0x60
-/* 0x20 is reserved on the NCR53c700 */
-#define CTEST7_0060_FM 0x20 /* Fetch mode */
-#define CTEST7_STD 0x10 /* Selection timeout disable */
-#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
-#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
-#define CTEST7_10_TT1 0x02 /* Transfer type */
-#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
- fetch */
-#define CTEST7_DIFF 0x01 /* Differential mode */
-
-#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
-
-
-#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
-
-#define DFIFO_REG 0x20 /* DMA FIFO rw */
-/*
- * 0x80 is reserved on the NCR53c710, the CLF and FLF bits have been
- * moved into the CTEST8 register.
- */
-#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
-#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
-#define DFIFO_BO6 0x40
-#define DFIFO_BO5 0x20
-#define DFIFO_BO4 0x10
-#define DFIFO_BO3 0x08
-#define DFIFO_BO2 0x04
-#define DFIFO_BO1 0x02
-#define DFIFO_BO0 0x01
-#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
-#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
-
-/*
- * Interrupt status rw
- * Note that this is the only register which can be read while SCSI
- * SCRIPTS are being executed.
- */
-#define ISTAT_REG_700 0x21
-#define ISTAT_REG_800 0x14
-#define ISTAT_ABRT 0x80 /* Software abort, write
-					 * 1 to abort, wait for interrupt. */
-/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
-#define ISTAT_10_SRST 0x40 /* software reset */
-#define ISTAT_10_SIGP 0x20 /* signal script */
-/* 0x10 is reserved on NCR53c700 series chips */
-#define ISTAT_800_SEM 0x10 /* semaphore */
-#define ISTAT_CON 0x08 /* 1 when connected */
-#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
-#define ISTAT_700_PRE 0x04 /* Pointer register empty.
- * Set to 1 when DSPS and DSP
- * registers are empty in pipeline
- * mode, always set otherwise.
- */
-#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
- * SCSI portion of SIOP see
- * SSTAT0
- */
-#define ISTAT_DIP 0x01 /* DMA interrupt pending
- * see DSTAT
- */
-
-/* NCR53c700-66 and NCR53c710 only */
-#define CTEST8_REG 0x22 /* Chip test 8 rw */
-#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
- * ie read from SCLK/ rather than CLK/
- */
-#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
-#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
- * pass through. This insures that
- * bad parity won't reach the host
- * bus.
- */
-#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
- * active negation, should only
- * be used for slow SCSI
- * non-differential.
- */
-#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
-#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
- * must be set for fast SCSI-II
- * speeds.
- */
-#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
- * switching.
- */
-#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
- * The status of pending
- * disconnect is maintained by
- * the core, eliminating
- * the possibility of missing a
- * selection or reselection
- * while waiting to fetch a
- * WAIT DISCONNECT opcode.
- */
-
-#define CTEST8_10_V3 0x80 /* Chip revision */
-#define CTEST8_10_V2 0x40
-#define CTEST8_10_V1 0x20
-#define CTEST8_10_V0 0x10
-#define CTEST8_10_V_MASK 0xf0
-#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
-#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
-#define CTEST8_10_FM 0x02 /* Fetch pin mode */
-#define CTEST8_10_SM 0x01 /* Snoop pin mode */
-
-
-/*
- * The CTEST9 register may be used to differentiate between a
- * NCR53c700 and a NCR53c710.
- *
- * Write 0xff to this register.
- * Read it.
- * If the contents are 0xff, it is a NCR53c700
- * If the contents are 0x00, it is a NCR53c700-66 first revision
- * If the contents are some other value, it is some other NCR53c700-66
- */
-#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
-#define LCRC_REG_10 0x23
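-/*
- * A minimal sketch of that probe, assuming the NCR53c7x0_read8/write8
- * accessors from 53c7xx.c (illustrative only, not a definitive sequence) :
- *
- *	NCR53c7x0_write8 (CTEST9_REG_00, 0xff);
- *	switch (NCR53c7x0_read8 (CTEST9_REG_00)) {
- *	case 0xff: chip = 700; break;		(NCR53c700)
- *	case 0x00: chip = 70066; break;		(first rev NCR53c700-66)
- *	default:   chip = 70066; break;		(later NCR53c700-66)
- *	}
- */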
-
-/*
- * 0x24 through 0x27 are the DMA byte counter register. Instructions
- * write their high 8 bits into the DCMD register, the low 24 bits into
- * the DBC register.
- *
- * Function is dependent on the command type being executed.
- */
-
-
-#define DBC_REG 0x24
-/*
- * For Block Move Instructions, DBC is a 24 bit quantity representing
- * the number of bytes to transfer.
- * For Transfer Control Instructions, DBC is bit fielded as follows :
- */
-/* Bits 20 - 23 should be clear */
-#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
-#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
-#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
-#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
-/* Bits 8 - 15 are reserved on some implementations ? */
-#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
-#define DBC_TCI_MASK_SHIFT 8
-#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
-#define DBC_TCI_DATA_SHIFT 0
-
-#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
-#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
-#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
-#define DBC_RWRI_ADDRESS_SHIFT 16
-
-
-/*
- * DMA command r/w
- */
-#define DCMD_REG 0x27
-#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
-#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
-#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
-#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
-#define DCMD_BMI_MSG 0x04 /* instruction */
-
-#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
-#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
-#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
-
-#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
-
-#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
- instruction */
-#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
-#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
-#define DCMD_TCI_MSG 0x04 /* instruction */
-#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
-#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
-#define DCMD_TCI_OP_CALL 0x08 /* CALL */
-#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
-#define DCMD_TCI_OP_INT 0x18 /* INT */
-
-#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
- instruction */
-#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
-#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
-#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
-#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
-
-#define DCMD_RWRI_OP_MASK 0x07
-#define DCMD_RWRI_OP_MOVE 0x00
-#define DCMD_RWRI_OP_SHL 0x01
-#define DCMD_RWRI_OP_OR 0x02
-#define DCMD_RWRI_OP_XOR 0x03
-#define DCMD_RWRI_OP_AND 0x04
-#define DCMD_RWRI_OP_SHR 0x05
-#define DCMD_RWRI_OP_ADD 0x06
-#define DCMD_RWRI_OP_ADDC 0x07
-
-#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
- (three words) */
-
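-/*
- * A minimal sketch of how the combined register is split, following what
- * print_lots () in 53c7xx.c does (illustrative only) :
- *
- *	u32 dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
- *	unsigned char dcmd = (dbc_dcmd & 0xff000000) >> 24;
- *	u32 dbc = dbc_dcmd & 0x00ffffff;
- */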
-
-#define DNAD_REG 0x28 /* through 0x2b DMA next address for
- data */
-#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
-#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
- save rw */
-#define DMODE_REG_00 0x34 /* DMA mode rw */
-#define DMODE_00_BL1 0x80 /* Burst length bits */
-#define DMODE_00_BL0 0x40
-#define DMODE_BL_MASK 0xc0
-/* Burst lengths (800) */
-#define DMODE_BL_2 0x00 /* 2 transfer */
-#define DMODE_BL_4 0x40 /* 4 transfers */
-#define DMODE_BL_8 0x80 /* 8 transfers */
-#define DMODE_BL_16 0xc0 /* 16 transfers */
-
-#define DMODE_10_BL_1 0x00 /* 1 transfer */
-#define DMODE_10_BL_2 0x40 /* 2 transfers */
-#define DMODE_10_BL_4 0x80 /* 4 transfers */
-#define DMODE_10_BL_8 0xc0 /* 8 transfers */
-#define DMODE_10_FC2 0x20 /* Driven to FC2 pin */
-#define DMODE_10_FC1 0x10 /* Driven to FC1 pin */
-#define DMODE_710_PD 0x08 /* Program/data on FC0 pin */
-#define DMODE_710_UO 0x02 /* User prog. output */
-
-#define DMODE_700_BW16 0x20 /* Host buswidth = 16 */
-#define DMODE_700_286 0x10 /* 286 mode */
-#define DMODE_700_IOM 0x08 /* Transfer to IO port */
-#define DMODE_700_FAM 0x04 /* Fixed address mode */
-#define DMODE_700_PIPE 0x02 /* Pipeline mode disables
- * automatic fetch / exec
- */
-#define DMODE_MAN 0x01 /* Manual start mode,
- * requires a 1 to be written
- * to the start DMA bit in the DCNTL
- * register to run scripts
- */
-
-#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
-
-/* NCR53c800 series only */
-#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
-/* NCR53c710 only */
-#define SCRATCHB_REG_10 0x34 /* through 0x37 scratch B rw */
-
-#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
-#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
-#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
-#define DMODE_800_ERL 0x08 /* Enable Read Line */
-
-/* 35-38 are reserved on 700 and 700-66 series chips */
-#define DIEN_REG 0x39 /* DMA interrupt enable rw */
-/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
-#define DIEN_800_MDPE 0x40 /* Master data parity error */
-#define DIEN_800_BF 0x20 /* BUS fault */
-#define DIEN_700_BF 0x20 /* BUS fault */
-#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
-#define DIEN_SSI 0x08 /* Enable single step interrupt */
-#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
- * interrupt
- */
-/* 0x02 is reserved on 800 series chips */
-#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
-#define DIEN_700_OPC 0x01 /* Enable illegal instruction
- * interrupt
- */
-#define DIEN_800_IID 0x01 /* Same meaning, different name */
-
-/*
- * DMA watchdog timer rw
- * set in 16 CLK input periods.
- */
-#define DWT_REG 0x3a
-
-/* DMA control rw */
-#define DCNTL_REG 0x3b
-#define DCNTL_700_CF1 0x80 /* Clock divisor bits */
-#define DCNTL_700_CF0 0x40
-#define DCNTL_700_CF_MASK 0xc0
-/* Clock divisors Divisor SCLK range (MHZ) */
-#define DCNTL_700_CF_2 0x00 /* 2.0 37.51-50.00 */
-#define DCNTL_700_CF_1_5 0x40 /* 1.5 25.01-37.50 */
-#define DCNTL_700_CF_1 0x80 /* 1.0 16.67-25.00 */
-#define DCNTL_700_CF_3 0xc0 /* 3.0 50.01-66.67 (53c700-66) */
-
-#define DCNTL_700_S16 0x20 /* Load scripts 16 bits at a time */
-#define DCNTL_SSM 0x10 /* Single step mode */
-#define DCNTL_700_LLM 0x08 /* Low level mode, can only be set
- * after selection */
-#define DCNTL_800_IRQM 0x08 /* Totem pole IRQ pin */
-#define DCNTL_STD 0x04 /* Start DMA / SCRIPTS */
-/* 0x02 is reserved */
-#define DCNTL_00_RST 0x01 /* Software reset, resets everything
- * but 286 mode bit in DMODE. On the
- * NCR53c710, this bit moved to CTEST8
- */
-#define DCNTL_10_COM 0x01 /* 700 software compatibility mode */
-#define DCNTL_10_EA 0x20 /* Enable Ack - needed for MVME16x */
-
-#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16 )
-
-
-/* NCR53c700-66 only */
-#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
-#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
-/* NCR53c710 only */
-#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
-
-#define SIEN1_REG_800 0x41
-#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
-#define SIEN1_800_GEN 0x02 /* general purpose timer */
-#define SIEN1_800_HTH 0x01 /* handshake to handshake */
-
-#define SIST1_REG_800 0x43
-#define SIST1_800_STO 0x04 /* selection/reselection timeout */
-#define SIST1_800_GEN 0x02 /* general purpose timer */
-#define SIST1_800_HTH 0x01 /* handshake to handshake */
-
-#define SLPAR_REG_800 0x44 /* Parity */
-
-#define MACNTL_REG_800 0x46 /* Memory access control */
-#define MACNTL_800_TYP3 0x80
-#define MACNTL_800_TYP2 0x40
-#define MACNTL_800_TYP1 0x20
-#define MACNTL_800_TYP0 0x10
-#define MACNTL_800_DWR 0x08
-#define MACNTL_800_DRD 0x04
-#define MACNTL_800_PSCPT 0x02
-#define MACNTL_800_SCPTS 0x01
-
-#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
-
-/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
-#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
-#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
-#define STIME0_800_HTH_SHIFT 4
-#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
-#define STIME0_800_SEL_SHIFT 0
-
-#define STIME1_REG_800 0x49
-#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
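-/*
- * With the encoding above, a field value of n corresponds to roughly
- * 100us << (n - 1); e.g. a selection timeout field of 0x0d is about 400ms.
- */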
-
-#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
- bits on narrow chips, 16 on WIDE */
-
-#define STEST0_REG_800 0x4c
-#define STEST0_800_SLT 0x08 /* Selection response logic test */
-#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
-#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
-#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
-
-#define STEST1_REG_800 0x4d
-#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
-
-#define STEST2_REG_800 0x4e
-#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
-#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
-#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
-#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
-#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
-#define STEST2_800_LOW 0x01 /* SCSI low level mode */
-
-#define STEST3_REG_800 0x4f
-#define STEST3_800_TE 0x80 /* Enable active negation */
-#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
-#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
-#define STEST3_800_DSI 0x10 /* Disable single initiator response */
-#define STEST3_800_TTM 0x04 /* Time test mode */
-#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
-#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
-
-#define OPTION_PARITY 0x1 /* Enable parity checking */
-#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
-#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
-#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
-#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
-#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
- simple test code, return
- DID_NO_CONNECT if any SCSI
- commands are attempted. */
-#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
- SCSI write is attempted */
-#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
- each address and instruction
- executed to debug buffer. */
-#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
- instruction */
-#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
-#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
- memory mapping */
-#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
- I/O mapping */
-#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
-#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
-#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
-#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
-#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
-#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
-#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
-#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
-#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
-#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
-#define OPTION_DEBUG_DSA 0x800000
-#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
-#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
-#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
-#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
-#define OPTION_DEBUG_DISCONNECT 0x10000000
-#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
- on power up */
-#define OPTION_DEBUG_QUEUES 0x80000000
-#define OPTION_DEBUG_ALLOCATION 0x100000000LL
-#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
- SCNTL3 registers */
-#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
- SDTR for async transfers when
- we haven't been told to do
- a synchronous transfer. */
-#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
- the reselect/WAIT DISCONNECT
- race condition hits */
-#if !defined(PERM_OPTIONS)
-#define PERM_OPTIONS 0
-#endif
-
-/*
- * Some data which is accessed by the NCR chip must be 4-byte aligned.
- * For some hosts the default is less than that (eg. 68K uses 2-byte).
- * Alignment has only been forced where it is important; also if one
- * 32 bit structure field is aligned then it is assumed that following
- * 32 bit fields are also aligned. Take care when adding fields
- * which are other than 32 bit.
- */
-
-struct NCR53c7x0_synchronous {
- u32 select_indirect /* Value used for indirect selection */
- __attribute__ ((aligned (4)));
- u32 sscf_710; /* Used to set SSCF bits for 710 */
- u32 script[8]; /* Size ?? Script used when target is
- reselected */
- unsigned char synchronous_want[5]; /* Per target desired SDTR */
-/*
- * Set_synchronous programs these, select_indirect and current settings after
- * int_debug_should show a match.
- */
- unsigned char sxfer_sanity, scntl3_sanity;
-};
-
-#define CMD_FLAG_SDTR 1 /* Initiating synchronous
- transfer negotiation */
-#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
- negotiation */
-#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
-#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
-
-struct NCR53c7x0_table_indirect {
- u32 count;
- void *address;
-};
-
-enum ncr_event {
- EVENT_NONE = 0,
-/*
- * Order is IMPORTANT, since these must correspond to the event interrupts
- * in 53c7,8xx.scr
- */
-
- EVENT_ISSUE_QUEUE = 0x5000000, /* 0 Command was added to issue queue */
- EVENT_START_QUEUE, /* 1 Command moved to start queue */
- EVENT_SELECT, /* 2 Command completed selection */
- EVENT_DISCONNECT, /* 3 Command disconnected */
- EVENT_RESELECT, /* 4 Command reselected */
- EVENT_COMPLETE, /* 5 Command completed */
- EVENT_IDLE, /* 6 */
- EVENT_SELECT_FAILED, /* 7 */
- EVENT_BEFORE_SELECT, /* 8 */
- EVENT_RESELECT_FAILED /* 9 */
-};
-
-struct NCR53c7x0_event {
- enum ncr_event event; /* What type of event */
- unsigned char target;
- unsigned char lun;
- struct timeval time;
- u32 *dsa; /* What's in the DSA register now (virt) */
-/*
- * A few things from that SCSI pid so we know what happened after
- * the Scsi_Cmnd structure in question may have disappeared.
- */
- unsigned long pid; /* The SCSI PID which caused this
- event */
- unsigned char cmnd[12];
-};
-
-/*
- * Things in the NCR53c7x0_cmd structure are split into two parts :
- *
- * 1. A fixed portion, for things which are not accessed directly by static NCR
- * code (ie, are referenced only by the Linux side of the driver,
- * or only by dynamically generated code).
- *
- * 2. The DSA portion, for things which are accessed directly by static NCR
- * code.
- *
- * This is a little ugly, but it
- * 1. Avoids conflicts between the NCR code's picture of the structure, and
- * Linux code's idea of what it looks like.
- *
- * 2. Minimizes the pain in the Linux side of the code needed
- * to calculate real dsa locations for things, etc.
- *
- */
-
-struct NCR53c7x0_cmd {
- void *real; /* Real, unaligned address for
- free function */
- void (* free)(void *, int); /* Command to deallocate; NULL
- for structures allocated with
- scsi_register, etc. */
- Scsi_Cmnd *cmd; /* Associated Scsi_Cmnd
- structure, Scsi_Cmnd points
- at NCR53c7x0_cmd using
- host_scribble structure */
-
- int size; /* scsi_malloc'd size of this
- structure */
-
- int flags; /* CMD_* flags */
-
- unsigned char cmnd[12]; /* CDB, copied from Scsi_Cmnd */
- int result; /* Copy to Scsi_Cmnd when done */
-
- struct { /* Private non-cached bounce buffer */
- unsigned char buf[256];
- u32 addr;
- u32 len;
- } bounce;
-
-/*
- * SDTR and WIDE messages are an either/or affair
- * in this message, since we will go into message out and send
- * _the whole mess_ without dropping out of message out to
- * let the target go into message in after sending the first
- * message.
- */
-
- unsigned char select[11]; /* Select message, includes
- IDENTIFY
- (optional) QUEUE TAG
- (optional) SDTR or WDTR
- */
-
-
-	volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
-					   running, eventually finished) */
-
-
- u32 *data_transfer_start; /* Start of data transfer routines */
-	u32 *data_transfer_end;	/* Address after end of data transfer
-					   routines */
-/*
- * The following three fields were moved from the DSA proper to here
- * since only dynamically generated NCR code refers to them, meaning
- * we don't need dsa_* absolutes, and it is simpler to let the
- * host code refer to them directly.
- */
-
-/*
- * HARD CODED : residual and saved_residual need to agree with the sizes
- * used in NCR53c7,8xx.scr.
- *
- * FIXME: we want to consider the case where we have odd-length
- * scatter/gather buffers and a WIDE transfer, in which case
- * we'll need to use the CHAIN MOVE instruction. Ick.
- */
- u32 residual[6] __attribute__ ((aligned (4)));
- /* Residual data transfer which
- allows pointer code to work
- right.
-
- [0-1] : Conditional call to
- appropriate other transfer
- routine.
- [2-3] : Residual block transfer
- instruction.
- [4-5] : Jump to instruction
- after splice.
- */
- u32 saved_residual[6]; /* Copy of old residual, so we
- can get another partial
- transfer and still recover
- */
-
- u32 saved_data_pointer; /* Saved data pointer */
-
- u32 dsa_next_addr; /* _Address_ of dsa_next field
- in this dsa for RISCy
- style constant. */
-
- u32 dsa_addr; /* Address of dsa; RISCy style
- constant */
-
- u32 dsa[0]; /* Variable length (depending
- on host type, number of scatter /
- gather buffers, etc). */
-};
-
-struct NCR53c7x0_break {
- u32 *address, old_instruction[2];
- struct NCR53c7x0_break *next;
- unsigned char old_size; /* Size of old instruction */
-};
-
-/* Indicates that the NCR is not executing code */
-#define STATE_HALTED 0
-/*
- * Indicates that the NCR is executing the wait for select / reselect
- * script. Only used when running NCR53c700 compatible scripts, only
- * state during which an ABORT is _not_ considered an error condition.
- */
-#define STATE_WAITING 1
-/* Indicates that the NCR is executing other code. */
-#define STATE_RUNNING 2
-/*
- * Indicates that the NCR was being aborted.
- */
-#define STATE_ABORTING 3
-/* Indicates that the NCR was successfully aborted. */
-#define STATE_ABORTED 4
-/* Indicates that the NCR has been disabled due to a fatal error */
-#define STATE_DISABLED 5
-
-/*
- * Where knowledge of SCSI SCRIPT(tm) specified values is needed
- * in an interrupt handler, an interrupt handler exists for each
- * different SCSI script so we don't have name space problems.
- *
- * Return values of these handlers are as follows :
- */
-#define SPECIFIC_INT_NOTHING 0 /* don't even restart */
-#define SPECIFIC_INT_RESTART 1 /* restart at the next instruction */
-#define SPECIFIC_INT_ABORT 2 /* recoverable error, abort cmd */
-#define SPECIFIC_INT_PANIC 3 /* unrecoverable error, panic */
-#define SPECIFIC_INT_DONE 4 /* normal command completion */
-#define SPECIFIC_INT_BREAK 5 /* break point encountered */
-
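For orientation, the script-specific handler installed in the dstat_sir_intr field of NCR53c7x0_hostdata below decodes the operand of the INT instruction that fired and returns one of the SPECIFIC_INT_* codes above. A minimal, hypothetical sketch of such a handler, reusing the int_norm_command_complete and int_debug_break interrupt values defined in 53c7xx.scr; how DSPS is actually read is elided:

	/* Hypothetical dstat_sir_intr handler: map the DSPS value left by the
	 * script's INT instruction onto a SPECIFIC_INT_* return code.  The
	 * real driver fetches DSPS with its register access macros. */
	static int example_dstat_sir_intr(struct Scsi_Host *host,
					  struct NCR53c7x0_cmd *cmd)
	{
		u32 dsps = 0;	/* would be read from the chip's DSPS register */

		switch (dsps) {
		case 0x02020000:	/* int_norm_command_complete */
			return SPECIFIC_INT_DONE;
		case 0x03000000:	/* int_debug_break */
			return SPECIFIC_INT_BREAK;
		default:
			return SPECIFIC_INT_PANIC;
		}
	}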
-struct NCR53c7x0_hostdata {
- int size; /* Size of entire Scsi_Host
- structure */
- int board; /* set to board type, useful if
- we have host specific things,
- ie, a general purpose I/O
- bit is being used to enable
- termination, etc. */
-
-	int chip;			/* set to chip type; 700-66 is
-					   70066, rest are last three
-					   digits of part number */
-
- char valid_ids[8]; /* Valid SCSI ID's for adapter */
-
- u32 *dsp; /* dsp to restart with after
- all stacked interrupts are
- handled. */
-
- unsigned dsp_changed:1; /* Has dsp changed within this
- set of stacked interrupts ? */
-
- unsigned char dstat; /* Most recent value of dstat */
- unsigned dstat_valid:1;
-
- unsigned expecting_iid:1; /* Expect IID interrupt */
- unsigned expecting_sto:1; /* Expect STO interrupt */
-
- /*
- * The code stays cleaner if we use variables with function
- * pointers and offsets that are unique for the different
- * scripts rather than having a slew of switch(hostdata->chip)
- * statements.
- *
- * It also means that the #defines from the SCSI SCRIPTS(tm)
- * don't have to be visible outside of the script-specific
- * instructions, preventing name space pollution.
- */
-
- void (* init_fixup)(struct Scsi_Host *host);
- void (* init_save_regs)(struct Scsi_Host *host);
- void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
- void (* soft_reset)(struct Scsi_Host *host);
- int (* run_tests)(struct Scsi_Host *host);
-
- /*
- * Called when DSTAT_SIR is set, indicating an interrupt generated
- * by the INT instruction, where values are unique for each SCSI
- * script. Should return one of the SPEC_* values.
- */
-
- int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
-
- int dsa_len; /* Size of DSA structure */
-
- /*
- * Location of DSA fields for the SCSI SCRIPT corresponding to this
- * chip.
- */
-
- s32 dsa_start;
- s32 dsa_end;
- s32 dsa_next;
- s32 dsa_prev;
- s32 dsa_cmnd;
- s32 dsa_select;
- s32 dsa_msgout;
- s32 dsa_cmdout;
- s32 dsa_dataout;
- s32 dsa_datain;
- s32 dsa_msgin;
- s32 dsa_msgout_other;
- s32 dsa_write_sync;
- s32 dsa_write_resume;
- s32 dsa_check_reselect;
- s32 dsa_status;
- s32 dsa_saved_pointer;
- s32 dsa_jump_dest;
-
- /*
- * Important entry points that generic fixup code needs
- * to know about, fixed up.
- */
-
- s32 E_accept_message;
- s32 E_command_complete;
- s32 E_data_transfer;
- s32 E_dsa_code_template;
- s32 E_dsa_code_template_end;
- s32 E_end_data_transfer;
- s32 E_msg_in;
- s32 E_initiator_abort;
- s32 E_other_transfer;
- s32 E_other_in;
- s32 E_other_out;
- s32 E_target_abort;
- s32 E_debug_break;
- s32 E_reject_message;
- s32 E_respond_message;
- s32 E_select;
- s32 E_select_msgout;
- s32 E_test_0;
- s32 E_test_1;
- s32 E_test_2;
- s32 E_test_3;
- s32 E_dsa_zero;
- s32 E_cmdout_cmdout;
- s32 E_wait_reselect;
- s32 E_dsa_code_begin;
-
- long long options; /* Bitfielded set of options enabled */
- volatile u32 test_completed; /* Test completed */
- int test_running; /* Test currently running */
- s32 test_source
- __attribute__ ((aligned (4)));
- volatile s32 test_dest;
-
- volatile int state; /* state of driver, only used for
- OPTION_700 */
-
- unsigned char dmode; /*
- * set to the address of the DMODE
- * register for this chip.
- */
- unsigned char istat; /*
- * set to the address of the ISTAT
- * register for this chip.
- */
-
- int scsi_clock; /*
- * SCSI clock in HZ. 0 may be used
- * for unknown, although this will
- * disable synchronous negotiation.
- */
-
- volatile int intrs; /* Number of interrupts */
- volatile int resets; /* Number of SCSI resets */
- unsigned char saved_dmode;
- unsigned char saved_ctest4;
- unsigned char saved_ctest7;
- unsigned char saved_dcntl;
- unsigned char saved_scntl3;
-
- unsigned char this_id_mask;
-
- /* Debugger information */
- struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
- *breakpoint_current; /* Current breakpoint being stepped
- through, NULL if we are running
- normally. */
-#ifdef NCR_DEBUG
- int debug_size; /* Size of debug buffer */
- volatile int debug_count; /* Current data count */
- volatile char *debug_buf; /* Output ring buffer */
- volatile char *debug_write; /* Current write pointer */
- volatile char *debug_read; /* Current read pointer */
-#endif /* def NCR_DEBUG */
-
- /* XXX - primitive debugging junk, remove when working ? */
- int debug_print_limit; /* Number of commands to print
- out exhaustive debugging
- information for if
- OPTION_DEBUG_DUMP is set */
-
- unsigned char debug_lun_limit[16]; /* If OPTION_DEBUG_TARGET_LIMIT
- set, puke if commands are sent
- to other target/lun combinations */
-
- int debug_count_limit; /* Number of commands to execute
- before puking to limit debugging
- output */
-
-
- volatile unsigned idle:1; /* set to 1 if idle */
-
- /*
- * Table of synchronous+wide transfer parameters set on a per-target
- * basis.
- */
-
- volatile struct NCR53c7x0_synchronous sync[16]
- __attribute__ ((aligned (4)));
-
- volatile Scsi_Cmnd *issue_queue
- __attribute__ ((aligned (4)));
- /* waiting to be issued by
- Linux driver */
- volatile struct NCR53c7x0_cmd *running_list;
- /* commands running, maintained
- by Linux driver */
-
- volatile struct NCR53c7x0_cmd *ncrcurrent; /* currently connected
- nexus, ONLY valid for
- NCR53c700/NCR53c700-66
- */
-
- volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
- allocated at probe time,
- which we can use for
- initialization */
- volatile struct NCR53c7x0_cmd *free;
- int max_cmd_size; /* Maximum size of NCR53c7x0_cmd
- based on number of
- scatter/gather segments, etc.
- */
- volatile int num_cmds; /* Number of commands
- allocated */
- volatile int extra_allocate;
- volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
- for this target yet? If not,
- do so ASAP */
- volatile unsigned char busy[16][8]; /* number of commands
- executing on each target
- */
- /*
- * Eventually, I'll switch to a coroutine for calling
- * cmd->done(cmd), etc. so that we can overlap interrupt
- * processing with this code for maximum performance.
- */
-
- volatile struct NCR53c7x0_cmd *finished_queue;
-
- /* Shared variables between SCRIPT and host driver */
- volatile u32 *schedule
- __attribute__ ((aligned (4))); /* Array of JUMPs to dsa_begin
- routines of various DSAs.
- When not in use, replace
- with jump to next slot */
-
-
- volatile unsigned char msg_buf[16]; /* buffer for messages
- other than the command
- complete message */
-
- /* Per-target default synchronous and WIDE messages */
- volatile unsigned char synchronous_want[16][5];
- volatile unsigned char wide_want[16][4];
-
- /* Bit fielded set of targets we want to speak synchronously with */
- volatile u16 initiate_sdtr;
- /* Bit fielded set of targets we want to speak wide with */
- volatile u16 initiate_wdtr;
- /* Bit fielded list of targets we've talked to. */
- volatile u16 talked_to;
-
- /* Array of bit-fielded lun lists that we need to request_sense */
- volatile unsigned char request_sense[16];
-
- u32 addr_reconnect_dsa_head
- __attribute__ ((aligned (4))); /* RISCy style constant,
- address of following */
- volatile u32 reconnect_dsa_head;
- /* Data identifying nexus we are trying to match during reselection */
- volatile unsigned char reselected_identify; /* IDENTIFY message */
- volatile unsigned char reselected_tag; /* second byte of queue tag
- message or 0 */
-
- /* These were static variables before we moved them */
-
- s32 NCR53c7xx_zero
- __attribute__ ((aligned (4)));
- s32 NCR53c7xx_sink;
- u32 NOP_insn;
- char NCR53c7xx_msg_reject;
- char NCR53c7xx_msg_abort;
- char NCR53c7xx_msg_nop;
-
- /*
- * Following item introduced by RGH to support NCRc710, which is
-	 * VERY brain-dead when it comes to memory moves
- */
-
- /* DSA save area used only by the NCR chip */
- volatile unsigned long saved2_dsa
- __attribute__ ((aligned (4)));
-
- volatile unsigned long emulated_intfly
- __attribute__ ((aligned (4)));
-
- volatile int event_size, event_index;
- volatile struct NCR53c7x0_event *events;
-
- /* If we need to generate code to kill off the currently connected
- command, this is where we do it. Should have a BMI instruction
- to source or sink the current data, followed by a JUMP
- to abort_connected */
-
- u32 *abort_script;
-
- int script_count; /* Size of script in words */
- u32 script[0]; /* Relocated SCSI script */
-
-};
-
-#define SCSI_IRQ_NONE 255
-#define DMA_NONE 255
-#define IRQ_AUTO 254
-#define DMA_AUTO 254
-
-#define BOARD_GENERIC 0
-
-#define NCR53c7x0_insn_size(insn) \
- (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
-
-
-#define NCR53c7x0_local_declare() \
- volatile unsigned char *NCR53c7x0_address_memory; \
- unsigned int NCR53c7x0_address_io; \
- int NCR53c7x0_memory_mapped
-
-#define NCR53c7x0_local_setup(host) \
- NCR53c7x0_address_memory = (void *) (host)->base; \
- NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
- NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
- host->hostdata[0])-> options & OPTION_MEMORY_MAPPED
-
-#ifdef BIG_ENDIAN
-/* These could be more efficient, given that we are always memory mapped,
- * but they don't give the same problems as the write macros, so leave
- * them. */
-#ifdef __mc68000__
-#define NCR53c7x0_read8(address) \
- ((unsigned int)raw_inb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) )
-
-#define NCR53c7x0_read16(address) \
- ((unsigned int)raw_inw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)))
-#else
-#define NCR53c7x0_read8(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) : \
- inb(NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_read16(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) : \
- inw(NCR53c7x0_address_io + (address)))
-#endif /* mc68000 */
-#else
-#define NCR53c7x0_read8(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readb((u32)NCR53c7x0_address_memory + (u32)(address)) : \
- inb(NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_read16(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int)readw((u32)NCR53c7x0_address_memory + (u32)(address)) : \
- inw(NCR53c7x0_address_io + (address)))
-#endif
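The ^3 and ^2 address munging in the big-endian variants above compensates for the chip's little-endian register layout being wired onto the 68000's byte lanes: within each 32-bit word, byte offsets are mirrored (addr ^ 3 for 8-bit accesses) and halfword offsets are swapped (addr ^ 2 for 16-bit accesses). As a worked example, assuming the MVME16x-style wiring these macros were written for, an 8-bit read of the register at offset 0x4d actually touches memory offset 0x4d ^ 3 = 0x4e, and a 16-bit read at offset 0x4c touches 0x4c ^ 2 = 0x4e.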
-
-#ifdef __mc68000__
-#define NCR53c7x0_read32(address) \
- ((unsigned int) raw_inl((u32)NCR53c7x0_address_memory + (u32)(address)))
-#else
-#define NCR53c7x0_read32(address) \
- (NCR53c7x0_memory_mapped ? \
- (unsigned int) readl((u32)NCR53c7x0_address_memory + (u32)(address)) : \
- inl(NCR53c7x0_address_io + (address)))
-#endif /* mc68000*/
-
-#ifdef BIG_ENDIAN
-/* If we are big-endian, then we are not Intel, so probably don't have
- * an i/o map as well as a memory map. So, let's assume memory mapped.
- * Also, I am having terrible problems trying to persuade the compiler
- * not to lay down code which does a read after write for these macros.
- * If you remove 'volatile' from writeb() and friends it is ok....
- */
-
-#define NCR53c7x0_write8(address,value) \
- *(volatile unsigned char *) \
- ((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) = (value)
-
-#define NCR53c7x0_write16(address,value) \
- *(volatile unsigned short *) \
- ((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) = (value)
-
-#define NCR53c7x0_write32(address,value) \
- *(volatile unsigned long *) \
- ((u32)NCR53c7x0_address_memory + ((u32)(address))) = (value)
-
-#else
-
-#define NCR53c7x0_write8(address,value) \
- (NCR53c7x0_memory_mapped ? \
- ({writeb((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
- outb((value), NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_write16(address,value) \
- (NCR53c7x0_memory_mapped ? \
- ({writew((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
- outw((value), NCR53c7x0_address_io + (address)))
-
-#define NCR53c7x0_write32(address,value) \
- (NCR53c7x0_memory_mapped ? \
- ({writel((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
- outl((value), NCR53c7x0_address_io + (address)))
-
-#endif
-
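Taken together, these macros are used in a fixed pattern: a function declares the access locals, binds them to a particular host with NCR53c7x0_local_setup(), and then reads or writes registers, with the macros selecting memory-mapped or I/O-port access according to OPTION_MEMORY_MAPPED. A minimal usage sketch, assuming only what this header declares (hostdata->istat holds the per-chip ISTAT register offset):

	/* Read the chip's ISTAT register for a given host.  hostdata->istat is
	 * set to the ISTAT register address for this chip type at probe time. */
	static unsigned char read_istat(struct Scsi_Host *host)
	{
		NCR53c7x0_local_declare();
		struct NCR53c7x0_hostdata *hostdata =
			(struct NCR53c7x0_hostdata *) host->hostdata[0];

		NCR53c7x0_local_setup(host);
		return NCR53c7x0_read8(hostdata->istat);
	}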
-/* Patch arbitrary 32 bit words in the script */
-#define patch_abs_32(script, offset, symbol, value) \
- for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
- (u32)); ++i) { \
- (script)[A_##symbol##_used[i] - (offset)] += (value); \
- if (hostdata->options & OPTION_DEBUG_FIXUP) \
- printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
- host->host_no, #symbol, i, A_##symbol##_used[i] - \
- (int)(offset), #script, (script)[A_##symbol##_used[i] - \
- (offset)]); \
- }
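Each patch_abs_* macro walks the A_<symbol>_used[] array emitted by the script assembler, which lists the script-word indices that reference the symbol, and rewrites those words in the relocated copy. A standalone sketch of the same idea, with the generated array replaced by an explicit parameter:

	/* Add 'value' to every word of a relocated script that references an
	 * absolute symbol; 'used'/'n_used' stand in for the generated
	 * A_<symbol>_used[] table, 'offset' for the script's base offset. */
	static void patch_abs32_sketch(u32 *script, u32 offset,
				       const u32 *used, int n_used, u32 value)
	{
		int i;

		for (i = 0; i < n_used; i++)
			script[used[i] - offset] += value;
	}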
-
-/* Patch read/write instruction immediate field */
-#define patch_abs_rwri_data(script, offset, symbol, value) \
- for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
- (u32)); ++i) \
- (script)[A_##symbol##_used[i] - (offset)] = \
- ((script)[A_##symbol##_used[i] - (offset)] & \
- ~DBC_RWRI_IMMEDIATE_MASK) | \
- (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
- DBC_RWRI_IMMEDIATE_MASK)
-
-/* Patch transfer control instruction data field */
-#define patch_abs_tci_data(script, offset, symbol, value) \
- for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
- (u32)); ++i) \
- (script)[A_##symbol##_used[i] - (offset)] = \
- ((script)[A_##symbol##_used[i] - (offset)] & \
- ~DBC_TCI_DATA_MASK) | \
- (((value) << DBC_TCI_DATA_SHIFT) & \
- DBC_TCI_DATA_MASK)
-
-/* Patch field in dsa structure (assignment should be +=?) */
-#define patch_dsa_32(dsa, symbol, word, value) \
- { \
- (dsa)[(hostdata->##symbol - hostdata->dsa_start) / sizeof(u32) \
- + (word)] = (value); \
- if (hostdata->options & OPTION_DEBUG_DSA) \
- printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
- #dsa, #symbol, hostdata->##symbol, \
- (word), (u32) (value)); \
- }
-
-/* Paranoid people could use panic() here. */
-#define FATAL(host) shutdown((host));
-
-extern int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
- unsigned long base, int io_port, int irq, int dma,
- long long options, int clock);
-
-#endif /* NCR53c710_C */
-#endif /* NCR53c710_H */
diff --git a/drivers/scsi/53c7xx.scr b/drivers/scsi/53c7xx.scr
deleted file mode 100644
index 9c5694a2da8a..000000000000
--- a/drivers/scsi/53c7xx.scr
+++ /dev/null
@@ -1,1591 +0,0 @@
-#undef DEBUG
-#undef EVENTS
-#undef NO_SELECTION_TIMEOUT
-#define BIG_ENDIAN
-
-; 53c710 driver.  Modified from Drew Eckhardt's driver
-; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
-;
-; I have left the script for the 53c8xx family in here, as it is likely
-; to be useful to see what I changed when bug hunting.
-
-; NCR 53c810 driver, main script
-; Sponsored by
-; iX Multiuser Multitasking Magazine
-; hm@ix.de
-;
-; Copyright 1993, 1994, 1995 Drew Eckhardt
-; Visionary Computing
-; (Unix and Linux consulting and custom programming)
-; drew@PoohSticks.ORG
-; +1 (303) 786-7975
-;
-; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
-;
-; PRE-ALPHA
-;
-; For more information, please consult
-;
-; NCR 53C810
-; PCI-SCSI I/O Processor
-; Data Manual
-;
-; NCR 53C710
-; SCSI I/O Processor
-; Programmers Guide
-;
-; NCR Microelectronics
-; 1635 Aeroplaza Drive
-; Colorado Springs, CO 80916
-; 1+ (719) 578-3400
-;
-; Toll free literature number
-; +1 (800) 334-5454
-;
-; IMPORTANT : This code is self modifying due to the limitations of
-; the NCR53c7,8xx series chips. Persons debugging this code with
-; the remote debugger should take this into account, and NOT set
-; breakpoints in modified instructions.
-;
-; Design:
-; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
-; microcontroller using a simple instruction set.
-;
-; So, to minimize the effects of interrupt latency, and to maximize
-; throughput, this driver offloads the practical maximum amount
-; of processing to the SCSI chip while still maintaining a common
-; structure.
-;
-; Where tradeoffs were needed between efficiency on the older
-; chips and the newer NCR53c800 series, the NCR53c800 series
-; was chosen.
-;
-; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
-; automate SCSI transfers without host processor intervention, this
-; isn't the case with the NCR53c710 and newer chips which allow
-;
-; - reads and writes to the internal registers from within the SCSI
-; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
-; state so that multiple threads of execution are possible, and also
-; provide an ALU for loop control, etc.
-;
-; - table indirect addressing for some instructions. This allows
-;	pointers to be located relative to the DSA (Data Structure
-; Address) register.
-;
-; These features make it possible to implement a mailbox style interface,
-; where the same piece of code is run to handle I/O for multiple threads
-; at once minimizing our need to relocate code. Since the NCR53c700/
-; NCR53c800 series have a unique combination of features, making a
-; standard ingoing/outgoing mailbox system costly, I've modified it.
-;
-; - Mailboxes are a mixture of code and data. This lets us greatly
-; simplify the NCR53c810 code and do things that would otherwise
-; not be possible.
-;
-; The saved data pointer is now implemented as follows :
-;
-; Control flow has been architected such that if control reaches
-; munge_save_data_pointer, on a restore pointers message or
-; reconnection, a jump to the address formerly in the TEMP register
-; will allow the SCSI command to resume execution.
-;
-
-;
-; Note : the DSA structures must be aligned on 32 bit boundaries,
-; since the source and destination of MOVE MEMORY instructions
-; must share the same alignment and this is the alignment of the
-; NCR registers.
-;
-
-; For some systems (MVME166, for example) dmode is always the same, so don't
-; waste time writing it
-
-#if 1
-#define DMODE_MEMORY_TO_NCR
-#define DMODE_MEMORY_TO_MEMORY
-#define DMODE_NCR_TO_MEMORY
-#else
-#define DMODE_MEMORY_TO_NCR MOVE dmode_memory_to_ncr TO DMODE
-#define DMODE_MEMORY_TO_MEMORY MOVE dmode_memory_to_memory TO DMODE
-#define DMODE_NCR_TO_MEMORY MOVE dmode_ncr_to_memory TO DMODE
-#endif
-
-ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
-ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
-ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
- ; for current dsa
-ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
- ; sync routine
-ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
- ; sscf value (53c710)
-ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
-ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
- ; saved data pointer
-ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
- ; current residual code
-ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
- ; saved residual code
-ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
-ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
-ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
-
-;
-; Once a device has initiated reselection, we need to compare it
-; against the singly linked list of commands which have disconnected
-; and are pending reselection. These commands are maintained in
-; an unordered singly linked list of DSA structures, through the
-; DSA pointers at their 'centers' headed by the reconnect_dsa_head
-; pointer.
-;
-; To avoid complications in removing commands from the list,
-; I minimize the amount of expensive (at eight operations per
-; addition @ 500-600ns each) pointer operations which must
-; be done in the NCR driver by precomputing them on the
-; host processor during dsa structure generation.
-;
-; The fixed-up per DSA code knows how to recognize the nexus
-; associated with the corresponding SCSI command, and modifies
-; the source and destination pointers for the MOVE MEMORY
-; instruction which is executed when reselected_ok is called
-; to remove the command from the list. Similarly, DSA is
-; loaded with the address of the next DSA structure and
-; reselected_check_next is called if a failure occurs.
-;
-; Perhaps more concisely, the net effect of the mess is
-;
-; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
-; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
-; src = &dsa->next;
-; if (target_id == dsa->id && target_lun == dsa->lun) {
-; *dest = *src;
-; break;
-; }
-; }
-;
-; if (!dsa)
-; error (int_err_unexpected_reselect);
-; else
-; longjmp (dsa->jump_resume, 0);
-;
-;
-
-#if (CHIP != 700) && (CHIP != 70066)
-; Define DSA structure used for mailboxes
-ENTRY dsa_code_template
-dsa_code_template:
-ENTRY dsa_code_begin
-dsa_code_begin:
-; RGH: Don't care about TEMP and DSA here
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- MOVE MEMORY 4, addr_scratch, saved_dsa
- ; We are about to go and select the device, so must set SSCF bits
- MOVE MEMORY 4, dsa_sscf_710, addr_scratch
-#ifdef BIG_ENDIAN
- MOVE SCRATCH3 TO SFBR
-#else
- MOVE SCRATCH0 TO SFBR
-#endif
- MOVE SFBR TO SBCL
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#else
- CALL scratch_to_dsa
-#endif
- CALL select
-; Handle the phase mismatch which may have resulted from the
-; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
-; may or may not be necessary, and we should update script_asm.pl
-; to handle multiple pieces.
- CLEAR ATN
- CLEAR ACK
-
-; Replace second operand with address of JUMP instruction dest operand
-; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
-ENTRY dsa_code_fix_jump
-dsa_code_fix_jump:
- MOVE MEMORY 4, NOP_insn, 0
- JUMP select_done
-
-; wrong_dsa loads the DSA register with the value of the dsa_next
-; field.
-;
-wrong_dsa:
-#if (CHIP == 710)
-; NOTE DSA is corrupt when we arrive here!
-#endif
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the destination address is the address of the OLD
-; next pointer.
-;
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
- DMODE_MEMORY_TO_NCR
-;
-; Move the _contents_ of the next pointer into the DSA register as
-; the next I_T_L or I_T_L_Q tuple to check against the established
-; nexus.
-;
- MOVE MEMORY 4, dsa_temp_next, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- MOVE MEMORY 4, addr_scratch, saved_dsa
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#else
- CALL scratch_to_dsa
-#endif
- JUMP reselected_check_next
-
-ABSOLUTE dsa_save_data_pointer = 0
-ENTRY dsa_code_save_data_pointer
-dsa_code_save_data_pointer:
-#if (CHIP == 710)
- ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
- ; We MUST return with DSA correct
- MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
- CLEAR ACK
-#ifdef DEBUG
- INT int_debug_saved
-#endif
- MOVE MEMORY 4, saved_dsa, addr_dsa
- JUMP jump_temp
-#else
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
- DMODE_MEMORY_TO_MEMORY
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
- CLEAR ACK
-#ifdef DEBUG
- INT int_debug_saved
-#endif
- RETURN
-#endif
-ABSOLUTE dsa_restore_pointers = 0
-ENTRY dsa_code_restore_pointers
-dsa_code_restore_pointers:
-#if (CHIP == 710)
- ; TEMP and DSA are corrupt when we get here, but who cares!
- MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
- CLEAR ACK
- ; Restore DSA, note we don't care about TEMP
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#ifdef DEBUG
- INT int_debug_restored
-#endif
- JUMP jump_temp
-#else
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
- DMODE_MEMORY_TO_MEMORY
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
- CLEAR ACK
-#ifdef DEBUG
- INT int_debug_restored
-#endif
- RETURN
-#endif
-
-ABSOLUTE dsa_check_reselect = 0
-; dsa_check_reselect determines whether or not the current target and
-; lun match the current DSA
-ENTRY dsa_code_check_reselect
-dsa_code_check_reselect:
-#if (CHIP == 710)
- /* Arrives here with DSA correct */
- /* Assumes we are always ID 7 */
- MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
- JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
-#else
- MOVE SSID TO SFBR ; SSID contains 3 bit target ID
-; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
- JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
-#endif
-;
-; Hack - move to scratch first, since SFBR is not writeable
-; via the CPU and hence a MOVE MEMORY instruction.
-;
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 1, reselected_identify, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#ifdef BIG_ENDIAN
- ; BIG ENDIAN ON MVME16x
- MOVE SCRATCH3 TO SFBR
-#else
- MOVE SCRATCH0 TO SFBR
-#endif
-; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
-; Are you sure about that? richard@sleepie.demon.co.uk
- JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the source address is the address of this dsa's
-; next pointer.
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
- CALL reselected_ok
-#if (CHIP == 710)
-; Restore DSA following memory moves in reselected_ok
-; dsa_temp_sync doesn't really care about DSA, but it has an
-; optional debug INT so a valid DSA is a good idea.
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
- CALL dsa_temp_sync
-; Release ACK on the IDENTIFY message _after_ we've set the synchronous
-; transfer parameters!
- CLEAR ACK
-; Implicitly restore pointers on reselection, so a RETURN
-; will transfer control back to the right spot.
- CALL REL (dsa_code_restore_pointers)
- RETURN
-ENTRY dsa_zero
-dsa_zero:
-ENTRY dsa_code_template_end
-dsa_code_template_end:
-
-; Perform sanity check: dsa_fields_start should equal dsa_code_template_end -
-; dsa_zero; otherwise, puke.
-
-ABSOLUTE dsa_fields_start = 0 ; Sanity marker
- ; pad 48 bytes (fix this RSN)
-ABSOLUTE dsa_next = 48 ; len 4 Next DSA
- ; del 4 Previous DSA address
-ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
-ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
- ; table indirect select
-ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
- ; select message
-ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
- ; command
-ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
-ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
-ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
-ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
-ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
- ; (Synchronous transfer negotiation, etc).
-ABSOLUTE dsa_end = 112
-
-ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
- ; terminated by a call to JUMP wait_reselect
-
-; Linked lists of DSA structures
-ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
-ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
- ; address of reconnect_dsa_head
-
-; These select the source and destination of a MOVE MEMORY instruction
-ABSOLUTE dmode_memory_to_memory = 0x0
-ABSOLUTE dmode_memory_to_ncr = 0x0
-ABSOLUTE dmode_ncr_to_memory = 0x0
-
-ABSOLUTE addr_scratch = 0x0
-ABSOLUTE addr_temp = 0x0
-#if (CHIP == 710)
-ABSOLUTE saved_dsa = 0x0
-ABSOLUTE emulfly = 0x0
-ABSOLUTE addr_dsa = 0x0
-#endif
-#endif /* CHIP != 700 && CHIP != 70066 */
-
-; Interrupts -
-; MSB indicates type
-; 0 handle error condition
-; 1 handle message
-; 2 handle normal condition
-; 3 debugging interrupt
-; 4 testing interrupt
-; Next byte indicates specific error
-
-; XXX not yet implemented, I'm not sure if I want to -
-; Next byte indicates the routine the error occurred in
-; The LSB indicates the specific place the error occurred
-
-ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
-ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
-ABSOLUTE int_err_unexpected_reselect = 0x00020000
-ABSOLUTE int_err_check_condition = 0x00030000
-ABSOLUTE int_err_no_phase = 0x00040000
-ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
-ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
-ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
- ; received
-
-ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
- ; registers.
-ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
-ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
-ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
-ABSOLUTE int_norm_aborted = 0x02040000		; Aborted *dsa
-ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
-ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
-ABSOLUTE int_debug_break = 0x03000000 ; Break point
-#ifdef DEBUG
-ABSOLUTE int_debug_scheduled = 0x03010000 ; new I/O scheduled
-ABSOLUTE int_debug_idle = 0x03020000 ; scheduler is idle
-ABSOLUTE int_debug_dsa_loaded = 0x03030000 ; dsa reloaded
-ABSOLUTE int_debug_reselected = 0x03040000 ; NCR reselected
-ABSOLUTE int_debug_head = 0x03050000 ; issue head overwritten
-ABSOLUTE int_debug_disconnected = 0x03060000 ; disconnected
-ABSOLUTE int_debug_disconnect_msg = 0x03070000 ; got message to disconnect
-ABSOLUTE int_debug_dsa_schedule = 0x03080000 ; in dsa_schedule
-ABSOLUTE int_debug_reselect_check = 0x03090000 ; Check for reselection of DSA
-ABSOLUTE int_debug_reselected_ok = 0x030a0000 ; Reselection accepted
-#endif
-ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
-#ifdef DEBUG
-ABSOLUTE int_debug_saved = 0x030c0000 ; save/restore pointers
-ABSOLUTE int_debug_restored = 0x030d0000
-ABSOLUTE int_debug_sync = 0x030e0000 ; Sanity check synchronous
- ; parameters.
-ABSOLUTE int_debug_datain = 0x030f0000 ; going into data in phase
- ; now.
-ABSOLUTE int_debug_check_dsa = 0x03100000 ; Sanity check DSA against
- ; SDID.
-#endif
-
-ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
-ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
-ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
-
-
-; These should start with 0x05000000, with low bits incrementing for
-; each one.
-
-#ifdef EVENTS
-ABSOLUTE int_EVENT_SELECT = 0
-ABSOLUTE int_EVENT_DISCONNECT = 0
-ABSOLUTE int_EVENT_RESELECT = 0
-ABSOLUTE int_EVENT_COMPLETE = 0
-ABSOLUTE int_EVENT_IDLE = 0
-ABSOLUTE int_EVENT_SELECT_FAILED = 0
-ABSOLUTE int_EVENT_BEFORE_SELECT = 0
-ABSOLUTE int_EVENT_RESELECT_FAILED = 0
-#endif
-
-ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
-ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
-ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
-ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
-ABSOLUTE NOP_insn = 0 ; NOP instruction
-
-; Pointer to message, potentially multi-byte
-ABSOLUTE msg_buf = 0
-
-; Pointer to holding area for reselection information
-ABSOLUTE reselected_identify = 0
-ABSOLUTE reselected_tag = 0
-
-; Request sense command pointer, it's a 6 byte command, should
-; be constant for all commands since we always want 16 bytes of
-; sense and we don't need to change any fields as we did under
-; SCSI-I when we actually cared about the LUN field.
-;EXTERNAL NCR53c7xx_sense ; Request sense command
-
-#if (CHIP != 700) && (CHIP != 70066)
-; dsa_schedule
-; PURPOSE : after a DISCONNECT message has been received, and pointers
-; saved, insert the current DSA structure at the head of the
-; disconnected queue and fall through to the scheduler.
-;
-; CALLS : OK
-;
-; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
-; of disconnected commands
-;
-; MODIFIES : SCRATCH, reconnect_dsa_head
-;
-; EXITS : always passes control to schedule
-
-ENTRY dsa_schedule
-dsa_schedule:
-#ifdef DEBUG
- INT int_debug_dsa_schedule
-#endif
-
-;
-; Calculate the address of the next pointer within the DSA
-; structure of the command that is currently disconnecting
-;
-#if (CHIP == 710)
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- MOVE SCRATCH0 + dsa_next TO SCRATCH0
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-; Point the next field of this DSA structure at the current disconnected
-; list
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
- DMODE_MEMORY_TO_MEMORY
-dsa_schedule_insert:
- MOVE MEMORY 4, reconnect_dsa_head, 0
-
-; And update the head pointer.
-#if (CHIP == 710)
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
- DMODE_MEMORY_TO_MEMORY
-/* Temporarily, see what happens. */
-#ifndef ORIGINAL
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- CLEAR ACK
-#endif
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
- WAIT DISCONNECT
-#ifdef EVENTS
- INT int_EVENT_DISCONNECT;
-#endif
-#ifdef DEBUG
- INT int_debug_disconnected
-#endif
- JUMP schedule
-#endif
-
-;
-; select
-;
-; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
-; On success, the current DSA structure is removed from the issue
-; queue. Usually, this is entered as a fall-through from schedule,
-; although the contingent allegiance handling code will write
-; the select entry address to the DSP to restart a command as a
-; REQUEST SENSE. A message is sent (usually IDENTIFY, although
-; additional SDTR or WDTR messages may be sent). COMMAND OUT
-; is handled.
-;
-; INPUTS : DSA - SCSI command, issue_dsa_head
-;
-; CALLS : NOT OK
-;
-; MODIFIES : SCRATCH, issue_dsa_head
-;
-; EXITS : on reselection or selection, go to select_failed
-; otherwise, RETURN so control is passed back to
-; dsa_begin.
-;
-
-ENTRY select
-select:
-
-#ifdef EVENTS
- INT int_EVENT_BEFORE_SELECT
-#endif
-
-#ifdef DEBUG
- INT int_debug_scheduled
-#endif
- CLEAR TARGET
-
-; XXX
-;
-; In effect, SELECTION operations are backgrounded, with execution
-; continuing until code which waits for REQ or a fatal interrupt is
-; encountered.
-;
-; So, for more performance, we could overlap the code which removes
-; the command from the NCRs issue queue with the selection, but
-; at this point I don't want to deal with the error recovery.
-;
-
-#if (CHIP != 700) && (CHIP != 70066)
-#if (CHIP == 710)
- ; Enable selection timer
-#ifdef NO_SELECTION_TIMEOUT
- MOVE CTEST7 & 0xff TO CTEST7
-#else
- MOVE CTEST7 & 0xef TO CTEST7
-#endif
-#endif
- SELECT ATN FROM dsa_select, select_failed
- JUMP select_msgout, WHEN MSG_OUT
-ENTRY select_msgout
-select_msgout:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
- MOVE FROM dsa_msgout, WHEN MSG_OUT
-#else
-ENTRY select_msgout
- SELECT ATN 0, select_failed
-select_msgout:
- MOVE 0, 0, WHEN MSGOUT
-#endif
-
-#ifdef EVENTS
- INT int_EVENT_SELECT
-#endif
- RETURN
-
-;
-; select_done
-;
-; PURPOSE: continue on to normal data transfer; called as the exit
-; point from dsa_begin.
-;
-; INPUTS: dsa
-;
-; CALLS: OK
-;
-;
-
-select_done:
-#if (CHIP == 710)
-; NOTE DSA is corrupt when we arrive here!
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-
-#ifdef DEBUG
-ENTRY select_check_dsa
-select_check_dsa:
- INT int_debug_check_dsa
-#endif
-
-; After a successful selection, we should get either a CMD phase or
-; some transfer request negotiation message.
-
- JUMP cmdout, WHEN CMD
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
-
-select_msg_in:
- CALL msg_in, WHEN MSG_IN
- JUMP select_msg_in, WHEN MSG_IN
-
-cmdout:
- INT int_err_unexpected_phase, WHEN NOT CMD
-#if (CHIP == 700)
- INT int_norm_selected
-#endif
-ENTRY cmdout_cmdout
-cmdout_cmdout:
-#if (CHIP != 700) && (CHIP != 70066)
- MOVE FROM dsa_cmdout, WHEN CMD
-#else
- MOVE 0, 0, WHEN CMD
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-
-;
-; data_transfer
-; other_out
-; other_in
-; other_transfer
-;
-; PURPOSE : handle the main data transfer for a SCSI command in
-; several parts. In the first part, data_transfer, DATA_IN
-; and DATA_OUT phases are allowed, with the user provided
-; code (usually dynamically generated based on the scatter/gather
-; list associated with a SCSI command) called to handle these
-; phases.
-;
-; After control has passed to one of the user provided
-; DATA_IN or DATA_OUT routines, back calls are made to
-; other_transfer_in or other_transfer_out to handle non-DATA IN
-; and DATA OUT phases respectively, with the state of the active
-; data pointer being preserved in TEMP.
-;
-; On completion, the user code passes control to other_transfer
-; which causes DATA_IN and DATA_OUT to result in unexpected_phase
-; interrupts so that data overruns may be trapped.
-;
-; INPUTS : DSA - SCSI command
-;
-; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
-; other_transfer
-;
-; MODIFIES : SCRATCH
-;
-; EXITS : if STATUS IN is detected, signifying command completion,
-; the NCR jumps to command_complete. If MSG IN occurs, a
-; CALL is made to msg_in. Otherwise, other_transfer runs in
-; an infinite loop.
-;
-
-ENTRY data_transfer
-data_transfer:
- JUMP cmdout_cmdout, WHEN CMD
- CALL msg_in, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- JUMP do_dataout, WHEN DATA_OUT
- JUMP do_datain, WHEN DATA_IN
- JUMP command_complete, WHEN STATUS
- JUMP data_transfer
-ENTRY end_data_transfer
-end_data_transfer:
-
-;
-; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
-; should be fixed up whenever the nexus changes so it can point to the
-; correct routine for that command.
-;
-
-#if (CHIP != 700) && (CHIP != 70066)
-; Nasty jump to dsa->dataout
-do_dataout:
-#if (CHIP == 710)
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
- DMODE_MEMORY_TO_MEMORY
-dataout_to_jump:
- MOVE MEMORY 4, 0, dataout_jump + 4
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-dataout_jump:
- JUMP 0
-
-; Nasty jump to dsa->datain
-do_datain:
-#if (CHIP == 710)
- MOVE MEMORY 4, saved_dsa, addr_scratch
-#else
- CALL dsa_to_scratch
-#endif
- MOVE SCRATCH0 + dsa_datain TO SCRATCH0
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
- DMODE_MEMORY_TO_MEMORY
-ENTRY datain_to_jump
-datain_to_jump:
- MOVE MEMORY 4, 0, datain_jump + 4
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-#ifdef DEBUG
- INT int_debug_datain
-#endif
-datain_jump:
- JUMP 0
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-
-
-; Note that other_out and other_in loop until a non-data phase
-; is discovered, so we only execute return statements when we
-; can go on to the next data phase block move statement.
-
-ENTRY other_out
-other_out:
-#if 0
- INT 0x03ffdead
-#endif
- INT int_err_unexpected_phase, WHEN CMD
- JUMP msg_in_restart, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- INT int_err_unexpected_phase, WHEN DATA_IN
- JUMP command_complete, WHEN STATUS
- JUMP other_out, WHEN NOT DATA_OUT
-#if (CHIP == 710)
-; TEMP should be OK, as we got here from a call in the user dataout code.
-#endif
- RETURN
-
-ENTRY other_in
-other_in:
-#if 0
- INT 0x03ffdead
-#endif
- INT int_err_unexpected_phase, WHEN CMD
- JUMP msg_in_restart, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- INT int_err_unexpected_phase, WHEN DATA_OUT
- JUMP command_complete, WHEN STATUS
- JUMP other_in, WHEN NOT DATA_IN
-#if (CHIP == 710)
-; TEMP should be OK, as we got here from a call in the user datain code.
-#endif
- RETURN
-
-
-ENTRY other_transfer
-other_transfer:
- INT int_err_unexpected_phase, WHEN CMD
- CALL msg_in, WHEN MSG_IN
- INT int_err_unexpected_phase, WHEN MSG_OUT
- INT int_err_unexpected_phase, WHEN DATA_OUT
- INT int_err_unexpected_phase, WHEN DATA_IN
- JUMP command_complete, WHEN STATUS
- JUMP other_transfer
-
-;
-; msg_in_restart
-; msg_in
-; munge_msg
-;
-; PURPOSE : process messages from a target. msg_in is called when the
-; caller hasn't read the first byte of the message. munge_message
-; is called when the caller has read the first byte of the message,
-; and left it in SFBR. msg_in_restart is called when the caller
-; hasn't read the first byte of the message, and wishes RETURN
-; to transfer control back to the address of the conditional
-; CALL instruction rather than to the instruction after it.
-;
-; Various int_* interrupts are generated when the host system
-; needs to intervene, as is the case with SDTR, WDTR, and
-; INITIATE RECOVERY messages.
-;
-; When the host system handles one of these interrupts,
-; it can respond by reentering at reject_message,
-; which rejects the message and returns control to
-; the caller of msg_in or munge_msg, accept_message
-; which clears ACK and returns control, or reply_message
-; which sends the message pointed to by the DSA
-; msgout_other table indirect field.
-;
-; DISCONNECT messages are handled by moving the command
-; to the reconnect_dsa_queue.
-#if (CHIP == 710)
-; NOTE: DSA should be valid when we get here - we cannot save both it
-; and TEMP in this routine.
-#endif
-;
-; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
-; only)
-;
-; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
-;
-; MODIFIES : SCRATCH, DSA on DISCONNECT
-;
-; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
-; and normal return from message handlers running under
-; Linux, control is returned to the caller. Receipt
-; of DISCONNECT messages pass control to dsa_schedule.
-;
-ENTRY msg_in_restart
-msg_in_restart:
-; XXX - hackish
-;
-; Since it's easier to debug changes to the statically
-; compiled code, rather than the dynamically generated
-; stuff, such as
-;
-; MOVE x, y, WHEN data_phase
-; CALL other_z, WHEN NOT data_phase
-; MOVE x, y, WHEN data_phase
-;
-; I'd like to have certain routines (notably the message handler)
-; restart on the conditional call rather than the next instruction.
-;
-; So, subtract 8 from the return address
-
- MOVE TEMP0 + 0xf8 TO TEMP0
- MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
- MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
- MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
-
-ENTRY msg_in
-msg_in:
- MOVE 1, msg_buf, WHEN MSG_IN
-
-munge_msg:
- JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
- JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
-;
-; XXX - I've seen a handful of broken SCSI devices which fail to issue
-; a SAVE POINTERS message before disconnecting in the middle of
-; a transfer, assuming that the DATA POINTER will be implicitly
-; restored.
-;
-; Historically, I've often done an implicit save when the DISCONNECT
-; message is processed. We may want to consider having the option of
-; doing that here.
-;
- JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
- JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
- JUMP munge_disconnect, IF 0x04 ; DISCONNECT
- INT int_msg_1, IF 0x07 ; MESSAGE REJECT
- INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
-#ifdef EVENTS
- INT int_EVENT_SELECT_FAILED
-#endif
- JUMP reject_message
-
-munge_2:
- JUMP reject_message
-;
-; The SCSI standard allows targets to recover from transient
-; error conditions by backing up the data pointer with a
-; RESTORE POINTERS message.
-;
-; So, we must save and restore the _residual_ code as well as
-; the current instruction pointer. Because of this messiness,
-; it is simpler to put dynamic code in the dsa for this and to
-; just do a simple jump down there.
-;
-
-munge_save_data_pointer:
-#if (CHIP == 710)
- ; We have something in TEMP here, so first we must save that
- MOVE TEMP0 TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE TEMP1 TO SFBR
- MOVE SFBR TO SCRATCH1
- MOVE TEMP2 TO SFBR
- MOVE SFBR TO SCRATCH2
- MOVE TEMP3 TO SFBR
- MOVE SFBR TO SCRATCH3
- MOVE MEMORY 4, addr_scratch, jump_temp + 4
- ; Now restore DSA
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
- MOVE DSA0 + dsa_save_data_pointer TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH1
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH2
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH3
-
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
- DMODE_MEMORY_TO_MEMORY
-jump_dsa_save:
- JUMP 0
-
-munge_restore_pointers:
-#if (CHIP == 710)
- ; The code at dsa_restore_pointers will RETURN, but we don't care
- ; about TEMP here, as it will overwrite it anyway.
-#endif
- MOVE DSA0 + dsa_restore_pointers TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH1
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH2
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
- MOVE SFBR TO SCRATCH3
-
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
- DMODE_MEMORY_TO_MEMORY
-jump_dsa_restore:
- JUMP 0
-
-
-munge_disconnect:
-#ifdef DEBUG
- INT int_debug_disconnect_msg
-#endif
-
-/*
- * Before, we overlapped processing with waiting for disconnect, but
- * debugging was beginning to appear messy. Temporarily move things
- * to just before the WAIT DISCONNECT.
- */
-
-#ifdef ORIGINAL
-#if (CHIP == 710)
-; Following clears Unexpected Disconnect bit. What do we do?
-#else
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- CLEAR ACK
-#endif
-
-#if (CHIP != 700) && (CHIP != 70066)
- JUMP dsa_schedule
-#else
- WAIT DISCONNECT
- INT int_norm_disconnected
-#endif
-
-munge_extended:
- CLEAR ACK
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
- MOVE 1, msg_buf + 1, WHEN MSG_IN
- JUMP munge_extended_2, IF 0x02
- JUMP munge_extended_3, IF 0x03
- JUMP reject_message
-
-munge_extended_2:
- CLEAR ACK
- MOVE 1, msg_buf + 2, WHEN MSG_IN
- JUMP reject_message, IF NOT 0x02 ; Must be WDTR
- CLEAR ACK
- MOVE 1, msg_buf + 3, WHEN MSG_IN
- INT int_msg_wdtr
-
-munge_extended_3:
- CLEAR ACK
- MOVE 1, msg_buf + 2, WHEN MSG_IN
- JUMP reject_message, IF NOT 0x01 ; Must be SDTR
- CLEAR ACK
- MOVE 2, msg_buf + 3, WHEN MSG_IN
- INT int_msg_sdtr
-
-ENTRY reject_message
-reject_message:
- SET ATN
- CLEAR ACK
- MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
- RETURN
-
-ENTRY accept_message
-accept_message:
- CLEAR ATN
- CLEAR ACK
- RETURN
-
-ENTRY respond_message
-respond_message:
- SET ATN
- CLEAR ACK
- MOVE FROM dsa_msgout_other, WHEN MSG_OUT
- RETURN
-
-;
-; command_complete
-;
-; PURPOSE : handle command termination when STATUS IN is detected by reading
-; a status byte followed by a command termination message.
-;
-; Normal termination results in an INTFLY instruction, and
-; the host system can pick out which command terminated by
-; examining the MESSAGE and STATUS buffers of all currently
-; executing commands;
-;
-; Abnormal (CHECK_CONDITION) termination results in an
-; int_err_check_condition interrupt so that a REQUEST SENSE
-; command can be issued out-of-order so that no other command
-; clears the contingent allegiance condition.
-;
-;
-; INPUTS : DSA - command
-;
-; CALLS : OK
-;
-; EXITS : On successful termination, control is passed to schedule.
-; On abnormal termination, the user will usually modify the
-; DSA fields and corresponding buffers and return control
-; to select.
-;
-
-ENTRY command_complete
-command_complete:
- MOVE FROM dsa_status, WHEN STATUS
-#if (CHIP != 700) && (CHIP != 70066)
- MOVE SFBR TO SCRATCH0 ; Save status
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-ENTRY command_complete_msgin
-command_complete_msgin:
- MOVE FROM dsa_msgin, WHEN MSG_IN
-; Indicate that we should be expecting a disconnect
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#else
- ; Above code cleared the Unexpected Disconnect bit, what do we do?
-#endif
- CLEAR ACK
-#if (CHIP != 700) && (CHIP != 70066)
- WAIT DISCONNECT
-
-;
-; The SCSI specification states that when a UNIT ATTENTION condition
-; is pending, as indicated by a CHECK CONDITION status message,
-; the target shall revert to asynchronous transfers. Since
-; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
-; basis, and returning control to our scheduler could work on a command
-; running on another lun on that target using the old parameters, we must
-; interrupt the host processor to get them changed, or change them ourselves.
-;
-; Once SCSI-II tagged queueing is implemented, things will be even more
-; hairy, since contingent allegiance conditions exist on a per-target/lun
-; basis, and issuing a new command with a different tag would clear it.
-; In these cases, we must interrupt the host processor to get a request
-; added to the HEAD of the queue with the request sense command, or we
-; must automatically issue the request sense command.
-
-#if 0
- MOVE SCRATCH0 TO SFBR
- JUMP command_failed, IF 0x02
-#endif
-#if (CHIP == 710)
-#if defined(MVME16x_INTFLY)
-; For MVME16x (ie CHIP=710) we will force an INTFLY by triggering a software
-; interrupt (SW7). We can use SCRATCH, as we are about to jump to
-; schedule, which corrupts it anyway. Will probably remove this later,
-; but want to check performance effects first.
-
-#define INTFLY_ADDR 0xfff40070
-
- MOVE 0 TO SCRATCH0
- MOVE 0x80 TO SCRATCH1
- MOVE 0 TO SCRATCH2
- MOVE 0 TO SCRATCH3
- MOVE MEMORY 4, addr_scratch, INTFLY_ADDR
-#else
- INT int_norm_emulateintfly
-#endif
-#else
- INTFLY
-#endif
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-#ifdef EVENTS
- INT int_EVENT_COMPLETE
-#endif
-#if (CHIP != 700) && (CHIP != 70066)
- JUMP schedule
-command_failed:
- INT int_err_check_condition
-#else
- INT int_norm_command_complete
-#endif
-
-;
-; wait_reselect
-;
-; PURPOSE : This is essentially the idle routine, where control lands
-; when there are no new processes to schedule. wait_reselect
-; waits for reselection, selection, and new commands.
-;
-; When a successful reselection occurs, with the aid
-; of fixed up code in each DSA, wait_reselect walks the
-; reconnect_dsa_queue, asking each dsa if the target ID
-; and LUN match its own.
-;
-; If a match is found, a call is made back to reselected_ok,
-; which through the miracles of self modifying code, extracts
-; the found DSA from the reconnect_dsa_queue and then
-; returns control to the DSA's thread of execution.
-;
-; INPUTS : NONE
-;
-; CALLS : OK
-;
-; MODIFIES : DSA,
-;
-; EXITS : On successful reselection, control is returned to the
-; DSA which called reselected_ok. If the WAIT RESELECT
-; was interrupted by a new command's arrival signaled by
-; SIG_P, control is passed to schedule. If the NCR is
-; selected, the host system is interrupted with an
-; int_err_selected which is usually responded to by
-; setting DSP to the target_abort address.
-
-ENTRY wait_reselect
-wait_reselect:
-#ifdef EVENTS
- int int_EVENT_IDLE
-#endif
-#ifdef DEBUG
- int int_debug_idle
-#endif
- WAIT RESELECT wait_reselect_failed
-
-reselected:
-#ifdef EVENTS
- int int_EVENT_RESELECT
-#endif
- CLEAR TARGET
- DMODE_MEMORY_TO_MEMORY
- ; Read all data needed to reestablish the nexus -
- MOVE 1, reselected_identify, WHEN MSG_IN
- ; We used to CLEAR ACK here.
-#if (CHIP != 700) && (CHIP != 70066)
-#ifdef DEBUG
- int int_debug_reselected
-#endif
-
- ; Point DSA at the current head of the disconnected queue.
- DMODE_MEMORY_TO_NCR
- MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- MOVE MEMORY 4, addr_scratch, saved_dsa
-#else
- CALL scratch_to_dsa
-#endif
-
- ; Fix the update-next pointer so that the reconnect_dsa_head
- ; pointer is the one that will be updated if this DSA is a hit
- ; and we remove it from the queue.
-
- MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-
-ENTRY reselected_check_next
-reselected_check_next:
-#ifdef DEBUG
- INT int_debug_reselect_check
-#endif
- ; Check for a NULL pointer.
- MOVE DSA0 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- MOVE DSA1 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- MOVE DSA2 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- MOVE DSA3 TO SFBR
- JUMP reselected_not_end, IF NOT 0
- INT int_err_unexpected_reselect
-
-reselected_not_end:
- ;
- ; XXX the ALU is only eight bits wide, and the assembler
- ; won't do the dirty work for us. As long as dsa_check_reselect
- ; is negative, we need to sign extend with 1 bits to the full
- ; 32 bit width of the address.
- ;
- ; A potential workaround would be to have a known alignment
- ; of the DSA structure such that the base address plus
- ; dsa_check_reselect doesn't require carrying from bytes
- ; higher than the LSB.
- ;
-
- MOVE DSA0 TO SFBR
- MOVE SFBR + dsa_check_reselect TO SCRATCH0
- MOVE DSA1 TO SFBR
- MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
- MOVE DSA2 TO SFBR
- MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
- MOVE DSA3 TO SFBR
- MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
-
- DMODE_NCR_TO_MEMORY
- MOVE MEMORY 4, addr_scratch, reselected_check + 4
- DMODE_MEMORY_TO_MEMORY
-#if (CHIP == 710)
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-#endif
-reselected_check:
- JUMP 0
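-
-; Editor's note : the sequence above forms "DSA + dsa_check_reselect" on the
-; eight bit ALU by adding the (negative) offset to the low byte and sign
-; extending with 0xff adds through the carry chain. A minimal host-side C
-; sketch of the same arithmetic, for illustration only - the function and
-; variable names below are hypothetical and not part of this driver :
-;
-; #include <stdint.h>
-;
-; static uint32_t add_negative_byte_offset(uint32_t dsa, uint8_t offset)
-; {
-;         /* offset is the low byte of a negative value such as dsa_check_reselect */
-;         const uint8_t addend[4] = { offset, 0xff, 0xff, 0xff };
-;         uint32_t result = 0;
-;         unsigned int carry = 0, sum, i;
-;
-;         for (i = 0; i < 4; i++) {
-;                 sum = ((dsa >> (8 * i)) & 0xff) + addend[i] + carry;
-;                 carry = sum >> 8;
-;                 result |= (uint32_t)(sum & 0xff) << (8 * i);
-;         }
-;         return result;  /* dsa + sign-extended offset, as built up in SCRATCH0-3 */
-; }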
-
-
-;
-;
-#if (CHIP == 710)
-; We have problems here - the memory move corrupts TEMP and DSA. This
-; routine is called from DSA code, and patched from many places. Scratch
-; is probably free when it is called.
-; We have to:
-; copy temp to scratch, one byte at a time
-; write scratch to patch a jump in place of the return
-; do the move memory
-; jump to the patched in return address
-; DSA is corrupt when we get here, and can be left corrupt
-
-ENTRY reselected_ok
-reselected_ok:
- MOVE TEMP0 TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE TEMP1 TO SFBR
- MOVE SFBR TO SCRATCH1
- MOVE TEMP2 TO SFBR
- MOVE SFBR TO SCRATCH2
- MOVE TEMP3 TO SFBR
- MOVE SFBR TO SCRATCH3
- MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
-reselected_ok_patch:
- MOVE MEMORY 4, 0, 0
-reselected_ok_jump:
- JUMP 0
-#else
-ENTRY reselected_ok
-reselected_ok:
-reselected_ok_patch:
- MOVE MEMORY 4, 0, 0 ; Patched : first word
- ; is address of
- ; successful dsa_next
- ; Second word is last
- ; unsuccessful dsa_next,
- ; starting with
- ; dsa_reconnect_head
- ; We used to CLEAR ACK here.
-#ifdef DEBUG
- INT int_debug_reselected_ok
-#endif
-#ifdef DEBUG
- INT int_debug_check_dsa
-#endif
- RETURN ; Return control to where
-#endif
-#else
- INT int_norm_reselected
-#endif /* (CHIP != 700) && (CHIP != 70066) */
-
-selected:
- INT int_err_selected;
-
-;
-; A select or reselect failure can be caused by one of three conditions :
-; 1. SIG_P was set. This will be the case if the user has written
-; a new value to a previously NULL head of the issue queue.
-;
-; 2. The NCR53c810 was selected or reselected by another device.
-;
-; 3. The bus was already busy since we were selected or reselected
-; before starting the command.
-
-wait_reselect_failed:
-#ifdef EVENTS
- INT int_EVENT_RESELECT_FAILED
-#endif
-; Check selected bit.
-#if (CHIP == 710)
- ; Must work out how to tell if we are selected....
-#else
- MOVE SIST0 & 0x20 TO SFBR
- JUMP selected, IF 0x20
-#endif
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
- JUMP schedule, IF 0x40
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
- MOVE ISTAT & 0x08 TO SFBR
- JUMP reselected, IF 0x08
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-#if 0
- JUMP schedule
-#else
- INT int_debug_panic
-#endif
-
-
-select_failed:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
-#ifdef EVENTS
- int int_EVENT_SELECT_FAILED
-#endif
-; Otherwise, mask the selected and reselected bits off SIST0
-#if (CHIP == 710)
- ; Let's assume we don't get selected for now
- MOVE SSTAT0 & 0x10 TO SFBR
-#else
- MOVE SIST0 & 0x30 TO SFBR
- JUMP selected, IF 0x20
-#endif
- JUMP reselected, IF 0x10
-; If SIGP is set, the user just gave us another command, and
-; we should restart or return to the scheduler.
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
- JUMP select, IF 0x40
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
-; FIXME: is this really necessary?
- MOVE ISTAT & 0x08 TO SFBR
- JUMP reselected, IF 0x08
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-#if 0
- JUMP schedule
-#else
- INT int_debug_panic
-#endif
-
-;
-; test_1
-; test_2
-;
-; PURPOSE : run some verification tests on the NCR. test_1
-; copies test_src to test_dest and interrupts the host
-; processor, testing for cache coherency and interrupt
-; problems in the process.
-;
-; test_2 runs a command with offsets relative to the
-; DSA on entry, and is useful for miscellaneous experimentation.
-;
-
-; Verify that interrupts are working correctly and that we don't
-; have a cache invalidation problem.
-
-ABSOLUTE test_src = 0, test_dest = 0
-ENTRY test_1
-test_1:
- MOVE MEMORY 4, test_src, test_dest
- INT int_test_1
-
-;
-; Run arbitrary commands, with test code establishing a DSA
-;
-
-ENTRY test_2
-test_2:
- CLEAR TARGET
-#if (CHIP == 710)
- ; Enable selection timer
-#ifdef NO_SELECTION_TIMEOUT
- MOVE CTEST7 & 0xff TO CTEST7
-#else
- MOVE CTEST7 & 0xef TO CTEST7
-#endif
-#endif
- SELECT ATN FROM 0, test_2_fail
- JUMP test_2_msgout, WHEN MSG_OUT
-ENTRY test_2_msgout
-test_2_msgout:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
- MOVE FROM 8, WHEN MSG_OUT
- MOVE FROM 16, WHEN CMD
- MOVE FROM 24, WHEN DATA_IN
- MOVE FROM 32, WHEN STATUS
- MOVE FROM 40, WHEN MSG_IN
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- CLEAR ACK
- WAIT DISCONNECT
-test_2_fail:
-#if (CHIP == 710)
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-#endif
- INT int_test_2
-
-ENTRY debug_break
-debug_break:
- INT int_debug_break
-
-;
-; initiator_abort
-; target_abort
-;
-; PURPOSE : Abort the currently established nexus from within initiator
-; or target mode.
-;
-;
-
-ENTRY target_abort
-target_abort:
- SET TARGET
- DISCONNECT
- CLEAR TARGET
- JUMP schedule
-
-ENTRY initiator_abort
-initiator_abort:
- SET ATN
-;
-; The SCSI-I specification says that targets may go into MSG out at
-; their leisure upon receipt of the ATN signal. On all versions of the
-; specification, we can't change phases until REQ transitions true->false,
-; so we need to sink/source one byte of data to allow the transition.
-;
-; For the sake of safety, we'll only source one byte of data in all
-; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
-; arbitrary number of bytes.
- JUMP spew_cmd, WHEN CMD
- JUMP eat_msgin, WHEN MSG_IN
- JUMP eat_datain, WHEN DATA_IN
- JUMP eat_status, WHEN STATUS
- JUMP spew_dataout, WHEN DATA_OUT
- JUMP sated
-spew_cmd:
- MOVE 1, NCR53c7xx_zero, WHEN CMD
- JUMP sated
-eat_msgin:
- MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
- JUMP eat_msgin, WHEN MSG_IN
- JUMP sated
-eat_status:
- MOVE 1, NCR53c7xx_sink, WHEN STATUS
- JUMP eat_status, WHEN STATUS
- JUMP sated
-eat_datain:
- MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
- JUMP eat_datain, WHEN DATA_IN
- JUMP sated
-spew_dataout:
- MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
-sated:
-#if (CHIP != 710)
- MOVE SCNTL2 & 0x7f TO SCNTL2
-#endif
- MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
- WAIT DISCONNECT
- INT int_norm_aborted
-
-#if (CHIP != 710)
-;
-; dsa_to_scratch
-; scratch_to_dsa
-;
-; PURPOSE :
-; The NCR chips cannot do a move memory instruction with the DSA register
-; as the source or destination. So, we provide a couple of subroutines
-; that let us switch between the DSA register and scratch register.
-;
-; Memory moves to/from the DSPS register also don't work, but we
-; don't use them.
-;
-;
-
-
-dsa_to_scratch:
- MOVE DSA0 TO SFBR
- MOVE SFBR TO SCRATCH0
- MOVE DSA1 TO SFBR
- MOVE SFBR TO SCRATCH1
- MOVE DSA2 TO SFBR
- MOVE SFBR TO SCRATCH2
- MOVE DSA3 TO SFBR
- MOVE SFBR TO SCRATCH3
- RETURN
-
-scratch_to_dsa:
- MOVE SCRATCH0 TO SFBR
- MOVE SFBR TO DSA0
- MOVE SCRATCH1 TO SFBR
- MOVE SFBR TO DSA1
- MOVE SCRATCH2 TO SFBR
- MOVE SFBR TO DSA2
- MOVE SCRATCH3 TO SFBR
- MOVE SFBR TO DSA3
- RETURN
-#endif
-
-#if (CHIP == 710)
-; Little patched jump, used to overcome problems with TEMP getting
-; corrupted on memory moves.
-
-jump_temp:
- JUMP 0
-#endif
diff --git a/drivers/scsi/53c7xx_d.h_shipped b/drivers/scsi/53c7xx_d.h_shipped
deleted file mode 100644
index 21d31b08ec31..000000000000
--- a/drivers/scsi/53c7xx_d.h_shipped
+++ /dev/null
@@ -1,2874 +0,0 @@
-/* DO NOT EDIT - Generated automatically by script_asm.pl */
-static u32 SCRIPT[] = {
-/*
-
-
-
-
-
-; 53c710 driver. Modified from Drew Eckhardt's driver
-; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
-;
-; I have left the script for the 53c8xx family in here, as it is likely
-; to be useful to see what I changed when bug hunting.
-
-; NCR 53c810 driver, main script
-; Sponsored by
-; iX Multiuser Multitasking Magazine
-; hm@ix.de
-;
-; Copyright 1993, 1994, 1995 Drew Eckhardt
-; Visionary Computing
-; (Unix and Linux consulting and custom programming)
-; drew@PoohSticks.ORG
-; +1 (303) 786-7975
-;
-; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
-;
-; PRE-ALPHA
-;
-; For more information, please consult
-;
-; NCR 53C810
-; PCI-SCSI I/O Processor
-; Data Manual
-;
-; NCR 53C710
-; SCSI I/O Processor
-; Programmers Guide
-;
-; NCR Microelectronics
-; 1635 Aeroplaza Drive
-; Colorado Springs, CO 80916
-; 1+ (719) 578-3400
-;
-; Toll free literature number
-; +1 (800) 334-5454
-;
-; IMPORTANT : This code is self modifying due to the limitations of
-; the NCR53c7,8xx series chips. Persons debugging this code with
-; the remote debugger should take this into account, and NOT set
-; breakpoints in modified instructions.
-;
-; Design:
-; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
-; microcontroller using a simple instruction set.
-;
-; So, to minimize the effects of interrupt latency, and to maximize
-; throughput, this driver offloads the practical maximum amount
-; of processing to the SCSI chip while still maintaining a common
-; structure.
-;
-; Where tradeoffs were needed between efficiency on the older
-; chips and the newer NCR53c800 series, the NCR53c800 series
-; was chosen.
-;
-; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
-; automate SCSI transfers without host processor intervention, this
-; isn't the case with the NCR53c710 and newer chips which allow
-;
-; - reads and writes to the internal registers from within the SCSI
-; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
-; state so that multiple threads of execution are possible, and also
-; provide an ALU for loop control, etc.
-;
-; - table indirect addressing for some instructions. This allows
-; pointers to be located relative to the DSA (Data Structure
-; Address) register.
-;
-; These features make it possible to implement a mailbox style interface,
-; where the same piece of code is run to handle I/O for multiple threads
-; at once, minimizing our need to relocate code. Since the NCR53c700/
-; NCR53c800 series have a unique combination of features, making a
-; standard ingoing/outgoing mailbox system costly, I've modified it.
-;
-; - Mailboxes are a mixture of code and data. This lets us greatly
-; simplify the NCR53c810 code and do things that would otherwise
-; not be possible.
-;
-; The saved data pointer is now implemented as follows :
-;
-; Control flow has been architected such that if control reaches
-; munge_save_data_pointer, on a restore pointers message or
-; reconnection, a jump to the address formerly in the TEMP register
-; will allow the SCSI command to resume execution.
-;
-
-;
-; Note : the DSA structures must be aligned on 32 bit boundaries,
-; since the source and destination of MOVE MEMORY instructions
-; must share the same alignment and this is the alignment of the
-; NCR registers.
-;
-
-; For some systems (MVME166, for example) dmode is always the same, so don't
-; waste time writing it
-
-
-
-
-
-
-
-
-
-
-
-ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
-ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
-ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
- ; for current dsa
-ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
- ; sync routine
-ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
- ; sscf value (53c710)
-ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
-ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
- ; saved data pointer
-ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
- ; current residual code
-ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
- ; saved residual code
-ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
-ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
-ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
-
-;
-; Once a device has initiated reselection, we need to compare it
-; against the singly linked list of commands which have disconnected
-; and are pending reselection. These commands are maintained in
-; an unordered singly linked list of DSA structures, through the
-; DSA pointers at their 'centers' headed by the reconnect_dsa_head
-; pointer.
-;
-; To avoid complications in removing commands from the list,
-; I minimize the amount of expensive (at eight operations per
-; addition @ 500-600ns each) pointer operations which must
-; be done in the NCR driver by precomputing them on the
-; host processor during dsa structure generation.
-;
-; The fixed-up per DSA code knows how to recognize the nexus
-; associated with the corresponding SCSI command, and modifies
-; the source and destination pointers for the MOVE MEMORY
-; instruction which is executed when reselected_ok is called
-; to remove the command from the list. Similarly, DSA is
-; loaded with the address of the next DSA structure and
-; reselected_check_next is called if a failure occurs.
-;
-; Perhaps more concisely, the net effect of the mess is
-;
-; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
-; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
-; src = &dsa->next;
-; if (target_id == dsa->id && target_lun == dsa->lun) {
-; *dest = *src;
-; break;
-; }
-; }
-;
-; if (!dsa)
-; error (int_err_unexpected_reselect);
-; else
-; longjmp (dsa->jump_resume, 0);
-;
-;
-
-
-; Define DSA structure used for mailboxes
-ENTRY dsa_code_template
-dsa_code_template:
-ENTRY dsa_code_begin
-dsa_code_begin:
-; RGH: Don't care about TEMP and DSA here
-
- MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
-
-at 0x00000000 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, saved_dsa
-
-at 0x00000003 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- ; We are about to go and select the device, so must set SSCF bits
- MOVE MEMORY 4, dsa_sscf_710, addr_scratch
-
-at 0x00000006 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- MOVE SCRATCH3 TO SFBR
-
-at 0x00000009 : */ 0x72370000,0x00000000,
-/*
-
-
-
- MOVE SFBR TO SBCL
-
-at 0x0000000b : */ 0x6a0b0000,0x00000000,
-/*
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000000d : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- CALL select
-
-at 0x00000010 : */ 0x88080000,0x000001f8,
-/*
-; Handle the phase mismatch which may have resulted from the
-; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
-; may or may not be necessary, and we should update script_asm.pl
-; to handle multiple pieces.
- CLEAR ATN
-
-at 0x00000012 : */ 0x60000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x00000014 : */ 0x60000040,0x00000000,
-/*
-
-; Replace second operand with address of JUMP instruction dest operand
-; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
-ENTRY dsa_code_fix_jump
-dsa_code_fix_jump:
- MOVE MEMORY 4, NOP_insn, 0
-
-at 0x00000016 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- JUMP select_done
-
-at 0x00000019 : */ 0x80080000,0x00000230,
-/*
-
-; wrong_dsa loads the DSA register with the value of the dsa_next
-; field.
-;
-wrong_dsa:
-
-; NOTE DSA is corrupt when we arrive here!
-
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the destination address is the address of the OLD
-; next pointer.
-;
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
-
-at 0x0000001b : */ 0xc0000004,0x00000000,0x000007ec,
-/*
-
-;
-; Move the _contents_ of the next pointer into the DSA register as
-; the next I_T_L or I_T_L_Q tuple to check against the established
-; nexus.
-;
- MOVE MEMORY 4, dsa_temp_next, addr_scratch
-
-at 0x0000001e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, saved_dsa
-
-at 0x00000021 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- JUMP reselected_check_next
-
-at 0x00000027 : */ 0x80080000,0x000006f0,
-/*
-
-ABSOLUTE dsa_save_data_pointer = 0
-ENTRY dsa_code_save_data_pointer
-dsa_code_save_data_pointer:
-
- ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
- ; We MUST return with DSA correct
- MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
-
-at 0x00000029 : */ 0xc0000004,0x000009c8,0x00000000,
-/*
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
-
-at 0x0000002c : */ 0xc0000018,0x00000000,0x00000000,
-/*
- CLEAR ACK
-
-at 0x0000002f : */ 0x60000040,0x00000000,
-/*
-
-
-
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000031 : */ 0xc0000004,0x00000000,0x00000000,
-/*
- JUMP jump_temp
-
-at 0x00000034 : */ 0x80080000,0x000009c4,
-/*
-
-ABSOLUTE dsa_restore_pointers = 0
-ENTRY dsa_code_restore_pointers
-dsa_code_restore_pointers:
-
- ; TEMP and DSA are corrupt when we get here, but who cares!
- MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
-
-at 0x00000036 : */ 0xc0000004,0x00000000,0x000009c8,
-/*
-; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
- MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
-
-at 0x00000039 : */ 0xc0000018,0x00000000,0x00000000,
-/*
- CLEAR ACK
-
-at 0x0000003c : */ 0x60000040,0x00000000,
-/*
- ; Restore DSA, note we don't care about TEMP
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000003e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- JUMP jump_temp
-
-at 0x00000041 : */ 0x80080000,0x000009c4,
-/*
-
-
-ABSOLUTE dsa_check_reselect = 0
-; dsa_check_reselect determines whether or not the current target and
-; lun match the current DSA
-ENTRY dsa_code_check_reselect
-dsa_code_check_reselect:
-
-
-
- MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
-
-at 0x00000043 : */ 0x72230000,0x00000000,
-/*
- JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
-
-at 0x00000045 : */ 0x80848000,0x00ffff50,
-/*
-
-
-
-
-
-;
-; Hack - move to scratch first, since SFBR is not writeable
-; via the CPU, and hence not via a MOVE MEMORY instruction.
-;
-
- MOVE MEMORY 1, reselected_identify, addr_scratch
-
-at 0x00000047 : */ 0xc0000001,0x00000000,0x00000000,
-/*
-
-
- ; BIG ENDIAN ON MVME16x
- MOVE SCRATCH3 TO SFBR
-
-at 0x0000004a : */ 0x72370000,0x00000000,
-/*
-
-
-
-; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
-; Are you sure about that? richard@sleepie.demon.co.uk
- JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
-
-at 0x0000004c : */ 0x8084f800,0x00ffff34,
-/*
-; Patch the MOVE MEMORY INSTRUCTION such that
-; the source address is the address of this dsa's
-; next pointer.
- MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
-
-at 0x0000004e : */ 0xc0000004,0x00000000,0x000007e8,
-/*
- CALL reselected_ok
-
-at 0x00000051 : */ 0x88080000,0x00000798,
-/*
-
-; Restore DSA following memory moves in reselected_ok
-; dsa_temp_sync doesn't really care about DSA, but it has an
-; optional debug INT so a valid DSA is a good idea.
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000053 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- CALL dsa_temp_sync
-
-at 0x00000056 : */ 0x88080000,0x00000000,
-/*
-; Release ACK on the IDENTIFY message _after_ we've set the synchronous
-; transfer parameters!
- CLEAR ACK
-
-at 0x00000058 : */ 0x60000040,0x00000000,
-/*
-; Implicitly restore pointers on reselection, so a RETURN
-; will transfer control back to the right spot.
- CALL REL (dsa_code_restore_pointers)
-
-at 0x0000005a : */ 0x88880000,0x00ffff68,
-/*
- RETURN
-
-at 0x0000005c : */ 0x90080000,0x00000000,
-/*
-ENTRY dsa_zero
-dsa_zero:
-ENTRY dsa_code_template_end
-dsa_code_template_end:
-
-; Sanity check : dsa_fields_start must equal dsa_code_template_end -
-; dsa_zero; if not, puke.
-
-ABSOLUTE dsa_fields_start = 0 ; Sanity marker
- ; pad 48 bytes (fix this RSN)
-ABSOLUTE dsa_next = 48 ; len 4 Next DSA
- ; del 4 Previous DSA address
-ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
-ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
- ; table indirect select
-ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
- ; select message
-ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
- ; command
-ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
-ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
-ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
-ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
-ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
- ; (Synchronous transfer negotiation, etc).
-ABSOLUTE dsa_end = 112
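-
-; Editor's note : a hypothetical C view of the offsets above, for illustration
-; only - the driver builds the DSA dynamically rather than defining a C struct,
-; and the type and field names below are invented; only the byte offsets are
-; taken from the ABSOLUTE values.
-;
-; #include <stdint.h>
-; #include <stddef.h>
-;
-; struct ncr_move_param { uint32_t count; uint32_t addr; }; /* table indirect move */
-;
-; struct ncr_dsa_fields {
-;         uint8_t  pad[48];                     /* dsa_fields_start, pad 48 bytes */
-;         uint32_t next;                        /* dsa_next = 48 */
-;         uint32_t prev;                        /* previous DSA address */
-;         uint32_t cmnd;                        /* dsa_cmnd = 56, Scsi_Cmnd * */
-;         uint32_t select;                      /* dsa_select = 60 */
-;         struct ncr_move_param msgout;         /* dsa_msgout = 64 */
-;         struct ncr_move_param cmdout;         /* dsa_cmdout = 72 */
-;         uint32_t dataout;                     /* dsa_dataout = 80 */
-;         uint32_t datain;                      /* dsa_datain = 84 */
-;         struct ncr_move_param msgin;          /* dsa_msgin = 88 */
-;         struct ncr_move_param status;         /* dsa_status = 96 */
-;         struct ncr_move_param msgout_other;   /* dsa_msgout_other = 104 */
-; };                                            /* sizeof == dsa_end == 112 */
-;
-; _Static_assert(offsetof(struct ncr_dsa_fields, msgout_other) == 104, "DSA layout");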
-
-ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
- ; terminated by a call to JUMP wait_reselect
-
-; Linked lists of DSA structures
-ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
-ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
- ; address of reconnect_dsa_head
-
-; These select the source and destination of a MOVE MEMORY instruction
-ABSOLUTE dmode_memory_to_memory = 0x0
-ABSOLUTE dmode_memory_to_ncr = 0x0
-ABSOLUTE dmode_ncr_to_memory = 0x0
-
-ABSOLUTE addr_scratch = 0x0
-ABSOLUTE addr_temp = 0x0
-
-ABSOLUTE saved_dsa = 0x0
-ABSOLUTE emulfly = 0x0
-ABSOLUTE addr_dsa = 0x0
-
-
-
-; Interrupts -
-; MSB indicates type
-; 0 handle error condition
-; 1 handle message
-; 2 handle normal condition
-; 3 debugging interrupt
-; 4 testing interrupt
-; Next byte indicates specific error
-
-; XXX not yet implemented, I'm not sure if I want to -
-; Next byte indicates the routine the error occurred in
-; The LSB indicates the specific place the error occurred
-
-ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
-ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
-ABSOLUTE int_err_unexpected_reselect = 0x00020000
-ABSOLUTE int_err_check_condition = 0x00030000
-ABSOLUTE int_err_no_phase = 0x00040000
-ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
-ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
-ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
- ; received
-
-ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
- ; registers.
-ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
-ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
-ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
-ABSOLUTE int_norm_aborted = 0x02040000 ; Aborted *dsa
-ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
-ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
-ABSOLUTE int_debug_break = 0x03000000 ; Break point
-
-ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
-
-
-ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
-ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
-ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
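-
-; Editor's note : illustrative C decoding of the interrupt codes above,
-; following the "MSB indicates type, next byte indicates the specific
-; condition" convention described earlier. The macro names are hypothetical,
-; not part of the driver.
-;
-; #define NCR_INT_TYPE(code)    (((code) >> 24) & 0xff) /* 0 err, 1 msg, 2 normal, 3 debug, 4 test */
-; #define NCR_INT_REASON(code)  (((code) >> 16) & 0xff) /* specific condition within the type */
-;
-; e.g. for int_msg_sdtr (0x01010000) both macros yield 0x01.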
-
-
-; These should start with 0x05000000, with low bits incrementing for
-; each one.
-
-
-
-ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
-ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
-ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
-ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
-ABSOLUTE NOP_insn = 0 ; NOP instruction
-
-; Pointer to message, potentially multi-byte
-ABSOLUTE msg_buf = 0
-
-; Pointer to holding area for reselection information
-ABSOLUTE reselected_identify = 0
-ABSOLUTE reselected_tag = 0
-
-; Request sense command pointer; it's a 6 byte command and should
-; be constant for all commands since we always want 16 bytes of
-; sense and we don't need to change any fields as we did under
-; SCSI-I when we actually cared about the LUN field.
-;EXTERNAL NCR53c7xx_sense ; Request sense command
-
-
-; dsa_schedule
-; PURPOSE : after a DISCONNECT message has been received, and pointers
-; saved, insert the current DSA structure at the head of the
-; disconnected queue and fall through to the scheduler.
-;
-; CALLS : OK
-;
-; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
-; of disconnected commands
-;
-; MODIFIES : SCRATCH, reconnect_dsa_head
-;
-; EXITS : always passes control to schedule
-
-ENTRY dsa_schedule
-dsa_schedule:
-
-
-
-
-;
-; Calculate the address of the next pointer within the DSA
-; structure of the command that is currently disconnecting
-;
-
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x0000005e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- MOVE SCRATCH0 + dsa_next TO SCRATCH0
-
-at 0x00000061 : */ 0x7e343000,0x00000000,
-/*
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
-
-at 0x00000063 : */ 0x7f350000,0x00000000,
-/*
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
-
-at 0x00000065 : */ 0x7f360000,0x00000000,
-/*
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-at 0x00000067 : */ 0x7f370000,0x00000000,
-/*
-
-; Point the next field of this DSA structure at the current disconnected
-; list
-
- MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
-
-at 0x00000069 : */ 0xc0000004,0x00000000,0x000001b8,
-/*
-
-dsa_schedule_insert:
- MOVE MEMORY 4, reconnect_dsa_head, 0
-
-at 0x0000006c : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-; And update the head pointer.
-
- ; Read what should be the current DSA from memory - actual DSA
- ; register is probably corrupt
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x0000006f : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
- MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
-
-at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-
-
- CLEAR ACK
-
-at 0x00000075 : */ 0x60000040,0x00000000,
-/*
-
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x00000077 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- WAIT DISCONNECT
-
-at 0x0000007a : */ 0x48000000,0x00000000,
-/*
-
-
-
-
-
-
- JUMP schedule
-
-at 0x0000007c : */ 0x80080000,0x00000000,
-/*
-
-
-;
-; select
-;
-; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
-; On success, the current DSA structure is removed from the issue
-; queue. Usually, this is entered as a fall-through from schedule,
-; although the contingent allegiance handling code will write
-; the select entry address to the DSP to restart a command as a
-; REQUEST SENSE. A message is sent (usually IDENTIFY, although
-; additional SDTR or WDTR messages may be sent). COMMAND OUT
-; is handled.
-;
-; INPUTS : DSA - SCSI command, issue_dsa_head
-;
-; CALLS : NOT OK
-;
-; MODIFIES : SCRATCH, issue_dsa_head
-;
-; EXITS : on reselection or selection, go to select_failed
-; otherwise, RETURN so control is passed back to
-; dsa_begin.
-;
-
-ENTRY select
-select:
-
-
-
-
-
-
-
-
- CLEAR TARGET
-
-at 0x0000007e : */ 0x60000200,0x00000000,
-/*
-
-; XXX
-;
-; In effect, SELECTION operations are backgrounded, with execution
-; continuing until code which waits for REQ or a fatal interrupt is
-; encountered.
-;
-; So, for more performance, we could overlap the code which removes
-; the command from the NCR's issue queue with the selection, but
-; at this point I don't want to deal with the error recovery.
-;
-
-
-
- ; Enable selection timer
-
-
-
- MOVE CTEST7 & 0xef TO CTEST7
-
-at 0x00000080 : */ 0x7c1bef00,0x00000000,
-/*
-
-
- SELECT ATN FROM dsa_select, select_failed
-
-at 0x00000082 : */ 0x4300003c,0x00000828,
-/*
- JUMP select_msgout, WHEN MSG_OUT
-
-at 0x00000084 : */ 0x860b0000,0x00000218,
-/*
-ENTRY select_msgout
-select_msgout:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x00000086 : */ 0x7a1b1000,0x00000000,
-/*
-
- MOVE FROM dsa_msgout, WHEN MSG_OUT
-
-at 0x00000088 : */ 0x1e000000,0x00000040,
-/*
-
-
-
-
-
-
-
-
-
-
- RETURN
-
-at 0x0000008a : */ 0x90080000,0x00000000,
-/*
-
-;
-; select_done
-;
-; PURPOSE: continue on to normal data transfer; called as the exit
-; point from dsa_begin.
-;
-; INPUTS: dsa
-;
-; CALLS: OK
-;
-;
-
-select_done:
-
-; NOTE DSA is corrupt when we arrive here!
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000008c : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-
-
-
-
-; After a successful selection, we should get either a CMD phase or
-; some transfer request negotiation message.
-
- JUMP cmdout, WHEN CMD
-
-at 0x0000008f : */ 0x820b0000,0x0000025c,
-/*
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
-
-at 0x00000091 : */ 0x9f030000,0x00000000,
-/*
-
-select_msg_in:
- CALL msg_in, WHEN MSG_IN
-
-at 0x00000093 : */ 0x8f0b0000,0x0000041c,
-/*
- JUMP select_msg_in, WHEN MSG_IN
-
-at 0x00000095 : */ 0x870b0000,0x0000024c,
-/*
-
-cmdout:
- INT int_err_unexpected_phase, WHEN NOT CMD
-
-at 0x00000097 : */ 0x9a030000,0x00000000,
-/*
-
-
-
-ENTRY cmdout_cmdout
-cmdout_cmdout:
-
- MOVE FROM dsa_cmdout, WHEN CMD
-
-at 0x00000099 : */ 0x1a000000,0x00000048,
-/*
-
-
-
-
-;
-; data_transfer
-; other_out
-; other_in
-; other_transfer
-;
-; PURPOSE : handle the main data transfer for a SCSI command in
-; several parts. In the first part, data_transfer, DATA_IN
-; and DATA_OUT phases are allowed, with the user provided
-; code (usually dynamically generated based on the scatter/gather
-; list associated with a SCSI command) called to handle these
-; phases.
-;
-; After control has passed to one of the user provided
-; DATA_IN or DATA_OUT routines, back calls are made to
-; other_transfer_in or other_transfer_out to handle non-DATA IN
-; and DATA OUT phases respectively, with the state of the active
-; data pointer being preserved in TEMP.
-;
-; On completion, the user code passes control to other_transfer
-; which causes DATA_IN and DATA_OUT to result in unexpected_phase
-; interrupts so that data overruns may be trapped.
-;
-; INPUTS : DSA - SCSI command
-;
-; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
-; other_transfer
-;
-; MODIFIES : SCRATCH
-;
-; EXITS : if STATUS IN is detected, signifying command completion,
-; the NCR jumps to command_complete. If MSG IN occurs, a
-; CALL is made to msg_in. Otherwise, other_transfer runs in
-; an infinite loop.
-;
-
-ENTRY data_transfer
-data_transfer:
- JUMP cmdout_cmdout, WHEN CMD
-
-at 0x0000009b : */ 0x820b0000,0x00000264,
-/*
- CALL msg_in, WHEN MSG_IN
-
-at 0x0000009d : */ 0x8f0b0000,0x0000041c,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x0000009f : */ 0x9e0b0000,0x00000000,
-/*
- JUMP do_dataout, WHEN DATA_OUT
-
-at 0x000000a1 : */ 0x800b0000,0x000002a4,
-/*
- JUMP do_datain, WHEN DATA_IN
-
-at 0x000000a3 : */ 0x810b0000,0x000002fc,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000a5 : */ 0x830b0000,0x0000065c,
-/*
- JUMP data_transfer
-
-at 0x000000a7 : */ 0x80080000,0x0000026c,
-/*
-ENTRY end_data_transfer
-end_data_transfer:
-
-;
-; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
-; should be fixed up whenever the nexus changes so it can point to the
-; correct routine for that command.
-;
-
-
-; Nasty jump to dsa->dataout
-do_dataout:
-
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x000000a9 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
-
-at 0x000000ac : */ 0x7e345000,0x00000000,
-/*
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
-
-at 0x000000ae : */ 0x7f350000,0x00000000,
-/*
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
-
-at 0x000000b0 : */ 0x7f360000,0x00000000,
-/*
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-at 0x000000b2 : */ 0x7f370000,0x00000000,
-/*
-
- MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
-
-at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
-/*
-
-dataout_to_jump:
- MOVE MEMORY 4, 0, dataout_jump + 4
-
-at 0x000000b7 : */ 0xc0000004,0x00000000,0x000002f8,
-/*
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000000ba : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-dataout_jump:
- JUMP 0
-
-at 0x000000bd : */ 0x80080000,0x00000000,
-/*
-
-; Nasty jump to dsa->dsain
-do_datain:
-
- MOVE MEMORY 4, saved_dsa, addr_scratch
-
-at 0x000000bf : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
- MOVE SCRATCH0 + dsa_datain TO SCRATCH0
-
-at 0x000000c2 : */ 0x7e345400,0x00000000,
-/*
- MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
-
-at 0x000000c4 : */ 0x7f350000,0x00000000,
-/*
- MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
-
-at 0x000000c6 : */ 0x7f360000,0x00000000,
-/*
- MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
-
-at 0x000000c8 : */ 0x7f370000,0x00000000,
-/*
-
- MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
-
-at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
-/*
-
-ENTRY datain_to_jump
-datain_to_jump:
- MOVE MEMORY 4, 0, datain_jump + 4
-
-at 0x000000cd : */ 0xc0000004,0x00000000,0x00000350,
-/*
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000000d0 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-datain_jump:
- JUMP 0
-
-at 0x000000d3 : */ 0x80080000,0x00000000,
-/*
-
-
-
-; Note that other_out and other_in loop until the data phase
-; resumes, so we only execute return statements when we
-; can go on to the next data phase block move statement.
-
-ENTRY other_out
-other_out:
-
-
-
- INT int_err_unexpected_phase, WHEN CMD
-
-at 0x000000d5 : */ 0x9a0b0000,0x00000000,
-/*
- JUMP msg_in_restart, WHEN MSG_IN
-
-at 0x000000d7 : */ 0x870b0000,0x000003fc,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x000000d9 : */ 0x9e0b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_IN
-
-at 0x000000db : */ 0x990b0000,0x00000000,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000dd : */ 0x830b0000,0x0000065c,
-/*
- JUMP other_out, WHEN NOT DATA_OUT
-
-at 0x000000df : */ 0x80030000,0x00000354,
-/*
-
-; TEMP should be OK, as we got here from a call in the user dataout code.
-
- RETURN
-
-at 0x000000e1 : */ 0x90080000,0x00000000,
-/*
-
-ENTRY other_in
-other_in:
-
-
-
- INT int_err_unexpected_phase, WHEN CMD
-
-at 0x000000e3 : */ 0x9a0b0000,0x00000000,
-/*
- JUMP msg_in_restart, WHEN MSG_IN
-
-at 0x000000e5 : */ 0x870b0000,0x000003fc,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x000000e7 : */ 0x9e0b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_OUT
-
-at 0x000000e9 : */ 0x980b0000,0x00000000,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000eb : */ 0x830b0000,0x0000065c,
-/*
- JUMP other_in, WHEN NOT DATA_IN
-
-at 0x000000ed : */ 0x81030000,0x0000038c,
-/*
-
-; TEMP should be OK, as we got here from a call in the user datain code.
-
- RETURN
-
-at 0x000000ef : */ 0x90080000,0x00000000,
-/*
-
-
-ENTRY other_transfer
-other_transfer:
- INT int_err_unexpected_phase, WHEN CMD
-
-at 0x000000f1 : */ 0x9a0b0000,0x00000000,
-/*
- CALL msg_in, WHEN MSG_IN
-
-at 0x000000f3 : */ 0x8f0b0000,0x0000041c,
-/*
- INT int_err_unexpected_phase, WHEN MSG_OUT
-
-at 0x000000f5 : */ 0x9e0b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_OUT
-
-at 0x000000f7 : */ 0x980b0000,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN DATA_IN
-
-at 0x000000f9 : */ 0x990b0000,0x00000000,
-/*
- JUMP command_complete, WHEN STATUS
-
-at 0x000000fb : */ 0x830b0000,0x0000065c,
-/*
- JUMP other_transfer
-
-at 0x000000fd : */ 0x80080000,0x000003c4,
-/*
-
-;
-; msg_in_restart
-; msg_in
-; munge_msg
-;
-; PURPOSE : process messages from a target. msg_in is called when the
-; caller hasn't read the first byte of the message. munge_msg
-; is called when the caller has read the first byte of the message,
-; and left it in SFBR. msg_in_restart is called when the caller
-; hasn't read the first byte of the message, and wishes RETURN
-; to transfer control back to the address of the conditional
-; CALL instruction rather than to the instruction after it.
-;
-; Various int_* interrupts are generated when the host system
-; needs to intervene, as is the case with SDTR, WDTR, and
-; INITIATE RECOVERY messages.
-;
-; When the host system handles one of these interrupts,
-; it can respond by reentering at reject_message,
-; which rejects the message and returns control to
-; the caller of msg_in or munge_msg, accept_message
-; which clears ACK and returns control, or respond_message
-; which sends the message pointed to by the DSA
-; msgout_other table indirect field.
-;
-; DISCONNECT messages are handled by moving the command
-; to the reconnect_dsa_queue.
-
-; NOTE: DSA should be valid when we get here - we cannot save both it
-; and TEMP in this routine.
-
-;
-; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
-; only)
-;
-; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
-;
-; MODIFIES : SCRATCH, DSA on DISCONNECT
-;
-; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
-; and normal return from message handlers running under
-; Linux, control is returned to the caller. Receipt
-; of DISCONNECT messages passes control to dsa_schedule.
-;
-ENTRY msg_in_restart
-msg_in_restart:
-; XXX - hackish
-;
-; Since it's easier to debug changes to the statically
-; compiled code, rather than the dynamically generated
-; stuff, such as
-;
-; MOVE x, y, WHEN data_phase
-; CALL other_z, WHEN NOT data_phase
-; MOVE x, y, WHEN data_phase
-;
-; I'd like to have certain routines (notably the message handler)
-; restart on the conditional call rather than the next instruction.
-;
-; So, subtract 8 from the return address
-
- MOVE TEMP0 + 0xf8 TO TEMP0
-
-at 0x000000ff : */ 0x7e1cf800,0x00000000,
-/*
- MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
-
-at 0x00000101 : */ 0x7f1dff00,0x00000000,
-/*
- MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
-
-at 0x00000103 : */ 0x7f1eff00,0x00000000,
-/*
- MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
-
-at 0x00000105 : */ 0x7f1fff00,0x00000000,
-/*
-
-ENTRY msg_in
-msg_in:
- MOVE 1, msg_buf, WHEN MSG_IN
-
-at 0x00000107 : */ 0x0f000001,0x00000000,
-/*
-
-munge_msg:
- JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
-
-at 0x00000109 : */ 0x800c0001,0x00000574,
-/*
- JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
-
-at 0x0000010b : */ 0x800cdf20,0x00000464,
-/*
-;
-; XXX - I've seen a handful of broken SCSI devices which fail to issue
-; a SAVE POINTERS message before disconnecting in the middle of
-; a transfer, assuming that the DATA POINTER will be implicitly
-; restored.
-;
-; Historically, I've often done an implicit save when the DISCONNECT
-; message is processed. We may want to consider having the option of
-; doing that here.
-;
- JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
-
-at 0x0000010d : */ 0x800c0002,0x0000046c,
-/*
- JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
-
-at 0x0000010f : */ 0x800c0003,0x00000518,
-/*
- JUMP munge_disconnect, IF 0x04 ; DISCONNECT
-
-at 0x00000111 : */ 0x800c0004,0x0000056c,
-/*
- INT int_msg_1, IF 0x07 ; MESSAGE REJECT
-
-at 0x00000113 : */ 0x980c0007,0x01020000,
-/*
- INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
-
-at 0x00000115 : */ 0x980c000f,0x01020000,
-/*
-
-
-
- JUMP reject_message
-
-at 0x00000117 : */ 0x80080000,0x00000604,
-/*
-
-munge_2:
- JUMP reject_message
-
-at 0x00000119 : */ 0x80080000,0x00000604,
-/*
-;
-; The SCSI standard allows targets to recover from transient
-; error conditions by backing up the data pointer with a
-; RESTORE POINTERS message.
-;
-; So, we must save and restore the _residual_ code as well as
-; the current instruction pointer. Because of this messiness,
-; it is simpler to put dynamic code in the dsa for this and to
-; just do a simple jump down there.
-;
-
-munge_save_data_pointer:
-
- ; We have something in TEMP here, so first we must save that
- MOVE TEMP0 TO SFBR
-
-at 0x0000011b : */ 0x721c0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x0000011d : */ 0x6a340000,0x00000000,
-/*
- MOVE TEMP1 TO SFBR
-
-at 0x0000011f : */ 0x721d0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x00000121 : */ 0x6a350000,0x00000000,
-/*
- MOVE TEMP2 TO SFBR
-
-at 0x00000123 : */ 0x721e0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x00000125 : */ 0x6a360000,0x00000000,
-/*
- MOVE TEMP3 TO SFBR
-
-at 0x00000127 : */ 0x721f0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x00000129 : */ 0x6a370000,0x00000000,
-/*
- MOVE MEMORY 4, addr_scratch, jump_temp + 4
-
-at 0x0000012b : */ 0xc0000004,0x00000000,0x000009c8,
-/*
- ; Now restore DSA
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x0000012e : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
- MOVE DSA0 + dsa_save_data_pointer TO SFBR
-
-at 0x00000131 : */ 0x76100000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x00000133 : */ 0x6a340000,0x00000000,
-/*
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
-
-at 0x00000135 : */ 0x7711ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x00000137 : */ 0x6a350000,0x00000000,
-/*
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
-
-at 0x00000139 : */ 0x7712ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x0000013b : */ 0x6a360000,0x00000000,
-/*
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
-
-at 0x0000013d : */ 0x7713ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x0000013f : */ 0x6a370000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
-
-at 0x00000141 : */ 0xc0000004,0x00000000,0x00000514,
-/*
-
-jump_dsa_save:
- JUMP 0
-
-at 0x00000144 : */ 0x80080000,0x00000000,
-/*
-
-munge_restore_pointers:
-
- ; The code at dsa_restore_pointers will RETURN, but we don't care
- ; about TEMP here, as it will overwrite it anyway.
-
- MOVE DSA0 + dsa_restore_pointers TO SFBR
-
-at 0x00000146 : */ 0x76100000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x00000148 : */ 0x6a340000,0x00000000,
-/*
- MOVE DSA1 + 0xff TO SFBR WITH CARRY
-
-at 0x0000014a : */ 0x7711ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x0000014c : */ 0x6a350000,0x00000000,
-/*
- MOVE DSA2 + 0xff TO SFBR WITH CARRY
-
-at 0x0000014e : */ 0x7712ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x00000150 : */ 0x6a360000,0x00000000,
-/*
- MOVE DSA3 + 0xff TO SFBR WITH CARRY
-
-at 0x00000152 : */ 0x7713ff00,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x00000154 : */ 0x6a370000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
-
-at 0x00000156 : */ 0xc0000004,0x00000000,0x00000568,
-/*
-
-jump_dsa_restore:
- JUMP 0
-
-at 0x00000159 : */ 0x80080000,0x00000000,
-/*
-
-
-munge_disconnect:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- JUMP dsa_schedule
-
-at 0x0000015b : */ 0x80080000,0x00000178,
-/*
-
-
-
-
-
-munge_extended:
- CLEAR ACK
-
-at 0x0000015d : */ 0x60000040,0x00000000,
-/*
- INT int_err_unexpected_phase, WHEN NOT MSG_IN
-
-at 0x0000015f : */ 0x9f030000,0x00000000,
-/*
- MOVE 1, msg_buf + 1, WHEN MSG_IN
-
-at 0x00000161 : */ 0x0f000001,0x00000001,
-/*
- JUMP munge_extended_2, IF 0x02
-
-at 0x00000163 : */ 0x800c0002,0x000005a4,
-/*
- JUMP munge_extended_3, IF 0x03
-
-at 0x00000165 : */ 0x800c0003,0x000005d4,
-/*
- JUMP reject_message
-
-at 0x00000167 : */ 0x80080000,0x00000604,
-/*
-
-munge_extended_2:
- CLEAR ACK
-
-at 0x00000169 : */ 0x60000040,0x00000000,
-/*
- MOVE 1, msg_buf + 2, WHEN MSG_IN
-
-at 0x0000016b : */ 0x0f000001,0x00000002,
-/*
- JUMP reject_message, IF NOT 0x02 ; Must be WDTR
-
-at 0x0000016d : */ 0x80040002,0x00000604,
-/*
- CLEAR ACK
-
-at 0x0000016f : */ 0x60000040,0x00000000,
-/*
- MOVE 1, msg_buf + 3, WHEN MSG_IN
-
-at 0x00000171 : */ 0x0f000001,0x00000003,
-/*
- INT int_msg_wdtr
-
-at 0x00000173 : */ 0x98080000,0x01000000,
-/*
-
-munge_extended_3:
- CLEAR ACK
-
-at 0x00000175 : */ 0x60000040,0x00000000,
-/*
- MOVE 1, msg_buf + 2, WHEN MSG_IN
-
-at 0x00000177 : */ 0x0f000001,0x00000002,
-/*
- JUMP reject_message, IF NOT 0x01 ; Must be SDTR
-
-at 0x00000179 : */ 0x80040001,0x00000604,
-/*
- CLEAR ACK
-
-at 0x0000017b : */ 0x60000040,0x00000000,
-/*
- MOVE 2, msg_buf + 3, WHEN MSG_IN
-
-at 0x0000017d : */ 0x0f000002,0x00000003,
-/*
- INT int_msg_sdtr
-
-at 0x0000017f : */ 0x98080000,0x01010000,
-/*
-
-ENTRY reject_message
-reject_message:
- SET ATN
-
-at 0x00000181 : */ 0x58000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x00000183 : */ 0x60000040,0x00000000,
-/*
- MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
-
-at 0x00000185 : */ 0x0e000001,0x00000000,
-/*
- RETURN
-
-at 0x00000187 : */ 0x90080000,0x00000000,
-/*
-
-ENTRY accept_message
-accept_message:
- CLEAR ATN
-
-at 0x00000189 : */ 0x60000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x0000018b : */ 0x60000040,0x00000000,
-/*
- RETURN
-
-at 0x0000018d : */ 0x90080000,0x00000000,
-/*
-
-ENTRY respond_message
-respond_message:
- SET ATN
-
-at 0x0000018f : */ 0x58000008,0x00000000,
-/*
- CLEAR ACK
-
-at 0x00000191 : */ 0x60000040,0x00000000,
-/*
- MOVE FROM dsa_msgout_other, WHEN MSG_OUT
-
-at 0x00000193 : */ 0x1e000000,0x00000068,
-/*
- RETURN
-
-at 0x00000195 : */ 0x90080000,0x00000000,
-/*
-
-;
-; command_complete
-;
-; PURPOSE : handle command termination when STATUS IN is detected by reading
-; a status byte followed by a command termination message.
-;
-; Normal termination results in an INTFLY instruction, and
-; the host system can pick out which command terminated by
-; examining the MESSAGE and STATUS buffers of all currently
-; executing commands;
-;
-; Abnormal (CHECK_CONDITION) termination results in an
-; int_err_check_condition interrupt so that a REQUEST SENSE
-; command can be issued out-of-order so that no other command
-; clears the contingent allegiance condition.
-;
-;
-; INPUTS : DSA - command
-;
-; CALLS : OK
-;
-; EXITS : On successful termination, control is passed to schedule.
-; On abnormal termination, the user will usually modify the
-; DSA fields and corresponding buffers and return control
-; to select.
-;
-
-ENTRY command_complete
-command_complete:
- MOVE FROM dsa_status, WHEN STATUS
-
-at 0x00000197 : */ 0x1b000000,0x00000060,
-/*
-
- MOVE SFBR TO SCRATCH0 ; Save status
-
-at 0x00000199 : */ 0x6a340000,0x00000000,
-/*
-
-ENTRY command_complete_msgin
-command_complete_msgin:
- MOVE FROM dsa_msgin, WHEN MSG_IN
-
-at 0x0000019b : */ 0x1f000000,0x00000058,
-/*
-; Indicate that we should be expecting a disconnect
-
-
-
- ; Above code cleared the Unexpected Disconnect bit, what do we do?
-
- CLEAR ACK
-
-at 0x0000019d : */ 0x60000040,0x00000000,
-/*
-
- WAIT DISCONNECT
-
-at 0x0000019f : */ 0x48000000,0x00000000,
-/*
-
-;
-; The SCSI specification states that when a UNIT ATTENTION condition
-; is pending, as indicated by a CHECK CONDITION status message,
-; the target shall revert to asynchronous transfers. Since
-; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
-; basis, and returning control to our scheduler could work on a command
-; running on another lun on that target using the old parameters, we must
-; interrupt the host processor to get them changed, or change them ourselves.
-;
-; Once SCSI-II tagged queueing is implemented, things will be even more
-; hairy, since contingent allegiance conditions exist on a per-target/lun
-; basis, and issuing a new command with a different tag would clear it.
-; In these cases, we must interrupt the host processor to get a request
-; added to the HEAD of the queue with the request sense command, or we
-; must automatically issue the request sense command.
-
-
-
-
-
-
-
- INT int_norm_emulateintfly
-
-at 0x000001a1 : */ 0x98080000,0x02060000,
-/*
-
-
-
-
-
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000001a3 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
-
- JUMP schedule
-
-at 0x000001a6 : */ 0x80080000,0x00000000,
-/*
-command_failed:
- INT int_err_check_condition
-
-at 0x000001a8 : */ 0x98080000,0x00030000,
-/*
-
-
-
-
-;
-; wait_reselect
-;
-; PURPOSE : This is essentially the idle routine, where control lands
-; when there are no new processes to schedule. wait_reselect
-; waits for reselection, selection, and new commands.
-;
-; When a successful reselection occurs, with the aid
-; of fixed up code in each DSA, wait_reselect walks the
-; reconnect_dsa_queue, asking each dsa if the target ID
-; and LUN match its own.
-;
-; If a match is found, a call is made back to reselected_ok,
-; which through the miracles of self modifying code, extracts
-; the found DSA from the reconnect_dsa_queue and then
-; returns control to the DSA's thread of execution.
-;
-; INPUTS : NONE
-;
-; CALLS : OK
-;
-; MODIFIES : DSA,
-;
-; EXITS : On successful reselection, control is returned to the
-; DSA which called reselected_ok. If the WAIT RESELECT
-; was interrupted by a new command's arrival signaled by
-; SIG_P, control is passed to schedule. If the NCR is
-; selected, the host system is interrupted with an
-; int_err_selected which is usually responded to by
-; setting DSP to the target_abort address.
-
-ENTRY wait_reselect
-wait_reselect:
-
-
-
-
-
-
- WAIT RESELECT wait_reselect_failed
-
-at 0x000001aa : */ 0x50000000,0x00000800,
-/*
-
-reselected:
-
-
-
- CLEAR TARGET
-
-at 0x000001ac : */ 0x60000200,0x00000000,
-/*
-
- ; Read all data needed to reestablish the nexus -
- MOVE 1, reselected_identify, WHEN MSG_IN
-
-at 0x000001ae : */ 0x0f000001,0x00000000,
-/*
- ; We used to CLEAR ACK here.
-
-
-
-
-
- ; Point DSA at the current head of the disconnected queue.
-
- MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
-
-at 0x000001b0 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, saved_dsa
-
-at 0x000001b3 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-
-
- ; Fix the update-next pointer so that the reconnect_dsa_head
- ; pointer is the one that will be updated if this DSA is a hit
- ; and we remove it from the queue.
-
- MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
-
-at 0x000001b6 : */ 0xc0000004,0x00000000,0x000007ec,
-/*
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000001b9 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-
-ENTRY reselected_check_next
-reselected_check_next:
-
-
-
- ; Check for a NULL pointer.
- MOVE DSA0 TO SFBR
-
-at 0x000001bc : */ 0x72100000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001be : */ 0x80040000,0x00000738,
-/*
- MOVE DSA1 TO SFBR
-
-at 0x000001c0 : */ 0x72110000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001c2 : */ 0x80040000,0x00000738,
-/*
- MOVE DSA2 TO SFBR
-
-at 0x000001c4 : */ 0x72120000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001c6 : */ 0x80040000,0x00000738,
-/*
- MOVE DSA3 TO SFBR
-
-at 0x000001c8 : */ 0x72130000,0x00000000,
-/*
- JUMP reselected_not_end, IF NOT 0
-
-at 0x000001ca : */ 0x80040000,0x00000738,
-/*
- INT int_err_unexpected_reselect
-
-at 0x000001cc : */ 0x98080000,0x00020000,
-/*
-
-reselected_not_end:
- ;
- ; XXX the ALU is only eight bits wide, and the assembler
- ; won't do the dirty work for us. As long as dsa_check_reselect
- ; is negative, we need to sign extend with 1 bits to the full
- ; 32 bit width of the address.
- ;
- ; A potential workaround would be to have a known alignment
- ; of the DSA structure such that the base address plus
- ; dsa_check_reselect doesn't require carrying from bytes
- ; higher than the LSB.
- ;
-
- MOVE DSA0 TO SFBR
-
-at 0x000001ce : */ 0x72100000,0x00000000,
-/*
- MOVE SFBR + dsa_check_reselect TO SCRATCH0
-
-at 0x000001d0 : */ 0x6e340000,0x00000000,
-/*
- MOVE DSA1 TO SFBR
-
-at 0x000001d2 : */ 0x72110000,0x00000000,
-/*
- MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
-
-at 0x000001d4 : */ 0x6f35ff00,0x00000000,
-/*
- MOVE DSA2 TO SFBR
-
-at 0x000001d6 : */ 0x72120000,0x00000000,
-/*
- MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
-
-at 0x000001d8 : */ 0x6f36ff00,0x00000000,
-/*
- MOVE DSA3 TO SFBR
-
-at 0x000001da : */ 0x72130000,0x00000000,
-/*
- MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
-
-at 0x000001dc : */ 0x6f37ff00,0x00000000,
-/*
-
-
- MOVE MEMORY 4, addr_scratch, reselected_check + 4
-
-at 0x000001de : */ 0xc0000004,0x00000000,0x00000794,
-/*
-
-
- ; Time to correct DSA following memory move
- MOVE MEMORY 4, saved_dsa, addr_dsa
-
-at 0x000001e1 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-
-reselected_check:
- JUMP 0
-
-at 0x000001e4 : */ 0x80080000,0x00000000,
-/*
-
-
-;
-;
-
-; We have problems here - the memory move corrupts TEMP and DSA. This
-; routine is called from DSA code, and patched from many places. Scratch
-; is probably free when it is called.
-; We have to:
-; copy temp to scratch, one byte at a time
-; write scratch to patch a jump in place of the return
-; do the move memory
-; jump to the patched in return address
-; DSA is corrupt when we get here, and can be left corrupt
-
-ENTRY reselected_ok
-reselected_ok:
- MOVE TEMP0 TO SFBR
-
-at 0x000001e6 : */ 0x721c0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH0
-
-at 0x000001e8 : */ 0x6a340000,0x00000000,
-/*
- MOVE TEMP1 TO SFBR
-
-at 0x000001ea : */ 0x721d0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH1
-
-at 0x000001ec : */ 0x6a350000,0x00000000,
-/*
- MOVE TEMP2 TO SFBR
-
-at 0x000001ee : */ 0x721e0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH2
-
-at 0x000001f0 : */ 0x6a360000,0x00000000,
-/*
- MOVE TEMP3 TO SFBR
-
-at 0x000001f2 : */ 0x721f0000,0x00000000,
-/*
- MOVE SFBR TO SCRATCH3
-
-at 0x000001f4 : */ 0x6a370000,0x00000000,
-/*
- MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
-
-at 0x000001f6 : */ 0xc0000004,0x00000000,0x000007f4,
-/*
-reselected_ok_patch:
- MOVE MEMORY 4, 0, 0
-
-at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
-/*
-reselected_ok_jump:
- JUMP 0
-
-at 0x000001fc : */ 0x80080000,0x00000000,
-/*
-
-
-
-
-
-selected:
- INT int_err_selected;
-
-at 0x000001fe : */ 0x98080000,0x00010000,
-/*
-
-;
-; A select or reselect failure can be caused by one of two conditions :
-; 1. SIG_P was set. This will be the case if the user has written
-; a new value to a previously NULL head of the issue queue.
-;
-; 2. The NCR53c810 was selected or reselected by another device.
-;
-; 3. The bus was already busy since we were selected or reselected
-; before starting the command.
-
-wait_reselect_failed:
-
-
-
-; Check selected bit.
-
- ; Must work out how to tell if we are selected....
-
-
-
-
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
-
-at 0x00000200 : */ 0x74164000,0x00000000,
-/*
- JUMP schedule, IF 0x40
-
-at 0x00000202 : */ 0x800c0040,0x00000000,
-/*
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
- MOVE ISTAT & 0x08 TO SFBR
-
-at 0x00000204 : */ 0x74210800,0x00000000,
-/*
- JUMP reselected, IF 0x08
-
-at 0x00000206 : */ 0x800c0008,0x000006b0,
-/*
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-
-
-
- INT int_debug_panic
-
-at 0x00000208 : */ 0x98080000,0x030b0000,
-/*
-
-
-
-select_failed:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x0000020a : */ 0x7a1b1000,0x00000000,
-/*
-
-
-
-
-; Otherwise, mask the selected and reselected bits off SIST0
-
- ; Let's assume we don't get selected for now
- MOVE SSTAT0 & 0x10 TO SFBR
-
-at 0x0000020c : */ 0x740d1000,0x00000000,
-/*
-
-
-
-
- JUMP reselected, IF 0x10
-
-at 0x0000020e : */ 0x800c0010,0x000006b0,
-/*
-; If SIGP is set, the user just gave us another command, and
-; we should restart or return to the scheduler.
-; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
- MOVE CTEST2 & 0x40 TO SFBR
-
-at 0x00000210 : */ 0x74164000,0x00000000,
-/*
- JUMP select, IF 0x40
-
-at 0x00000212 : */ 0x800c0040,0x000001f8,
-/*
-; Check connected bit.
-; FIXME: this needs to change if we support target mode
-; FIXME: is this really necessary?
- MOVE ISTAT & 0x08 TO SFBR
-
-at 0x00000214 : */ 0x74210800,0x00000000,
-/*
- JUMP reselected, IF 0x08
-
-at 0x00000216 : */ 0x800c0008,0x000006b0,
-/*
-; FIXME : Something bogus happened, and we shouldn't fail silently.
-
-
-
- INT int_debug_panic
-
-at 0x00000218 : */ 0x98080000,0x030b0000,
-/*
-
-
-;
-; test_1
-; test_2
-;
-; PURPOSE : run some verification tests on the NCR. test_1
-; copies test_src to test_dest and interrupts the host
-; processor, testing for cache coherency and interrupt
-; problems in the processes.
-;
-; test_2 runs a command with offsets relative to the
-; DSA on entry, and is useful for miscellaneous experimentation.
-;
-
-; Verify that interrupts are working correctly and that we don't
-; have a cache invalidation problem.
-
-ABSOLUTE test_src = 0, test_dest = 0
-ENTRY test_1
-test_1:
- MOVE MEMORY 4, test_src, test_dest
-
-at 0x0000021a : */ 0xc0000004,0x00000000,0x00000000,
-/*
- INT int_test_1
-
-at 0x0000021d : */ 0x98080000,0x04000000,
-/*
-
-;
-; Run arbitrary commands, with test code establishing a DSA
-;
-
-ENTRY test_2
-test_2:
- CLEAR TARGET
-
-at 0x0000021f : */ 0x60000200,0x00000000,
-/*
-
- ; Enable selection timer
-
-
-
- MOVE CTEST7 & 0xef TO CTEST7
-
-at 0x00000221 : */ 0x7c1bef00,0x00000000,
-/*
-
-
- SELECT ATN FROM 0, test_2_fail
-
-at 0x00000223 : */ 0x43000000,0x000008dc,
-/*
- JUMP test_2_msgout, WHEN MSG_OUT
-
-at 0x00000225 : */ 0x860b0000,0x0000089c,
-/*
-ENTRY test_2_msgout
-test_2_msgout:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x00000227 : */ 0x7a1b1000,0x00000000,
-/*
-
- MOVE FROM 8, WHEN MSG_OUT
-
-at 0x00000229 : */ 0x1e000000,0x00000008,
-/*
- MOVE FROM 16, WHEN CMD
-
-at 0x0000022b : */ 0x1a000000,0x00000010,
-/*
- MOVE FROM 24, WHEN DATA_IN
-
-at 0x0000022d : */ 0x19000000,0x00000018,
-/*
- MOVE FROM 32, WHEN STATUS
-
-at 0x0000022f : */ 0x1b000000,0x00000020,
-/*
- MOVE FROM 40, WHEN MSG_IN
-
-at 0x00000231 : */ 0x1f000000,0x00000028,
-/*
-
-
-
- CLEAR ACK
-
-at 0x00000233 : */ 0x60000040,0x00000000,
-/*
- WAIT DISCONNECT
-
-at 0x00000235 : */ 0x48000000,0x00000000,
-/*
-test_2_fail:
-
- ; Disable selection timer
- MOVE CTEST7 | 0x10 TO CTEST7
-
-at 0x00000237 : */ 0x7a1b1000,0x00000000,
-/*
-
- INT int_test_2
-
-at 0x00000239 : */ 0x98080000,0x04010000,
-/*
-
-ENTRY debug_break
-debug_break:
- INT int_debug_break
-
-at 0x0000023b : */ 0x98080000,0x03000000,
-/*
-
-;
-; initiator_abort
-; target_abort
-;
-; PURPOSE : Abort the currently established nexus from with initiator
-; or target mode.
-;
-;
-
-ENTRY target_abort
-target_abort:
- SET TARGET
-
-at 0x0000023d : */ 0x58000200,0x00000000,
-/*
- DISCONNECT
-
-at 0x0000023f : */ 0x48000000,0x00000000,
-/*
- CLEAR TARGET
-
-at 0x00000241 : */ 0x60000200,0x00000000,
-/*
- JUMP schedule
-
-at 0x00000243 : */ 0x80080000,0x00000000,
-/*
-
-ENTRY initiator_abort
-initiator_abort:
- SET ATN
-
-at 0x00000245 : */ 0x58000008,0x00000000,
-/*
-;
-; The SCSI-I specification says that targets may go into MSG out at
-; their leisure upon receipt of the ATN single. On all versions of the
-; specification, we can't change phases until REQ transitions true->false,
-; so we need to sink/source one byte of data to allow the transition.
-;
-; For the sake of safety, we'll only source one byte of data in all
-; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
-; arbitrary number of bytes.
- JUMP spew_cmd, WHEN CMD
-
-at 0x00000247 : */ 0x820b0000,0x0000094c,
-/*
- JUMP eat_msgin, WHEN MSG_IN
-
-at 0x00000249 : */ 0x870b0000,0x0000095c,
-/*
- JUMP eat_datain, WHEN DATA_IN
-
-at 0x0000024b : */ 0x810b0000,0x0000098c,
-/*
- JUMP eat_status, WHEN STATUS
-
-at 0x0000024d : */ 0x830b0000,0x00000974,
-/*
- JUMP spew_dataout, WHEN DATA_OUT
-
-at 0x0000024f : */ 0x800b0000,0x000009a4,
-/*
- JUMP sated
-
-at 0x00000251 : */ 0x80080000,0x000009ac,
-/*
-spew_cmd:
- MOVE 1, NCR53c7xx_zero, WHEN CMD
-
-at 0x00000253 : */ 0x0a000001,0x00000000,
-/*
- JUMP sated
-
-at 0x00000255 : */ 0x80080000,0x000009ac,
-/*
-eat_msgin:
- MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
-
-at 0x00000257 : */ 0x0f000001,0x00000000,
-/*
- JUMP eat_msgin, WHEN MSG_IN
-
-at 0x00000259 : */ 0x870b0000,0x0000095c,
-/*
- JUMP sated
-
-at 0x0000025b : */ 0x80080000,0x000009ac,
-/*
-eat_status:
- MOVE 1, NCR53c7xx_sink, WHEN STATUS
-
-at 0x0000025d : */ 0x0b000001,0x00000000,
-/*
- JUMP eat_status, WHEN STATUS
-
-at 0x0000025f : */ 0x830b0000,0x00000974,
-/*
- JUMP sated
-
-at 0x00000261 : */ 0x80080000,0x000009ac,
-/*
-eat_datain:
- MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
-
-at 0x00000263 : */ 0x09000001,0x00000000,
-/*
- JUMP eat_datain, WHEN DATA_IN
-
-at 0x00000265 : */ 0x810b0000,0x0000098c,
-/*
- JUMP sated
-
-at 0x00000267 : */ 0x80080000,0x000009ac,
-/*
-spew_dataout:
- MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
-
-at 0x00000269 : */ 0x08000001,0x00000000,
-/*
-sated:
-
-
-
- MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
-
-at 0x0000026b : */ 0x0e000001,0x00000000,
-/*
- WAIT DISCONNECT
-
-at 0x0000026d : */ 0x48000000,0x00000000,
-/*
- INT int_norm_aborted
-
-at 0x0000026f : */ 0x98080000,0x02040000,
-/*
-
-
-
-
-; Little patched jump, used to overcome problems with TEMP getting
-; corrupted on memory moves.
-
-jump_temp:
- JUMP 0
-
-at 0x00000271 : */ 0x80080000,0x00000000,
-};
-
-#define A_NCR53c7xx_msg_abort 0x00000000
-static u32 A_NCR53c7xx_msg_abort_used[] __attribute((unused)) = {
- 0x0000026c,
-};
-
-#define A_NCR53c7xx_msg_reject 0x00000000
-static u32 A_NCR53c7xx_msg_reject_used[] __attribute((unused)) = {
- 0x00000186,
-};
-
-#define A_NCR53c7xx_sink 0x00000000
-static u32 A_NCR53c7xx_sink_used[] __attribute((unused)) = {
- 0x00000258,
- 0x0000025e,
- 0x00000264,
-};
-
-#define A_NCR53c7xx_zero 0x00000000
-static u32 A_NCR53c7xx_zero_used[] __attribute((unused)) = {
- 0x00000254,
- 0x0000026a,
-};
-
-#define A_NOP_insn 0x00000000
-static u32 A_NOP_insn_used[] __attribute((unused)) = {
- 0x00000017,
-};
-
-#define A_addr_dsa 0x00000000
-static u32 A_addr_dsa_used[] __attribute((unused)) = {
- 0x0000000f,
- 0x00000026,
- 0x00000033,
- 0x00000040,
- 0x00000055,
- 0x00000079,
- 0x0000008e,
- 0x000000bc,
- 0x000000d2,
- 0x00000130,
- 0x000001a5,
- 0x000001bb,
- 0x000001e3,
-};
-
-#define A_addr_reconnect_dsa_head 0x00000000
-static u32 A_addr_reconnect_dsa_head_used[] __attribute((unused)) = {
- 0x000001b7,
-};
-
-#define A_addr_scratch 0x00000000
-static u32 A_addr_scratch_used[] __attribute((unused)) = {
- 0x00000002,
- 0x00000004,
- 0x00000008,
- 0x00000020,
- 0x00000022,
- 0x00000049,
- 0x00000060,
- 0x0000006a,
- 0x00000071,
- 0x00000073,
- 0x000000ab,
- 0x000000b5,
- 0x000000c1,
- 0x000000cb,
- 0x0000012c,
- 0x00000142,
- 0x00000157,
- 0x000001b2,
- 0x000001b4,
- 0x000001df,
- 0x000001f7,
-};
-
-#define A_addr_temp 0x00000000
-static u32 A_addr_temp_used[] __attribute((unused)) = {
-};
-
-#define A_dmode_memory_to_memory 0x00000000
-static u32 A_dmode_memory_to_memory_used[] __attribute((unused)) = {
-};
-
-#define A_dmode_memory_to_ncr 0x00000000
-static u32 A_dmode_memory_to_ncr_used[] __attribute((unused)) = {
-};
-
-#define A_dmode_ncr_to_memory 0x00000000
-static u32 A_dmode_ncr_to_memory_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_check_reselect 0x00000000
-static u32 A_dsa_check_reselect_used[] __attribute((unused)) = {
- 0x000001d0,
-};
-
-#define A_dsa_cmdout 0x00000048
-static u32 A_dsa_cmdout_used[] __attribute((unused)) = {
- 0x0000009a,
-};
-
-#define A_dsa_cmnd 0x00000038
-static u32 A_dsa_cmnd_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_datain 0x00000054
-static u32 A_dsa_datain_used[] __attribute((unused)) = {
- 0x000000c2,
-};
-
-#define A_dsa_dataout 0x00000050
-static u32 A_dsa_dataout_used[] __attribute((unused)) = {
- 0x000000ac,
-};
-
-#define A_dsa_end 0x00000070
-static u32 A_dsa_end_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_fields_start 0x00000000
-static u32 A_dsa_fields_start_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_msgin 0x00000058
-static u32 A_dsa_msgin_used[] __attribute((unused)) = {
- 0x0000019c,
-};
-
-#define A_dsa_msgout 0x00000040
-static u32 A_dsa_msgout_used[] __attribute((unused)) = {
- 0x00000089,
-};
-
-#define A_dsa_msgout_other 0x00000068
-static u32 A_dsa_msgout_other_used[] __attribute((unused)) = {
- 0x00000194,
-};
-
-#define A_dsa_next 0x00000030
-static u32 A_dsa_next_used[] __attribute((unused)) = {
- 0x00000061,
-};
-
-#define A_dsa_restore_pointers 0x00000000
-static u32 A_dsa_restore_pointers_used[] __attribute((unused)) = {
- 0x00000146,
-};
-
-#define A_dsa_save_data_pointer 0x00000000
-static u32 A_dsa_save_data_pointer_used[] __attribute((unused)) = {
- 0x00000131,
-};
-
-#define A_dsa_select 0x0000003c
-static u32 A_dsa_select_used[] __attribute((unused)) = {
- 0x00000082,
-};
-
-#define A_dsa_sscf_710 0x00000000
-static u32 A_dsa_sscf_710_used[] __attribute((unused)) = {
- 0x00000007,
-};
-
-#define A_dsa_status 0x00000060
-static u32 A_dsa_status_used[] __attribute((unused)) = {
- 0x00000198,
-};
-
-#define A_dsa_temp_addr_array_value 0x00000000
-static u32 A_dsa_temp_addr_array_value_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_temp_addr_dsa_value 0x00000000
-static u32 A_dsa_temp_addr_dsa_value_used[] __attribute((unused)) = {
- 0x00000001,
-};
-
-#define A_dsa_temp_addr_new_value 0x00000000
-static u32 A_dsa_temp_addr_new_value_used[] __attribute((unused)) = {
-};
-
-#define A_dsa_temp_addr_next 0x00000000
-static u32 A_dsa_temp_addr_next_used[] __attribute((unused)) = {
- 0x0000001c,
- 0x0000004f,
-};
-
-#define A_dsa_temp_addr_residual 0x00000000
-static u32 A_dsa_temp_addr_residual_used[] __attribute((unused)) = {
- 0x0000002d,
- 0x0000003b,
-};
-
-#define A_dsa_temp_addr_saved_pointer 0x00000000
-static u32 A_dsa_temp_addr_saved_pointer_used[] __attribute((unused)) = {
- 0x0000002b,
- 0x00000037,
-};
-
-#define A_dsa_temp_addr_saved_residual 0x00000000
-static u32 A_dsa_temp_addr_saved_residual_used[] __attribute((unused)) = {
- 0x0000002e,
- 0x0000003a,
-};
-
-#define A_dsa_temp_lun 0x00000000
-static u32 A_dsa_temp_lun_used[] __attribute((unused)) = {
- 0x0000004c,
-};
-
-#define A_dsa_temp_next 0x00000000
-static u32 A_dsa_temp_next_used[] __attribute((unused)) = {
- 0x0000001f,
-};
-
-#define A_dsa_temp_sync 0x00000000
-static u32 A_dsa_temp_sync_used[] __attribute((unused)) = {
- 0x00000057,
-};
-
-#define A_dsa_temp_target 0x00000000
-static u32 A_dsa_temp_target_used[] __attribute((unused)) = {
- 0x00000045,
-};
-
-#define A_emulfly 0x00000000
-static u32 A_emulfly_used[] __attribute((unused)) = {
-};
-
-#define A_int_debug_break 0x03000000
-static u32 A_int_debug_break_used[] __attribute((unused)) = {
- 0x0000023c,
-};
-
-#define A_int_debug_panic 0x030b0000
-static u32 A_int_debug_panic_used[] __attribute((unused)) = {
- 0x00000209,
- 0x00000219,
-};
-
-#define A_int_err_check_condition 0x00030000
-static u32 A_int_err_check_condition_used[] __attribute((unused)) = {
- 0x000001a9,
-};
-
-#define A_int_err_no_phase 0x00040000
-static u32 A_int_err_no_phase_used[] __attribute((unused)) = {
-};
-
-#define A_int_err_selected 0x00010000
-static u32 A_int_err_selected_used[] __attribute((unused)) = {
- 0x000001ff,
-};
-
-#define A_int_err_unexpected_phase 0x00000000
-static u32 A_int_err_unexpected_phase_used[] __attribute((unused)) = {
- 0x00000092,
- 0x00000098,
- 0x000000a0,
- 0x000000d6,
- 0x000000da,
- 0x000000dc,
- 0x000000e4,
- 0x000000e8,
- 0x000000ea,
- 0x000000f2,
- 0x000000f6,
- 0x000000f8,
- 0x000000fa,
- 0x00000160,
-};
-
-#define A_int_err_unexpected_reselect 0x00020000
-static u32 A_int_err_unexpected_reselect_used[] __attribute((unused)) = {
- 0x000001cd,
-};
-
-#define A_int_msg_1 0x01020000
-static u32 A_int_msg_1_used[] __attribute((unused)) = {
- 0x00000114,
- 0x00000116,
-};
-
-#define A_int_msg_sdtr 0x01010000
-static u32 A_int_msg_sdtr_used[] __attribute((unused)) = {
- 0x00000180,
-};
-
-#define A_int_msg_wdtr 0x01000000
-static u32 A_int_msg_wdtr_used[] __attribute((unused)) = {
- 0x00000174,
-};
-
-#define A_int_norm_aborted 0x02040000
-static u32 A_int_norm_aborted_used[] __attribute((unused)) = {
- 0x00000270,
-};
-
-#define A_int_norm_command_complete 0x02020000
-static u32 A_int_norm_command_complete_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_disconnected 0x02030000
-static u32 A_int_norm_disconnected_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_emulateintfly 0x02060000
-static u32 A_int_norm_emulateintfly_used[] __attribute((unused)) = {
- 0x000001a2,
-};
-
-#define A_int_norm_reselect_complete 0x02010000
-static u32 A_int_norm_reselect_complete_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_reset 0x02050000
-static u32 A_int_norm_reset_used[] __attribute((unused)) = {
-};
-
-#define A_int_norm_select_complete 0x02000000
-static u32 A_int_norm_select_complete_used[] __attribute((unused)) = {
-};
-
-#define A_int_test_1 0x04000000
-static u32 A_int_test_1_used[] __attribute((unused)) = {
- 0x0000021e,
-};
-
-#define A_int_test_2 0x04010000
-static u32 A_int_test_2_used[] __attribute((unused)) = {
- 0x0000023a,
-};
-
-#define A_int_test_3 0x04020000
-static u32 A_int_test_3_used[] __attribute((unused)) = {
-};
-
-#define A_msg_buf 0x00000000
-static u32 A_msg_buf_used[] __attribute((unused)) = {
- 0x00000108,
- 0x00000162,
- 0x0000016c,
- 0x00000172,
- 0x00000178,
- 0x0000017e,
-};
-
-#define A_reconnect_dsa_head 0x00000000
-static u32 A_reconnect_dsa_head_used[] __attribute((unused)) = {
- 0x0000006d,
- 0x00000074,
- 0x000001b1,
-};
-
-#define A_reselected_identify 0x00000000
-static u32 A_reselected_identify_used[] __attribute((unused)) = {
- 0x00000048,
- 0x000001af,
-};
-
-#define A_reselected_tag 0x00000000
-static u32 A_reselected_tag_used[] __attribute((unused)) = {
-};
-
-#define A_saved_dsa 0x00000000
-static u32 A_saved_dsa_used[] __attribute((unused)) = {
- 0x00000005,
- 0x0000000e,
- 0x00000023,
- 0x00000025,
- 0x00000032,
- 0x0000003f,
- 0x00000054,
- 0x0000005f,
- 0x00000070,
- 0x00000078,
- 0x0000008d,
- 0x000000aa,
- 0x000000bb,
- 0x000000c0,
- 0x000000d1,
- 0x0000012f,
- 0x000001a4,
- 0x000001b5,
- 0x000001ba,
- 0x000001e2,
-};
-
-#define A_schedule 0x00000000
-static u32 A_schedule_used[] __attribute((unused)) = {
- 0x0000007d,
- 0x000001a7,
- 0x00000203,
- 0x00000244,
-};
-
-#define A_test_dest 0x00000000
-static u32 A_test_dest_used[] __attribute((unused)) = {
- 0x0000021c,
-};
-
-#define A_test_src 0x00000000
-static u32 A_test_src_used[] __attribute((unused)) = {
- 0x0000021b,
-};
-
-#define Ent_accept_message 0x00000624
-#define Ent_cmdout_cmdout 0x00000264
-#define Ent_command_complete 0x0000065c
-#define Ent_command_complete_msgin 0x0000066c
-#define Ent_data_transfer 0x0000026c
-#define Ent_datain_to_jump 0x00000334
-#define Ent_debug_break 0x000008ec
-#define Ent_dsa_code_begin 0x00000000
-#define Ent_dsa_code_check_reselect 0x0000010c
-#define Ent_dsa_code_fix_jump 0x00000058
-#define Ent_dsa_code_restore_pointers 0x000000d8
-#define Ent_dsa_code_save_data_pointer 0x000000a4
-#define Ent_dsa_code_template 0x00000000
-#define Ent_dsa_code_template_end 0x00000178
-#define Ent_dsa_schedule 0x00000178
-#define Ent_dsa_zero 0x00000178
-#define Ent_end_data_transfer 0x000002a4
-#define Ent_initiator_abort 0x00000914
-#define Ent_msg_in 0x0000041c
-#define Ent_msg_in_restart 0x000003fc
-#define Ent_other_in 0x0000038c
-#define Ent_other_out 0x00000354
-#define Ent_other_transfer 0x000003c4
-#define Ent_reject_message 0x00000604
-#define Ent_reselected_check_next 0x000006f0
-#define Ent_reselected_ok 0x00000798
-#define Ent_respond_message 0x0000063c
-#define Ent_select 0x000001f8
-#define Ent_select_msgout 0x00000218
-#define Ent_target_abort 0x000008f4
-#define Ent_test_1 0x00000868
-#define Ent_test_2 0x0000087c
-#define Ent_test_2_msgout 0x0000089c
-#define Ent_wait_reselect 0x000006a8
-static u32 LABELPATCHES[] __attribute((unused)) = {
- 0x00000011,
- 0x0000001a,
- 0x0000001d,
- 0x00000028,
- 0x0000002a,
- 0x00000035,
- 0x00000038,
- 0x00000042,
- 0x00000050,
- 0x00000052,
- 0x0000006b,
- 0x00000083,
- 0x00000085,
- 0x00000090,
- 0x00000094,
- 0x00000096,
- 0x0000009c,
- 0x0000009e,
- 0x000000a2,
- 0x000000a4,
- 0x000000a6,
- 0x000000a8,
- 0x000000b6,
- 0x000000b9,
- 0x000000cc,
- 0x000000cf,
- 0x000000d8,
- 0x000000de,
- 0x000000e0,
- 0x000000e6,
- 0x000000ec,
- 0x000000ee,
- 0x000000f4,
- 0x000000fc,
- 0x000000fe,
- 0x0000010a,
- 0x0000010c,
- 0x0000010e,
- 0x00000110,
- 0x00000112,
- 0x00000118,
- 0x0000011a,
- 0x0000012d,
- 0x00000143,
- 0x00000158,
- 0x0000015c,
- 0x00000164,
- 0x00000166,
- 0x00000168,
- 0x0000016e,
- 0x0000017a,
- 0x000001ab,
- 0x000001b8,
- 0x000001bf,
- 0x000001c3,
- 0x000001c7,
- 0x000001cb,
- 0x000001e0,
- 0x000001f8,
- 0x00000207,
- 0x0000020f,
- 0x00000213,
- 0x00000217,
- 0x00000224,
- 0x00000226,
- 0x00000248,
- 0x0000024a,
- 0x0000024c,
- 0x0000024e,
- 0x00000250,
- 0x00000252,
- 0x00000256,
- 0x0000025a,
- 0x0000025c,
- 0x00000260,
- 0x00000262,
- 0x00000266,
- 0x00000268,
-};
-
-static struct {
- u32 offset;
- void *address;
-} EXTERNAL_PATCHES[] __attribute((unused)) = {
-};
-
-static u32 INSTRUCTIONS __attribute((unused)) = 290;
-static u32 PATCHES __attribute((unused)) = 78;
-static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0;
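
The tables above are the interesting part of the deleted script_asm.pl output: LABELPATCHES lists the dword indices of jump operands that must be relocated once the script has been copied into DMA-able memory, and each A_*_used[] array records where the run-time value of an ABSOLUTE symbol has to be patched in. A minimal sketch of how a driver consumes such tables (not the literal 53c7xx.c code; all names are illustrative):

#include <linux/types.h>

static void patch_script(u32 *script, u32 script_bus_addr,
			 const u32 *label_patches, unsigned int npatches,
			 const u32 *abs_uses, unsigned int nuses,
			 u32 abs_value)
{
	unsigned int i;

	/* Label operands hold byte offsets from the script start (the Ent_*
	 * values); add the script's bus address to make them absolute. */
	for (i = 0; i < npatches; i++)
		script[label_patches[i]] += script_bus_addr;

	/* Each A_*_used[] entry is the dword index of an operand that needs
	 * the run-time value of that ABSOLUTE symbol added in. */
	for (i = 0; i < nuses; i++)
		script[abs_uses[i]] += abs_value;
}
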
diff --git a/drivers/scsi/53c7xx_u.h_shipped b/drivers/scsi/53c7xx_u.h_shipped
deleted file mode 100644
index 7b337174e228..000000000000
--- a/drivers/scsi/53c7xx_u.h_shipped
+++ /dev/null
@@ -1,102 +0,0 @@
-#undef A_NCR53c7xx_msg_abort
-#undef A_NCR53c7xx_msg_reject
-#undef A_NCR53c7xx_sink
-#undef A_NCR53c7xx_zero
-#undef A_NOP_insn
-#undef A_addr_dsa
-#undef A_addr_reconnect_dsa_head
-#undef A_addr_scratch
-#undef A_addr_temp
-#undef A_dmode_memory_to_memory
-#undef A_dmode_memory_to_ncr
-#undef A_dmode_ncr_to_memory
-#undef A_dsa_check_reselect
-#undef A_dsa_cmdout
-#undef A_dsa_cmnd
-#undef A_dsa_datain
-#undef A_dsa_dataout
-#undef A_dsa_end
-#undef A_dsa_fields_start
-#undef A_dsa_msgin
-#undef A_dsa_msgout
-#undef A_dsa_msgout_other
-#undef A_dsa_next
-#undef A_dsa_restore_pointers
-#undef A_dsa_save_data_pointer
-#undef A_dsa_select
-#undef A_dsa_sscf_710
-#undef A_dsa_status
-#undef A_dsa_temp_addr_array_value
-#undef A_dsa_temp_addr_dsa_value
-#undef A_dsa_temp_addr_new_value
-#undef A_dsa_temp_addr_next
-#undef A_dsa_temp_addr_residual
-#undef A_dsa_temp_addr_saved_pointer
-#undef A_dsa_temp_addr_saved_residual
-#undef A_dsa_temp_lun
-#undef A_dsa_temp_next
-#undef A_dsa_temp_sync
-#undef A_dsa_temp_target
-#undef A_emulfly
-#undef A_int_debug_break
-#undef A_int_debug_panic
-#undef A_int_err_check_condition
-#undef A_int_err_no_phase
-#undef A_int_err_selected
-#undef A_int_err_unexpected_phase
-#undef A_int_err_unexpected_reselect
-#undef A_int_msg_1
-#undef A_int_msg_sdtr
-#undef A_int_msg_wdtr
-#undef A_int_norm_aborted
-#undef A_int_norm_command_complete
-#undef A_int_norm_disconnected
-#undef A_int_norm_emulateintfly
-#undef A_int_norm_reselect_complete
-#undef A_int_norm_reset
-#undef A_int_norm_select_complete
-#undef A_int_test_1
-#undef A_int_test_2
-#undef A_int_test_3
-#undef A_msg_buf
-#undef A_reconnect_dsa_head
-#undef A_reselected_identify
-#undef A_reselected_tag
-#undef A_saved_dsa
-#undef A_schedule
-#undef A_test_dest
-#undef A_test_src
-#undef Ent_accept_message
-#undef Ent_cmdout_cmdout
-#undef Ent_command_complete
-#undef Ent_command_complete_msgin
-#undef Ent_data_transfer
-#undef Ent_datain_to_jump
-#undef Ent_debug_break
-#undef Ent_dsa_code_begin
-#undef Ent_dsa_code_check_reselect
-#undef Ent_dsa_code_fix_jump
-#undef Ent_dsa_code_restore_pointers
-#undef Ent_dsa_code_save_data_pointer
-#undef Ent_dsa_code_template
-#undef Ent_dsa_code_template_end
-#undef Ent_dsa_schedule
-#undef Ent_dsa_zero
-#undef Ent_end_data_transfer
-#undef Ent_initiator_abort
-#undef Ent_msg_in
-#undef Ent_msg_in_restart
-#undef Ent_other_in
-#undef Ent_other_out
-#undef Ent_other_transfer
-#undef Ent_reject_message
-#undef Ent_reselected_check_next
-#undef Ent_reselected_ok
-#undef Ent_respond_message
-#undef Ent_select
-#undef Ent_select_msgout
-#undef Ent_target_abort
-#undef Ent_test_1
-#undef Ent_test_2
-#undef Ent_test_2_msgout
-#undef Ent_wait_reselect
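
The _u.h_shipped header deleted above simply #undefs every macro its _d.h counterpart defined, presumably so a driver (or a firmware rebuild) can pull in more than one generated script without macro redefinitions. A hypothetical include sequence, assuming the generated pair:

/* Hypothetical usage of the generated pair above. */
#include "53c7xx_d.h"	/* SCRIPT[] array plus the A_* / Ent_* defines */
/* ... code that uses A_saved_dsa, Ent_reselected_ok, ... */
#include "53c7xx_u.h"	/* #undefs every A_* / Ent_* emitted above */
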
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 96f4cab07614..9b206176f717 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -304,18 +304,10 @@ static struct BusLogic_CCB *BusLogic_AllocateCCB(struct BusLogic_HostAdapter
static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB)
{
struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter;
- struct scsi_cmnd *cmd = CCB->Command;
- if (cmd->use_sg != 0) {
- pci_unmap_sg(HostAdapter->PCI_Device,
- (struct scatterlist *)cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else if (cmd->request_bufflen != 0) {
- pci_unmap_single(HostAdapter->PCI_Device, CCB->DataPointer,
- CCB->DataLength, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(CCB->Command);
pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer,
- CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
+ CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
CCB->Command = NULL;
CCB->Status = BusLogic_CCB_Free;
@@ -2648,7 +2640,8 @@ static void BusLogic_ProcessCompletedCCBs(struct BusLogic_HostAdapter *HostAdapt
*/
if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) {
struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID];
- struct SCSI_Inquiry *InquiryResult = (struct SCSI_Inquiry *) Command->request_buffer;
+ struct SCSI_Inquiry *InquiryResult =
+ (struct SCSI_Inquiry *) scsi_sglist(Command);
TargetFlags->TargetExists = true;
TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue;
TargetFlags->WideTransfersSupported = InquiryResult->WBus16;
@@ -2819,9 +2812,8 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
int CDB_Length = Command->cmd_len;
int TargetID = Command->device->id;
int LogicalUnit = Command->device->lun;
- void *BufferPointer = Command->request_buffer;
- int BufferLength = Command->request_bufflen;
- int SegmentCount = Command->use_sg;
+ int BufferLength = scsi_bufflen(Command);
+ int Count;
struct BusLogic_CCB *CCB;
/*
SCSI REQUEST_SENSE commands will be executed automatically by the Host
@@ -2851,36 +2843,35 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
return 0;
}
}
+
/*
Initialize the fields in the BusLogic Command Control Block (CCB).
*/
- if (SegmentCount == 0 && BufferLength != 0) {
- CCB->Opcode = BusLogic_InitiatorCCB;
- CCB->DataLength = BufferLength;
- CCB->DataPointer = pci_map_single(HostAdapter->PCI_Device,
- BufferPointer, BufferLength,
- Command->sc_data_direction);
- } else if (SegmentCount != 0) {
- struct scatterlist *ScatterList = (struct scatterlist *) BufferPointer;
- int Segment, Count;
-
- Count = pci_map_sg(HostAdapter->PCI_Device, ScatterList, SegmentCount,
- Command->sc_data_direction);
+ Count = scsi_dma_map(Command);
+ BUG_ON(Count < 0);
+ if (Count) {
+ struct scatterlist *sg;
+ int i;
+
CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment);
if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB);
else
CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList);
- for (Segment = 0; Segment < Count; Segment++) {
- CCB->ScatterGatherList[Segment].SegmentByteCount = sg_dma_len(ScatterList + Segment);
- CCB->ScatterGatherList[Segment].SegmentDataPointer = sg_dma_address(ScatterList + Segment);
+
+ scsi_for_each_sg(Command, sg, Count, i) {
+ CCB->ScatterGatherList[i].SegmentByteCount =
+ sg_dma_len(sg);
+ CCB->ScatterGatherList[i].SegmentDataPointer =
+ sg_dma_address(sg);
}
- } else {
+ } else if (!Count) {
CCB->Opcode = BusLogic_InitiatorCCB;
CCB->DataLength = BufferLength;
CCB->DataPointer = 0;
}
+
switch (CDB[0]) {
case READ_6:
case READ_10:
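
The BusLogic diff above is a straight conversion from the old cmd->use_sg/request_buffer fields and open-coded pci_map_sg()/pci_unmap_sg() calls to the generic scsi_dma_map()/scsi_for_each_sg()/scsi_dma_unmap() accessors. A minimal sketch of that pattern for a hypothetical low-level driver (struct my_sg_entry and the function names are invented; a 32-bit DMA mask is assumed):

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical 32-bit hardware S/G descriptor, for illustration only. */
struct my_sg_entry {
	u32 byte_count;
	u32 dma_address;
};

static int my_build_sglist(struct scsi_cmnd *cmd, struct my_sg_entry *hw_sg)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd) for DMA */
	if (nseg <= 0)
		return nseg;		/* <0: mapping failure, 0: no data */

	scsi_for_each_sg(cmd, sg, nseg, i) {
		hw_sg[i].byte_count  = sg_dma_len(sg);
		hw_sg[i].dma_address = sg_dma_address(sg);
	}
	return nseg;
}

/* On completion the mapping made by scsi_dma_map() must be undone. */
static void my_complete_io(struct scsi_cmnd *cmd)
{
	scsi_dma_unmap(cmd);
}
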
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index eb46cb0e3cb7..d2b3898b750a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -10,6 +10,7 @@ config RAID_ATTRS
config SCSI
tristate "SCSI device support"
depends on BLOCK
+ select SCSI_DMA if HAS_DMA
---help---
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
@@ -29,6 +30,10 @@ config SCSI
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a SCSI device.
+config SCSI_DMA
+ bool
+ default n
+
config SCSI_TGT
tristate "SCSI target support"
depends on SCSI && EXPERIMENTAL
@@ -277,7 +282,7 @@ config SCSI_ISCSI_ATTRS
config SCSI_SAS_ATTRS
tristate "SAS Transport Attributes"
- depends on SCSI
+ depends on SCSI && BLK_DEV_BSG
help
If you wish to export transport-specific information about
each attached SAS device to sysfs, say Y.
@@ -286,8 +291,12 @@ source "drivers/scsi/libsas/Kconfig"
endmenu
-menu "SCSI low-level drivers"
+menuconfig SCSI_LOWLEVEL
+ bool "SCSI low-level drivers"
depends on SCSI!=n
+ default y
+
+if SCSI_LOWLEVEL
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
@@ -478,7 +487,7 @@ source "drivers/scsi/aic94xx/Kconfig"
# All the I2O code and drivers do not seem to be 64bit safe.
config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support "
- depends on !64BIT && SCSI && PCI
+ depends on !64BIT && SCSI && PCI && VIRT_TO_BUS
help
This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained
@@ -540,7 +549,7 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API
+ depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
---help---
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
@@ -739,7 +748,7 @@ config SCSI_GENERIC_NCR53C400
config SCSI_IBMMCA
tristate "IBMMCA SCSI support"
- depends on MCA_LEGACY && SCSI
+ depends on MCA && SCSI
---help---
This is support for the IBM SCSI adapter found in many of the PS/2
series computers. These machines have an MCA bus, so you need to
@@ -1007,6 +1016,11 @@ config SCSI_STEX
To compile this driver as a module, choose M here: the
module will be called stex.
+config 53C700_BE_BUS
+ bool
+ depends on SCSI_A4000T || SCSI_ZORRO7XX || MVME16x_SCSI || BVME6000_SCSI
+ default y
+
config SCSI_SYM53C8XX_2
tristate "SYM53C8XX Version 2 SCSI support"
depends on PCI && SCSI
@@ -1611,13 +1625,25 @@ config FASTLANE_SCSI
If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
one in the near future, say Y to this question. Otherwise, say N.
-config SCSI_AMIGA7XX
- bool "Amiga NCR53c710 SCSI support (EXPERIMENTAL)"
- depends on AMIGA && SCSI && EXPERIMENTAL && BROKEN
+config SCSI_A4000T
+ tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
+ depends on AMIGA && SCSI && EXPERIMENTAL
+ select SCSI_SPI_ATTRS
help
- Support for various NCR53c710-based SCSI controllers on the Amiga.
+ If you have an Amiga 4000T and have SCSI devices connected to the
+ built-in SCSI controller, say Y. Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called a4000t.
+
+config SCSI_ZORRO7XX
+ tristate "Zorro NCR53c710 SCSI support (EXPERIMENTAL)"
+ depends on ZORRO && SCSI && EXPERIMENTAL
+ select SCSI_SPI_ATTRS
+ help
+ Support for various NCR53c710-based SCSI controllers on Zorro
+ expansion boards for the Amiga.
This includes:
- - the builtin SCSI controller on the Amiga 4000T,
- the Amiga 4091 Zorro III SCSI-2 controller,
- the MacroSystem Development's WarpEngine Amiga SCSI-2 controller
(info at
@@ -1625,10 +1651,6 @@ config SCSI_AMIGA7XX
- the SCSI controller on the Phase5 Blizzard PowerUP 603e+
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
- Note that all of the above SCSI controllers, except for the builtin
- SCSI controller on the Amiga 4000T, reside on the Zorro expansion
- bus, so you also have to enable Zorro bus support if you want to use
- them.
config OKTAGON_SCSI
tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
@@ -1712,8 +1734,8 @@ config MVME147_SCSI
single-board computer.
config MVME16x_SCSI
- bool "NCR53C710 SCSI driver for MVME16x"
- depends on MVME16x && SCSI && BROKEN
+ tristate "NCR53C710 SCSI driver for MVME16x"
+ depends on MVME16x && SCSI
select SCSI_SPI_ATTRS
help
The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710
@@ -1721,22 +1743,14 @@ config MVME16x_SCSI
will want to say Y to this question.
config BVME6000_SCSI
- bool "NCR53C710 SCSI driver for BVME6000"
- depends on BVME6000 && SCSI && BROKEN
+ tristate "NCR53C710 SCSI driver for BVME6000"
+ depends on BVME6000 && SCSI
select SCSI_SPI_ATTRS
help
The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710
SCSI controller chip. Almost everyone using one of these boards
will want to say Y to this question.
-config SCSI_NCR53C7xx_FAST
- bool "allow FAST-SCSI [10MHz]"
- depends on SCSI_AMIGA7XX || MVME16x_SCSI || BVME6000_SCSI
- help
- This will enable 10MHz FAST-SCSI transfers with your host
- adapter. Some systems have problems with that speed, so it's safest
- to say N here.
-
config SUN3_SCSI
tristate "Sun3 NCR5380 SCSI"
depends on SUN3 && SCSI
@@ -1766,8 +1780,6 @@ config SCSI_SUNESP
To compile this driver as a module, choose M here: the
module will be called esp.
-# bool 'Cyberstorm Mk III SCSI support (EXPERIMENTAL)' CONFIG_CYBERSTORMIII_SCSI
-
config ZFCP
tristate "FCP host bus adapter driver for IBM eServer zSeries"
depends on S390 && QDIO && SCSI
@@ -1792,7 +1804,7 @@ config SCSI_SRP
To compile this driver as a module, choose M here: the
module will be called libsrp.
-endmenu
+endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b1b632791580..86a7ba7bad63 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -37,7 +37,8 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
-obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
+obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
+obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
@@ -53,8 +54,8 @@ obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
-obj-$(CONFIG_MVME16x_SCSI) += mvme16x.o 53c7xx.o
-obj-$(CONFIG_BVME6000_SCSI) += bvme6000.o 53c7xx.o
+obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
+obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
@@ -89,7 +90,6 @@ obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
-obj-$(CONFIG_SCSI_FD_8xx) += seagate.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_DTC3280) += dtc.o
@@ -132,6 +132,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
+obj-$(CONFIG_PS3_ROM) += ps3rom.o
obj-$(CONFIG_ARM) += arm/
@@ -148,9 +149,9 @@ obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
- scsicam.o scsi_error.o scsi_lib.o \
- scsi_scan.o scsi_sysfs.o \
- scsi_devinfo.o
+ scsicam.o scsi_error.o scsi_lib.o
+scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
+scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
@@ -168,10 +169,8 @@ NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
-clean-files := 53c7xx_d.h 53c700_d.h \
- 53c7xx_u.h 53c700_u.h
+clean-files := 53c700_d.h 53c700_u.h
-$(obj)/53c7xx.o: $(obj)/53c7xx_d.h $(obj)/53c7xx_u.h
$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
# If you want to play with the firmware, uncomment
@@ -179,11 +178,6 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
ifdef GENERATE_FIRMWARE
-$(obj)/53c7xx_d.h: $(src)/53c7xx.scr $(src)/script_asm.pl
- $(CPP) -traditional -DCHIP=710 - < $< | grep -v '^#' | $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h)
-
-$(obj)/53c7xx_u.h: $(obj)/53c7xx_d.h
-
$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl
$(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $<
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 88ea5a1fb606..f8e449a98d29 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -347,7 +347,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
if((r & bit) == val)
return 0;
if(!in_interrupt())
- yield();
+ cond_resched();
else
cpu_relax();
}
@@ -357,7 +357,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
static struct {
unsigned char value;
const char *name;
-} phases[] = {
+} phases[] __maybe_unused = {
{PHASE_DATAOUT, "DATAOUT"},
{PHASE_DATAIN, "DATAIN"},
{PHASE_CMDOUT, "CMDOUT"},
@@ -575,7 +575,8 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
* Locks: none, irqs must be enabled on entry
*/
-static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
+static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+ int possible)
{
NCR5380_local_declare();
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -629,7 +630,8 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
* Locks: none
*/
-static void __init NCR5380_print_options(struct Scsi_Host *instance)
+static void __init __maybe_unused
+NCR5380_print_options(struct Scsi_Host *instance)
{
printk(" generic options"
#ifdef AUTOPROBE_IRQ
@@ -703,8 +705,8 @@ char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len);
static
char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
-static
-int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, int length, int inout)
+static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
+ char *buffer, char **start, off_t offset, int length, int inout)
{
char *pos = buffer;
struct NCR5380_hostdata *hostdata;
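
Besides the __maybe_unused annotations, the NCR5380.c hunk above swaps yield() for cond_resched() in the polling helper. A hedged sketch of that style of polling loop (the MMIO register, mask and 2*HZ timeout are made up for illustration, not taken from the driver):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/io.h>

static int poll_reg_politely(void __iomem *reg, u8 mask, u8 want)
{
	unsigned long timeout = jiffies + 2 * HZ;

	while (time_before(jiffies, timeout)) {
		if ((readb(reg) & mask) == want)
			return 0;
		if (in_interrupt())
			cpu_relax();	/* can't sleep here */
		else
			cond_resched();	/* give the CPU away while waiting */
	}
	return -ETIMEDOUT;
}
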
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 713a108c02ef..bccf13f71532 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -299,7 +299,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
static irqreturn_t NCR5380_intr(int irq, void *dev_id);
#endif
static void NCR5380_main(struct work_struct *work);
-static void NCR5380_print_options(struct Scsi_Host *instance);
+static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
#ifdef NDEBUG
static void NCR5380_print_phase(struct Scsi_Host *instance);
static void NCR5380_print(struct Scsi_Host *instance);
@@ -307,8 +307,8 @@ static void NCR5380_print(struct Scsi_Host *instance);
static int NCR5380_abort(Scsi_Cmnd * cmd);
static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *));
-static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
-off_t offset, int length, int inout);
+static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
+ char *buffer, char **start, off_t offset, int length, int inout);
static void NCR5380_reselect(struct Scsi_Host *instance);
static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag);
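
The header change above mirrors the .c file: statics that are only referenced by some board configurations are tagged __maybe_unused so unused-function warnings do not fire in builds that leave them out. A tiny illustrative example (the helper name is invented):

#include <linux/compiler.h>

/* Only some board variants reference this helper; the annotation keeps
 * gcc quiet in configurations that never call it, without adding #ifdefs. */
static int __maybe_unused ncr_probe_board_irq(void)
{
	return -1;	/* real code would probe the hardware here */
}
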
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8b5334c56f0a..79b4df158140 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -95,6 +95,8 @@ enum {
/* The master ring of all esp hosts we are managing in this driver. */
static struct NCR_ESP *espchain;
int nesps = 0, esps_in_use = 0, esps_running = 0;
+EXPORT_SYMBOL(nesps);
+EXPORT_SYMBOL(esps_running);
irqreturn_t esp_intr(int irq, void *dev_id);
@@ -524,6 +526,7 @@ void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
/* Eat any bitrot in the chip and we are done... */
trash = esp_read(eregs->esp_intrpt);
}
+EXPORT_SYMBOL(esp_bootup_reset);
/* Allocate structure and insert basic data such as SCSI chip frequency
* data and a pointer to the device
@@ -772,6 +775,7 @@ const char *esp_info(struct Scsi_Host *host)
panic("Bogon ESP revision");
};
}
+EXPORT_SYMBOL(esp_info);
/* From Wolfgang Stanglmeier's NCR scsi driver. */
struct info_str
@@ -902,6 +906,7 @@ int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t off
*start = buffer;
return esp_host_info(esp, buffer, offset, length);
}
+EXPORT_SYMBOL(esp_proc_info);
static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
@@ -3535,6 +3540,7 @@ state_machine:
if(esp->dma_irq_exit)
esp->dma_irq_exit(esp);
}
+EXPORT_SYMBOL(esp_handle);
#ifndef CONFIG_SMP
irqreturn_t esp_intr(int irq, void *dev_id)
@@ -3606,11 +3612,10 @@ out:
int esp_slave_alloc(struct scsi_device *SDptr)
{
struct esp_device *esp_dev =
- kmalloc(sizeof(struct esp_device), GFP_ATOMIC);
+ kzalloc(sizeof(struct esp_device), GFP_ATOMIC);
if (!esp_dev)
return -ENOMEM;
- memset(esp_dev, 0, sizeof(struct esp_device));
SDptr->hostdata = esp_dev;
return 0;
}
@@ -3632,6 +3637,7 @@ void esp_release(void)
esps_in_use--;
esps_running = esps_in_use;
}
+EXPORT_SYMBOL(esp_release);
#endif
EXPORT_SYMBOL(esp_abort);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 7c0b17f86903..eda8c48f6be7 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -698,7 +698,7 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
int i;
VDEB(printk("NCR53c406a_queue called\n"));
- DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, SCpnt->request_bufflen));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt)));
#if 0
VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
@@ -785,8 +785,8 @@ static void NCR53c406a_intr(void *dev_id)
unsigned char status, int_reg;
#if USE_PIO
unsigned char pio_status;
- struct scatterlist *sglist;
- unsigned int sgcount;
+ struct scatterlist *sg;
+ int i;
#endif
VDEB(printk("NCR53c406a_intr called\n"));
@@ -866,22 +866,18 @@ static void NCR53c406a_intr(void *dev_id)
current_SC->SCp.phase = data_out;
VDEB(printk("NCR53c406a: Data-Out phase\n"));
outb(FLUSH_FIFO, CMD_REG);
- LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
#if USE_DMA /* No s/g support for DMA */
- NCR53c406a_dma_write(current_SC->request_buffer, current_SC->request_bufflen);
+ NCR53c406a_dma_write(scsi_sglist(current_SC),
+ scsi_bufflen(current_SC));
+
#endif /* USE_DMA */
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
- if (!current_SC->use_sg) /* Don't use scatter-gather */
- NCR53c406a_pio_write(current_SC->request_buffer, current_SC->request_bufflen);
- else { /* use scatter-gather */
- sgcount = current_SC->use_sg;
- sglist = current_SC->request_buffer;
- while (sgcount--) {
- NCR53c406a_pio_write(page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
- }
+ scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
+ NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
+ sg->length);
+ }
REG0;
#endif /* USE_PIO */
}
@@ -893,22 +889,17 @@ static void NCR53c406a_intr(void *dev_id)
current_SC->SCp.phase = data_in;
VDEB(printk("NCR53c406a: Data-In phase\n"));
outb(FLUSH_FIFO, CMD_REG);
- LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
#if USE_DMA /* No s/g support for DMA */
- NCR53c406a_dma_read(current_SC->request_buffer, current_SC->request_bufflen);
+ NCR53c406a_dma_read(scsi_sglist(current_SC),
+ scsi_bufflen(current_SC));
#endif /* USE_DMA */
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
- if (!current_SC->use_sg) /* Don't use scatter-gather */
- NCR53c406a_pio_read(current_SC->request_buffer, current_SC->request_bufflen);
- else { /* Use scatter-gather */
- sgcount = current_SC->use_sg;
- sglist = current_SC->request_buffer;
- while (sgcount--) {
- NCR53c406a_pio_read(page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
- }
+ scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
+ NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
+ sg->length);
+ }
REG0;
#endif /* USE_PIO */
}
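
The NCR53c406a.c hunks above replace the hand-rolled use_sg walk with scsi_for_each_sg(); since this is the PIO path there is no DMA mapping involved, the loop just touches each segment through its page. A minimal sketch under the same assumptions (the sg->page layout of this kernel era, lowmem pages, invented helper names):

#include <linux/mm.h>
#include <scsi/scsi_cmnd.h>

static void my_pio_write(struct scsi_cmnd *cmd,
			 void (*xfer)(void *buf, int len))
{
	struct scatterlist *sg;
	int i;

	/* Feed every scatterlist segment to the byte-banging routine. */
	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
		xfer(page_address(sg->page) + sg->offset, sg->length);
}
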
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index f12864abed2f..3a8089705feb 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -181,13 +181,12 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
struct Scsi_Host *host;
int ret;
- hostdata = kmalloc(sizeof(*hostdata), GFP_KERNEL);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
if (!hostdata) {
printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
"data, detatching\n", siop);
return -ENOMEM;
}
- memset(hostdata, 0, sizeof(*hostdata));
if (!request_region(region, 64, "NCR_D700")) {
printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
index 778844c3544a..a8bbdc2273b8 100644
--- a/drivers/scsi/NCR_Q720.c
+++ b/drivers/scsi/NCR_Q720.c
@@ -148,11 +148,10 @@ NCR_Q720_probe(struct device *dev)
__u32 base_addr, mem_size;
void __iomem *mem_base;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
- memset(p, 0, sizeof(*p));
pos2 = mca_device_read_pos(mca_dev, 2);
/* enable device */
pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
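
Both the NCR_D700.c and NCR_Q720.c hunks above are the standard kmalloc()+memset() to kzalloc() conversion. For reference, a minimal before/after sketch (struct and function names invented):

#include <linux/slab.h>

struct my_private {	/* illustrative structure */
	int dummy;
};

static struct my_private *my_alloc_private(void)
{
	/* Before: p = kmalloc(sizeof(*p), GFP_KERNEL);
	 *         if (p) memset(p, 0, sizeof(*p));
	 * After:  one call that allocates and zeroes. */
	return kzalloc(sizeof(struct my_private), GFP_KERNEL);
}
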
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 7f4241bfb9c4..f608d4a1d6da 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -19,27 +19,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -75,6 +54,8 @@
* 9/28/04 Christoph Hellwig <hch@lst.de>
* - merge the two source files
* - remove internal queueing code
+ * 14/06/07 Alan Cox <alan@redhat.com>
+ * - Grand cleanup and Linuxisation
*/
#include <linux/module.h>
@@ -102,14 +83,12 @@
#include "a100u2w.h"
-#define JIFFIES_TO_MS(t) ((t) * 1000 / HZ)
-#define MS_TO_JIFFIES(j) ((j * HZ) / 1000)
+static struct orc_scb *__orc_alloc_scb(struct orc_host * host);
+static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb);
-static ORC_SCB *orc_alloc_scb(ORC_HCS * hcsp);
-static void inia100SCBPost(BYTE * pHcb, BYTE * pScb);
+static struct orc_nvram nvram, *nvramp = &nvram;
-static NVRAM nvram, *nvramp = &nvram;
-static UCHAR dftNvRam[64] =
+static u8 default_nvram[64] =
{
/*----------header -------------*/
0x01, /* 0x00: Sub System Vendor ID 0 */
@@ -158,823 +137,882 @@ static UCHAR dftNvRam[64] =
};
-/***************************************************************************/
-static void waitForPause(unsigned amount)
-{
- ULONG the_time = jiffies + MS_TO_JIFFIES(amount);
- while (time_before_eq(jiffies, the_time))
- cpu_relax();
-}
-
-/***************************************************************************/
-static UCHAR waitChipReady(ORC_HCS * hcsp)
+static u8 wait_chip_ready(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
+ if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100);
}
return 0;
}
-/***************************************************************************/
-static UCHAR waitFWReady(ORC_HCS * hcsp)
+static u8 wait_firmware_ready(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (ORC_RD(hcsp->HCS_Base, ORC_HSTUS) & RREADY) /* Wait READY set */
+ if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static UCHAR waitSCSIRSTdone(ORC_HCS * hcsp)
+static u8 wait_scsi_reset_done(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (!(ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
+ if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static UCHAR waitHDOoff(ORC_HCS * hcsp)
+static u8 wait_HDO_off(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if (!(ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & HDO)) /* Wait HDO off */
+ if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */
return 1;
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static UCHAR waitHDIset(ORC_HCS * hcsp, UCHAR * pData)
+static u8 wait_hdi_set(struct orc_host * host, u8 * data)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
- if ((*pData = ORC_RD(hcsp->HCS_Base, ORC_HSTUS)) & HDI)
+ if ((*data = inb(host->base + ORC_HSTUS)) & HDI)
return 1; /* Wait HDI set */
- waitForPause(100); /* wait 100ms before try again */
+ mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
-static unsigned short get_FW_version(ORC_HCS * hcsp)
+static unsigned short orc_read_fwrev(struct orc_host * host)
{
- UCHAR bData;
- union {
- unsigned short sVersion;
- unsigned char cVersion[2];
- } Version;
-
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_VERSION);
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ u16 version;
+ u8 data;
+
+ outb(ORC_CMD_VERSION, host->base + ORC_HDATA);
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- Version.cVersion[0] = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ version = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- Version.cVersion[1] = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ version |= inb(host->base + ORC_HDATA) << 8;
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
- return (Version.sVersion);
+ return version;
}
/***************************************************************************/
-static UCHAR set_NVRAM(ORC_HCS * hcsp, unsigned char address, unsigned char value)
+static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value)
{
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_SET_NVM); /* Write command */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, address); /* Write address */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(address, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, value); /* Write value */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(value, host->base + ORC_HDATA); /* Write value */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
return 1;
}
/***************************************************************************/
-static UCHAR get_NVRAM(ORC_HCS * hcsp, unsigned char address, unsigned char *pDataIn)
+static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
{
- unsigned char bData;
+ unsigned char data;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_GET_NVM); /* Write command */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, address); /* Write address */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(address, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- *pDataIn = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ *ptr = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
return 1;
+
}
-/***************************************************************************/
-static void orc_exec_scb(ORC_HCS * hcsp, ORC_SCB * scbp)
+/**
+ * orc_exec_scb - Queue an SCB with the HA
+ * @host: host adapter the SCB belongs to
+ * @scb: SCB to queue for execution
+ */
+
+static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb)
{
- scbp->SCB_Status = ORCSCB_POST;
- ORC_WR(hcsp->HCS_Base + ORC_PQUEUE, scbp->SCB_ScbIdx);
- return;
+ scb->status = ORCSCB_POST;
+ outb(scb->scbidx, host->base + ORC_PQUEUE);
}
-/***********************************************************************
- Read SCSI H/A configuration parameters from serial EEPROM
-************************************************************************/
-static int se2_rd_all(ORC_HCS * hcsp)
+/**
+ * se2_rd_all - read SCSI parameters from EEPROM
+ * @host: Host whose EEPROM is being loaded
+ *
+ * Read SCSI H/A configuration parameters from serial EEPROM
+ */
+
+static int se2_rd_all(struct orc_host * host)
{
int i;
- UCHAR *np, chksum = 0;
+ u8 *np, chksum = 0;
- np = (UCHAR *) nvramp;
+ np = (u8 *) nvramp;
for (i = 0; i < 64; i++, np++) { /* <01> */
- if (get_NVRAM(hcsp, (unsigned char) i, np) == 0)
+ if (orc_nv_read(host, (u8) i, np) == 0)
return -1;
-// *np++ = get_NVRAM(hcsp, (unsigned char ) i);
}
-/*------ Is ckecksum ok ? ------*/
- np = (UCHAR *) nvramp;
+ /*------ Is ckecksum ok ? ------*/
+ np = (u8 *) nvramp;
for (i = 0; i < 63; i++)
chksum += *np++;
- if (nvramp->CheckSum != (UCHAR) chksum)
+ if (nvramp->CheckSum != (u8) chksum)
return -1;
return 1;
}
-/************************************************************************
- Update SCSI H/A configuration parameters from serial EEPROM
-*************************************************************************/
-static void se2_update_all(ORC_HCS * hcsp)
+/**
+ * se2_update_all - update the EEPROM
+ * @host: Host whose EEPROM is being updated
+ *
+ * Update changed bytes in the EEPROM image.
+ */
+
+static void se2_update_all(struct orc_host * host)
{ /* setup default pattern */
int i;
- UCHAR *np, *np1, chksum = 0;
+ u8 *np, *np1, chksum = 0;
/* Calculate checksum first */
- np = (UCHAR *) dftNvRam;
+ np = (u8 *) default_nvram;
for (i = 0; i < 63; i++)
chksum += *np++;
*np = chksum;
- np = (UCHAR *) dftNvRam;
- np1 = (UCHAR *) nvramp;
+ np = (u8 *) default_nvram;
+ np1 = (u8 *) nvramp;
for (i = 0; i < 64; i++, np++, np1++) {
- if (*np != *np1) {
- set_NVRAM(hcsp, (unsigned char) i, *np);
- }
+ if (*np != *np1)
+ orc_nv_write(host, (u8) i, *np);
}
- return;
}
-/*************************************************************************
- Function name : read_eeprom
-**************************************************************************/
-static void read_eeprom(ORC_HCS * hcsp)
+/**
+ * read_eeprom - load EEPROM
+ * @host: Host EEPROM to read
+ *
+ * Read the EEPROM for a given host. If it is invalid or the read
+ * fails, restore the defaults and use them.
+ */
+
+static void read_eeprom(struct orc_host * host)
{
- if (se2_rd_all(hcsp) != 1) {
- se2_update_all(hcsp); /* setup default pattern */
- se2_rd_all(hcsp); /* load again */
+ if (se2_rd_all(host) != 1) {
+ se2_update_all(host); /* setup default pattern */
+ se2_rd_all(host); /* load again */
}
}
-/***************************************************************************/
-static UCHAR load_FW(ORC_HCS * hcsp)
+/**
+ * orc_load_firmware - initialise firmware
+ * @host: Host to set up
+ *
+ * Load the firmware from the EEPROM into controller SRAM. This
+ * is basically a 4K block copy followed by a 4K read back to verify
+ * correctness. The rest is convoluted by the indirect interfaces
+ * in the hardware.
+ */
+
+static u8 orc_load_firmware(struct orc_host * host)
{
- U32 dData;
- USHORT wBIOSAddress;
- USHORT i;
- UCHAR *pData, bData;
-
-
- bData = ORC_RD(hcsp->HCS_Base, ORC_GCFG);
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData | EEPRG); /* Enable EEPROM programming */
- ORC_WR(hcsp->HCS_Base + ORC_EBIOSADR2, 0x00);
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x00);
- if (ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA) != 0x55) {
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */
+ u32 data32;
+ u16 bios_addr;
+ u16 i;
+ u8 *data32_ptr, data;
+
+
+ /* Set up the EEPROM for access */
+
+ data = inb(host->base + ORC_GCFG);
+ outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */
+ outb(0x00, host->base + ORC_EBIOSADR2);
+ outw(0x0000, host->base + ORC_EBIOSADR0);
+ if (inb(host->base + ORC_EBIOSDATA) != 0x55) {
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 0;
}
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x01);
- if (ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA) != 0xAA) {
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */
+ outw(0x0001, host->base + ORC_EBIOSADR0);
+ if (inb(host->base + ORC_EBIOSDATA) != 0xAA) {
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 0;
}
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST | DOWNLOAD); /* Enable SRAM programming */
- pData = (UCHAR *) & dData;
- dData = 0; /* Initial FW address to 0 */
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x10);
- *pData = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x11);
- *(pData + 1) = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x12);
- *(pData + 2) = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
- ORC_WR(hcsp->HCS_Base + ORC_EBIOSADR2, *(pData + 2));
- ORC_WRLONG(hcsp->HCS_Base + ORC_FWBASEADR, dData); /* Write FW address */
-
- wBIOSAddress = (USHORT) dData; /* FW code locate at BIOS address + ? */
- for (i = 0, pData = (UCHAR *) & dData; /* Download the code */
+
+ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
+ data32_ptr = (u8 *) & data32;
+ data32 = 0; /* Initial FW address to 0 */
+ outw(0x0010, host->base + ORC_EBIOSADR0);
+ *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(0x0011, host->base + ORC_EBIOSADR0);
+ *(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(0x0012, host->base + ORC_EBIOSADR0);
+ *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
+ outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
+ outl(data32, host->base + ORC_FWBASEADR); /* Write FW address */
+
+ /* Copy the code from the BIOS to the SRAM */
+
+ bios_addr = (u16) data32; /* FW code locate at BIOS address + ? */
+ for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
i < 0x1000; /* Firmware code size = 4K */
- i++, wBIOSAddress++) {
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, wBIOSAddress);
- *pData++ = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
+ i++, bios_addr++) {
+ outw(bios_addr, host->base + ORC_EBIOSADR0);
+ *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
- ORC_WRLONG(hcsp->HCS_Base + ORC_RISCRAM, dData); /* Write every 4 bytes */
- pData = (UCHAR *) & dData;
+ outl(data32, host->base + ORC_RISCRAM); /* Write every 4 bytes */
+ data32_ptr = (u8 *) & data32;
}
}
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST | DOWNLOAD); /* Reset program count 0 */
- wBIOSAddress -= 0x1000; /* Reset the BIOS adddress */
- for (i = 0, pData = (UCHAR *) & dData; /* Check the code */
+ /* Go back and check they match */
+
+ outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */
+ bios_addr -= 0x1000; /* Reset the BIOS address */
+ for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */
i < 0x1000; /* Firmware code size = 4K */
- i++, wBIOSAddress++) {
- ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, wBIOSAddress);
- *pData++ = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */
+ i++, bios_addr++) {
+ outw(bios_addr, host->base + ORC_EBIOSADR0);
+ *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
- if (ORC_RDLONG(hcsp->HCS_Base, ORC_RISCRAM) != dData) {
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST); /* Reset program to 0 */
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /*Disable EEPROM programming */
+ if (inl(host->base + ORC_RISCRAM) != data32) {
+ outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
+ outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
return 0;
}
- pData = (UCHAR *) & dData;
+ data32_ptr = (u8 *) & data32;
}
}
- ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST); /* Reset program to 0 */
- ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */
+
+ /* Success */
+ outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
+ outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 1;
}
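The download loop above assembles each 32-bit word for ORC_RISCRAM by storing four successive BIOS bytes through a byte pointer into data32, so the resulting word follows host byte order. On a little-endian host (an assumption, though that is what this PCI card is normally fitted to) the packing is equivalent to:

/* Illustrative sketch, little-endian assumption: how four successive
 * BIOS bytes b0..b3 form the word written to the RISC RAM port. */
static u32 orc_pack_fw_word(u8 b0, u8 b1, u8 b2, u8 b3)
{
	return b0 | (b1 << 8) | ((u32)b2 << 16) | ((u32)b3 << 24);
}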
/***************************************************************************/
-static void setup_SCBs(ORC_HCS * hcsp)
+static void setup_SCBs(struct orc_host * host)
{
- ORC_SCB *pVirScb;
+ struct orc_scb *scb;
int i;
- ESCB *pVirEscb;
- dma_addr_t pPhysEscb;
+ struct orc_extended_scb *escb;
+ dma_addr_t escb_phys;
- /* Setup SCB HCS_Base and SCB Size registers */
- ORC_WR(hcsp->HCS_Base + ORC_SCBSIZE, ORC_MAXQUEUE); /* Total number of SCBs */
- /* SCB HCS_Base address 0 */
- ORC_WRLONG(hcsp->HCS_Base + ORC_SCBBASE0, hcsp->HCS_physScbArray);
- /* SCB HCS_Base address 1 */
- ORC_WRLONG(hcsp->HCS_Base + ORC_SCBBASE1, hcsp->HCS_physScbArray);
+ /* Setup SCB base and SCB Size registers */
+ outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */
+ /* SCB base address 0 */
+ outl(host->scb_phys, host->base + ORC_SCBBASE0);
+ /* SCB base address 1 */
+ outl(host->scb_phys, host->base + ORC_SCBBASE1);
/* setup scatter list address with one buffer */
- pVirScb = hcsp->HCS_virScbArray;
- pVirEscb = hcsp->HCS_virEscbArray;
+ scb = host->scb_virt;
+ escb = host->escb_virt;
for (i = 0; i < ORC_MAXQUEUE; i++) {
- pPhysEscb = (hcsp->HCS_physEscbArray + (sizeof(ESCB) * i));
- pVirScb->SCB_SGPAddr = (U32) pPhysEscb;
- pVirScb->SCB_SensePAddr = (U32) pPhysEscb;
- pVirScb->SCB_EScb = pVirEscb;
- pVirScb->SCB_ScbIdx = i;
- pVirScb++;
- pVirEscb++;
+ escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
+ scb->sg_addr = (u32) escb_phys;
+ scb->sense_addr = (u32) escb_phys;
+ scb->escb = escb;
+ scb->scbidx = i;
+ scb++;
+ escb++;
}
-
- return;
}
-/***************************************************************************/
-static void initAFlag(ORC_HCS * hcsp)
+/**
+ * init_alloc_map - initialise allocation map
+ * @host: host map to configure
+ *
+ * Initialise the allocation maps for this device. If the device
+ * is not quiescent, the caller must hold the allocation lock.
+ */
+
+static void init_alloc_map(struct orc_host * host)
{
- UCHAR i, j;
+ u8 i, j;
for (i = 0; i < MAX_CHANNELS; i++) {
for (j = 0; j < 8; j++) {
- hcsp->BitAllocFlag[i][j] = 0xffffffff;
+ host->allocation_map[i][j] = 0xffffffff;
}
}
}
-/***************************************************************************/
-static int init_orchid(ORC_HCS * hcsp)
+/**
+ * init_orchid - initialise the host adapter
+ * @host: host adapter to initialise
+ *
+ * Initialise the controller and, if necessary, load the firmware.
+ *
+ * Returns -1 if the initialisation fails.
+ */
+
+static int init_orchid(struct orc_host * host)
{
- UBYTE *readBytep;
- USHORT revision;
- UCHAR i;
-
- initAFlag(hcsp);
- ORC_WR(hcsp->HCS_Base + ORC_GIMSK, 0xFF); /* Disable all interrupt */
- if (ORC_RD(hcsp->HCS_Base, ORC_HSTUS) & RREADY) { /* Orchid is ready */
- revision = get_FW_version(hcsp);
+ u8 *ptr;
+ u16 revision;
+ u8 i;
+
+ init_alloc_map(host);
+ outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */
+
+ if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */
+ revision = orc_read_fwrev(host);
if (revision == 0xFFFF) {
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, DEVRST); /* Reset Host Adapter */
- if (waitChipReady(hcsp) == 0)
- return (-1);
- load_FW(hcsp); /* Download FW */
- setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, 0); /* clear HOSTSTOP */
- if (waitFWReady(hcsp) == 0)
- return (-1);
+ outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
+ if (wait_chip_ready(host) == 0)
+ return -1;
+ orc_load_firmware(host); /* Download FW */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */
+ if (wait_firmware_ready(host) == 0)
+ return -1;
/* Wait for firmware ready */
} else {
- setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
}
} else { /* Orchid is not Ready */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, DEVRST); /* Reset Host Adapter */
- if (waitChipReady(hcsp) == 0)
- return (-1);
- load_FW(hcsp); /* Download FW */
- setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); /* Do Hardware Reset & */
+ outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
+ if (wait_chip_ready(host) == 0)
+ return -1;
+ orc_load_firmware(host); /* Download FW */
+ setup_SCBs(host); /* Setup SCB base and SCB Size registers */
+ outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */
/* clear HOSTSTOP */
- if (waitFWReady(hcsp) == 0) /* Wait for firmware ready */
- return (-1);
+ if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */
+ return -1;
}
-/*------------- get serial EEProm settting -------*/
-
- read_eeprom(hcsp);
-
- if (nvramp->Revision != 1)
- return (-1);
+ /* Load an EEProm copy into RAM */
+ /* Assumes single threaded at this point */
+ read_eeprom(host);
- hcsp->HCS_SCSI_ID = nvramp->SCSI0Id;
- hcsp->HCS_BIOS = nvramp->BIOSConfig1;
- hcsp->HCS_MaxTar = MAX_TARGETS;
- readBytep = (UCHAR *) & (nvramp->Target00Config);
- for (i = 0; i < 16; readBytep++, i++) {
- hcsp->TargetFlag[i] = *readBytep;
- hcsp->MaximumTags[i] = ORC_MAXTAGS;
- } /* for */
+ if (nvramp->revision != 1)
+ return -1;
- if (nvramp->SCSI0Config & NCC_BUSRESET) { /* Reset SCSI bus */
- hcsp->HCS_Flags |= HCF_SCSI_RESET;
+ host->scsi_id = nvramp->scsi_id;
+ host->BIOScfg = nvramp->BIOSConfig1;
+ host->max_targets = MAX_TARGETS;
+ ptr = (u8 *) & (nvramp->Target00Config);
+ for (i = 0; i < 16; ptr++, i++) {
+ host->target_flag[i] = *ptr;
+ host->max_tags[i] = ORC_MAXTAGS;
}
- ORC_WR(hcsp->HCS_Base + ORC_GIMSK, 0xFB); /* enable RP FIFO interrupt */
- return (0);
+
+ if (nvramp->SCSI0Config & NCC_BUSRESET)
+ host->flags |= HCF_SCSI_RESET;
+ outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */
+ return 0;
}
-/*****************************************************************************
- Function name : orc_reset_scsi_bus
- Description : Reset registers, reset a hanging bus and
- kill active and disconnected commands for target w/o soft reset
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int orc_reset_scsi_bus(ORC_HCS * pHCB)
+/**
+ * orc_reset_scsi_bus - perform bus reset
+ * @host: host being reset
+ *
+ * Perform a full bus reset on the adapter.
+ */
+
+static int orc_reset_scsi_bus(struct orc_host * host)
{ /* I need Host Control Block Information */
- ULONG flags;
+ unsigned long flags;
- spin_lock_irqsave(&(pHCB->BitAllocFlagLock), flags);
+ spin_lock_irqsave(&host->allocation_lock, flags);
- initAFlag(pHCB);
+ init_alloc_map(host);
/* reset scsi bus */
- ORC_WR(pHCB->HCS_Base + ORC_HCTRL, SCSIRST);
- if (waitSCSIRSTdone(pHCB) == 0) {
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+ outb(SCSIRST, host->base + ORC_HCTRL);
+ /* FIXME: We can spend up to a second with the lock held and
+ interrupts off here */
+ if (wait_scsi_reset_done(host) == 0) {
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return FAILED;
} else {
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
}
}
-/*****************************************************************************
- Function name : orc_device_reset
- Description : Reset registers, reset a hanging bus and
- kill active and disconnected commands for target w/o soft reset
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int orc_device_reset(ORC_HCS * pHCB, struct scsi_cmnd *SCpnt, unsigned int target)
+/**
+ * orc_device_reset - device reset handler
+ * @host: host to reset
+ * @cmd: command causing the reset
+ * @target: target device
+ *
+ * Reset registers, reset a hanging bus and kill active and disconnected
+ * commands for target w/o soft reset
+ */
+
+static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target)
{ /* I need Host Control Block Information */
- ORC_SCB *pScb;
- ESCB *pVirEscb;
- ORC_SCB *pVirScb;
- UCHAR i;
- ULONG flags;
+ struct orc_scb *scb;
+ struct orc_extended_scb *escb;
+ struct orc_scb *host_scb;
+ u8 i;
+ unsigned long flags;
- spin_lock_irqsave(&(pHCB->BitAllocFlagLock), flags);
- pScb = (ORC_SCB *) NULL;
- pVirEscb = (ESCB *) NULL;
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+ scb = (struct orc_scb *) NULL;
+ escb = (struct orc_extended_scb *) NULL;
/* setup scatter list address with one buffer */
- pVirScb = pHCB->HCS_virScbArray;
+ host_scb = host->scb_virt;
+
+ /* FIXME: is this safe if we then fail to issue the reset or race
+ a completion ? */
+ init_alloc_map(host);
- initAFlag(pHCB);
- /* device reset */
+ /* Find the scb corresponding to the command */
for (i = 0; i < ORC_MAXQUEUE; i++) {
- pVirEscb = pVirScb->SCB_EScb;
- if ((pVirScb->SCB_Status) && (pVirEscb->SCB_Srb == SCpnt))
+ escb = host_scb->escb;
+ if (host_scb->status && escb->srb == cmd)
break;
- pVirScb++;
+ host_scb++;
}
if (i == ORC_MAXQUEUE) {
- printk("Unable to Reset - No SCB Found\n");
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+ printk(KERN_ERR "Unable to Reset - No SCB Found\n");
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
return FAILED;
}
- if ((pScb = orc_alloc_scb(pHCB)) == NULL) {
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+
+ /* Allocate a new SCB for the reset command to the firmware */
+ if ((scb = __orc_alloc_scb(host)) == NULL) {
+ /* Can't happen.. */
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
return FAILED;
}
- pScb->SCB_Opcode = ORC_BUSDEVRST;
- pScb->SCB_Target = target;
- pScb->SCB_HaStat = 0;
- pScb->SCB_TaStat = 0;
- pScb->SCB_Status = 0x0;
- pScb->SCB_Link = 0xFF;
- pScb->SCB_Reserved0 = 0;
- pScb->SCB_Reserved1 = 0;
- pScb->SCB_XferLen = 0;
- pScb->SCB_SGLen = 0;
-
- pVirEscb->SCB_Srb = NULL;
- pVirEscb->SCB_Srb = SCpnt;
- orc_exec_scb(pHCB, pScb); /* Start execute SCB */
- spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags);
+
+ /* Device reset is handled by the firmware: we fill in an SCB and
+ fire it at the controller, which does the rest */
+ scb->opcode = ORC_BUSDEVRST;
+ scb->target = target;
+ scb->hastat = 0;
+ scb->tastat = 0;
+ scb->status = 0x0;
+ scb->link = 0xFF;
+ scb->reserved0 = 0;
+ scb->reserved1 = 0;
+ scb->xferlen = 0;
+ scb->sg_len = 0;
+
+ escb->srb = NULL;
+ escb->srb = cmd;
+ orc_exec_scb(host, scb); /* Start execute SCB */
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
}
+/**
+ * __orc_alloc_scb - allocate an SCB
+ * @host: host to allocate from
+ *
+ * Allocate an SCB and return a pointer to the SCB object. NULL
+ * is returned if no SCB is free. The caller must already hold
+ * the allocator lock at this point.
+ */
-/***************************************************************************/
-static ORC_SCB *__orc_alloc_scb(ORC_HCS * hcsp)
+
+static struct orc_scb *__orc_alloc_scb(struct orc_host * host)
{
- ORC_SCB *pTmpScb;
- UCHAR Ch;
- ULONG idx;
- UCHAR index;
- UCHAR i;
+ u8 channel;
+ unsigned long idx;
+ u8 index;
+ u8 i;
- Ch = hcsp->HCS_Index;
+ channel = host->index;
for (i = 0; i < 8; i++) {
for (index = 0; index < 32; index++) {
- if ((hcsp->BitAllocFlag[Ch][i] >> index) & 0x01) {
- hcsp->BitAllocFlag[Ch][i] &= ~(1 << index);
+ if ((host->allocation_map[channel][i] >> index) & 0x01) {
+ host->allocation_map[channel][i] &= ~(1 << index);
break;
}
}
idx = index + 32 * i;
- pTmpScb = (ORC_SCB *) ((ULONG) hcsp->HCS_virScbArray + (idx * sizeof(ORC_SCB)));
- return (pTmpScb);
+ /* Translate the index to a structure instance */
+ return (struct orc_scb *) ((unsigned long) host->scb_virt + (idx * sizeof(struct orc_scb)));
}
- return (NULL);
+ return NULL;
}
-static ORC_SCB *orc_alloc_scb(ORC_HCS * hcsp)
+/**
+ * orc_alloc_scb - allocate an SCB
+ * @host: host to allocate from
+ *
+ * Allocate an SCB and return a pointer to the SCB object. NULL
+ * is returned if no SCB is free.
+ */
+
+static struct orc_scb *orc_alloc_scb(struct orc_host * host)
{
- ORC_SCB *pTmpScb;
- ULONG flags;
+ struct orc_scb *scb;
+ unsigned long flags;
- spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags);
- pTmpScb = __orc_alloc_scb(hcsp);
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
- return (pTmpScb);
+ spin_lock_irqsave(&host->allocation_lock, flags);
+ scb = __orc_alloc_scb(host);
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
+ return scb;
}
+/**
+ * orc_release_scb - release an SCB
+ * @host: host owning the SCB
+ * @scb: SCB that is now free
+ *
+ * Called to return a completed SCB to the allocation pool. Before
+ * calling, the SCB must be out of use on both the host and the HA.
+ */
-/***************************************************************************/
-static void orc_release_scb(ORC_HCS * hcsp, ORC_SCB * scbp)
+static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
{
- ULONG flags;
- UCHAR Index;
- UCHAR i;
- UCHAR Ch;
-
- spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags);
- Ch = hcsp->HCS_Index;
- Index = scbp->SCB_ScbIdx;
- i = Index / 32;
- Index %= 32;
- hcsp->BitAllocFlag[Ch][i] |= (1 << Index);
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
+ unsigned long flags;
+ u8 index, i, channel;
+
+ spin_lock_irqsave(&(host->allocation_lock), flags);
+ channel = host->index; /* Channel */
+ index = scb->scbidx;
+ i = index / 32;
+ index %= 32;
+ host->allocation_map[channel][i] |= (1 << index);
+ spin_unlock_irqrestore(&(host->allocation_lock), flags);
}
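The allocation map gives one bit per SCB slot, 32 slots per u32 word and eight words per channel. The index arithmetic shared by __orc_alloc_scb and orc_release_scb above can be stated in isolation; a sketch, not part of the patch:

/* Illustrative sketch: locate the bit for an SCB index inside
 * host->allocation_map[channel][]. */
static inline void orc_scbidx_to_bit(u8 scbidx, u8 *word, u8 *bit)
{
	*word = scbidx / 32;	/* which u32 word in the map */
	*bit = scbidx % 32;	/* which bit within that word */
}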
-/*****************************************************************************
- Function name : abort_SCB
- Description : Abort a queued command.
- (commands that are on the bus can't be aborted easily)
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int abort_SCB(ORC_HCS * hcsp, ORC_SCB * pScb)
+/**
+ * orchid_abort_scb - abort a command
+ * @host: host adapter owning the SCB
+ * @scb: SCB to abort
+ *
+ * Abort a queued command that has been passed to the firmware layer
+ * if possible. This is all handled by the firmware. We ask the firmware
+ * and it either aborts the command or fails.
+ */
+
+static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb)
{
- unsigned char bData, bStatus;
+ unsigned char data, status;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_ABORT_SCB); /* Write command */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- ORC_WR(hcsp->HCS_Base + ORC_HDATA, pScb->SCB_ScbIdx); /* Write address */
- ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
- if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
+ outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */
+ outb(HDO, host->base + ORC_HCTRL);
+ if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
- if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */
+ if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
- bStatus = ORC_RD(hcsp->HCS_Base, ORC_HDATA);
- ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */
+ status = inb(host->base + ORC_HDATA);
+ outb(data, host->base + ORC_HSTUS); /* Clear HDI */
- if (bStatus == 1) /* 0 - Successfully */
+ if (status == 1) /* 0 - Successfully */
return 0; /* 1 - Fail */
return 1;
}
-/*****************************************************************************
- Function name : inia100_abort
- Description : Abort a queued command.
- (commands that are on the bus can't be aborted easily)
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int orc_abort_srb(ORC_HCS * hcsp, struct scsi_cmnd *SCpnt)
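+/**
+ * inia100_abort_cmd - abort a queued command
+ * @host: host adapter
+ * @cmd: mid layer command to abort
+ *
+ * Walk the SCB queue for the command and, if it is still queued and
+ * was issued with a tag message, ask the firmware to abort it.
+ */
+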
+static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd)
{
- ESCB *pVirEscb;
- ORC_SCB *pVirScb;
- UCHAR i;
- ULONG flags;
+ struct orc_extended_scb *escb;
+ struct orc_scb *scb;
+ u8 i;
+ unsigned long flags;
- spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags);
+ spin_lock_irqsave(&(host->allocation_lock), flags);
- pVirScb = hcsp->HCS_virScbArray;
+ scb = host->scb_virt;
- for (i = 0; i < ORC_MAXQUEUE; i++, pVirScb++) {
- pVirEscb = pVirScb->SCB_EScb;
- if ((pVirScb->SCB_Status) && (pVirEscb->SCB_Srb == SCpnt)) {
- if (pVirScb->SCB_TagMsg == 0) {
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
- return FAILED;
+ /* Walk the queue until we find the SCB that belongs to the command
+ block. This isn't a performance critical path so a walk in the park
+ here does no harm */
+
+ for (i = 0; i < ORC_MAXQUEUE; i++, scb++) {
+ escb = scb->escb;
+ if (scb->status && escb->srb == cmd) {
+ if (scb->tag_msg == 0) {
+ goto out;
} else {
- if (abort_SCB(hcsp, pVirScb)) {
- pVirEscb->SCB_Srb = NULL;
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
+ /* Issue an ABORT to the firmware */
+ if (orchid_abort_scb(host, scb)) {
+ escb->srb = NULL;
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
- } else {
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
- return FAILED;
- }
+ } else
+ goto out;
}
}
}
- spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
+out:
+ spin_unlock_irqrestore(&host->allocation_lock, flags);
return FAILED;
}
-/***********************************************************************
- Routine Description:
- This is the interrupt service routine for the Orchid SCSI adapter.
- It reads the interrupt register to determine if the adapter is indeed
- the source of the interrupt and clears the interrupt at the device.
- Arguments:
- HwDeviceExtension - HBA miniport driver's adapter data storage
- Return Value:
-***********************************************************************/
-static void orc_interrupt(
- ORC_HCS * hcsp
-)
+/**
+ * orc_interrupt - IRQ processing
+ * @host: Host causing the interrupt
+ *
+ * This function is called from the IRQ handler and protected
+ * by the host lock. While the controller reports that there are
+ * SCBs for processing, we pull them off the controller, turn the
+ * index into a host address pointer to the SCB and call the SCB
+ * handler.
+ *
+ * Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise
+ */
+
+static irqreturn_t orc_interrupt(struct orc_host * host)
{
- BYTE bScbIdx;
- ORC_SCB *pScb;
+ u8 scb_index;
+ struct orc_scb *scb;
- if (ORC_RD(hcsp->HCS_Base, ORC_RQUEUECNT) == 0) {
- return; // 0;
+ /* Check if we have an SCB queued for servicing */
+ if (inb(host->base + ORC_RQUEUECNT) == 0)
+ return IRQ_NONE;
- }
do {
- bScbIdx = ORC_RD(hcsp->HCS_Base, ORC_RQUEUE);
-
- pScb = (ORC_SCB *) ((ULONG) hcsp->HCS_virScbArray + (ULONG) (sizeof(ORC_SCB) * bScbIdx));
- pScb->SCB_Status = 0x0;
-
- inia100SCBPost((BYTE *) hcsp, (BYTE *) pScb);
- } while (ORC_RD(hcsp->HCS_Base, ORC_RQUEUECNT));
- return; //1;
-
+ /* Get the SCB index of the SCB to service */
+ scb_index = inb(host->base + ORC_RQUEUE);
+
+ /* Translate it back to a host pointer */
+ scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index));
+ scb->status = 0x0;
+ /* Process the SCB */
+ inia100_scb_handler(host, scb);
+ } while (inb(host->base + ORC_RQUEUECNT));
+ return IRQ_HANDLED;
} /* End of I1060Interrupt() */
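Since scb_virt is now typed struct orc_scb *, the cast-based index translation in orc_interrupt is just array indexing; a sketch of the equivalent form, not part of the patch:

/* Illustrative sketch: same index-to-SCB translation as performed in
 * orc_interrupt above. */
static struct orc_scb *orc_scb_from_index(struct orc_host *host, u8 scb_index)
{
	return &host->scb_virt[scb_index];
}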
-/*****************************************************************************
- Function name : inia100BuildSCB
- Description :
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * SCpnt)
+/**
+ * inia100_build_scb - build SCB
+ * @host: host owning the control block
+ * @scb: control block to use
+ * @cmd: Mid layer command
+ *
+ * Build a host adapter control block from the SCSI mid layer command
+ */
+
+static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
{ /* Create corresponding SCB */
- struct scatterlist *pSrbSG;
- ORC_SG *pSG; /* Pointer to SG list */
+ struct scatterlist *sg;
+ struct orc_sgent *sgent; /* Pointer to SG list */
int i, count_sg;
- ESCB *pEScb;
-
- pEScb = pSCB->SCB_EScb;
- pEScb->SCB_Srb = SCpnt;
- pSG = NULL;
-
- pSCB->SCB_Opcode = ORC_EXECSCSI;
- pSCB->SCB_Flags = SCF_NO_DCHK; /* Clear done bit */
- pSCB->SCB_Target = SCpnt->device->id;
- pSCB->SCB_Lun = SCpnt->device->lun;
- pSCB->SCB_Reserved0 = 0;
- pSCB->SCB_Reserved1 = 0;
- pSCB->SCB_SGLen = 0;
-
- if ((pSCB->SCB_XferLen = (U32) SCpnt->request_bufflen)) {
- pSG = (ORC_SG *) & pEScb->ESCB_SGList[0];
- if (SCpnt->use_sg) {
- pSrbSG = (struct scatterlist *) SCpnt->request_buffer;
- count_sg = pci_map_sg(pHCB->pdev, pSrbSG, SCpnt->use_sg,
- SCpnt->sc_data_direction);
- pSCB->SCB_SGLen = (U32) (count_sg * 8);
- for (i = 0; i < count_sg; i++, pSG++, pSrbSG++) {
- pSG->SG_Ptr = (U32) sg_dma_address(pSrbSG);
- pSG->SG_Len = (U32) sg_dma_len(pSrbSG);
- }
- } else if (SCpnt->request_bufflen != 0) {/* Non SG */
- pSCB->SCB_SGLen = 0x8;
- SCpnt->SCp.dma_handle = pci_map_single(pHCB->pdev,
- SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- pSG->SG_Ptr = (U32) SCpnt->SCp.dma_handle;
- pSG->SG_Len = (U32) SCpnt->request_bufflen;
- } else {
- pSCB->SCB_SGLen = 0;
- pSG->SG_Ptr = 0;
- pSG->SG_Len = 0;
+ struct orc_extended_scb *escb;
+
+ /* Links between the escb, scb and Linux scsi midlayer cmd */
+ escb = scb->escb;
+ escb->srb = cmd;
+ sgent = NULL;
+
+ /* Set up the SCB to do a SCSI command block */
+ scb->opcode = ORC_EXECSCSI;
+ scb->flags = SCF_NO_DCHK; /* Clear done bit */
+ scb->target = cmd->device->id;
+ scb->lun = cmd->device->lun;
+ scb->reserved0 = 0;
+ scb->reserved1 = 0;
+ scb->sg_len = 0;
+
+ scb->xferlen = (u32) scsi_bufflen(cmd);
+ sgent = (struct orc_sgent *) & escb->sglist[0];
+
+ count_sg = scsi_dma_map(cmd);
+ BUG_ON(count_sg < 0);
+
+ /* Build the scatter gather lists */
+ if (count_sg) {
+ scb->sg_len = (u32) (count_sg * 8);
+ scsi_for_each_sg(cmd, sg, count_sg, i) {
+ sgent->base = (u32) sg_dma_address(sg);
+ sgent->length = (u32) sg_dma_len(sg);
+ sgent++;
}
+ } else {
+ scb->sg_len = 0;
+ sgent->base = 0;
+ sgent->length = 0;
}
- pSCB->SCB_SGPAddr = (U32) pSCB->SCB_SensePAddr;
- pSCB->SCB_HaStat = 0;
- pSCB->SCB_TaStat = 0;
- pSCB->SCB_Link = 0xFF;
- pSCB->SCB_SenseLen = SENSE_SIZE;
- pSCB->SCB_CDBLen = SCpnt->cmd_len;
- if (pSCB->SCB_CDBLen >= IMAX_CDB) {
- printk("max cdb length= %x\b", SCpnt->cmd_len);
- pSCB->SCB_CDBLen = IMAX_CDB;
+ scb->sg_addr = (u32) scb->sense_addr;
+ scb->hastat = 0;
+ scb->tastat = 0;
+ scb->link = 0xFF;
+ scb->sense_len = SENSE_SIZE;
+ scb->cdb_len = cmd->cmd_len;
+ if (scb->cdb_len >= IMAX_CDB) {
+ printk("max cdb length= %x\b", cmd->cmd_len);
+ scb->cdb_len = IMAX_CDB;
}
- pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW;
- if (SCpnt->device->tagged_supported) { /* Tag Support */
- pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
+ scb->ident = cmd->device->lun | DISC_ALLOW;
+ if (cmd->device->tagged_supported) { /* Tag Support */
+ scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
- pSCB->SCB_TagMsg = 0; /* No tag support */
+ scb->tag_msg = 0; /* No tag support */
}
- memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, pSCB->SCB_CDBLen);
- return;
+ memcpy(&scb->cdb[0], &cmd->cmnd, scb->cdb_len);
}
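The conversion above to scsi_dma_map()/scsi_for_each_sg() replaces the old use_sg/request_buffer handling. Separated from the SCB bookkeeping, the bare mapping pattern looks like this (sketch, not part of the patch; the mapping-failure check is omitted):

/* Illustrative sketch of the scatter-gather walk used by
 * inia100_build_scb: map the command, then copy each mapped segment
 * into the adapter's SG list. */
static void sketch_fill_sglist(struct scsi_cmnd *cmd, struct orc_sgent *sgent)
{
	struct scatterlist *sg;
	int i, nseg = scsi_dma_map(cmd);

	scsi_for_each_sg(cmd, sg, nseg, i) {
		sgent->base = (u32) sg_dma_address(sg);
		sgent->length = (u32) sg_dma_len(sg);
		sgent++;
	}
}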
-/*****************************************************************************
- Function name : inia100_queue
- Description : Queue a command and setup interrupts for a free bus.
- Input : pHCB - Pointer to host adapter structure
- Output : None.
- Return : pSRB - Pointer to SCSI request block.
-*****************************************************************************/
-static int inia100_queue(struct scsi_cmnd * SCpnt, void (*done) (struct scsi_cmnd *))
+/**
+ * inia100_queue - queue command with host
+ * @cmd: Command block
+ * @done: Completion function
+ *
+ * Called by the mid layer to queue a command. Process the command
+ * block, build the host specific SCB structures and, if there is
+ * room, queue the command down to the controller.
+ */
+
+static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
- register ORC_SCB *pSCB;
- ORC_HCS *pHCB; /* Point to Host adapter control block */
+ struct orc_scb *scb;
+ struct orc_host *host; /* Point to Host adapter control block */
- pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
- SCpnt->scsi_done = done;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ cmd->scsi_done = done;
/* Get free SCSI control block */
- if ((pSCB = orc_alloc_scb(pHCB)) == NULL)
+ if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
- inia100BuildSCB(pHCB, pSCB, SCpnt);
- orc_exec_scb(pHCB, pSCB); /* Start execute SCB */
-
- return (0);
+ inia100_build_scb(host, scb, cmd);
+ orc_exec_scb(host, scb); /* Start execute SCB */
+ return 0;
}
/*****************************************************************************
Function name : inia100_abort
Description : Abort a queued command.
(commands that are on the bus can't be aborted easily)
- Input : pHCB - Pointer to host adapter structure
+ Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
-static int inia100_abort(struct scsi_cmnd * SCpnt)
+static int inia100_abort(struct scsi_cmnd * cmd)
{
- ORC_HCS *hcsp;
+ struct orc_host *host;
- hcsp = (ORC_HCS *) SCpnt->device->host->hostdata;
- return orc_abort_srb(hcsp, SCpnt);
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return inia100_abort_cmd(host, cmd);
}
/*****************************************************************************
Function name : inia100_reset
Description : Reset registers, reset a hanging bus and
kill active and disconnected commands for target w/o soft reset
- Input : pHCB - Pointer to host adapter structure
+ Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
-static int inia100_bus_reset(struct scsi_cmnd * SCpnt)
+static int inia100_bus_reset(struct scsi_cmnd * cmd)
{ /* I need Host Control Block Information */
- ORC_HCS *pHCB;
- pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
- return orc_reset_scsi_bus(pHCB);
+ struct orc_host *host;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return orc_reset_scsi_bus(host);
}
/*****************************************************************************
Function name : inia100_device_reset
Description : Reset the device
- Input : pHCB - Pointer to host adapter structure
+ Input : host - Pointer to host adapter structure
Output : None.
Return : pSRB - Pointer to SCSI request block.
*****************************************************************************/
-static int inia100_device_reset(struct scsi_cmnd * SCpnt)
+static int inia100_device_reset(struct scsi_cmnd * cmd)
{ /* I need Host Control Block Information */
- ORC_HCS *pHCB;
- pHCB = (ORC_HCS *) SCpnt->device->host->hostdata;
- return orc_device_reset(pHCB, SCpnt, scmd_id(SCpnt));
+ struct orc_host *host;
+ host = (struct orc_host *) cmd->device->host->hostdata;
+ return orc_device_reset(host, cmd, scmd_id(cmd));
}
-/*****************************************************************************
- Function name : inia100SCBPost
- Description : This is callback routine be called when orc finish one
- SCSI command.
- Input : pHCB - Pointer to host adapter control block.
- pSCB - Pointer to SCSI control block.
- Output : None.
- Return : None.
-*****************************************************************************/
-static void inia100SCBPost(BYTE * pHcb, BYTE * pScb)
+/**
+ * inia100_scb_handler - interrupt callback
+ * @host: Host causing the interrupt
+ * @scb: SCB the controller returned as needing processing
+ *
+ * Perform completion processing on a control block. Do the conversions
+ * from host to SCSI midlayer error coding, save any sense data and
+ * then complete with the midlayer and recycle the SCB.
+ */
+
+static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
{
- struct scsi_cmnd *pSRB; /* Pointer to SCSI request block */
- ORC_HCS *pHCB;
- ORC_SCB *pSCB;
- ESCB *pEScb;
-
- pHCB = (ORC_HCS *) pHcb;
- pSCB = (ORC_SCB *) pScb;
- pEScb = pSCB->SCB_EScb;
- if ((pSRB = (struct scsi_cmnd *) pEScb->SCB_Srb) == 0) {
- printk("inia100SCBPost: SRB pointer is empty\n");
- orc_release_scb(pHCB, pSCB); /* Release SCB for current channel */
+ struct scsi_cmnd *cmd; /* Pointer to SCSI request block */
+ struct orc_extended_scb *escb;
+
+ escb = scb->escb;
+ if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) {
+ printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n");
+ orc_release_scb(host, scb); /* Release SCB for current channel */
return;
}
- pEScb->SCB_Srb = NULL;
+ escb->srb = NULL;
- switch (pSCB->SCB_HaStat) {
+ switch (scb->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
- pSCB->SCB_HaStat = 0;
+ scb->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
- pSCB->SCB_HaStat = DID_TIME_OUT;
+ scb->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
- pSCB->SCB_HaStat = DID_RESET;
+ scb->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
- pSCB->SCB_HaStat = DID_ABORT;
+ scb->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
@@ -984,46 +1022,41 @@ static void inia100SCBPost(BYTE * pHcb, BYTE * pScb)
case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */
default:
- printk("inia100: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat);
- pSCB->SCB_HaStat = DID_ERROR; /* Couldn't find any better */
+ printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat);
+ scb->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
- if (pSCB->SCB_TaStat == 2) { /* Check condition */
- memcpy((unsigned char *) &pSRB->sense_buffer[0],
- (unsigned char *) &pEScb->ESCB_SGList[0], SENSE_SIZE);
+ if (scb->tastat == 2) { /* Check condition */
+ memcpy((unsigned char *) &cmd->sense_buffer[0],
+ (unsigned char *) &escb->sglist[0], SENSE_SIZE);
}
- pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16);
-
- if (pSRB->use_sg) {
- pci_unmap_sg(pHCB->pdev,
- (struct scatterlist *)pSRB->request_buffer,
- pSRB->use_sg, pSRB->sc_data_direction);
- } else if (pSRB->request_bufflen != 0) {
- pci_unmap_single(pHCB->pdev, pSRB->SCp.dma_handle,
- pSRB->request_bufflen,
- pSRB->sc_data_direction);
- }
-
- pSRB->scsi_done(pSRB); /* Notify system DONE */
-
- orc_release_scb(pHCB, pSCB); /* Release SCB for current channel */
+ cmd->result = scb->tastat | (scb->hastat << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd); /* Notify system DONE */
+ orc_release_scb(host, scb); /* Release SCB for current channel */
}
-/*
- * Interrupt handler (main routine of the driver)
+/**
+ * inia100_intr - interrupt handler
+ * @irqno: Interrupt value
+ * @devid: Host adapter
+ *
+ * Entry point for IRQ handling. All the real work is performed
+ * by orc_interrupt.
*/
static irqreturn_t inia100_intr(int irqno, void *devid)
{
- struct Scsi_Host *host = (struct Scsi_Host *)devid;
- ORC_HCS *pHcb = (ORC_HCS *)host->hostdata;
+ struct Scsi_Host *shost = (struct Scsi_Host *)devid;
+ struct orc_host *host = (struct orc_host *)shost->hostdata;
unsigned long flags;
+ irqreturn_t res;
- spin_lock_irqsave(host->host_lock, flags);
- orc_interrupt(pHcb);
- spin_unlock_irqrestore(host->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
+ res = orc_interrupt(host);
+ spin_unlock_irqrestore(shost->host_lock, flags);
- return IRQ_HANDLED;
+ return res;
}
static struct scsi_host_template inia100_template = {
@@ -1044,12 +1077,12 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
- ORC_HCS *pHCB;
+ struct orc_host *host;
unsigned long port, bios;
int error = -ENODEV;
u32 sz;
- unsigned long dBiosAdr;
- char *pbBiosAdr;
+ unsigned long biosaddr;
+ char *bios_phys;
if (pci_enable_device(pdev))
goto out;
@@ -1068,55 +1101,55 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
}
/* <02> read from base address + 0x50 offset to get the bios value. */
- bios = ORC_RDWORD(port, 0x50);
+ bios = inw(port + 0x50);
- shost = scsi_host_alloc(&inia100_template, sizeof(ORC_HCS));
+ shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host));
if (!shost)
goto out_release_region;
- pHCB = (ORC_HCS *)shost->hostdata;
- pHCB->pdev = pdev;
- pHCB->HCS_Base = port;
- pHCB->HCS_BIOS = bios;
- spin_lock_init(&pHCB->BitAllocFlagLock);
+ host = (struct orc_host *)shost->hostdata;
+ host->pdev = pdev;
+ host->base = port;
+ host->BIOScfg = bios;
+ spin_lock_init(&host->allocation_lock);
/* Get total memory needed for SCB */
- sz = ORC_MAXQUEUE * sizeof(ORC_SCB);
- pHCB->HCS_virScbArray = pci_alloc_consistent(pdev, sz,
- &pHCB->HCS_physScbArray);
- if (!pHCB->HCS_virScbArray) {
+ sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
+ host->scb_virt = pci_alloc_consistent(pdev, sz,
+ &host->scb_phys);
+ if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
}
- memset(pHCB->HCS_virScbArray, 0, sz);
+ memset(host->scb_virt, 0, sz);
/* Get total memory needed for ESCB */
- sz = ORC_MAXQUEUE * sizeof(ESCB);
- pHCB->HCS_virEscbArray = pci_alloc_consistent(pdev, sz,
- &pHCB->HCS_physEscbArray);
- if (!pHCB->HCS_virEscbArray) {
+ sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
+ host->escb_virt = pci_alloc_consistent(pdev, sz,
+ &host->escb_phys);
+ if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
}
- memset(pHCB->HCS_virEscbArray, 0, sz);
+ memset(host->escb_virt, 0, sz);
- dBiosAdr = pHCB->HCS_BIOS;
- dBiosAdr = (dBiosAdr << 4);
- pbBiosAdr = phys_to_virt(dBiosAdr);
- if (init_orchid(pHCB)) { /* Initialize orchid chip */
+ biosaddr = host->BIOScfg;
+ biosaddr = (biosaddr << 4);
+ bios_phys = phys_to_virt(biosaddr);
+ if (init_orchid(host)) { /* Initialize orchid chip */
printk("inia100: initial orchid fail!!\n");
goto out_free_escb_array;
}
- shost->io_port = pHCB->HCS_Base;
+ shost->io_port = host->base;
shost->n_io_port = 0xff;
shost->can_queue = ORC_MAXQUEUE;
shost->unique_id = shost->io_port;
- shost->max_id = pHCB->HCS_MaxTar;
+ shost->max_id = host->max_targets;
shost->max_lun = 16;
- shost->irq = pHCB->HCS_Intr = pdev->irq;
- shost->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */
+ shost->irq = pdev->irq;
+ shost->this_id = host->scsi_id; /* Assign HCS index */
shost->sg_tablesize = TOTAL_SG_ENTRY;
/* Initial orc chip */
@@ -1137,36 +1170,36 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
scsi_scan_host(shost);
return 0;
- out_free_irq:
+out_free_irq:
free_irq(shost->irq, shost);
- out_free_escb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ESCB),
- pHCB->HCS_virEscbArray, pHCB->HCS_physEscbArray);
- out_free_scb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ORC_SCB),
- pHCB->HCS_virScbArray, pHCB->HCS_physScbArray);
- out_host_put:
+out_free_escb_array:
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ host->escb_virt, host->escb_phys);
+out_free_scb_array:
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ host->scb_virt, host->scb_phys);
+out_host_put:
scsi_host_put(shost);
- out_release_region:
+out_release_region:
release_region(port, 256);
- out_disable_device:
+out_disable_device:
pci_disable_device(pdev);
- out:
+out:
return error;
}
static void __devexit inia100_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
- ORC_HCS *pHCB = (ORC_HCS *)shost->hostdata;
+ struct orc_host *host = (struct orc_host *)shost->hostdata;
scsi_remove_host(shost);
free_irq(shost->irq, shost);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ESCB),
- pHCB->HCS_virEscbArray, pHCB->HCS_physEscbArray);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ORC_SCB),
- pHCB->HCS_virScbArray, pHCB->HCS_physScbArray);
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ host->escb_virt, host->escb_phys);
+ pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
scsi_host_put(shost);
diff --git a/drivers/scsi/a100u2w.h b/drivers/scsi/a100u2w.h
index 6f542d2600ea..d40e0c528198 100644
--- a/drivers/scsi/a100u2w.h
+++ b/drivers/scsi/a100u2w.h
@@ -18,27 +18,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -50,30 +29,19 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- */
-
-/*
+ *
* Revision History:
* 06/18/98 HL, Initial production Version 1.02
* 12/19/98 bv, Use spinlocks for 2.1.95 and up
* 06/25/02 Doug Ledford <dledford@redhat.com>
* - This and the i60uscsi.h file are almost identical,
* merged them into a single header used by both .c files.
+ * 14/06/07 Alan Cox <alan@redhat.com>
+ * - Grand cleanup and Linuxisation
*/
#define inia100_REVID "Initio INI-A100U2W SCSI device driver; Revision: 1.02d"
-#define ULONG unsigned long
-#define USHORT unsigned short
-#define UCHAR unsigned char
-#define BYTE unsigned char
-#define WORD unsigned short
-#define DWORD unsigned long
-#define UBYTE unsigned char
-#define UWORD unsigned short
-#define UDWORD unsigned long
-#define U32 u32
-
#if 1
#define ORC_MAXQUEUE 245
#define ORC_MAXTAGS 64
@@ -90,10 +58,10 @@
/************************************************************************/
/* Scatter-Gather Element Structure */
/************************************************************************/
-typedef struct ORC_SG_Struc {
- U32 SG_Ptr; /* Data Pointer */
- U32 SG_Len; /* Data Length */
-} ORC_SG;
+struct orc_sgent {
+ u32 base; /* Data Pointer */
+ u32 length; /* Data Length */
+};
/* SCSI related definition */
#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */
@@ -165,42 +133,45 @@ typedef struct ORC_SG_Struc {
#define ORC_PRGMCTR1 0xE3 /* RISC program counter */
#define ORC_RISCRAM 0xEC /* RISC RAM data port 4 bytes */
-typedef struct orc_extended_scb { /* Extended SCB */
- ORC_SG ESCB_SGList[TOTAL_SG_ENTRY]; /*0 Start of SG list */
- struct scsi_cmnd *SCB_Srb; /*50 SRB Pointer */
-} ESCB;
+struct orc_extended_scb { /* Extended SCB */
+ struct orc_sgent sglist[TOTAL_SG_ENTRY]; /*0 Start of SG list */
+ struct scsi_cmnd *srb; /*50 SRB Pointer */
+};
/***********************************************************************
SCSI Control Block
+
+ 0x40 bytes long, the last 8 are user bytes
************************************************************************/
-typedef struct orc_scb { /* Scsi_Ctrl_Blk */
- UBYTE SCB_Opcode; /*00 SCB command code&residual */
- UBYTE SCB_Flags; /*01 SCB Flags */
- UBYTE SCB_Target; /*02 Target Id */
- UBYTE SCB_Lun; /*03 Lun */
- U32 SCB_Reserved0; /*04 Reserved for ORCHID must 0 */
- U32 SCB_XferLen; /*08 Data Transfer Length */
- U32 SCB_Reserved1; /*0C Reserved for ORCHID must 0 */
- U32 SCB_SGLen; /*10 SG list # * 8 */
- U32 SCB_SGPAddr; /*14 SG List Buf physical Addr */
- U32 SCB_SGPAddrHigh; /*18 SG Buffer high physical Addr */
- UBYTE SCB_HaStat; /*1C Host Status */
- UBYTE SCB_TaStat; /*1D Target Status */
- UBYTE SCB_Status; /*1E SCB status */
- UBYTE SCB_Link; /*1F Link pointer, default 0xFF */
- UBYTE SCB_SenseLen; /*20 Sense Allocation Length */
- UBYTE SCB_CDBLen; /*21 CDB Length */
- UBYTE SCB_Ident; /*22 Identify */
- UBYTE SCB_TagMsg; /*23 Tag Message */
- UBYTE SCB_CDB[IMAX_CDB]; /*24 SCSI CDBs */
- UBYTE SCB_ScbIdx; /*3C Index for this ORCSCB */
- U32 SCB_SensePAddr; /*34 Sense Buffer physical Addr */
-
- ESCB *SCB_EScb; /*38 Extended SCB Pointer */
-#ifndef ALPHA
- UBYTE SCB_Reserved2[4]; /*3E Reserved for Driver use */
+struct orc_scb { /* Scsi_Ctrl_Blk */
+ u8 opcode; /*00 SCB command code&residual */
+ u8 flags; /*01 SCB Flags */
+ u8 target; /*02 Target Id */
+ u8 lun; /*03 Lun */
+ u32 reserved0; /*04 Reserved for ORCHID must 0 */
+ u32 xferlen; /*08 Data Transfer Length */
+ u32 reserved1; /*0C Reserved for ORCHID must 0 */
+ u32 sg_len; /*10 SG list # * 8 */
+ u32 sg_addr; /*14 SG List Buf physical Addr */
+ u32 sg_addrhigh; /*18 SG Buffer high physical Addr */
+ u8 hastat; /*1C Host Status */
+ u8 tastat; /*1D Target Status */
+ u8 status; /*1E SCB status */
+ u8 link; /*1F Link pointer, default 0xFF */
+ u8 sense_len; /*20 Sense Allocation Length */
+ u8 cdb_len; /*21 CDB Length */
+ u8 ident; /*22 Identify */
+ u8 tag_msg; /*23 Tag Message */
+ u8 cdb[IMAX_CDB]; /*24 SCSI CDBs */
+ u8 scbidx; /*3C Index for this ORCSCB */
+ u32 sense_addr; /*34 Sense Buffer physical Addr */
+
+ struct orc_extended_scb *escb; /*38 Extended SCB Pointer */
+ /* 64bit pointer or 32bit pointer + reserved ? */
+#ifndef CONFIG_64BIT
+ u8 reserved2[4]; /*3E Reserved for Driver use */
#endif
-} ORC_SCB;
+};
/* Opcodes of ORCSCB_Opcode */
#define ORC_EXECSCSI 0x00 /* SCSI initiator command with residual */
@@ -239,13 +210,13 @@ typedef struct orc_scb { /* Scsi_Ctrl_Blk */
Target Device Control Structure
**********************************************************************/
-typedef struct ORC_Tar_Ctrl_Struc {
- UBYTE TCS_DrvDASD; /* 6 */
- UBYTE TCS_DrvSCSI; /* 7 */
- UBYTE TCS_DrvHead; /* 8 */
- UWORD TCS_DrvFlags; /* 4 */
- UBYTE TCS_DrvSector; /* 7 */
-} ORC_TCS;
+struct orc_target {
+ u8 TCS_DrvDASD; /* 6 */
+ u8 TCS_DrvSCSI; /* 7 */
+ u8 TCS_DrvHead; /* 8 */
+ u16 TCS_DrvFlags; /* 4 */
+ u8 TCS_DrvSector; /* 7 */
+};
/* Bit Definition for TCF_DrvFlags */
#define TCS_DF_NODASD_SUPT 0x20 /* Suppress OS/2 DASD Mgr support */
@@ -255,32 +226,23 @@ typedef struct ORC_Tar_Ctrl_Struc {
/***********************************************************************
Host Adapter Control Structure
************************************************************************/
-typedef struct ORC_Ha_Ctrl_Struc {
- USHORT HCS_Base; /* 00 */
- UBYTE HCS_Index; /* 02 */
- UBYTE HCS_Intr; /* 04 */
- UBYTE HCS_SCSI_ID; /* 06 H/A SCSI ID */
- UBYTE HCS_BIOS; /* 07 BIOS configuration */
-
- UBYTE HCS_Flags; /* 0B */
- UBYTE HCS_HAConfig1; /* 1B SCSI0MAXTags */
- UBYTE HCS_MaxTar; /* 1B SCSI0MAXTags */
-
- USHORT HCS_Units; /* Number of units this adapter */
- USHORT HCS_AFlags; /* Adapter info. defined flags */
- ULONG HCS_Timeout; /* Adapter timeout value */
- ORC_SCB *HCS_virScbArray; /* 28 Virtual Pointer to SCB array */
- dma_addr_t HCS_physScbArray; /* Scb Physical address */
- ESCB *HCS_virEscbArray; /* Virtual pointer to ESCB Scatter list */
- dma_addr_t HCS_physEscbArray; /* scatter list Physical address */
- UBYTE TargetFlag[16]; /* 30 target configuration, TCF_EN_TAG */
- UBYTE MaximumTags[16]; /* 40 ORC_MAX_SCBS */
- UBYTE ActiveTags[16][16]; /* 50 */
- ORC_TCS HCS_Tcs[16]; /* 28 */
- U32 BitAllocFlag[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
- spinlock_t BitAllocFlagLock;
+struct orc_host {
+ unsigned long base; /* Base address */
+ u8 index; /* Index (Channel)*/
+ u8 scsi_id; /* H/A SCSI ID */
+ u8 BIOScfg; /*BIOS configuration */
+ u8 flags;
+ u8 max_targets; /* SCSI0MAXTags */
+ struct orc_scb *scb_virt; /* Virtual Pointer to SCB array */
+ dma_addr_t scb_phys; /* Scb Physical address */
+ struct orc_extended_scb *escb_virt; /* Virtual pointer to ESCB Scatter list */
+ dma_addr_t escb_phys; /* scatter list Physical address */
+ u8 target_flag[16]; /* target configuration, TCF_EN_TAG */
+ u8 max_tags[16]; /* ORC_MAX_SCBS */
+ u32 allocation_map[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
+ spinlock_t allocation_lock;
struct pci_dev *pdev;
-} ORC_HCS;
+};
/* Bit Definition for HCS_Flags */
@@ -301,79 +263,79 @@ typedef struct ORC_Ha_Ctrl_Struc {
#define HCS_AF_DISABLE_RESET 0x10 /* Adapter disable reset */
#define HCS_AF_DISABLE_ADPT 0x80 /* Adapter disable */
-typedef struct _NVRAM {
+struct orc_nvram {
/*----------header ---------------*/
- UCHAR SubVendorID0; /* 00 - Sub Vendor ID */
- UCHAR SubVendorID1; /* 00 - Sub Vendor ID */
- UCHAR SubSysID0; /* 02 - Sub System ID */
- UCHAR SubSysID1; /* 02 - Sub System ID */
- UCHAR SubClass; /* 04 - Sub Class */
- UCHAR VendorID0; /* 05 - Vendor ID */
- UCHAR VendorID1; /* 05 - Vendor ID */
- UCHAR DeviceID0; /* 07 - Device ID */
- UCHAR DeviceID1; /* 07 - Device ID */
- UCHAR Reserved0[2]; /* 09 - Reserved */
- UCHAR Revision; /* 0B - Revision of data structure */
+ u8 SubVendorID0; /* 00 - Sub Vendor ID */
+ u8 SubVendorID1; /* 00 - Sub Vendor ID */
+ u8 SubSysID0; /* 02 - Sub System ID */
+ u8 SubSysID1; /* 02 - Sub System ID */
+ u8 SubClass; /* 04 - Sub Class */
+ u8 VendorID0; /* 05 - Vendor ID */
+ u8 VendorID1; /* 05 - Vendor ID */
+ u8 DeviceID0; /* 07 - Device ID */
+ u8 DeviceID1; /* 07 - Device ID */
+ u8 Reserved0[2]; /* 09 - Reserved */
+ u8 revision; /* 0B - revision of data structure */
/* ----Host Adapter Structure ---- */
- UCHAR NumOfCh; /* 0C - Number of SCSI channel */
- UCHAR BIOSConfig1; /* 0D - BIOS configuration 1 */
- UCHAR BIOSConfig2; /* 0E - BIOS boot channel&target ID */
- UCHAR BIOSConfig3; /* 0F - BIOS configuration 3 */
+ u8 NumOfCh; /* 0C - Number of SCSI channel */
+ u8 BIOSConfig1; /* 0D - BIOS configuration 1 */
+ u8 BIOSConfig2; /* 0E - BIOS boot channel&target ID */
+ u8 BIOSConfig3; /* 0F - BIOS configuration 3 */
/* ----SCSI channel Structure ---- */
/* from "CTRL-I SCSI Host Adapter SetUp menu " */
- UCHAR SCSI0Id; /* 10 - Channel 0 SCSI ID */
- UCHAR SCSI0Config; /* 11 - Channel 0 SCSI configuration */
- UCHAR SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */
- UCHAR SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */
- UCHAR ReservedforChannel0[2]; /* 14 - Reserved */
+ u8 scsi_id; /* 10 - Channel 0 SCSI ID */
+ u8 SCSI0Config; /* 11 - Channel 0 SCSI configuration */
+ u8 SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */
+ u8 SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */
+ u8 ReservedforChannel0[2]; /* 14 - Reserved */
/* ----SCSI target Structure ---- */
/* from "CTRL-I SCSI device SetUp menu " */
- UCHAR Target00Config; /* 16 - Channel 0 Target 0 config */
- UCHAR Target01Config; /* 17 - Channel 0 Target 1 config */
- UCHAR Target02Config; /* 18 - Channel 0 Target 2 config */
- UCHAR Target03Config; /* 19 - Channel 0 Target 3 config */
- UCHAR Target04Config; /* 1A - Channel 0 Target 4 config */
- UCHAR Target05Config; /* 1B - Channel 0 Target 5 config */
- UCHAR Target06Config; /* 1C - Channel 0 Target 6 config */
- UCHAR Target07Config; /* 1D - Channel 0 Target 7 config */
- UCHAR Target08Config; /* 1E - Channel 0 Target 8 config */
- UCHAR Target09Config; /* 1F - Channel 0 Target 9 config */
- UCHAR Target0AConfig; /* 20 - Channel 0 Target A config */
- UCHAR Target0BConfig; /* 21 - Channel 0 Target B config */
- UCHAR Target0CConfig; /* 22 - Channel 0 Target C config */
- UCHAR Target0DConfig; /* 23 - Channel 0 Target D config */
- UCHAR Target0EConfig; /* 24 - Channel 0 Target E config */
- UCHAR Target0FConfig; /* 25 - Channel 0 Target F config */
-
- UCHAR SCSI1Id; /* 26 - Channel 1 SCSI ID */
- UCHAR SCSI1Config; /* 27 - Channel 1 SCSI configuration */
- UCHAR SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */
- UCHAR SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */
- UCHAR ReservedforChannel1[2]; /* 2A - Reserved */
+ u8 Target00Config; /* 16 - Channel 0 Target 0 config */
+ u8 Target01Config; /* 17 - Channel 0 Target 1 config */
+ u8 Target02Config; /* 18 - Channel 0 Target 2 config */
+ u8 Target03Config; /* 19 - Channel 0 Target 3 config */
+ u8 Target04Config; /* 1A - Channel 0 Target 4 config */
+ u8 Target05Config; /* 1B - Channel 0 Target 5 config */
+ u8 Target06Config; /* 1C - Channel 0 Target 6 config */
+ u8 Target07Config; /* 1D - Channel 0 Target 7 config */
+ u8 Target08Config; /* 1E - Channel 0 Target 8 config */
+ u8 Target09Config; /* 1F - Channel 0 Target 9 config */
+ u8 Target0AConfig; /* 20 - Channel 0 Target A config */
+ u8 Target0BConfig; /* 21 - Channel 0 Target B config */
+ u8 Target0CConfig; /* 22 - Channel 0 Target C config */
+ u8 Target0DConfig; /* 23 - Channel 0 Target D config */
+ u8 Target0EConfig; /* 24 - Channel 0 Target E config */
+ u8 Target0FConfig; /* 25 - Channel 0 Target F config */
+
+ u8 SCSI1Id; /* 26 - Channel 1 SCSI ID */
+ u8 SCSI1Config; /* 27 - Channel 1 SCSI configuration */
+ u8 SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */
+ u8 SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */
+ u8 ReservedforChannel1[2]; /* 2A - Reserved */
/* ----SCSI target Structure ---- */
/* from "CTRL-I SCSI device SetUp menu " */
- UCHAR Target10Config; /* 2C - Channel 1 Target 0 config */
- UCHAR Target11Config; /* 2D - Channel 1 Target 1 config */
- UCHAR Target12Config; /* 2E - Channel 1 Target 2 config */
- UCHAR Target13Config; /* 2F - Channel 1 Target 3 config */
- UCHAR Target14Config; /* 30 - Channel 1 Target 4 config */
- UCHAR Target15Config; /* 31 - Channel 1 Target 5 config */
- UCHAR Target16Config; /* 32 - Channel 1 Target 6 config */
- UCHAR Target17Config; /* 33 - Channel 1 Target 7 config */
- UCHAR Target18Config; /* 34 - Channel 1 Target 8 config */
- UCHAR Target19Config; /* 35 - Channel 1 Target 9 config */
- UCHAR Target1AConfig; /* 36 - Channel 1 Target A config */
- UCHAR Target1BConfig; /* 37 - Channel 1 Target B config */
- UCHAR Target1CConfig; /* 38 - Channel 1 Target C config */
- UCHAR Target1DConfig; /* 39 - Channel 1 Target D config */
- UCHAR Target1EConfig; /* 3A - Channel 1 Target E config */
- UCHAR Target1FConfig; /* 3B - Channel 1 Target F config */
- UCHAR reserved[3]; /* 3C - Reserved */
+ u8 Target10Config; /* 2C - Channel 1 Target 0 config */
+ u8 Target11Config; /* 2D - Channel 1 Target 1 config */
+ u8 Target12Config; /* 2E - Channel 1 Target 2 config */
+ u8 Target13Config; /* 2F - Channel 1 Target 3 config */
+ u8 Target14Config; /* 30 - Channel 1 Target 4 config */
+ u8 Target15Config; /* 31 - Channel 1 Target 5 config */
+ u8 Target16Config; /* 32 - Channel 1 Target 6 config */
+ u8 Target17Config; /* 33 - Channel 1 Target 7 config */
+ u8 Target18Config; /* 34 - Channel 1 Target 8 config */
+ u8 Target19Config; /* 35 - Channel 1 Target 9 config */
+ u8 Target1AConfig; /* 36 - Channel 1 Target A config */
+ u8 Target1BConfig; /* 37 - Channel 1 Target B config */
+ u8 Target1CConfig; /* 38 - Channel 1 Target C config */
+ u8 Target1DConfig; /* 39 - Channel 1 Target D config */
+ u8 Target1EConfig; /* 3A - Channel 1 Target E config */
+ u8 Target1FConfig; /* 3B - Channel 1 Target F config */
+ u8 reserved[3]; /* 3C - Reserved */
/* ---------- CheckSum ---------- */
- UCHAR CheckSum; /* 3F - Checksum of NVRam */
-} NVRAM, *PNVRAM;
+ u8 CheckSum; /* 3F - Checksum of NVRam */
+};
/* Bios Configuration for nvram->BIOSConfig1 */
#define NBC_BIOSENABLE 0x01 /* BIOS enable */
@@ -407,10 +369,3 @@ typedef struct _NVRAM {
#define NCC_RESET_TIME 0x0A /* SCSI RESET recovering time */
#define NTC_DEFAULT (NTC_1GIGA | NTC_NO_WIDESYNC | NTC_DISC_ENABLE)
-#define ORC_RD(x,y) (UCHAR)(inb( (int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-#define ORC_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-#define ORC_RDLONG(x,y) (long)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-
-#define ORC_WR( adr,data) outb( (UCHAR)(data), (int)(adr))
-#define ORC_WRSHORT(adr,data) outw( (UWORD)(data), (int)(adr))
-#define ORC_WRLONG( adr,data) outl( (ULONG)(data), (int)(adr))
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
new file mode 100644
index 000000000000..0c758d1452ba
--- /dev/null
+++ b/drivers/scsi/a4000t.c
@@ -0,0 +1,144 @@
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga Technologies A4000T SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+
+static struct scsi_host_template a4000t_scsi_driver_template = {
+ .name = "A4000T builtin SCSI",
+ .proc_name = "A4000t",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *a4000t_scsi_device;
+
+#define A4000T_SCSI_ADDR 0xdd0040
+
+static int __devinit a4000t_probe(struct device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI)))
+ goto out;
+
+ if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000,
+ "A4000T builtin SCSI"))
+ goto out;
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n");
+ goto out_release;
+ }
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR);
+ hostdata->clock = 50;
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+
+ /* and register the chip */
+ host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, dev);
+ if (!host) {
+ printk(KERN_ERR "a4000t-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+
+ host->this_id = 7;
+ host->base = A4000T_SCSI_ADDR;
+ host->irq = IRQ_AMIGA_PORTS;
+
+ if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
+ host)) {
+ printk(KERN_ERR "a4000t-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ dev_set_drvdata(dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out_release:
+ release_mem_region(A4000T_SCSI_ADDR, 0x1000);
+ out:
+ return -ENODEV;
+}
+
+static __devexit int a4000t_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ release_mem_region(A4000T_SCSI_ADDR, 0x1000);
+
+ return 0;
+}
+
+static struct device_driver a4000t_scsi_driver = {
+ .name = "a4000t-scsi",
+ .bus = &platform_bus_type,
+ .probe = a4000t_probe,
+ .remove = __devexit_p(a4000t_device_remove),
+};
+
+static int __init a4000t_scsi_init(void)
+{
+ int err;
+
+ err = driver_register(&a4000t_scsi_driver);
+ if (err)
+ return err;
+
+ a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(a4000t_scsi_device)) {
+ driver_unregister(&a4000t_scsi_driver);
+ return PTR_ERR(a4000t_scsi_device);
+ }
+
+ return err;
+}
+
+static void __exit a4000t_scsi_exit(void)
+{
+ platform_device_unregister(a4000t_scsi_device);
+ driver_unregister(&a4000t_scsi_driver);
+}
+
+module_init(a4000t_scsi_init);
+module_exit(a4000t_scsi_exit);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8dcfe4ec35c2..a26baab09dbf 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -169,6 +169,18 @@ int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
+int update_interval = 30 * 60;
+module_param(update_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter.");
+
+int check_interval = 24 * 60 * 60;
+module_param(check_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks.");
+
+int check_reset = 1;
+module_param(check_reset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the adapter.");
+
int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
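The three tunables added above (update_interval, check_interval, check_reset) follow the standard writable module parameter pattern, so they can be set at module load time or adjusted later by root through /sys/module/aacraid/parameters/ (assuming the module is loaded as aacraid). A minimal sketch of the same pattern, with a hypothetical name:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Hypothetical tunable, shown only to illustrate the pattern used above. */
static int example_interval = 60;	/* seconds */
module_param(example_interval, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(example_interval,
	"Hypothetical interval in seconds; readable and root-writable via sysfs.");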
@@ -312,11 +324,10 @@ int aac_get_containers(struct aac_dev *dev)
if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
- fsa_dev_ptr = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
+ fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
GFP_KERNEL);
if (!fsa_dev_ptr)
return -ENOMEM;
- memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
dev->fsa_dev = fsa_dev_ptr;
dev->maximum_num_containers = maximum_num_containers;
@@ -344,21 +355,16 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
{
void *buf;
int transfer_len;
- struct scatterlist *sg = scsicmd->request_buffer;
+ struct scatterlist *sg = scsi_sglist(scsicmd);
+
+ buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ transfer_len = min(sg->length, len + offset);
- if (scsicmd->use_sg) {
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- transfer_len = min(sg->length, len + offset);
- } else {
- buf = scsicmd->request_buffer;
- transfer_len = min(scsicmd->request_bufflen, len + offset);
- }
transfer_len -= offset;
if (buf && transfer_len > 0)
memcpy(buf + offset, data, transfer_len);
- if (scsicmd->use_sg)
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
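The rewrite of aac_internal_transfer() above assumes every command now carries a scatter-gather table, so scsi_sglist() is always valid and the old request_buffer branch could be dropped. A minimal sketch of the same copy-into-first-SG-element idiom, using the sg->page/KM_IRQ0 interfaces of this kernel generation (illustrative only, not part of the patch):

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

/* Copy data into the start of a command's first scatter-gather element. */
static void copy_to_first_sg(struct scsi_cmnd *cmd, const void *data,
			     unsigned int len)
{
	struct scatterlist *sg = scsi_sglist(cmd);
	void *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	unsigned int n = min(len, sg->length);

	memcpy(buf, data, n);
	kunmap_atomic(buf - sg->offset, KM_IRQ0);
}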
@@ -451,7 +457,7 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
{
struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
- if (fsa_dev_ptr[scmd_id(scsicmd)].valid)
+ if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
return aac_scsi_cmd(scsicmd);
scsicmd->result = DID_NO_CONNECT << 16;
@@ -459,18 +465,18 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
return 0;
}
-static int _aac_probe_container2(void * context, struct fib * fibptr)
+static void _aac_probe_container2(void * context, struct fib * fibptr)
{
struct fsa_dev_info *fsa_dev_ptr;
int (*callback)(struct scsi_cmnd *);
struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
- if (!aac_valid_context(scsicmd, fibptr))
- return 0;
- fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
scsicmd->SCp.Status = 0;
+ fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
fsa_dev_ptr += scmd_id(scsicmd);
@@ -493,10 +499,11 @@ static int _aac_probe_container2(void * context, struct fib * fibptr)
aac_fib_free(fibptr);
callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
scsicmd->SCp.ptr = NULL;
- return (*callback)(scsicmd);
+ (*callback)(scsicmd);
+ return;
}
-static int _aac_probe_container1(void * context, struct fib * fibptr)
+static void _aac_probe_container1(void * context, struct fib * fibptr)
{
struct scsi_cmnd * scsicmd;
struct aac_mount * dresp;
@@ -506,13 +513,14 @@ static int _aac_probe_container1(void * context, struct fib * fibptr)
dresp = (struct aac_mount *) fib_data(fibptr);
dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
- (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE))
- return _aac_probe_container2(context, fibptr);
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+ _aac_probe_container2(context, fibptr);
+ return;
+ }
scsicmd = (struct scsi_cmnd *) context;
- scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
if (!aac_valid_context(scsicmd, fibptr))
- return 0;
+ return;
aac_fib_init(fibptr);
@@ -527,21 +535,18 @@ static int _aac_probe_container1(void * context, struct fib * fibptr)
sizeof(struct aac_query_mount),
FsaNormal,
0, 1,
- (fib_callback) _aac_probe_container2,
+ _aac_probe_container2,
(void *) scsicmd);
/*
* Check that the command queued to the controller
*/
- if (status == -EINPROGRESS) {
+ if (status == -EINPROGRESS)
scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
- return 0;
- }
- if (status < 0) {
+ else if (status < 0) {
/* Inherit results from VM_NameServe, if any */
dresp->status = cpu_to_le32(ST_OK);
- return _aac_probe_container2(context, fibptr);
+ _aac_probe_container2(context, fibptr);
}
- return 0;
}
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
@@ -566,7 +571,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
sizeof(struct aac_query_mount),
FsaNormal,
0, 1,
- (fib_callback) _aac_probe_container1,
+ _aac_probe_container1,
(void *) scsicmd);
/*
* Check that the command queued to the controller
@@ -620,7 +625,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
return -ENOMEM;
}
scsicmd->list.next = NULL;
- scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))_aac_probe_container1;
+ scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
scsicmd->device = scsidev;
scsidev->sdev_state = 0;
@@ -746,6 +751,101 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
inqstrcpy ("V1.0", str->prl);
}
+static void get_container_serial_callback(void *context, struct fib * fibptr)
+{
+ struct aac_get_serial_resp * get_serial_reply;
+ struct scsi_cmnd * scsicmd;
+
+ BUG_ON(fibptr == NULL);
+
+ scsicmd = (struct scsi_cmnd *) context;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
+ /* Failure is irrelevant, using default value instead */
+ if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
+ char sp[13];
+ /* EVPD bit set */
+ sp[0] = INQD_PDT_DA;
+ sp[1] = scsicmd->cmnd[2];
+ sp[2] = 0;
+ sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ le32_to_cpu(get_serial_reply->uid));
+ aac_internal_transfer(scsicmd, sp, 0, sizeof(sp));
+ }
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ * aac_get_container_serial - get container serial number, non-blocking.
+ */
+static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
+{
+ int status;
+ struct aac_get_serial *dinfo;
+ struct fib * cmd_fibcontext;
+ struct aac_dev * dev;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+ if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(cmd_fibcontext);
+ dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
+
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
+ dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof (struct aac_get_serial),
+ FsaNormal,
+ 0, 1,
+ (fib_callback) get_container_serial_callback,
+ (void *) scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return -1;
+}
+
+/* Function: setinqserial
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI Unit Serial number.
+ * This is a fake. We should read a proper
+ * serial number from the container. <SuSE>But
+ * without docs it's quite hard to do it :-)
+ * So this will have to do in the meantime.</SuSE>
+ */
+
+static int setinqserial(struct aac_dev *dev, void *data, int cid)
+{
+ /*
+ * This breaks array migration.
+ */
+ return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
+ le32_to_cpu(dev->adapter_info.serial[0]), cid);
+}
+
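For reference, the serial text that setinqserial() formats ends up in the data area of a SCSI Unit Serial Number VPD page (page code 0x80), whose header the INQUIRY handling later in this patch fills in arr[0..3]. An illustrative layout, per SPC (not a definition taken from the driver):

/* Illustrative only: INQUIRY EVPD page 0x80 reply layout. */
struct vpd_unit_serial {
	u8 peripheral;		/* qualifier and device type */
	u8 page_code;		/* 0x80 */
	u8 reserved;
	u8 page_length;		/* length of the serial number text */
	u8 serial[];		/* ASCII product serial number */
};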
static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
u8 a_sense_code, u8 incorrect_length,
u8 bit_pointer, u16 field_pointer,
@@ -825,7 +925,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(scmd_id(cmd));
- readcmd->flags = cpu_to_le16(1);
+ readcmd->flags = cpu_to_le16(IO_TYPE_READ);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
@@ -904,7 +1004,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
(void *) cmd);
}
-static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
u16 fibsize;
struct aac_raw_io *writecmd;
@@ -914,7 +1014,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(scmd_id(cmd));
- writecmd->flags = 0;
+ writecmd->flags = fua ?
+ cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
+ cpu_to_le16(IO_TYPE_WRITE);
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
@@ -933,7 +1035,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
(void *) cmd);
}
-static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
u16 fibsize;
struct aac_write64 *writecmd;
@@ -964,7 +1066,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
(void *) cmd);
}
-static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
u16 fibsize;
struct aac_write *writecmd;
@@ -1041,7 +1143,7 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
- srbcmd->count = cpu_to_le32(cmd->request_bufflen);
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
@@ -1069,7 +1171,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
- srbcmd->count = cpu_to_le32(cmd->request_bufflen);
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
@@ -1172,6 +1274,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (!dev->in_reset) {
+ char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
dev->name,
@@ -1192,16 +1295,23 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,tmp&0xff,
le32_to_cpu(dev->adapter_info.biosbuild));
- if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- printk(KERN_INFO "%s%d: serial %x\n",
- dev->name, dev->id,
- le32_to_cpu(dev->adapter_info.serial[0]));
+ buffer[0] = '\0';
+ if (aac_show_serial_number(
+ shost_to_class(dev->scsi_host_ptr), buffer))
+ printk(KERN_INFO "%s%d: serial %s",
+ dev->name, dev->id, buffer);
if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
printk(KERN_INFO "%s%d: TSID %.*s\n",
dev->name, dev->id,
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
dev->supplement_adapter_info.VpdInfo.Tsid);
}
+ if (!check_reset ||
+ (dev->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_IGNORE_RESET))) {
+ printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
+ dev->name, dev->id);
+ }
}
dev->nondasd_support = 0;
@@ -1332,7 +1442,7 @@ static void io_callback(void *context, struct fib * fibptr)
if (!aac_valid_context(scsicmd, fibptr))
return;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+ dev = fibptr->dev;
cid = scmd_id(scsicmd);
if (nblank(dprintk(x))) {
@@ -1371,16 +1481,9 @@ static void io_callback(void *context, struct fib * fibptr)
}
BUG_ON(fibptr == NULL);
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->request_buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
+
+ scsi_dma_unmap(scsicmd);
+
readreply = (struct aac_read_reply *)fib_data(fibptr);
if (le32_to_cpu(readreply->status) == ST_OK)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
@@ -1498,6 +1601,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
{
u64 lba;
u32 count;
+ int fua;
int status;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
@@ -1512,6 +1616,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
+ fua = 0;
} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
@@ -1524,6 +1629,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ fua = scsicmd->cmnd[1] & 0x8;
} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
@@ -1531,10 +1637,12 @@ static int aac_write(struct scsi_cmnd * scsicmd)
| (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
| (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ fua = scsicmd->cmnd[1] & 0x8;
} else {
dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ fua = scsicmd->cmnd[1] & 0x8;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
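The new fua flag above is taken from bit 3 of CDB byte 1, which is the force-unit-access bit in the WRITE(10)/WRITE(12)/WRITE(16) command blocks; WRITE(6) has no FUA bit, so that branch leaves it at zero. As a hedged sketch of the same extraction:

#include <scsi/scsi_cmnd.h>

/* Illustrative helper: FUA is bit 3 of CDB byte 1 for the 10/12/16-byte
 * WRITE commands, hence the "& 0x8" used above.
 */
static inline int cdb_write_fua(const struct scsi_cmnd *cmd)
{
	return (cmd->cmnd[1] & 0x08) != 0;
}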
@@ -1549,7 +1657,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
return 0;
}
- status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count);
+ status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
/*
* Check that the command queued to the controller
@@ -1592,7 +1700,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
else {
struct scsi_device *sdev = cmd->device;
- struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ struct aac_dev *dev = fibptr->dev;
u32 cid = sdev_id(sdev);
printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
@@ -1699,7 +1807,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
- u32 cid = 0;
+ u32 cid;
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
@@ -1711,15 +1819,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* Test does not apply to ID 16, the pseudo id for the controller
* itself.
*/
- if (scmd_id(scsicmd) != host->this_id) {
- if ((scmd_channel(scsicmd) == CONTAINER_CHANNEL)) {
- if((scmd_id(scsicmd) >= dev->maximum_num_containers) ||
+ cid = scmd_id(scsicmd);
+ if (cid != host->this_id) {
+ if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
+ if((cid >= dev->maximum_num_containers) ||
(scsicmd->device->lun != 0)) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
- cid = scmd_id(scsicmd);
/*
* If the target container doesn't exist, it may have
@@ -1782,9 +1890,52 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
struct inquiry_data inq_data;
- dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scmd_id(scsicmd)));
+ dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
memset(&inq_data, 0, sizeof (struct inquiry_data));
+ if (scsicmd->cmnd[1] & 0x1 ) {
+ char *arr = (char *)&inq_data;
+
+ /* EVPD bit set */
+ arr[0] = (scmd_id(scsicmd) == host->this_id) ?
+ INQD_PDT_PROC : INQD_PDT_DA;
+ if (scsicmd->cmnd[2] == 0) {
+ /* supported vital product data pages */
+ arr[3] = 2;
+ arr[4] = 0x0;
+ arr[5] = 0x80;
+ arr[1] = scsicmd->cmnd[2];
+ aac_internal_transfer(scsicmd, &inq_data, 0,
+ sizeof(inq_data));
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else if (scsicmd->cmnd[2] == 0x80) {
+ /* unit serial number page */
+ arr[3] = setinqserial(dev, &arr[4],
+ scmd_id(scsicmd));
+ arr[1] = scsicmd->cmnd[2];
+ aac_internal_transfer(scsicmd, &inq_data, 0,
+ sizeof(inq_data));
+ return aac_get_container_serial(scsicmd);
+ } else {
+ /* vpd page not implemented */
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST,
+ SENCODE_INVALID_CDB_FIELD,
+ ASENCODE_NO_SENSE, 0, 7, 2, 0);
+ memcpy(scsicmd->sense_buffer,
+ &dev->fsa_dev[cid].sense_data,
+ (sizeof(dev->fsa_dev[cid].sense_data) >
+ sizeof(scsicmd->sense_buffer))
+ ? sizeof(scsicmd->sense_buffer)
+ : sizeof(dev->fsa_dev[cid].sense_data));
+ }
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data.inqd_len = 31;
@@ -1794,7 +1945,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* Set the Vendor, Product, and Revision Level
* see: <vendor>.c i.e. aac.c
*/
- if (scmd_id(scsicmd) == host->this_id) {
+ if (cid == host->this_id) {
setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
@@ -1886,15 +2037,29 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE:
{
- char mode_buf[4];
+ char mode_buf[7];
+ int mode_buf_length = 4;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
mode_buf[0] = 3; /* Mode data length */
mode_buf[1] = 0; /* Medium type - default */
- mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+ mode_buf[2] = 0; /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ if (dev->raw_io_interface)
+ mode_buf[2] = 0x10;
mode_buf[3] = 0; /* Block descriptor length */
-
- aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mode_buf[0] = 6;
+ mode_buf[4] = 8;
+ mode_buf[5] = 1;
+ mode_buf[6] = 0x04; /* WCE */
+ mode_buf_length = 7;
+ if (mode_buf_length > scsicmd->cmnd[4])
+ mode_buf_length = scsicmd->cmnd[4];
+ }
+ aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
@@ -1902,18 +2067,33 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
case MODE_SENSE_10:
{
- char mode_buf[8];
+ char mode_buf[11];
+ int mode_buf_length = 8;
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
mode_buf[0] = 0; /* Mode data length (MSB) */
mode_buf[1] = 6; /* Mode data length (LSB) */
mode_buf[2] = 0; /* Medium type - default */
- mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+ mode_buf[3] = 0; /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ if (dev->raw_io_interface)
+ mode_buf[3] = 0x10;
mode_buf[4] = 0; /* reserved */
mode_buf[5] = 0; /* reserved */
mode_buf[6] = 0; /* Block descriptor length (MSB) */
mode_buf[7] = 0; /* Block descriptor length (LSB) */
- aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mode_buf[1] = 9;
+ mode_buf[8] = 8;
+ mode_buf[9] = 1;
+ mode_buf[10] = 0x04; /* WCE */
+ mode_buf_length = 11;
+ if (mode_buf_length > scsicmd->cmnd[8])
+ mode_buf_length = scsicmd->cmnd[8];
+ }
+ aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
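Both MODE SENSE branches above now report FUA support (device-specific parameter byte 0x10 when the raw I/O interface is active) and, when page 0x08 or 0x3F is requested, append a truncated caching page with the WCE bit set. A sketch of the 6-byte variant's buffer, mirroring the driver's values rather than a full SPC-compliant caching page (illustrative only):

/* Build the 7-byte MODE SENSE(6) reply used above when the caching page
 * is requested.
 */
static int build_mode_sense6_caching(unsigned char *buf)
{
	buf[0] = 6;	/* mode data length */
	buf[1] = 0;	/* medium type: default */
	buf[2] = 0x10;	/* device-specific: FUA supported */
	buf[3] = 0;	/* block descriptor length */
	buf[4] = 8;	/* page code: caching */
	buf[5] = 1;	/* page length */
	buf[6] = 0x04;	/* WCE: write cache enabled */
	return 7;	/* bytes written */
}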
@@ -2028,7 +2208,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
}
else return -EINVAL;
- qd.valid = fsa_dev_ptr[qd.cnum].valid;
+ qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
qd.locked = fsa_dev_ptr[qd.cnum].locked;
qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
@@ -2136,28 +2316,21 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
if (!aac_valid_context(scsicmd, fibptr))
return;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
-
BUG_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
/*
* Calculate resid for sg
*/
-
- scsicmd->resid = scsicmd->request_bufflen -
- le32_to_cpu(srbreply->data_xfer_length);
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->request_buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
+
+ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+ - le32_to_cpu(srbreply->data_xfer_length));
+
+ scsi_dma_unmap(scsicmd);
/*
* First check the fib status
@@ -2233,7 +2406,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
break;
case SRB_STATUS_BUSY:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_BUS_RESET:
@@ -2343,34 +2516,33 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
+ int nseg;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr = 0;
- psg->sg[0].count = 0;
- if (scsicmd->use_sg) {
+ psg->sg[0].count = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
- psg->count = cpu_to_le32(sg_count);
+ psg->count = cpu_to_le32(nseg);
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
byte_count += sg_dma_len(sg);
- sg++;
}
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2378,18 +2550,6 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- u32 addr;
- scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- addr = scsicmd->SCp.dma_handle;
- psg->count = cpu_to_le32(1);
- psg->sg[0].addr = cpu_to_le32(addr);
- psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
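All three aac_build_sg* routines are converted from the old use_sg/pci_map_sg() and pci_map_single() paths to the scsi_dma_map()/scsi_for_each_sg() accessors, which always present the data as a scatter-gather table. A generic sketch of that pattern; the hardware element struct is hypothetical:

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

struct example_sge {			/* hypothetical hardware SG element */
	dma_addr_t addr;
	u32 len;
};

/* Illustrative only: map a command and fill a hardware SG table the way
 * the converted aac_build_sg* helpers do.
 */
static int fill_example_sg(struct scsi_cmnd *cmd, struct example_sge *sge)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return nseg;		/* mapping failed */

	scsi_for_each_sg(cmd, sg, nseg, i) {
		sge[i].addr = sg_dma_address(sg);
		sge[i].len  = sg_dma_len(sg);
	}
	return nseg;			/* number of mapped elements */
}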
@@ -2399,6 +2559,7 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
struct aac_dev *dev;
unsigned long byte_count = 0;
u64 addr;
+ int nseg;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
@@ -2406,31 +2567,28 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
psg->sg[0].addr[0] = 0;
psg->sg[0].addr[1] = 0;
psg->sg[0].count = 0;
- if (scsicmd->use_sg) {
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
-
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
int count = sg_dma_len(sg);
addr = sg_dma_address(sg);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
psg->sg[i].count = cpu_to_le32(count);
byte_count += count;
- sg++;
}
- psg->count = cpu_to_le32(sg_count);
+ psg->count = cpu_to_le32(nseg);
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2438,26 +2596,13 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- addr = scsicmd->SCp.dma_handle;
- psg->count = cpu_to_le32(1);
- psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
- psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
- psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
{
- struct Scsi_Host *host = scsicmd->device->host;
- struct aac_dev *dev = (struct aac_dev *)host->hostdata;
unsigned long byte_count = 0;
+ int nseg;
// Get rid of old data
psg->count = 0;
@@ -2467,16 +2612,14 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[0].addr[1] = 0;
psg->sg[0].count = 0;
psg->sg[0].flags = 0;
- if (scsicmd->use_sg) {
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
-
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
int count = sg_dma_len(sg);
u64 addr = sg_dma_address(sg);
psg->sg[i].next = 0;
@@ -2486,15 +2629,14 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[i].count = cpu_to_le32(count);
psg->sg[i].flags = 0;
byte_count += count;
- sg++;
}
- psg->count = cpu_to_le32(sg_count);
+ psg->count = cpu_to_le32(nseg);
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2502,24 +2644,6 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- int count;
- u64 addr;
- scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- addr = scsicmd->SCp.dma_handle;
- count = scsicmd->request_bufflen;
- psg->count = cpu_to_le32(1);
- psg->sg[0].next = 0;
- psg->sg[0].prev = 0;
- psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
- psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- psg->sg[0].count = cpu_to_le32(count);
- psg->sg[0].flags = 0;
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index c81edf36913f..400d03403cd5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,8 +12,8 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2437
-# define AAC_DRIVER_BRANCH "-mh4"
+# define AAC_DRIVER_BUILD 2447
+# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -464,12 +464,12 @@ struct adapter_ops
int (*adapter_restart)(struct aac_dev *dev, int bled);
/* Transport operations */
int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
- irqreturn_t (*adapter_intr)(int irq, void *dev_id);
+ irq_handler_t adapter_intr;
/* Packet operations */
int (*adapter_deliver)(struct fib * fib);
int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
- int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+ int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
/* Administrative operations */
int (*adapter_comm)(struct aac_dev * dev, int comm);
@@ -860,10 +860,12 @@ struct aac_supplement_adapter_info
__le32 FlashFirmwareBootBuild;
u8 MfgPcbaSerialNo[12];
u8 MfgWWNName[8];
- __le32 MoreFeatureBits;
+ __le32 SupportedOptions2;
__le32 ReservedGrowth[1];
};
#define AAC_FEATURE_FALCON 0x00000010
+#define AAC_OPTION_MU_RESET 0x00000001
+#define AAC_OPTION_IGNORE_RESET 0x00000002
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -1054,8 +1056,8 @@ struct aac_dev
#define aac_adapter_read(fib,cmd,lba,count) \
((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
-#define aac_adapter_write(fib,cmd,lba,count) \
- ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count)
+#define aac_adapter_write(fib,cmd,lba,count,fua) \
+ ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
#define aac_adapter_scsi(fib,cmd) \
((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
@@ -1213,6 +1215,9 @@ struct aac_write64
__le32 block;
__le16 pad;
__le16 flags;
+#define IO_TYPE_WRITE 0x00000000
+#define IO_TYPE_READ 0x00000001
+#define IO_SUREWRITE 0x00000008
struct sgmap64 sg; // Must be last in struct because it is variable
};
struct aac_write_reply
@@ -1257,6 +1262,19 @@ struct aac_synchronize_reply {
u8 data[16];
};
+#define CT_PAUSE_IO 65
+#define CT_RELEASE_IO 66
+struct aac_pause {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_PAUSE_IO */
+ __le32 timeout; /* 10ms ticks */
+ __le32 min;
+ __le32 noRescan;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 count; /* sizeof(((struct aac_pause_reply *)NULL)->data) */
+};
+
struct aac_srb
{
__le32 function;
@@ -1549,6 +1567,20 @@ struct aac_get_name_resp {
u8 data[16];
};
+#define CT_CID_TO_32BITS_UID 165
+struct aac_get_serial {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_CID_TO_32BITS_UID */
+ __le32 cid;
+};
+
+struct aac_get_serial_resp {
+ __le32 dummy0;
+ __le32 dummy1;
+ __le32 status; /* CT_OK */
+ __le32 uid;
+};
+
/*
* The following command is sent to shut down each container.
*/
@@ -1804,6 +1836,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+#ifndef shost_to_class
+#define shost_to_class(shost) &shost->shost_classdev
+#endif
+ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf);
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
int aac_rx_init(struct aac_dev *dev);
int aac_rkt_init(struct aac_dev *dev);
@@ -1813,6 +1849,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
+int aac_reset_adapter(struct aac_dev * dev, int forced);
int aac_check_health(struct aac_dev * dev);
int aac_command_thread(void *data);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
@@ -1832,3 +1869,6 @@ extern int aif_timeout;
extern int expose_physicals;
extern int aac_reset_devices;
extern int aac_commit;
+extern int update_interval;
+extern int check_interval;
+extern int check_reset;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 9aca57eda943..bb870906b4cf 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -80,7 +80,11 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
+ pci_free_consistent(dev->pdev,
+ dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ dev->hw_fib_va = NULL;
+ dev->hw_fib_pa = 0;
}
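aac_fib_map_free() now clears hw_fib_va and hw_fib_pa after freeing, so the adapter-reset path (which previously nulled them at its call site) cannot release the same DMA buffer twice. The general idiom, as a sketch with hypothetical names:

#include <linux/pci.h>

/* Illustrative only: free a coherent buffer once and poison the cached
 * pointer/handle so a repeated call becomes a no-op.
 */
static void free_coherent_once(struct pci_dev *pdev, size_t size,
			       void **vaddr, dma_addr_t *handle)
{
	if (!*vaddr)
		return;
	pci_free_consistent(pdev, size, *vaddr, *handle);
	*vaddr = NULL;
	*handle = 0;
}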
/**
@@ -1021,7 +1025,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
}
-static int _aac_reset_adapter(struct aac_dev *aac)
+static int _aac_reset_adapter(struct aac_dev *aac, int forced)
{
int index, quirks;
int retval;
@@ -1029,25 +1033,32 @@ static int _aac_reset_adapter(struct aac_dev *aac)
struct scsi_device *dev;
struct scsi_cmnd *command;
struct scsi_cmnd *command_list;
+ int jafo = 0;
/*
* Assumptions:
- * - host is locked.
+ * - host is locked, unless called by the aacraid thread.
+ * (a matter of convenience, due to legacy issues surrounding
+ * eh_host_adapter_reset).
* - in_reset is asserted, so no new i/o is getting to the
* card.
- * - The card is dead.
+ * - The card is dead, or will be very shortly ;-/ so no new
+ * commands are completing in the interrupt service.
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
aac_adapter_disable_int(aac);
- spin_unlock_irq(host->host_lock);
- kthread_stop(aac->thread);
+ if (aac->thread->pid != current->pid) {
+ spin_unlock_irq(host->host_lock);
+ kthread_stop(aac->thread);
+ jafo = 1;
+ }
/*
* If a positive health, means in a known DEAD PANIC
* state and the adapter could be reset to `try again'.
*/
- retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
+ retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
if (retval)
goto out;
@@ -1080,8 +1091,6 @@ static int _aac_reset_adapter(struct aac_dev *aac)
* case.
*/
aac_fib_map_free(aac);
- aac->hw_fib_va = NULL;
- aac->hw_fib_pa = 0;
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
aac->comm_addr = NULL;
aac->comm_phys = 0;
@@ -1091,12 +1100,12 @@ static int _aac_reset_adapter(struct aac_dev *aac)
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
- if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
- ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
+ if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
+ ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
goto out;
} else {
- if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
- ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
+ if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
+ ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
goto out;
}
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
@@ -1104,10 +1113,12 @@ static int _aac_reset_adapter(struct aac_dev *aac)
if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
goto out;
- aac->thread = kthread_run(aac_command_thread, aac, aac->name);
- if (IS_ERR(aac->thread)) {
- retval = PTR_ERR(aac->thread);
- goto out;
+ if (jafo) {
+ aac->thread = kthread_run(aac_command_thread, aac, aac->name);
+ if (IS_ERR(aac->thread)) {
+ retval = PTR_ERR(aac->thread);
+ goto out;
+ }
}
(void)aac_get_adapter_info(aac);
quirks = aac_get_driver_ident(index)->quirks;
@@ -1150,7 +1161,98 @@ static int _aac_reset_adapter(struct aac_dev *aac)
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
- spin_lock_irq(host->host_lock);
+ if (jafo) {
+ spin_lock_irq(host->host_lock);
+ }
+ return retval;
+}
+
+int aac_reset_adapter(struct aac_dev * aac, int forced)
+{
+ unsigned long flagv = 0;
+ int retval;
+ struct Scsi_Host * host;
+
+ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+ return -EBUSY;
+
+ if (aac->in_reset) {
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+ return -EBUSY;
+ }
+ aac->in_reset = 1;
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+ /*
+ * Wait for all commands to complete to this specific
+ * target (block maximum 60 seconds). Although not necessary,
+ * it does make us a good storage citizen.
+ */
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ if (forced < 2) for (retval = 60; retval; --retval) {
+ struct scsi_device * dev;
+ struct scsi_cmnd * command;
+ int active = 0;
+
+ __shost_for_each_device(dev, host) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+ * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+
+ /* Quiesce build, flush cache, write through mode */
+ aac_send_shutdown(aac);
+ spin_lock_irqsave(host->host_lock, flagv);
+ retval = _aac_reset_adapter(aac, forced);
+ spin_unlock_irqrestore(host->host_lock, flagv);
+
+ if (retval == -ENODEV) {
+ /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
+ struct fib * fibctx = aac_fib_alloc(aac);
+ if (fibctx) {
+ struct aac_pause *cmd;
+ int status;
+
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_pause *) fib_data(fibctx);
+
+ cmd->command = cpu_to_le32(VM_ContainerConfig);
+ cmd->type = cpu_to_le32(CT_PAUSE_IO);
+ cmd->timeout = cpu_to_le32(1);
+ cmd->min = cpu_to_le32(1);
+ cmd->noRescan = cpu_to_le32(1);
+ cmd->count = cpu_to_le32(0);
+
+ status = aac_fib_send(ContainerCommand,
+ fibctx,
+ sizeof(struct aac_pause),
+ FsaNormal,
+ -2 /* Timeout silently */, 1,
+ NULL, NULL);
+
+ if (status >= 0)
+ aac_fib_complete(fibctx);
+ aac_fib_free(fibctx);
+ }
+ }
+
return retval;
}
@@ -1270,10 +1372,15 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+ if (!check_reset || (aac->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_IGNORE_RESET)))
+ goto out;
host = aac->scsi_host_ptr;
- spin_lock_irqsave(host->host_lock, flagv);
- BlinkLED = _aac_reset_adapter(aac);
- spin_unlock_irqrestore(host->host_lock, flagv);
+ if (aac->thread->pid != current->pid)
+ spin_lock_irqsave(host->host_lock, flagv);
+ BlinkLED = _aac_reset_adapter(aac, 0);
+ if (aac->thread->pid != current->pid)
+ spin_unlock_irqrestore(host->host_lock, flagv);
return BlinkLED;
out:
@@ -1300,6 +1407,9 @@ int aac_command_thread(void *data)
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
+ unsigned long next_jiffies = jiffies + HZ;
+ unsigned long next_check_jiffies = next_jiffies;
+ long difference = HZ;
/*
* We can only have one thread per adapter for AIF's.
@@ -1368,7 +1478,7 @@ int aac_command_thread(void *data)
cpu_to_le32(AifCmdJobProgress))) {
aac_handle_aif(dev, fib);
}
-
+
time_now = jiffies/HZ;
/*
@@ -1507,11 +1617,79 @@ int aac_command_thread(void *data)
* There are no more AIF's
*/
spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
- schedule();
+
+ /*
+ * Background activity
+ */
+ if ((time_before(next_check_jiffies,next_jiffies))
+ && ((difference = next_check_jiffies - jiffies) <= 0)) {
+ next_check_jiffies = next_jiffies;
+ if (aac_check_health(dev) == 0) {
+ difference = ((long)(unsigned)check_interval)
+ * HZ;
+ next_check_jiffies = jiffies + difference;
+ } else if (!dev->queues)
+ break;
+ }
+ if (!time_before(next_check_jiffies,next_jiffies)
+ && ((difference = next_jiffies - jiffies) <= 0)) {
+ struct timeval now;
+ int ret;
+
+ /* Don't even try to talk to the adapter if it's sick */
+ ret = aac_check_health(dev);
+ if (!ret && !dev->queues)
+ break;
+ next_check_jiffies = jiffies
+ + ((long)(unsigned)check_interval)
+ * HZ;
+ do_gettimeofday(&now);
+
+ /* Synchronize our watches */
+ if (((1000000 - (1000000 / HZ)) > now.tv_usec)
+ && (now.tv_usec > (1000000 / HZ)))
+ difference = (((1000000 - now.tv_usec) * HZ)
+ + 500000) / 1000000;
+ else if (ret == 0) {
+ struct fib *fibptr;
+
+ if ((fibptr = aac_fib_alloc(dev))) {
+ u32 * info;
+
+ aac_fib_init(fibptr);
+
+ info = (u32 *) fib_data(fibptr);
+ if (now.tv_usec > 500000)
+ ++now.tv_sec;
+
+ *info = cpu_to_le32(now.tv_sec);
+
+ (void)aac_fib_send(SendHostTime,
+ fibptr,
+ sizeof(*info),
+ FsaNormal,
+ 1, 1,
+ NULL,
+ NULL);
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
+ difference = (long)(unsigned)update_interval*HZ;
+ } else {
+ /* retry shortly */
+ difference = 10 * HZ;
+ }
+ next_jiffies = jiffies + difference;
+ if (time_before(next_check_jiffies,next_jiffies))
+ difference = next_check_jiffies - jiffies;
+ }
+ if (difference <= 0)
+ difference = 1;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(difference);
if (kthread_should_stop())
break;
- set_current_state(TASK_INTERRUPTIBLE);
}
if (dev->queues)
remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
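The command thread now sleeps for a bounded interval instead of an open-ended schedule(): it works out how long until the earlier of the next health check (check_interval) and the next host-time synchronization (update_interval), clamps that to at least one jiffy, and passes it to schedule_timeout(). A condensed sketch of the wakeup calculation (illustrative only):

#include <linux/jiffies.h>
#include <linux/kernel.h>

/* Pick the sleep length until the earlier of two periodic deadlines,
 * never sleeping for less than one jiffy.
 */
static long next_background_sleep(unsigned long next_check_jiffies,
				  unsigned long next_sync_jiffies)
{
	long d = min((long)(next_check_jiffies - jiffies),
		     (long)(next_sync_jiffies - jiffies));

	return d > 0 ? d : 1;
}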
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 5c487ff096c7..d76e1a8cb93a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -39,10 +39,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <asm/semaphore.h>
@@ -223,12 +221,12 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
- { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */
+ { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
- { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
};
@@ -403,10 +401,6 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
static int aac_slave_configure(struct scsi_device *sdev)
{
- if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
- sdev->skip_ms_page_8 = 1;
- sdev->skip_ms_page_3f = 1;
- }
if ((sdev->type == TYPE_DISK) &&
(sdev_channel(sdev) != CONTAINER_CHANNEL)) {
if (expose_physicals == 0)
@@ -450,6 +444,43 @@ static int aac_slave_configure(struct scsi_device *sdev)
return 0;
}
+/**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+ * @depth: desired queue depth
+ *
+ * Alters queue depths for target device based on the host adapter's
+ * total capacity and the queue depth supported by the target device.
+ */
+
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num = 0;
+
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (sdev_channel(dev) == CONTAINER_CHANNEL))
+ ++num;
+ ++num;
+ }
+ if (num >= host->can_queue)
+ num = host->can_queue - 1;
+ if (depth > (host->can_queue - num))
+ depth = host->can_queue - num;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
+ } else
+ scsi_adjust_queue_depth(sdev, 0, 1);
+ return sdev->queue_depth;
+}
+
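A worked example of the clamping in aac_change_queue_depth() above (numbers hypothetical): the loop adds one for every device on the host plus one more for each container disk, so with can_queue = 512 and 64 devices of which 32 are containers, num = 96; a requested depth of 320 passes the first check (it is below 512 - 96 = 416) and is then limited to the 2..256 window, so the device ends up with a tagged depth of 256. Devices outside the container channel simply fall back to an untagged depth of 1.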
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
@@ -548,6 +579,14 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
ssleep(1);
}
printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+ /*
+ * This adapter needs a blind reset; only do so for adapters that
+ * support a register-based reset rather than a commanded reset.
+ */
+ if ((aac->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_MU_RESET|AAC_OPTION_IGNORE_RESET)) ==
+ le32_to_cpu(AAC_OPTION_MU_RESET))
+ aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}
@@ -731,15 +770,21 @@ static ssize_t aac_show_bios_version(struct class_device *class_dev,
return len;
}
-static ssize_t aac_show_serial_number(struct class_device *class_dev,
- char *buf)
+ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf)
{
struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
int len = 0;
if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- len = snprintf(buf, PAGE_SIZE, "%x\n",
+ len = snprintf(buf, PAGE_SIZE, "%06X\n",
le32_to_cpu(dev->adapter_info.serial[0]));
+ if (len &&
+ !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
+ sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len],
+ buf, len))
+ len = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
+ dev->supplement_adapter_info.MfgPcbaSerialNo);
return len;
}
@@ -755,6 +800,31 @@ static ssize_t aac_show_max_id(struct class_device *class_dev, char *buf)
class_to_shost(class_dev)->max_id);
}
+static ssize_t aac_store_reset_adapter(struct class_device *class_dev,
+ const char *buf, size_t count)
+{
+ int retval = -EACCES;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return retval;
+ retval = aac_reset_adapter((struct aac_dev*)class_to_shost(class_dev)->hostdata, buf[0] == '!');
+ if (retval >= 0)
+ retval = count;
+ return retval;
+}
+
+static ssize_t aac_show_reset_adapter(struct class_device *class_dev,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ int len, tmp;
+
+ tmp = aac_adapter_check_health(dev);
+ if ((tmp == 0) && dev->in_reset)
+ tmp = -EBUSY;
+ len = snprintf(buf, PAGE_SIZE, "0x%x", tmp);
+ return len;
+}
static struct class_device_attribute aac_model = {
.attr = {
@@ -812,6 +882,14 @@ static struct class_device_attribute aac_max_id = {
},
.show = aac_show_max_id,
};
+static struct class_device_attribute aac_reset = {
+ .attr = {
+ .name = "reset_host",
+ .mode = S_IWUSR|S_IRUGO,
+ },
+ .store = aac_store_reset_adapter,
+ .show = aac_show_reset_adapter,
+};
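The new reset_host attribute gives root a manual reset hook: reading it reports the adapter health value (or a busy indication while a reset is in flight), and writing to it calls aac_reset_adapter(), with a leading '!' in the written string requesting the forced path that skips the 60-second command quiesce. Assuming the usual SCSI host sysfs layout, that corresponds to writing to /sys/class/scsi_host/host<N>/reset_host; the exact path on a given system is an assumption, not something stated by this patch.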
static struct class_device_attribute *aac_attrs[] = {
&aac_model,
@@ -822,6 +900,7 @@ static struct class_device_attribute *aac_attrs[] = {
&aac_serial_number,
&aac_max_channel,
&aac_max_id,
+ &aac_reset,
NULL
};
@@ -848,6 +927,7 @@ static struct scsi_host_template aac_driver_template = {
.bios_param = aac_biosparm,
.shost_attrs = aac_attrs,
.slave_configure = aac_slave_configure,
+ .change_queue_depth = aac_change_queue_depth,
.eh_abort_handler = aac_eh_abort,
.eh_host_reset_handler = aac_eh_reset,
.can_queue = AAC_NUM_IO_FIB,
@@ -1086,7 +1166,7 @@ static int __init aac_init(void)
{
int error;
- printk(KERN_INFO "Adaptec %s driver (%s)\n",
+ printk(KERN_INFO "Adaptec %s driver %s\n",
AAC_DRIVERNAME, aac_driver_version);
error = pci_register_driver(&aac_pci_driver);
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ae978a373c56..ebc65b9fea92 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -464,21 +464,24 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
u32 var;
- if (bled)
- printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
- dev->name, dev->id, bled);
- else {
- bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
- 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
- if (!bled && (var != 0x00000001))
- bled = -EINVAL;
- }
- if (bled && (bled != -ETIMEDOUT))
- bled = aac_adapter_sync_cmd(dev, IOP_RESET,
- 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+ if (!(dev->supplement_adapter_info.SupportedOptions2 &
+ le32_to_cpu(AAC_OPTION_MU_RESET)) || (bled >= 0) || (bled == -2)) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ else {
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+ if (!bled && (var != 0x00000001))
+ bled = -EINVAL;
+ }
+ if (bled && (bled != -ETIMEDOUT))
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
- if (bled && (bled != -ETIMEDOUT))
- return -EINVAL;
+ if (bled && (bled != -ETIMEDOUT))
+ return -EINVAL;
+ }
if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
rx_writel(dev, MUnit.reserved2, 3);
msleep(5000); /* Delay 5 seconds */
@@ -596,7 +599,7 @@ int _aac_rx_init(struct aac_dev *dev)
}
msleep(1);
}
- if (restart)
+ if (restart && aac_commit)
aac_commit = 1;
/*
* Fill in the common function dispatch table.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 9b3303b64113..2b6689709e53 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -798,7 +798,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
-#include "advansys.h"
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif /* CONFIG_PCI */
@@ -2014,7 +2013,7 @@ STATIC int AscSgListToQueue(int);
STATIC void AscEnableIsaDma(uchar);
#endif /* CONFIG_ISA */
STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
-
+static const char *advansys_info(struct Scsi_Host *shp);
/*
* --- Adv Library Constants and Macros
@@ -3970,10 +3969,6 @@ STATIC ushort asc_bus[ASC_NUM_BUS] __initdata = {
ASC_IS_PCI,
};
-/*
- * Used with the LILO 'advansys' option to eliminate or
- * limit I/O port probing at boot time, cf. advansys_setup().
- */
STATIC int asc_iopflag = ASC_FALSE;
STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 };
@@ -4055,10 +4050,6 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
#endif /* ADVANSYS_DEBUG */
-/*
- * --- Linux 'struct scsi_host_template' and advansys_setup() Functions
- */
-
#ifdef CONFIG_PROC_FS
/*
* advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
@@ -4080,7 +4071,7 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
* if 'prtbuf' is too small it will not be overwritten. Instead the
* user just won't get all the available statistics.
*/
-int
+static int
advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
off_t offset, int length, int inout)
{
@@ -4296,7 +4287,7 @@ advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
* it must not call SCSI mid-level functions including scsi_malloc()
* and scsi_free().
*/
-int __init
+static int __init
advansys_detect(struct scsi_host_template *tpnt)
{
static int detect_called = ASC_FALSE;
@@ -5428,7 +5419,7 @@ advansys_detect(struct scsi_host_template *tpnt)
*
* Release resources allocated for a single AdvanSys adapter.
*/
-int
+static int
advansys_release(struct Scsi_Host *shp)
{
asc_board_t *boardp;
@@ -5475,7 +5466,7 @@ advansys_release(struct Scsi_Host *shp)
* Note: The information line should not exceed ASC_INFO_SIZE bytes,
* otherwise the static 'info' array will be overrun.
*/
-const char *
+static const char *
advansys_info(struct Scsi_Host *shp)
{
static char info[ASC_INFO_SIZE];
@@ -5568,7 +5559,7 @@ advansys_info(struct Scsi_Host *shp)
* This function always returns 0. Command return status is saved
* in the 'scp' result field.
*/
-int
+static int
advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
struct Scsi_Host *shp;
@@ -5656,7 +5647,7 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
* sleeping is allowed and no locking other than for host structures is
* required. Returns SUCCESS or FAILED.
*/
-int
+static int
advansys_reset(struct scsi_cmnd *scp)
{
struct Scsi_Host *shp;
@@ -5841,7 +5832,7 @@ advansys_reset(struct scsi_cmnd *scp)
* ip[1]: sectors
* ip[2]: cylinders
*/
-int
+static int
advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int ip[])
{
@@ -5875,82 +5866,6 @@ advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
}
/*
- * advansys_setup()
- *
- * This function is called from init/main.c at boot time.
- * It it passed LILO parameters that can be set from the
- * LILO command line or in /etc/lilo.conf.
- *
- * It is used by the AdvanSys driver to either disable I/O
- * port scanning or to limit scanning to 1 - 4 I/O ports.
- * Regardless of the option setting EISA and PCI boards
- * will still be searched for and detected. This option
- * only affects searching for ISA and VL boards.
- *
- * If ADVANSYS_DEBUG is defined the driver debug level may
- * be set using the 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port.
- *
- * Examples:
- * 1. Eliminate I/O port scanning:
- * boot: linux advansys=
- * or
- * boot: linux advansys=0x0
- * 2. Limit I/O port scanning to one I/O port:
- * boot: linux advansys=0x110
- * 3. Limit I/O port scanning to four I/O ports:
- * boot: linux advansys=0x110,0x210,0x230,0x330
- * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
- * set the driver debug level to 2.
- * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
- *
- * ints[0] - number of arguments
- * ints[1] - first argument
- * ints[2] - second argument
- * ...
- */
-void __init
-advansys_setup(char *str, int *ints)
-{
- int i;
-
- if (asc_iopflag == ASC_TRUE) {
- printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
- return;
- }
-
- asc_iopflag = ASC_TRUE;
-
- if (ints[0] > ASC_NUM_IOPORT_PROBE) {
-#ifdef ADVANSYS_DEBUG
- if ((ints[0] == ASC_NUM_IOPORT_PROBE + 1) &&
- (ints[ASC_NUM_IOPORT_PROBE + 1] >> 4 == 0xdeb)) {
- asc_dbglvl = ints[ASC_NUM_IOPORT_PROBE + 1] & 0xf;
- } else {
-#endif /* ADVANSYS_DEBUG */
- printk("AdvanSys SCSI: only %d I/O ports accepted\n",
- ASC_NUM_IOPORT_PROBE);
-#ifdef ADVANSYS_DEBUG
- }
-#endif /* ADVANSYS_DEBUG */
- }
-
-#ifdef ADVANSYS_DEBUG
- ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
- for (i = 1; i < ints[0]; i++) {
- ASC_DBG2(1, " ints[%d] 0x%x", i, ints[i]);
- }
- ASC_DBG(1, "\n");
-#endif /* ADVANSYS_DEBUG */
-
- for (i = 1; i <= ints[0] && i <= ASC_NUM_IOPORT_PROBE; i++) {
- asc_ioport[i-1] = ints[i];
- ASC_DBG2(1, "advansys_setup: asc_ioport[%d] 0x%x\n",
- i - 1, asc_ioport[i-1]);
- }
-}
-
-
-/*
* --- Loadable Driver Support
*/
diff --git a/drivers/scsi/advansys.h b/drivers/scsi/advansys.h
deleted file mode 100644
index 8ee7fb16a725..000000000000
--- a/drivers/scsi/advansys.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
- *
- * Copyright (c) 1995-2000 Advanced System Products, Inc.
- * Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that redistributions of source
- * code retain the above copyright notice and this comment without
- * modification.
- *
- * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys)
- * changed its name to ConnectCom Solutions, Inc.
- *
- */
-
-#ifndef _ADVANSYS_H
-#define _ADVANSYS_H
-
-/*
- * struct scsi_host_template function prototypes.
- */
-int advansys_detect(struct scsi_host_template *);
-int advansys_release(struct Scsi_Host *);
-const char *advansys_info(struct Scsi_Host *);
-int advansys_queuecommand(struct scsi_cmnd *, void (* done)(struct scsi_cmnd *));
-int advansys_reset(struct scsi_cmnd *);
-int advansys_biosparam(struct scsi_device *, struct block_device *,
- sector_t, int[]);
-static int advansys_slave_configure(struct scsi_device *);
-
-/* init/main.c setup function */
-void advansys_setup(char *, int *);
-
-#endif /* _ADVANSYS_H */
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4b4d1233ce8a..85f2394ffc3e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -240,6 +240,7 @@
#include <linux/io.h>
#include <linux/blkdev.h>
#include <asm/system.h>
+#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/wait.h>
@@ -253,7 +254,6 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/list.h>
-#include <asm/semaphore.h>
#include <scsi/scsicam.h>
#include "scsi.h"
@@ -551,7 +551,7 @@ struct aha152x_hostdata {
*/
struct aha152x_scdata {
Scsi_Cmnd *next; /* next sc in queue */
- struct semaphore *sem; /* semaphore to block on */
+ struct completion *done;/* completion to wait on */
unsigned char cmd_len;
unsigned char cmnd[MAX_COMMAND_SIZE];
unsigned short use_sg;
@@ -608,7 +608,7 @@ struct aha152x_scdata {
#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble)
#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
-#define SCSEM(SCpnt) SCDATA(SCpnt)->sem
+#define SCSEM(SCpnt) SCDATA(SCpnt)->done
#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
@@ -969,7 +969,8 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
/*
* Queue a command and setup interrupts for a free bus.
*/
-static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int phase, void (*done)(Scsi_Cmnd *))
+static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
+ int phase, void (*done)(Scsi_Cmnd *))
{
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
@@ -1013,7 +1014,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
}
SCNEXT(SCpnt) = NULL;
- SCSEM(SCpnt) = sem;
+ SCSEM(SCpnt) = complete;
/* setup scratch area
SCp.ptr : buffer pointer
@@ -1084,9 +1085,9 @@ static void reset_done(Scsi_Cmnd *SCpnt)
DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt));
#endif
if(SCSEM(SCpnt)) {
- up(SCSEM(SCpnt));
+ complete(SCSEM(SCpnt));
} else {
- printk(KERN_ERR "aha152x: reset_done w/o semaphore\n");
+ printk(KERN_ERR "aha152x: reset_done w/o completion\n");
}
}
@@ -1139,21 +1140,6 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
return FAILED;
}
-static void timer_expired(unsigned long p)
-{
- Scsi_Cmnd *SCp = (Scsi_Cmnd *)p;
- struct semaphore *sem = SCSEM(SCp);
- struct Scsi_Host *shpnt = SCp->device->host;
- unsigned long flags;
-
- /* remove command from issue queue */
- DO_LOCK(flags);
- remove_SC(&ISSUE_SC, SCp);
- DO_UNLOCK(flags);
-
- up(sem);
-}
-
/*
* Reset a device
*
@@ -1161,14 +1147,14 @@ static void timer_expired(unsigned long p)
static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
{
struct Scsi_Host *shpnt = SCpnt->device->host;
- DECLARE_MUTEX_LOCKED(sem);
- struct timer_list timer;
+ DECLARE_COMPLETION(done);
int ret, issued, disconnected;
unsigned char old_cmd_len = SCpnt->cmd_len;
unsigned short old_use_sg = SCpnt->use_sg;
void *old_buffer = SCpnt->request_buffer;
unsigned old_bufflen = SCpnt->request_bufflen;
unsigned long flags;
+ unsigned long timeleft;
#if defined(AHA152X_DEBUG)
if(HOSTDATA(shpnt)->debug & debug_eh) {
@@ -1192,15 +1178,15 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
- init_timer(&timer);
- timer.data = (unsigned long) SCpnt;
- timer.expires = jiffies + 100*HZ; /* 10s */
- timer.function = (void (*)(unsigned long)) timer_expired;
+ aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
- aha152x_internal_queue(SCpnt, &sem, resetting, reset_done);
- add_timer(&timer);
- down(&sem);
- del_timer(&timer);
+ timeleft = wait_for_completion_timeout(&done, 100*HZ);
+ if (!timeleft) {
+ /* remove command from issue queue */
+ DO_LOCK(flags);
+ remove_SC(&ISSUE_SC, SCpnt);
+ DO_UNLOCK(flags);
+ }
SCpnt->cmd_len = old_cmd_len;
SCpnt->use_sg = old_use_sg;
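The hunk above replaces the on-stack semaphore plus hand-rolled kernel timer with a completion and wait_for_completion_timeout(), so the timeout and the wakeup are handled by a single call. A minimal sketch of that pattern, with hypothetical my_* names standing in for the driver's queue/done plumbing:

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct my_request {
		struct completion *done;	/* signalled from the done path */
	};

	/* Called from the interrupt/done path when the request finishes. */
	static void my_request_done(struct my_request *req)
	{
		if (req->done)
			complete(req->done);
	}

	static int my_issue_and_wait(struct my_request *req)
	{
		DECLARE_COMPLETION(done);	/* on-stack, single waiter */
		unsigned long timeleft;

		req->done = &done;
		/* ...queue req to the hardware here... */

		timeleft = wait_for_completion_timeout(&done, 10 * HZ);
		if (!timeleft) {
			/* timed out: take req back off the issue queue here */
			return -ETIMEDOUT;
		}
		return 0;
	}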
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index d7af9c63a04d..e4a4f3a965d9 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -271,20 +271,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
continue;
}
sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
- if (SCtmp->use_sg) {
- /* We used scatter-gather.
- Do the unmapping dance. */
- dma_unmap_sg (&edev->dev,
- (struct scatterlist *) SCtmp->request_buffer,
- SCtmp->use_sg,
- SCtmp->sc_data_direction);
- } else {
- dma_unmap_single (&edev->dev,
- sgptr->buf_dma_addr,
- SCtmp->request_bufflen,
- DMA_BIDIRECTIONAL);
- }
-
+ scsi_dma_unmap(SCtmp);
+
/* Free the sg block */
dma_free_coherent (&edev->dev,
sizeof (struct aha1740_sg),
@@ -349,11 +337,9 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
unchar target = scmd_id(SCpnt);
struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
unsigned long flags;
- void *buff = SCpnt->request_buffer;
- int bufflen = SCpnt->request_bufflen;
dma_addr_t sg_dma;
struct aha1740_sg *sgptr;
- int ecbno;
+ int ecbno, nseg;
DEB(int i);
if(*cmd == REQUEST_SENSE) {
@@ -423,24 +409,23 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
}
sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
sgptr->sg_dma_addr = sg_dma;
-
- if (SCpnt->use_sg) {
- struct scatterlist * sgpnt;
+
+ nseg = scsi_dma_map(SCpnt);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
struct aha1740_chain * cptr;
- int i, count;
+ int i;
DEB(unsigned char * ptr);
host->ecb[ecbno].sg = 1; /* SCSI Initiator Command
* w/scatter-gather*/
- sgpnt = (struct scatterlist *) SCpnt->request_buffer;
cptr = sgptr->sg_chain;
- count = dma_map_sg (&host->edev->dev, sgpnt, SCpnt->use_sg,
- SCpnt->sc_data_direction);
- for(i=0; i < count; i++) {
- cptr[i].datalen = sg_dma_len (sgpnt + i);
- cptr[i].dataptr = sg_dma_address (sgpnt + i);
+ scsi_for_each_sg(SCpnt, sg, nseg, i) {
+ cptr[i].datalen = sg_dma_len (sg);
+ cptr[i].dataptr = sg_dma_address (sg);
}
- host->ecb[ecbno].datalen = count*sizeof(struct aha1740_chain);
+ host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
host->ecb[ecbno].dataptr = sg_dma;
#ifdef DEBUG
printk("cptr %x: ",cptr);
@@ -448,11 +433,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
for(i=0;i<24;i++) printk("%02x ", ptr[i]);
#endif
} else {
- host->ecb[ecbno].datalen = bufflen;
- sgptr->buf_dma_addr = dma_map_single (&host->edev->dev,
- buff, bufflen,
- DMA_BIDIRECTIONAL);
- host->ecb[ecbno].dataptr = sgptr->buf_dma_addr;
+ host->ecb[ecbno].datalen = 0;
+ host->ecb[ecbno].dataptr = 0;
}
host->ecb[ecbno].lun = SCpnt->device->lun;
host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
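This conversion, repeated in several drivers below, swaps hand-rolled use_sg/request_buffer handling for the scsi_cmnd DMA accessors: scsi_dma_map() maps the mid-layer scatterlist (returning 0 when the command carries no data, negative on failure), scsi_for_each_sg() walks the mapped entries, and scsi_dma_unmap() undoes the mapping on completion. A minimal sketch of the pattern; my_hw_sg_entry and my_build_sglist are hypothetical:

	#include <scsi/scsi_cmnd.h>
	#include <linux/scatterlist.h>

	struct my_hw_sg_entry {
		dma_addr_t	addr;
		unsigned int	len;
	};

	static int my_build_sglist(struct scsi_cmnd *cmd, struct my_hw_sg_entry *tbl)
	{
		struct scatterlist *sg;
		int nseg, i;

		nseg = scsi_dma_map(cmd);	/* 0 means no data phase */
		if (nseg < 0)
			return nseg;		/* mapping failed */

		scsi_for_each_sg(cmd, sg, nseg, i) {
			tbl[i].addr = sg_dma_address(sg);
			tbl[i].len  = sg_dma_len(sg);
		}
		/* the completion path undoes this with scsi_dma_unmap(cmd) */
		return nseg;
	}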
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6054881f21f1..286ab83116f9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -376,21 +376,10 @@ static __inline void
ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
{
struct scsi_cmnd *cmd;
- int direction;
cmd = scb->io_ctx;
- direction = cmd->sc_data_direction;
ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
- if (cmd->use_sg != 0) {
- struct scatterlist *sg;
-
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
- } else if (cmd->request_bufflen != 0) {
- pci_unmap_single(ahd->dev_softc,
- scb->platform_data->buf_busaddr,
- cmd->request_bufflen, direction);
- }
+ scsi_dma_unmap(cmd);
}
/******************************** Macros **************************************/
@@ -1422,6 +1411,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
u_int col_idx;
uint16_t mask;
unsigned long flags;
+ int nseg;
ahd_lock(ahd, &flags);
@@ -1494,18 +1484,17 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
ahd_set_residual(scb, 0);
ahd_set_sense_residual(scb, 0);
scb->sg_count = 0;
- if (cmd->use_sg != 0) {
- void *sg;
- struct scatterlist *cur_seg;
- u_int nseg;
- int dir;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- dir = cmd->sc_data_direction;
- nseg = pci_map_sg(ahd->dev_softc, cur_seg,
- cmd->use_sg, dir);
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg > 0) {
+ void *sg = scb->sg_list;
+ struct scatterlist *cur_seg;
+ int i;
+
scb->platform_data->xfer_len = 0;
- for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
+
+ scsi_for_each_sg(cmd, cur_seg, nseg, i) {
dma_addr_t addr;
bus_size_t len;
@@ -1513,22 +1502,8 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
len = sg_dma_len(cur_seg);
scb->platform_data->xfer_len += len;
sg = ahd_sg_setup(ahd, scb, sg, addr, len,
- /*last*/nseg == 1);
+ i == (nseg - 1));
}
- } else if (cmd->request_bufflen != 0) {
- void *sg;
- dma_addr_t addr;
- int dir;
-
- sg = scb->sg_list;
- dir = cmd->sc_data_direction;
- addr = pci_map_single(ahd->dev_softc,
- cmd->request_buffer,
- cmd->request_bufflen, dir);
- scb->platform_data->xfer_len = cmd->request_bufflen;
- scb->platform_data->buf_busaddr = addr;
- sg = ahd_sg_setup(ahd, scb, sg, addr,
- cmd->request_bufflen, /*last*/TRUE);
}
LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index ad9761b237dc..853998be1474 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -781,7 +781,7 @@ int ahd_get_transfer_dir(struct scb *scb)
static __inline
void ahd_set_residual(struct scb *scb, u_long resid)
{
- scb->io_ctx->resid = resid;
+ scsi_set_resid(scb->io_ctx, resid);
}
static __inline
@@ -793,7 +793,7 @@ void ahd_set_sense_residual(struct scb *scb, u_long resid)
static __inline
u_long ahd_get_residual(struct scb *scb)
{
- return (scb->io_ctx->resid);
+ return scsi_get_resid(scb->io_ctx);
}
static __inline
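Likewise, the residual count now goes through scsi_set_resid()/scsi_get_resid() instead of poking scb->io_ctx->resid directly. A small hedged example of reporting an underrun with those accessors:

	#include <scsi/scsi_cmnd.h>

	/* Record how many of the requested bytes the device did not transfer. */
	static void my_note_underrun(struct scsi_cmnd *cmd, unsigned int done_bytes)
	{
		scsi_set_resid(cmd, scsi_bufflen(cmd) - done_bytes);
	}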
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 660f26e23a38..1803ab6fc21c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -402,18 +402,8 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
cmd = scb->io_ctx;
ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
- if (cmd->use_sg != 0) {
- struct scatterlist *sg;
-
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (cmd->request_bufflen != 0) {
- pci_unmap_single(ahc->dev_softc,
- scb->platform_data->buf_busaddr,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+
+ scsi_dma_unmap(cmd);
}
static __inline int
@@ -1381,6 +1371,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
struct ahc_tmode_tstate *tstate;
uint16_t mask;
struct scb_tailq *untagged_q = NULL;
+ int nseg;
/*
* Schedule us to run later. The only reason we are not
@@ -1472,23 +1463,21 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
ahc_set_residual(scb, 0);
ahc_set_sense_residual(scb, 0);
scb->sg_count = 0;
- if (cmd->use_sg != 0) {
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg > 0) {
struct ahc_dma_seg *sg;
struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
- int nseg;
+ int i;
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
- cmd->sc_data_direction);
- end_seg = cur_seg + nseg;
/* Copy the segments into the SG list. */
sg = scb->sg_list;
/*
* The sg_count may be larger than nseg if
* a transfer crosses a 32bit page.
- */
- while (cur_seg < end_seg) {
+ */
+ scsi_for_each_sg(cmd, cur_seg, nseg, i) {
dma_addr_t addr;
bus_size_t len;
int consumed;
@@ -1499,7 +1488,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
sg, addr, len);
sg += consumed;
scb->sg_count += consumed;
- cur_seg++;
}
sg--;
sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
@@ -1516,33 +1504,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
*/
scb->hscb->dataptr = scb->sg_list->addr;
scb->hscb->datacnt = scb->sg_list->len;
- } else if (cmd->request_bufflen != 0) {
- struct ahc_dma_seg *sg;
- dma_addr_t addr;
-
- sg = scb->sg_list;
- addr = pci_map_single(ahc->dev_softc,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- scb->platform_data->buf_busaddr = addr;
- scb->sg_count = ahc_linux_map_seg(ahc, scb,
- sg, addr,
- cmd->request_bufflen);
- sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
-
- /*
- * Reset the sg list pointer.
- */
- scb->hscb->sgptr =
- ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
- /*
- * Copy the first SG into the "current"
- * data pointer area.
- */
- scb->hscb->dataptr = sg->addr;
- scb->hscb->datacnt = sg->len;
} else {
scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
scb->hscb->dataptr = 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 8fee7edc6eb3..b48dab447bde 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -751,7 +751,7 @@ int ahc_get_transfer_dir(struct scb *scb)
static __inline
void ahc_set_residual(struct scb *scb, u_long resid)
{
- scb->io_ctx->resid = resid;
+ scsi_set_resid(scb->io_ctx, resid);
}
static __inline
@@ -763,7 +763,7 @@ void ahc_set_sense_residual(struct scb *scb, u_long resid)
static __inline
u_long ahc_get_residual(struct scb *scb)
{
- return (scb->io_ctx->resid);
+ return scsi_get_resid(scb->io_ctx);
}
static __inline
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index a988d5abf702..4998bb850c49 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2690,17 +2690,8 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
struct aic7xxx_scb *scbp;
unsigned char queue_depth;
- if (cmd->use_sg > 1)
- {
- struct scatterlist *sg;
+ scsi_dma_unmap(cmd);
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
- }
- else if (cmd->request_bufflen)
- pci_unmap_single(p->pdev, aic7xxx_mapping(cmd),
- cmd->request_bufflen,
- cmd->sc_data_direction);
if (scb->flags & SCB_SENSE)
{
pci_unmap_single(p->pdev,
@@ -3869,7 +3860,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* the mid layer didn't check residual data counts to see if the
* command needs retried.
*/
- cmd->resid = scb->sg_length - actual;
+ scsi_set_resid(cmd, scb->sg_length - actual);
aic7xxx_status(cmd) = hscb->target_status;
}
}
@@ -6581,7 +6572,7 @@ aic7xxx_slave_alloc(struct scsi_device *SDptr)
struct aic7xxx_host *p = (struct aic7xxx_host *)SDptr->host->hostdata;
struct aic_dev_data *aic_dev;
- aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_ATOMIC | GFP_KERNEL);
+ aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_KERNEL);
if(!aic_dev)
return 1;
/*
@@ -10137,6 +10128,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
struct scsi_device *sdptr = cmd->device;
unsigned char tindex = TARGET_INDEX(cmd);
struct request *req = cmd->request;
+ int use_sg;
mask = (0x01 << tindex);
hscb = scb->hscb;
@@ -10209,8 +10201,10 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len);
hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd));
- if (cmd->use_sg)
- {
+ use_sg = scsi_dma_map(cmd);
+ BUG_ON(use_sg < 0);
+
+ if (use_sg) {
struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */
/*
@@ -10219,11 +10213,11 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
* differences and the kernel SG list uses virtual addresses where
* we need physical addresses.
*/
- int i, use_sg;
+ int i;
- sg = (struct scatterlist *)cmd->request_buffer;
scb->sg_length = 0;
- use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
+
+
/*
* Copy the segments into the SG array. NOTE!!! - We used to
* have the first entry both in the data_pointer area and the first
@@ -10231,10 +10225,9 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
* entry in both places, but now we download the address of
* scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
*/
- for (i = 0; i < use_sg; i++)
- {
- unsigned int len = sg_dma_len(sg+i);
- scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg+i));
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ unsigned int len = sg_dma_len(sg);
+ scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg));
scb->sg_list[i].length = cpu_to_le32(len);
scb->sg_length += len;
}
@@ -10244,33 +10237,13 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
scb->sg_count = i;
hscb->SG_segment_count = i;
hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1]));
- }
- else
- {
- if (cmd->request_bufflen)
- {
- unsigned int address = pci_map_single(p->pdev, cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- aic7xxx_mapping(cmd) = address;
- scb->sg_list[0].address = cpu_to_le32(address);
- scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
- scb->sg_count = 1;
- scb->sg_length = cmd->request_bufflen;
- hscb->SG_segment_count = 1;
- hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[0]));
- hscb->data_count = scb->sg_list[0].length;
- hscb->data_pointer = scb->sg_list[0].address;
- }
- else
- {
+ } else {
scb->sg_count = 0;
scb->sg_length = 0;
hscb->SG_segment_count = 0;
hscb->SG_list_pointer = 0;
hscb->data_count = 0;
hscb->data_pointer = 0;
- }
}
}
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index c520e5b41fb5..3dce618bf414 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -126,7 +126,7 @@ static inline int asd_init_sata(struct domain_device *dev)
if (w76 & 0x100) /* NCQ? */
qdepth = (w75 & 0x1F) + 1;
asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
- (1<<qdepth)-1);
+ (1ULL<<qdepth)-1);
asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
}
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
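The 1ULL in the hunk above matters because the NCQ queue depth read from the identify data can reach 32, and shifting a plain 32-bit int by 32 is undefined; promoting to a 64-bit constant keeps the intermediate shift well defined before it is truncated into the tag mask. A hedged illustration:

	#include <linux/types.h>

	/* qdepth is 1..32; for 32 this yields 0xFFFFFFFF, one bit per NCQ tag. */
	static u32 ncq_tag_mask(unsigned int qdepth)
	{
		return (1ULL << qdepth) - 1;
	}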
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 27852b43b904..ab00aecc5466 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -81,6 +81,9 @@ static struct scsi_host_template aic94xx_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
+ .slave_alloc = sas_slave_alloc,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
};
static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
@@ -223,13 +226,8 @@ static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
{
int err, i;
- err = pci_read_config_byte(asd_ha->pcidev, PCI_REVISION_ID,
- &asd_ha->revision_id);
- if (err) {
- asd_printk("couldn't read REVISION ID register of %s\n",
- pci_name(asd_ha->pcidev));
- goto Err;
- }
+ asd_ha->revision_id = asd_ha->pcidev->revision;
+
err = -ENODEV;
if (asd_ha->revision_id < AIC9410_DEV_REV_B0) {
asd_printk("%s is revision %s (%X), which is not supported\n",
@@ -467,7 +465,7 @@ static int asd_create_global_caches(void)
sizeof(struct asd_dma_tok),
0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!asd_dma_token_cache) {
asd_printk("couldn't create dma token cache\n");
return -ENOMEM;
@@ -479,7 +477,7 @@ static int asd_create_global_caches(void)
sizeof(struct asd_ascb),
0,
SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (!asd_ascb_cache) {
asd_printk("couldn't create ascb cache\n");
goto Err;
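The kmem_cache_create() calls above lose their trailing NULL because the slab destructor parameter has been dropped from the API; only the constructor slot remains, and these caches pass NULL for it. A minimal sketch under that assumption, with hypothetical names:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct my_obj {
		int payload;
	};

	static struct kmem_cache *my_cache;

	static int my_cache_init(void)
	{
		/* name, object size, align, flags, constructor (none) */
		my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
					     0, SLAB_HWCACHE_ALIGN, NULL);
		return my_cache ? 0 : -ENOMEM;
	}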
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index e2ad5bed9403..d5d8caba3560 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -74,8 +74,13 @@ static inline int asd_map_scatterlist(struct sas_task *task,
return 0;
}
- num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ /* STP tasks come from libata which has already mapped
+ * the SG list */
+ if (sas_protocol_ata(task->task_proto))
+ num_sg = task->num_scatter;
+ else
+ num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+ task->num_scatter, task->data_dir);
if (num_sg == 0)
return -ENOMEM;
@@ -120,8 +125,9 @@ static inline int asd_map_scatterlist(struct sas_task *task,
return 0;
err_unmap:
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ if (sas_protocol_ata(task->task_proto))
+ pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
+ task->data_dir);
return res;
}
@@ -142,8 +148,9 @@ static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
}
asd_free_coherent(asd_ha, ascb->sg_arr);
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ if (task->task_proto != SAS_PROTOCOL_STP)
+ pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
+ task->data_dir);
}
/* ---------- Task complete tasklet ---------- */
@@ -391,7 +398,6 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
scb->ata_task.fis = task->ata_task.fis;
- scb->ata_task.fis.fis_type = 0x27;
if (likely(!task->ata_task.device_control_reg_update))
scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
diff --git a/drivers/scsi/amiga7xx.c b/drivers/scsi/amiga7xx.c
deleted file mode 100644
index d5d3c4d5a253..000000000000
--- a/drivers/scsi/amiga7xx.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
- * Amiga MacroSystemUS WarpEngine SCSI controller.
- * Amiga Technologies A4000T SCSI controller.
- * Amiga Technologies/DKB A4091 SCSI controller.
- *
- * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
- * plus modifications of the 53c7xx.c driver to support the Amiga.
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/blkdev.h>
-#include <linux/zorro.h>
-#include <linux/stat.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-#include <asm/dma.h>
-#include <asm/irq.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "53c7xx.h"
-#include "amiga7xx.h"
-
-
-static int amiga7xx_register_one(struct scsi_host_template *tpnt,
- unsigned long address)
-{
- long long options;
- int clock;
-
- if (!request_mem_region(address, 0x1000, "ncr53c710"))
- return 0;
-
- address = (unsigned long)z_ioremap(address, 0x1000);
- options = OPTION_MEMORY_MAPPED | OPTION_DEBUG_TEST1 | OPTION_INTFLY |
- OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS |
- OPTION_DISCONNECT;
- clock = 50000000; /* 50 MHz SCSI Clock */
- ncr53c7xx_init(tpnt, 0, 710, address, 0, IRQ_AMIGA_PORTS, DMA_NONE,
- options, clock);
- return 1;
-}
-
-
-#ifdef CONFIG_ZORRO
-
-static struct {
- zorro_id id;
- unsigned long offset;
- int absolute; /* offset is absolute address */
-} amiga7xx_table[] = {
- { .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS, .offset = 0xf40000,
- .absolute = 1 },
- { .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx, .offset = 0x40000 },
- { .id = ZORRO_PROD_CBM_A4091_1, .offset = 0x800000 },
- { .id = ZORRO_PROD_CBM_A4091_2, .offset = 0x800000 },
- { .id = ZORRO_PROD_GVP_GFORCE_040_060, .offset = 0x40000 },
- { 0 }
-};
-
-static int __init amiga7xx_zorro_detect(struct scsi_host_template *tpnt)
-{
- int num = 0, i;
- struct zorro_dev *z = NULL;
- unsigned long address;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- for (i = 0; amiga7xx_table[i].id; i++)
- if (z->id == amiga7xx_table[i].id)
- break;
- if (!amiga7xx_table[i].id)
- continue;
- if (amiga7xx_table[i].absolute)
- address = amiga7xx_table[i].offset;
- else
- address = z->resource.start + amiga7xx_table[i].offset;
- num += amiga7xx_register_one(tpnt, address);
- }
- return num;
-}
-
-#endif /* CONFIG_ZORRO */
-
-
-int __init amiga7xx_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- int num = 0;
-
- if (called || !MACH_IS_AMIGA)
- return 0;
-
- tpnt->proc_name = "Amiga7xx";
-
- if (AMIGAHW_PRESENT(A4000_SCSI))
- num += amiga7xx_register_one(tpnt, 0xdd0040);
-
-#ifdef CONFIG_ZORRO
- num += amiga7xx_zorro_detect(tpnt);
-#endif
-
- called = 1;
- return num;
-}
-
-static int amiga7xx_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "Amiga NCR53c710 SCSI",
- .detect = amiga7xx_detect,
- .release = amiga7xx_release,
- .queuecommand = NCR53c7xx_queue_command,
- .abort = NCR53c7xx_abort,
- .reset = NCR53c7xx_reset,
- .can_queue = 24,
- .this_id = 7,
- .sg_tablesize = 63,
- .cmd_per_lun = 3,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
diff --git a/drivers/scsi/amiga7xx.h b/drivers/scsi/amiga7xx.h
deleted file mode 100644
index 7cd63a996886..000000000000
--- a/drivers/scsi/amiga7xx.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef AMIGA7XX_H
-
-#include <linux/types.h>
-
-int amiga7xx_detect(struct scsi_host_template *);
-const char *NCR53c7x0_info(void);
-int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int NCR53c7xx_abort(Scsi_Cmnd *);
-int NCR53c7x0_release (struct Scsi_Host *);
-int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
-void NCR53c7x0_intr(int irq, void *dev_id);
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 3
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 24
-#endif
-
-#include <scsi/scsicam.h>
-
-#endif /* AMIGA7XX_H */
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index aff96db9ccf6..f0b8bf4534f0 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,9 +48,10 @@ struct class_device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 288
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13"
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.14"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
#define ARCMSR_MAX_TARGETID 17
#define ARCMSR_MAX_TARGETLUN 8
#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
@@ -469,4 +470,3 @@ extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
extern struct class_device_attribute *arcmsr_host_attrs[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb);
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
-
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 03bfed61bffc..06c0dce3b839 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -59,8 +59,9 @@
struct class_device_attribute *arcmsr_host_attrs[];
static ssize_t
-arcmsr_sysfs_iop_message_read(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+arcmsr_sysfs_iop_message_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct class_device *cdev = container_of(kobj,struct class_device,kobj);
struct Scsi_Host *host = class_to_shost(cdev);
@@ -105,8 +106,9 @@ arcmsr_sysfs_iop_message_read(struct kobject *kobj, char *buf, loff_t off,
}
static ssize_t
-arcmsr_sysfs_iop_message_write(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+arcmsr_sysfs_iop_message_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct class_device *cdev = container_of(kobj,struct class_device,kobj);
struct Scsi_Host *host = class_to_shost(cdev);
@@ -152,8 +154,9 @@ arcmsr_sysfs_iop_message_write(struct kobject *kobj, char *buf, loff_t off,
}
static ssize_t
-arcmsr_sysfs_iop_message_clear(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+arcmsr_sysfs_iop_message_clear(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct class_device *cdev = container_of(kobj,struct class_device,kobj);
struct Scsi_Host *host = class_to_shost(cdev);
@@ -188,7 +191,6 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.attr = {
.name = "mu_read",
.mode = S_IRUSR ,
- .owner = THIS_MODULE,
},
.size = 1032,
.read = arcmsr_sysfs_iop_message_read,
@@ -198,7 +200,6 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.attr = {
.name = "mu_write",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1032,
.write = arcmsr_sysfs_iop_message_write,
@@ -208,7 +209,6 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.attr = {
.name = "mu_clear",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1,
.write = arcmsr_sysfs_iop_message_clear,
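These hunks move the driver to the updated sysfs binary-attribute callbacks, which now receive the struct bin_attribute pointer as a second argument, and drop the obsolete .owner field from the attribute definitions. A minimal read-only example of the new shape, with a hypothetical my_blob buffer:

	#include <linux/sysfs.h>
	#include <linux/string.h>
	#include <linux/stat.h>

	static char my_blob[64];

	static ssize_t my_blob_read(struct kobject *kobj, struct bin_attribute *attr,
				    char *buf, loff_t off, size_t count)
	{
		if (off >= sizeof(my_blob))
			return 0;
		if (off + count > sizeof(my_blob))
			count = sizeof(my_blob) - off;
		memcpy(buf, my_blob + off, count);
		return count;
	}

	static struct bin_attribute my_blob_attr = {
		.attr = { .name = "my_blob", .mode = S_IRUSR }, /* no .owner now */
		.size = sizeof(my_blob),
		.read = my_blob_read,
	};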
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 8b46158cc045..0ddfc21e9f7d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -57,6 +57,7 @@
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -71,7 +72,7 @@
#include "arcmsr.h"
MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
+MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
@@ -93,7 +94,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
-
+static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -104,7 +107,8 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_de
static struct scsi_host_template arcmsr_scsi_host_template = {
.module = THIS_MODULE,
- .name = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
+ .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
+ ARCMSR_DRIVER_VERSION,
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
.eh_abort_handler = arcmsr_abort,
@@ -119,6 +123,10 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
};
+static struct pci_error_handlers arcmsr_pci_error_handlers = {
+ .error_detected = arcmsr_pci_error_detected,
+ .slot_reset = arcmsr_pci_slot_reset,
+};
static struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
@@ -144,7 +152,8 @@ static struct pci_driver arcmsr_pci_driver = {
.id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
- .shutdown = arcmsr_shutdown
+ .shutdown = arcmsr_shutdown,
+ .err_handler = &arcmsr_pci_error_handlers,
};
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
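Registering arcmsr_pci_error_handlers in the pci_driver above opts the driver into PCI error recovery: the core calls error_detected() when a channel error is reported and slot_reset() after the slot has been reset. A minimal sketch of such handlers, with hypothetical my_* names (the arcmsr versions appear later in this diff):

	#include <linux/pci.h>

	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
	{
		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT; /* give up on the device */
		/* quiesce outstanding I/O here */
		return PCI_ERS_RESULT_NEED_RESET;	  /* ask for a slot reset */
	}

	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
		pci_set_master(pdev);
		/* reinitialise the controller here */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static struct pci_error_handlers my_err_handler = {
		.error_detected	= my_error_detected,
		.slot_reset	= my_slot_reset,
	};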
@@ -328,6 +337,8 @@ static int arcmsr_probe(struct pci_dev *pdev,
arcmsr_iop_init(acb);
pci_set_drvdata(pdev, host);
+ if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
+ host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
error = scsi_add_host(host, &pdev->dev);
if (error)
@@ -338,6 +349,7 @@ static int arcmsr_probe(struct pci_dev *pdev,
goto out_free_sysfs;
scsi_scan_host(host);
+ pci_enable_pcie_error_reporting(pdev);
return 0;
out_free_sysfs:
out_free_irq:
@@ -369,19 +381,9 @@ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
- struct AdapterControlBlock *acb = ccb->acb;
struct scsi_cmnd *pcmd = ccb->pcmd;
- if (pcmd->use_sg != 0) {
- struct scatterlist *sl;
-
- sl = (struct scatterlist *)pcmd->request_buffer;
- pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
- }
- else if (pcmd->request_bufflen != 0)
- pci_unmap_single(acb->pdev,
- pcmd->SCp.dma_handle,
- pcmd->request_bufflen, pcmd->sc_data_direction);
+ scsi_dma_unmap(pcmd);
}
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
@@ -498,7 +500,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg=acb->pmu;
+ struct MessageUnit __iomem *reg = acb->pmu;
writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
if (arcmsr_wait_msgint_ready(acb))
@@ -551,6 +553,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
int8_t *psge = (int8_t *)&arcmsr_cdb->u;
uint32_t address_lo, address_hi;
int arccdbsize = 0x30;
+ int nseg;
ccb->pcmd = pcmd;
memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
@@ -561,20 +564,20 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
- if (pcmd->use_sg) {
- int length, sgcount, i, cdb_sgcount = 0;
- struct scatterlist *sl;
-
- /* Get Scatter Gather List from scsiport. */
- sl = (struct scatterlist *) pcmd->request_buffer;
- sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
- pcmd->sc_data_direction);
+
+ nseg = scsi_dma_map(pcmd);
+ BUG_ON(nseg < 0);
+
+ if (nseg) {
+ int length, i, cdb_sgcount = 0;
+ struct scatterlist *sg;
+
/* map stor port SG list to our iop SG List. */
- for (i = 0; i < sgcount; i++) {
+ scsi_for_each_sg(pcmd, sg, nseg, i) {
/* Get the physical address of the current data pointer */
- length = cpu_to_le32(sg_dma_len(sl));
- address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
- address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
+ length = cpu_to_le32(sg_dma_len(sg));
+ address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
+ address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
if (address_hi == 0) {
struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
@@ -591,32 +594,12 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
psge += sizeof (struct SG64ENTRY);
arccdbsize += sizeof (struct SG64ENTRY);
}
- sl++;
cdb_sgcount++;
}
arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
- arcmsr_cdb->DataLength = pcmd->request_bufflen;
+ arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
if ( arccdbsize > 256)
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
- } else if (pcmd->request_bufflen) {
- dma_addr_t dma_addr;
- dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
- pcmd->request_bufflen, pcmd->sc_data_direction);
- pcmd->SCp.dma_handle = dma_addr;
- address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
- address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
- if (address_hi == 0) {
- struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
- pdma_sg->address = address_lo;
- pdma_sg->length = pcmd->request_bufflen;
- } else {
- struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
- pdma_sg->addresshigh = address_hi;
- pdma_sg->address = address_lo;
- pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
- }
- arcmsr_cdb->sgcount = 1;
- arcmsr_cdb->DataLength = pcmd->request_bufflen;
}
if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
@@ -747,7 +730,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
int id, lun;
/*
****************************************************************
- ** areca cdb command done
+ ** areca cdb command done
****************************************************************
*/
while (1) {
@@ -758,20 +741,20 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
(flag_ccb << 5));
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if (ccb->startdone == ARCMSR_CCB_ABORTED) {
- struct scsi_cmnd *abortcmd=ccb->pcmd;
+ struct scsi_cmnd *abortcmd = ccb->pcmd;
if (abortcmd) {
abortcmd->result |= DID_ABORT >> 16;
arcmsr_ccb_complete(ccb, 1);
printk(KERN_NOTICE
- "arcmsr%d: ccb='0x%p' isr got aborted command \n"
+ "arcmsr%d: ccb ='0x%p' isr got aborted command \n"
, acb->host->host_no, ccb);
}
continue;
}
printk(KERN_NOTICE
- "arcmsr%d: isr get an illegal ccb command done acb='0x%p'"
- "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
- " ccboutstandingcount=%d \n"
+ "arcmsr%d: isr get an illegal ccb command done acb = '0x%p'"
+ "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
+ " ccboutstandingcount = %d \n"
, acb->host->host_no
, acb
, ccb
@@ -791,7 +774,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
switch(ccb->arcmsr_cdb.DeviceStatus) {
case ARCMSR_DEV_SELECT_TIMEOUT: {
acb->devstate[id][lun] = ARECA_RAID_GONE;
- ccb->pcmd->result = DID_TIME_OUT << 16;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_ccb_complete(ccb, 1);
}
break;
@@ -810,8 +793,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
break;
default:
printk(KERN_NOTICE
- "arcmsr%d: scsi id=%d lun=%d"
- " isr get command error done,"
+ "arcmsr%d: scsi id = %d lun = %d"
+ " isr get command error done, "
"but got unknown DeviceStatus = 0x%x \n"
, acb->host->host_no
, id
@@ -848,24 +831,21 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
int retvalue = 0, transfer_len = 0;
char *buffer;
+ struct scatterlist *sg;
uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
(uint32_t ) cmd->cmnd[6] << 16 |
(uint32_t ) cmd->cmnd[7] << 8 |
(uint32_t ) cmd->cmnd[8];
/* 4 bytes: Areca io control code */
- if (cmd->use_sg) {
- struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- if (cmd->use_sg > 1) {
- retvalue = ARCMSR_MESSAGE_FAIL;
- goto message_out;
- }
- transfer_len += sg->length;
- } else {
- buffer = cmd->request_buffer;
- transfer_len = cmd->request_bufflen;
+ sg = scsi_sglist(cmd);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ if (scsi_sg_count(cmd) > 1) {
+ retvalue = ARCMSR_MESSAGE_FAIL;
+ goto message_out;
}
+ transfer_len += sg->length;
+
if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
@@ -1057,12 +1037,9 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
retvalue = ARCMSR_MESSAGE_FAIL;
}
message_out:
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- sg = (struct scatterlist *) cmd->request_buffer;
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- }
return retvalue;
}
@@ -1085,6 +1062,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
case INQUIRY: {
unsigned char inqdata[36];
char *buffer;
+ struct scatterlist *sg;
if (cmd->device->lun) {
cmd->result = (DID_TIME_OUT << 16);
@@ -1096,7 +1074,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
inqdata[1] = 0;
/* rem media bit & Dev Type Modifier */
inqdata[2] = 0;
- /* ISO,ECMA,& ANSI versions */
+ /* ISO, ECMA, & ANSI versions */
inqdata[4] = 31;
/* length of additional data */
strncpy(&inqdata[8], "Areca ", 8);
@@ -1104,21 +1082,14 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
strncpy(&inqdata[16], "RAID controller ", 16);
/* Product Identification */
strncpy(&inqdata[32], "R001", 4); /* Product Revision */
- if (cmd->use_sg) {
- struct scatterlist *sg;
- sg = (struct scatterlist *) cmd->request_buffer;
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- } else {
- buffer = cmd->request_buffer;
- }
+ sg = scsi_sglist(cmd);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+
memcpy(buffer, inqdata, sizeof(inqdata));
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ sg = scsi_sglist(cmd);
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- sg = (struct scatterlist *) cmd->request_buffer;
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- }
cmd->scsi_done(cmd);
}
break;
@@ -1153,7 +1124,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
, acb->host->host_no);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if(target == 16) {
+ if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
return 0;
@@ -1166,7 +1137,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
printk(KERN_NOTICE
"arcmsr%d: block 'read/write'"
"command with gone raid volume"
- " Cmd=%2x, TargetId=%d, Lun=%d \n"
+ " Cmd = %2x, TargetId = %d, Lun = %d \n"
, acb->host->host_no
, cmd->cmnd[0]
, target, lun);
@@ -1257,7 +1228,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
(ccb == poll_ccb)) {
printk(KERN_NOTICE
- "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
+ "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
" poll command abort successfully \n"
, acb->host->host_no
, ccb->pcmd->device->id
@@ -1270,8 +1241,8 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
}
printk(KERN_NOTICE
"arcmsr%d: polling get an illegal ccb"
- " command done ccb='0x%p'"
- "ccboutstandingcount=%d \n"
+ " command done ccb ='0x%p'"
+ "ccboutstandingcount = %d \n"
, acb->host->host_no
, ccb
, atomic_read(&acb->ccboutstandingcount));
@@ -1288,7 +1259,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
switch(ccb->arcmsr_cdb.DeviceStatus) {
case ARCMSR_DEV_SELECT_TIMEOUT: {
acb->devstate[id][lun] = ARECA_RAID_GONE;
- ccb->pcmd->result = DID_TIME_OUT << 16;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
arcmsr_ccb_complete(ccb, 1);
}
break;
@@ -1307,7 +1278,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
break;
default:
printk(KERN_NOTICE
- "arcmsr%d: scsi id=%d lun=%d"
+ "arcmsr%d: scsi id = %d lun = %d"
" polling and getting command error done"
"but got unknown DeviceStatus = 0x%x \n"
, acb->host->host_no
@@ -1322,6 +1293,94 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
}
}
}
+static void arcmsr_done4_abort_postqueue(struct AdapterControlBlock *acb)
+{
+ int i = 0, found = 0;
+ int id, lun;
+ uint32_t flag_ccb, outbound_intstatus;
+ struct MessageUnit __iomem *reg = acb->pmu;
+ struct CommandControlBlock *ccb;
+ /*clear and abort all outbound posted Q*/
+
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
+(i++ < 256)){
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
+(flag_ccb << 5));
+ if (ccb){
+ if ((ccb->acb != acb)||(ccb->startdone != \
+ARCMSR_CCB_START)){
+ printk(KERN_NOTICE "arcmsr%d: polling get \
+an illegal ccb" "command done ccb = '0x%p'""ccboutstandingcount = %d \n",
+ acb->host->host_no, ccb,
+ atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)){
+ if (acb->devstate[id][lun] == ARECA_RAID_GONE)
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ else {
+ switch(ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE
+ "arcmsr%d: scsi id = %d \
+ lun = %d""polling and \
+ getting command error \
+ done""but got unknown \
+ DeviceStatus = 0x%x \n",
+ acb->host->host_no, id,
+ lun, ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+ found = 1;
+ }
+ }
+ if (found){
+ outbound_intstatus = readl(&reg->outbound_intstatus) & \
+ acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ /*clear interrupt*/
+ }
+ return;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
@@ -1355,7 +1414,6 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg = acb->pmu;
struct CommandControlBlock *ccb;
uint32_t intmask_org;
int i = 0;
@@ -1368,21 +1426,17 @@ static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
/* clear all outbound posted Q */
- for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
- readl(&reg->outbound_queueport);
+ arcmsr_done4_abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
ccb = acb->pccb_pool[i];
- if ((ccb->startdone == ARCMSR_CCB_START) ||
- (ccb->startdone == ARCMSR_CCB_ABORTED)) {
+ if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
- ccb->pcmd->result = DID_ABORT << 16;
- arcmsr_ccb_complete(ccb, 1);
}
}
/* enable all outbound interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
}
- atomic_set(&acb->ccboutstandingcount, 0);
+
}
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
@@ -1428,10 +1482,9 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
int i = 0;
printk(KERN_NOTICE
- "arcmsr%d: abort device command of scsi id=%d lun=%d \n",
+ "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
acb->host->host_no, cmd->device->id, cmd->device->lun);
acb->num_aborts++;
-
/*
************************************************
** the all interrupt service routine is locked
@@ -1486,10 +1539,306 @@ static const char *arcmsr_info(struct Scsi_Host *host)
type = "X-TYPE";
break;
}
- sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
+ sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
type, raid6 ? "( RAID6 capable)" : "",
ARCMSR_DRIVER_VERSION);
return buf;
}
+static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ struct AdapterControlBlock *acb;
+ uint8_t bus, dev_fun;
+ int error;
+
+ error = pci_enable_device(pdev);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof \
+(struct AdapterControlBlock));
+ if (!host)
+ return PCI_ERS_RESULT_DISCONNECT;
+ acb = (struct AdapterControlBlock *)host->hostdata;
+ memset(acb, 0, sizeof (struct AdapterControlBlock));
+
+ error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (error) {
+ error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (error) {
+ printk(KERN_WARNING
+ "scsi%d: No suitable DMA mask available\n",
+ host->host_no);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ }
+ bus = pdev->bus->number;
+ dev_fun = pdev->devfn;
+ acb = (struct AdapterControlBlock *) host->hostdata;
+ memset(acb, 0, sizeof(struct AdapterControlBlock));
+ acb->pdev = pdev;
+ acb->host = host;
+ host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
+ host->max_lun = ARCMSR_MAX_TARGETLUN;
+ host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
+ host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
+ host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
+ host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
+ host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ host->this_id = ARCMSR_SCSI_INITIATOR_ID;
+ host->unique_id = (bus << 8) | dev_fun;
+ host->irq = pdev->irq;
+ error = pci_request_regions(pdev, "arcmsr");
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ acb->pmu = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!acb->pmu) {
+ printk(KERN_NOTICE "arcmsr%d: memory"
+ " mapping region fail \n", acb->host->host_no);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+ ACB_F_MESSAGE_RQBUFFER_CLEARED |
+ ACB_F_MESSAGE_WQBUFFER_READED);
+ acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
+ INIT_LIST_HEAD(&acb->ccb_free_list);
+
+ error = arcmsr_alloc_ccb_pool(acb);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ error = request_irq(pdev->irq, arcmsr_do_interrupt,
+ IRQF_DISABLED | IRQF_SHARED, "arcmsr", acb);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ arcmsr_iop_init(acb);
+ if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
+ host->max_sectors = ARCMSR_MAX_XFER_SECTORS_B;
+
+ pci_set_drvdata(pdev, host);
+
+ error = scsi_add_host(host, &pdev->dev);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ error = arcmsr_alloc_sysfs_attr(acb);
+ if (error)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ scsi_scan_host(host);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct MessageUnit __iomem *reg = acb->pmu;
+ struct CommandControlBlock *ccb;
+ /*clear and abort all outbound posted Q*/
+ int i = 0, found = 0;
+ int id, lun;
+ uint32_t flag_ccb, outbound_intstatus;
+
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
+ (i++ < 256)){
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset
+ + (flag_ccb << 5));
+ if (ccb){
+ if ((ccb->acb != acb)||(ccb->startdone !=
+ ARCMSR_CCB_START)){
+ printk(KERN_NOTICE "arcmsr%d: polling \
+ get an illegal ccb"" command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n",
+ acb->host->host_no, ccb,
+ atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
+ if (acb->devstate[id][lun] ==
+ ARECA_RAID_GONE)
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ else {
+ switch(ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE
+ "arcmsr%d: scsi \
+ id = %d lun = %d"
+ " polling and \
+ getting command \
+ error done"
+ "but got unknown \
+ DeviceStatus = 0x%x \n"
+ , acb->host->host_no,
+ id, lun,
+ ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+ found = 1;
+ }
+ }
+ if (found){
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ /*clear interrupt*/
+ }
+ return;
+}
+
+
+static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ struct MessageUnit __iomem *reg = acb->pmu;
+ struct CommandControlBlock *ccb;
+ /*clear and abort all outbound posted Q*/
+ int i = 0, found = 0;
+ int id, lun;
+ uint32_t flag_ccb, outbound_intstatus;
+
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
+ (i++ < 256)){
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
+ (flag_ccb << 5));
+ if (ccb){
+ if ((ccb->acb != acb)||(ccb->startdone !=
+ ARCMSR_CCB_START)){
+ printk(KERN_NOTICE
+ "arcmsr%d: polling get an illegal ccb"
+ " command done ccb = '0x%p'"
+ "ccboutstandingcount = %d \n",
+ acb->host->host_no, ccb,
+ atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
+ if (acb->devstate[id][lun] == ARECA_RAID_GONE)
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ else {
+ switch(ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] =
+ ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE "arcmsr%d: \
+ scsi id = %d lun = %d"
+ " polling and \
+ getting command error done"
+ "but got unknown \
+ DeviceStatus = 0x%x \n"
+ , acb->host->host_no,
+ id, lun, ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] =
+ ARECA_RAID_GONE;
+ ccb->pcmd->result =
+ DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+ found = 1;
+ }
+ }
+ if (found){
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
+ acb->outbound_int_enable;
+ writel(outbound_intstatus, &reg->outbound_intstatus);
+ /*clear interrupt*/
+ }
+ return;
+}
+
+static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ switch (state) {
+ case pci_channel_io_frozen:
+ arcmsr_pci_ers_need_reset_forepart(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ arcmsr_pci_ers_disconnect_forepart(pdev);
+ return PCI_ERS_RESULT_DISCONNECT;
+ default:
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+}
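
The two handlers above only take effect once they are hooked into the driver's struct pci_driver through a struct pci_error_handlers table. The registration site is not part of this hunk; the sketch below shows the usual wiring, with the name arcmsr_pci_error_handlers and the remaining pci_driver fields assumed rather than taken from the patch.

static struct pci_error_handlers arcmsr_pci_error_handlers = {
	.error_detected	= arcmsr_pci_error_detected,
	.slot_reset	= arcmsr_pci_slot_reset,
};

static struct pci_driver arcmsr_pci_driver = {
	.name		= "arcmsr",
	/* .id_table, .probe and .remove as already defined by the driver */
	.err_handler	= &arcmsr_pci_error_handlers,
};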
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index cf9a21cea6d9..49d838e90a24 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -24,7 +24,7 @@
#define CUMANASCSI_PUBLIC_RELEASE 1
-#define NCR5380_implementation_fields int port, ctrl
+#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
#define NCR5380_local_declare() struct Scsi_Host *_instance
#define NCR5380_setup(instance) _instance = instance
#define NCR5380_read(reg) cumanascsi_read(_instance, reg)
@@ -33,6 +33,11 @@
#define NCR5380_queue_command cumanascsi_queue_command
#define NCR5380_proc_info cumanascsi_proc_info
+#define NCR5380_implementation_fields \
+ unsigned ctrl; \
+ void __iomem *base; \
+ void __iomem *dma
+
#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1
@@ -47,192 +52,162 @@ const char *cumanascsi_info(struct Scsi_Host *spnt)
return "";
}
-#ifdef NOT_EFFICIENT
-#define CTRL(p,v) outb(*ctrl = (v), (p) - 577)
-#define STAT(p) inb((p)+1)
-#define IN(p) inb((p))
-#define OUT(v,p) outb((v), (p))
-#else
-#define CTRL(p,v) (p[-2308] = (*ctrl = (v)))
-#define STAT(p) (p[4])
-#define IN(p) (*(p))
-#define IN2(p) ((unsigned short)(*(volatile unsigned long *)(p)))
-#define OUT(v,p) (*(p) = (v))
-#define OUT2(v,p) (*((volatile unsigned long *)(p)) = (v))
-#endif
-#define L(v) (((v)<<16)|((v) & 0x0000ffff))
-#define H(v) (((v)>>16)|((v) & 0xffff0000))
+#define CTRL 0x16fc
+#define STAT 0x2004
+#define L(v) (((v)<<16)|((v) & 0x0000ffff))
+#define H(v) (((v)>>16)|((v) & 0xffff0000))
static inline int
-NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr, int len)
+NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len)
{
- int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
- int oldctrl = *ctrl;
unsigned long *laddr;
-#ifdef NOT_EFFICIENT
- int iobase = instance->io_port;
- int dma_io = iobase & ~(0x3C0000>>2);
-#else
- volatile unsigned char *iobase = (unsigned char *)ioaddr(instance->io_port);
- volatile unsigned char *dma_io = (unsigned char *)((int)iobase & ~0x3C0000);
-#endif
+ void __iomem *dma = priv(host)->dma + 0x2000;
if(!len) return 0;
- CTRL(iobase, 0x02);
+ writeb(0x02, priv(host)->base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
- int status;
+ unsigned int status;
unsigned long v;
- status = STAT(iobase);
+ status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
continue;
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
- v=*laddr++; OUT2(L(v),dma_io); OUT2(H(v),dma_io);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
+ v=*laddr++; writew(L(v), dma); writew(H(v), dma);
len -= 32;
if(len == 0)
break;
}
addr = (unsigned char *)laddr;
- CTRL(iobase, 0x12);
+ writeb(0x12, priv(host)->base + CTRL);
+
while(len > 0)
{
- int status;
- status = STAT(iobase);
+ unsigned int status;
+ status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
- OUT(*addr++, dma_io);
+ writeb(*addr++, dma);
if(--len == 0)
break;
}
- status = STAT(iobase);
+ status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
- OUT(*addr++, dma_io);
+ writeb(*addr++, dma);
if(--len == 0)
break;
}
}
end:
- CTRL(iobase, oldctrl|0x40);
+ writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
return len;
}
static inline int
-NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr, int len)
+NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len)
{
- int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
- int oldctrl = *ctrl;
unsigned long *laddr;
-#ifdef NOT_EFFICIENT
- int iobase = instance->io_port;
- int dma_io = iobase & ~(0x3C0000>>2);
-#else
- volatile unsigned char *iobase = (unsigned char *)ioaddr(instance->io_port);
- volatile unsigned char *dma_io = (unsigned char *)((int)iobase & ~0x3C0000);
-#endif
+ void __iomem *dma = priv(host)->dma + 0x2000;
if(!len) return 0;
- CTRL(iobase, 0x00);
+ writeb(0x00, priv(host)->base + CTRL);
laddr = (unsigned long *)addr;
while(len >= 32)
{
- int status;
- status = STAT(iobase);
+ unsigned int status;
+ status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(!(status & 0x40))
continue;
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
- *laddr++ = IN2(dma_io)|(IN2(dma_io)<<16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
+ *laddr++ = readw(dma) | (readw(dma) << 16);
len -= 32;
if(len == 0)
break;
}
addr = (unsigned char *)laddr;
- CTRL(iobase, 0x10);
+ writeb(0x10, priv(host)->base + CTRL);
+
while(len > 0)
{
- int status;
- status = STAT(iobase);
+ unsigned int status;
+ status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
- *addr++ = IN(dma_io);
+ *addr++ = readb(dma);
if(--len == 0)
break;
}
- status = STAT(iobase);
+ status = readb(priv(host)->base + STAT);
if(status & 0x80)
goto end;
if(status & 0x40)
{
- *addr++ = IN(dma_io);
+ *addr++ = readb(dma);
if(--len == 0)
break;
}
}
end:
- CTRL(iobase, oldctrl|0x40);
+ writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
return len;
}
-#undef STAT
-#undef CTRL
-#undef IN
-#undef OUT
+static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+{
+ void __iomem *base = priv(host)->base;
+ unsigned char val;
-#define CTRL(p,v) outb(*ctrl = (v), (p) - 577)
+ writeb(0, base + CTRL);
-static char cumanascsi_read(struct Scsi_Host *instance, int reg)
-{
- unsigned int iobase = instance->io_port;
- int i;
- int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
+ val = readb(base + 0x2100 + (reg << 2));
- CTRL(iobase, 0);
- i = inb(iobase + 64 + reg);
- CTRL(iobase, 0x40);
+ priv(host)->ctrl = 0x40;
+ writeb(0x40, base + CTRL);
- return i;
+ return val;
}
-static void cumanascsi_write(struct Scsi_Host *instance, int reg, int value)
+static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
{
- int iobase = instance->io_port;
- int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl;
+ void __iomem *base = priv(host)->base;
- CTRL(iobase, 0);
- outb(value, iobase + 64 + reg);
- CTRL(iobase, 0x40);
-}
+ writeb(0, base + CTRL);
-#undef CTRL
+ writeb(value, base + 0x2100 + (reg << 2));
+
+ priv(host)->ctrl = 0x40;
+ writeb(0x40, base + CTRL);
+}
#include "../NCR5380.c"
@@ -256,32 +231,46 @@ static int __devinit
cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
- int ret = -ENOMEM;
+ int ret;
- host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata));
- if (!host)
+ ret = ecard_request_resources(ec);
+ if (ret)
goto out;
- host->io_port = ecard_address(ec, ECARD_IOC, ECARD_SLOW) + 0x800;
+ host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
+ priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+ ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+ priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->base || !priv(host)->dma) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
host->irq = ec->irq;
NCR5380_init(host, 0);
+ priv(host)->ctrl = 0;
+ writeb(0, priv(host)->base + CTRL);
+
host->n_io_port = 255;
if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
ret = -EBUSY;
- goto out_free;
+ goto out_unmap;
}
- ((struct NCR5380_hostdata *)host->hostdata)->ctrl = 0;
- outb(0x00, host->io_port - 577);
-
ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
"CumanaSCSI-1", host);
if (ret) {
printk("scsi%d: IRQ%d not free: %d\n",
host->host_no, host->irq, ret);
- goto out_release;
+ goto out_unmap;
}
printk("scsi%d: at port 0x%08lx irq %d",
@@ -301,10 +290,12 @@ cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
out_free_irq:
free_irq(host->irq, host);
- out_release:
- release_region(host->io_port, host->n_io_port);
- out_free:
+ out_unmap:
+ iounmap(priv(host)->base);
+ iounmap(priv(host)->dma);
scsi_host_put(host);
+ out_release:
+ ecard_release_resources(ec);
out:
return ret;
}
@@ -318,8 +309,10 @@ static void __devexit cumanascsi1_remove(struct expansion_card *ec)
scsi_remove_host(host);
free_irq(host->irq, host);
NCR5380_exit(host);
- release_region(host->io_port, host->n_io_port);
+ iounmap(priv(host)->base);
+ iounmap(priv(host)->dma);
scsi_host_put(host);
+ ecard_release_resources(ec);
}
static const struct ecard_id cumanascsi1_cids[] = {
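
The Cumana conversion above swaps port I/O for an ioremap()ed window reached through the hostdata area: where the old code used inb()/outb() on io_port + 64 + reg, the new code uses readb()/writeb() at base + 0x2100 + (reg << 2), i.e. one 5380 register per 32-bit slot. A minimal illustrative helper (cumana_reg() is a hypothetical name, not part of the driver):

static inline void __iomem *cumana_reg(void __iomem *base, unsigned int reg)
{
	/* each NCR5380 register occupies a 32-bit slot in the MMIO window */
	return base + 0x2100 + (reg << 2);
}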
diff --git a/drivers/scsi/arm/ecoscsi.c b/drivers/scsi/arm/ecoscsi.c
index 378e7af0c5d6..5265a9884338 100644
--- a/drivers/scsi/arm/ecoscsi.c
+++ b/drivers/scsi/arm/ecoscsi.c
@@ -34,35 +34,25 @@
#include "../scsi.h"
#include <scsi/scsi_host.h>
-#define NCR5380_implementation_fields int port, ctrl
-#define NCR5380_local_declare() struct Scsi_Host *_instance
-#define NCR5380_setup(instance) _instance = instance
+#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg) ecoscsi_read(_instance, reg)
-#define NCR5380_write(reg, value) ecoscsi_write(_instance, reg, value)
+#define NCR5380_local_declare() void __iomem *_base
+#define NCR5380_setup(host) _base = priv(host)->base
+
+#define NCR5380_read(reg) ({ writeb(reg | 8, _base); readb(_base + 4); })
+#define NCR5380_write(reg, value) ({ writeb(reg | 8, _base); writeb(value, _base + 4); })
#define NCR5380_intr ecoscsi_intr
#define NCR5380_queue_command ecoscsi_queue_command
#define NCR5380_proc_info ecoscsi_proc_info
+#define NCR5380_implementation_fields \
+ void __iomem *base
+
#include "../NCR5380.h"
#define ECOSCSI_PUBLIC_RELEASE 1
-static char ecoscsi_read(struct Scsi_Host *instance, int reg)
-{
- int iobase = instance->io_port;
- outb(reg | 8, iobase);
- return inb(iobase + 1);
-}
-
-static void ecoscsi_write(struct Scsi_Host *instance, int reg, int value)
-{
- int iobase = instance->io_port;
- outb(reg | 8, iobase);
- outb(value, iobase + 1);
-}
-
/*
* Function : ecoscsi_setup(char *str, int *ints)
*
@@ -82,73 +72,6 @@ const char * ecoscsi_info (struct Scsi_Host *spnt)
return "";
}
-#if 0
-#define STAT(p) inw(p + 144)
-
-static inline int NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr,
- int len)
-{
- int iobase = host->io_port;
-printk("writing %p len %d\n",addr, len);
- if(!len) return -1;
-
- while(1)
- {
- int status;
- while(((status = STAT(iobase)) & 0x100)==0);
- }
-}
-
-static inline int NCR5380_pread(struct Scsi_Host *host, unsigned char *addr,
- int len)
-{
- int iobase = host->io_port;
- int iobase2= host->io_port + 0x100;
- unsigned char *start = addr;
- int s;
-printk("reading %p len %d\n",addr, len);
- outb(inb(iobase + 128), iobase + 135);
- while(len > 0)
- {
- int status,b,i, timeout;
- timeout = 0x07FFFFFF;
- while(((status = STAT(iobase)) & 0x100)==0)
- {
- timeout--;
- if(status & 0x200 || !timeout)
- {
- printk("status = %p\n",status);
- outb(0, iobase + 135);
- return 1;
- }
- }
- if(len >= 128)
- {
- for(i=0; i<64; i++)
- {
- b = inw(iobase + 136);
- *addr++ = b;
- *addr++ = b>>8;
- }
- len -= 128;
- }
- else
- {
- b = inw(iobase + 136);
- *addr ++ = b;
- len -= 1;
- if(len)
- *addr ++ = b>>8;
- len -= 1;
- }
- }
- outb(0, iobase + 135);
- printk("first bytes = %02X %02X %02X %20X %02X %02X %02X\n",*start, start[1], start[2], start[3], start[4], start[5], start[6]);
- return 1;
-}
-#endif
-#undef STAT
-
#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1
@@ -173,25 +96,36 @@ static struct Scsi_Host *host;
static int __init ecoscsi_init(void)
{
+ void __iomem *_base;
+ int ret;
- host = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
- if (!host)
- return 0;
+ if (!request_mem_region(0x33a0000, 4096, "ecoscsi")) {
+ ret = -EBUSY;
+ goto out;
+ }
- host->io_port = 0x80ce8000;
- host->n_io_port = 144;
- host->irq = IRQ_NONE;
+ _base = ioremap(0x33a0000, 4096);
+ if (!_base) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
- if (!(request_region(host->io_port, host->n_io_port, "ecoscsi")) )
- goto unregister_scsi;
+ NCR5380_write(MODE_REG, 0x20); /* Is it really SCSI? */
+ if (NCR5380_read(MODE_REG) != 0x20) /* Write to a reg. */
+ goto out_unmap;
- ecoscsi_write(host, MODE_REG, 0x20); /* Is it really SCSI? */
- if (ecoscsi_read(host, MODE_REG) != 0x20) /* Write to a reg. */
- goto release_reg;
+ NCR5380_write(MODE_REG, 0x00); /* it back. */
+ if (NCR5380_read(MODE_REG) != 0x00)
+ goto out_unmap;
- ecoscsi_write(host, MODE_REG, 0x00 ); /* it back. */
- if (ecoscsi_read(host, MODE_REG) != 0x00)
- goto release_reg;
+ host = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ priv(host)->base = _base;
+ host->irq = IRQ_NONE;
NCR5380_init(host, 0);
@@ -206,24 +140,20 @@ static int __init ecoscsi_init(void)
scsi_scan_host(host);
return 0;
-release_reg:
- release_region(host->io_port, host->n_io_port);
-unregister_scsi:
- scsi_host_put(host);
- return -ENODEV;
+ out_unmap:
+ iounmap(_base);
+ out_release:
+ release_mem_region(0x33a0000, 4096);
+ out:
+ return ret;
}
static void __exit ecoscsi_exit(void)
{
scsi_remove_host(host);
-
- if (shpnt->irq != IRQ_NONE)
- free_irq(shpnt->irq, NULL);
NCR5380_exit(host);
- if (shpnt->io_port)
- release_region(shpnt->io_port, shpnt->n_io_port);
-
scsi_host_put(host);
+ release_mem_region(0x33a0000, 4096);
return 0;
}
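
The NCR5380_read/NCR5380_write macros introduced above implement an indexed access cycle: the register number (with bit 3 set) is latched at the base address, then the data moves through base + 4. Expressed as functions purely for illustration (ecoscsi_reg_read/ecoscsi_reg_write are hypothetical names, not driver code):

static inline u8 ecoscsi_reg_read(void __iomem *base, unsigned int reg)
{
	writeb(reg | 8, base);		/* select the register */
	return readb(base + 4);		/* read its value */
}

static inline void ecoscsi_reg_write(void __iomem *base, unsigned int reg, u8 val)
{
	writeb(reg | 8, base);		/* select the register */
	writeb(val, base + 4);		/* write the value */
}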
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index c21b8392c928..849cdf89f7bb 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -23,15 +23,18 @@
#define OAKSCSI_PUBLIC_RELEASE 1
-#define NCR5380_read(reg) oakscsi_read(_instance, reg)
-#define NCR5380_write(reg, value) oakscsi_write(_instance, reg, value)
+#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
+#define NCR5380_local_declare() void __iomem *_base
+#define NCR5380_setup(host) _base = priv(host)->base
+
+#define NCR5380_read(reg) readb(_base + ((reg) << 2))
+#define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2))
#define NCR5380_intr oakscsi_intr
#define NCR5380_queue_command oakscsi_queue_command
#define NCR5380_proc_info oakscsi_proc_info
-#define NCR5380_implementation_fields int port, ctrl
-#define NCR5380_local_declare() struct Scsi_Host *_instance
-#define NCR5380_setup(instance) _instance = instance
+#define NCR5380_implementation_fields \
+ void __iomem *base
#define BOARD_NORMAL 0
#define BOARD_NCR53C400 1
@@ -39,60 +42,62 @@
#include "../NCR5380.h"
#undef START_DMA_INITIATOR_RECEIVE_REG
-#define START_DMA_INITIATOR_RECEIVE_REG (7 + 128)
+#define START_DMA_INITIATOR_RECEIVE_REG (128 + 7)
const char * oakscsi_info (struct Scsi_Host *spnt)
{
return "";
}
-#define STAT(p) inw(p + 144)
-extern void inswb(int from, void *to, int len);
+#define STAT ((128 + 16) << 2)
+#define DATA ((128 + 8) << 2)
static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr,
int len)
{
- int iobase = instance->io_port;
+ void __iomem *base = priv(instance)->base;
+
printk("writing %p len %d\n",addr, len);
if(!len) return -1;
while(1)
{
int status;
- while(((status = STAT(iobase)) & 0x100)==0);
+ while (((status = readw(base + STAT)) & 0x100)==0);
}
}
static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr,
int len)
{
- int iobase = instance->io_port;
+ void __iomem *base = priv(instance)->base;
printk("reading %p len %d\n", addr, len);
while(len > 0)
{
- int status, timeout;
+ unsigned int status, timeout;
unsigned long b;
timeout = 0x01FFFFFF;
- while(((status = STAT(iobase)) & 0x100)==0)
+ while (((status = readw(base + STAT)) & 0x100)==0)
{
timeout--;
if(status & 0x200 || !timeout)
{
- printk("status = %08X\n",status);
+ printk("status = %08X\n", status);
return 1;
}
}
+
if(len >= 128)
{
- inswb(iobase + 136, addr, 128);
+ readsw(base + DATA, addr, 128);
addr += 128;
len -= 128;
}
else
{
- b = (unsigned long) inw(iobase + 136);
+ b = (unsigned long) readw(base + DATA);
*addr ++ = b;
len -= 1;
if(len)
@@ -103,10 +108,8 @@ printk("reading %p len %d\n", addr, len);
return 0;
}
-#define oakscsi_read(instance,reg) (inb((instance)->io_port + (reg)))
-#define oakscsi_write(instance,reg,val) (outb((val), (instance)->io_port + (reg)))
-
#undef STAT
+#undef DATA
#include "../NCR5380.c"
@@ -132,18 +135,26 @@ oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
struct Scsi_Host *host;
int ret = -ENOMEM;
- host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata));
- if (!host)
+ ret = ecard_request_resources(ec);
+ if (ret)
goto out;
- host->io_port = ecard_address(ec, ECARD_MEMC, 0);
+ host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata));
+ if (!host) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+ ecard_resource_len(ec, ECARD_RES_MEMC));
+ if (!priv(host)->base) {
+ ret = -ENOMEM;
+ goto unreg;
+ }
+
host->irq = IRQ_NONE;
host->n_io_port = 255;
- ret = -EBUSY;
- if (!request_region (host->io_port, host->n_io_port, "Oak SCSI"))
- goto unreg;
-
NCR5380_init(host, 0);
printk("scsi%d: at port 0x%08lx irqs disabled",
@@ -156,15 +167,17 @@ oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ret = scsi_add_host(host, &ec->dev);
if (ret)
- goto out_release;
+ goto out_unmap;
scsi_scan_host(host);
goto out;
- out_release:
- release_region(host->io_port, host->n_io_port);
+ out_unmap:
+ iounmap(priv(host)->base);
unreg:
scsi_host_put(host);
+ release:
+ ecard_release_resources(ec);
out:
return ret;
}
@@ -177,8 +190,9 @@ static void __devexit oakscsi_remove(struct expansion_card *ec)
scsi_remove_host(host);
NCR5380_exit(host);
- release_region(host->io_port, host->n_io_port);
+ iounmap(priv(host)->base);
scsi_host_put(host);
+ ecard_release_resources(ec);
}
static const struct ecard_id oakscsi_cids[] = {
diff --git a/drivers/scsi/bvme6000.c b/drivers/scsi/bvme6000.c
deleted file mode 100644
index 599b400a3c43..000000000000
--- a/drivers/scsi/bvme6000.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
- *
- * Based on work by Alan Hourihane
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/blkdev.h>
-#include <linux/zorro.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/bvme6000hw.h>
-#include <asm/irq.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "53c7xx.h"
-#include "bvme6000.h"
-
-#include<linux/stat.h>
-
-
-int bvme6000_scsi_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- int clock;
- long long options;
-
- if (called)
- return 0;
- if (!MACH_IS_BVME6000)
- return 0;
-
- tpnt->proc_name = "BVME6000";
-
- options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
-
- clock = 40000000; /* 66MHz SCSI Clock */
-
- ncr53c7xx_init(tpnt, 0, 710, (unsigned long)BVME_NCR53C710_BASE,
- 0, BVME_IRQ_SCSI, DMA_NONE,
- options, clock);
- called = 1;
- return 1;
-}
-
-static int bvme6000_scsi_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "BVME6000 NCR53c710 SCSI",
- .detect = bvme6000_scsi_detect,
- .release = bvme6000_scsi_release,
- .queuecommand = NCR53c7xx_queue_command,
- .abort = NCR53c7xx_abort,
- .reset = NCR53c7xx_reset,
- .can_queue = 24,
- .this_id = 7,
- .sg_tablesize = 63,
- .cmd_per_lun = 3,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
diff --git a/drivers/scsi/bvme6000.h b/drivers/scsi/bvme6000.h
deleted file mode 100644
index ea3e4b2b9220..000000000000
--- a/drivers/scsi/bvme6000.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef BVME6000_SCSI_H
-#define BVME6000_SCSI_H
-
-#include <linux/types.h>
-
-int bvme6000_scsi_detect(struct scsi_host_template *);
-const char *NCR53c7x0_info(void);
-int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int NCR53c7xx_abort(Scsi_Cmnd *);
-int NCR53c7x0_release (struct Scsi_Host *);
-int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
-void NCR53c7x0_intr(int irq, void *dev_id);
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 3
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 24
-#endif
-
-#include <scsi/scsicam.h>
-
-#endif /* BVME6000_SCSI_H */
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
new file mode 100644
index 000000000000..cac354086737
--- /dev/null
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -0,0 +1,136 @@
+/*
+ * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
+ *
+ * Based on work by Alan Hourihane and Kars de Jong
+ *
+ * Rewritten to use 53c700.c by Richard Hirst <richard@sleepie.demon.co.uk>
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/bvme6000hw.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Richard Hirst <richard@sleepie.demon.co.uk>");
+MODULE_DESCRIPTION("BVME6000 NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template bvme6000_scsi_driver_template = {
+ .name = "BVME6000 NCR53c710 SCSI",
+ .proc_name = "BVME6000",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *bvme6000_scsi_device;
+
+static __devinit int
+bvme6000_probe(struct device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!MACH_IS_BVME6000)
+ goto out;
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "bvme6000-scsi: "
+ "Failed to allocate host data\n");
+ goto out;
+ }
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)BVME_NCR53C710_BASE;
+ hostdata->clock = 40; /* XXX - depends on the CPU clock! */
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ /* and register the chip */
+ host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, dev);
+ if (!host) {
+ printk(KERN_ERR "bvme6000-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+ host->base = BVME_NCR53C710_BASE;
+ host->this_id = 7;
+ host->irq = BVME_IRQ_SCSI;
+ if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi",
+ host)) {
+ printk(KERN_ERR "bvme6000-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ dev_set_drvdata(dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static __devexit int
+bvme6000_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+
+ return 0;
+}
+
+static struct device_driver bvme6000_scsi_driver = {
+ .name = "bvme6000-scsi",
+ .bus = &platform_bus_type,
+ .probe = bvme6000_probe,
+ .remove = __devexit_p(bvme6000_device_remove),
+};
+
+static int __init bvme6000_scsi_init(void)
+{
+ int err;
+
+ err = driver_register(&bvme6000_scsi_driver);
+ if (err)
+ return err;
+
+ bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(bvme6000_scsi_device)) {
+ driver_unregister(&bvme6000_scsi_driver);
+ return PTR_ERR(bvme6000_scsi_device);
+ }
+
+ return 0;
+}
+
+static void __exit bvme6000_scsi_exit(void)
+{
+ platform_device_unregister(bvme6000_scsi_device);
+ driver_unregister(&bvme6000_scsi_driver);
+}
+
+module_init(bvme6000_scsi_init);
+module_exit(bvme6000_scsi_exit);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 564ea90ed3a0..7b8a3457b696 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -979,6 +979,7 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
+ int nseg;
enum dma_data_direction dir = cmd->sc_data_direction;
dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n",
cmd->pid, dcb->target_id, dcb->target_lun);
@@ -1000,27 +1001,30 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
srb->scsi_phase = PH_BUS_FREE; /* initial phase */
srb->end_message = 0;
- if (dir == PCI_DMA_NONE || !cmd->request_buffer) {
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+
+ if (dir == PCI_DMA_NONE || !nseg) {
dprintkdbg(DBG_0,
"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
- cmd->bufflen, cmd->request_buffer,
- cmd->use_sg, srb->segment_x[0].address);
- } else if (cmd->use_sg) {
+ cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
+ srb->segment_x[0].address);
+ } else {
int i;
- u32 reqlen = cmd->request_bufflen;
- struct scatterlist *sl = (struct scatterlist *)
- cmd->request_buffer;
+ u32 reqlen = scsi_bufflen(cmd);
+ struct scatterlist *sg;
struct SGentry *sgp = srb->segment_x;
- srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg,
- dir);
+
+ srb->sg_count = nseg;
+
dprintkdbg(DBG_0,
- "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
- reqlen, cmd->request_buffer, cmd->use_sg,
- srb->sg_count);
+ "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
+ reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
+ srb->sg_count);
- for (i = 0; i < srb->sg_count; i++) {
- u32 busaddr = (u32)sg_dma_address(&sl[i]);
- u32 seglen = (u32)sl[i].length;
+ scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
+ u32 busaddr = (u32)sg_dma_address(sg);
+ u32 seglen = (u32)sg->length;
sgp[i].address = busaddr;
sgp[i].length = seglen;
srb->total_xfer_length += seglen;
@@ -1050,23 +1054,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
- } else {
- srb->total_xfer_length = cmd->request_bufflen;
- srb->sg_count = 1;
- srb->segment_x[0].address =
- pci_map_single(dcb->acb->dev, cmd->request_buffer,
- srb->total_xfer_length, dir);
-
- /* Fixup for WIDE padding - make sure length is even */
- if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
- srb->total_xfer_length++;
-
- srb->segment_x[0].length = srb->total_xfer_length;
-
- dprintkdbg(DBG_0,
- "build_srb: [1] len=%d buf=%p use_sg=%d map=%08x\n",
- srb->total_xfer_length, cmd->request_buffer,
- cmd->use_sg, srb->segment_x[0].address);
}
srb->request_length = srb->total_xfer_length;
@@ -2128,7 +2115,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
/*clear_fifo(acb, "DOP1"); */
/* KG: What is this supposed to be useful for? WIDE padding stuff? */
if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
- && srb->cmd->request_bufflen % 2) {
+ && scsi_bufflen(srb->cmd) % 2) {
d_left_counter = 0;
dprintkl(KERN_INFO,
"data_out_phase0: Discard 1 byte (0x%02x)\n",
@@ -2159,7 +2146,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
sg_update_list(srb, d_left_counter);
/* KG: Most ugly hack! Apparently, this works around a chip bug */
if ((srb->segment_x[srb->sg_index].length ==
- diff && srb->cmd->use_sg)
+ diff && scsi_sg_count(srb->cmd))
|| ((oldxferred & ~PAGE_MASK) ==
(PAGE_SIZE - diff))
) {
@@ -2289,19 +2276,15 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
unsigned char *virt, *base = NULL;
unsigned long flags = 0;
size_t len = left_io;
+ size_t offset = srb->request_length - left_io;
+
+ local_irq_save(flags);
+ /* Assumption: it's inside one page as it's at most 4 bytes and
+ I just assume it's on a 4-byte boundary */
+ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
+ srb->sg_count, &offset, &len);
+ virt = base + offset;
- if (srb->cmd->use_sg) {
- size_t offset = srb->request_length - left_io;
- local_irq_save(flags);
- /* Assumption: it's inside one page as it's at most 4 bytes and
- I just assume it's on a 4-byte boundary */
- base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
- srb->sg_count, &offset, &len);
- virt = base + offset;
- } else {
- virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
- len = left_io;
- }
left_io -= len;
while (len) {
@@ -2341,10 +2324,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
}
- if (srb->cmd->use_sg) {
- scsi_kunmap_atomic_sg(base);
- local_irq_restore(flags);
- }
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
}
/*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
/*srb->total_xfer_length = 0; */
@@ -2455,7 +2436,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
*/
srb->state |= SRB_DATA_XFER;
DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
- if (srb->cmd->use_sg) { /* with S/G */
+ if (scsi_sg_count(srb->cmd)) { /* with S/G */
io_dir |= DMACMD_SG;
DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
srb->sg_bus_addr +
@@ -2513,18 +2494,14 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
unsigned char *virt, *base = NULL;
unsigned long flags = 0;
size_t len = left_io;
+ size_t offset = srb->request_length - left_io;
+
+ local_irq_save(flags);
+ /* Again, max 4 bytes */
+ base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
+ srb->sg_count, &offset, &len);
+ virt = base + offset;
- if (srb->cmd->use_sg) {
- size_t offset = srb->request_length - left_io;
- local_irq_save(flags);
- /* Again, max 4 bytes */
- base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
- srb->sg_count, &offset, &len);
- virt = base + offset;
- } else {
- virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
- len = left_io;
- }
left_io -= len;
while (len--) {
@@ -2536,10 +2513,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
sg_subtract_one(srb);
}
- if (srb->cmd->use_sg) {
- scsi_kunmap_atomic_sg(base);
- local_irq_restore(flags);
- }
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
}
if (srb->dcb->sync_period & WIDE_SYNC) {
if (ln % 2) {
@@ -3295,7 +3270,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
- if (cmd->use_sg && dir != PCI_DMA_NONE) {
+
+ if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3303,16 +3279,9 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
SEGMENTX_LEN,
PCI_DMA_TODEVICE);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
- cmd->use_sg, cmd->request_buffer);
+ scsi_sg_count(cmd), scsi_sglist(cmd));
/* unmap the sg segments */
- pci_unmap_sg(acb->dev,
- (struct scatterlist *)cmd->request_buffer,
- cmd->use_sg, dir);
- } else if (cmd->request_buffer && dir != PCI_DMA_NONE) {
- dprintkdbg(DBG_SG, "pci_unmap_srb: buffer=%08x(%05x)\n",
- srb->segment_x[0].address, cmd->request_bufflen);
- pci_unmap_single(acb->dev, srb->segment_x[0].address,
- cmd->request_bufflen, dir);
+ scsi_dma_unmap(cmd);
}
}
@@ -3352,8 +3321,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid,
srb->cmd->device->id, srb->cmd->device->lun);
dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
- srb, cmd->use_sg, srb->sg_index, srb->sg_count,
- cmd->request_buffer);
+ srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
+ scsi_sglist(cmd));
status = srb->target_status;
if (srb->flag & AUTO_REQSENSE) {
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
@@ -3482,16 +3451,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (dir != PCI_DMA_NONE) {
- if (cmd->use_sg)
- pci_dma_sync_sg_for_cpu(acb->dev,
- (struct scatterlist *)cmd->
- request_buffer, cmd->use_sg, dir);
- else if (cmd->request_buffer)
- pci_dma_sync_single_for_cpu(acb->dev,
- srb->segment_x[0].address,
- cmd->request_bufflen, dir);
- }
+ if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
+ pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), dir);
+
ckc_only = 0;
/* Check Error Conditions */
ckc_e:
@@ -3500,19 +3463,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
unsigned char *base = NULL;
struct ScsiInqData *ptr;
unsigned long flags = 0;
+ struct scatterlist* sg = scsi_sglist(cmd);
+ size_t offset = 0, len = sizeof(struct ScsiInqData);
- if (cmd->use_sg) {
- struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
- size_t offset = 0, len = sizeof(struct ScsiInqData);
-
- local_irq_save(flags);
- base = scsi_kmap_atomic_sg(sg, cmd->use_sg, &offset, &len);
- ptr = (struct ScsiInqData *)(base + offset);
- } else
- ptr = (struct ScsiInqData *)(cmd->request_buffer);
+ local_irq_save(flags);
+ base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
+ ptr = (struct ScsiInqData *)(base + offset);
if (!ckc_only && (cmd->result & RES_DID) == 0
- && cmd->cmnd[2] == 0 && cmd->request_bufflen >= 8
+ && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
&& dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
@@ -3527,14 +3486,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (cmd->use_sg) {
- scsi_kunmap_atomic_sg(base);
- local_irq_restore(flags);
- }
+ scsi_kunmap_atomic_sg(base);
+ local_irq_restore(flags);
}
/* Here is the info for Doug Gilbert's sg3 ... */
- cmd->resid = srb->total_xfer_length;
+ scsi_set_resid(cmd, srb->total_xfer_length);
/* This may be interpreted by sb. or not ... */
cmd->SCp.this_residual = srb->total_xfer_length;
cmd->SCp.buffers_residual = 0;
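
The dc395x changes follow the same accessor conversion applied to several drivers in this series: request_buffer/request_bufflen/use_sg are replaced by scsi_sglist()/scsi_bufflen()/scsi_sg_count(), and explicit pci_map_sg()/pci_map_single() calls by scsi_dma_map()/scsi_dma_unmap(). A minimal sketch of the idiom, assuming only <scsi/scsi_cmnd.h> (example_map_command() is a hypothetical helper, not driver code):

#include <scsi/scsi_cmnd.h>

static unsigned int example_map_command(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* maps scsi_sglist(cmd); 0 means no data */
	if (nseg <= 0)
		return 0;

	scsi_for_each_sg(cmd, sg, nseg, i)
		total += sg_dma_len(sg);	/* DMA address: sg_dma_address(sg) */

	scsi_dma_unmap(cmd);		/* normally deferred to command completion */
	return total;
}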
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 8c7d2bbf9b1a..2e2362d787ca 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2078,12 +2078,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
u32 *lenptr;
int direction;
int scsidir;
+ int nseg;
u32 len;
u32 reqlen;
s32 rcode;
memset(msg, 0 , sizeof(msg));
- len = cmd->request_bufflen;
+ len = scsi_bufflen(cmd);
direction = 0x00000000;
scsidir = 0x00000000; // DATA NO XFER
@@ -2140,21 +2141,21 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
lenptr=mptr++; /* Remember me - fill in when we know */
reqlen = 14; // SINGLE SGE
/* Now fill in the SGList and command */
- if(cmd->use_sg) {
- struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
- int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
- cmd->sc_data_direction);
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
len = 0;
- for(i = 0 ; i < sg_count; i++) {
+ scsi_for_each_sg(cmd, sg, nseg, i) {
*mptr++ = direction|0x10000000|sg_dma_len(sg);
len+=sg_dma_len(sg);
*mptr++ = sg_dma_address(sg);
- sg++;
+ /* Make this an end of list */
+ if (i == nseg - 1)
+ mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
}
- /* Make this an end of list */
- mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
reqlen = mptr - msg;
*lenptr = len;
@@ -2163,16 +2164,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
len, cmd->underflow);
}
} else {
- *lenptr = len = cmd->request_bufflen;
- if(len == 0) {
- reqlen = 12;
- } else {
- *mptr++ = 0xD0000000|direction|cmd->request_bufflen;
- *mptr++ = pci_map_single(pHba->pDev,
- cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ *lenptr = len = 0;
+ reqlen = 12;
}
/* Stick the headers on */
@@ -2232,7 +2225,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
hba_status = detailed_status >> 8;
// calculate resid for sg
- cmd->resid = cmd->request_bufflen - readl(reply+5);
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
pHba = (adpt_hba*) cmd->device->host->hostdata[0];
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 2d38025861a5..a83e9f150b97 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1609,8 +1609,9 @@ static int eata2x_detect(struct scsi_host_template *tpnt)
static void map_dma(unsigned int i, struct hostdata *ha)
{
- unsigned int k, count, pci_dir;
- struct scatterlist *sgpnt;
+ unsigned int k, pci_dir;
+ int count;
+ struct scatterlist *sg;
struct mscp *cpp;
struct scsi_cmnd *SCpnt;
@@ -1625,38 +1626,19 @@ static void map_dma(unsigned int i, struct hostdata *ha)
cpp->sense_len = sizeof SCpnt->sense_buffer;
- if (!SCpnt->use_sg) {
-
- /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */
- if (!SCpnt->request_bufflen)
- pci_dir = PCI_DMA_BIDIRECTIONAL;
-
- if (SCpnt->request_buffer)
- cpp->data_address = H2DEV(pci_map_single(ha->pdev,
- SCpnt->
- request_buffer,
- SCpnt->
- request_bufflen,
- pci_dir));
-
- cpp->data_len = H2DEV(SCpnt->request_bufflen);
- return;
- }
-
- sgpnt = (struct scatterlist *)SCpnt->request_buffer;
- count = pci_map_sg(ha->pdev, sgpnt, SCpnt->use_sg, pci_dir);
-
- for (k = 0; k < count; k++) {
- cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k]));
- cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k]));
+ count = scsi_dma_map(SCpnt);
+ BUG_ON(count < 0);
+ scsi_for_each_sg(SCpnt, sg, count, k) {
+ cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+ cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
}
cpp->sg = 1;
cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist,
- SCpnt->use_sg *
+ scsi_sg_count(SCpnt) *
sizeof(struct sg_list),
pci_dir));
- cpp->data_len = H2DEV((SCpnt->use_sg * sizeof(struct sg_list)));
+ cpp->data_len = H2DEV((scsi_sg_count(SCpnt) * sizeof(struct sg_list)));
}
static void unmap_dma(unsigned int i, struct hostdata *ha)
@@ -1673,9 +1655,7 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_unmap_sg(ha->pdev, SCpnt->request_buffer, SCpnt->use_sg,
- pci_dir);
+ scsi_dma_unmap(SCpnt);
if (!DEV2H(cpp->data_len))
pci_dir = PCI_DMA_BIDIRECTIONAL;
@@ -1700,9 +1680,9 @@ static void sync_dma(unsigned int i, struct hostdata *ha)
DEV2H(cpp->sense_len),
PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_dma_sync_sg_for_cpu(ha->pdev, SCpnt->request_buffer,
- SCpnt->use_sg, pci_dir);
+ if (scsi_sg_count(SCpnt))
+ pci_dma_sync_sg_for_cpu(ha->pdev, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt), pci_dir);
if (!DEV2H(cpp->data_len))
pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 71caf2ded6ba..77b06a983fa7 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -324,17 +324,14 @@ static void esp_reset_esp(struct esp *esp)
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
- struct scatterlist *sg = cmd->request_buffer;
+ struct scatterlist *sg = scsi_sglist(cmd);
int dir = cmd->sc_data_direction;
int total, i;
if (dir == DMA_NONE)
return;
- BUG_ON(cmd->use_sg == 0);
-
- spriv->u.num_sg = esp->ops->map_sg(esp, sg,
- cmd->use_sg, dir);
+ spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg;
@@ -407,8 +404,7 @@ static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
if (dir == DMA_NONE)
return;
- esp->ops->unmap_sg(esp, cmd->request_buffer,
- spriv->u.num_sg, dir);
+ esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -921,7 +917,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
struct scsi_device *dev = cmd->device;
- struct esp *esp = host_to_esp(dev->host);
+ struct esp *esp = shost_priv(dev->host);
struct esp_cmd_priv *spriv;
struct esp_cmd_entry *ent;
@@ -2357,7 +2353,7 @@ EXPORT_SYMBOL(scsi_esp_unregister);
static int esp_slave_alloc(struct scsi_device *dev)
{
- struct esp *esp = host_to_esp(dev->host);
+ struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
struct esp_lun_data *lp;
@@ -2381,7 +2377,7 @@ static int esp_slave_alloc(struct scsi_device *dev)
static int esp_slave_configure(struct scsi_device *dev)
{
- struct esp *esp = host_to_esp(dev->host);
+ struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
@@ -2423,7 +2419,7 @@ static void esp_slave_destroy(struct scsi_device *dev)
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
- struct esp *esp = host_to_esp(cmd->device->host);
+ struct esp *esp = shost_priv(cmd->device->host);
struct esp_cmd_entry *ent, *tmp;
struct completion eh_done;
unsigned long flags;
@@ -2539,7 +2535,7 @@ out_failure:
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
- struct esp *esp = host_to_esp(cmd->device->host);
+ struct esp *esp = shost_priv(cmd->device->host);
struct completion eh_reset;
unsigned long flags;
@@ -2575,7 +2571,7 @@ static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- struct esp *esp = host_to_esp(cmd->device->host);
+ struct esp *esp = shost_priv(cmd->device->host);
unsigned long flags;
spin_lock_irqsave(esp->host->host_lock, flags);
@@ -2615,7 +2611,7 @@ EXPORT_SYMBOL(scsi_esp_template);
static void esp_get_signalling(struct Scsi_Host *host)
{
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
enum spi_signal_type type;
if (esp->flags & ESP_FLAG_DIFFERENTIAL)
@@ -2629,7 +2625,7 @@ static void esp_get_signalling(struct Scsi_Host *host)
static void esp_set_offset(struct scsi_target *target, int offset)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_offset = offset;
@@ -2639,7 +2635,7 @@ static void esp_set_offset(struct scsi_target *target, int offset)
static void esp_set_period(struct scsi_target *target, int period)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_period = period;
@@ -2649,7 +2645,7 @@ static void esp_set_period(struct scsi_target *target, int period)
static void esp_set_width(struct scsi_target *target, int width)
{
struct Scsi_Host *host = dev_to_shost(target->dev.parent);
- struct esp *esp = host_to_esp(host);
+ struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
tp->nego_goal_width = (width ? 1 : 0);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8d4a6690401f..856e38b14861 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -220,7 +220,7 @@
#define ESP_BUSID_RESELID 0x10
#define ESP_BUSID_CTR32BIT 0x40
-#define ESP_BUS_TIMEOUT 250 /* In milli-seconds */
+#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
#define ESP_TIMEO_CONST 8192
#define ESP_NEG_DEFP(mhz, cfact) \
((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
@@ -517,8 +517,6 @@ struct esp {
struct sbus_dma *dma;
};
-#define host_to_esp(host) ((struct esp *)(host)->hostdata)
-
/* A front-end driver for the ESP chip should do the following in
* it's device probe routine:
* 1) Allocate the host and private area using scsi_host_alloc()
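
Dropping the host_to_esp() macro works because shost_priv() already returns the per-host private area that scsi_host_alloc() reserves behind struct Scsi_Host; it is roughly equivalent to the following sketch:

static inline void *example_shost_priv(struct Scsi_Host *shost)
{
	/* hostdata[] is the driver-private area allocated with the host */
	return (void *)shost->hostdata;
}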
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 5d4ea6f77953..36169d597e98 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -410,6 +410,8 @@ static irqreturn_t do_fdomain_16x0_intr( int irq, void *dev_id );
static char * fdomain = NULL;
module_param(fdomain, charp, 0);
+#ifndef PCMCIA
+
static unsigned long addresses[] = {
0xc8000,
0xca000,
@@ -426,6 +428,8 @@ static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
+#endif /* !PCMCIA */
+
/*
READ THIS BEFORE YOU ADD A SIGNATURE!
@@ -458,6 +462,8 @@ static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
*/
+#ifndef PCMCIA
+
static struct signature {
const char *signature;
int sig_offset;
@@ -503,6 +509,8 @@ static struct signature {
#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
+#endif /* !PCMCIA */
+
static void print_banner( struct Scsi_Host *shpnt )
{
if (!shpnt) return; /* This won't ever happen */
@@ -633,6 +641,8 @@ static int fdomain_test_loopback( void )
return 0;
}
+#ifndef PCMCIA
+
/* fdomain_get_irq assumes that we have a valid MCA ID for a
TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
bios_base matches these ports. If someone was unlucky enough to have
@@ -667,7 +677,6 @@ static int fdomain_get_irq( int base )
static int fdomain_isa_detect( int *irq, int *iobase )
{
-#ifndef PCMCIA
int i, j;
int base = 0xdeadbeef;
int flag = 0;
@@ -786,11 +795,22 @@ found:
*iobase = base;
return 1; /* success */
-#else
- return 0;
-#endif
}
+#else /* PCMCIA */
+
+static int fdomain_isa_detect( int *irq, int *iobase )
+{
+ if (irq)
+ *irq = 0;
+ if (iobase)
+ *iobase = 0;
+ return 0;
+}
+
+#endif /* !PCMCIA */
+
+
/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
iobase) This function gets the Interrupt Level and I/O base address from
the PCI configuration registers. */
@@ -1345,16 +1365,15 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
#if ERRORS_ONLY
if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
- if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) {
+ char *buf = scsi_sglist(current_SC);
+ if ((unsigned char)(*(buf + 2)) & 0x0f) {
unsigned char key;
unsigned char code;
unsigned char qualifier;
- key = (unsigned char)(*((char *)current_SC->request_buffer + 2))
- & 0x0f;
- code = (unsigned char)(*((char *)current_SC->request_buffer + 12));
- qualifier = (unsigned char)(*((char *)current_SC->request_buffer
- + 13));
+ key = (unsigned char)(*(buf + 2)) & 0x0f;
+ code = (unsigned char)(*(buf + 12));
+ qualifier = (unsigned char)(*(buf + 13));
if (key != UNIT_ATTENTION
&& !(key == NOT_READY
@@ -1405,8 +1424,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
SCpnt->target,
*(unsigned char *)SCpnt->cmnd,
- SCpnt->use_sg,
- SCpnt->request_bufflen );
+ scsi_sg_count(SCpnt),
+ scsi_bufflen(SCpnt));
#endif
fdomain_make_bus_idle();
@@ -1416,20 +1435,19 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
/* Initialize static data */
- if (current_SC->use_sg) {
- current_SC->SCp.buffer =
- (struct scatterlist *)current_SC->request_buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
- current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
- current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
+ if (scsi_sg_count(current_SC)) {
+ current_SC->SCp.buffer = scsi_sglist(current_SC);
+ current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
+ + current_SC->SCp.buffer->offset;
+ current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
+ current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
} else {
- current_SC->SCp.ptr = (char *)current_SC->request_buffer;
- current_SC->SCp.this_residual = current_SC->request_bufflen;
- current_SC->SCp.buffer = NULL;
- current_SC->SCp.buffers_residual = 0;
+ current_SC->SCp.ptr = 0;
+ current_SC->SCp.this_residual = 0;
+ current_SC->SCp.buffer = NULL;
+ current_SC->SCp.buffers_residual = 0;
}
-
-
+
current_SC->SCp.Status = 0;
current_SC->SCp.Message = 0;
current_SC->SCp.have_data_in = 0;
@@ -1472,8 +1490,8 @@ static void print_info(struct scsi_cmnd *SCpnt)
SCpnt->SCp.phase,
SCpnt->device->id,
*(unsigned char *)SCpnt->cmnd,
- SCpnt->use_sg,
- SCpnt->request_bufflen );
+ scsi_sg_count(SCpnt),
+ scsi_bufflen(SCpnt));
printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
SCpnt->SCp.sent_command,
SCpnt->SCp.have_data_in,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 60446b88f721..d0b95ce0ba00 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -876,7 +876,7 @@ static int __init gdth_search_pci(gdth_pci_str *pcistr)
/* Vortex only makes RAID controllers.
* We do not really want to specify all 550 ids here, so wildcard match.
*/
-static struct pci_device_id gdthtable[] __attribute_used__ = {
+static struct pci_device_id gdthtable[] __maybe_unused = {
{PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID},
{PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID},
@@ -1955,7 +1955,7 @@ static int __init gdth_search_drives(int hanum)
for (j = 0; j < 12; ++j)
rtc[j] = CMOS_READ(j);
} while (rtc[0] != CMOS_READ(0));
- spin_lock_irqrestore(&rtc_lock, flags);
+ spin_unlock_irqrestore(&rtc_lock, flags);
TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
*(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
/* 3. send to controller firmware */
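
The gdth change above corrects a lock that was re-acquired instead of released: a spin_lock_irqsave() must be paired with spin_unlock_irqrestore() on the same flags. A self-contained sketch of the intended pattern around the RTC read loop (example_read_rtc() is illustrative, not the driver's function):

#include <linux/spinlock.h>
#include <linux/mc146818rtc.h>	/* rtc_lock, CMOS_READ() */

static void example_read_rtc(unsigned char rtc[12])
{
	unsigned long flags;
	int j;

	spin_lock_irqsave(&rtc_lock, flags);
	do {
		for (j = 0; j < 12; ++j)
			rtc[j] = CMOS_READ(j);
	} while (rtc[0] != CMOS_READ(0));	/* retry if the seconds register ticked */
	spin_unlock_irqrestore(&rtc_lock, flags);	/* release the same lock and flags */
}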
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index bec83cbee59a..0e579ca45814 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -339,20 +339,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
scp = hba->reqs[tag].scp;
- if (HPT_SCP(scp)->mapped) {
- if (scp->use_sg)
- pci_unmap_sg(hba->pcidev,
- (struct scatterlist *)scp->request_buffer,
- scp->use_sg,
- scp->sc_data_direction
- );
- else
- pci_unmap_single(hba->pcidev,
- HPT_SCP(scp)->dma_handle,
- scp->request_bufflen,
- scp->sc_data_direction
- );
- }
+ if (HPT_SCP(scp)->mapped)
+ scsi_dma_unmap(scp);
switch (le32_to_cpu(req->header.result)) {
case IOP_RESULT_SUCCESS:
@@ -448,43 +436,26 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
struct Scsi_Host *host = scp->device->host;
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
- struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;
-
- /*
- * though we'll not get non-use_sg fields anymore,
- * keep use_sg checking anyway
- */
- if (scp->use_sg) {
- int idx;
-
- HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
- sglist, scp->use_sg,
- scp->sc_data_direction);
- HPT_SCP(scp)->mapped = 1;
- BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
-
- for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
- psg[idx].pci_address =
- cpu_to_le64(sg_dma_address(&sglist[idx]));
- psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
- psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
- cpu_to_le32(1) : 0;
- }
+ struct scatterlist *sg;
+ int idx, nseg;
+
+ nseg = scsi_dma_map(scp);
+ BUG_ON(nseg < 0);
+ if (!nseg)
+ return 0;
- return HPT_SCP(scp)->sgcnt;
- } else {
- HPT_SCP(scp)->dma_handle = pci_map_single(
- hba->pcidev,
- scp->request_buffer,
- scp->request_bufflen,
- scp->sc_data_direction
- );
- HPT_SCP(scp)->mapped = 1;
- psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
- psg->size = cpu_to_le32(scp->request_bufflen);
- psg->eot = cpu_to_le32(1);
- return 1;
+ HPT_SCP(scp)->sgcnt = nseg;
+ HPT_SCP(scp)->mapped = 1;
+
+ BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
+
+ scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
+ psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
+ psg[idx].size = cpu_to_le32(sg_dma_len(sg));
+ psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
+ cpu_to_le32(1) : 0;
}
+ return HPT_SCP(scp)->sgcnt;
}
static int hptiop_queuecommand(struct scsi_cmnd *scp,
@@ -529,9 +500,8 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
req = (struct hpt_iop_request_scsi_command *)_req->req_virt;
/* build S/G table */
- if (scp->request_bufflen)
- sg_count = hptiop_buildsgl(scp, req->sg_list);
- else
+ sg_count = hptiop_buildsgl(scp, req->sg_list);
+ if (!sg_count)
HPT_SCP(scp)->mapped = 0;
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
@@ -540,7 +510,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
(u32)_req->index);
req->header.context_hi32 = 0;
- req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
+ req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
req->channel = scp->device->channel;
req->target = scp->device->id;
req->lun = scp->device->lun;
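The hptiop hunks above drop the open-coded pci_map_sg()/pci_map_single() paths in favour of scsi_dma_map()/scsi_dma_unmap(). A minimal sketch of that mapping pattern, with build_sg_table() as an illustrative name only (a real driver copies each element into its hardware S/G descriptor), is:

    #include <linux/scatterlist.h>
    #include <scsi/scsi_cmnd.h>     /* scsi_dma_map(), scsi_for_each_sg(), ... */

    /* Sketch only: map a command and walk its DMA-mapped scatterlist. */
    static int build_sg_table(struct scsi_cmnd *scp)
    {
            struct scatterlist *sg;
            int idx, nseg;

            nseg = scsi_dma_map(scp);       /* 0 = no data, <0 = mapping failed */
            if (nseg <= 0)
                    return nseg;

            scsi_for_each_sg(scp, sg, nseg, idx) {
                    dma_addr_t addr = sg_dma_address(sg);   /* element bus address */
                    unsigned int len = sg_dma_len(sg);      /* element length */

                    /* a real driver would copy addr/len into its hardware
                     * descriptor and mark idx == nseg - 1 as end-of-table */
                    (void)addr;
                    (void)len;
            }
            return nseg;    /* the completion path calls scsi_dma_unmap(scp) */
    }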
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 0e57fb6964d5..4275d1b04ced 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -31,14 +31,21 @@
#include <linux/mca.h>
#include <linux/spinlock.h>
#include <linux/init.h>
-#include <linux/mca-legacy.h>
#include <asm/system.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
-#include "ibmmca.h"
+
+/* Common forward declarations for all Linux-versions: */
+static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
+static int ibmmca_abort (Scsi_Cmnd *);
+static int ibmmca_host_reset (Scsi_Cmnd *);
+static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
+static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
+
+
/* current version of this driver-source: */
#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
@@ -65,11 +72,11 @@
#define IM_DEBUG_CMD_DEVICE TYPE_TAPE
/* relative addresses of hardware registers on a subsystem */
-#define IM_CMD_REG(hi) (hosts[(hi)]->io_port) /*Command Interface, (4 bytes long) */
-#define IM_ATTN_REG(hi) (hosts[(hi)]->io_port+4) /*Attention (1 byte) */
-#define IM_CTR_REG(hi) (hosts[(hi)]->io_port+5) /*Basic Control (1 byte) */
-#define IM_INTR_REG(hi) (hosts[(hi)]->io_port+6) /*Interrupt Status (1 byte, r/o) */
-#define IM_STAT_REG(hi) (hosts[(hi)]->io_port+7) /*Basic Status (1 byte, read only) */
+#define IM_CMD_REG(h) ((h)->io_port) /*Command Interface, (4 bytes long) */
+#define IM_ATTN_REG(h) ((h)->io_port+4) /*Attention (1 byte) */
+#define IM_CTR_REG(h) ((h)->io_port+5) /*Basic Control (1 byte) */
+#define IM_INTR_REG(h) ((h)->io_port+6) /*Interrupt Status (1 byte, r/o) */
+#define IM_STAT_REG(h) ((h)->io_port+7) /*Basic Status (1 byte, read only) */
/* basic I/O-port of first adapter */
#define IM_IO_PORT 0x3540
@@ -266,30 +273,36 @@ static int global_adapter_speed = 0; /* full speed by default */
if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
-/*list of supported subsystems */
-struct subsys_list_struct {
- unsigned short mca_id;
- char *description;
-};
-
/* types of different supported hardware that goes to hostdata special */
#define IBM_SCSI2_FW 0
#define IBM_7568_WCACHE 1
#define IBM_EXP_UNIT 2
#define IBM_SCSI_WCACHE 3
#define IBM_SCSI 4
+#define IBM_INTEGSCSI 5
/* other special flags for hostdata structure */
#define FORCED_DETECTION 100
#define INTEGRATED_SCSI 101
/* List of possible IBM-SCSI-adapters */
-static struct subsys_list_struct subsys_list[] = {
- {0x8efc, "IBM SCSI-2 F/W Adapter"}, /* special = 0 */
- {0x8efd, "IBM 7568 Industrial Computer SCSI Adapter w/Cache"}, /* special = 1 */
- {0x8ef8, "IBM Expansion Unit SCSI Controller"}, /* special = 2 */
- {0x8eff, "IBM SCSI Adapter w/Cache"}, /* special = 3 */
- {0x8efe, "IBM SCSI Adapter"}, /* special = 4 */
+static short ibmmca_id_table[] = {
+ 0x8efc,
+ 0x8efd,
+ 0x8ef8,
+ 0x8eff,
+ 0x8efe,
+ /* No entry for integrated SCSI, that's part of the register */
+ 0
+};
+
+static const char *ibmmca_description[] = {
+ "IBM SCSI-2 F/W Adapter", /* special = 0 */
+ "IBM 7568 Industrial Computer SCSI Adapter w/Cache", /* special = 1 */
+ "IBM Expansion Unit SCSI Controller", /* special = 2 */
+ "IBM SCSI Adapter w/Cache", /* special = 3 */
+ "IBM SCSI Adapter", /* special = 4 */
+ "IBM Integrated SCSI Controller", /* special = 5 */
};
/* Max number of logical devices (can be up from 0 to 14). 15 is the address
@@ -375,30 +388,30 @@ struct ibmmca_hostdata {
};
/* macros to access host data structure */
-#define subsystem_pun(hi) (hosts[(hi)]->this_id)
-#define subsystem_maxid(hi) (hosts[(hi)]->max_id)
-#define ld(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_ld)
-#define get_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_ldn)
-#define get_scsi(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_scsi)
-#define local_checking_phase_flag(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_local_checking_phase_flag)
-#define got_interrupt(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_got_interrupt)
-#define stat_result(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_stat_result)
-#define reset_status(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_reset_status)
-#define last_scsi_command(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_command)
-#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type)
-#define last_scsi_blockcount(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_blockcount)
-#define last_scsi_logical_block(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_logical_block)
-#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type)
-#define next_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_next_ldn)
-#define IBM_DS(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_IBM_DS)
-#define special(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_special)
-#define subsystem_connector_size(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_connector_size)
-#define adapter_speed(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_adapter_speed)
-#define pos2(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[2])
-#define pos3(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[3])
-#define pos4(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[4])
-#define pos5(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[5])
-#define pos6(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[6])
+#define subsystem_pun(h) ((h)->this_id)
+#define subsystem_maxid(h) ((h)->max_id)
+#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
+#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
+#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
+#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
+#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
+#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
+#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
+#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
+#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
+#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
+#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
+#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
+#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
+#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
+#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
+#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
+#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
+#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
+#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
+#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
+#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
+#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
/* Define a arbitrary number as subsystem-marker-type. This number is, as
described in the ANSI-SCSI-standard, not occupied by other device-types. */
@@ -459,11 +472,6 @@ MODULE_LICENSE("GPL");
/*counter of concurrent disk read/writes, to turn on/off disk led */
static int disk_rw_in_progress = 0;
-/* host information */
-static int found = 0;
-static struct Scsi_Host *hosts[IM_MAX_HOSTS + 1] = {
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
-};
static unsigned int pos[8]; /* whole pos register-line for diagnosis */
/* Taking into account the additions, made by ZP Gu.
* This selects now the preset value from the configfile and
@@ -474,70 +482,68 @@ static char ibm_ansi_order = 1;
static char ibm_ansi_order = 0;
#endif
-static void issue_cmd(int, unsigned long, unsigned char);
+static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
static void internal_done(Scsi_Cmnd * cmd);
-static void check_devices(int, int);
-static int immediate_assign(int, unsigned int, unsigned int, unsigned int, unsigned int);
-static int immediate_feature(int, unsigned int, unsigned int);
+static void check_devices(struct Scsi_Host *, int);
+static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
+static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(int, unsigned int);
+static int immediate_reset(struct Scsi_Host *, unsigned int);
#endif
-static int device_inquiry(int, int);
-static int read_capacity(int, int);
-static int get_pos_info(int);
+static int device_inquiry(struct Scsi_Host *, int);
+static int read_capacity(struct Scsi_Host *, int);
+static int get_pos_info(struct Scsi_Host *);
static char *ti_p(int);
static char *ti_l(int);
static char *ibmrate(unsigned int, int);
static int probe_display(int);
-static int probe_bus_mode(int);
-static int device_exists(int, int, int *, int *);
-static struct Scsi_Host *ibmmca_register(struct scsi_host_template *, int, int, int, char *);
+static int probe_bus_mode(struct Scsi_Host *);
+static int device_exists(struct Scsi_Host *, int, int *, int *);
static int option_setup(char *);
/* local functions needed for proc_info */
-static int ldn_access_load(int, int);
-static int ldn_access_total_read_write(int);
+static int ldn_access_load(struct Scsi_Host *, int);
+static int ldn_access_total_read_write(struct Scsi_Host *);
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
- int host_index, ihost_index;
unsigned int intr_reg;
unsigned int cmd_result;
unsigned int ldn;
+ unsigned long flags;
Scsi_Cmnd *cmd;
int lastSCSI;
- struct Scsi_Host *dev = dev_id;
+ struct device *dev = dev_id;
+ struct Scsi_Host *shpnt = dev_get_drvdata(dev);
- spin_lock(dev->host_lock);
- /* search for one adapter-response on shared interrupt */
- for (host_index = 0; hosts[host_index] && !(inb(IM_STAT_REG(host_index)) & IM_INTR_REQUEST); host_index++);
- /* return if some other device on this IRQ caused the interrupt */
- if (!hosts[host_index]) {
- spin_unlock(dev->host_lock);
+ spin_lock_irqsave(shpnt->host_lock, flags);
+
+ if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_NONE;
}
/* the reset-function already did all the job, even ints got
renabled on the subsystem, so just return */
- if ((reset_status(host_index) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(host_index) == IM_RESET_FINISHED_OK_NO_INT)) {
- reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS;
- spin_unlock(dev->host_lock);
+ if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
+ reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/*must wait for attention reg not busy, then send EOI to subsystem */
while (1) {
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
cpu_relax();
}
- ihost_index = host_index;
+
/*get command result and logical device */
- intr_reg = (unsigned char) (inb(IM_INTR_REG(ihost_index)));
+ intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
cmd_result = intr_reg & 0xf0;
ldn = intr_reg & 0x0f;
/* get the last_scsi_command here */
- lastSCSI = last_scsi_command(ihost_index)[ldn];
- outb(IM_EOI | ldn, IM_ATTN_REG(ihost_index));
+ lastSCSI = last_scsi_command(shpnt)[ldn];
+ outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
/*these should never happen (hw fails, or a local programming bug) */
if (!global_command_error_excuse) {
@@ -547,38 +553,38 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
case IM_SOFTWARE_SEQUENCING_ERROR:
case IM_CMD_ERROR:
printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
- printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(ihost_index)[ldn].scb.enable);
- if (ld(ihost_index)[ldn].cmd)
- printk("%ld/%ld,", (long) (ld(ihost_index)[ldn].cmd->request_bufflen), (long) (ld(ihost_index)[ldn].scb.sys_buf_length));
+ printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
+ if (ld(shpnt)[ldn].cmd)
+ printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
else
printk("none,");
- if (ld(ihost_index)[ldn].cmd)
- printk("Blocksize=%d", ld(ihost_index)[ldn].scb.u2.blk.length);
+ if (ld(shpnt)[ldn].cmd)
+ printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
else
printk("Blocksize=none");
- printk(", host=0x%x, ldn=0x%x\n", ihost_index, ldn);
- if (ld(ihost_index)[ldn].cmd) {
- printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u2.blk.count);
- printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u1.log_blk_adr);
+ printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
+ if (ld(shpnt)[ldn].cmd) {
+ printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
+ printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
}
printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
/* if errors appear, enter this section to give detailed info */
printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
- printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(ihost_index)[ldn]);
- printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(ihost_index)));
- printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(ihost_index)));
+ printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
+ printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
+ printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
- printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(ihost_index)));
- if ((last_scsi_type(ihost_index)[ldn] == IM_SCB) || (last_scsi_type(ihost_index)[ldn] == IM_LONG_SCB)) {
- printk(KERN_ERR " SCB-Command.................: %x\n", ld(ihost_index)[ldn].scb.command);
- printk(KERN_ERR " SCB-Enable..................: %x\n", ld(ihost_index)[ldn].scb.enable);
- printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(ihost_index)[ldn].scb.u1.log_blk_adr);
- printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_adr);
- printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_length);
- printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(ihost_index)[ldn].scb.tsb_adr);
- printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(ihost_index)[ldn].scb.scb_chain_adr);
- printk(KERN_ERR " SCB-block count.............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.count);
- printk(KERN_ERR " SCB-block length............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.length);
+ printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
+ if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
+ printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
+ printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
+ printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
+ printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
+ printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
+ printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
+ printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
+ printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
+ printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
}
printk(KERN_ERR " Send this report to the maintainer.\n");
panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
@@ -600,72 +606,73 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
}
}
/* if no panic appeared, increase the interrupt-counter */
- IBM_DS(ihost_index).total_interrupts++;
+ IBM_DS(shpnt).total_interrupts++;
/*only for local checking phase */
- if (local_checking_phase_flag(ihost_index)) {
- stat_result(ihost_index) = cmd_result;
- got_interrupt(ihost_index) = 1;
- reset_status(ihost_index) = IM_RESET_FINISHED_OK;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- spin_unlock(dev->host_lock);
+ if (local_checking_phase_flag(shpnt)) {
+ stat_result(shpnt) = cmd_result;
+ got_interrupt(shpnt) = 1;
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
/* handling of commands coming from upper level of scsi driver */
- if (last_scsi_type(ihost_index)[ldn] == IM_IMM_CMD) {
+ if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
/* verify ldn, and may handle rare reset immediate command */
- if ((reset_status(ihost_index) == IM_RESET_IN_PROGRESS) && (last_scsi_command(ihost_index)[ldn] == IM_RESET_IMM_CMD)) {
+ if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- reset_status(ihost_index) = IM_RESET_FINISHED_FAIL;
+ reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
} else {
/*reset disk led counter, turn off disk led */
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- reset_status(ihost_index) = IM_RESET_FINISHED_OK;
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
}
- stat_result(ihost_index) = cmd_result;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- last_scsi_type(ihost_index)[ldn] = 0;
- spin_unlock(dev->host_lock);
+ stat_result(shpnt) = cmd_result;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ last_scsi_type(shpnt)[ldn] = 0;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
- } else if (last_scsi_command(ihost_index)[ldn] == IM_ABORT_IMM_CMD) {
+ } else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
/* react on SCSI abort command */
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
#endif
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- cmd = ld(ihost_index)[ldn].cmd;
- ld(ihost_index)[ldn].cmd = NULL;
+ cmd = ld(shpnt)[ldn].cmd;
+ ld(shpnt)[ldn].cmd = NULL;
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
cmd->result = DID_NO_CONNECT << 16;
else
cmd->result = DID_ABORT << 16;
- stat_result(ihost_index) = cmd_result;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- last_scsi_type(ihost_index)[ldn] = 0;
+ stat_result(shpnt) = cmd_result;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ last_scsi_type(shpnt)[ldn] = 0;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd); /* should be the internal_done */
- spin_unlock(dev->host_lock);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
} else {
disk_rw_in_progress = 0;
PS2_DISK_LED_OFF();
- reset_status(ihost_index) = IM_RESET_FINISHED_OK;
- stat_result(ihost_index) = cmd_result;
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- spin_unlock(dev->host_lock);
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
+ stat_result(shpnt) = cmd_result;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
}
- last_scsi_command(ihost_index)[ldn] = NO_SCSI;
- last_scsi_type(ihost_index)[ldn] = 0;
- cmd = ld(ihost_index)[ldn].cmd;
- ld(ihost_index)[ldn].cmd = NULL;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI;
+ last_scsi_type(shpnt)[ldn] = 0;
+ cmd = ld(shpnt)[ldn].cmd;
+ ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_TIMEOUT
if (cmd) {
if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
return IRQ_HANDLED;
}
@@ -674,15 +681,15 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
/*if no command structure, just return, else clear cmd */
if (!cmd)
{
- spin_unlock(dev->host_lock);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
#ifdef IM_DEBUG_INT
- printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(ihost_index)[ldn].tsb.dev_status, ld(ihost_index)[ldn].tsb.cmd_status, ld(ihost_index)[ldn].tsb.dev_error, ld(ihost_index)[ldn].tsb.cmd_error);
+ printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
#endif
/*if this is end of media read/write, may turn off PS/2 disk led */
- if ((ld(ihost_index)[ldn].device_type != TYPE_NO_LUN) && (ld(ihost_index)[ldn].device_type != TYPE_NO_DEVICE)) {
+ if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
/* only access this, if there was a valid device addressed */
if (--disk_rw_in_progress == 0)
PS2_DISK_LED_OFF();
@@ -693,8 +700,8 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
* adapters do not support CMD_TERMINATED, TASK_SET_FULL and
* ACA_ACTIVE as returning statusbyte information. (ML) */
if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
- cmd->result = (unsigned char) (ld(ihost_index)[ldn].tsb.dev_status & 0x1e);
- IBM_DS(ihost_index).total_errors++;
+ cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
+ IBM_DS(shpnt).total_errors++;
} else
cmd->result = 0;
/* write device status into cmd->result, and call done function */
@@ -705,24 +712,25 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
cmd->result |= DID_OK << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
- spin_unlock(dev->host_lock);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return IRQ_HANDLED;
}
-static void issue_cmd(int host_index, unsigned long cmd_reg, unsigned char attn_reg)
+static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
+ unsigned char attn_reg)
{
unsigned long flags;
/* must wait for attention reg not busy */
while (1) {
- spin_lock_irqsave(hosts[host_index]->host_lock, flags);
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ spin_lock_irqsave(shpnt->host_lock, flags);
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
- spin_unlock_irqrestore(hosts[host_index]->host_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
}
/* write registers and enable system interrupts */
- outl(cmd_reg, IM_CMD_REG(host_index));
- outb(attn_reg, IM_ATTN_REG(host_index));
- spin_unlock_irqrestore(hosts[host_index]->host_lock, flags);
+ outl(cmd_reg, IM_CMD_REG(shpnt));
+ outb(attn_reg, IM_ATTN_REG(shpnt));
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
}
static void internal_done(Scsi_Cmnd * cmd)
@@ -732,34 +740,34 @@ static void internal_done(Scsi_Cmnd * cmd)
}
/* SCSI-SCB-command for device_inquiry */
-static int device_inquiry(int host_index, int ldn)
+static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
{
int retr;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
- scb = &(ld(host_index)[ldn].scb);
- tsb = &(ld(host_index)[ldn].tsb);
- buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
- ld(host_index)[ldn].tsb.dev_status = 0; /* prepare statusblock */
+ scb = &(ld(shpnt)[ldn].scb);
+ tsb = &(ld(shpnt)[ldn].tsb);
+ buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
+ ld(shpnt)[ldn].tsb.dev_status = 0; /* prepare statusblock */
for (retr = 0; retr < 3; retr++) {
/* fill scb with inquiry command */
scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(host_index)[ldn] = IM_DEVICE_INQUIRY_CMD;
- last_scsi_type(host_index)[ldn] = IM_SCB;
+ last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_SCB;
scb->sys_buf_adr = isa_virt_to_bus(buf);
scb->sys_buf_length = 255; /* maximum bufferlength gives max info */
scb->tsb_adr = isa_virt_to_bus(tsb);
/* issue scb to passed ldn, and busy wait for interrupt */
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
- while (!got_interrupt(host_index))
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+ while (!got_interrupt(shpnt))
barrier();
/*if command successful, break */
- if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+ if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
/*if all three retries failed, return "no device at this ldn" */
@@ -769,34 +777,34 @@ static int device_inquiry(int host_index, int ldn)
return 1;
}
-static int read_capacity(int host_index, int ldn)
+static int read_capacity(struct Scsi_Host *shpnt, int ldn)
{
int retr;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
- scb = &(ld(host_index)[ldn].scb);
- tsb = &(ld(host_index)[ldn].tsb);
- buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
- ld(host_index)[ldn].tsb.dev_status = 0;
+ scb = &(ld(shpnt)[ldn].scb);
+ tsb = &(ld(shpnt)[ldn].tsb);
+ buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
+ ld(shpnt)[ldn].tsb.dev_status = 0;
for (retr = 0; retr < 3; retr++) {
/*fill scb with read capacity command */
scb->command = IM_READ_CAPACITY_CMD;
scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(host_index)[ldn] = IM_READ_CAPACITY_CMD;
- last_scsi_type(host_index)[ldn] = IM_SCB;
+ last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_SCB;
scb->sys_buf_adr = isa_virt_to_bus(buf);
scb->sys_buf_length = 8;
scb->tsb_adr = isa_virt_to_bus(tsb);
/*issue scb to passed ldn, and busy wait for interrupt */
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
- while (!got_interrupt(host_index))
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+ while (!got_interrupt(shpnt))
barrier();
/*if got capacity, get block length and return one device found */
- if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+ if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
/*if all three retries failed, return "no device at this ldn" */
@@ -806,39 +814,39 @@ static int read_capacity(int host_index, int ldn)
return 1;
}
-static int get_pos_info(int host_index)
+static int get_pos_info(struct Scsi_Host *shpnt)
{
int retr;
struct im_scb *scb;
struct im_tsb *tsb;
unsigned char *buf;
- scb = &(ld(host_index)[MAX_LOG_DEV].scb);
- tsb = &(ld(host_index)[MAX_LOG_DEV].tsb);
- buf = (unsigned char *) (&(ld(host_index)[MAX_LOG_DEV].buf));
- ld(host_index)[MAX_LOG_DEV].tsb.dev_status = 0;
+ scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
+ tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
+ buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
+ ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
for (retr = 0; retr < 3; retr++) {
/*fill scb with get_pos_info command */
scb->command = IM_GET_POS_INFO_CMD;
scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
- last_scsi_command(host_index)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
- last_scsi_type(host_index)[MAX_LOG_DEV] = IM_SCB;
+ last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
+ last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
scb->sys_buf_adr = isa_virt_to_bus(buf);
- if (special(host_index) == IBM_SCSI2_FW)
+ if (special(shpnt) == IBM_SCSI2_FW)
scb->sys_buf_length = 256; /* get all info from F/W adapter */
else
scb->sys_buf_length = 18; /* get exactly 18 bytes for other SCSI */
scb->tsb_adr = isa_virt_to_bus(tsb);
/*issue scb to ldn=15, and busy wait for interrupt */
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
/* FIXME: timeout */
- while (!got_interrupt(host_index))
+ while (!got_interrupt(shpnt))
barrier();
/*if got POS-stuff, get block length and return one device found */
- if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+ if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
return 1;
}
/* if all three retries failed, return "no device at this ldn" */
@@ -851,14 +859,16 @@ static int get_pos_info(int host_index)
/* SCSI-immediate-command for assign. This functions maps/unmaps specific
ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
subsystem and for dynamical remapping od ldns. */
-static int immediate_assign(int host_index, unsigned int pun, unsigned int lun, unsigned int ldn, unsigned int operation)
+static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
+ unsigned int lun, unsigned int ldn,
+ unsigned int operation)
{
int retr;
unsigned long imm_cmd;
for (retr = 0; retr < 3; retr++) {
/* select mutation level of the SCSI-adapter */
- switch (special(host_index)) {
+ switch (special(shpnt)) {
case IBM_SCSI2_FW:
imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -867,7 +877,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
imm_cmd |= (unsigned long) ((ldn & 15) << 16);
break;
default:
- imm_cmd = inl(IM_CMD_REG(host_index));
+ imm_cmd = inl(IM_CMD_REG(shpnt));
imm_cmd &= (unsigned long) (0xF8000000); /* keep reserved bits */
imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -876,15 +886,15 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
imm_cmd |= (unsigned long) ((ldn & 15) << 16);
break;
}
- last_scsi_command(host_index)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
- last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
- got_interrupt(host_index) = 0;
- issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
- while (!got_interrupt(host_index))
+ last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
+ last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
+ got_interrupt(shpnt) = 0;
+ issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
+ while (!got_interrupt(shpnt))
barrier();
/*if command successful, break */
- if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+ if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
if (retr >= 3)
@@ -893,7 +903,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
return 1;
}
-static int immediate_feature(int host_index, unsigned int speed, unsigned int timeout)
+static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
{
int retr;
unsigned long imm_cmd;
@@ -903,16 +913,16 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
imm_cmd = IM_FEATURE_CTR_IMM_CMD;
imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
- last_scsi_command(host_index)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
- last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
- got_interrupt(host_index) = 0;
+ last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
+ last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
+ got_interrupt(shpnt) = 0;
/* we need to run into command errors in order to probe for the
* right speed! */
global_command_error_excuse = 1;
- issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
+ issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
/* FIXME: timeout */
- while (!got_interrupt(host_index))
+ while (!got_interrupt(shpnt))
barrier();
if (global_command_error_excuse == CMD_FAIL) {
global_command_error_excuse = 0;
@@ -920,7 +930,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
} else
global_command_error_excuse = 0;
/*if command successful, break */
- if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+ if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
if (retr >= 3)
@@ -930,35 +940,35 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
}
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(int host_index, unsigned int ldn)
+static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
{
int retries;
int ticks;
unsigned long imm_command;
for (retries = 0; retries < 3; retries++) {
- imm_command = inl(IM_CMD_REG(host_index));
+ imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xFFFF0000); /* keep reserved bits */
imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
- last_scsi_command(host_index)[ldn] = IM_RESET_IMM_CMD;
- last_scsi_type(host_index)[ldn] = IM_IMM_CMD;
- got_interrupt(host_index) = 0;
- reset_status(host_index) = IM_RESET_IN_PROGRESS;
- issue_cmd(host_index, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
+ last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
+ got_interrupt(shpnt) = 0;
+ reset_status(shpnt) = IM_RESET_IN_PROGRESS;
+ issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
ticks = IM_RESET_DELAY * HZ;
- while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks) {
+ while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
udelay((1 + 999 / HZ) * 1000);
barrier();
}
/* if reset did not complete, just complain */
if (!ticks) {
printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
- reset_status(host_index) = IM_RESET_FINISHED_OK;
+ reset_status(shpnt) = IM_RESET_FINISHED_OK;
/* did not work, finish */
return 1;
}
/*if command successful, break */
- if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+ if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
return 1;
}
if (retries >= 3)
@@ -1060,35 +1070,35 @@ static int probe_display(int what)
return 0;
}
-static int probe_bus_mode(int host_index)
+static int probe_bus_mode(struct Scsi_Host *shpnt)
{
struct im_pos_info *info;
int num_bus = 0;
int ldn;
- info = (struct im_pos_info *) (&(ld(host_index)[MAX_LOG_DEV].buf));
- if (get_pos_info(host_index)) {
+ info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
+ if (get_pos_info(shpnt)) {
if (info->connector_size & 0xf000)
- subsystem_connector_size(host_index) = 16;
+ subsystem_connector_size(shpnt) = 16;
else
- subsystem_connector_size(host_index) = 32;
+ subsystem_connector_size(shpnt) = 32;
num_bus |= (info->pos_4b & 8) >> 3;
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- if ((special(host_index) == IBM_SCSI_WCACHE) || (special(host_index) == IBM_7568_WCACHE)) {
+ if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
if (!((info->cache_stat >> ldn) & 1))
- ld(host_index)[ldn].cache_flag = 0;
+ ld(shpnt)[ldn].cache_flag = 0;
}
if (!((info->retry_stat >> ldn) & 1))
- ld(host_index)[ldn].retry_flag = 0;
+ ld(shpnt)[ldn].retry_flag = 0;
}
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: SCSI-Cache bits: ");
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- printk("%d", ld(host_index)[ldn].cache_flag);
+ printk("%d", ld(shpnt)[ldn].cache_flag);
}
printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- printk("%d", ld(host_index)[ldn].retry_flag);
+ printk("%d", ld(shpnt)[ldn].retry_flag);
}
printk("\n");
#endif
@@ -1097,7 +1107,7 @@ static int probe_bus_mode(int host_index)
}
/* probing scsi devices */
-static void check_devices(int host_index, int adaptertype)
+static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
{
int id, lun, ldn, ticks;
int count_devices; /* local counter for connected device */
@@ -1108,24 +1118,24 @@ static void check_devices(int host_index, int adaptertype)
/* assign default values to certain variables */
ticks = 0;
count_devices = 0;
- IBM_DS(host_index).dyn_flag = 0; /* normally no need for dynamical ldn management */
- IBM_DS(host_index).total_errors = 0; /* set errorcounter to 0 */
- next_ldn(host_index) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
+ IBM_DS(shpnt).dyn_flag = 0; /* normally no need for dynamical ldn management */
+ IBM_DS(shpnt).total_errors = 0; /* set errorcounter to 0 */
+ next_ldn(shpnt) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
/* initialize the very important driver-informational arrays/structs */
- memset(ld(host_index), 0, sizeof(ld(host_index)));
+ memset(ld(shpnt), 0, sizeof(ld(shpnt)));
for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
- last_scsi_command(host_index)[ldn] = NO_SCSI; /* emptify last SCSI-command storage */
- last_scsi_type(host_index)[ldn] = 0;
- ld(host_index)[ldn].cache_flag = 1;
- ld(host_index)[ldn].retry_flag = 1;
+ last_scsi_command(shpnt)[ldn] = NO_SCSI; /* emptify last SCSI-command storage */
+ last_scsi_type(shpnt)[ldn] = 0;
+ ld(shpnt)[ldn].cache_flag = 1;
+ ld(shpnt)[ldn].retry_flag = 1;
}
- memset(get_ldn(host_index), TYPE_NO_DEVICE, sizeof(get_ldn(host_index))); /* this is essential ! */
- memset(get_scsi(host_index), TYPE_NO_DEVICE, sizeof(get_scsi(host_index))); /* this is essential ! */
+ memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt))); /* this is essential ! */
+ memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt))); /* this is essential ! */
for (lun = 0; lun < 8; lun++) {
/* mark the adapter at its pun on all luns */
- get_scsi(host_index)[subsystem_pun(host_index)][lun] = TYPE_IBM_SCSI_ADAPTER;
- get_ldn(host_index)[subsystem_pun(host_index)][lun] = MAX_LOG_DEV; /* make sure, the subsystem
+ get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
+ get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV; /* make sure, the subsystem
ldn is active for all
luns. */
}
@@ -1134,9 +1144,9 @@ static void check_devices(int host_index, int adaptertype)
/* monitor connected on model XX95. */
/* STEP 1: */
- adapter_speed(host_index) = global_adapter_speed;
- speedrun = adapter_speed(host_index);
- while (immediate_feature(host_index, speedrun, adapter_timeout) == 2) {
+ adapter_speed(shpnt) = global_adapter_speed;
+ speedrun = adapter_speed(shpnt);
+ while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
probe_display(1);
if (speedrun == 7)
panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
@@ -1144,30 +1154,30 @@ static void check_devices(int host_index, int adaptertype)
if (speedrun > 7)
speedrun = 7;
}
- adapter_speed(host_index) = speedrun;
+ adapter_speed(shpnt) = speedrun;
/* Get detailed information about the current adapter, necessary for
* device operations: */
- num_bus = probe_bus_mode(host_index);
+ num_bus = probe_bus_mode(shpnt);
/* num_bus contains only valid data for the F/W adapter! */
if (adaptertype == IBM_SCSI2_FW) { /* F/W SCSI adapter: */
/* F/W adapter PUN-space extension evaluation: */
if (num_bus) {
printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
- subsystem_maxid(host_index) = 16;
+ subsystem_maxid(shpnt) = 16;
} else {
printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
- subsystem_maxid(host_index) = 8;
+ subsystem_maxid(shpnt) = 8;
}
printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
} else /* all other IBM SCSI adapters: */
printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
/* assign correct PUN device space */
- max_pun = subsystem_maxid(host_index);
+ max_pun = subsystem_maxid(shpnt);
#ifdef IM_DEBUG_PROBE
- printk("IBM MCA SCSI: Current SCSI-host index: %d\n", host_index);
+ printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
#else
printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
@@ -1177,7 +1187,7 @@ static void check_devices(int host_index, int adaptertype)
#ifdef IM_DEBUG_PROBE
printk(".");
#endif
- immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */
+ immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */
}
lun = 0; /* default lun is 0 */
#ifndef IM_DEBUG_PROBE
@@ -1196,18 +1206,18 @@ static void check_devices(int host_index, int adaptertype)
#ifdef IM_DEBUG_PROBE
printk(".");
#endif
- if (id != subsystem_pun(host_index)) {
+ if (id != subsystem_pun(shpnt)) {
/* if pun is not the adapter: */
/* set ldn=0 to pun,lun */
- immediate_assign(host_index, id, lun, PROBE_LDN, SET_LDN);
- if (device_inquiry(host_index, PROBE_LDN)) { /* probe device */
- get_scsi(host_index)[id][lun] = (unsigned char) (ld(host_index)[PROBE_LDN].buf[0]);
+ immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
+ if (device_inquiry(shpnt, PROBE_LDN)) { /* probe device */
+ get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
/* entry, even for NO_LUN */
- if (ld(host_index)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
+ if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
count_devices++; /* a existing device is found */
}
/* remove ldn */
- immediate_assign(host_index, id, lun, PROBE_LDN, REMOVE_LDN);
+ immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
}
}
#ifndef IM_DEBUG_PROBE
@@ -1227,16 +1237,16 @@ static void check_devices(int host_index, int adaptertype)
#ifdef IM_DEBUG_PROBE
printk(".");
#endif
- if (id != subsystem_pun(host_index)) {
- if (get_scsi(host_index)[id][lun] != TYPE_NO_LUN && get_scsi(host_index)[id][lun] != TYPE_NO_DEVICE) {
+ if (id != subsystem_pun(shpnt)) {
+ if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
/* Only map if accepted type. Always enter for
lun == 0 to get no gaps into ldn-mapping for ldn<7. */
- immediate_assign(host_index, id, lun, ldn, SET_LDN);
- get_ldn(host_index)[id][lun] = ldn; /* map ldn */
- if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) {
+ immediate_assign(shpnt, id, lun, ldn, SET_LDN);
+ get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
+ if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
printk("resetting device at ldn=%x ... ", ldn);
- immediate_reset(host_index, ldn);
+ immediate_reset(shpnt, ldn);
#endif
ldn++;
} else {
@@ -1244,15 +1254,15 @@ static void check_devices(int host_index, int adaptertype)
* handle it or because it has problems */
if (lun > 0) {
/* remove mapping */
- get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE;
- immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN);
+ get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
+ immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
} else
ldn++;
}
} else if (lun == 0) {
/* map lun == 0, even if no device exists */
- immediate_assign(host_index, id, lun, ldn, SET_LDN);
- get_ldn(host_index)[id][lun] = ldn; /* map ldn */
+ immediate_assign(shpnt, id, lun, ldn, SET_LDN);
+ get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
ldn++;
}
}
@@ -1262,14 +1272,14 @@ static void check_devices(int host_index, int adaptertype)
/* map remaining ldns to non-existing devices */
for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
- if (get_scsi(host_index)[id][lun] == TYPE_NO_LUN || get_scsi(host_index)[id][lun] == TYPE_NO_DEVICE) {
+ if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
probe_display(1);
/* Map remaining ldns only to NON-existing pun,lun
combinations to make sure an inquiry will fail.
For MULTI_LUN, it is needed to avoid adapter autonome
SCSI-remapping. */
- immediate_assign(host_index, id, lun, ldn, SET_LDN);
- get_ldn(host_index)[id][lun] = ldn;
+ immediate_assign(shpnt, id, lun, ldn, SET_LDN);
+ get_ldn(shpnt)[id][lun] = ldn;
ldn++;
}
}
@@ -1292,51 +1302,51 @@ static void check_devices(int host_index, int adaptertype)
for (id = 0; id < max_pun; id++) {
printk("%2d ", id);
for (lun = 0; lun < 8; lun++)
- printk("%2s ", ti_p(get_scsi(host_index)[id][lun]));
+ printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
printk(" %2d ", id);
for (lun = 0; lun < 8; lun++)
- printk("%2s ", ti_l(get_ldn(host_index)[id][lun]));
+ printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
printk("\n");
}
#endif
/* assign total number of found SCSI-devices to the statistics struct */
- IBM_DS(host_index).total_scsi_devices = count_devices;
+ IBM_DS(shpnt).total_scsi_devices = count_devices;
/* decide for output in /proc-filesystem, if the configuration of
SCSI-devices makes dynamical reassignment of devices necessary */
if (count_devices >= MAX_LOG_DEV)
- IBM_DS(host_index).dyn_flag = 1; /* dynamical assignment is necessary */
+ IBM_DS(shpnt).dyn_flag = 1; /* dynamical assignment is necessary */
else
- IBM_DS(host_index).dyn_flag = 0; /* dynamical assignment is not necessary */
+ IBM_DS(shpnt).dyn_flag = 0; /* dynamical assignment is not necessary */
/* If no SCSI-devices are assigned, return 1 in order to cause message. */
if (ldn == 0)
printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
/* reset the counters for statistics on the current adapter */
- IBM_DS(host_index).scbs = 0;
- IBM_DS(host_index).long_scbs = 0;
- IBM_DS(host_index).total_accesses = 0;
- IBM_DS(host_index).total_interrupts = 0;
- IBM_DS(host_index).dynamical_assignments = 0;
- memset(IBM_DS(host_index).ldn_access, 0x0, sizeof(IBM_DS(host_index).ldn_access));
- memset(IBM_DS(host_index).ldn_read_access, 0x0, sizeof(IBM_DS(host_index).ldn_read_access));
- memset(IBM_DS(host_index).ldn_write_access, 0x0, sizeof(IBM_DS(host_index).ldn_write_access));
- memset(IBM_DS(host_index).ldn_inquiry_access, 0x0, sizeof(IBM_DS(host_index).ldn_inquiry_access));
- memset(IBM_DS(host_index).ldn_modeselect_access, 0x0, sizeof(IBM_DS(host_index).ldn_modeselect_access));
- memset(IBM_DS(host_index).ldn_assignments, 0x0, sizeof(IBM_DS(host_index).ldn_assignments));
+ IBM_DS(shpnt).scbs = 0;
+ IBM_DS(shpnt).long_scbs = 0;
+ IBM_DS(shpnt).total_accesses = 0;
+ IBM_DS(shpnt).total_interrupts = 0;
+ IBM_DS(shpnt).dynamical_assignments = 0;
+ memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
+ memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
+ memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
+ memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
+ memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
+ memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
probe_display(0);
return;
}
-static int device_exists(int host_index, int ldn, int *block_length, int *device_type)
+static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
{
unsigned char *buf;
/* if no valid device found, return immediately with 0 */
- if (!(device_inquiry(host_index, ldn)))
+ if (!(device_inquiry(shpnt, ldn)))
return 0;
- buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
+ buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
if (*buf == TYPE_ROM) {
*device_type = TYPE_ROM;
*block_length = 2048; /* (standard blocksize for yellow-/red-book) */
@@ -1349,7 +1359,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
}
if (*buf == TYPE_DISK) {
*device_type = TYPE_DISK;
- if (read_capacity(host_index, ldn)) {
+ if (read_capacity(shpnt, ldn)) {
*block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
return 1;
} else
@@ -1357,7 +1367,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
}
if (*buf == TYPE_MOD) {
*device_type = TYPE_MOD;
- if (read_capacity(host_index, ldn)) {
+ if (read_capacity(shpnt, ldn)) {
*block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
return 1;
} else
@@ -1430,6 +1440,9 @@ static void internal_ibmmca_scsi_setup(char *str, int *ints)
return;
}
+#if 0
+ FIXME NEED TO MOVE TO SYSFS
+
static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
{
struct Scsi_Host *shpnt;
@@ -1480,58 +1493,34 @@ static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
return len;
}
+#endif
-int ibmmca_detect(struct scsi_host_template * scsi_template)
+static struct scsi_host_template ibmmca_driver_template = {
+ .proc_name = "ibmmca",
+ .proc_info = ibmmca_proc_info,
+ .name = "IBM SCSI-Subsystem",
+ .queuecommand = ibmmca_queuecommand,
+ .eh_abort_handler = ibmmca_abort,
+ .eh_host_reset_handler = ibmmca_host_reset,
+ .bios_param = ibmmca_biosparam,
+ .can_queue = 16,
+ .this_id = 7,
+ .sg_tablesize = 16,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int ibmmca_probe(struct device *dev)
{
struct Scsi_Host *shpnt;
- int port, id, i, j, k, slot;
- int devices_on_irq_11 = 0;
- int devices_on_irq_14 = 0;
- int IRQ14_registered = 0;
- int IRQ11_registered = 0;
-
- found = 0; /* make absolutely sure, that found is set to 0 */
+ int port, id, i, j, k, irq, enabled, ret = -EINVAL;
+ struct mca_device *mca_dev = to_mca_device(dev);
+ const char *description = ibmmca_description[mca_dev->index];
/* First of all, print the version number of the driver. This is
* important to allow better user bugreports in case of already
* having problems with the MCA_bus probing. */
printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
- /* if this is not MCA machine, return "nothing found" */
- if (!MCA_bus) {
- printk(KERN_INFO "IBM MCA SCSI: No Microchannel-bus present --> Aborting.\n" " This machine does not have any IBM MCA-bus\n" " or the MCA-Kernel-support is not enabled!\n");
- return 0;
- }
-
-#ifdef MODULE
- /* If the driver is run as module, read from conf.modules or cmd-line */
- if (boot_options)
- option_setup(boot_options);
-#endif
-
- /* get interrupt request level */
- if (request_irq(IM_IRQ, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
- printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ);
- return 0;
- } else
- IRQ14_registered++;
-
- /* if ibmmcascsi setup option was passed to kernel, return "found" */
- for (i = 0; i < IM_MAX_HOSTS; i++)
- if (io_port[i] > 0 && scsi_id[i] >= 0 && scsi_id[i] < 8) {
- printk("IBM MCA SCSI: forced detected SCSI Adapter, io=0x%x, scsi id=%d.\n", io_port[i], scsi_id[i]);
- if ((shpnt = ibmmca_register(scsi_template, io_port[i], scsi_id[i], FORCED_DETECTION, "forced detected SCSI Adapter"))) {
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = 0;
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = FORCED_DETECTION;
- mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter");
- mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(MCA_INTEGSCSI);
- devices_on_irq_14++;
- }
- }
- if (found)
- return found;
-
/* The POS2-register of all PS/2 model SCSI-subsystems has the following
* interpretation of bits:
* Bit 7 - 4 : Chip Revision ID (Release)
@@ -1558,7 +1547,14 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
/* first look for the IBM SCSI integrated subsystem on the motherboard */
for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_read_stored_pos(MCA_INTEGSCSI, j);
+ pos[j] = mca_device_read_pos(mca_dev, j);
+ id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
+ enabled = (pos[2] &0x01);
+ if (!enabled) {
+ printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
+ printk(KERN_WARNING " SCSI-operations may not work.\n");
+ }
+
/* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
* if we ignore the settings of all surrounding pos registers, it is not
* completely sufficient to only check pos2 and pos3. */
@@ -1566,232 +1562,137 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
* make sure, we see a real integrated onboard SCSI-interface and no
* internal system information, which gets mapped to some pos registers
* on models 95xx. */
- if ((!pos[0] && !pos[1] && pos[2] > 0 && pos[3] > 0 && !pos[4] && !pos[5] && !pos[6] && !pos[7]) || (pos[0] == 0xff && pos[1] == 0xff && pos[2] < 0xff && pos[3] < 0xff && pos[4] == 0xff && pos[5] == 0xff && pos[6] == 0xff && pos[7] == 0xff)) {
- if ((pos[2] & 1) == 1) /* is the subsystem chip enabled ? */
- port = IM_IO_PORT;
- else { /* if disabled, no IRQs will be generated, as the chip won't
- * listen to the incoming commands and will do really nothing,
- * except for listening to the pos-register settings. If this
- * happens, I need to hugely think about it, as one has to
- * write something to the MCA-Bus pos register in order to
- * enable the chip. Normally, IBM-SCSI won't pass the POST,
- * when the chip is disabled (see IBM tech. ref.). */
- port = IM_IO_PORT; /* anyway, set the portnumber and warn */
- printk("IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n" " SCSI-operations may not work.\n");
+ if (mca_dev->slot == MCA_INTEGSCSI &&
+ ((!pos[0] && !pos[1] && pos[2] > 0 &&
+ pos[3] > 0 && !pos[4] && !pos[5] &&
+ !pos[6] && !pos[7]) ||
+ (pos[0] == 0xff && pos[1] == 0xff &&
+ pos[2] < 0xff && pos[3] < 0xff &&
+ pos[4] == 0xff && pos[5] == 0xff &&
+ pos[6] == 0xff && pos[7] == 0xff))) {
+ irq = IM_IRQ;
+ port = IM_IO_PORT;
+ } else {
+ irq = IM_IRQ;
+ port = IM_IO_PORT + ((pos[2] &0x0e) << 2);
+ if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
+ printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
+ printk(KERN_ERR " Impossible to determine adapter PUN!\n");
+ printk(KERN_ERR " Guessing adapter PUN = 7.\n");
+ id = 7;
+ } else {
+ id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
+ if (mca_dev->index == IBM_SCSI2_FW) {
+ id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
+ * for F/W adapters */
+ }
}
- id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
- /* give detailed information on the subsystem. This helps me
- * additionally during debugging and analyzing bug-reports. */
- printk(KERN_INFO "IBM MCA SCSI: IBM Integrated SCSI Controller ffound, io=0x%x, scsi id=%d,\n", port, id);
- printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
-
- /* register the found integrated SCSI-subsystem */
- if ((shpnt = ibmmca_register(scsi_template, port, id, INTEGRATED_SCSI, "IBM Integrated SCSI Controller")))
- {
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
- mca_set_adapter_name(MCA_INTEGSCSI, "IBM Integrated SCSI Controller");
- mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(MCA_INTEGSCSI);
- devices_on_irq_14++;
+ if ((mca_dev->index == IBM_SCSI2_FW) &&
+ (pos[4] & 0x01) && (pos[6] == 0)) {
+ /* IRQ11 is used by SCSI-2 F/W Adapter/A */
+ printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
+ irq = IM_IRQ_FW;
}
}
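
The probe path above reads POS data through the mca_device handed in by the MCA bus core (mca_device_read_pos()) rather than polling every slot with mca_find_adapter()/mca_read_stored_pos(); the per-slot scan loops removed below are the work the bus core now performs before calling probe(). A minimal sketch of the new POS access (read_pos() is an illustrative helper, not a symbol from this driver):

#include <linux/mca.h>

/* sketch: fetch the eight POS bytes for the device being probed */
static void read_pos(struct mca_device *mca_dev, unsigned char pos[8])
{
	int j;

	for (j = 0; j < 8; j++)
		pos[j] = mca_device_read_pos(mca_dev, j);
}
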
- /* now look for other adapters in MCA slots, */
- /* determine the number of known IBM-SCSI-subsystem types */
- /* see the pos[2] dependence to get the adapter port-offset. */
- for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
- /* scan each slot for a fitting adapter id */
- slot = 0; /* start at slot 0 */
- while ((slot = mca_find_adapter(subsys_list[i].mca_id, slot))
- != MCA_NOTFOUND) { /* scan through all slots */
- for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_read_stored_pos(slot, j);
- if ((pos[2] & 1) == 1)
- /* is the subsystem chip enabled ? */
- /* (explanations see above) */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- else {
- /* anyway, set the portnumber and warn */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
- printk(KERN_WARNING " SCSI-operations may not work.\n");
- }
- if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
- printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
- printk(KERN_ERR " Impossible to determine adapter PUN!\n");
- printk(KERN_ERR " Guessing adapter PUN = 7.\n");
- id = 7;
- } else {
- id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
- if (i == IBM_SCSI2_FW) {
- id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
- * for F/W adapters */
- }
- }
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
- /* IRQ11 is used by SCSI-2 F/W Adapter/A */
- printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
- /* get interrupt request level */
- if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
- printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
- } else
- IRQ11_registered++;
- }
- printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
- if ((pos[2] & 0xf0) == 0xf0)
- printk(KERN_DEBUG" ROM Addr.=off,");
- else
- printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
- printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
-
- /* register the hostadapter */
- if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
- for (k = 2; k < 8; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
- mca_set_adapter_name(slot, subsys_list[i].description);
- mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(slot);
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
- devices_on_irq_11++;
- else
- devices_on_irq_14++;
- }
- slot++; /* advance to next slot */
- } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
- }
- /* now check for SCSI-adapters, mapped to the integrated SCSI
- * area. E.g. a W/Cache in MCA-slot 9(!). Do the check correct here,
- * as this is a known effect on some models 95xx. */
- for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
- /* scan each slot for a fitting adapter id */
- slot = mca_find_adapter(subsys_list[i].mca_id, MCA_INTEGSCSI);
- if (slot != MCA_NOTFOUND) { /* scan through all slots */
- for (j = 0; j < 8; j++) /* read the pos-information */
- pos[j] = mca_read_stored_pos(slot, j);
- if ((pos[2] & 1) == 1) { /* is the subsystem chip enabled ? */
- /* (explanations see above) */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- } else { /* anyway, set the portnumber and warn */
- port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
- printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
- printk(KERN_WARNING " SCSI-operations may not work.\n");
- }
- if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
- printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
- printk(KERN_ERR " Impossible to determine adapter PUN!\n");
- printk(KERN_ERR " Guessing adapter PUN = 7.\n");
- id = 7;
- } else {
- id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
- if (i == IBM_SCSI2_FW)
- id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
- * for F/W adapters */
- }
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
- /* IRQ11 is used by SCSI-2 F/W Adapter/A */
- printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
- /* get interrupt request level */
- if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts))
- printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
- else
- IRQ11_registered++;
- }
- printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
- if ((pos[2] & 0xf0) == 0xf0)
- printk(KERN_DEBUG " ROM Addr.=off,");
- else
- printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
- printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
-
- /* register the hostadapter */
- if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
- for (k = 2; k < 7; k++)
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
- ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
- mca_set_adapter_name(slot, subsys_list[i].description);
- mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
- mca_mark_as_used(slot);
- if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
- devices_on_irq_11++;
- else
- devices_on_irq_14++;
- }
- slot++; /* advance to next slot */
- } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
- }
- if (IRQ11_registered && !devices_on_irq_11)
- free_irq(IM_IRQ_FW, hosts); /* no devices on IRQ 11 */
- if (IRQ14_registered && !devices_on_irq_14)
- free_irq(IM_IRQ, hosts); /* no devices on IRQ 14 */
- if (!devices_on_irq_11 && !devices_on_irq_14)
- printk(KERN_WARNING "IBM MCA SCSI: No IBM SCSI-subsystem adapter attached.\n");
- return found; /* return the number of found SCSI hosts. Should be 1 or 0. */
-}
-static struct Scsi_Host *ibmmca_register(struct scsi_host_template * scsi_template, int port, int id, int adaptertype, char *hostname)
-{
- struct Scsi_Host *shpnt;
- int i, j;
- unsigned int ctrl;
+ /* give detailed information on the subsystem; this additionally
+ * helps during debugging and when analyzing bug reports. */
+ printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
+ description, port, id);
+ if (mca_dev->slot == MCA_INTEGSCSI)
+ printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
+ else {
+ if ((pos[2] & 0xf0) == 0xf0)
+ printk(KERN_DEBUG " ROM Addr.=off,");
+ else
+ printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
+
+ printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
+ }
/* check I/O region */
- if (!request_region(port, IM_N_IO_PORT, hostname)) {
+ if (!request_region(port, IM_N_IO_PORT, description)) {
printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
- return NULL;
+ goto out_fail;
}
/* register host */
- shpnt = scsi_register(scsi_template, sizeof(struct ibmmca_hostdata));
+ shpnt = scsi_host_alloc(&ibmmca_driver_template,
+ sizeof(struct ibmmca_hostdata));
if (!shpnt) {
printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
- release_region(port, IM_N_IO_PORT);
- return NULL;
+ goto out_release;
+ }
+
+ dev_set_drvdata(dev, shpnt);
+ if(request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
+ printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
+ goto out_free_host;
}
/* request I/O region */
- hosts[found] = shpnt; /* add new found hostadapter to the list */
- special(found) = adaptertype; /* important assignment or else crash! */
- subsystem_connector_size(found) = 0; /* preset slot-size */
- shpnt->irq = IM_IRQ; /* assign necessary stuff for the adapter */
+ special(shpnt) = mca_dev->index; /* important assignment or else crash! */
+ subsystem_connector_size(shpnt) = 0; /* preset slot-size */
+ shpnt->irq = irq; /* assign necessary stuff for the adapter */
shpnt->io_port = port;
shpnt->n_io_port = IM_N_IO_PORT;
shpnt->this_id = id;
shpnt->max_id = 8; /* 8 PUNs are default */
/* now, the SCSI-subsystem is connected to Linux */
- ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
#ifdef IM_DEBUG_PROBE
+ ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
printk("IBM MCA SCSI: This adapters' POS-registers: ");
for (i = 0; i < 8; i++)
printk("%x ", pos[i]);
printk("\n");
#endif
- reset_status(found) = IM_RESET_NOT_IN_PROGRESS;
+ reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
for (i = 0; i < 16; i++) /* reset the tables */
for (j = 0; j < 8; j++)
- get_ldn(found)[i][j] = MAX_LOG_DEV;
+ get_ldn(shpnt)[i][j] = MAX_LOG_DEV;
/* check which logical devices exist */
/* after this line, local interrupting is possible: */
- local_checking_phase_flag(found) = 1;
- check_devices(found, adaptertype); /* call by value, using the global variable hosts */
- local_checking_phase_flag(found) = 0;
- found++; /* now increase index to be prepared for next found subsystem */
+ local_checking_phase_flag(shpnt) = 1;
+ check_devices(shpnt, mca_dev->index); /* probe the devices attached to this host */
+ local_checking_phase_flag(shpnt) = 0;
+
/* an ibm mca subsystem has been detected */
- return shpnt;
+
+ for (k = 2; k < 7; k++)
+ ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
+ ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
+ mca_device_set_name(mca_dev, description);
+ /* FIXME: NEED TO REPLUMB TO SYSFS
+ mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
+ */
+ mca_device_set_claim(mca_dev, 1);
+ if (scsi_add_host(shpnt, dev)) {
+ dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
+ goto out_free_host;
+ }
+ scsi_scan_host(shpnt);
+
+ return 0;
+ out_free_host:
+ scsi_host_put(shpnt);
+ out_release:
+ release_region(port, IM_N_IO_PORT);
+ out_fail:
+ return ret;
}
-static int ibmmca_release(struct Scsi_Host *shpnt)
+static int __devexit ibmmca_remove(struct device *dev)
{
+ struct Scsi_Host *shpnt = dev_get_drvdata(dev);
+ scsi_remove_host(shpnt);
release_region(shpnt->io_port, shpnt->n_io_port);
- if (!(--found))
- free_irq(shpnt->irq, hosts);
+ free_irq(shpnt->irq, dev);
return 0;
}
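
The two functions above replace the old detect()/release() entry points with a driver-model probe()/remove() pair: allocate with scsi_host_alloc(), stash the host in the struct device with dev_set_drvdata(), register it with scsi_add_host() and scsi_scan_host(), and tear everything down in reverse order in remove(). A stripped-down sketch of that pattern with simplified error unwinding; my_template, my_irq and the port/IRQ constants are placeholders, not this driver's symbols:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <scsi/scsi_host.h>

struct my_hostdata { int this_id; };			/* illustrative private data */

static struct scsi_host_template my_template = {
	.name = "my_scsi",				/* other methods omitted */
};

static irqreturn_t my_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev)
{
	struct Scsi_Host *sh;
	int err = -ENOMEM;

	if (!request_region(0x3540, 8, "my_scsi"))	/* claim the I/O window */
		return -EBUSY;

	sh = scsi_host_alloc(&my_template, sizeof(struct my_hostdata));
	if (!sh)
		goto out_region;
	dev_set_drvdata(dev, sh);			/* remove() finds the host here */

	if (request_irq(14, my_irq, IRQF_SHARED, "my_scsi", dev)) {
		err = -EBUSY;
		goto out_put;
	}

	err = scsi_add_host(sh, dev);			/* hand the host to the midlayer */
	if (err)
		goto out_irq;
	scsi_scan_host(sh);
	return 0;

out_irq:
	free_irq(14, dev);
out_put:
	scsi_host_put(sh);
out_region:
	release_region(0x3540, 8);
	return err;
}

static int my_remove(struct device *dev)
{
	struct Scsi_Host *sh = dev_get_drvdata(dev);

	scsi_remove_host(sh);				/* undo scsi_add_host() */
	free_irq(14, dev);
	release_region(0x3540, 8);
	scsi_host_put(sh);				/* drop the allocation reference */
	return 0;
}
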
@@ -1805,33 +1706,24 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
int current_ldn;
int id, lun;
int target;
- int host_index;
int max_pun;
int i;
- struct scatterlist *sl;
+ struct scatterlist *sg;
shpnt = cmd->device->host;
- /* search for the right hostadapter */
- for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
- if (!hosts[host_index]) { /* invalid hostadapter descriptor address */
- cmd->result = DID_NO_CONNECT << 16;
- if (done)
- done(cmd);
- return 0;
- }
- max_pun = subsystem_maxid(host_index);
+ max_pun = subsystem_maxid(shpnt);
if (ibm_ansi_order) {
target = max_pun - 1 - cmd->device->id;
- if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index)))
+ if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
target--;
- else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index)))
+ else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
target++;
} else
target = cmd->device->id;
/* if (target,lun) is NO LUN or not existing at all, return error */
- if ((get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
+ if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
cmd->result = DID_NO_CONNECT << 16;
if (done)
done(cmd);
@@ -1839,16 +1731,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
}
/*if (target,lun) unassigned, do further checks... */
- ldn = get_ldn(host_index)[target][cmd->device->lun];
+ ldn = get_ldn(shpnt)[target][cmd->device->lun];
if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */
if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */
- current_ldn = next_ldn(host_index); /* stop-value for one circle */
- while (ld(host_index)[next_ldn(host_index)].cmd) { /* search for a occupied, but not in */
+ current_ldn = next_ldn(shpnt); /* stop-value for one circle */
+ while (ld(shpnt)[next_ldn(shpnt)].cmd) { /* search for a occupied, but not in */
/* command-processing ldn. */
- next_ldn(host_index)++;
- if (next_ldn(host_index) >= MAX_LOG_DEV)
- next_ldn(host_index) = 7;
- if (current_ldn == next_ldn(host_index)) { /* One circle done ? */
+ next_ldn(shpnt)++;
+ if (next_ldn(shpnt) >= MAX_LOG_DEV)
+ next_ldn(shpnt) = 7;
+ if (current_ldn == next_ldn(shpnt)) { /* One circle done ? */
/* no non-processing ldn found */
scmd_printk(KERN_WARNING, cmd,
"IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
@@ -1864,56 +1756,56 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* unmap non-processing ldn */
for (id = 0; id < max_pun; id++)
for (lun = 0; lun < 8; lun++) {
- if (get_ldn(host_index)[id][lun] == next_ldn(host_index)) {
- get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE;
- get_scsi(host_index)[id][lun] = TYPE_NO_DEVICE;
+ if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
+ get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
+ get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
/* unmap entry */
}
}
/* set reduced interrupt_handler-mode for checking */
- local_checking_phase_flag(host_index) = 1;
+ local_checking_phase_flag(shpnt) = 1;
/* map found ldn to pun,lun */
- get_ldn(host_index)[target][cmd->device->lun] = next_ldn(host_index);
+ get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
/* change ldn to the right value, that is now next_ldn */
- ldn = next_ldn(host_index);
+ ldn = next_ldn(shpnt);
/* unassign all ldns (pun,lun,ldn does not matter for remove) */
- immediate_assign(host_index, 0, 0, 0, REMOVE_LDN);
+ immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
/* set only LDN for remapped device */
- immediate_assign(host_index, target, cmd->device->lun, ldn, SET_LDN);
+ immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
/* get device information for ld[ldn] */
- if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) {
- ld(host_index)[ldn].cmd = NULL; /* To prevent panic set 0, because
+ if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
+ ld(shpnt)[ldn].cmd = NULL; /* To prevent panic set 0, because
devices that were not assigned,
should have nothing in progress. */
- get_scsi(host_index)[target][cmd->device->lun] = ld(host_index)[ldn].device_type;
+ get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
/* increase assignment counters for statistics in /proc */
- IBM_DS(host_index).dynamical_assignments++;
- IBM_DS(host_index).ldn_assignments[ldn]++;
+ IBM_DS(shpnt).dynamical_assignments++;
+ IBM_DS(shpnt).ldn_assignments[ldn]++;
} else
/* panic here, because a device, found at boottime has
vanished */
panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
/* unassign again all ldns (pun,lun,ldn does not matter for remove) */
- immediate_assign(host_index, 0, 0, 0, REMOVE_LDN);
+ immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
/* remap all ldns, as written in the pun/lun table */
lun = 0;
#ifdef CONFIG_SCSI_MULTI_LUN
for (lun = 0; lun < 8; lun++)
#endif
for (id = 0; id < max_pun; id++) {
- if (get_ldn(host_index)[id][lun] <= MAX_LOG_DEV)
- immediate_assign(host_index, id, lun, get_ldn(host_index)[id][lun], SET_LDN);
+ if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
+ immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
}
/* set back to normal interrupt_handling */
- local_checking_phase_flag(host_index) = 0;
+ local_checking_phase_flag(shpnt) = 0;
#ifdef IM_DEBUG_PROBE
/* Information on syslog terminal */
printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
#endif
/* increase next_ldn for next dynamical assignment */
- next_ldn(host_index)++;
- if (next_ldn(host_index) >= MAX_LOG_DEV)
- next_ldn(host_index) = 7;
+ next_ldn(shpnt)++;
+ if (next_ldn(shpnt) >= MAX_LOG_DEV)
+ next_ldn(shpnt) = 7;
} else { /* wall against Linux accesses to the subsystem adapter */
cmd->result = DID_BAD_TARGET << 16;
if (done)
@@ -1923,34 +1815,32 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
}
/*verify there is no command already in progress for this log dev */
- if (ld(host_index)[ldn].cmd)
+ if (ld(shpnt)[ldn].cmd)
panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");
/*save done in cmd, and save cmd for the interrupt handler */
cmd->scsi_done = done;
- ld(host_index)[ldn].cmd = cmd;
+ ld(shpnt)[ldn].cmd = cmd;
/*fill scb information independent of the scsi command */
- scb = &(ld(host_index)[ldn].scb);
- ld(host_index)[ldn].tsb.dev_status = 0;
+ scb = &(ld(shpnt)[ldn].scb);
+ ld(shpnt)[ldn].tsb.dev_status = 0;
scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
- scb->tsb_adr = isa_virt_to_bus(&(ld(host_index)[ldn].tsb));
+ scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
scsi_cmd = cmd->cmnd[0];
- if (cmd->use_sg) {
- i = cmd->use_sg;
- sl = (struct scatterlist *) (cmd->request_buffer);
- if (i > 16)
- panic("IBM MCA SCSI: scatter-gather list too long.\n");
- while (--i >= 0) {
- ld(host_index)[ldn].sge[i].address = (void *) (isa_page_to_bus(sl[i].page) + sl[i].offset);
- ld(host_index)[ldn].sge[i].byte_length = sl[i].length;
+ if (scsi_sg_count(cmd)) {
+ BUG_ON(scsi_sg_count(cmd) > 16);
+
+ scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
+ ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
+ ld(shpnt)[ldn].sge[i].byte_length = sg->length;
}
scb->enable |= IM_POINTER_TO_LIST;
- scb->sys_buf_adr = isa_virt_to_bus(&(ld(host_index)[ldn].sge[0]));
- scb->sys_buf_length = cmd->use_sg * sizeof(struct im_sge);
+ scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
+ scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
} else {
- scb->sys_buf_adr = isa_virt_to_bus(cmd->request_buffer);
+ scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
/* recent Linux midlevel SCSI places 1024 byte for inquiry
* command. Far too much for old PS/2 hardware. */
switch (scsi_cmd) {
@@ -1961,16 +1851,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
case REQUEST_SENSE:
case MODE_SENSE:
case MODE_SELECT:
- if (cmd->request_bufflen > 255)
+ if (scsi_bufflen(cmd) > 255)
scb->sys_buf_length = 255;
else
- scb->sys_buf_length = cmd->request_bufflen;
+ scb->sys_buf_length = scsi_bufflen(cmd);
break;
case TEST_UNIT_READY:
scb->sys_buf_length = 0;
break;
default:
- scb->sys_buf_length = cmd->request_bufflen;
+ scb->sys_buf_length = scsi_bufflen(cmd);
break;
}
}
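
The queuecommand changes above drop the removed cmd->use_sg, cmd->request_buffer and cmd->request_bufflen fields in favour of the scsi_sg_count()/scsi_sglist()/scsi_bufflen() accessors and the scsi_for_each_sg() iterator. The shape of the conversion, reduced to a sketch (MAX_SGE, build_sge() and use_linear_buffer() are stand-ins for this driver's scatter/gather plumbing):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

#define MAX_SGE 16						/* illustrative limit */

static void build_sge(int i, struct scatterlist *sg) { }	/* stub */
static void use_linear_buffer(void *buf, unsigned int len) { }	/* stub */

static void fill_data_buffer(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg = scsi_sg_count(cmd);		/* replaces cmd->use_sg */

	if (nseg) {					/* scatter/gather transfer */
		BUG_ON(nseg > MAX_SGE);
		scsi_for_each_sg(cmd, sg, nseg, i)
			build_sge(i, sg);		/* one descriptor per segment */
	} else {					/* linear buffer (or no data) */
		use_linear_buffer(scsi_sglist(cmd),	/* replaces request_buffer */
				  scsi_bufflen(cmd));	/* replaces request_bufflen */
	}
}
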
@@ -1982,16 +1872,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* for specific device-type debugging: */
#ifdef IM_DEBUG_CMD_SPEC_DEV
- if (ld(host_index)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
- printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(host_index)[ldn].device_type, scsi_cmd, ldn);
+ if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
+ printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
#endif
/* for possible panics store current command */
- last_scsi_command(host_index)[ldn] = scsi_cmd;
- last_scsi_type(host_index)[ldn] = IM_SCB;
+ last_scsi_command(shpnt)[ldn] = scsi_cmd;
+ last_scsi_type(shpnt)[ldn] = IM_SCB;
/* update statistical info */
- IBM_DS(host_index).total_accesses++;
- IBM_DS(host_index).ldn_access[ldn]++;
+ IBM_DS(shpnt).total_accesses++;
+ IBM_DS(shpnt).ldn_access[ldn]++;
switch (scsi_cmd) {
case READ_6:
@@ -2003,17 +1893,17 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* Distinguish between disk and other devices. Only disks (that are the
most frequently accessed devices) should be supported by the
IBM-SCSI-Subsystem commands. */
- switch (ld(host_index)[ldn].device_type) {
+ switch (ld(shpnt)[ldn].device_type) {
case TYPE_DISK: /* for harddisks enter here ... */
case TYPE_MOD: /* ... try it also for MO-drives (send flames as */
/* you like, if this won't work.) */
if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
/* read command preparations */
scb->enable |= IM_READ_CONTROL;
- IBM_DS(host_index).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
+ IBM_DS(shpnt).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
} else { /* write command preparations */
- IBM_DS(host_index).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
+ IBM_DS(shpnt).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
}
if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
@@ -2023,9 +1913,9 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
}
- last_scsi_logical_block(host_index)[ldn] = scb->u1.log_blk_adr;
- last_scsi_blockcount(host_index)[ldn] = scb->u2.blk.count;
- scb->u2.blk.length = ld(host_index)[ldn].block_length;
+ last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
+ last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
+ scb->u2.blk.length = ld(shpnt)[ldn].block_length;
break;
/* for other devices, enter here. Other types are not known by
Linux! TYPE_NO_LUN is forbidden as valid device. */
@@ -2046,14 +1936,14 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->enable |= IM_BYPASS_BUFFER;
scb->u1.scsi_cmd_length = cmd->cmd_len;
memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
/* Read/write on this non-disk devices is also displayworthy,
so flash-up the LED/display. */
break;
}
break;
case INQUIRY:
- IBM_DS(host_index).ldn_inquiry_access[ldn]++;
+ IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
scb->command = IM_DEVICE_INQUIRY_CMD;
scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
scb->u1.log_blk_adr = 0;
@@ -2064,7 +1954,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->u1.log_blk_adr = 0;
scb->u1.scsi_cmd_length = 6;
memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
break;
case READ_CAPACITY:
/* the length of system memory buffer must be exactly 8 bytes */
@@ -2081,12 +1971,12 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
/* Commands that need write-only-mode (system -> device): */
case MODE_SELECT:
case MODE_SELECT_10:
- IBM_DS(host_index).ldn_modeselect_access[ldn]++;
+ IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
scb->command = IM_OTHER_SCSI_CMD_CMD;
scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */
scb->u1.scsi_cmd_length = cmd->cmd_len;
memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
break;
/* For other commands, read-only is useful. Most other commands are
running without an input-data-block. */
@@ -2095,19 +1985,19 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
scb->u1.scsi_cmd_length = cmd->cmd_len;
memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
- last_scsi_type(host_index)[ldn] = IM_LONG_SCB;
+ last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
break;
}
/*issue scb command, and return */
if (++disk_rw_in_progress == 1)
PS2_DISK_LED_ON(shpnt->host_no, target);
- if (last_scsi_type(host_index)[ldn] == IM_LONG_SCB) {
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
- IBM_DS(host_index).long_scbs++;
+ if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
+ IBM_DS(shpnt).long_scbs++;
} else {
- issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
- IBM_DS(host_index).scbs++;
+ issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+ IBM_DS(shpnt).scbs++;
}
return 0;
}
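
Throughout this function the global hosts[] array and its host_index are gone; the per-adapter tables now live in the Scsi_Host's hostdata and are reached through the driver's accessor macros, keyed on the shpnt taken from cmd->device->host. The underlying idiom is only a cast of the hostdata area, roughly as below (the struct fields and exact macro bodies are an illustrative guess, not the driver's real definitions):

#include <scsi/scsi_host.h>

struct example_hostdata {				/* illustrative subset */
	int next_ldn;
	int reset_status;
};

#define EX_HD(shpnt)		((struct example_hostdata *)(shpnt)->hostdata)
#define next_ldn(shpnt)		(EX_HD(shpnt)->next_ldn)
#define reset_status(shpnt)	(EX_HD(shpnt)->reset_status)
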
@@ -2122,7 +2012,6 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
unsigned int ldn;
void (*saved_done) (Scsi_Cmnd *);
int target;
- int host_index;
int max_pun;
unsigned long imm_command;
@@ -2131,35 +2020,23 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
#endif
shpnt = cmd->device->host;
- /* search for the right hostadapter */
- for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
- if (!hosts[host_index]) { /* invalid hostadapter descriptor address */
- cmd->result = DID_NO_CONNECT << 16;
- if (cmd->scsi_done)
- (cmd->scsi_done) (cmd);
- shpnt = cmd->device->host;
-#ifdef IM_DEBUG_PROBE
- printk(KERN_DEBUG "IBM MCA SCSI: Abort adapter selection failed!\n");
-#endif
- return SUCCESS;
- }
- max_pun = subsystem_maxid(host_index);
+ max_pun = subsystem_maxid(shpnt);
if (ibm_ansi_order) {
target = max_pun - 1 - cmd->device->id;
- if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index)))
+ if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
target--;
- else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index)))
+ else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
target++;
} else
target = cmd->device->id;
/* get logical device number, and disable system interrupts */
printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
- ldn = get_ldn(host_index)[target][cmd->device->lun];
+ ldn = get_ldn(shpnt)[target][cmd->device->lun];
/*if cmd for this ldn has already finished, no need to abort */
- if (!ld(host_index)[ldn].cmd) {
+ if (!ld(shpnt)[ldn].cmd) {
return SUCCESS;
}
@@ -2170,20 +2047,20 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
saved_done = cmd->scsi_done;
cmd->scsi_done = internal_done;
cmd->SCp.Status = 0;
- last_scsi_command(host_index)[ldn] = IM_ABORT_IMM_CMD;
- last_scsi_type(host_index)[ldn] = IM_IMM_CMD;
- imm_command = inl(IM_CMD_REG(host_index));
+ last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
+ last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
+ imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
/* must wait for attention reg not busy */
/* FIXME - timeout, politeness */
while (1) {
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
}
/* write registers and enable system interrupts */
- outl(imm_command, IM_CMD_REG(host_index));
- outb(IM_IMM_CMD | ldn, IM_ATTN_REG(host_index));
+ outl(imm_command, IM_CMD_REG(shpnt));
+ outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort queued to adapter...\n");
#endif
@@ -2202,7 +2079,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
cmd->result |= DID_ABORT << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
- ld(host_index)[ldn].cmd = NULL;
+ ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort finished with success.\n");
#endif
@@ -2211,7 +2088,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
cmd->result |= DID_NO_CONNECT << 16;
if (cmd->scsi_done)
(cmd->scsi_done) (cmd);
- ld(host_index)[ldn].cmd = NULL;
+ ld(shpnt)[ldn].cmd = NULL;
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort failed.\n");
#endif
@@ -2236,71 +2113,65 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
struct Scsi_Host *shpnt;
Scsi_Cmnd *cmd_aid;
int ticks, i;
- int host_index;
unsigned long imm_command;
BUG_ON(cmd == NULL);
ticks = IM_RESET_DELAY * HZ;
shpnt = cmd->device->host;
- /* search for the right hostadapter */
- for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
-
- if (!hosts[host_index]) /* invalid hostadapter descriptor address */
- return FAILED;
- if (local_checking_phase_flag(host_index)) {
+ if (local_checking_phase_flag(shpnt)) {
printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
return FAILED;
}
/* issue reset immediate command to subsystem, and wait for interrupt */
printk("IBM MCA SCSI: resetting all devices.\n");
- reset_status(host_index) = IM_RESET_IN_PROGRESS;
- last_scsi_command(host_index)[0xf] = IM_RESET_IMM_CMD;
- last_scsi_type(host_index)[0xf] = IM_IMM_CMD;
- imm_command = inl(IM_CMD_REG(host_index));
+ reset_status(shpnt) = IM_RESET_IN_PROGRESS;
+ last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
+ last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
+ imm_command = inl(IM_CMD_REG(shpnt));
imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
/* must wait for attention reg not busy */
while (1) {
- if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+ if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
break;
spin_unlock_irq(shpnt->host_lock);
yield();
spin_lock_irq(shpnt->host_lock);
}
/*write registers and enable system interrupts */
- outl(imm_command, IM_CMD_REG(host_index));
- outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(host_index));
+ outl(imm_command, IM_CMD_REG(shpnt));
+ outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
/* wait for interrupt finished or intr_stat register to be set, as the
* interrupt will not be executed, while we are in here! */
/* FIXME: This is really really icky we so want a sleeping version of this ! */
- while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(host_index)) & 0x8f) != 0x8f)) {
+ while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
udelay((1 + 999 / HZ) * 1000);
barrier();
}
/* if reset did not complete, just return an error */
if (!ticks) {
printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
- reset_status(host_index) = IM_RESET_FINISHED_FAIL;
+ reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
return FAILED;
}
- if ((inb(IM_INTR_REG(host_index)) & 0x8f) == 0x8f) {
+ if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
/* analysis done by this routine and not by the intr-routine */
- if (inb(IM_INTR_REG(host_index)) == 0xaf)
- reset_status(host_index) = IM_RESET_FINISHED_OK_NO_INT;
- else if (inb(IM_INTR_REG(host_index)) == 0xcf)
- reset_status(host_index) = IM_RESET_FINISHED_FAIL;
+ if (inb(IM_INTR_REG(shpnt)) == 0xaf)
+ reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
+ else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
+ reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
else /* failed, 4get it */
- reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
- outb(IM_EOI | 0xf, IM_ATTN_REG(host_index));
+ reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
+ outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
}
/* if reset failed, just return an error */
- if (reset_status(host_index) == IM_RESET_FINISHED_FAIL) {
+ if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
return FAILED;
}
@@ -2308,9 +2179,9 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
/* so reset finished ok - call outstanding done's, and return success */
printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
for (i = 0; i < MAX_LOG_DEV; i++) {
- cmd_aid = ld(host_index)[i].cmd;
+ cmd_aid = ld(shpnt)[i].cmd;
if (cmd_aid && cmd_aid->scsi_done) {
- ld(host_index)[i].cmd = NULL;
+ ld(shpnt)[i].cmd = NULL;
cmd_aid->result = DID_RESET << 16;
}
}
@@ -2351,46 +2222,46 @@ static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev,
}
/* calculate percentage of total accesses on a ldn */
-static int ldn_access_load(int host_index, int ldn)
+static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
{
- if (IBM_DS(host_index).total_accesses == 0)
+ if (IBM_DS(shpnt).total_accesses == 0)
return (0);
- if (IBM_DS(host_index).ldn_access[ldn] == 0)
+ if (IBM_DS(shpnt).ldn_access[ldn] == 0)
return (0);
- return (IBM_DS(host_index).ldn_access[ldn] * 100) / IBM_DS(host_index).total_accesses;
+ return (IBM_DS(shpnt).ldn_access[ldn] * 100) / IBM_DS(shpnt).total_accesses;
}
/* calculate total amount of r/w-accesses */
-static int ldn_access_total_read_write(int host_index)
+static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
{
int a;
int i;
a = 0;
for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(host_index).ldn_read_access[i] + IBM_DS(host_index).ldn_write_access[i];
+ a += IBM_DS(shpnt).ldn_read_access[i] + IBM_DS(shpnt).ldn_write_access[i];
return (a);
}
-static int ldn_access_total_inquiry(int host_index)
+static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
{
int a;
int i;
a = 0;
for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(host_index).ldn_inquiry_access[i];
+ a += IBM_DS(shpnt).ldn_inquiry_access[i];
return (a);
}
-static int ldn_access_total_modeselect(int host_index)
+static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
{
int a;
int i;
a = 0;
for (i = 0; i <= MAX_LOG_DEV; i++)
- a += IBM_DS(host_index).ldn_modeselect_access[i];
+ a += IBM_DS(shpnt).ldn_modeselect_access[i];
return (a);
}
@@ -2398,19 +2269,14 @@ static int ldn_access_total_modeselect(int host_index)
static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
{
int len = 0;
- int i, id, lun, host_index;
+ int i, id, lun;
unsigned long flags;
int max_pun;
- for (i = 0; hosts[i] && hosts[i] != shpnt; i++);
- spin_lock_irqsave(hosts[i]->host_lock, flags); /* Check it */
- host_index = i;
- if (!shpnt) {
- len += sprintf(buffer + len, "\nIBM MCA SCSI: Can't find adapter");
- return len;
- }
- max_pun = subsystem_maxid(host_index);
+ spin_lock_irqsave(shpnt->host_lock, flags); /* Check it */
+
+ max_pun = subsystem_maxid(shpnt);
len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
@@ -2421,40 +2287,40 @@ static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start,
len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
#endif
len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
- len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(host_index)));
+ len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
- len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(host_index).total_interrupts);
- len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(host_index).total_accesses);
- len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(host_index).scbs);
- len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(host_index).long_scbs);
- len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(host_index));
- len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(host_index));
- len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(host_index));
- len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(host_index).total_accesses - ldn_access_total_read_write(host_index)
- - ldn_access_total_modeselect(host_index)
- - ldn_access_total_inquiry(host_index));
- len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(host_index).total_errors);
+ len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
+ len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
+ len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
+ len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
+ len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
+ len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
+ len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
+ len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
+ - ldn_access_total_modeselect(shpnt)
+ - ldn_access_total_inquiry(shpnt));
+ len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
for (i = 0; i <= MAX_LOG_DEV; i++)
- len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(host_index, i), IBM_DS(host_index).ldn_read_access[i], IBM_DS(host_index).ldn_write_access[i], IBM_DS(host_index).ldn_assignments[i]);
+ len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
- len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(host_index).total_scsi_devices);
- len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(host_index).dyn_flag ? "Yes" : "No ");
- len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(host_index));
- len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(host_index).dynamical_assignments);
+ len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
+ len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
+ len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
+ len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
for (id = 0; id < max_pun; id++) {
len += sprintf(buffer + len, " %2d ", id);
for (lun = 0; lun < 8; lun++)
- len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(host_index)[id][lun]));
+ len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
len += sprintf(buffer + len, " %2d ", id);
for (lun = 0; lun < 8; lun++)
- len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(host_index)[id][lun]));
+ len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
len += sprintf(buffer + len, "\n");
}
@@ -2488,20 +2354,31 @@ static int option_setup(char *str)
__setup("ibmmcascsi=", option_setup);
-static struct scsi_host_template driver_template = {
- .proc_name = "ibmmca",
- .proc_info = ibmmca_proc_info,
- .name = "IBM SCSI-Subsystem",
- .detect = ibmmca_detect,
- .release = ibmmca_release,
- .queuecommand = ibmmca_queuecommand,
- .eh_abort_handler = ibmmca_abort,
- .eh_host_reset_handler = ibmmca_host_reset,
- .bios_param = ibmmca_biosparam,
- .can_queue = 16,
- .this_id = 7,
- .sg_tablesize = 16,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
+static struct mca_driver ibmmca_driver = {
+ .id_table = ibmmca_id_table,
+ .driver = {
+ .name = "ibmmca",
+ .bus = &mca_bus_type,
+ .probe = ibmmca_probe,
+ .remove = __devexit_p(ibmmca_remove),
+ },
};
-#include "scsi_module.c"
+
+static int __init ibmmca_init(void)
+{
+#ifdef MODULE
+ /* If the driver is run as module, read from conf.modules or cmd-line */
+ if (boot_options)
+ option_setup(boot_options);
+#endif
+
+ return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
+}
+
+static void __exit ibmmca_exit(void)
+{
+ mca_unregister_driver(&ibmmca_driver);
+}
+
+module_init(ibmmca_init);
+module_exit(ibmmca_exit);
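
The module glue above replaces the old '#include "scsi_module.c"' boilerplate. Built-in kernels still get option parsing through the existing __setup("ibmmcascsi=", ...) hook, while a modular build feeds the boot_options parameter string through option_setup() before registering on the MCA bus. A generic sketch of that dual parsing path (my_opts, my_setup and "mydrv=" are placeholders):

#include <linux/init.h>
#include <linux/module.h>

static char *my_opts;					/* module parameter string */
module_param(my_opts, charp, 0);

static int my_setup(char *str)				/* shared option parser */
{
	/* parse str and fill in driver defaults */
	return 1;
}
__setup("mydrv=", my_setup);

static int __init my_init(void)
{
#ifdef MODULE
	if (my_opts)					/* e.g. modprobe mydrv my_opts=... */
		my_setup(my_opts);
#endif
	return 0;					/* then register with the bus */
}
module_init(my_init);
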
diff --git a/drivers/scsi/ibmmca.h b/drivers/scsi/ibmmca.h
deleted file mode 100644
index 017ee2fa6d63..000000000000
--- a/drivers/scsi/ibmmca.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Low Level Driver for the IBM Microchannel SCSI Subsystem
- * (Headerfile, see Documentation/scsi/ibmmca.txt for description of the
- * IBM MCA SCSI-driver.
- * For use under the GNU General Public License within the Linux-kernel project.
- * This include file works only correctly with kernel 2.4.0 or higher!!! */
-
-#ifndef _IBMMCA_H
-#define _IBMMCA_H
-
-/* Common forward declarations for all Linux-versions: */
-
-/* Interfaces to the midlevel Linux SCSI driver */
-static int ibmmca_detect (struct scsi_host_template *);
-static int ibmmca_release (struct Scsi_Host *);
-static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
-static int ibmmca_abort (Scsi_Cmnd *);
-static int ibmmca_host_reset (Scsi_Cmnd *);
-static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
-
-#endif /* _IBMMCA_H */
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b10eefe735c5..5870866abc99 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -173,9 +173,8 @@ static void release_event_pool(struct event_pool *pool,
}
}
if (in_use)
- printk(KERN_WARNING
- "ibmvscsi: releasing event pool with %d "
- "events still in use?\n", in_use);
+ dev_warn(hostdata->dev, "releasing event pool with %d "
+ "events still in use?\n", in_use);
kfree(pool->events);
dma_free_coherent(hostdata->dev,
pool->size * sizeof(*pool->iu_storage),
@@ -210,15 +209,13 @@ static void free_event_struct(struct event_pool *pool,
struct srp_event_struct *evt)
{
if (!valid_event_struct(pool, evt)) {
- printk(KERN_ERR
- "ibmvscsi: Freeing invalid event_struct %p "
- "(not in pool %p)\n", evt, pool->events);
+ dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
+ "(not in pool %p)\n", evt, pool->events);
return;
}
if (atomic_inc_return(&evt->free) != 1) {
- printk(KERN_ERR
- "ibmvscsi: Freeing event_struct %p "
- "which is not in use!\n", evt);
+ dev_err(evt->hostdata->dev, "Freeing event_struct %p "
+ "which is not in use!\n", evt);
return;
}
}
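
From here on the ibmvscsi hunks convert bare printk(KERN_*) calls carrying a hand-written "ibmvscsi:" prefix into dev_err()/dev_warn()/dev_info() and sdev_printk(), which tag each message with the owning device automatically. The difference in one line (the surrounding function is illustrative):

#include <linux/device.h>

static void report_leak(struct device *dev, int in_use)
{
	/* was: printk(KERN_WARNING "ibmvscsi: releasing event pool ..."); */
	dev_warn(dev, "releasing event pool with %d events still in use?\n",
		 in_use);
}
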
@@ -353,20 +350,19 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
}
}
-static int map_sg_list(int num_entries,
- struct scatterlist *sg,
+static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
struct srp_direct_buf *md)
{
int i;
+ struct scatterlist *sg;
u64 total_length = 0;
- for (i = 0; i < num_entries; ++i) {
+ scsi_for_each_sg(cmd, sg, nseg, i) {
struct srp_direct_buf *descr = md + i;
- struct scatterlist *sg_entry = &sg[i];
- descr->va = sg_dma_address(sg_entry);
- descr->len = sg_dma_len(sg_entry);
+ descr->va = sg_dma_address(sg);
+ descr->len = sg_dma_len(sg);
descr->key = 0;
- total_length += sg_dma_len(sg_entry);
+ total_length += sg_dma_len(sg);
}
return total_length;
}
@@ -387,40 +383,37 @@ static int map_sg_data(struct scsi_cmnd *cmd,
int sg_mapped;
u64 total_length = 0;
- struct scatterlist *sg = cmd->request_buffer;
struct srp_direct_buf *data =
(struct srp_direct_buf *) srp_cmd->add_data;
struct srp_indirect_buf *indirect =
(struct srp_indirect_buf *) data;
- sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL);
-
- if (sg_mapped == 0)
+ sg_mapped = scsi_dma_map(cmd);
+ if (!sg_mapped)
+ return 1;
+ else if (sg_mapped < 0)
return 0;
+ else if (sg_mapped > SG_ALL) {
+ printk(KERN_ERR
+ "ibmvscsi: More than %d mapped sg entries, got %d\n",
+ SG_ALL, sg_mapped);
+ return 0;
+ }
set_srp_direction(cmd, srp_cmd, sg_mapped);
/* special case; we can use a single direct descriptor */
if (sg_mapped == 1) {
- data->va = sg_dma_address(&sg[0]);
- data->len = sg_dma_len(&sg[0]);
- data->key = 0;
+ map_sg_list(cmd, sg_mapped, data);
return 1;
}
- if (sg_mapped > SG_ALL) {
- printk(KERN_ERR
- "ibmvscsi: More than %d mapped sg entries, got %d\n",
- SG_ALL, sg_mapped);
- return 0;
- }
-
indirect->table_desc.va = 0;
indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
indirect->table_desc.key = 0;
if (sg_mapped <= MAX_INDIRECT_BUFS) {
- total_length = map_sg_list(sg_mapped, sg,
+ total_length = map_sg_list(cmd, sg_mapped,
&indirect->desc_list[0]);
indirect->len = total_length;
return 1;
@@ -429,61 +422,27 @@ static int map_sg_data(struct scsi_cmnd *cmd,
/* get indirect table */
if (!evt_struct->ext_list) {
evt_struct->ext_list = (struct srp_direct_buf *)
- dma_alloc_coherent(dev,
+ dma_alloc_coherent(dev,
SG_ALL * sizeof(struct srp_direct_buf),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
- printk(KERN_ERR
- "ibmvscsi: Can't allocate memory for indirect table\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't allocate memory for indirect table\n");
return 0;
-
}
}
- total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
+ total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
indirect->len = total_length;
indirect->table_desc.va = evt_struct->ext_list_token;
indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
memcpy(indirect->desc_list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
-
return 1;
}
/**
- * map_single_data: - Maps memory and initializes memory decriptor fields
- * @cmd: struct scsi_cmnd with the memory to be mapped
- * @srp_cmd: srp_cmd that contains the memory descriptor
- * @dev: device for which to map dma memory
- *
- * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
- * Returns 1 on success.
-*/
-static int map_single_data(struct scsi_cmnd *cmd,
- struct srp_cmd *srp_cmd, struct device *dev)
-{
- struct srp_direct_buf *data =
- (struct srp_direct_buf *) srp_cmd->add_data;
-
- data->va =
- dma_map_single(dev, cmd->request_buffer,
- cmd->request_bufflen,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(data->va)) {
- printk(KERN_ERR
- "ibmvscsi: Unable to map request_buffer for command!\n");
- return 0;
- }
- data->len = cmd->request_bufflen;
- data->key = 0;
-
- set_srp_direction(cmd, srp_cmd, 1);
-
- return 1;
-}
-
-/**
* map_data_for_srp_cmd: - Calls functions to map data for srp cmds
* @cmd: struct scsi_cmnd with the memory to be mapped
* @srp_cmd: srp_cmd that contains the memory descriptor
@@ -503,23 +462,83 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
case DMA_NONE:
return 1;
case DMA_BIDIRECTIONAL:
- printk(KERN_ERR
- "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "Can't map DMA_BIDIRECTIONAL to read/write\n");
return 0;
default:
- printk(KERN_ERR
- "ibmvscsi: Unknown data direction 0x%02x; can't map!\n",
- cmd->sc_data_direction);
+ sdev_printk(KERN_ERR, cmd->device,
+ "Unknown data direction 0x%02x; can't map!\n",
+ cmd->sc_data_direction);
return 0;
}
- if (!cmd->request_buffer)
- return 1;
- if (cmd->use_sg)
- return map_sg_data(cmd, evt_struct, srp_cmd, dev);
- return map_single_data(cmd, srp_cmd, dev);
+ return map_sg_data(cmd, evt_struct, srp_cmd, dev);
}
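
Because the midlayer now presents even single buffers as a one-entry scatterlist, map_single_data() is gone and map_sg_data() maps through scsi_dma_map() instead of calling dma_map_sg() directly. The return convention the hunk depends on: 0 means the command carries no data, a negative value means the mapping failed, and a positive value is the number of mapped segments (undone later with scsi_dma_unmap()). A hedged sketch of a caller:

#include <scsi/scsi_cmnd.h>

/* sketch only: returns 1 on success, 0 on failure, as in the hunk above */
static int map_data(struct scsi_cmnd *cmd)
{
	int nseg = scsi_dma_map(cmd);

	if (nseg == 0)				/* nothing to transfer */
		return 1;
	if (nseg < 0)				/* dma_map_sg() failed underneath */
		return 0;

	/* build nseg direct or indirect descriptors here ... */

	return 1;
}
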
+/**
+ * purge_requests: Our virtual adapter just shut down. purge any sent requests
+ * @hostdata: the adapter
+ */
+static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+{
+ struct srp_event_struct *tmp_evt, *pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
+ list_del(&tmp_evt->list);
+ del_timer(&tmp_evt->timer);
+ if (tmp_evt->cmnd) {
+ tmp_evt->cmnd->result = (error_code << 16);
+ unmap_cmd_data(&tmp_evt->iu.srp.cmd,
+ tmp_evt,
+ tmp_evt->hostdata->dev);
+ if (tmp_evt->cmnd_done)
+ tmp_evt->cmnd_done(tmp_evt->cmnd);
+ } else if (tmp_evt->done)
+ tmp_evt->done(tmp_evt);
+ free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+ }
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
+ * ibmvscsi_reset_host - Reset the connection to the server
+ * @hostdata: struct ibmvscsi_host_data to reset
+*/
+static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
+{
+ scsi_block_requests(hostdata->host);
+ atomic_set(&hostdata->request_limit, 0);
+
+ purge_requests(hostdata, DID_ERROR);
+ if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
+ (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
+ (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
+ atomic_set(&hostdata->request_limit, -1);
+ dev_err(hostdata->dev, "error after reset\n");
+ }
+
+ scsi_unblock_requests(hostdata->host);
+}
+
+/**
+ * ibmvscsi_timeout - Internal command timeout handler
+ * @evt_struct: struct srp_event_struct that timed out
+ *
+ * Called when an internally generated command times out
+*/
+static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
+{
+ struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+ dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
+ evt_struct->iu.srp.cmd.opcode);
+
+ ibmvscsi_reset_host(hostdata);
+}
+
+
/* ------------------------------------------------------------
* Routines for sending and receiving SRPs
*/
@@ -527,12 +546,14 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
* ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
* @evt_struct: evt_struct to be sent
* @hostdata: ibmvscsi_host_data of host
+ * @timeout: timeout in seconds - 0 means do not time command
*
* Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
* Note that this routine assumes that host_lock is held for synchronization
*/
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
- struct ibmvscsi_host_data *hostdata)
+ struct ibmvscsi_host_data *hostdata,
+ unsigned long timeout)
{
u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
int request_status;
@@ -588,12 +609,20 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
*/
list_add_tail(&evt_struct->list, &hostdata->sent);
+ init_timer(&evt_struct->timer);
+ if (timeout) {
+ evt_struct->timer.data = (unsigned long) evt_struct;
+ evt_struct->timer.expires = jiffies + (timeout * HZ);
+ evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
+ add_timer(&evt_struct->timer);
+ }
+
if ((rc =
ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
list_del(&evt_struct->list);
+ del_timer(&evt_struct->timer);
- printk(KERN_ERR "ibmvscsi: send error %d\n",
- rc);
+ dev_err(hostdata->dev, "send error %d\n", rc);
atomic_inc(&hostdata->request_limit);
goto send_error;
}
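
ibmvscsi_send_srp_event() now takes a timeout in seconds and arms a per-event kernel timer before posting the CRQ, deleting it again on the send-error path shown above (and, symmetrically, when a response comes back). This is the classic init_timer()/add_timer() API of that era; a sketch, with my_event and my_expire as illustrative names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>

struct my_event {
	struct timer_list timer;
};

static void my_expire(unsigned long data)		/* runs if no response arrived */
{
	struct my_event *evt = (struct my_event *)data;

	printk(KERN_ERR "event %p timed out, resetting connection\n", evt);
}

static void arm_timeout(struct my_event *evt, unsigned long seconds)
{
	init_timer(&evt->timer);
	if (seconds) {
		evt->timer.data = (unsigned long)evt;
		evt->timer.expires = jiffies + seconds * HZ;
		evt->timer.function = my_expire;
		add_timer(&evt->timer);
	}
}

static void cancel_timeout(struct my_event *evt)
{
	del_timer(&evt->timer);				/* harmless if it never fired */
}
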
@@ -634,9 +663,8 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (unlikely(rsp->opcode != SRP_RSP)) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: bad SRP RSP type %d\n",
- rsp->opcode);
+ dev_warn(evt_struct->hostdata->dev,
+ "bad SRP RSP type %d\n", rsp->opcode);
}
if (cmnd) {
@@ -650,9 +678,9 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
evt_struct->hostdata->dev);
if (rsp->flags & SRP_RSP_FLAG_DOOVER)
- cmnd->resid = rsp->data_out_res_cnt;
+ scsi_set_resid(cmnd, rsp->data_out_res_cnt);
else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
- cmnd->resid = rsp->data_in_res_cnt;
+ scsi_set_resid(cmnd, rsp->data_in_res_cnt);
}
if (evt_struct->cmnd_done)
@@ -697,7 +725,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
- printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
+ sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -722,7 +750,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
offsetof(struct srp_indirect_buf, desc_list);
}
- return ibmvscsi_send_srp_event(evt_struct, hostdata);
+ return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}
/* ------------------------------------------------------------
@@ -744,16 +772,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
DMA_BIDIRECTIONAL);
if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
- printk("ibmvscsi: error %d getting adapter info\n",
- evt_struct->xfer_iu->mad.adapter_info.common.status);
+ dev_err(hostdata->dev, "error %d getting adapter info\n",
+ evt_struct->xfer_iu->mad.adapter_info.common.status);
} else {
- printk("ibmvscsi: host srp version: %s, "
- "host partition %s (%d), OS %d, max io %u\n",
- hostdata->madapter_info.srp_version,
- hostdata->madapter_info.partition_name,
- hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type,
- hostdata->madapter_info.port_max_txu[0]);
+ dev_info(hostdata->dev, "host srp version: %s, "
+ "host partition %s (%d), OS %d, max io %u\n",
+ hostdata->madapter_info.srp_version,
+ hostdata->madapter_info.partition_name,
+ hostdata->madapter_info.partition_number,
+ hostdata->madapter_info.os_type,
+ hostdata->madapter_info.port_max_txu[0]);
if (hostdata->madapter_info.port_max_txu[0])
hostdata->host->max_sectors =
@@ -761,11 +789,10 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
if (hostdata->madapter_info.os_type == 3 &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
- printk("ibmvscsi: host (Ver. %s) doesn't support large"
- "transfers\n",
- hostdata->madapter_info.srp_version);
- printk("ibmvscsi: limiting scatterlists to %d\n",
- MAX_INDIRECT_BUFS);
+ dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+ hostdata->madapter_info.srp_version);
+ dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+ MAX_INDIRECT_BUFS);
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
}
@@ -784,19 +811,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
struct viosrp_adapter_info *req;
struct srp_event_struct *evt_struct;
+ unsigned long flags;
dma_addr_t addr;
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
- printk(KERN_ERR "ibmvscsi: couldn't allocate an event "
- "for ADAPTER_INFO_REQ!\n");
+ dev_err(hostdata->dev,
+ "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
return;
}
init_event_struct(evt_struct,
adapter_info_rsp,
VIOSRP_MAD_FORMAT,
- init_timeout * HZ);
+ init_timeout);
req = &evt_struct->iu.mad.adapter_info;
memset(req, 0x00, sizeof(*req));
@@ -809,20 +837,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
DMA_BIDIRECTIONAL);
if (dma_mapping_error(req->buffer)) {
- printk(KERN_ERR
- "ibmvscsi: Unable to map request_buffer "
- "for adapter_info!\n");
+ dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
free_event_struct(&hostdata->pool, evt_struct);
return;
}
- if (ibmvscsi_send_srp_event(evt_struct, hostdata)) {
- printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n");
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
+ dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
dma_unmap_single(hostdata->dev,
addr,
sizeof(hostdata->madapter_info),
DMA_BIDIRECTIONAL);
}
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
};
/**
@@ -839,24 +867,23 @@ static void login_rsp(struct srp_event_struct *evt_struct)
case SRP_LOGIN_RSP: /* it worked! */
break;
case SRP_LOGIN_REJ: /* refused! */
- printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n",
- evt_struct->xfer_iu->srp.login_rej.reason);
+ dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
+ evt_struct->xfer_iu->srp.login_rej.reason);
/* Login failed. */
atomic_set(&hostdata->request_limit, -1);
return;
default:
- printk(KERN_ERR
- "ibmvscsi: Invalid login response typecode 0x%02x!\n",
- evt_struct->xfer_iu->srp.login_rsp.opcode);
+ dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
+ evt_struct->xfer_iu->srp.login_rsp.opcode);
/* Login failed. */
atomic_set(&hostdata->request_limit, -1);
return;
}
- printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n");
+ dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
- printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n");
+ dev_err(hostdata->dev, "Invalid request_limit.\n");
/* Now we know what the real request-limit is.
* This value is set rather than added to request_limit because
@@ -885,15 +912,14 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
struct srp_login_req *login;
struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
- printk(KERN_ERR
- "ibmvscsi: couldn't allocate an event for login req!\n");
+ dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
return FAILED;
}
init_event_struct(evt_struct,
login_rsp,
VIOSRP_SRP_FORMAT,
- init_timeout * HZ);
+ init_timeout);
login = &evt_struct->iu.srp.login_req;
memset(login, 0x00, sizeof(struct srp_login_req));
@@ -907,9 +933,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
*/
atomic_set(&hostdata->request_limit, 1);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk("ibmvscsic: sent SRP login\n");
+ dev_info(hostdata->dev, "sent SRP login\n");
return rc;
};
@@ -958,20 +984,20 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
if (!found_evt) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- return FAILED;
+ return SUCCESS;
}
evt = get_event_struct(&hostdata->pool);
if (evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
+ sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
return FAILED;
}
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
- init_timeout * HZ);
+ init_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -982,15 +1008,16 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
tsk_mgmt->task_tag = (u64) found_evt;
- printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
- tsk_mgmt->lun, tsk_mgmt->task_tag);
+ sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
+ tsk_mgmt->lun, tsk_mgmt->task_tag);
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rsp_rc != 0) {
- printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to send abort() event. rc=%d\n", rsp_rc);
return FAILED;
}
@@ -999,9 +1026,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
/* make sure we got a good response */
if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: abort bad SRP RSP type %d\n",
- srp_rsp.srp.rsp.opcode);
+ sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
+ srp_rsp.srp.rsp.opcode);
return FAILED;
}
@@ -1012,10 +1038,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
if (rsp_rc) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: abort code %d for task tag 0x%lx\n",
- rsp_rc,
- tsk_mgmt->task_tag);
+ sdev_printk(KERN_WARNING, cmd->device,
+ "abort code %d for task tag 0x%lx\n",
+ rsp_rc, tsk_mgmt->task_tag);
return FAILED;
}
@@ -1034,15 +1059,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
if (found_evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk(KERN_INFO
- "ibmvscsi: aborted task tag 0x%lx completed\n",
- tsk_mgmt->task_tag);
+ sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+ tsk_mgmt->task_tag);
return SUCCESS;
}
- printk(KERN_INFO
- "ibmvscsi: successfully aborted task tag 0x%lx\n",
- tsk_mgmt->task_tag);
+ sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+ tsk_mgmt->task_tag);
cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list);
@@ -1076,14 +1099,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
evt = get_event_struct(&hostdata->pool);
if (evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
+ sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
return FAILED;
}
init_event_struct(evt,
sync_completion,
VIOSRP_SRP_FORMAT,
- init_timeout * HZ);
+ init_timeout);
tsk_mgmt = &evt->iu.srp.tsk_mgmt;
@@ -1093,15 +1116,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
tsk_mgmt->lun = ((u64) lun) << 48;
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
- printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
- tsk_mgmt->lun);
+ sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+ tsk_mgmt->lun);
evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
- rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
+ rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rsp_rc != 0) {
- printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
+ sdev_printk(KERN_ERR, cmd->device,
+ "failed to send reset event. rc=%d\n", rsp_rc);
return FAILED;
}
@@ -1110,9 +1134,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* make sure we got a good response */
if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: reset bad SRP RSP type %d\n",
- srp_rsp.srp.rsp.opcode);
+ sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
+ srp_rsp.srp.rsp.opcode);
return FAILED;
}
@@ -1123,9 +1146,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
if (rsp_rc) {
if (printk_ratelimit())
- printk(KERN_WARNING
- "ibmvscsi: reset code %d for task tag 0x%lx\n",
- rsp_rc, tsk_mgmt->task_tag);
+ sdev_printk(KERN_WARNING, cmd->device,
+ "reset code %d for task tag 0x%lx\n",
+ rsp_rc, tsk_mgmt->task_tag);
return FAILED;
}
@@ -1154,32 +1177,30 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
}
/**
- * purge_requests: Our virtual adapter just shut down. purge any sent requests
- * @hostdata: the adapter
- */
-static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
+ * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
+ * @cmd: struct scsi_cmnd having problems
+ */
+static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- struct srp_event_struct *tmp_evt, *pos;
- unsigned long flags;
+ unsigned long wait_switch = 0;
+ struct ibmvscsi_host_data *hostdata =
+ (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
- spin_lock_irqsave(hostdata->host->host_lock, flags);
- list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
- list_del(&tmp_evt->list);
- if (tmp_evt->cmnd) {
- tmp_evt->cmnd->result = (error_code << 16);
- unmap_cmd_data(&tmp_evt->iu.srp.cmd,
- tmp_evt,
- tmp_evt->hostdata->dev);
- if (tmp_evt->cmnd_done)
- tmp_evt->cmnd_done(tmp_evt->cmnd);
- } else {
- if (tmp_evt->done) {
- tmp_evt->done(tmp_evt);
- }
- }
- free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
+ dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
+
+ ibmvscsi_reset_host(hostdata);
+
+ for (wait_switch = jiffies + (init_timeout * HZ);
+ time_before(jiffies, wait_switch) &&
+ atomic_read(&hostdata->request_limit) < 2;) {
+
+ msleep(10);
}
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+
+ if (atomic_read(&hostdata->request_limit) <= 0)
+ return FAILED;
+
+ return SUCCESS;
}
/**
@@ -1191,6 +1212,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct ibmvscsi_host_data *hostdata)
{
+ long rc;
unsigned long flags;
struct srp_event_struct *evt_struct =
(struct srp_event_struct *)crq->IU_data_ptr;
@@ -1198,27 +1220,25 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
case 0xC0: /* initialization */
switch (crq->format) {
case 0x01: /* Initialization message */
- printk(KERN_INFO "ibmvscsi: partner initialized\n");
+ dev_info(hostdata->dev, "partner initialized\n");
/* Send back a response */
- if (ibmvscsi_send_crq(hostdata,
- 0xC002000000000000LL, 0) == 0) {
+ if ((rc = ibmvscsi_send_crq(hostdata,
+ 0xC002000000000000LL, 0)) == 0) {
/* Now login */
send_srp_login(hostdata);
} else {
- printk(KERN_ERR
- "ibmvscsi: Unable to send init rsp\n");
+ dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
}
break;
case 0x02: /* Initialization response */
- printk(KERN_INFO
- "ibmvscsi: partner initialization complete\n");
+ dev_info(hostdata->dev, "partner initialization complete\n");
/* Now login */
send_srp_login(hostdata);
break;
default:
- printk(KERN_ERR "ibmvscsi: unknown crq message type\n");
+ dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
}
return;
case 0xFF: /* Hypervisor telling us the connection is closed */
@@ -1226,8 +1246,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
atomic_set(&hostdata->request_limit, 0);
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
- printk(KERN_INFO
- "ibmvscsi: Re-enabling adapter!\n");
+ dev_info(hostdata->dev, "Re-enabling adapter!\n");
purge_requests(hostdata, DID_REQUEUE);
if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
hostdata)) ||
@@ -1235,14 +1254,11 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
0xC001000000000000LL, 0))) {
atomic_set(&hostdata->request_limit,
-1);
- printk(KERN_ERR
- "ibmvscsi: error after"
- " enable\n");
+ dev_err(hostdata->dev, "error after enable\n");
}
} else {
- printk(KERN_INFO
- "ibmvscsi: Virtual adapter failed rc %d!\n",
- crq->format);
+ dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
+ crq->format);
purge_requests(hostdata, DID_ERROR);
if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
@@ -1251,8 +1267,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
0xC001000000000000LL, 0))) {
atomic_set(&hostdata->request_limit,
-1);
- printk(KERN_ERR
- "ibmvscsi: error after reset\n");
+ dev_err(hostdata->dev, "error after reset\n");
}
}
scsi_unblock_requests(hostdata->host);
@@ -1260,9 +1275,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
case 0x80: /* real payload */
break;
default:
- printk(KERN_ERR
- "ibmvscsi: got an invalid message type 0x%02x\n",
- crq->valid);
+ dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
+ crq->valid);
return;
}
@@ -1271,16 +1285,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
* actually sent
*/
if (!valid_event_struct(&hostdata->pool, evt_struct)) {
- printk(KERN_ERR
- "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
+ dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
(void *)crq->IU_data_ptr);
return;
}
if (atomic_read(&evt_struct->free)) {
- printk(KERN_ERR
- "ibmvscsi: received duplicate correlation_token 0x%p!\n",
- (void *)crq->IU_data_ptr);
+ dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
+ (void *)crq->IU_data_ptr);
return;
}
@@ -1288,11 +1300,12 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
&hostdata->request_limit);
+ del_timer(&evt_struct->timer);
+
if (evt_struct->done)
evt_struct->done(evt_struct);
else
- printk(KERN_ERR
- "ibmvscsi: returned done() is NULL; not running it!\n");
+ dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
/*
* Lock the host_lock before messing with these structures, since we
@@ -1313,20 +1326,20 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
{
struct viosrp_host_config *host_config;
struct srp_event_struct *evt_struct;
+ unsigned long flags;
dma_addr_t addr;
int rc;
evt_struct = get_event_struct(&hostdata->pool);
if (!evt_struct) {
- printk(KERN_ERR
- "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
+ dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
return -1;
}
init_event_struct(evt_struct,
sync_completion,
VIOSRP_MAD_FORMAT,
- init_timeout * HZ);
+ init_timeout);
host_config = &evt_struct->iu.mad.host_config;
@@ -1339,14 +1352,15 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(host_config->buffer)) {
- printk(KERN_ERR
- "ibmvscsi: dma_mapping error " "getting host config\n");
+ dev_err(hostdata->dev, "dma_mapping error getting host config\n");
free_event_struct(&hostdata->pool, evt_struct);
return -1;
}
init_completion(&evt_struct->comp);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata);
+ spin_lock_irqsave(hostdata->host->host_lock, flags);
+ rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rc == 0)
wait_for_completion(&evt_struct->comp);
dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
@@ -1375,6 +1389,23 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
return 0;
}
+/**
+ * ibmvscsi_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
+ *
+ * Return value:
+ * actual depth set
+ **/
+static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
+ qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
+
+ scsi_adjust_queue_depth(sdev, 0, qdepth);
+ return sdev->queue_depth;
+}
+
/* ------------------------------------------------------------
* sysfs attributes
*/
@@ -1520,7 +1551,9 @@ static struct scsi_host_template driver_template = {
.queuecommand = ibmvscsi_queuecommand,
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
+ .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
.slave_configure = ibmvscsi_slave_configure,
+ .change_queue_depth = ibmvscsi_change_queue_depth,
.cmd_per_lun = 16,
.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
.this_id = -1,
@@ -1545,7 +1578,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
driver_template.can_queue = max_requests;
host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
if (!host) {
- printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n");
+ dev_err(&vdev->dev, "couldn't allocate host data\n");
goto scsi_host_alloc_failed;
}
@@ -1559,11 +1592,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
if (rc != 0 && rc != H_RESOURCE) {
- printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
+ dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
goto init_crq_failed;
}
if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
- printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n");
+ dev_err(&vdev->dev, "couldn't initialize event pool\n");
goto init_pool_failed;
}
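
Besides the logging conversion, the ibmvscsi.c hunks above change ibmvscsi_send_srp_event() to take a timeout (a multiple of init_timeout, in seconds) as a third argument, and ibmvscsi_handle_crq() now disarms a per-event timer with del_timer(&evt_struct->timer) when the response arrives. The modified send path itself is not part of this hunk; the following is only a minimal sketch of how such a timer is presumably armed before the request is handed to the hypervisor (the handler name ibmvscsi_timeout is an assumption, not taken from this patch):

	/* assumed handler: static void ibmvscsi_timeout(unsigned long data); */
	if (timeout) {
		init_timer(&evt_struct->timer);
		evt_struct->timer.data     = (unsigned long) evt_struct;
		evt_struct->timer.expires  = jiffies + (timeout * HZ);
		evt_struct->timer.function = ibmvscsi_timeout;
		add_timer(&evt_struct->timer);
	}

On the normal completion path the del_timer() call shown above cancels the timer; if the hypervisor never answers, the timeout handler would be expected to fail the event and let the SCSI error handlers take over.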
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 77cc1d40f5bb..b19c2e26c2a5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,7 @@ struct Scsi_Host;
#define MAX_INDIRECT_BUFS 10
#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
+#define IBMVSCSI_MAX_CMDS_PER_LUN 64
/* ------------------------------------------------------------
* Data Structures
@@ -69,6 +70,7 @@ struct srp_event_struct {
union viosrp_iu iu;
void (*cmnd_done) (struct scsi_cmnd *);
struct completion comp;
+ struct timer_list timer;
union viosrp_iu *sync_srp;
struct srp_direct_buf *ext_list;
dma_addr_t ext_list_token;
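
The IBMVSCSI_MAX_CMDS_PER_LUN limit added here is the cap that the new ibmvscsi_change_queue_depth() (earlier hunk) enforces whenever the SCSI midlayer asks to retune a device's queue depth, for example through the writable sysfs queue_depth attribute. A rough illustration of the resulting behaviour, assuming a caller requests a depth of 128:

	int depth = ibmvscsi_change_queue_depth(sdev, 128);
	/* the request is clamped to IBMVSCSI_MAX_CMDS_PER_LUN (64),
	 * scsi_adjust_queue_depth() applies it, so depth == 64 here */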
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index d8700aaa6114..9c14e789df5f 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -177,7 +177,7 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
memset(&hostdata->madapter_info, 0x00,
sizeof(hostdata->madapter_info));
- printk(KERN_INFO "rpa_vscsi: SPR_VERSION: %s\n", SRP_VERSION);
+ dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
strncpy(hostdata->madapter_info.partition_name, partition_name,
@@ -232,25 +232,24 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
if (rc == 2) {
/* Adapter is good, but other end is not ready */
- printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
retrc = 0;
} else if (rc != 0) {
- printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
+ dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
}
if (request_irq(vdev->irq,
ibmvscsi_handle_event,
0, "ibmvscsi", (void *)hostdata) != 0) {
- printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n",
- vdev->irq);
+ dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
+ vdev->irq);
goto req_irq_failed;
}
rc = vio_enable_interrupts(vdev);
if (rc != 0) {
- printk(KERN_ERR "ibmvscsi: Error %d enabling interrupts!!!\n",
- rc);
+ dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
goto req_irq_failed;
}
@@ -294,7 +293,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
if (rc)
- printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc);
+ dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
return rc;
}
@@ -327,10 +326,9 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
queue->msg_token, PAGE_SIZE);
if (rc == 2) {
/* Adapter is good, but other end is not ready */
- printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
+ dev_warn(hostdata->dev, "Partner adapter not ready\n");
} else if (rc != 0) {
- printk(KERN_WARNING
- "ibmvscsi: couldn't register crq--rc 0x%x\n", rc);
+ dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
}
return rc;
}
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 0464c182c577..005d2b05f32d 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1159,11 +1159,10 @@ static int __imm_attach(struct parport *pb)
init_waitqueue_head(&waiting);
- dev = kmalloc(sizeof(imm_struct), GFP_KERNEL);
+ dev = kzalloc(sizeof(imm_struct), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- memset(dev, 0, sizeof(imm_struct));
dev->base = -1;
dev->mode = IMM_AUTODETECT;
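
The imm.c hunk relies on kzalloc() returning already-zeroed memory, which is why the separate memset() can be dropped; the old and new forms are equivalent:

	dev = kzalloc(sizeof(imm_struct), GFP_KERNEL);	/* allocate and zero */

	/* behaves the same as the previous two-step version: */
	dev = kmalloc(sizeof(imm_struct), GFP_KERNEL);
	if (dev)
		memset(dev, 0, sizeof(imm_struct));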
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7e7635ca78f1..d9dfb69ae031 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -3,7 +3,8 @@
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
- * All rights reserved.
+ * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2007 Red Hat <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,38 +20,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
*
*************************************************************************
*
@@ -70,14 +39,14 @@
* - Fix memory allocation problem
* 03/04/98 hc - v1.01l
* - Fix tape rewind which will hang the system problem
- * - Set can_queue to tul_num_scb
+ * - Set can_queue to initio_num_scb
* 06/25/98 hc - v1.01m
* - Get it work for kernel version >= 2.1.75
- * - Dynamic assign SCSI bus reset holding time in init_tulip()
+ * - Dynamic assign SCSI bus reset holding time in initio_init()
* 07/02/98 hc - v1.01n
* - Support 0002134A
* 08/07/98 hc - v1.01o
- * - Change the tul_abort_srb routine to use scsi_done. <01>
+ * - Change the initio_abort_srb routine to use scsi_done. <01>
* 09/07/98 hl - v1.02
* - Change the INI9100U define and proc_dir_entry to
* reflect the newer Kernel 2.1.118, but the v1.o1o
@@ -150,23 +119,13 @@
static unsigned int i91u_debug = DEBUG_DEFAULT;
#endif
-#define TUL_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
-
-typedef struct PCI_ID_Struc {
- unsigned short vendor_id;
- unsigned short device_id;
-} PCI_ID;
-
-static int tul_num_ch = 4; /* Maximum 4 adapters */
-static int tul_num_scb;
-static int tul_tag_enable = 1;
-static SCB *tul_scb;
+static int initio_tag_enable = 1;
#ifdef DEBUG_i91u
static int setup_debug = 0;
#endif
-static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
+static void i91uSCBPost(u8 * pHcb, u8 * pScb);
/* PCI Devices supported by this driver */
static struct pci_device_id i91u_pci_devices[] = {
@@ -184,74 +143,66 @@ MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
#define DEBUG_STATE 0
#define INT_DISC 0
-/*--- external functions --*/
-static void tul_se2_wait(void);
-
-/*--- forward refrence ---*/
-static SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun);
-static SCB *tul_find_done_scb(HCS * pCurHcb);
-
-static int tulip_main(HCS * pCurHcb);
-
-static int tul_next_state(HCS * pCurHcb);
-static int tul_state_1(HCS * pCurHcb);
-static int tul_state_2(HCS * pCurHcb);
-static int tul_state_3(HCS * pCurHcb);
-static int tul_state_4(HCS * pCurHcb);
-static int tul_state_5(HCS * pCurHcb);
-static int tul_state_6(HCS * pCurHcb);
-static int tul_state_7(HCS * pCurHcb);
-static int tul_xfer_data_in(HCS * pCurHcb);
-static int tul_xfer_data_out(HCS * pCurHcb);
-static int tul_xpad_in(HCS * pCurHcb);
-static int tul_xpad_out(HCS * pCurHcb);
-static int tul_status_msg(HCS * pCurHcb);
-
-static int tul_msgin(HCS * pCurHcb);
-static int tul_msgin_sync(HCS * pCurHcb);
-static int tul_msgin_accept(HCS * pCurHcb);
-static int tul_msgout_reject(HCS * pCurHcb);
-static int tul_msgin_extend(HCS * pCurHcb);
-
-static int tul_msgout_ide(HCS * pCurHcb);
-static int tul_msgout_abort_targ(HCS * pCurHcb);
-static int tul_msgout_abort_tag(HCS * pCurHcb);
-
-static int tul_bus_device_reset(HCS * pCurHcb);
-static void tul_select_atn(HCS * pCurHcb, SCB * pCurScb);
-static void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb);
-static void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb);
-static int int_tul_busfree(HCS * pCurHcb);
-static int int_tul_scsi_rst(HCS * pCurHcb);
-static int int_tul_bad_seq(HCS * pCurHcb);
-static int int_tul_resel(HCS * pCurHcb);
-static int tul_sync_done(HCS * pCurHcb);
-static int wdtr_done(HCS * pCurHcb);
-static int wait_tulip(HCS * pCurHcb);
-static int tul_wait_done_disc(HCS * pCurHcb);
-static int tul_wait_disc(HCS * pCurHcb);
-static void tulip_scsi(HCS * pCurHcb);
-static int tul_post_scsi_rst(HCS * pCurHcb);
-
-static void tul_se2_ew_en(WORD CurBase);
-static void tul_se2_ew_ds(WORD CurBase);
-static int tul_se2_rd_all(WORD CurBase);
-static void tul_se2_update_all(WORD CurBase); /* setup default pattern */
-static void tul_read_eeprom(WORD CurBase);
-
- /* ---- INTERNAL VARIABLES ---- */
-static HCS tul_hcs[MAX_SUPPORTED_ADAPTERS];
-static INI_ADPT_STRUCT i91u_adpt[MAX_SUPPORTED_ADAPTERS];
-
-/*NVRAM nvram, *nvramp = &nvram; */
+/*--- forward references ---*/
+static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
+static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
+
+static int tulip_main(struct initio_host * host);
+
+static int initio_next_state(struct initio_host * host);
+static int initio_state_1(struct initio_host * host);
+static int initio_state_2(struct initio_host * host);
+static int initio_state_3(struct initio_host * host);
+static int initio_state_4(struct initio_host * host);
+static int initio_state_5(struct initio_host * host);
+static int initio_state_6(struct initio_host * host);
+static int initio_state_7(struct initio_host * host);
+static int initio_xfer_data_in(struct initio_host * host);
+static int initio_xfer_data_out(struct initio_host * host);
+static int initio_xpad_in(struct initio_host * host);
+static int initio_xpad_out(struct initio_host * host);
+static int initio_status_msg(struct initio_host * host);
+
+static int initio_msgin(struct initio_host * host);
+static int initio_msgin_sync(struct initio_host * host);
+static int initio_msgin_accept(struct initio_host * host);
+static int initio_msgout_reject(struct initio_host * host);
+static int initio_msgin_extend(struct initio_host * host);
+
+static int initio_msgout_ide(struct initio_host * host);
+static int initio_msgout_abort_targ(struct initio_host * host);
+static int initio_msgout_abort_tag(struct initio_host * host);
+
+static int initio_bus_device_reset(struct initio_host * host);
+static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
+static int int_initio_busfree(struct initio_host * host);
+static int int_initio_scsi_rst(struct initio_host * host);
+static int int_initio_bad_seq(struct initio_host * host);
+static int int_initio_resel(struct initio_host * host);
+static int initio_sync_done(struct initio_host * host);
+static int wdtr_done(struct initio_host * host);
+static int wait_tulip(struct initio_host * host);
+static int initio_wait_done_disc(struct initio_host * host);
+static int initio_wait_disc(struct initio_host * host);
+static void tulip_scsi(struct initio_host * host);
+static int initio_post_scsi_rst(struct initio_host * host);
+
+static void initio_se2_ew_en(unsigned long base);
+static void initio_se2_ew_ds(unsigned long base);
+static int initio_se2_rd_all(unsigned long base);
+static void initio_se2_update_all(unsigned long base); /* setup default pattern */
+static void initio_read_eeprom(unsigned long base);
+
+/* ---- INTERNAL VARIABLES ---- */
+
static NVRAM i91unvram;
static NVRAM *i91unvramp;
-
-
-static UCHAR i91udftNvRam[64] =
+static u8 i91udftNvRam[64] =
{
-/*----------- header -----------*/
+ /*----------- header -----------*/
0x25, 0xc9, /* Signature */
0x40, /* Size */
0x01, /* Revision */
@@ -289,7 +240,7 @@ static UCHAR i91udftNvRam[64] =
0, 0}; /* - CheckSum - */
-static UCHAR tul_rate_tbl[8] = /* fast 20 */
+static u8 initio_rate_tbl[8] = /* fast 20 */
{
/* nanosecond devide by 4 */
12, /* 50ns, 20M */
@@ -302,53 +253,17 @@ static UCHAR tul_rate_tbl[8] = /* fast 20 */
62 /* 250ns, 4M */
};
-static void tul_do_pause(unsigned amount)
-{ /* Pause for amount jiffies */
+static void initio_do_pause(unsigned amount)
+{
+ /* Pause for amount jiffies */
unsigned long the_time = jiffies + amount;
- while (time_before_eq(jiffies, the_time));
+ while (time_before_eq(jiffies, the_time))
+ cpu_relax();
}
/*-- forward reference --*/
-/*******************************************************************
- Use memeory refresh time ~ 15us * 2
-********************************************************************/
-void tul_se2_wait(void)
-{
-#if 1
- udelay(30);
-#else
- UCHAR readByte;
-
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) == 0x10) {
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) == 0x10)
- break;
- }
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) != 0x10)
- break;
- }
- } else {
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) == 0x10)
- break;
- }
- for (;;) {
- readByte = TUL_RD(0, 0x61);
- if ((readByte & 0x10) != 0x10)
- break;
- }
- }
-#endif
-}
-
-
/******************************************************************
Input: instruction for Serial E2PROM
@@ -379,1174 +294,1019 @@ void tul_se2_wait(void)
******************************************************************/
-static void tul_se2_instr(WORD CurBase, UCHAR instr)
+
+/**
+ * initio_se2_instr - bitbang an instruction
+ * @base: Base of InitIO controller
+ * @instr: Instruction for serial E2PROM
+ *
+ * Bitbang an instruction out to the serial E2Prom
+ */
+
+static void initio_se2_instr(unsigned long base, u8 instr)
{
int i;
- UCHAR b;
+ u8 b;
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* cs+start bit */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK | SE2DO); /* +CLK */
- tul_se2_wait();
+ outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
+ udelay(30);
+ outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
for (i = 0; i < 8; i++) {
if (instr & 0x80)
- b = SE2CS | SE2DO; /* -CLK+dataBit */
+ b = SE2CS | SE2DO; /* -CLK+dataBit */
else
- b = SE2CS; /* -CLK */
- TUL_WR(CurBase + TUL_NVRAM, b);
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, b | SE2CLK); /* +CLK */
- tul_se2_wait();
+ b = SE2CS; /* -CLK */
+ outb(b, base + TUL_NVRAM);
+ udelay(30);
+ outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
instr <<= 1;
}
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
- tul_se2_wait();
- return;
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
}
-/******************************************************************
- Function name : tul_se2_ew_en
- Description : Enable erase/write state of serial EEPROM
-******************************************************************/
-void tul_se2_ew_en(WORD CurBase)
+/**
+ * initio_se2_ew_en - Enable erase/write
+ * @base: Base address of InitIO controller
+ *
+ * Enable erase/write state of serial EEPROM
+ */
+void initio_se2_ew_en(unsigned long base)
{
- tul_se2_instr(CurBase, 0x30); /* EWEN */
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- tul_se2_wait();
- return;
+ initio_se2_instr(base, 0x30); /* EWEN */
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
}
-/************************************************************************
- Disable erase/write state of serial EEPROM
-*************************************************************************/
-void tul_se2_ew_ds(WORD CurBase)
+/**
+ * initio_se2_ew_ds - Disable erase/write
+ * @base: Base address of InitIO controller
+ *
+ * Disable erase/write state of serial EEPROM
+ */
+void initio_se2_ew_ds(unsigned long base)
{
- tul_se2_instr(CurBase, 0); /* EWDS */
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- tul_se2_wait();
- return;
+ initio_se2_instr(base, 0); /* EWDS */
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
}
-/******************************************************************
- Input :address of Serial E2PROM
- Output :value stored in Serial E2PROM
-*******************************************************************/
-static USHORT tul_se2_rd(WORD CurBase, ULONG adr)
+/**
+ * initio_se2_rd - read E2PROM word
+ * @base: Base of InitIO controller
+ * @addr: Address of word in E2PROM
+ *
+ * Read a word from the NV E2PROM device
+ */
+static u16 initio_se2_rd(unsigned long base, u8 addr)
{
- UCHAR instr, readByte;
- USHORT readWord;
+ u8 instr, rb;
+ u16 val = 0;
int i;
- instr = (UCHAR) (adr | 0x80);
- tul_se2_instr(CurBase, instr); /* READ INSTR */
- readWord = 0;
+ instr = (u8) (addr | 0x80);
+ initio_se2_instr(base, instr); /* READ INSTR */
for (i = 15; i >= 0; i--) {
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
/* sample data after the following edge of clock */
- readByte = TUL_RD(CurBase, TUL_NVRAM);
- readByte &= SE2DI;
- readWord += (readByte << i);
- tul_se2_wait(); /* 6/20/95 */
+ rb = inb(base + TUL_NVRAM);
+ rb &= SE2DI;
+ val += (rb << i);
+ udelay(30); /* 6/20/95 */
}
- TUL_WR(CurBase + TUL_NVRAM, 0); /* no chip select */
- tul_se2_wait();
- return readWord;
+ outb(0, base + TUL_NVRAM); /* no chip select */
+ udelay(30);
+ return val;
}
-
-/******************************************************************
- Input: new value in Serial E2PROM, address of Serial E2PROM
-*******************************************************************/
-static void tul_se2_wr(WORD CurBase, UCHAR adr, USHORT writeWord)
+/**
+ * initio_se2_wr - write E2PROM word
+ * @base: Base of InitIO controller
+ * @addr: Address of word in E2PROM
+ * @val: Value to write
+ *
+ * Write a word to the NV E2PROM device. Used when recovering from
+ * a problem with the NV.
+ */
+static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
- UCHAR readByte;
- UCHAR instr;
+ u8 rb;
+ u8 instr;
int i;
- instr = (UCHAR) (adr | 0x40);
- tul_se2_instr(CurBase, instr); /* WRITE INSTR */
+ instr = (u8) (addr | 0x40);
+ initio_se2_instr(base, instr); /* WRITE INSTR */
for (i = 15; i >= 0; i--) {
- if (writeWord & 0x8000)
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* -CLK+dataBit 1 */
+ if (val & 0x8000)
+ outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
else
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK+dataBit 0 */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */
- tul_se2_wait();
- writeWord <<= 1;
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
+ udelay(30);
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ val <<= 1;
}
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- tul_se2_wait();
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+ outb(0, base + TUL_NVRAM); /* -CS */
+ udelay(30);
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* +CS */
- tul_se2_wait();
+ outb(SE2CS, base + TUL_NVRAM); /* +CS */
+ udelay(30);
for (;;) {
- TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */
- tul_se2_wait();
- TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */
- tul_se2_wait();
- if ((readByte = TUL_RD(CurBase, TUL_NVRAM)) & SE2DI)
+ outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
+ udelay(30);
+ outb(SE2CS, base + TUL_NVRAM); /* -CLK */
+ udelay(30);
+ if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
break; /* write complete */
}
- TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */
- return;
+ outb(0, base + TUL_NVRAM); /* -CS */
}
+/**
+ * initio_se2_rd_all - read hostadapter NV configuration
+ * @base: Base address of InitIO controller
+ *
+ * Reads the E2PROM data into main memory. Ensures that the checksum
+ * and header marker are valid. Returns 1 on success, -1 on error.
+ */
-/***********************************************************************
- Read SCSI H/A configuration parameters from serial EEPROM
-************************************************************************/
-int tul_se2_rd_all(WORD CurBase)
+static int initio_se2_rd_all(unsigned long base)
{
int i;
- ULONG chksum = 0;
- USHORT *np;
+ u16 chksum = 0;
+ u16 *np;
i91unvramp = &i91unvram;
- np = (USHORT *) i91unvramp;
- for (i = 0; i < 32; i++) {
- *np++ = tul_se2_rd(CurBase, i);
- }
+ np = (u16 *) i91unvramp;
+ for (i = 0; i < 32; i++)
+ *np++ = initio_se2_rd(base, i);
-/*--------------------Is signature "ini" ok ? ----------------*/
+ /* Is signature "ini" ok ? */
if (i91unvramp->NVM_Signature != INI_SIGNATURE)
return -1;
-/*---------------------- Is ckecksum ok ? ----------------------*/
- np = (USHORT *) i91unvramp;
+ /* Is checksum ok ? */
+ np = (u16 *) i91unvramp;
for (i = 0; i < 31; i++)
chksum += *np++;
- if (i91unvramp->NVM_CheckSum != (USHORT) chksum)
+ if (i91unvramp->NVM_CheckSum != chksum)
return -1;
return 1;
}
-
-/***********************************************************************
- Update SCSI H/A configuration parameters from serial EEPROM
-************************************************************************/
-void tul_se2_update_all(WORD CurBase)
+/**
+ * initio_se2_update_all - Update E2PROM
+ * @base: Base of InitIO controller
+ *
+ * Update the E2PROM by writing any changes into the E2PROM
+ * chip, rewriting the checksum.
+ */
+static void initio_se2_update_all(unsigned long base)
{ /* setup default pattern */
int i;
- ULONG chksum = 0;
- USHORT *np, *np1;
+ u16 chksum = 0;
+ u16 *np, *np1;
i91unvramp = &i91unvram;
/* Calculate checksum first */
- np = (USHORT *) i91udftNvRam;
+ np = (u16 *) i91udftNvRam;
for (i = 0; i < 31; i++)
chksum += *np++;
- *np = (USHORT) chksum;
- tul_se2_ew_en(CurBase); /* Enable write */
+ *np = chksum;
+ initio_se2_ew_en(base); /* Enable write */
- np = (USHORT *) i91udftNvRam;
- np1 = (USHORT *) i91unvramp;
+ np = (u16 *) i91udftNvRam;
+ np1 = (u16 *) i91unvramp;
for (i = 0; i < 32; i++, np++, np1++) {
- if (*np != *np1) {
- tul_se2_wr(CurBase, i, *np);
- }
+ if (*np != *np1)
+ initio_se2_wr(base, i, *np);
}
-
- tul_se2_ew_ds(CurBase); /* Disable write */
- return;
+ initio_se2_ew_ds(base); /* Disable write */
}
-/*************************************************************************
- Function name : read_eeprom
-**************************************************************************/
-void tul_read_eeprom(WORD CurBase)
-{
- UCHAR gctrl;
-
- i91unvramp = &i91unvram;
-/*------Enable EEProm programming ---*/
- gctrl = TUL_RD(CurBase, TUL_GCTRL);
- TUL_WR(CurBase + TUL_GCTRL, gctrl | TUL_GCTRL_EEPROM_BIT);
- if (tul_se2_rd_all(CurBase) != 1) {
- tul_se2_update_all(CurBase); /* setup default pattern */
- tul_se2_rd_all(CurBase); /* load again */
- }
-/*------ Disable EEProm programming ---*/
- gctrl = TUL_RD(CurBase, TUL_GCTRL);
- TUL_WR(CurBase + TUL_GCTRL, gctrl & ~TUL_GCTRL_EEPROM_BIT);
-} /* read_eeprom */
+/**
+ * initio_read_eeprom - Retrieve configuration
+ * @base: Base of InitIO Host Adapter
+ *
+ * Retrieve the host adapter configuration data from E2Prom. If the
+ * data is invalid then the defaults are used and are also restored
+ * into the E2PROM. This forms the access point for the SCSI driver
+ * into the E2PROM layer, the other functions for the E2PROM are all
+ * internal use.
+ *
+ * Must be called single threaded, uses a shared global area.
+ */
-static int Addi91u_into_Adapter_table(WORD wBIOS, WORD wBASE, BYTE bInterrupt,
- BYTE bBus, BYTE bDevice)
+static void initio_read_eeprom(unsigned long base)
{
- int i, j;
+ u8 gctrl;
- for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) {
- if (i91u_adpt[i].ADPT_BIOS < wBIOS)
- continue;
- if (i91u_adpt[i].ADPT_BIOS == wBIOS) {
- if (i91u_adpt[i].ADPT_BASE == wBASE) {
- if (i91u_adpt[i].ADPT_Bus != 0xFF)
- return 1;
- } else if (i91u_adpt[i].ADPT_BASE < wBASE)
- continue;
- }
- for (j = MAX_SUPPORTED_ADAPTERS - 1; j > i; j--) {
- i91u_adpt[j].ADPT_BASE = i91u_adpt[j - 1].ADPT_BASE;
- i91u_adpt[j].ADPT_INTR = i91u_adpt[j - 1].ADPT_INTR;
- i91u_adpt[j].ADPT_BIOS = i91u_adpt[j - 1].ADPT_BIOS;
- i91u_adpt[j].ADPT_Bus = i91u_adpt[j - 1].ADPT_Bus;
- i91u_adpt[j].ADPT_Device = i91u_adpt[j - 1].ADPT_Device;
- }
- i91u_adpt[i].ADPT_BASE = wBASE;
- i91u_adpt[i].ADPT_INTR = bInterrupt;
- i91u_adpt[i].ADPT_BIOS = wBIOS;
- i91u_adpt[i].ADPT_Bus = bBus;
- i91u_adpt[i].ADPT_Device = bDevice;
- return 0;
+ i91unvramp = &i91unvram;
+ /* Enable EEProm programming */
+ gctrl = inb(base + TUL_GCTRL);
+ outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
+ if (initio_se2_rd_all(base) != 1) {
+ initio_se2_update_all(base); /* setup default pattern */
+ initio_se2_rd_all(base); /* load again */
}
- return 1;
+ /* Disable EEProm programming */
+ gctrl = inb(base + TUL_GCTRL);
+ outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
}
-static void init_i91uAdapter_table(void)
-{
- int i;
-
- for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) { /* Initialize adapter structure */
- i91u_adpt[i].ADPT_BIOS = 0xffff;
- i91u_adpt[i].ADPT_BASE = 0xffff;
- i91u_adpt[i].ADPT_INTR = 0xff;
- i91u_adpt[i].ADPT_Bus = 0xff;
- i91u_adpt[i].ADPT_Device = 0xff;
- }
- return;
-}
+/**
+ * initio_stop_bm - stop bus master
+ * @host: InitIO we are stopping
+ *
+ * Stop any pending DMA operation, aborting the DMA if necessary
+ */
-static void tul_stop_bm(HCS * pCurHcb)
+static void initio_stop_bm(struct initio_host * host)
{
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO);
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
+ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
- while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0);
+ while ((inb(host->addr + TUL_Int) & XABT) == 0)
+ cpu_relax();
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
-/***************************************************************************/
-static void get_tulipPCIConfig(HCS * pCurHcb, int ch_idx)
-{
- pCurHcb->HCS_Base = i91u_adpt[ch_idx].ADPT_BASE; /* Supply base address */
- pCurHcb->HCS_BIOS = i91u_adpt[ch_idx].ADPT_BIOS; /* Supply BIOS address */
- pCurHcb->HCS_Intr = i91u_adpt[ch_idx].ADPT_INTR; /* Supply interrupt line */
- return;
-}
+/**
+ * initio_reset_scsi - Reset SCSI host controller
+ * @host: InitIO host to reset
+ * @seconds: Recovery time
+ *
+ * Perform a full reset of the SCSI subsystem.
+ */
-/***************************************************************************/
-static int tul_reset_scsi(HCS * pCurHcb, int seconds)
+static int initio_reset_scsi(struct initio_host * host, int seconds)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_BUS);
+ outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
- while (!((pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt)) & TSS_SCSIRST_INT));
- /* reset tulip chip */
+ while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
+ cpu_relax();
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, 0);
+ /* reset tulip chip */
+ outb(0, host->addr + TUL_SSignal);
/* Stall for a while, wait for target's firmware ready,make it 2 sec ! */
/* SONY 5200 tape drive won't work if only stall for 1 sec */
- tul_do_pause(seconds * HZ);
-
- TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
+ /* FIXME: this is a very long busy wait right now */
+ initio_do_pause(seconds * HZ);
- return (SCSI_RESET_SUCCESS);
+ inb(host->addr + TUL_SInt);
+ return SCSI_RESET_SUCCESS;
}
-/***************************************************************************/
-static int init_tulip(HCS * pCurHcb, SCB * scbp, int tul_num_scb,
- BYTE * pbBiosAdr, int seconds)
+/**
+ * initio_init - set up an InitIO host adapter
+ * @host: InitIO host adapter
+ * @bios_addr: BIOS address
+ *
+ * Set up the host adapter and devices according to the configuration
+ * retrieved from the E2PROM.
+ *
+ * Locking: Calls E2PROM layer code which is not re-entrant so must
+ * run single threaded for now.
+ */
+
+static void initio_init(struct initio_host * host, u8 *bios_addr)
{
int i;
- BYTE *pwFlags;
- BYTE *pbHeads;
- SCB *pTmpScb, *pPrevScb = NULL;
-
- pCurHcb->HCS_NumScbs = tul_num_scb;
- pCurHcb->HCS_Semaph = 1;
- spin_lock_init(&pCurHcb->HCS_SemaphLock);
- pCurHcb->HCS_JSStatus0 = 0;
- pCurHcb->HCS_Scb = scbp;
- pCurHcb->HCS_NxtPend = scbp;
- pCurHcb->HCS_NxtAvail = scbp;
- for (i = 0, pTmpScb = scbp; i < tul_num_scb; i++, pTmpScb++) {
- pTmpScb->SCB_TagId = i;
- if (i != 0)
- pPrevScb->SCB_NxtScb = pTmpScb;
- pPrevScb = pTmpScb;
- }
- pPrevScb->SCB_NxtScb = NULL;
- pCurHcb->HCS_ScbEnd = pTmpScb;
- pCurHcb->HCS_FirstAvail = scbp;
- pCurHcb->HCS_LastAvail = pPrevScb;
- spin_lock_init(&pCurHcb->HCS_AvailLock);
- pCurHcb->HCS_FirstPend = NULL;
- pCurHcb->HCS_LastPend = NULL;
- pCurHcb->HCS_FirstBusy = NULL;
- pCurHcb->HCS_LastBusy = NULL;
- pCurHcb->HCS_FirstDone = NULL;
- pCurHcb->HCS_LastDone = NULL;
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
-
- tul_read_eeprom(pCurHcb->HCS_Base);
-/*---------- get H/A configuration -------------*/
+ u8 *flags;
+ u8 *heads;
+
+ /* Get E2Prom configuration */
+ initio_read_eeprom(host->addr);
if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
- pCurHcb->HCS_MaxTar = 8;
+ host->max_tar = 8;
else
- pCurHcb->HCS_MaxTar = 16;
+ host->max_tar = 16;
- pCurHcb->HCS_Config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
+ host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
- pCurHcb->HCS_SCSI_ID = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
- pCurHcb->HCS_IdMask = ~(1 << pCurHcb->HCS_SCSI_ID);
+ host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
+ host->idmask = ~(1 << host->scsi_id);
#ifdef CHK_PARITY
/* Enable parity error response */
- TUL_WR(pCurHcb->HCS_Base + TUL_PCMD, TUL_RD(pCurHcb->HCS_Base, TUL_PCMD) | 0x40);
+ outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
#endif
/* Mask all the interrupt */
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
+ outb(0x1F, host->addr + TUL_Mask);
- tul_stop_bm(pCurHcb);
+ initio_stop_bm(host);
/* --- Initialize the tulip --- */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_CHIP);
+ outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
/* program HBA's SCSI ID */
- TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId, pCurHcb->HCS_SCSI_ID << 4);
+ outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
/* Enable Initiator Mode ,phase latch,alternate sync period mode,
disable SCSI reset */
- if (pCurHcb->HCS_Config & HCC_EN_PAR)
- pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
+ if (host->config & HCC_EN_PAR)
+ host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
else
- pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_SConf1);
+ host->sconf1 = (TSC_INITDEFAULT);
+ outb(host->sconf1, host->addr + TUL_SConfig);
- /* Enable HW reselect */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT);
+ /* Enable HW reselect */
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, 0);
+ outb(0, host->addr + TUL_SPeriod);
/* selection time out = 250 ms */
- TUL_WR(pCurHcb->HCS_Base + TUL_STimeOut, 153);
+ outb(153, host->addr + TUL_STimeOut);
-/*--------- Enable SCSI terminator -----*/
- TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, (pCurHcb->HCS_Config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)));
- TUL_WR(pCurHcb->HCS_Base + TUL_GCTRL1,
- ((pCurHcb->HCS_Config & HCC_AUTO_TERM) >> 4) | (TUL_RD(pCurHcb->HCS_Base, TUL_GCTRL1) & 0xFE));
+ /* Enable SCSI terminator */
+ outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
+ host->addr + TUL_XCtrl);
+ outb(((host->config & HCC_AUTO_TERM) >> 4) |
+ (inb(host->addr + TUL_GCTRL1) & 0xFE),
+ host->addr + TUL_GCTRL1);
for (i = 0,
- pwFlags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
- pbHeads = pbBiosAdr + 0x180;
- i < pCurHcb->HCS_MaxTar;
- i++, pwFlags++) {
- pCurHcb->HCS_Tcs[i].TCS_Flags = *pwFlags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
- if (pCurHcb->HCS_Tcs[i].TCS_Flags & TCF_EN_255)
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63;
+ flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
+ heads = bios_addr + 0x180;
+ i < host->max_tar;
+ i++, flags++) {
+ host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ if (host->targets[i].flags & TCF_EN_255)
+ host->targets[i].drv_flags = TCF_DRV_255_63;
else
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0;
- pCurHcb->HCS_Tcs[i].TCS_JS_Period = 0;
- pCurHcb->HCS_Tcs[i].TCS_SConfig0 = pCurHcb->HCS_SConf1;
- pCurHcb->HCS_Tcs[i].TCS_DrvHead = *pbHeads++;
- if (pCurHcb->HCS_Tcs[i].TCS_DrvHead == 255)
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63;
+ host->targets[i].drv_flags = 0;
+ host->targets[i].js_period = 0;
+ host->targets[i].sconfig0 = host->sconf1;
+ host->targets[i].heads = *heads++;
+ if (host->targets[i].heads == 255)
+ host->targets[i].drv_flags = TCF_DRV_255_63;
else
- pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0;
- pCurHcb->HCS_Tcs[i].TCS_DrvSector = *pbHeads++;
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY;
- pCurHcb->HCS_ActTags[i] = 0;
- pCurHcb->HCS_MaxTags[i] = 0xFF;
+ host->targets[i].drv_flags = 0;
+ host->targets[i].sectors = *heads++;
+ host->targets[i].flags &= ~TCF_BUSY;
+ host->act_tags[i] = 0;
+ host->max_tags[i] = 0xFF;
} /* for */
printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
- pCurHcb->HCS_Base, pCurHcb->HCS_Intr,
- pCurHcb->HCS_BIOS, pCurHcb->HCS_SCSI_ID);
-/*------------------- reset SCSI Bus ---------------------------*/
- if (pCurHcb->HCS_Config & HCC_SCSI_RESET) {
- printk("i91u: Reset SCSI Bus ... \n");
- tul_reset_scsi(pCurHcb, seconds);
+ host->addr, host->irq,
+ host->bios_addr, host->scsi_id);
+ /* Reset SCSI Bus */
+ if (host->config & HCC_SCSI_RESET) {
+ printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
+ initio_reset_scsi(host, 10);
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SCFG1, 0x17);
- TUL_WR(pCurHcb->HCS_Base + TUL_SIntEnable, 0xE9);
- return (0);
+ outb(0x17, host->addr + TUL_SCFG1);
+ outb(0xE9, host->addr + TUL_SIntEnable);
}
-/***************************************************************************/
-static SCB *tul_alloc_scb(HCS * hcsp)
+/**
+ * initio_alloc_scb - Allocate an SCB
+ * @host: InitIO host we are allocating for
+ *
+ * Walk the SCB list for the controller and allocate a free SCB if
+ * one exists.
+ */
+static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
{
- SCB *pTmpScb;
- ULONG flags;
- spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags);
- if ((pTmpScb = hcsp->HCS_FirstAvail) != NULL) {
+ struct scsi_ctrl_blk *scb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->avail_lock, flags);
+ if ((scb = host->first_avail) != NULL) {
#if DEBUG_QUEUE
- printk("find scb at %08lx\n", (ULONG) pTmpScb);
+ printk("find scb at %p\n", scb);
#endif
- if ((hcsp->HCS_FirstAvail = pTmpScb->SCB_NxtScb) == NULL)
- hcsp->HCS_LastAvail = NULL;
- pTmpScb->SCB_NxtScb = NULL;
- pTmpScb->SCB_Status = SCB_RENT;
+ if ((host->first_avail = scb->next) == NULL)
+ host->last_avail = NULL;
+ scb->next = NULL;
+ scb->status = SCB_RENT;
}
- spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags);
- return (pTmpScb);
+ spin_unlock_irqrestore(&host->avail_lock, flags);
+ return scb;
}
-/***************************************************************************/
-static void tul_release_scb(HCS * hcsp, SCB * scbp)
+/**
+ * initio_release_scb - Release an SCB
+ * @host: InitIO host that owns the SCB
+ * @cmnd: SCB command block being returned
+ *
+ * Return an allocated SCB to the host free list
+ */
+
+static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
{
- ULONG flags;
+ unsigned long flags;
#if DEBUG_QUEUE
- printk("Release SCB %lx; ", (ULONG) scbp);
+ printk("Release SCB %p; ", cmnd);
#endif
- spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags);
- scbp->SCB_Srb = NULL;
- scbp->SCB_Status = 0;
- scbp->SCB_NxtScb = NULL;
- if (hcsp->HCS_LastAvail != NULL) {
- hcsp->HCS_LastAvail->SCB_NxtScb = scbp;
- hcsp->HCS_LastAvail = scbp;
+ spin_lock_irqsave(&(host->avail_lock), flags);
+ cmnd->srb = NULL;
+ cmnd->status = 0;
+ cmnd->next = NULL;
+ if (host->last_avail != NULL) {
+ host->last_avail->next = cmnd;
+ host->last_avail = cmnd;
} else {
- hcsp->HCS_FirstAvail = scbp;
- hcsp->HCS_LastAvail = scbp;
+ host->first_avail = cmnd;
+ host->last_avail = cmnd;
}
- spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags);
+ spin_unlock_irqrestore(&(host->avail_lock), flags);
}
/***************************************************************************/
-static void tul_append_pend_scb(HCS * pCurHcb, SCB * scbp)
+static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
- printk("Append pend SCB %lx; ", (ULONG) scbp);
+ printk("Append pend SCB %p; ", scbp);
#endif
- scbp->SCB_Status = SCB_PEND;
- scbp->SCB_NxtScb = NULL;
- if (pCurHcb->HCS_LastPend != NULL) {
- pCurHcb->HCS_LastPend->SCB_NxtScb = scbp;
- pCurHcb->HCS_LastPend = scbp;
+ scbp->status = SCB_PEND;
+ scbp->next = NULL;
+ if (host->last_pending != NULL) {
+ host->last_pending->next = scbp;
+ host->last_pending = scbp;
} else {
- pCurHcb->HCS_FirstPend = scbp;
- pCurHcb->HCS_LastPend = scbp;
+ host->first_pending = scbp;
+ host->last_pending = scbp;
}
}
/***************************************************************************/
-static void tul_push_pend_scb(HCS * pCurHcb, SCB * scbp)
+static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
- printk("Push pend SCB %lx; ", (ULONG) scbp);
+ printk("Push pend SCB %p; ", scbp);
#endif
- scbp->SCB_Status = SCB_PEND;
- if ((scbp->SCB_NxtScb = pCurHcb->HCS_FirstPend) != NULL) {
- pCurHcb->HCS_FirstPend = scbp;
+ scbp->status = SCB_PEND;
+ if ((scbp->next = host->first_pending) != NULL) {
+ host->first_pending = scbp;
} else {
- pCurHcb->HCS_FirstPend = scbp;
- pCurHcb->HCS_LastPend = scbp;
+ host->first_pending = scbp;
+ host->last_pending = scbp;
}
}
-/***************************************************************************/
-static SCB *tul_find_first_pend_scb(HCS * pCurHcb)
+static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
{
- SCB *pFirstPend;
+ struct scsi_ctrl_blk *first;
- pFirstPend = pCurHcb->HCS_FirstPend;
- while (pFirstPend != NULL) {
- if (pFirstPend->SCB_Opcode != ExecSCSI) {
- return (pFirstPend);
- }
- if (pFirstPend->SCB_TagMsg == 0) {
- if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] == 0) &&
- !(pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) {
- return (pFirstPend);
- }
+ first = host->first_pending;
+ while (first != NULL) {
+ if (first->opcode != ExecSCSI)
+ return first;
+ if (first->tagmsg == 0) {
+ if ((host->act_tags[first->target] == 0) &&
+ !(host->targets[first->target].flags & TCF_BUSY))
+ return first;
} else {
- if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] >=
- pCurHcb->HCS_MaxTags[pFirstPend->SCB_Target]) |
- (pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) {
- pFirstPend = pFirstPend->SCB_NxtScb;
+ if ((host->act_tags[first->target] >=
+ host->max_tags[first->target]) |
+ (host->targets[first->target].flags & TCF_BUSY)) {
+ first = first->next;
continue;
}
- return (pFirstPend);
+ return first;
}
- pFirstPend = pFirstPend->SCB_NxtScb;
+ first = first->next;
}
-
-
- return (pFirstPend);
+ return first;
}
-/***************************************************************************/
-static void tul_unlink_pend_scb(HCS * pCurHcb, SCB * pCurScb)
+
+static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- SCB *pTmpScb, *pPrevScb;
+ struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
- printk("unlink pend SCB %lx; ", (ULONG) pCurScb);
+ printk("unlink pend SCB %p; ", scb);
#endif
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend;
- while (pTmpScb != NULL) {
- if (pCurScb == pTmpScb) { /* Unlink this SCB */
- if (pTmpScb == pCurHcb->HCS_FirstPend) {
- if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastPend = NULL;
+ prev = tmp = host->first_pending;
+ while (tmp != NULL) {
+ if (scb == tmp) { /* Unlink this SCB */
+ if (tmp == host->first_pending) {
+ if ((host->first_pending = tmp->next) == NULL)
+ host->last_pending = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastPend)
- pCurHcb->HCS_LastPend = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_pending)
+ host->last_pending = prev;
}
- pTmpScb->SCB_NxtScb = NULL;
+ tmp->next = NULL;
break;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
- return;
}
-/***************************************************************************/
-static void tul_append_busy_scb(HCS * pCurHcb, SCB * scbp)
+
+static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
#if DEBUG_QUEUE
- printk("append busy SCB %lx; ", (ULONG) scbp);
+ printk("append busy SCB %o; ", scbp);
#endif
- if (scbp->SCB_TagMsg)
- pCurHcb->HCS_ActTags[scbp->SCB_Target]++;
+ if (scbp->tagmsg)
+ host->act_tags[scbp->target]++;
else
- pCurHcb->HCS_Tcs[scbp->SCB_Target].TCS_Flags |= TCF_BUSY;
- scbp->SCB_Status = SCB_BUSY;
- scbp->SCB_NxtScb = NULL;
- if (pCurHcb->HCS_LastBusy != NULL) {
- pCurHcb->HCS_LastBusy->SCB_NxtScb = scbp;
- pCurHcb->HCS_LastBusy = scbp;
+ host->targets[scbp->target].flags |= TCF_BUSY;
+ scbp->status = SCB_BUSY;
+ scbp->next = NULL;
+ if (host->last_busy != NULL) {
+ host->last_busy->next = scbp;
+ host->last_busy = scbp;
} else {
- pCurHcb->HCS_FirstBusy = scbp;
- pCurHcb->HCS_LastBusy = scbp;
+ host->first_busy = scbp;
+ host->last_busy = scbp;
}
}
/***************************************************************************/
-static SCB *tul_pop_busy_scb(HCS * pCurHcb)
+static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
{
- SCB *pTmpScb;
+ struct scsi_ctrl_blk *tmp;
- if ((pTmpScb = pCurHcb->HCS_FirstBusy) != NULL) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
- pTmpScb->SCB_NxtScb = NULL;
- if (pTmpScb->SCB_TagMsg)
- pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--;
+ if ((tmp = host->first_busy) != NULL) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
+ tmp->next = NULL;
+ if (tmp->tagmsg)
+ host->act_tags[tmp->target]--;
else
- pCurHcb->HCS_Tcs[pTmpScb->SCB_Target].TCS_Flags &= ~TCF_BUSY;
+ host->targets[tmp->target].flags &= ~TCF_BUSY;
}
#if DEBUG_QUEUE
- printk("Pop busy SCB %lx; ", (ULONG) pTmpScb);
+ printk("Pop busy SCB %p; ", tmp);
#endif
- return (pTmpScb);
+ return tmp;
}
/***************************************************************************/
-static void tul_unlink_busy_scb(HCS * pCurHcb, SCB * pCurScb)
+static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- SCB *pTmpScb, *pPrevScb;
+ struct scsi_ctrl_blk *tmp, *prev;
#if DEBUG_QUEUE
- printk("unlink busy SCB %lx; ", (ULONG) pCurScb);
+ printk("unlink busy SCB %p; ", scb);
#endif
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy;
- while (pTmpScb != NULL) {
- if (pCurScb == pTmpScb) { /* Unlink this SCB */
- if (pTmpScb == pCurHcb->HCS_FirstBusy) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
+ prev = tmp = host->first_busy;
+ while (tmp != NULL) {
+ if (scb == tmp) { /* Unlink this SCB */
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastBusy)
- pCurHcb->HCS_LastBusy = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
}
- pTmpScb->SCB_NxtScb = NULL;
- if (pTmpScb->SCB_TagMsg)
- pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--;
+ tmp->next = NULL;
+ if (tmp->tagmsg)
+ host->act_tags[tmp->target]--;
else
- pCurHcb->HCS_Tcs[pTmpScb->SCB_Target].TCS_Flags &= ~TCF_BUSY;
+ host->targets[tmp->target].flags &= ~TCF_BUSY;
break;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
return;
}
-/***************************************************************************/
-SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun)
+struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
{
- SCB *pTmpScb, *pPrevScb;
- WORD scbp_tarlun;
+ struct scsi_ctrl_blk *tmp, *prev;
+ u16 scbp_tarlun;
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy;
- while (pTmpScb != NULL) {
- scbp_tarlun = (pTmpScb->SCB_Lun << 8) | (pTmpScb->SCB_Target);
+ prev = tmp = host->first_busy;
+ while (tmp != NULL) {
+ scbp_tarlun = (tmp->lun << 8) | (tmp->target);
if (scbp_tarlun == tarlun) { /* Unlink this SCB */
break;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
#if DEBUG_QUEUE
- printk("find busy SCB %lx; ", (ULONG) pTmpScb);
+ printk("find busy SCB %p; ", tmp);
#endif
- return (pTmpScb);
+ return tmp;
}
-/***************************************************************************/
-static void tul_append_done_scb(HCS * pCurHcb, SCB * scbp)
+static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
{
-
#if DEBUG_QUEUE
- printk("append done SCB %lx; ", (ULONG) scbp);
+ printk("append done SCB %p; ", scbp);
#endif
- scbp->SCB_Status = SCB_DONE;
- scbp->SCB_NxtScb = NULL;
- if (pCurHcb->HCS_LastDone != NULL) {
- pCurHcb->HCS_LastDone->SCB_NxtScb = scbp;
- pCurHcb->HCS_LastDone = scbp;
+ scbp->status = SCB_DONE;
+ scbp->next = NULL;
+ if (host->last_done != NULL) {
+ host->last_done->next = scbp;
+ host->last_done = scbp;
} else {
- pCurHcb->HCS_FirstDone = scbp;
- pCurHcb->HCS_LastDone = scbp;
+ host->first_done = scbp;
+ host->last_done = scbp;
}
}
-/***************************************************************************/
-SCB *tul_find_done_scb(HCS * pCurHcb)
+struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
{
- SCB *pTmpScb;
+ struct scsi_ctrl_blk *tmp;
-
- if ((pTmpScb = pCurHcb->HCS_FirstDone) != NULL) {
- if ((pCurHcb->HCS_FirstDone = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastDone = NULL;
- pTmpScb->SCB_NxtScb = NULL;
+ if ((tmp = host->first_done) != NULL) {
+ if ((host->first_done = tmp->next) == NULL)
+ host->last_done = NULL;
+ tmp->next = NULL;
}
#if DEBUG_QUEUE
- printk("find done SCB %lx; ", (ULONG) pTmpScb);
+ printk("find done SCB %p; ",tmp);
#endif
- return (pTmpScb);
+ return tmp;
}
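+/**
+ * initio_abort_srb - Abort a queued SCSI request
+ * @host: InitIO host owning the command
+ * @srbp: SCSI midlayer command to abort
+ *
+ * Look for the command on the pending and busy queues. A pending SCB is
+ * unlinked and completed with HOST_ABORTED; a command that is currently
+ * active on the bus, or busy but untagged, cannot be aborted here and
+ * SCSI_ABORT_BUSY is returned instead.
+ */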
-/***************************************************************************/
-static int tul_abort_srb(HCS * pCurHcb, struct scsi_cmnd *srbp)
+static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
{
- ULONG flags;
- SCB *pTmpScb, *pPrevScb;
+ unsigned long flags;
+ struct scsi_ctrl_blk *tmp, *prev;
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
+ spin_lock_irqsave(&host->semaph_lock, flags);
- if ((pCurHcb->HCS_Semaph == 0) && (pCurHcb->HCS_ActScb == NULL)) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
+ if ((host->semaph == 0) && (host->active == NULL)) {
/* disable Jasmin SCSI Int */
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tulip_main(pCurHcb);
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
+ outb(0x1F, host->addr + TUL_Mask);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ /* FIXME: synchronize_irq needed ? */
+ tulip_main(host);
+ spin_lock_irqsave(&host->semaph_lock, flags);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SNOOZE;
}
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend; /* Check Pend queue */
- while (pTmpScb != NULL) {
+ prev = tmp = host->first_pending; /* Check Pend queue */
+ while (tmp != NULL) {
/* 07/27/98 */
- if (pTmpScb->SCB_Srb == srbp) {
- if (pTmpScb == pCurHcb->HCS_ActScb) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ if (tmp->srb == srbp) {
+ if (tmp == host->active) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
- } else if (pTmpScb == pCurHcb->HCS_FirstPend) {
- if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastPend = NULL;
+ } else if (tmp == host->first_pending) {
+ if ((host->first_pending = tmp->next) == NULL)
+ host->last_pending = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastPend)
- pCurHcb->HCS_LastPend = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_pending)
+ host->last_pending = prev;
}
- pTmpScb->SCB_HaStat = HOST_ABORTED;
- pTmpScb->SCB_Flags |= SCF_DONE;
- if (pTmpScb->SCB_Flags & SCF_POST)
- (*pTmpScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pTmpScb);
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ tmp->hastat = HOST_ABORTED;
+ tmp->flags |= SCF_DONE;
+ if (tmp->flags & SCF_POST)
+ (*tmp->post) ((u8 *) host, (u8 *) tmp);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
- while (pTmpScb != NULL) {
-
- if (pTmpScb->SCB_Srb == srbp) {
-
- if (pTmpScb == pCurHcb->HCS_ActScb) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ prev = tmp = host->first_busy; /* Check Busy queue */
+ while (tmp != NULL) {
+ if (tmp->srb == srbp) {
+ if (tmp == host->active) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
- } else if (pTmpScb->SCB_TagMsg == 0) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ } else if (tmp->tagmsg == 0) {
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_BUSY;
} else {
- pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--;
- if (pTmpScb == pCurHcb->HCS_FirstBusy) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
+ host->act_tags[tmp->target]--;
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastBusy)
- pCurHcb->HCS_LastBusy = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
}
- pTmpScb->SCB_NxtScb = NULL;
+ tmp->next = NULL;
- pTmpScb->SCB_HaStat = HOST_ABORTED;
- pTmpScb->SCB_Flags |= SCF_DONE;
- if (pTmpScb->SCB_Flags & SCF_POST)
- (*pTmpScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pTmpScb);
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ tmp->hastat = HOST_ABORTED;
+ tmp->flags |= SCF_DONE;
+ if (tmp->flags & SCF_POST)
+ (*tmp->post) ((u8 *) host, (u8 *) tmp);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return SCSI_ABORT_SUCCESS;
}
}
- pPrevScb = pTmpScb;
- pTmpScb = pTmpScb->SCB_NxtScb;
+ prev = tmp;
+ tmp = tmp->next;
}
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return (SCSI_ABORT_NOT_RUNNING);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
+ return SCSI_ABORT_NOT_RUNNING;
}
/***************************************************************************/
-static int tul_bad_seq(HCS * pCurHcb)
+static int initio_bad_seq(struct initio_host * host)
{
- SCB *pCurScb;
-
- printk("tul_bad_seg c=%d\n", pCurHcb->HCS_Index);
-
- if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) {
- tul_unlink_busy_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- pCurScb->SCB_TaStat = 0;
- tul_append_done_scb(pCurHcb, pCurScb);
- }
- tul_stop_bm(pCurHcb);
-
- tul_reset_scsi(pCurHcb, 8); /* 7/29/98 */
-
- return (tul_post_scsi_rst(pCurHcb));
-}
-
-#if 0
-
-/************************************************************************/
-static int tul_device_reset(HCS * pCurHcb, struct scsi_cmnd *pSrb,
- unsigned int target, unsigned int ResetFlags)
-{
- ULONG flags;
- SCB *pScb;
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- if (ResetFlags & SCSI_RESET_ASYNCHRONOUS) {
-
- if ((pCurHcb->HCS_Semaph == 0) && (pCurHcb->HCS_ActScb == NULL)) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- /* disable Jasmin SCSI Int */
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tulip_main(pCurHcb);
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- return SCSI_RESET_SNOOZE;
- }
- pScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
- while (pScb != NULL) {
- if (pScb->SCB_Srb == pSrb)
- break;
- pScb = pScb->SCB_NxtScb;
- }
- if (pScb == NULL) {
- printk("Unable to Reset - No SCB Found\n");
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return SCSI_RESET_NOT_RUNNING;
- }
- }
- if ((pScb = tul_alloc_scb(pCurHcb)) == NULL) {
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return SCSI_RESET_NOT_RUNNING;
- }
- pScb->SCB_Opcode = BusDevRst;
- pScb->SCB_Flags = SCF_POST;
- pScb->SCB_Target = target;
- pScb->SCB_Mode = 0;
-
- pScb->SCB_Srb = NULL;
- if (ResetFlags & SCSI_RESET_SYNCHRONOUS) {
- pScb->SCB_Srb = pSrb;
- }
- tul_push_pend_scb(pCurHcb, pScb); /* push this SCB to Pending queue */
-
- if (pCurHcb->HCS_Semaph == 1) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- /* disable Jasmin SCSI Int */
- pCurHcb->HCS_Semaph = 0;
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ struct scsi_ctrl_blk *scb;
- tulip_main(pCurHcb);
+ printk("initio_bad_seg c=%d\n", host->index);
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
+ if ((scb = host->active) != NULL) {
+ initio_unlink_busy_scb(host, scb);
+ scb->hastat = HOST_BAD_PHAS;
+ scb->tastat = 0;
+ initio_append_done_scb(host, scb);
}
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return SCSI_RESET_PENDING;
+ initio_stop_bm(host);
+ initio_reset_scsi(host, 8); /* 7/29/98 */
+ return initio_post_scsi_rst(host);
}
-static int tul_reset_scsi_bus(HCS * pCurHcb)
-{
- ULONG flags;
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- pCurHcb->HCS_Semaph = 0;
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tul_stop_bm(pCurHcb);
-
- tul_reset_scsi(pCurHcb, 2); /* 7/29/98 */
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
- tul_post_scsi_rst(pCurHcb);
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
-
- tulip_main(pCurHcb);
-
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
- return (SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET);
-}
-
-#endif /* 0 */
/************************************************************************/
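+/**
+ * initio_exec_scb - Queue an SCB for execution
+ * @host: InitIO host to run the command on
+ * @scb: Command block to queue
+ *
+ * Reset the data pointers of the SCB, append it to the pending queue
+ * and, if the service routine is not already running (semaph set), mask
+ * the chip interrupt and call tulip_main() to start processing.
+ */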
-static void tul_exec_scb(HCS * pCurHcb, SCB * pCurScb)
+static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- ULONG flags;
+ unsigned long flags;
- pCurScb->SCB_Mode = 0;
+ scb->mode = 0;
- pCurScb->SCB_SGIdx = 0;
- pCurScb->SCB_SGMax = pCurScb->SCB_SGLen;
+ scb->sgidx = 0;
+ scb->sgmax = scb->sglen;
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
+ spin_lock_irqsave(&host->semaph_lock, flags);
- tul_append_pend_scb(pCurHcb, pCurScb); /* Append this SCB to Pending queue */
+ initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */
/* VVVVV 07/21/98 */
- if (pCurHcb->HCS_Semaph == 1) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
- /* disable Jasmin SCSI Int */
- pCurHcb->HCS_Semaph = 0;
-
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ if (host->semaph == 1) {
+ /* Disable Jasmin SCSI Int */
+ outb(0x1F, host->addr + TUL_Mask);
+ host->semaph = 0;
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
- tulip_main(pCurHcb);
+ tulip_main(host);
- spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
-
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
+ spin_lock_irqsave(&host->semaph_lock, flags);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
}
- spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
+ spin_unlock_irqrestore(&host->semaph_lock, flags);
return;
}
/***************************************************************************/
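+/**
+ * initio_isr - Service an interrupt
+ * @host: InitIO host signalling the interrupt
+ *
+ * If the chip reports a pending SCSI interrupt and the service routine
+ * is idle, mask the chip interrupt, run tulip_main() and unmask again.
+ * Returns 1 if the interrupt was handled, otherwise 0.
+ */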
-static int tul_isr(HCS * pCurHcb)
+static int initio_isr(struct initio_host * host)
{
- /* Enter critical section */
-
- if (TUL_RD(pCurHcb->HCS_Base, TUL_Int) & TSS_INT_PENDING) {
- if (pCurHcb->HCS_Semaph == 1) {
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
+ if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
+ if (host->semaph == 1) {
+ outb(0x1F, host->addr + TUL_Mask);
/* Disable Tulip SCSI Int */
- pCurHcb->HCS_Semaph = 0;
+ host->semaph = 0;
- tulip_main(pCurHcb);
+ tulip_main(host);
- pCurHcb->HCS_Semaph = 1;
- TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
- return (1);
+ host->semaph = 1;
+ outb(0x0F, host->addr + TUL_Mask);
+ return 1;
}
}
- return (0);
+ return 0;
}
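+/**
+ * tulip_main - Driver service loop
+ * @host: InitIO host being serviced
+ *
+ * Run the SCSI engine and retire completed SCBs. A queue full status
+ * requeues the command with a reduced tag depth, a check condition is
+ * turned into an automatic REQUEST SENSE, and completed commands are
+ * posted back to their completion handler. Returns once an SCB is
+ * active on the bus or no runnable pending SCB remains.
+ */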
-/***************************************************************************/
-int tulip_main(HCS * pCurHcb)
+static int tulip_main(struct initio_host * host)
{
- SCB *pCurScb;
+ struct scsi_ctrl_blk *scb;
for (;;) {
-
- tulip_scsi(pCurHcb); /* Call tulip_scsi */
-
- while ((pCurScb = tul_find_done_scb(pCurHcb)) != NULL) { /* find done entry */
- if (pCurScb->SCB_TaStat == INI_QUEUE_FULL) {
- pCurHcb->HCS_MaxTags[pCurScb->SCB_Target] =
- pCurHcb->HCS_ActTags[pCurScb->SCB_Target] - 1;
- pCurScb->SCB_TaStat = 0;
- tul_append_pend_scb(pCurHcb, pCurScb);
+ tulip_scsi(host); /* Call tulip_scsi */
+
+ /* Walk the list of completed SCBs */
+ while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */
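+ /* Target reported QUEUE FULL: shrink the tag depth and retry the command */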
+ if (scb->tastat == INI_QUEUE_FULL) {
+ host->max_tags[scb->target] =
+ host->act_tags[scb->target] - 1;
+ scb->tastat = 0;
+ initio_append_pend_scb(host, scb);
continue;
}
- if (!(pCurScb->SCB_Mode & SCM_RSENS)) { /* not in auto req. sense mode */
- if (pCurScb->SCB_TaStat == 2) {
+ if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */
+ if (scb->tastat == 2) {
/* clr sync. nego flag */
- if (pCurScb->SCB_Flags & SCF_SENSE) {
- BYTE len;
- len = pCurScb->SCB_SenseLen;
+ if (scb->flags & SCF_SENSE) {
+ u8 len;
+ len = scb->senselen;
if (len == 0)
len = 1;
- pCurScb->SCB_BufLen = pCurScb->SCB_SenseLen;
- pCurScb->SCB_BufPtr = pCurScb->SCB_SensePtr;
- pCurScb->SCB_Flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
-/* pCurScb->SCB_Flags |= SCF_NO_DCHK; */
- /* so, we won't report worng direction in xfer_data_in,
+ scb->buflen = scb->senselen;
+ scb->bufptr = scb->senseptr;
+ scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
+ /* so, we won't report wrong direction in xfer_data_in,
and won't report HOST_DO_DU in state_6 */
- pCurScb->SCB_Mode = SCM_RSENS;
- pCurScb->SCB_Ident &= 0xBF; /* Disable Disconnect */
- pCurScb->SCB_TagMsg = 0;
- pCurScb->SCB_TaStat = 0;
- pCurScb->SCB_CDBLen = 6;
- pCurScb->SCB_CDB[0] = SCSICMD_RequestSense;
- pCurScb->SCB_CDB[1] = 0;
- pCurScb->SCB_CDB[2] = 0;
- pCurScb->SCB_CDB[3] = 0;
- pCurScb->SCB_CDB[4] = len;
- pCurScb->SCB_CDB[5] = 0;
- tul_push_pend_scb(pCurHcb, pCurScb);
+ scb->mode = SCM_RSENS;
+ scb->ident &= 0xBF; /* Disable Disconnect */
+ scb->tagmsg = 0;
+ scb->tastat = 0;
+ scb->cdblen = 6;
+ scb->cdb[0] = SCSICMD_RequestSense;
+ scb->cdb[1] = 0;
+ scb->cdb[2] = 0;
+ scb->cdb[3] = 0;
+ scb->cdb[4] = len;
+ scb->cdb[5] = 0;
+ initio_push_pend_scb(host, scb);
break;
}
}
} else { /* in request sense mode */
- if (pCurScb->SCB_TaStat == 2) { /* check contition status again after sending
+ if (scb->tastat == 2) { /* check condition status again after sending
			   request sense cmd 0x3 */
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
+ scb->hastat = HOST_BAD_PHAS;
}
- pCurScb->SCB_TaStat = 2;
+ scb->tastat = 2;
}
- pCurScb->SCB_Flags |= SCF_DONE;
- if (pCurScb->SCB_Flags & SCF_POST) {
- (*pCurScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pCurScb);
+ scb->flags |= SCF_DONE;
+ if (scb->flags & SCF_POST) {
+ /* FIXME: only one post method and lose casts */
+ (*scb->post) ((u8 *) host, (u8 *) scb);
}
} /* while */
-
/* find_active: */
- if (TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0) & TSS_INT_PENDING)
+ if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
continue;
-
- if (pCurHcb->HCS_ActScb) { /* return to OS and wait for xfer_done_ISR/Selected_ISR */
+ if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */
return 1; /* return to OS, enable interrupt */
- }
/* Check pending SCB */
- if (tul_find_first_pend_scb(pCurHcb) == NULL) {
+ if (initio_find_first_pend_scb(host) == NULL)
return 1; /* return to OS, enable interrupt */
- }
} /* End of for loop */
/* statement won't reach here */
}
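+/**
+ * tulip_scsi - Service the SCSI engine
+ * @host: InitIO host to service
+ *
+ * Handle any pending chip interrupt (bus reset, reselection, selection
+ * timeout, disconnect or phase service) and, once the bus is free, pick
+ * the next pending SCB and begin selection for it.
+ */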
-
-
-
-/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-/***************************************************************************/
-/***************************************************************************/
-/***************************************************************************/
-/***************************************************************************/
-
-/***************************************************************************/
-void tulip_scsi(HCS * pCurHcb)
+static void tulip_scsi(struct initio_host * host)
{
- SCB *pCurScb;
- TCS *pCurTcb;
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
/* make sure to service interrupt asap */
-
- if ((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) & TSS_INT_PENDING) {
-
- pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK;
- pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1);
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
- int_tul_scsi_rst(pCurHcb);
+ if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
+ host->phase = host->jsstatus0 & TSS_PH_MASK;
+ host->jsstatus1 = inb(host->addr + TUL_SStatus1);
+ host->jsint = inb(host->addr + TUL_SInt);
+ if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
+ int_initio_scsi_rst(host);
return;
}
- if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if selected/reselected interrupt */
- if (int_tul_resel(pCurHcb) == 0)
- tul_next_state(pCurHcb);
+ if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */
+ if (int_initio_resel(host) == 0)
+ initio_next_state(host);
return;
}
- if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) {
- int_tul_busfree(pCurHcb);
+ if (host->jsint & TSS_SEL_TIMEOUT) {
+ int_initio_busfree(host);
return;
}
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- int_tul_busfree(pCurHcb); /* unexpected bus free or sel timeout */
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ int_initio_busfree(host); /* unexpected bus free or sel timeout */
return;
}
- if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
- if ((pCurScb = pCurHcb->HCS_ActScb) != NULL)
- tul_next_state(pCurHcb);
+ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
+ if ((scb = host->active) != NULL)
+ initio_next_state(host);
return;
}
}
- if (pCurHcb->HCS_ActScb != NULL)
+ if (host->active != NULL)
return;
- if ((pCurScb = tul_find_first_pend_scb(pCurHcb)) == NULL)
+ if ((scb = initio_find_first_pend_scb(host)) == NULL)
return;
/* program HBA's SCSI ID & target SCSI ID */
- TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId,
- (pCurHcb->HCS_SCSI_ID << 4) | (pCurScb->SCB_Target & 0x0F));
- if (pCurScb->SCB_Opcode == ExecSCSI) {
- pCurTcb = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
+ outb((host->scsi_id << 4) | (scb->target & 0x0F),
+ host->addr + TUL_SScsiId);
+ if (scb->opcode == ExecSCSI) {
+ active_tc = &host->targets[scb->target];
- if (pCurScb->SCB_TagMsg)
- pCurTcb->TCS_DrvFlags |= TCF_DRV_EN_TAG;
+ if (scb->tagmsg)
+ active_tc->drv_flags |= TCF_DRV_EN_TAG;
else
- pCurTcb->TCS_DrvFlags &= ~TCF_DRV_EN_TAG;
+ active_tc->drv_flags &= ~TCF_DRV_EN_TAG;
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period);
- if ((pCurTcb->TCS_Flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
- tul_select_atn_stop(pCurHcb, pCurScb);
+ outb(active_tc->js_period, host->addr + TUL_SPeriod);
+ if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
+ initio_select_atn_stop(host, scb);
} else {
- if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
- tul_select_atn_stop(pCurHcb, pCurScb);
+ if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
+ initio_select_atn_stop(host, scb);
} else {
- if (pCurScb->SCB_TagMsg)
- tul_select_atn3(pCurHcb, pCurScb);
+ if (scb->tagmsg)
+ initio_select_atn3(host, scb);
else
- tul_select_atn(pCurHcb, pCurScb);
+ initio_select_atn(host, scb);
}
}
- if (pCurScb->SCB_Flags & SCF_POLL) {
- while (wait_tulip(pCurHcb) != -1) {
- if (tul_next_state(pCurHcb) == -1)
+ if (scb->flags & SCF_POLL) {
+ while (wait_tulip(host) != -1) {
+ if (initio_next_state(host) == -1)
break;
}
}
- } else if (pCurScb->SCB_Opcode == BusDevRst) {
- tul_select_atn_stop(pCurHcb, pCurScb);
- pCurScb->SCB_NxtStat = 8;
- if (pCurScb->SCB_Flags & SCF_POLL) {
- while (wait_tulip(pCurHcb) != -1) {
- if (tul_next_state(pCurHcb) == -1)
+ } else if (scb->opcode == BusDevRst) {
+ initio_select_atn_stop(host, scb);
+ scb->next_state = 8;
+ if (scb->flags & SCF_POLL) {
+ while (wait_tulip(host) != -1) {
+ if (initio_next_state(host) == -1)
break;
}
}
- } else if (pCurScb->SCB_Opcode == AbortCmd) {
- if (tul_abort_srb(pCurHcb, pCurScb->SCB_Srb) != 0) {
-
-
- tul_unlink_pend_scb(pCurHcb, pCurScb);
-
- tul_release_scb(pCurHcb, pCurScb);
+ } else if (scb->opcode == AbortCmd) {
+ if (initio_abort_srb(host, scb->srb) != 0) {
+ initio_unlink_pend_scb(host, scb);
+ initio_release_scb(host, scb);
} else {
- pCurScb->SCB_Opcode = BusDevRst;
- tul_select_atn_stop(pCurHcb, pCurScb);
- pCurScb->SCB_NxtStat = 8;
+ scb->opcode = BusDevRst;
+ initio_select_atn_stop(host, scb);
+ scb->next_state = 8;
}
-
-/* 08/03/98 */
} else {
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = 0x16; /* bad command */
- tul_append_done_scb(pCurHcb, pCurScb);
+ initio_unlink_pend_scb(host, scb);
+ scb->hastat = 0x16; /* bad command */
+ initio_append_done_scb(host, scb);
}
return;
}
+/**
+ * initio_next_state - Next SCSI state
+ * @host: InitIO host we are processing
+ *
+ * Progress the active command block along the state machine
+ * until we reach a state where we must wait for activity to occur.
+ *
+ * Returns zero or a negative code.
+ */
-/***************************************************************************/
-int tul_next_state(HCS * pCurHcb)
+static int initio_next_state(struct initio_host * host)
{
int next;
- next = pCurHcb->HCS_ActScb->SCB_NxtStat;
+ next = host->active->next_state;
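+ /* Step through the states until a handler returns 0 (wait for an interrupt) or an error */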
for (;;) {
switch (next) {
case 1:
- next = tul_state_1(pCurHcb);
+ next = initio_state_1(host);
break;
case 2:
- next = tul_state_2(pCurHcb);
+ next = initio_state_2(host);
break;
case 3:
- next = tul_state_3(pCurHcb);
+ next = initio_state_3(host);
break;
case 4:
- next = tul_state_4(pCurHcb);
+ next = initio_state_4(host);
break;
case 5:
- next = tul_state_5(pCurHcb);
+ next = initio_state_5(host);
break;
case 6:
- next = tul_state_6(pCurHcb);
+ next = initio_state_6(host);
break;
case 7:
- next = tul_state_7(pCurHcb);
+ next = initio_state_7(host);
break;
case 8:
- return (tul_bus_device_reset(pCurHcb));
+ return initio_bus_device_reset(host);
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
if (next <= 0)
return next;
@@ -1554,338 +1314,363 @@ int tul_next_state(HCS * pCurHcb)
}
-/***************************************************************************/
-/* sTate after selection with attention & stop */
-int tul_state_1(HCS * pCurHcb)
+/**
+ * initio_state_1 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * Perform SCSI state processing for Select/Attention/Stop
+ */
+
+static int initio_state_1(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s1-");
#endif
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- tul_append_busy_scb(pCurHcb, pCurScb);
+ /* Move the SCB from pending to busy */
+ initio_unlink_pend_scb(host, scb);
+ initio_append_busy_scb(host, scb);
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0);
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
/* ATN on */
- if (pCurHcb->HCS_Phase == MSG_OUT) {
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, (TSC_EN_BUS_IN | TSC_HW_RESELECT));
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident);
-
- if (pCurScb->SCB_TagMsg) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId);
- }
- if ((pCurTcb->TCS_Flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
-
- pCurTcb->TCS_Flags |= TCF_WDTR_DONE;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2); /* Extended msg length */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* Sync request */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* Start from 16 bits */
- } else if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
-
- pCurTcb->TCS_Flags |= TCF_SYNC_DONE;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* extended msg length */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* sync request */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, tul_rate_tbl[pCurTcb->TCS_Flags & TCF_SCSI_RATE]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MAX_OFFSET); /* REQ/ACK offset */
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)));
- return (3);
-}
-
+ if (host->phase == MSG_OUT) {
+ outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+ outb(scb->ident, host->addr + TUL_SFifo);
+
+ if (scb->tagmsg) {
+ outb(scb->tagmsg, host->addr + TUL_SFifo);
+ outb(scb->tagid, host->addr + TUL_SFifo);
+ }
+ if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
+ active_tc->flags |= TCF_WDTR_DONE;
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(2, host->addr + TUL_SFifo); /* Extended msg length */
+ outb(3, host->addr + TUL_SFifo); /* Wide request */
+ outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
+ } else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
+ active_tc->flags |= TCF_SYNC_DONE;
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo); /* extended msg length */
+ outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
+ outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
+ }
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
+ /* Into before CDB xfer */
+ return 3;
+}
+
+
+/**
+ * initio_state_2 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * state after selection with attention
+ * state after selection with attention3
+ */
-/***************************************************************************/
-/* state after selection with attention */
-/* state after selection with attention3 */
-int tul_state_2(HCS * pCurHcb)
+static int initio_state_2(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
printk("-s2-");
#endif
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- tul_append_busy_scb(pCurHcb, pCurScb);
+ initio_unlink_pend_scb(host, scb);
+ initio_append_busy_scb(host, scb);
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0);
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
- if (pCurHcb->HCS_JSStatus1 & TSS_CMD_PH_CMP) {
- return (4);
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)));
- return (3);
+ if (host->jsstatus1 & TSS_CMD_PH_CMP)
+ return 4;
+
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
+ /* Into before CDB xfer */
+ return 3;
}
-/***************************************************************************/
-/* state before CDB xfer is done */
-int tul_state_3(HCS * pCurHcb)
+/**
+ * initio_state_3 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * state before CDB xfer is done
+ */
+
+static int initio_state_3(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
int i;
#if DEBUG_STATE
printk("-s3-");
#endif
for (;;) {
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case CMD_OUT: /* Command out phase */
- for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++)
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase == CMD_OUT) {
- return (tul_bad_seq(pCurHcb));
- }
- return (4);
+ for (i = 0; i < (int) scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ if (host->phase == CMD_OUT)
+ return initio_bad_seq(host);
+ return 4;
case MSG_IN: /* Message in phase */
- pCurScb->SCB_NxtStat = 3;
- if (tul_msgin(pCurHcb) == -1)
- return (-1);
+ scb->next_state = 3;
+ if (initio_msgin(host) == -1)
+ return -1;
break;
case STATUS_IN: /* Status phase */
- if (tul_status_msg(pCurHcb) == -1)
- return (-1);
+ if (initio_status_msg(host) == -1)
+ return -1;
break;
case MSG_OUT: /* Message out phase */
- if (pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
-
+ if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
} else {
- pCurTcb->TCS_Flags |= TCF_SYNC_DONE;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* ext. msg len */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* sync request */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, tul_rate_tbl[pCurTcb->TCS_Flags & TCF_SCSI_RATE]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MAX_OFFSET); /* REQ/ACK offset */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7));
+ active_tc->flags |= TCF_SYNC_DONE;
+
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo); /* ext. msg len */
+ outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
+ outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
}
break;
-
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
}
}
+/**
+ * initio_state_4 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after the command bytes have been sent: start data in/out
+ * transfers and service any status or message phases.
+ */
-/***************************************************************************/
-int tul_state_4(HCS * pCurHcb)
+static int initio_state_4(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s4-");
#endif
- if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_NO_XF) {
- return (6); /* Go to state 6 */
+ if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
+ return 6; /* Go to state 6 (After data) */
}
for (;;) {
- if (pCurScb->SCB_BufLen == 0)
- return (6); /* Go to state 6 */
+ if (scb->buflen == 0)
+ return 6;
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case STATUS_IN: /* Status phase */
- if ((pCurScb->SCB_Flags & SCF_DIR) != 0) { /* if direction bit set then report data underrun */
- pCurScb->SCB_HaStat = HOST_DO_DU;
- }
- if ((tul_status_msg(pCurHcb)) == -1)
- return (-1);
+ if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */
+ scb->hastat = HOST_DO_DU;
+ if ((initio_status_msg(host)) == -1)
+ return -1;
break;
case MSG_IN: /* Message in phase */
- pCurScb->SCB_NxtStat = 0x4;
- if (tul_msgin(pCurHcb) == -1)
- return (-1);
+ scb->next_state = 0x4;
+ if (initio_msgin(host) == -1)
+ return -1;
break;
case MSG_OUT: /* Message out phase */
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) {
- pCurScb->SCB_BufLen = 0;
- pCurScb->SCB_HaStat = HOST_DO_DU;
- if (tul_msgout_ide(pCurHcb) == -1)
- return (-1);
- return (6); /* Go to state 6 */
+ if (host->jsstatus0 & TSS_PAR_ERROR) {
+ scb->buflen = 0;
+ scb->hastat = HOST_DO_DU;
+ if (initio_msgout_ide(host) == -1)
+ return -1;
+ return 6;
} else {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
}
break;
case DATA_IN: /* Data in phase */
- return (tul_xfer_data_in(pCurHcb));
+ return initio_xfer_data_in(host);
case DATA_OUT: /* Data out phase */
- return (tul_xfer_data_out(pCurHcb));
+ return initio_xfer_data_out(host);
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
}
}
-/***************************************************************************/
-/* state after dma xfer done or phase change before xfer done */
-int tul_state_5(HCS * pCurHcb)
+/**
+ * initio_state_5 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after dma xfer done or phase change before xfer done
+ */
+
+static int initio_state_5(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */
#if DEBUG_STATE
printk("-s5-");
#endif
-/*------ get remaining count -------*/
-
- cnt = TUL_RDLONG(pCurHcb->HCS_Base, TUL_SCnt0) & 0x0FFFFFF;
+ /*------ get remaining count -------*/
+ cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XCmd) & 0x20) {
+ if (inb(host->addr + TUL_XCmd) & 0x20) {
/* ----------------------- DATA_IN ----------------------------- */
/* check scsi parity error */
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) {
- pCurScb->SCB_HaStat = HOST_DO_DU;
- }
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
+ if (host->jsstatus0 & TSS_PAR_ERROR)
+ scb->hastat = HOST_DO_DU;
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
/* tell Hardware scsi xfer has been terminated */
- TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, TUL_RD(pCurHcb->HCS_Base, TUL_XCtrl) | 0x80);
+ outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
/* wait until DMA xfer not pending */
- while (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND);
+ while (inb(host->addr + TUL_XStatus) & XPEND)
+ cpu_relax();
}
} else {
-/*-------- DATA OUT -----------*/
- if ((TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1) & TSS_XFER_CMP) == 0) {
- if (pCurHcb->HCS_ActTcs->TCS_JS_Period & TSC_WIDE_SCSI)
- cnt += (TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F) << 1;
+ /*-------- DATA OUT -----------*/
+ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
+ if (host->active_tc->js_period & TSC_WIDE_SCSI)
+ cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
else
- cnt += (TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F);
+ cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
}
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT);
+ if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
+ outb(TAX_X_ABT, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
- while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0);
+ while ((inb(host->addr + TUL_Int) & XABT) == 0)
+ cpu_relax();
}
- if ((cnt == 1) && (pCurHcb->HCS_Phase == DATA_OUT)) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1) {
- return (-1);
- }
+ if ((cnt == 1) && (host->phase == DATA_OUT)) {
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
cnt = 0;
} else {
- if ((TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1) & TSS_XFER_CMP) == 0)
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
}
-
if (cnt == 0) {
- pCurScb->SCB_BufLen = 0;
- return (6); /* Go to state 6 */
+ scb->buflen = 0;
+ return 6; /* After Data */
}
/* Update active data pointer */
- xcnt = (long) pCurScb->SCB_BufLen - cnt; /* xcnt== bytes already xferred */
- pCurScb->SCB_BufLen = (U32) cnt; /* cnt == bytes left to be xferred */
- if (pCurScb->SCB_Flags & SCF_SG) {
- register SG *sgp;
- ULONG i;
-
- sgp = &pCurScb->SCB_SGList[pCurScb->SCB_SGIdx];
- for (i = pCurScb->SCB_SGIdx; i < pCurScb->SCB_SGMax; sgp++, i++) {
- xcnt -= (long) sgp->SG_Len;
+ xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */
+ scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */
+ if (scb->flags & SCF_SG) {
+ struct sg_entry *sgp;
+ unsigned long i;
+
+ sgp = &scb->sglist[scb->sgidx];
+ for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
+ xcnt -= (long) sgp->len;
if (xcnt < 0) { /* this sgp xfer half done */
- xcnt += (long) sgp->SG_Len; /* xcnt == bytes xferred in this sgp */
- sgp->SG_Ptr += (U32) xcnt; /* new ptr to be xfer */
- sgp->SG_Len -= (U32) xcnt; /* new len to be xfer */
- pCurScb->SCB_BufPtr += ((U32) (i - pCurScb->SCB_SGIdx) << 3);
+ xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */
+ sgp->data += (u32) xcnt; /* new ptr to be xfer */
+ sgp->len -= (u32) xcnt; /* new len to be xfer */
+ scb->bufptr += ((u32) (i - scb->sgidx) << 3);
/* new SG table ptr */
- pCurScb->SCB_SGLen = (BYTE) (pCurScb->SCB_SGMax - i);
+ scb->sglen = (u8) (scb->sgmax - i);
/* new SG table len */
- pCurScb->SCB_SGIdx = (WORD) i;
+ scb->sgidx = (u16) i;
/* for next disc and come in this loop */
- return (4); /* Go to state 4 */
+ return 4; /* Go to state 4 */
}
/* else (xcnt >= 0 , i.e. this sgp already xferred */
} /* for */
- return (6); /* Go to state 6 */
+ return 6; /* Go to state 6 */
} else {
- pCurScb->SCB_BufPtr += (U32) xcnt;
+ scb->bufptr += (u32) xcnt;
}
- return (4); /* Go to state 4 */
+ return 4; /* Go to state 4 */
}
-/***************************************************************************/
-/* state after Data phase */
-int tul_state_6(HCS * pCurHcb)
+/**
+ * initio_state_6 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * State after Data phase
+ */
+
+static int initio_state_6(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
#if DEBUG_STATE
printk("-s6-");
#endif
for (;;) {
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case STATUS_IN: /* Status phase */
- if ((tul_status_msg(pCurHcb)) == -1)
- return (-1);
+ if ((initio_status_msg(host)) == -1)
+ return -1;
break;
case MSG_IN: /* Message in phase */
- pCurScb->SCB_NxtStat = 6;
- if ((tul_msgin(pCurHcb)) == -1)
- return (-1);
+ scb->next_state = 6;
+ if ((initio_msgin(host)) == -1)
+ return -1;
break;
case MSG_OUT: /* Message out phase */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
+ outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
break;
case DATA_IN: /* Data in phase */
- return (tul_xpad_in(pCurHcb));
+ return initio_xpad_in(host);
case DATA_OUT: /* Data out phase */
- return (tul_xpad_out(pCurHcb));
+ return initio_xpad_out(host);
default:
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
}
}
-/***************************************************************************/
-int tul_state_7(HCS * pCurHcb)
+/**
+ * initio_state_7 - SCSI state machine
+ * @host: InitIO host we are controlling
+ *
+ * Flush any residual bytes left in the SCSI FIFO and check that the
+ * bus has left the data phase before continuing.
+ */
+
+int initio_state_7(struct initio_host * host)
{
int cnt, i;
@@ -1893,1139 +1678,1029 @@ int tul_state_7(HCS * pCurHcb)
printk("-s7-");
#endif
/* flush SCSI FIFO */
- cnt = TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F;
+ cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
if (cnt) {
for (i = 0; i < cnt; i++)
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
+ inb(host->addr + TUL_SFifo);
}
- switch (pCurHcb->HCS_Phase) {
+ switch (host->phase) {
case DATA_IN: /* Data in phase */
case DATA_OUT: /* Data out phase */
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
default:
- return (6); /* Go to state 6 */
+ return 6; /* Go to state 6 */
}
}
-/***************************************************************************/
-int tul_xfer_data_in(HCS * pCurHcb)
+/**
+ * initio_xfer_data_in - Commence data input
+ * @host: InitIO host in use
+ *
+ * Commence a block of data transfer. The transfer itself will
+ * be managed by the controller and we will get a completion (or
+ * failure) interrupt.
+ */
+static int initio_xfer_data_in(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
- if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DOUT) {
- return (6); /* wrong direction */
- }
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
+ if ((scb->flags & SCF_DIR) == SCF_DOUT)
+ return 6; /* wrong direction */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_IN); /* 7/25/95 */
+ outl(scb->buflen, host->addr + TUL_SCnt0);
+ outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
- if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_IN);
+ if (scb->flags & SCF_SG) { /* S/G xfer */
+ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_SG_IN, host->addr + TUL_XCmd);
} else {
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_IN);
+ outl(scb->buflen, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_X_IN, host->addr + TUL_XCmd);
}
- pCurScb->SCB_NxtStat = 0x5;
- return (0); /* return to OS, wait xfer done , let jas_isr come in */
+ scb->next_state = 0x5;
+ return 0; /* return to OS, wait xfer done , let jas_isr come in */
}
+/**
+ * initio_xfer_data_out - Commence data output
+ * @host: InitIO host in use
+ *
+ * Commence a block of data transfer. The transfer itself will
+ * be managed by the controller and we will get a completion (or
+ * failure) interrupt.
+ */
-/***************************************************************************/
-int tul_xfer_data_out(HCS * pCurHcb)
+static int initio_xfer_data_out(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
- if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DIN) {
- return (6); /* wrong direction */
- }
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_OUT);
+ if ((scb->flags & SCF_DIR) == SCF_DIN)
+ return 6; /* wrong direction */
- if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_OUT);
+ outl(scb->buflen, host->addr + TUL_SCnt0);
+ outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
+
+ if (scb->flags & SCF_SG) { /* S/G xfer */
+ outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_SG_OUT, host->addr + TUL_XCmd);
} else {
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr);
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_OUT);
+ outl(scb->buflen, host->addr + TUL_XCntH);
+ outl(scb->bufptr, host->addr + TUL_XAddH);
+ outb(TAX_X_OUT, host->addr + TUL_XCmd);
}
- pCurScb->SCB_NxtStat = 0x5;
- return (0); /* return to OS, wait xfer done , let jas_isr come in */
+ scb->next_state = 0x5;
+ return 0; /* return to OS, wait xfer done , let jas_isr come in */
}
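+/**
+ * initio_xpad_in - Pad an over length data in phase
+ * @host: InitIO host
+ *
+ * The target is sending more data than expected. Flag the over/under
+ * run and read and discard pad bytes until the target leaves the data
+ * in phase.
+ */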
-
-/***************************************************************************/
-int tul_xpad_in(HCS * pCurHcb)
+int initio_xpad_in(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
- if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) {
- pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */
- }
+ if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
+ scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
- if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI)
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2);
+ if (active_tc->js_period & TSC_WIDE_SCSI)
+ outl(2, host->addr + TUL_SCnt0);
else
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
+ outl(1, host->addr + TUL_SCnt0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1) {
- return (-1);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ if (host->phase != DATA_IN) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ return 6;
}
- if (pCurHcb->HCS_Phase != DATA_IN) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- return (6);
- }
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
+ inb(host->addr + TUL_SFifo);
}
}
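+/**
+ * initio_xpad_out - Pad an over length data out phase
+ * @host: InitIO host
+ *
+ * The target wants more data than we have to send. Flag the over/under
+ * run and write zero pad bytes until the target leaves the data out
+ * phase.
+ */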
-int tul_xpad_out(HCS * pCurHcb)
+int initio_xpad_out(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
- if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) {
- pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */
- }
+ if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
+ scb->hastat = HOST_DO_DU; /* over run */
for (;;) {
- if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI)
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2);
+ if (active_tc->js_period & TSC_WIDE_SCSI)
+ outl(2, host->addr + TUL_SCnt0);
else
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
+ outl(1, host->addr + TUL_SCnt0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- if ((wait_tulip(pCurHcb)) == -1) {
- return (-1);
- }
- if (pCurHcb->HCS_Phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- return (6);
+ outb(0, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ if ((wait_tulip(host)) == -1)
+ return -1;
+ if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ return 6;
}
}
}
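+/**
+ * initio_status_msg - Complete the status and message in phases
+ * @host: InitIO host
+ *
+ * Issue the command complete sequence and collect the status byte and
+ * the message that follows. Parity errors are answered with MSG_PARITY,
+ * a command complete message ends with a disconnect wait, and linked
+ * command completion falls through to message accept. Anything else is
+ * treated as a bad sequence.
+ */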
-
-/***************************************************************************/
-int tul_status_msg(HCS * pCurHcb)
+int initio_status_msg(struct initio_host * host)
{ /* status & MSG_IN */
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- BYTE msg;
+ struct scsi_ctrl_blk *scb = host->active;
+ u8 msg;
+
+ outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_CMD_COMP);
- if ((wait_tulip(pCurHcb)) == -1) {
- return (-1);
- }
/* get status */
- pCurScb->SCB_TaStat = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
+ scb->tastat = inb(host->addr + TUL_SFifo);
- if (pCurHcb->HCS_Phase == MSG_OUT) {
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY);
- } else {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP);
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
- }
- if (pCurHcb->HCS_Phase == MSG_IN) {
- msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
- if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { /* Parity error */
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_OUT)
- return (tul_bad_seq(pCurHcb));
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ if (host->phase == MSG_OUT) {
+ if (host->jsstatus0 & TSS_PAR_ERROR)
+ outb(MSG_PARITY, host->addr + TUL_SFifo);
+ else
+ outb(MSG_NOP, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
+ }
+ if (host->phase == MSG_IN) {
+ msg = inb(host->addr + TUL_SFifo);
+ if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
+ outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
if (msg == 0) { /* Command complete */
- if ((pCurScb->SCB_TaStat & 0x18) == 0x10) { /* No link support */
- return (tul_bad_seq(pCurHcb));
- }
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
- return tul_wait_done_disc(pCurHcb);
+ if ((scb->tastat & 0x18) == 0x10) /* No link support */
+ return initio_bad_seq(host);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_done_disc(host);
}
- if ((msg == MSG_LINK_COMP) || (msg == MSG_LINK_FLAG)) {
- if ((pCurScb->SCB_TaStat & 0x18) == 0x10)
- return (tul_msgin_accept(pCurHcb));
+ if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
+ if ((scb->tastat & 0x18) == 0x10)
+ return initio_msgin_accept(host);
}
}
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
-/***************************************************************************/
/* scsi bus free */
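+/**
+ * int_initio_busfree - Bus went free
+ * @host: InitIO host seeing the event
+ *
+ * Either the selection timed out (the command fails with HOST_SEL_TOUT)
+ * or the target dropped off the bus unexpectedly (HOST_BUS_FREE).
+ * Complete the active SCB, clear the active state and re-enable
+ * hardware reselection.
+ */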
-int int_tul_busfree(HCS * pCurHcb)
+int int_initio_busfree(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
+ struct scsi_ctrl_blk *scb = host->active;
- if (pCurScb != NULL) {
- if (pCurScb->SCB_Status & SCB_SELECT) { /* selection timeout */
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = HOST_SEL_TOUT;
- tul_append_done_scb(pCurHcb, pCurScb);
+ if (scb != NULL) {
+ if (scb->status & SCB_SELECT) { /* selection timeout */
+ initio_unlink_pend_scb(host, scb);
+ scb->hastat = HOST_SEL_TOUT;
+ initio_append_done_scb(host, scb);
} else { /* Unexpected bus free */
- tul_unlink_busy_scb(pCurHcb, pCurScb);
- pCurScb->SCB_HaStat = HOST_BUS_FREE;
- tul_append_done_scb(pCurHcb, pCurScb);
+ initio_unlink_busy_scb(host, scb);
+ scb->hastat = HOST_BUS_FREE;
+ initio_append_done_scb(host, scb);
}
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
+ host->active = NULL;
+ host->active_tc = NULL;
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- return (-1);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
}
-/***************************************************************************/
-/* scsi bus reset */
-static int int_tul_scsi_rst(HCS * pCurHcb)
+/**
+ * int_initio_scsi_rst - SCSI reset occurred
+ * @host: Host seeing the reset
+ *
+ * A SCSI bus reset has occurred. Clean up any pending transfer
+ * the hardware is doing by DMA and then abort all active and
+ * disconnected commands. The mid layer should sort the rest out
+ * for us
+ */
+
+static int int_initio_scsi_rst(struct initio_host * host)
{
- SCB *pCurScb;
+ struct scsi_ctrl_blk *scb;
int i;
/* if DMA xfer is pending, abort DMA xfer */
- if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & 0x01) {
- TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO);
+ if (inb(host->addr + TUL_XStatus) & 0x01) {
+ outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
/* wait Abort DMA xfer done */
- while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & 0x04) == 0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ while ((inb(host->addr + TUL_Int) & 0x04) == 0)
+ cpu_relax();
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
}
/* Abort all active & disconnected scb */
- while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) {
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- tul_append_done_scb(pCurHcb, pCurScb);
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
}
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
+ host->active = NULL;
+ host->active_tc = NULL;
/* clr sync nego. done flag */
- for (i = 0; i < pCurHcb->HCS_MaxTar; i++) {
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
- }
- return (-1);
+ for (i = 0; i < host->max_tar; i++)
+ host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ return -1;
}
+/**
+ * int_initio_scsi_resel - Reselection occurred
+ * @host: InitIO host adapter
+ *
+ * A SCSI reselection event has been signalled and the interrupt
+ * is now being processed. Work out which command block needs attention
+ * and continue processing that command.
+ */
-/***************************************************************************/
-/* scsi reselection */
-int int_tul_resel(HCS * pCurHcb)
+int int_initio_resel(struct initio_host * host)
{
- SCB *pCurScb;
- TCS *pCurTcb;
- BYTE tag, msg = 0;
- BYTE tar, lun;
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
+ u8 tag, msg = 0;
+ u8 tar, lun;
- if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) {
- if (pCurScb->SCB_Status & SCB_SELECT) { /* if waiting for selection complete */
- pCurScb->SCB_Status &= ~SCB_SELECT;
- }
- pCurHcb->HCS_ActScb = NULL;
+ if ((scb = host->active) != NULL) {
+ /* FIXME: Why check and not just clear ? */
+ if (scb->status & SCB_SELECT) /* if waiting for selection complete */
+ scb->status &= ~SCB_SELECT;
+ host->active = NULL;
}
/* --------- get target id---------------------- */
- tar = TUL_RD(pCurHcb->HCS_Base, TUL_SBusId);
+ tar = inb(host->addr + TUL_SBusId);
/* ------ get LUN from Identify message----------- */
- lun = TUL_RD(pCurHcb->HCS_Base, TUL_SIdent) & 0x0F;
+ lun = inb(host->addr + TUL_SIdent) & 0x0F;
/* 07/22/98 from 0x1F -> 0x0F */
- pCurTcb = &pCurHcb->HCS_Tcs[tar];
- pCurHcb->HCS_ActTcs = pCurTcb;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period);
-
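+ /* Switch to the reselecting target and restore its negotiated settings */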
+ active_tc = &host->targets[tar];
+ host->active_tc = active_tc;
+ outb(active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(active_tc->js_period, host->addr + TUL_SPeriod);
/* ------------- tag queueing ? ------------------- */
- if (pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG) {
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_IN)
+ if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
+ if (host->phase != MSG_IN)
goto no_tag;
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1)
- return (-1);
- msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag Message */
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
- if ((msg < MSG_STAG) || (msg > MSG_OTAG)) /* Is simple Tag */
+ if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
goto no_tag;
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
- if (pCurHcb->HCS_Phase != MSG_IN)
+ if (host->phase != MSG_IN)
goto no_tag;
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1)
- return (-1);
- tag = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag ID */
- pCurScb = pCurHcb->HCS_Scb + tag;
- if ((pCurScb->SCB_Target != tar) || (pCurScb->SCB_Lun != lun)) {
- return tul_msgout_abort_tag(pCurHcb);
- }
- if (pCurScb->SCB_Status != SCB_BUSY) { /* 03/24/95 */
- return tul_msgout_abort_tag(pCurHcb);
- }
- pCurHcb->HCS_ActScb = pCurScb;
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
+ scb = host->scb + tag;
+ if (scb->target != tar || scb->lun != lun) {
+ return initio_msgout_abort_tag(host);
+ }
+ if (scb->status != SCB_BUSY) { /* 03/24/95 */
+ return initio_msgout_abort_tag(host);
+ }
+ host->active = scb;
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
} else { /* No tag */
no_tag:
- if ((pCurScb = tul_find_busy_scb(pCurHcb, tar | (lun << 8))) == NULL) {
- return tul_msgout_abort_targ(pCurHcb);
+ if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
+ return initio_msgout_abort_targ(host);
}
- pCurHcb->HCS_ActScb = pCurScb;
- if (!(pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG)) {
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ host->active = scb;
+ if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
+ if ((initio_msgin_accept(host)) == -1)
+ return -1;
}
}
return 0;
}
+/**
+ * int_initio_bad_seq - out of phase
+ * @host: InitIO host flagging event
+ *
+ * We have ended up out of phase somehow. Reset the host controller
+ * and throw all our toys out of the pram. Let the midlayer clean up
+ */
-/***************************************************************************/
-static int int_tul_bad_seq(HCS * pCurHcb)
+static int int_initio_bad_seq(struct initio_host * host)
{ /* target wrong phase */
- SCB *pCurScb;
+ struct scsi_ctrl_blk *scb;
int i;
- tul_reset_scsi(pCurHcb, 10);
+ initio_reset_scsi(host, 10);
- while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) {
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- tul_append_done_scb(pCurHcb, pCurScb);
- }
- for (i = 0; i < pCurHcb->HCS_MaxTar; i++) {
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
}
- return (-1);
+ for (i = 0; i < host->max_tar; i++)
+ host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ return -1;
}
-/***************************************************************************/
-int tul_msgout_abort_targ(HCS * pCurHcb)
+/**
+ * initio_msgout_abort_targ - abort a target
+ * @host: InitIO host
+ *
+ * Send an ABORT message to the current target when no matching busy
+ * command block can be found. Used for untagged commands.
+ */
+
+static int initio_msgout_abort_targ(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- if (tul_msgin_accept(pCurHcb) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_OUT)
- return (tul_bad_seq(pCurHcb));
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
+ outb(MSG_ABORT, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
- return tul_wait_disc(pCurHcb);
+ return initio_wait_disc(host);
}
-/***************************************************************************/
-int tul_msgout_abort_tag(HCS * pCurHcb)
+/**
+ * initio_msgout_abort_tag - abort a tag
+ * @host: InitIO host
+ *
+ * Send an ABORT TAG message when the tag returned at reselection does not
+ * match a busy command block for the target/lun. Used for tagged commands.
+ */
+
+static int initio_msgout_abort_tag(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- if (tul_msgin_accept(pCurHcb) == -1)
- return (-1);
- if (pCurHcb->HCS_Phase != MSG_OUT)
- return (tul_bad_seq(pCurHcb));
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
+ if (host->phase != MSG_OUT)
+ return initio_bad_seq(host);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT_TAG);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
+ outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
- return tul_wait_disc(pCurHcb);
+ return initio_wait_disc(host);
}
-/***************************************************************************/
-int tul_msgin(HCS * pCurHcb)
+/**
+ * initio_msgin - Message in
+ * @host: InitIO Host
+ *
+ * Process incoming message
+ */
+static int initio_msgin(struct initio_host * host)
{
- TCS *pCurTcb;
+ struct target_control *active_tc;
for (;;) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
-
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if ((wait_tulip(pCurHcb)) == -1)
- return (-1);
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
- switch (TUL_RD(pCurHcb->HCS_Base, TUL_SFifo)) {
+ switch (inb(host->addr + TUL_SFifo)) {
case MSG_DISC: /* Disconnect msg */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
-
- return tul_wait_disc(pCurHcb);
-
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_disc(host);
case MSG_SDP:
case MSG_RESTORE:
case MSG_NOP:
- tul_msgin_accept(pCurHcb);
+ initio_msgin_accept(host);
break;
-
case MSG_REJ: /* Clear ATN first */
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal,
- (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)));
- pCurTcb = pCurHcb->HCS_ActTcs;
- if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync nego */
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- }
- tul_msgin_accept(pCurHcb);
+ outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
+ host->addr + TUL_SSignal);
+ active_tc = host->active_tc;
+ if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
+ host->addr + TUL_SSignal);
+ initio_msgin_accept(host);
break;
-
case MSG_EXTEND: /* extended msg */
- tul_msgin_extend(pCurHcb);
+ initio_msgin_extend(host);
break;
-
case MSG_IGNOREWIDE:
- tul_msgin_accept(pCurHcb);
+ initio_msgin_accept(host);
break;
-
- /* get */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if (wait_tulip(pCurHcb) == -1)
- return -1;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0); /* put pad */
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get IGNORE field */
- TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get pad */
-
- tul_msgin_accept(pCurHcb);
- break;
-
case MSG_COMP:
- {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
- return tul_wait_done_disc(pCurHcb);
- }
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return initio_wait_done_disc(host);
default:
- tul_msgout_reject(pCurHcb);
+ initio_msgout_reject(host);
break;
}
- if (pCurHcb->HCS_Phase != MSG_IN)
- return (pCurHcb->HCS_Phase);
+ if (host->phase != MSG_IN)
+ return host->phase;
}
/* statement won't reach here */
}
-
-
-
-/***************************************************************************/
-int tul_msgout_reject(HCS * pCurHcb)
+static int initio_msgout_reject(struct initio_host * host)
{
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
-
- if ((tul_msgin_accept(pCurHcb)) == -1)
- return (-1);
+ if (initio_msgin_accept(host) == -1)
+ return -1;
- if (pCurHcb->HCS_Phase == MSG_OUT) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_REJ); /* Msg reject */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ if (host->phase == MSG_OUT) {
+ outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
- return (pCurHcb->HCS_Phase);
+ return host->phase;
}
-
-
-/***************************************************************************/
-int tul_msgout_ide(HCS * pCurHcb)
+static int initio_msgout_ide(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_IDE); /* Initiator Detected Error */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
-
-/***************************************************************************/
-int tul_msgin_extend(HCS * pCurHcb)
+static int initio_msgin_extend(struct initio_host * host)
{
- BYTE len, idx;
+ u8 len, idx;
- if (tul_msgin_accept(pCurHcb) != MSG_IN)
- return (pCurHcb->HCS_Phase);
+ if (initio_msgin_accept(host) != MSG_IN)
+ return host->phase;
/* Get extended msg length */
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
- len = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
- pCurHcb->HCS_Msg[0] = len;
+ len = inb(host->addr + TUL_SFifo);
+ host->msg[0] = len;
for (idx = 1; len != 0; len--) {
- if ((tul_msgin_accept(pCurHcb)) != MSG_IN)
- return (pCurHcb->HCS_Phase);
- TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
- if (wait_tulip(pCurHcb) == -1)
- return (-1);
- pCurHcb->HCS_Msg[idx++] = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
- }
- if (pCurHcb->HCS_Msg[1] == 1) { /* if it's synchronous data transfer request */
- if (pCurHcb->HCS_Msg[0] != 3) /* if length is not right */
- return (tul_msgout_reject(pCurHcb));
- if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
- pCurHcb->HCS_Msg[3] = 0;
+ if ((initio_msgin_accept(host)) != MSG_IN)
+ return host->phase;
+ outl(1, host->addr + TUL_SCnt0);
+ outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
+ if (wait_tulip(host) == -1)
+ return -1;
+ host->msg[idx++] = inb(host->addr + TUL_SFifo);
+ }
+ if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
+ u8 r;
+ if (host->msg[0] != 3) /* if length is not right */
+ return initio_msgout_reject(host);
+ if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
+ host->msg[3] = 0;
} else {
- if ((tul_msgin_sync(pCurHcb) == 0) &&
- (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SYNC_DONE)) {
- tul_sync_done(pCurHcb);
- return (tul_msgin_accept(pCurHcb));
+ if (initio_msgin_sync(host) == 0 &&
+ (host->active_tc->flags & TCF_SYNC_DONE)) {
+ initio_sync_done(host);
+ return initio_msgin_accept(host);
}
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- if ((tul_msgin_accept(pCurHcb)) != MSG_OUT)
- return (pCurHcb->HCS_Phase);
+ r = inb(host->addr + TUL_SSignal);
+ outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
+ host->addr + TUL_SSignal);
+ if (initio_msgin_accept(host) != MSG_OUT)
+ return host->phase;
/* sync msg out */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
- tul_sync_done(pCurHcb);
+ initio_sync_done(host);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[3]);
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo);
+ outb(1, host->addr + TUL_SFifo);
+ outb(host->msg[2], host->addr + TUL_SFifo);
+ outb(host->msg[3], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
- if ((pCurHcb->HCS_Msg[0] != 2) || (pCurHcb->HCS_Msg[1] != 3))
- return (tul_msgout_reject(pCurHcb));
+ if (host->msg[0] != 2 || host->msg[1] != 3)
+ return initio_msgout_reject(host);
/* if it's WIDE DATA XFER REQ */
- if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) {
- pCurHcb->HCS_Msg[2] = 0;
+ if (host->active_tc->flags & TCF_NO_WDTR) {
+ host->msg[2] = 0;
} else {
- if (pCurHcb->HCS_Msg[2] > 2) /* > 32 bits */
- return (tul_msgout_reject(pCurHcb));
- if (pCurHcb->HCS_Msg[2] == 2) { /* == 32 */
- pCurHcb->HCS_Msg[2] = 1;
+ if (host->msg[2] > 2) /* > 32 bits */
+ return initio_msgout_reject(host);
+ if (host->msg[2] == 2) { /* == 32 */
+ host->msg[2] = 1;
} else {
- if ((pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) == 0) {
- wdtr_done(pCurHcb);
- if ((pCurHcb->HCS_ActTcs->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
- return (tul_msgin_accept(pCurHcb));
+ if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
+ wdtr_done(host);
+ if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
+ return initio_msgin_accept(host);
}
}
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN));
+ outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
- if (tul_msgin_accept(pCurHcb) != MSG_OUT)
- return (pCurHcb->HCS_Phase);
+ if (initio_msgin_accept(host) != MSG_OUT)
+ return host->phase;
/* WDTR msg out */
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
- return (wait_tulip(pCurHcb));
+ outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(2, host->addr + TUL_SFifo);
+ outb(3, host->addr + TUL_SFifo);
+ outb(host->msg[2], host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
-/***************************************************************************/
-int tul_msgin_sync(HCS * pCurHcb)
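+/* Check the SDTR period/offset proposed by the target and clamp it to
+   values we support; returns 0 if acceptable as-is, 1 if we adjusted it */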
+static int initio_msgin_sync(struct initio_host * host)
{
char default_period;
- default_period = tul_rate_tbl[pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SCSI_RATE];
- if (pCurHcb->HCS_Msg[3] > MAX_OFFSET) {
- pCurHcb->HCS_Msg[3] = MAX_OFFSET;
- if (pCurHcb->HCS_Msg[2] < default_period) {
- pCurHcb->HCS_Msg[2] = default_period;
+ default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
+ if (host->msg[3] > MAX_OFFSET) {
+ host->msg[3] = MAX_OFFSET;
+ if (host->msg[2] < default_period) {
+ host->msg[2] = default_period;
return 1;
}
- if (pCurHcb->HCS_Msg[2] >= 59) { /* Change to async */
- pCurHcb->HCS_Msg[3] = 0;
- }
+ if (host->msg[2] >= 59) /* Change to async */
+ host->msg[3] = 0;
return 1;
}
/* offset requests asynchronous transfers ? */
- if (pCurHcb->HCS_Msg[3] == 0) {
+ if (host->msg[3] == 0) {
return 0;
}
- if (pCurHcb->HCS_Msg[2] < default_period) {
- pCurHcb->HCS_Msg[2] = default_period;
+ if (host->msg[2] < default_period) {
+ host->msg[2] = default_period;
return 1;
}
- if (pCurHcb->HCS_Msg[2] >= 59) {
- pCurHcb->HCS_Msg[3] = 0;
+ if (host->msg[2] >= 59) {
+ host->msg[3] = 0;
return 1;
}
return 0;
}
-
-/***************************************************************************/
-int wdtr_done(HCS * pCurHcb)
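+/* Record that WDTR (wide) negotiation has completed and program the new bus width */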
+static int wdtr_done(struct initio_host * host)
{
- pCurHcb->HCS_ActTcs->TCS_Flags &= ~TCF_SYNC_DONE;
- pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_WDTR_DONE;
+ host->active_tc->flags &= ~TCF_SYNC_DONE;
+ host->active_tc->flags |= TCF_WDTR_DONE;
- pCurHcb->HCS_ActTcs->TCS_JS_Period = 0;
- if (pCurHcb->HCS_Msg[2]) { /* if 16 bit */
- pCurHcb->HCS_ActTcs->TCS_JS_Period |= TSC_WIDE_SCSI;
- }
- pCurHcb->HCS_ActTcs->TCS_SConfig0 &= ~TSC_ALT_PERIOD;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period);
+ host->active_tc->js_period = 0;
+ if (host->msg[2]) /* if 16 bit */
+ host->active_tc->js_period |= TSC_WIDE_SCSI;
+ host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
+ outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
return 1;
}
-/***************************************************************************/
-int tul_sync_done(HCS * pCurHcb)
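+/* Record a completed SDTR negotiation and program the synchronous period/offset */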
+static int initio_sync_done(struct initio_host * host)
{
int i;
- pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_SYNC_DONE;
+ host->active_tc->flags |= TCF_SYNC_DONE;
- if (pCurHcb->HCS_Msg[3]) {
- pCurHcb->HCS_ActTcs->TCS_JS_Period |= pCurHcb->HCS_Msg[3];
+ if (host->msg[3]) {
+ host->active_tc->js_period |= host->msg[3];
for (i = 0; i < 8; i++) {
- if (tul_rate_tbl[i] >= pCurHcb->HCS_Msg[2]) /* pick the big one */
+ if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
break;
}
- pCurHcb->HCS_ActTcs->TCS_JS_Period |= (i << 4);
- pCurHcb->HCS_ActTcs->TCS_SConfig0 |= TSC_ALT_PERIOD;
+ host->active_tc->js_period |= (i << 4);
+ host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
}
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0);
- TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period);
+ outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
+ outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
- return (-1);
+ return -1;
}
-int tul_post_scsi_rst(HCS * pCurHcb)
+static int initio_post_scsi_rst(struct initio_host * host)
{
- SCB *pCurScb;
- TCS *pCurTcb;
+ struct scsi_ctrl_blk *scb;
+ struct target_control *active_tc;
int i;
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
- pCurHcb->HCS_Flags = 0;
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags = 0;
- while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) {
- pCurScb->SCB_HaStat = HOST_BAD_PHAS;
- tul_append_done_scb(pCurHcb, pCurScb);
+ while ((scb = initio_pop_busy_scb(host)) != NULL) {
+ scb->hastat = HOST_BAD_PHAS;
+ initio_append_done_scb(host, scb);
}
/* clear sync done flag */
- pCurTcb = &pCurHcb->HCS_Tcs[0];
- for (i = 0; i < pCurHcb->HCS_MaxTar; pCurTcb++, i++) {
- pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
+ active_tc = &host->targets[0];
+ for (i = 0; i < host->max_tar; active_tc++, i++) {
+ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
/* Initialize the sync. xfer register values to an asyn xfer */
- pCurTcb->TCS_JS_Period = 0;
- pCurTcb->TCS_SConfig0 = pCurHcb->HCS_SConf1;
- pCurHcb->HCS_ActTags[0] = 0; /* 07/22/98 */
- pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY; /* 07/22/98 */
+ active_tc->js_period = 0;
+ active_tc->sconfig0 = host->sconf1;
+ host->act_tags[0] = 0; /* 07/22/98 */
+ host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
} /* for */
- return (-1);
+ return -1;
}
-/***************************************************************************/
-void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb)
+static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
- pCurScb->SCB_Status |= SCB_SELECT;
- pCurScb->SCB_NxtStat = 0x1;
- pCurHcb->HCS_ActScb = pCurScb;
- pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SELATNSTOP);
- return;
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x1;
+ host->active = scb;
+ host->active_tc = &host->targets[scb->target];
+ outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
}
-/***************************************************************************/
-void tul_select_atn(HCS * pCurHcb, SCB * pCurScb)
+static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
- pCurScb->SCB_Status |= SCB_SELECT;
- pCurScb->SCB_NxtStat = 0x2;
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x2;
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident);
- for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++)
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]);
- pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
- pCurHcb->HCS_ActScb = pCurScb;
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN);
- return;
+ outb(scb->ident, host->addr + TUL_SFifo);
+ for (i = 0; i < (int) scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ host->active_tc = &host->targets[scb->target];
+ host->active = scb;
+ outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
}
-/***************************************************************************/
-void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb)
+static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
{
int i;
- pCurScb->SCB_Status |= SCB_SELECT;
- pCurScb->SCB_NxtStat = 0x2;
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg);
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId);
- for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++)
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]);
- pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target];
- pCurHcb->HCS_ActScb = pCurScb;
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN3);
- return;
+ scb->status |= SCB_SELECT;
+ scb->next_state = 0x2;
+
+ outb(scb->ident, host->addr + TUL_SFifo);
+ outb(scb->tagmsg, host->addr + TUL_SFifo);
+ outb(scb->tagid, host->addr + TUL_SFifo);
+ for (i = 0; i < scb->cdblen; i++)
+ outb(scb->cdb[i], host->addr + TUL_SFifo);
+ host->active_tc = &host->targets[scb->target];
+ host->active = scb;
+ outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
}
-/***************************************************************************/
-/* SCSI Bus Device Reset */
-int tul_bus_device_reset(HCS * pCurHcb)
+/**
+ * initio_bus_device_reset - SCSI Bus Device Reset
+ * @host: InitIO host to reset
+ *
+ * Perform a device reset and abort all pending SCBs for the
+ * victim device
+ */
+int initio_bus_device_reset(struct initio_host * host)
{
- SCB *pCurScb = pCurHcb->HCS_ActScb;
- TCS *pCurTcb = pCurHcb->HCS_ActTcs;
- SCB *pTmpScb, *pPrevScb;
- BYTE tar;
+ struct scsi_ctrl_blk *scb = host->active;
+ struct target_control *active_tc = host->active_tc;
+ struct scsi_ctrl_blk *tmp, *prev;
+ u8 tar;
- if (pCurHcb->HCS_Phase != MSG_OUT) {
- return (int_tul_bad_seq(pCurHcb)); /* Unexpected phase */
- }
- tul_unlink_pend_scb(pCurHcb, pCurScb);
- tul_release_scb(pCurHcb, pCurScb);
+ if (host->phase != MSG_OUT)
+ return int_initio_bad_seq(host); /* Unexpected phase */
+
+ initio_unlink_pend_scb(host, scb);
+ initio_release_scb(host, scb);
- tar = pCurScb->SCB_Target; /* target */
- pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
+ tar = scb->target; /* target */
+ active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
/* clr sync. nego & WDTR flags 07/22/98 */
/* abort all SCB with same target */
- pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
- while (pTmpScb != NULL) {
-
- if (pTmpScb->SCB_Target == tar) {
+ prev = tmp = host->first_busy; /* Check Busy queue */
+ while (tmp != NULL) {
+ if (tmp->target == tar) {
/* unlink it */
- if (pTmpScb == pCurHcb->HCS_FirstBusy) {
- if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL)
- pCurHcb->HCS_LastBusy = NULL;
+ if (tmp == host->first_busy) {
+ if ((host->first_busy = tmp->next) == NULL)
+ host->last_busy = NULL;
} else {
- pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb;
- if (pTmpScb == pCurHcb->HCS_LastBusy)
- pCurHcb->HCS_LastBusy = pPrevScb;
+ prev->next = tmp->next;
+ if (tmp == host->last_busy)
+ host->last_busy = prev;
}
- pTmpScb->SCB_HaStat = HOST_ABORTED;
- tul_append_done_scb(pCurHcb, pTmpScb);
+ tmp->hastat = HOST_ABORTED;
+ initio_append_done_scb(host, tmp);
}
/* Previous haven't change */
else {
- pPrevScb = pTmpScb;
+ prev = tmp;
}
- pTmpScb = pTmpScb->SCB_NxtScb;
+ tmp = tmp->next;
}
-
- TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_DEVRST);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT);
-
- return tul_wait_disc(pCurHcb);
+ outb(MSG_DEVRST, host->addr + TUL_SFifo);
+ outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
+ return initio_wait_disc(host);
}
-/***************************************************************************/
-int tul_msgin_accept(HCS * pCurHcb)
+static int initio_msgin_accept(struct initio_host * host)
{
- TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT);
- return (wait_tulip(pCurHcb));
+ outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
+ return wait_tulip(host);
}
-/***************************************************************************/
-int wait_tulip(HCS * pCurHcb)
+static int wait_tulip(struct initio_host * host)
{
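+ /* Spin until the chip reports a pending interrupt, then latch the cause and phase */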
- while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
- & TSS_INT_PENDING));
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
+ & TSS_INT_PENDING))
+ cpu_relax();
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
- pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK;
- pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1);
+ host->jsint = inb(host->addr + TUL_SInt);
+ host->phase = host->jsstatus0 & TSS_PH_MASK;
+ host->jsstatus1 = inb(host->addr + TUL_SStatus1);
- if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if SCSI bus reset detected */
- return (int_tul_resel(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) { /* if selected/reselected timeout interrupt */
- return (int_tul_busfree(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */
- return (int_tul_scsi_rst(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- if (pCurHcb->HCS_Flags & HCF_EXPECT_DONE_DISC) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb);
- pCurHcb->HCS_ActScb->SCB_HaStat = 0;
- tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb);
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
- pCurHcb->HCS_Flags &= ~HCF_EXPECT_DONE_DISC;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- return (-1);
+ if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */
+ return int_initio_resel(host);
+ if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
+ return int_initio_busfree(host);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ if (host->flags & HCF_EXPECT_DONE_DISC) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ initio_unlink_busy_scb(host, host->active);
+ host->active->hastat = 0;
+ initio_append_done_scb(host, host->active);
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags &= ~HCF_EXPECT_DONE_DISC;
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
}
- if (pCurHcb->HCS_Flags & HCF_EXPECT_DISC) {
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- pCurHcb->HCS_ActScb = NULL;
- pCurHcb->HCS_ActTcs = NULL;
- pCurHcb->HCS_Flags &= ~HCF_EXPECT_DISC;
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- return (-1);
+ if (host->flags & HCF_EXPECT_DISC) {
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ host->active = NULL;
+ host->active_tc = NULL;
+ host->flags &= ~HCF_EXPECT_DISC;
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ return -1;
}
- return (int_tul_busfree(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) {
- return (pCurHcb->HCS_Phase);
+ return int_initio_busfree(host);
}
- return (pCurHcb->HCS_Phase);
+ /* The old code really does the below. Can probably be removed */
+ if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
+ return host->phase;
+ return host->phase;
}
-/***************************************************************************/
-int tul_wait_disc(HCS * pCurHcb)
-{
-
- while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
- & TSS_INT_PENDING));
+static int initio_wait_disc(struct initio_host * host)
+{
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
+ cpu_relax();
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
+ host->jsint = inb(host->addr + TUL_SInt);
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */
- return (int_tul_scsi_rst(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- pCurHcb->HCS_ActScb = NULL;
- return (-1);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ host->active = NULL;
+ return -1;
}
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
-/***************************************************************************/
-int tul_wait_done_disc(HCS * pCurHcb)
+static int initio_wait_done_disc(struct initio_host * host)
{
+ while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
+ & TSS_INT_PENDING))
+ cpu_relax();
+ host->jsint = inb(host->addr + TUL_SInt);
- while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
- & TSS_INT_PENDING));
-
- pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
-
-
- if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */
- return (int_tul_scsi_rst(pCurHcb));
- }
- if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
- TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
- TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
- tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb);
+ if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
+ return int_initio_scsi_rst(host);
+ if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
+ outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
+ outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
+ outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
+ initio_unlink_busy_scb(host, host->active);
- tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb);
- pCurHcb->HCS_ActScb = NULL;
- return (-1);
+ initio_append_done_scb(host, host->active);
+ host->active = NULL;
+ return -1;
}
- return (tul_bad_seq(pCurHcb));
+ return initio_bad_seq(host);
}
+/**
+ * i91u_intr - IRQ handler
+ * @irqno: IRQ number
+ * @dev_id: Device identifier (the Scsi_Host for this adapter)
+ *
+ * Take the relevant locks and then invoke the actual isr processing
+ * code under the lock.
+ */
+
static irqreturn_t i91u_intr(int irqno, void *dev_id)
{
struct Scsi_Host *dev = dev_id;
unsigned long flags;
+ int r;
spin_lock_irqsave(dev->host_lock, flags);
- tul_isr((HCS *)dev->base);
+ r = initio_isr((struct initio_host *)dev->hostdata);
spin_unlock_irqrestore(dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-
-static int tul_NewReturnNumberOfAdapters(void)
-{
- struct pci_dev *pDev = NULL; /* Start from none */
- int iAdapters = 0;
- long dRegValue;
- WORD wBIOS;
- int i = 0;
-
- init_i91uAdapter_table();
-
- for (i = 0; i < ARRAY_SIZE(i91u_pci_devices); i++)
- {
- while ((pDev = pci_find_device(i91u_pci_devices[i].vendor, i91u_pci_devices[i].device, pDev)) != NULL) {
- if (pci_enable_device(pDev))
- continue;
- pci_read_config_dword(pDev, 0x44, (u32 *) & dRegValue);
- wBIOS = (UWORD) (dRegValue & 0xFF);
- if (((dRegValue & 0xFF00) >> 8) == 0xFF)
- dRegValue = 0;
- wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8));
- if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) {
- printk(KERN_WARNING
- "i91u: Could not set 32 bit DMA mask\n");
- continue;
- }
-
- if (Addi91u_into_Adapter_table(wBIOS,
- (pDev->resource[0].start),
- pDev->irq,
- pDev->bus->number,
- (pDev->devfn >> 3)
- ) == 0)
- iAdapters++;
- }
- }
-
- return (iAdapters);
+ if (r)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
-static int i91u_detect(struct scsi_host_template * tpnt)
-{
- HCS *pHCB;
- struct Scsi_Host *hreg;
- unsigned long i; /* 01/14/98 */
- int ok = 0, iAdapters;
- ULONG dBiosAdr;
- BYTE *pbBiosAdr;
-
- /* Get total number of adapters in the motherboard */
- iAdapters = tul_NewReturnNumberOfAdapters();
- if (iAdapters == 0) /* If no tulip founded, return */
- return (0);
-
- tul_num_ch = (iAdapters > tul_num_ch) ? tul_num_ch : iAdapters;
- /* Update actually channel number */
- if (tul_tag_enable) { /* 1.01i */
- tul_num_scb = MAX_TARGETS * i91u_MAXQUEUE;
- } else {
- tul_num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
- } /* Update actually SCBs per adapter */
-
- /* Get total memory needed for HCS */
- i = tul_num_ch * sizeof(HCS);
- memset((unsigned char *) &tul_hcs[0], 0, i); /* Initialize tul_hcs 0 */
- /* Get total memory needed for SCB */
-
- for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) {
- i = tul_num_ch * tul_num_scb * sizeof(SCB);
- if ((tul_scb = kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL)
- break;
- }
- if (tul_scb == NULL) {
- printk("i91u: SCB memory allocation error\n");
- return (0);
- }
- memset((unsigned char *) tul_scb, 0, i);
- for (i = 0, pHCB = &tul_hcs[0]; /* Get pointer for control block */
- i < tul_num_ch;
- i++, pHCB++) {
- get_tulipPCIConfig(pHCB, i);
-
- dBiosAdr = pHCB->HCS_BIOS;
- dBiosAdr = (dBiosAdr << 4);
-
- pbBiosAdr = phys_to_virt(dBiosAdr);
-
- init_tulip(pHCB, tul_scb + (i * tul_num_scb), tul_num_scb, pbBiosAdr, 10);
- request_region(pHCB->HCS_Base, 256, "i91u"); /* Register */
-
- pHCB->HCS_Index = i; /* 7/29/98 */
- hreg = scsi_register(tpnt, sizeof(HCS));
- if(hreg == NULL) {
- release_region(pHCB->HCS_Base, 256);
- return 0;
- }
- hreg->io_port = pHCB->HCS_Base;
- hreg->n_io_port = 0xff;
- hreg->can_queue = tul_num_scb; /* 03/05/98 */
- hreg->unique_id = pHCB->HCS_Base;
- hreg->max_id = pHCB->HCS_MaxTar;
- hreg->max_lun = 32; /* 10/21/97 */
- hreg->irq = pHCB->HCS_Intr;
- hreg->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */
- hreg->base = (unsigned long)pHCB;
- hreg->sg_tablesize = TOTAL_SG_ENTRY; /* Maximun support is 32 */
-
- /* Initial tulip chip */
- ok = request_irq(pHCB->HCS_Intr, i91u_intr, IRQF_DISABLED | IRQF_SHARED, "i91u", hreg);
- if (ok < 0) {
- printk(KERN_WARNING "i91u: unable to request IRQ %d\n\n", pHCB->HCS_Intr);
- return 0;
- }
- }
-
- tpnt->this_id = -1;
- tpnt->can_queue = 1;
-
- return 1;
-}
+/**
+ * initio_build_scb - Build the mappings and SCB
+ * @host: InitIO host taking the command
+ * @cblk: Firmware command block
+ * @cmnd: SCSI midlayer command block
+ *
+ * Translate the abstract SCSI command into a firmware command block
+ * suitable for feeding to the InitIO host controller. This also requires
+ * we build the scatter gather lists and ensure they are mapped properly.
+ */
-static void i91uBuildSCB(HCS * pHCB, SCB * pSCB, struct scsi_cmnd * SCpnt)
+static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
{ /* Create corresponding SCB */
- struct scatterlist *pSrbSG;
- SG *pSG; /* Pointer to SG list */
- int i;
- long TotalLen;
+ struct scatterlist *sglist;
+ struct sg_entry *sg; /* Pointer to SG list */
+ int i, nseg;
+ long total_len;
dma_addr_t dma_addr;
- pSCB->SCB_Post = i91uSCBPost; /* i91u's callback routine */
- pSCB->SCB_Srb = SCpnt;
- pSCB->SCB_Opcode = ExecSCSI;
- pSCB->SCB_Flags = SCF_POST; /* After SCSI done, call post routine */
- pSCB->SCB_Target = SCpnt->device->id;
- pSCB->SCB_Lun = SCpnt->device->lun;
- pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW;
-
- pSCB->SCB_Flags |= SCF_SENSE; /* Turn on auto request sense */
- dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->sense_buffer,
- SENSE_SIZE, DMA_FROM_DEVICE);
- pSCB->SCB_SensePtr = cpu_to_le32((u32)dma_addr);
- pSCB->SCB_SenseLen = cpu_to_le32(SENSE_SIZE);
- SCpnt->SCp.ptr = (char *)(unsigned long)dma_addr;
+ /* Fill in the command headers */
+ cblk->post = i91uSCBPost; /* i91u's callback routine */
+ cblk->srb = cmnd;
+ cblk->opcode = ExecSCSI;
+ cblk->flags = SCF_POST; /* After SCSI done, call post routine */
+ cblk->target = cmnd->device->id;
+ cblk->lun = cmnd->device->lun;
+ cblk->ident = cmnd->device->lun | DISC_ALLOW;
- pSCB->SCB_CDBLen = SCpnt->cmd_len;
- pSCB->SCB_HaStat = 0;
- pSCB->SCB_TaStat = 0;
- memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, SCpnt->cmd_len);
+ cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
- if (SCpnt->device->tagged_supported) { /* Tag Support */
- pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
+ /* Map the sense buffer into bus memory */
+ dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
+ SENSE_SIZE, DMA_FROM_DEVICE);
+ cblk->senseptr = cpu_to_le32((u32)dma_addr);
+ cblk->senselen = cpu_to_le32(SENSE_SIZE);
+ cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
+ cblk->cdblen = cmnd->cmd_len;
+
+ /* Clear the returned status */
+ cblk->hastat = 0;
+ cblk->tastat = 0;
+ /* Copy the command descriptor block (CDB) */
+ memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len);
+
+ /* Set up tags */
+ if (cmnd->device->tagged_supported) { /* Tag Support */
+ cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
- pSCB->SCB_TagMsg = 0; /* No tag support */
+ cblk->tagmsg = 0; /* No tag support */
}
+
/* todo handle map_sg error */
- if (SCpnt->use_sg) {
- dma_addr = dma_map_single(&pHCB->pci_dev->dev, &pSCB->SCB_SGList[0],
- sizeof(struct SG_Struc) * TOTAL_SG_ENTRY,
+ nseg = scsi_dma_map(cmnd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
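+ /* Map the driver's SG descriptor table for the controller, then fill each entry from the mapped scatterlist */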
+ dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
+ sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
- pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr);
- SCpnt->SCp.dma_handle = dma_addr;
-
- pSrbSG = (struct scatterlist *) SCpnt->request_buffer;
- pSCB->SCB_SGLen = dma_map_sg(&pHCB->pci_dev->dev, pSrbSG,
- SCpnt->use_sg, SCpnt->sc_data_direction);
-
- pSCB->SCB_Flags |= SCF_SG; /* Turn on SG list flag */
- for (i = 0, TotalLen = 0, pSG = &pSCB->SCB_SGList[0]; /* 1.01g */
- i < pSCB->SCB_SGLen; i++, pSG++, pSrbSG++) {
- pSG->SG_Ptr = cpu_to_le32((u32)sg_dma_address(pSrbSG));
- TotalLen += pSG->SG_Len = cpu_to_le32((u32)sg_dma_len(pSrbSG));
+ cblk->bufptr = cpu_to_le32((u32)dma_addr);
+ cmnd->SCp.dma_handle = dma_addr;
+
+
+ cblk->flags |= SCF_SG; /* Turn on SG list flag */
+ cblk->sglen = nseg; /* Record the number of SG entries used */
+ total_len = 0;
+ sg = &cblk->sglist[0];
+ scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
+ sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
+ total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
}
- pSCB->SCB_BufLen = (SCpnt->request_bufflen > TotalLen) ?
- TotalLen : SCpnt->request_bufflen;
- } else if (SCpnt->request_bufflen) { /* Non SG */
- dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->request_buffer,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- SCpnt->SCp.dma_handle = dma_addr;
- pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr);
- pSCB->SCB_BufLen = cpu_to_le32((u32)SCpnt->request_bufflen);
- pSCB->SCB_SGLen = 0;
- } else {
- pSCB->SCB_BufLen = 0;
- pSCB->SCB_SGLen = 0;
+ cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
+ total_len : scsi_bufflen(cmnd);
+ } else { /* No data transfer required */
+ cblk->buflen = 0;
+ cblk->sglen = 0;
}
}
+/**
+ * i91u_queuecommand - Queue a new command if possible
+ * @cmd: SCSI command block from the mid layer
+ * @done: Completion handler
+ *
+ * Attempts to queue a new command with the host adapter. Will return
+ * zero if successful or indicate a host busy condition if not (which
+ * will cause the mid layer to call us again later with the command)
+ */
+
static int i91u_queuecommand(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
- HCS *pHCB = (HCS *) cmd->device->host->base;
- register SCB *pSCB;
+ struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
+ struct scsi_ctrl_blk *cmnd;
cmd->scsi_done = done;
- pSCB = tul_alloc_scb(pHCB);
- if (!pSCB)
+ cmnd = initio_alloc_scb(host);
+ if (!cmnd)
return SCSI_MLQUEUE_HOST_BUSY;
- i91uBuildSCB(pHCB, pSCB, cmd);
- tul_exec_scb(pHCB, pSCB);
+ initio_build_scb(host, cmnd, cmd);
+ initio_exec_scb(host, cmnd);
return 0;
}
-#if 0 /* no new EH yet */
-/*
- * Abort a queued command
- * (commands that are on the bus can't be aborted easily)
- */
-static int i91u_abort(struct scsi_cmnd * SCpnt)
-{
- HCS *pHCB;
-
- pHCB = (HCS *) SCpnt->device->host->base;
- return tul_abort_srb(pHCB, SCpnt);
-}
-
-/*
- * Reset registers, reset a hanging bus and
- * kill active and disconnected commands for target w/o soft reset
+/**
+ * i91u_bus_reset - reset the SCSI bus
+ * @cmnd: Command block we want to trigger the reset for
+ *
+ * Initiate a SCSI bus reset sequence
*/
-static int i91u_reset(struct scsi_cmnd * SCpnt, unsigned int reset_flags)
-{ /* I need Host Control Block Information */
- HCS *pHCB;
-
- pHCB = (HCS *) SCpnt->device->host->base;
- if (reset_flags & (SCSI_RESET_SUGGEST_BUS_RESET | SCSI_RESET_SUGGEST_HOST_RESET))
- return tul_reset_scsi_bus(pHCB);
- else
- return tul_device_reset(pHCB, SCpnt, SCpnt->device->id, reset_flags);
-}
-#endif
-
-static int i91u_bus_reset(struct scsi_cmnd * SCpnt)
+static int i91u_bus_reset(struct scsi_cmnd * cmnd)
{
- HCS *pHCB;
+ struct initio_host *host;
- pHCB = (HCS *) SCpnt->device->host->base;
+ host = (struct initio_host *) cmnd->device->host->hostdata;
- spin_lock_irq(SCpnt->device->host->host_lock);
- tul_reset_scsi(pHCB, 0);
- spin_unlock_irq(SCpnt->device->host->host_lock);
+ spin_lock_irq(cmnd->device->host->host_lock);
+ initio_reset_scsi(host, 0);
+ spin_unlock_irq(cmnd->device->host->host_lock);
return SUCCESS;
}
-/*
- * Return the "logical geometry"
+/**
+ * i91u_biosparam - return the "logical geometry"
+ * @sdev: SCSI device
+ * @dev: Matching block device
+ * @capacity: Size of the drive in sectors
+ * @info_array: Return space for BIOS geometry
+ *
+ * Map the device geometry in a manner compatible with the host
+ * controller BIOS behaviour.
+ *
+ * FIXME: limited to 2^32 sector devices.
*/
+
static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
sector_t capacity, int *info_array)
{
- HCS *pHcb; /* Point to Host adapter control block */
- TCS *pTcb;
+ struct initio_host *host; /* Point to Host adapter control block */
+ struct target_control *tc;
- pHcb = (HCS *) sdev->host->base;
- pTcb = &pHcb->HCS_Tcs[sdev->id];
+ host = (struct initio_host *) sdev->host->hostdata;
+ tc = &host->targets[sdev->id];
- if (pTcb->TCS_DrvHead) {
- info_array[0] = pTcb->TCS_DrvHead;
- info_array[1] = pTcb->TCS_DrvSector;
- info_array[2] = (unsigned long)capacity / pTcb->TCS_DrvHead / pTcb->TCS_DrvSector;
+ if (tc->heads) {
+ info_array[0] = tc->heads;
+ info_array[1] = tc->sectors;
+ info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
} else {
- if (pTcb->TCS_DrvFlags & TCF_DRV_255_63) {
+ if (tc->drv_flags & TCF_DRV_255_63) {
info_array[0] = 255;
info_array[1] = 63;
info_array[2] = (unsigned long)capacity / 255 / 63;
@@ -3047,7 +2722,16 @@ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
return 0;
}
-static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
+/**
+ * i91u_unmap_scb - Unmap a command
+ * @pci_dev: PCI device the command is for
+ * @cmnd: The command itself
+ *
+ * Unmap any PCI mapping/IOMMU resources allocated when the command
+ * was mapped originally as part of initio_build_scb
+ */
+
+static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
if (cmnd->SCp.ptr) {
@@ -3058,65 +2742,63 @@ static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
}
/* request buffer */
- if (cmnd->use_sg) {
+ if (scsi_sg_count(cmnd)) {
dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
- sizeof(struct SG_Struc) * TOTAL_SG_ENTRY,
+ sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
- dma_unmap_sg(&pci_dev->dev, cmnd->request_buffer,
- cmnd->use_sg,
- cmnd->sc_data_direction);
- } else if (cmnd->request_bufflen) {
- dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
- cmnd->request_bufflen,
- cmnd->sc_data_direction);
+ scsi_dma_unmap(cmnd);
}
}
-/*****************************************************************************
- Function name : i91uSCBPost
- Description : This is callback routine be called when tulip finish one
- SCSI command.
- Input : pHCB - Pointer to host adapter control block.
- pSCB - Pointer to SCSI control block.
- Output : None.
- Return : None.
-*****************************************************************************/
-static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
-{
- struct scsi_cmnd *pSRB; /* Pointer to SCSI request block */
- HCS *pHCB;
- SCB *pSCB;
+/**
+ * i91uSCBPost - SCSI callback
+ * @host: Pointer to host adapter control block.
+ * @cmnd: Pointer to SCSI control block.
+ *
+ * This is the callback routine called when the tulip chip has
+ * finished processing a SCSI command.
+ */
- pHCB = (HCS *) pHcb;
- pSCB = (SCB *) pScb;
- if ((pSRB = pSCB->SCB_Srb) == 0) {
- printk("i91uSCBPost: SRB pointer is empty\n");
+static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
+{
+ struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */
+ struct initio_host *host;
+ struct scsi_ctrl_blk *cblk;
- tul_release_scb(pHCB, pSCB); /* Release SCB for current channel */
+ host = (struct initio_host *) host_mem;
+ cblk = (struct scsi_ctrl_blk *) cblk_mem;
+ if ((cmnd = cblk->srb) == NULL) {
+ printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
+ WARN_ON(1);
+ initio_release_scb(host, cblk); /* Release SCB for current channel */
return;
}
- switch (pSCB->SCB_HaStat) {
+
+ /*
+ * Remap the firmware error status into a mid layer one
+ */
+ switch (cblk->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
- pSCB->SCB_HaStat = 0;
+ cblk->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
- pSCB->SCB_HaStat = DID_TIME_OUT;
+ cblk->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
- pSCB->SCB_HaStat = DID_RESET;
+ cblk->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
- pSCB->SCB_HaStat = DID_ABORT;
+ cblk->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
@@ -3126,49 +2808,196 @@ static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
case 0x16: /* Invalid SCB Operation Code. */
default:
- printk("ini9100u: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat);
- pSCB->SCB_HaStat = DID_ERROR; /* Couldn't find any better */
+ printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
+ cblk->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
- pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16);
+ cmnd->result = cblk->tastat | (cblk->hastat << 16);
+ WARN_ON(cmnd == NULL);
+ i91u_unmap_scb(host->pci_dev, cmnd);
+ cmnd->scsi_done(cmnd); /* Notify system DONE */
+ initio_release_scb(host, cblk); /* Release SCB for current channel */
+}
+
+static struct scsi_host_template initio_template = {
+ .proc_name = "INI9100U",
+ .name = "Initio INI-9X00U/UW SCSI device driver",
+ .queuecommand = i91u_queuecommand,
+ .eh_bus_reset_handler = i91u_bus_reset,
+ .bios_param = i91u_biosparam,
+ .can_queue = MAX_TARGETS * i91u_MAXQUEUE,
+ .this_id = 1,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+static int initio_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ struct initio_host *host;
+ u32 reg;
+ u16 bios_seg;
+ struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
+ int num_scb, i, error;
+
+ error = pci_enable_device(pdev);
+ if (error)
+ return error;
+
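+ /* Recover the 16 bit BIOS segment from PCI config dword 0x44 */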
+ pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
+ bios_seg = (u16) (reg & 0xFF);
+ if (((reg & 0xFF00) >> 8) == 0xFF)
+ reg = 0;
+ bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
+
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+ printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
+ error = -ENODEV;
+ goto out_disable_device;
+ }
+ shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
+ if (!shost) {
+ printk(KERN_WARNING "initio: Could not allocate host structure.\n");
+ error = -ENOMEM;
+ goto out_disable_device;
+ }
+ host = (struct initio_host *)shost->hostdata;
+ memset(host, 0, sizeof(struct initio_host));
- if (pSRB == NULL) {
- printk("pSRB is NULL\n");
+ if (!request_region(host->addr, 256, "i91u")) {
+ printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
+ error = -ENODEV;
+ goto out_host_put;
}
- i91u_unmap_cmnd(pHCB->pci_dev, pSRB);
- pSRB->scsi_done(pSRB); /* Notify system DONE */
+ if (initio_tag_enable) /* 1.01i */
+ num_scb = MAX_TARGETS * i91u_MAXQUEUE;
+ else
+ num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
- tul_release_scb(pHCB, pSCB); /* Release SCB for current channel */
-}
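+ /* Allocate the SCB array, shrinking the request until the allocation succeeds (minimum MAX_TARGETS + 3) */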
+ for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
+ i = num_scb * sizeof(struct scsi_ctrl_blk);
+ if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ break;
+ }
+
+ if (!scb) {
+ printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
+ error = -ENOMEM;
+ goto out_release_region;
+ }
-/*
- * Release ressources
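+ /* Chain the SCBs into the free list and record the first and last entries */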
+ host->num_scbs = num_scb;
+ host->scb = scb;
+ host->next_pending = scb;
+ host->next_avail = scb;
+ for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
+ tmp->tagid = i;
+ if (i != 0)
+ prev->next = tmp;
+ prev = tmp;
+ }
+ prev->next = NULL;
+ host->scb_end = tmp;
+ host->first_avail = scb;
+ host->last_avail = prev;
+
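+ /* The BIOS segment is a real mode segment: shift left four bits for its physical address */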
+ initio_init(host, phys_to_virt(bios_seg << 4));
+
+ host->jsstatus0 = 0;
+
+ shost->io_port = host->addr;
+ shost->n_io_port = 0xff;
+ shost->can_queue = num_scb; /* 03/05/98 */
+ shost->unique_id = host->addr;
+ shost->max_id = host->max_tar;
+ shost->max_lun = 32; /* 10/21/97 */
+ shost->irq = pdev->irq;
+ shost->this_id = host->scsi_id; /* Assign HCS index */
+ shost->base = host->addr;
+ shost->sg_tablesize = TOTAL_SG_ENTRY;
+
+ error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+ if (error < 0) {
+ printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
+ goto out_free_scbs;
+ }
+
+ pci_set_drvdata(pdev, shost);
+ host->pci_dev = pdev;
+
+ error = scsi_add_host(shost, &pdev->dev);
+ if (error)
+ goto out_free_irq;
+ scsi_scan_host(shost);
+ return 0;
+out_free_irq:
+ free_irq(pdev->irq, shost);
+out_free_scbs:
+ kfree(host->scb);
+out_release_region:
+ release_region(host->addr, 256);
+out_host_put:
+ scsi_host_put(shost);
+out_disable_device:
+ pci_disable_device(pdev);
+ return error;
+}
+
+/**
+ * initio_remove_one - detach and free a host
+ * @pdev: PCI device being released
+ *
+ * Release the resources assigned to this adapter after it has
+ * finished being used.
*/
-static int i91u_release(struct Scsi_Host *hreg)
+
+static void initio_remove_one(struct pci_dev *pdev)
{
- free_irq(hreg->irq, hreg);
- release_region(hreg->io_port, 256);
- return 0;
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct initio_host *s = (struct initio_host *)host->hostdata;
+ scsi_remove_host(host);
+ free_irq(pdev->irq, host);
+ release_region(s->addr, 256);
+ scsi_host_put(host);
+ pci_disable_device(pdev);
}
-MODULE_LICENSE("Dual BSD/GPL");
-
-static struct scsi_host_template driver_template = {
- .proc_name = "INI9100U",
- .name = i91u_REVID,
- .detect = i91u_detect,
- .release = i91u_release,
- .queuecommand = i91u_queuecommand,
-// .abort = i91u_abort,
-// .reset = i91u_reset,
- .eh_bus_reset_handler = i91u_bus_reset,
- .bios_param = i91u_biosparam,
- .can_queue = 1,
- .this_id = 1,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING,
+
+MODULE_LICENSE("GPL");
+
+static struct pci_device_id initio_pci_tbl[] = {
+ {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
+
+static struct pci_driver initio_pci_driver = {
+ .name = "initio",
+ .id_table = initio_pci_tbl,
+ .probe = initio_probe_one,
+ .remove = __devexit_p(initio_remove_one),
};
-#include "scsi_module.c"
+static int __init initio_init_driver(void)
+{
+ return pci_register_driver(&initio_pci_driver);
+}
+
+static void __exit initio_exit_driver(void)
+{
+ pci_unregister_driver(&initio_pci_driver);
+}
+
+MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
+MODULE_AUTHOR("Initio Corporation");
+MODULE_LICENSE("GPL");
+
+module_init(initio_init_driver);
+module_exit(initio_exit_driver);
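
The hunk above completes initio's move from the old scsi_module.c detect/release model to a hotplug-capable pci_driver with probe/remove entry points. What follows is a minimal sketch of that registration pattern, not the driver's exact code; all example_* names and the single ID entry are illustrative.

/*
 * Minimal PCI/SCSI driver skeleton following the pattern adopted above.
 * Every example_* identifier is illustrative, not part of the patch.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	/* allocate the Scsi_Host, claim I/O resources, request the IRQ,
	 * then scsi_add_host()/scsi_scan_host() as in initio_probe_one() */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	/* scsi_remove_host(), free_irq(), release_region(), ... */
	pci_disable_device(pdev);
}

static struct pci_device_id example_ids[] = {
	{ PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
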
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index acb67a4af2cc..cb48efa81fe2 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,6 +4,8 @@
* Copyright (c) 1994-1998 Initio Corporation
* All rights reserved.
*
+ * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
@@ -18,27 +20,6 @@
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -56,17 +37,6 @@
#include <linux/types.h>
-#define ULONG unsigned long
-#define USHORT unsigned short
-#define UCHAR unsigned char
-#define BYTE unsigned char
-#define WORD unsigned short
-#define DWORD unsigned long
-#define UBYTE unsigned char
-#define UWORD unsigned short
-#define UDWORD unsigned long
-#define U32 u32
-
#define TOTAL_SG_ENTRY 32
#define MAX_SUPPORTED_ADAPTERS 8
#define MAX_OFFSET 15
@@ -368,55 +338,55 @@ typedef struct {
/************************************************************************/
/* Scatter-Gather Element Structure */
/************************************************************************/
-typedef struct SG_Struc {
- U32 SG_Ptr; /* Data Pointer */
- U32 SG_Len; /* Data Length */
-} SG;
+struct sg_entry {
+ u32 data; /* Data Pointer */
+ u32 len; /* Data Length */
+};
/***********************************************************************
SCSI Control Block
************************************************************************/
-typedef struct Scsi_Ctrl_Blk {
- struct Scsi_Ctrl_Blk *SCB_NxtScb;
- UBYTE SCB_Status; /*4 */
- UBYTE SCB_NxtStat; /*5 */
- UBYTE SCB_Mode; /*6 */
- UBYTE SCB_Msgin; /*7 SCB_Res0 */
- UWORD SCB_SGIdx; /*8 */
- UWORD SCB_SGMax; /*A */
+struct scsi_ctrl_blk {
+ struct scsi_ctrl_blk *next;
+ u8 status; /*4 */
+ u8 next_state; /*5 */
+ u8 mode; /*6 */
+ u8 msgin; /*7 SCB_Res0 */
+ u16 sgidx; /*8 */
+ u16 sgmax; /*A */
#ifdef ALPHA
- U32 SCB_Reserved[2]; /*C */
+ u32 reserved[2]; /*C */
#else
- U32 SCB_Reserved[3]; /*C */
+ u32 reserved[3]; /*C */
#endif
- U32 SCB_XferLen; /*18 Current xfer len */
- U32 SCB_TotXLen; /*1C Total xfer len */
- U32 SCB_PAddr; /*20 SCB phy. Addr. */
-
- UBYTE SCB_Opcode; /*24 SCB command code */
- UBYTE SCB_Flags; /*25 SCB Flags */
- UBYTE SCB_Target; /*26 Target Id */
- UBYTE SCB_Lun; /*27 Lun */
- U32 SCB_BufPtr; /*28 Data Buffer Pointer */
- U32 SCB_BufLen; /*2C Data Allocation Length */
- UBYTE SCB_SGLen; /*30 SG list # */
- UBYTE SCB_SenseLen; /*31 Sense Allocation Length */
- UBYTE SCB_HaStat; /*32 */
- UBYTE SCB_TaStat; /*33 */
- UBYTE SCB_CDBLen; /*34 CDB Length */
- UBYTE SCB_Ident; /*35 Identify */
- UBYTE SCB_TagMsg; /*36 Tag Message */
- UBYTE SCB_TagId; /*37 Queue Tag */
- UBYTE SCB_CDB[12]; /*38 */
- U32 SCB_SGPAddr; /*44 SG List/Sense Buf phy. Addr. */
- U32 SCB_SensePtr; /*48 Sense data pointer */
- void (*SCB_Post) (BYTE *, BYTE *); /*4C POST routine */
- struct scsi_cmnd *SCB_Srb; /*50 SRB Pointer */
- SG SCB_SGList[TOTAL_SG_ENTRY]; /*54 Start of SG list */
-} SCB;
-
-/* Bit Definition for SCB_Status */
+ u32 xferlen; /*18 Current xfer len */
+ u32 totxlen; /*1C Total xfer len */
+ u32 paddr; /*20 SCB phy. Addr. */
+
+ u8 opcode; /*24 SCB command code */
+ u8 flags; /*25 SCB Flags */
+ u8 target; /*26 Target Id */
+ u8 lun; /*27 Lun */
+ u32 bufptr; /*28 Data Buffer Pointer */
+ u32 buflen; /*2C Data Allocation Length */
+ u8 sglen; /*30 SG list # */
+ u8 senselen; /*31 Sense Allocation Length */
+ u8 hastat; /*32 */
+ u8 tastat; /*33 */
+ u8 cdblen; /*34 CDB Length */
+ u8 ident; /*35 Identify */
+ u8 tagmsg; /*36 Tag Message */
+ u8 tagid; /*37 Queue Tag */
+ u8 cdb[12]; /*38 */
+ u32 sgpaddr; /*44 SG List/Sense Buf phy. Addr. */
+ u32 senseptr; /*48 Sense data pointer */
+ void (*post) (u8 *, u8 *); /*4C POST routine */
+ struct scsi_cmnd *srb; /*50 SRB Pointer */
+ struct sg_entry sglist[TOTAL_SG_ENTRY]; /*54 Start of SG list */
+};
+
+/* Bit Definition for status */
#define SCB_RENT 0x01
#define SCB_PEND 0x02
#define SCB_CONTIG 0x04 /* Contigent Allegiance */
@@ -425,17 +395,17 @@ typedef struct Scsi_Ctrl_Blk {
#define SCB_DONE 0x20
-/* Opcodes of SCB_Opcode */
+/* Opcodes for opcode */
#define ExecSCSI 0x1
#define BusDevRst 0x2
#define AbortCmd 0x3
-/* Bit Definition for SCB_Mode */
+/* Bit Definition for mode */
#define SCM_RSENS 0x01 /* request sense mode */
-/* Bit Definition for SCB_Flags */
+/* Bit Definition for flags */
#define SCF_DONE 0x01
#define SCF_POST 0x02
#define SCF_SENSE 0x04
@@ -492,15 +462,14 @@ typedef struct Scsi_Ctrl_Blk {
Target Device Control Structure
**********************************************************************/
-typedef struct Tar_Ctrl_Struc {
- UWORD TCS_Flags; /* 0 */
- UBYTE TCS_JS_Period; /* 2 */
- UBYTE TCS_SConfig0; /* 3 */
-
- UWORD TCS_DrvFlags; /* 4 */
- UBYTE TCS_DrvHead; /* 6 */
- UBYTE TCS_DrvSector; /* 7 */
-} TCS;
+struct target_control {
+ u16 flags;
+ u8 js_period;
+ u8 sconfig0;
+ u16 drv_flags;
+ u8 heads;
+ u8 sectors;
+};
/***********************************************************************
Target Device Control Structure
@@ -523,62 +492,53 @@ typedef struct Tar_Ctrl_Struc {
#define TCF_DRV_EN_TAG 0x0800
#define TCF_DRV_255_63 0x0400
-typedef struct I91u_Adpt_Struc {
- UWORD ADPT_BIOS; /* 0 */
- UWORD ADPT_BASE; /* 1 */
- UBYTE ADPT_Bus; /* 2 */
- UBYTE ADPT_Device; /* 3 */
- UBYTE ADPT_INTR; /* 4 */
-} INI_ADPT_STRUCT;
-
-
/***********************************************************************
Host Adapter Control Structure
************************************************************************/
-typedef struct Ha_Ctrl_Struc {
- UWORD HCS_Base; /* 00 */
- UWORD HCS_BIOS; /* 02 */
- UBYTE HCS_Intr; /* 04 */
- UBYTE HCS_SCSI_ID; /* 05 */
- UBYTE HCS_MaxTar; /* 06 */
- UBYTE HCS_NumScbs; /* 07 */
-
- UBYTE HCS_Flags; /* 08 */
- UBYTE HCS_Index; /* 09 */
- UBYTE HCS_HaId; /* 0A */
- UBYTE HCS_Config; /* 0B */
- UWORD HCS_IdMask; /* 0C */
- UBYTE HCS_Semaph; /* 0E */
- UBYTE HCS_Phase; /* 0F */
- UBYTE HCS_JSStatus0; /* 10 */
- UBYTE HCS_JSInt; /* 11 */
- UBYTE HCS_JSStatus1; /* 12 */
- UBYTE HCS_SConf1; /* 13 */
-
- UBYTE HCS_Msg[8]; /* 14 */
- SCB *HCS_NxtAvail; /* 1C */
- SCB *HCS_Scb; /* 20 */
- SCB *HCS_ScbEnd; /* 24 */
- SCB *HCS_NxtPend; /* 28 */
- SCB *HCS_NxtContig; /* 2C */
- SCB *HCS_ActScb; /* 30 */
- TCS *HCS_ActTcs; /* 34 */
-
- SCB *HCS_FirstAvail; /* 38 */
- SCB *HCS_LastAvail; /* 3C */
- SCB *HCS_FirstPend; /* 40 */
- SCB *HCS_LastPend; /* 44 */
- SCB *HCS_FirstBusy; /* 48 */
- SCB *HCS_LastBusy; /* 4C */
- SCB *HCS_FirstDone; /* 50 */
- SCB *HCS_LastDone; /* 54 */
- UBYTE HCS_MaxTags[16]; /* 58 */
- UBYTE HCS_ActTags[16]; /* 68 */
- TCS HCS_Tcs[MAX_TARGETS]; /* 78 */
- spinlock_t HCS_AvailLock;
- spinlock_t HCS_SemaphLock;
+struct initio_host {
+ u16 addr; /* 00 */
+ u16 bios_addr; /* 02 */
+ u8 irq; /* 04 */
+ u8 scsi_id; /* 05 */
+ u8 max_tar; /* 06 */
+ u8 num_scbs; /* 07 */
+
+ u8 flags; /* 08 */
+ u8 index; /* 09 */
+ u8 ha_id; /* 0A */
+ u8 config; /* 0B */
+ u16 idmask; /* 0C */
+ u8 semaph; /* 0E */
+ u8 phase; /* 0F */
+ u8 jsstatus0; /* 10 */
+ u8 jsint; /* 11 */
+ u8 jsstatus1; /* 12 */
+ u8 sconf1; /* 13 */
+
+ u8 msg[8]; /* 14 */
+ struct scsi_ctrl_blk *next_avail; /* 1C */
+ struct scsi_ctrl_blk *scb; /* 20 */
+ struct scsi_ctrl_blk *scb_end; /* 24 */ /*UNUSED*/
+ struct scsi_ctrl_blk *next_pending; /* 28 */
+ struct scsi_ctrl_blk *next_contig; /* 2C */ /*UNUSED*/
+ struct scsi_ctrl_blk *active; /* 30 */
+ struct target_control *active_tc; /* 34 */
+
+ struct scsi_ctrl_blk *first_avail; /* 38 */
+ struct scsi_ctrl_blk *last_avail; /* 3C */
+ struct scsi_ctrl_blk *first_pending; /* 40 */
+ struct scsi_ctrl_blk *last_pending; /* 44 */
+ struct scsi_ctrl_blk *first_busy; /* 48 */
+ struct scsi_ctrl_blk *last_busy; /* 4C */
+ struct scsi_ctrl_blk *first_done; /* 50 */
+ struct scsi_ctrl_blk *last_done; /* 54 */
+ u8 max_tags[16]; /* 58 */
+ u8 act_tags[16]; /* 68 */
+ struct target_control targets[MAX_TARGETS]; /* 78 */
+ spinlock_t avail_lock;
+ spinlock_t semaph_lock;
struct pci_dev *pci_dev;
-} HCS;
+};
/* Bit Definition for HCB_Config */
#define HCC_SCSI_RESET 0x01
@@ -599,47 +559,47 @@ typedef struct Ha_Ctrl_Struc {
*******************************************************************/
typedef struct _NVRAM_SCSI { /* SCSI channel configuration */
- UCHAR NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */
- UCHAR NVM_ChConfig1; /* 0Dh -> Channel config 1 */
- UCHAR NVM_ChConfig2; /* 0Eh -> Channel config 2 */
- UCHAR NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */
+ u8 NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */
+ u8 NVM_ChConfig1; /* 0Dh -> Channel config 1 */
+ u8 NVM_ChConfig2; /* 0Eh -> Channel config 2 */
+ u8 NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */
/* SCSI target configuration */
- UCHAR NVM_Targ0Config; /* 10h -> Target 0 configuration */
- UCHAR NVM_Targ1Config; /* 11h -> Target 1 configuration */
- UCHAR NVM_Targ2Config; /* 12h -> Target 2 configuration */
- UCHAR NVM_Targ3Config; /* 13h -> Target 3 configuration */
- UCHAR NVM_Targ4Config; /* 14h -> Target 4 configuration */
- UCHAR NVM_Targ5Config; /* 15h -> Target 5 configuration */
- UCHAR NVM_Targ6Config; /* 16h -> Target 6 configuration */
- UCHAR NVM_Targ7Config; /* 17h -> Target 7 configuration */
- UCHAR NVM_Targ8Config; /* 18h -> Target 8 configuration */
- UCHAR NVM_Targ9Config; /* 19h -> Target 9 configuration */
- UCHAR NVM_TargAConfig; /* 1Ah -> Target A configuration */
- UCHAR NVM_TargBConfig; /* 1Bh -> Target B configuration */
- UCHAR NVM_TargCConfig; /* 1Ch -> Target C configuration */
- UCHAR NVM_TargDConfig; /* 1Dh -> Target D configuration */
- UCHAR NVM_TargEConfig; /* 1Eh -> Target E configuration */
- UCHAR NVM_TargFConfig; /* 1Fh -> Target F configuration */
+ u8 NVM_Targ0Config; /* 10h -> Target 0 configuration */
+ u8 NVM_Targ1Config; /* 11h -> Target 1 configuration */
+ u8 NVM_Targ2Config; /* 12h -> Target 2 configuration */
+ u8 NVM_Targ3Config; /* 13h -> Target 3 configuration */
+ u8 NVM_Targ4Config; /* 14h -> Target 4 configuration */
+ u8 NVM_Targ5Config; /* 15h -> Target 5 configuration */
+ u8 NVM_Targ6Config; /* 16h -> Target 6 configuration */
+ u8 NVM_Targ7Config; /* 17h -> Target 7 configuration */
+ u8 NVM_Targ8Config; /* 18h -> Target 8 configuration */
+ u8 NVM_Targ9Config; /* 19h -> Target 9 configuration */
+ u8 NVM_TargAConfig; /* 1Ah -> Target A configuration */
+ u8 NVM_TargBConfig; /* 1Bh -> Target B configuration */
+ u8 NVM_TargCConfig; /* 1Ch -> Target C configuration */
+ u8 NVM_TargDConfig; /* 1Dh -> Target D configuration */
+ u8 NVM_TargEConfig; /* 1Eh -> Target E configuration */
+ u8 NVM_TargFConfig; /* 1Fh -> Target F configuration */
} NVRAM_SCSI;
typedef struct _NVRAM {
/*----------header ---------------*/
- USHORT NVM_Signature; /* 0,1: Signature */
- UCHAR NVM_Size; /* 2: Size of data structure */
- UCHAR NVM_Revision; /* 3: Revision of data structure */
+ u16 NVM_Signature; /* 0,1: Signature */
+ u8 NVM_Size; /* 2: Size of data structure */
+ u8 NVM_Revision; /* 3: Revision of data structure */
/* ----Host Adapter Structure ---- */
- UCHAR NVM_ModelByte0; /* 4: Model number (byte 0) */
- UCHAR NVM_ModelByte1; /* 5: Model number (byte 1) */
- UCHAR NVM_ModelInfo; /* 6: Model information */
- UCHAR NVM_NumOfCh; /* 7: Number of SCSI channel */
- UCHAR NVM_BIOSConfig1; /* 8: BIOS configuration 1 */
- UCHAR NVM_BIOSConfig2; /* 9: BIOS configuration 2 */
- UCHAR NVM_HAConfig1; /* A: Hoat adapter configuration 1 */
- UCHAR NVM_HAConfig2; /* B: Hoat adapter configuration 2 */
+ u8 NVM_ModelByte0; /* 4: Model number (byte 0) */
+ u8 NVM_ModelByte1; /* 5: Model number (byte 1) */
+ u8 NVM_ModelInfo; /* 6: Model information */
+ u8 NVM_NumOfCh; /* 7: Number of SCSI channel */
+ u8 NVM_BIOSConfig1; /* 8: BIOS configuration 1 */
+ u8 NVM_BIOSConfig2; /* 9: BIOS configuration 2 */
+	u8	 NVM_HAConfig1;	/* A: Host adapter configuration 1 */
+	u8	 NVM_HAConfig2;	/* B: Host adapter configuration 2 */
NVRAM_SCSI NVM_SCSIInfo[2];
- UCHAR NVM_reserved[10];
+ u8 NVM_reserved[10];
/* ---------- CheckSum ---------- */
- USHORT NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */
+ u16 NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */
} NVRAM, *PNVRAM;
/* Bios Configuration for nvram->BIOSConfig1 */
@@ -681,19 +641,6 @@ typedef struct _NVRAM {
#define DISC_ALLOW 0xC0 /* Disconnect is allowed */
#define SCSICMD_RequestSense 0x03
-typedef struct _HCSinfo {
- ULONG base;
- UCHAR vec;
- UCHAR bios; /* High byte of BIOS address */
- USHORT BaseAndBios; /* high byte: pHcsInfo->bios,low byte:pHcsInfo->base */
-} HCSINFO;
-
-#define TUL_RD(x,y) (UCHAR)(inb( (int)((ULONG)(x+y)) ))
-#define TUL_RDLONG(x,y) (ULONG)(inl((int)((ULONG)(x+y)) ))
-#define TUL_WR( adr,data) outb( (UCHAR)(data), (int)(adr))
-#define TUL_WRSHORT(adr,data) outw( (UWORD)(data), (int)(adr))
-#define TUL_WRLONG( adr,data) outl( (ULONG)(data), (int)(adr))
-
#define SCSI_ABORT_SNOOZE 0
#define SCSI_ABORT_SUCCESS 1
#define SCSI_ABORT_PENDING 2
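
The header rework above drops the driver-private ULONG/UCHAR/UWORD typedefs and the CamelCase typedef'd structs in favour of the kernel's fixed-width types and plain struct tags. A tiny sketch of the target style, with an illustrative struct and field names:

/* Sketch only: <linux/types.h> provides u8/u16/u32; no private typedefs. */
#include <linux/types.h>

struct sg_entry_example {
	u32 data;	/* DMA address of the segment */
	u32 len;	/* segment length in bytes */
};
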
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fa6ff295e568..f142eafb6fc7 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg: ioa config struct
- * @ipr_cmd: ipr command struct
- *
- * Return value:
- * nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
- struct ipr_cmnd *ipr_cmd)
-{
- struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
- if (ipr_cmd->dma_use_sg) {
- if (scsi_cmd->use_sg > 0) {
- pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
- scsi_cmd->use_sg,
- scsi_cmd->sc_data_direction);
- } else {
- pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
- scsi_cmd->request_bufflen,
- scsi_cmd->sc_data_direction);
- }
- }
-}
-
-/**
* ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
* @ioa_cfg: ioa config struct
* @clr_ints: interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_cmd->result |= (DID_ERROR << 16);
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}
@@ -2465,6 +2439,7 @@ restart:
/**
* ipr_read_trace - Dump the adapter trace
* @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
@@ -2472,8 +2447,9 @@ restart:
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t ipr_read_trace(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct class_device *cdev = container_of(kobj,struct class_device,kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3166,6 +3142,7 @@ static struct class_device_attribute *ipr_ioa_attrs[] = {
/**
* ipr_read_dump - Dump the adapter
* @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
@@ -3173,8 +3150,9 @@ static struct class_device_attribute *ipr_ioa_attrs[] = {
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t ipr_read_dump(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct class_device *cdev = container_of(kobj,struct class_device,kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3327,6 +3305,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
/**
* ipr_write_dump - Setup dump state of adapter
* @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
@@ -3334,8 +3313,9 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* number of bytes printed to buffer
**/
-static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
- loff_t off, size_t count)
+static ssize_t ipr_write_dump(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct class_device *cdev = container_of(kobj,struct class_device,kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -4292,93 +4272,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd)
{
- int i;
- struct scatterlist *sglist;
+ int i, nseg;
+ struct scatterlist *sg;
u32 length;
u32 ioadl_flags = 0;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
- length = scsi_cmd->request_bufflen;
-
- if (length == 0)
+ length = scsi_bufflen(scsi_cmd);
+ if (!length)
return 0;
- if (scsi_cmd->use_sg) {
- ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
- scsi_cmd->request_buffer,
- scsi_cmd->use_sg,
- scsi_cmd->sc_data_direction);
-
- if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(length);
- ioarcb->read_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
- }
-
- sglist = scsi_cmd->request_buffer;
+ nseg = scsi_dma_map(scsi_cmd);
+ if (nseg < 0) {
+ dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+ return -1;
+ }
- if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- }
+ ipr_cmd->dma_use_sg = nseg;
- for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
- ioadl[i].flags_and_data_len =
- cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
- ioadl[i].address =
- cpu_to_be32(sg_dma_address(&sglist[i]));
- }
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->write_data_transfer_length = cpu_to_be32(length);
+ ioarcb->write_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+ ioarcb->read_data_transfer_length = cpu_to_be32(length);
+ ioarcb->read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ }
- if (likely(ipr_cmd->dma_use_sg)) {
- ioadl[i-1].flags_and_data_len |=
- cpu_to_be32(IPR_IOADL_FLAGS_LAST);
- return 0;
- } else
- dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
- } else {
- if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_WRITE;
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
- ioadl_flags = IPR_IOADL_FLAGS_READ;
- ioarcb->read_data_transfer_length = cpu_to_be32(length);
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- }
+ if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+ ioadl = ioarcb->add_data.u.ioadl;
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+ offsetof(struct ipr_ioarcb, add_data));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
- ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
- scsi_cmd->request_buffer, length,
- scsi_cmd->sc_data_direction);
-
- if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- ipr_cmd->dma_use_sg = 1;
- ioadl[0].flags_and_data_len =
- cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
- ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
- return 0;
- } else
- dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+ scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+ ioadl[i].flags_and_data_len =
+ cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+ ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
}
- return -1;
+ ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ return 0;
}
/**
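
The ipr_build_ioadl() rewrite above is the core of this conversion: the open-coded pci_map_sg()/pci_map_single() branches collapse into the midlayer's data accessors, since every command now carries a scatterlist. Below is a hedged sketch of that pattern in a generic build-sg routine; example_build_sg() and its flat addr[]/len[] descriptor arrays are illustrative, and a 32-bit DMA mask is assumed where sg_dma_address() is truncated.

/*
 * Sketch of the scsi_dma_map()/scsi_for_each_sg() pattern used above.
 * The descriptor layout (addr[]/len[]) is illustrative only.
 */
#include <scsi/scsi_cmnd.h>
#include <linux/scatterlist.h>

static int example_build_sg(struct scsi_cmnd *cmd, u32 *addr, u32 *len,
			    int max_entries)
{
	struct scatterlist *sg;
	int i, nseg;

	if (!scsi_bufflen(cmd))
		return 0;			/* no data phase */

	nseg = scsi_dma_map(cmd);		/* maps the whole scatterlist */
	if (nseg < 0)
		return -EIO;			/* mapping failed */
	if (nseg > max_entries) {
		scsi_dma_unmap(cmd);
		return -EIO;
	}

	scsi_for_each_sg(cmd, sg, nseg, i) {
		addr[i] = sg_dma_address(sg);	/* assumes 32-bit DMA mask */
		len[i] = sg_dma_len(sg);
	}
	return nseg;
}

/* Completion side, as in ipr_scsi_done()/i91uSCBPost() above:
 *	scsi_dma_unmap(cmd);
 *	cmd->scsi_done(cmd);
 */
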
@@ -4441,7 +4383,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->needs_sync_complete = 1;
res->in_erp = 0;
}
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -4819,7 +4761,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
}
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -4840,10 +4782,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
- scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+ scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+ scsi_dma_unmap(ipr_cmd->scsi_cmd);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
} else
@@ -5367,18 +5309,12 @@ static const u16 ipr_blocked_processors[] = {
**/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
- u8 rev_id;
int i;
- if (ioa_cfg->type == 0x5702) {
- if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
- &rev_id) == PCIBIOS_SUCCESSFUL) {
- if (rev_id < 4) {
- for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
- if (__is_processor(ipr_blocked_processors[i]))
- return 1;
- }
- }
+ if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
+ for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
+ if (__is_processor(ipr_blocked_processors[i]))
+ return 1;
}
}
return 0;
@@ -7535,13 +7471,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
else
ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
- rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &ioa_cfg->revid);
-
- if (rc != PCIBIOS_SUCCESSFUL) {
- dev_err(&pdev->dev, "Failed to read PCI revision ID\n");
- rc = -EIO;
- goto out_scsi_host_put;
- }
+ ioa_cfg->revid = pdev->revision;
ipr_regs_pci = pci_resource_start(pdev, 0);
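
The last ipr hunk swaps an explicit config-space read of PCI_REVISION_ID (and its error path) for the pci_dev->revision field that the PCI core caches at enumeration time. A one-line sketch, with an illustrative helper name:

/* Sketch: the revision ID is already cached in struct pci_dev. */
#include <linux/pci.h>

static u8 example_get_revision(struct pci_dev *pdev)
{
	/* replaces pci_read_config_byte(pdev, PCI_REVISION_ID, &rev) */
	return pdev->revision;
}
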
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8b704f73055a..492a51bd6aa8 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -211,19 +211,6 @@ module_param(ips, charp, 0);
#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
#endif
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-#include <linux/blk.h>
-#include "sd.h"
-#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
-#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#ifndef __devexit_p
-#define __devexit_p(x) x
-#endif
-#else
-#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
-#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
-#endif
-
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
PCI_DMA_BIDIRECTIONAL : \
@@ -381,24 +368,13 @@ static struct scsi_host_template ips_driver_template = {
.eh_abort_handler = ips_eh_abort,
.eh_host_reset_handler = ips_eh_reset,
.proc_name = "ips",
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
.proc_info = ips_proc_info,
.slave_configure = ips_slave_configure,
-#else
- .proc_info = ips_proc24_info,
- .select_queue_depths = ips_select_queue_depth,
-#endif
.bios_param = ips_biosparam,
.this_id = -1,
.sg_tablesize = IPS_MAX_SG,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- .use_new_eh_code = 1,
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- .highmem_io = 1,
-#endif
};
@@ -731,7 +707,7 @@ ips_release(struct Scsi_Host *sh)
/* free IRQ */
free_irq(ha->irq, ha);
- IPS_REMOVE_HOST(sh);
+ scsi_remove_host(sh);
scsi_host_put(sh);
ips_released_controllers++;
@@ -813,7 +789,6 @@ int ips_eh_abort(struct scsi_cmnd *SC)
ips_ha_t *ha;
ips_copp_wait_item_t *item;
int ret;
- unsigned long cpu_flags;
struct Scsi_Host *host;
METHOD_TRACE("ips_eh_abort", 1);
@@ -830,7 +805,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
if (!ha->active)
return (FAILED);
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
/* See if the command is on the copp queue */
item = ha->copp_waitlist.head;
@@ -851,7 +826,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
ret = (FAILED);
}
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
return ret;
}
@@ -1129,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
/* A Reset IOCTL is only sent by the boot CD in extreme cases. */
/* There can never be any system activity ( network or disk ), but check */
/* anyway just as a good practice. */
- pt = (ips_passthru_t *) SC->request_buffer;
+ pt = (ips_passthru_t *) scsi_sglist(SC);
if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
(pt->CoppCP.cmd.reset.adapter_flag == 1)) {
if (ha->scb_activelist.count != 0) {
@@ -1176,18 +1151,10 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
/* Set bios geometry for the controller */
/* */
/****************************************************************************/
-static int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ips_biosparam(Disk * disk, kdev_t dev, int geom[])
-{
- ips_ha_t *ha = (ips_ha_t *) disk->device->host->hostdata;
- unsigned long capacity = disk->capacity;
-#else
-ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int geom[])
+static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
{
ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
-#endif
int heads;
int sectors;
int cylinders;
@@ -1225,70 +1192,6 @@ ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
return (0);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-
-/* ips_proc24_info is a wrapper around ips_proc_info *
- * for compatibility with the 2.4 scsi parameters */
-static int
-ips_proc24_info(char *buffer, char **start, off_t offset, int length,
- int hostno, int func)
-{
- int i;
-
- for (i = 0; i < ips_next_controller; i++) {
- if (ips_sh[i] && ips_sh[i]->host_no == hostno) {
- return ips_proc_info(ips_sh[i], buffer, start,
- offset, length, func);
- }
- }
- return -EINVAL;
-}
-
-/****************************************************************************/
-/* */
-/* Routine Name: ips_select_queue_depth */
-/* */
-/* Routine Description: */
-/* */
-/* Select queue depths for the devices on the contoller */
-/* */
-/****************************************************************************/
-static void
-ips_select_queue_depth(struct Scsi_Host *host, struct scsi_device * scsi_devs)
-{
- struct scsi_device *device;
- ips_ha_t *ha;
- int count = 0;
- int min;
-
- ha = IPS_HA(host);
- min = ha->max_cmds / 4;
-
- for (device = scsi_devs; device; device = device->next) {
- if (device->host == host) {
- if ((device->channel == 0) && (device->type == 0))
- count++;
- }
- }
-
- for (device = scsi_devs; device; device = device->next) {
- if (device->host == host) {
- if ((device->channel == 0) && (device->type == 0)) {
- device->queue_depth =
- (ha->max_cmds - 1) / count;
- if (device->queue_depth < min)
- device->queue_depth = min;
- } else {
- device->queue_depth = 2;
- }
-
- if (device->queue_depth < 2)
- device->queue_depth = 2;
- }
- }
-}
-
-#else
/****************************************************************************/
/* */
/* Routine Name: ips_slave_configure */
@@ -1316,7 +1219,6 @@ ips_slave_configure(struct scsi_device * SDptr)
SDptr->skip_ms_page_3f = 1;
return 0;
}
-#endif
/****************************************************************************/
/* */
@@ -1331,7 +1233,6 @@ static irqreturn_t
do_ipsintr(int irq, void *dev_id)
{
ips_ha_t *ha;
- unsigned long cpu_flags;
struct Scsi_Host *host;
int irqstatus;
@@ -1347,16 +1248,16 @@ do_ipsintr(int irq, void *dev_id)
return IRQ_HANDLED;
}
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
if (!ha->active) {
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
return IRQ_HANDLED;
}
irqstatus = (*ha->func.intr) (ha);
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
/* start the next command */
ips_next(ha, IPS_INTR_ON);
@@ -1606,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
(SC->device->channel == 0) &&
(SC->device->id == IPS_ADAPTER_ID) &&
- (SC->device->lun == 0) && SC->request_buffer) {
- if ((!SC->use_sg) && SC->request_bufflen &&
- (((char *) SC->request_buffer)[0] == 'C') &&
- (((char *) SC->request_buffer)[1] == 'O') &&
- (((char *) SC->request_buffer)[2] == 'P') &&
- (((char *) SC->request_buffer)[3] == 'P'))
- return 1;
- else if (SC->use_sg) {
- struct scatterlist *sg = SC->request_buffer;
- char *buffer;
-
- /* kmap_atomic() ensures addressability of the user buffer.*/
- /* local_irq_save() protects the KM_IRQ0 address slot. */
- local_irq_save(flags);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
- buffer[2] == 'P' && buffer[3] == 'P') {
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- return 1;
- }
- kunmap_atomic(buffer - sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- }
+ (SC->device->lun == 0) && scsi_sglist(SC)) {
+ struct scatterlist *sg = scsi_sglist(SC);
+ char *buffer;
+
+ /* kmap_atomic() ensures addressability of the user buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+ buffer[2] == 'P' && buffer[3] == 'P') {
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
+ return 1;
+ }
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
}
return 0;
}
@@ -1680,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
{
ips_passthru_t *pt;
int length = 0;
- int ret;
+ int i, ret;
+ struct scatterlist *sg = scsi_sglist(SC);
METHOD_TRACE("ips_make_passthru", 1);
- if (!SC->use_sg) {
- length = SC->request_bufflen;
- } else {
- struct scatterlist *sg = SC->request_buffer;
- int i;
- for (i = 0; i < SC->use_sg; i++)
- length += sg[i].length;
- }
+ scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
+ length += sg[i].length;
+
if (length < sizeof (ips_passthru_t)) {
/* wrong size */
DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
@@ -2115,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ips_cleanup_passthru", 1);
- if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) {
+ if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
ips_name, ha->host_num);
@@ -2730,7 +2619,6 @@ ips_next(ips_ha_t * ha, int intr)
struct scsi_cmnd *q;
ips_copp_wait_item_t *item;
int ret;
- unsigned long cpu_flags = 0;
struct Scsi_Host *host;
METHOD_TRACE("ips_next", 1);
@@ -2742,7 +2630,7 @@ ips_next(ips_ha_t * ha, int intr)
* this command won't time out
*/
if (intr == IPS_INTR_ON)
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
if ((ha->subsys->param[3] & 0x300000)
&& (ha->scb_activelist.count == 0)) {
@@ -2769,14 +2657,14 @@ ips_next(ips_ha_t * ha, int intr)
item = ips_removeq_copp_head(&ha->copp_waitlist);
ha->num_ioctl++;
if (intr == IPS_INTR_ON)
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
scb->scsi_cmd = item->scsi_cmd;
kfree(item);
ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
if (intr == IPS_INTR_ON)
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
switch (ret) {
case IPS_FAILURE:
if (scb->scsi_cmd) {
@@ -2846,7 +2734,7 @@ ips_next(ips_ha_t * ha, int intr)
SC = ips_removeq_wait(&ha->scb_waitlist, q);
if (intr == IPS_INTR_ON)
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */
+ spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */
SC->result = DID_OK;
SC->host_scribble = NULL;
@@ -2866,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
/* copy in the CDB */
memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
- /* Now handle the data buffer */
- if (SC->use_sg) {
+ scb->sg_count = scsi_dma_map(SC);
+ BUG_ON(scb->sg_count < 0);
+ if (scb->sg_count) {
struct scatterlist *sg;
int i;
- sg = SC->request_buffer;
- scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
- SC->sc_data_direction);
scb->flags |= IPS_SCB_MAP_SG;
- for (i = 0; i < scb->sg_count; i++) {
+
+ scsi_for_each_sg(SC, sg, scb->sg_count, i) {
if (ips_fill_scb_sg_single
- (ha, sg_dma_address(&sg[i]), scb, i,
- sg_dma_len(&sg[i])) < 0)
+ (ha, sg_dma_address(sg), scb, i,
+ sg_dma_len(sg)) < 0)
break;
}
scb->dcdb.transfer_length = scb->data_len;
} else {
- if (SC->request_bufflen) {
- scb->data_busaddr =
- pci_map_single(ha->pcidev,
- SC->request_buffer,
- SC->request_bufflen,
- SC->sc_data_direction);
- scb->flags |= IPS_SCB_MAP_SINGLE;
- ips_fill_scb_sg_single(ha, scb->data_busaddr,
- scb, 0,
- SC->request_bufflen);
- scb->dcdb.transfer_length = scb->data_len;
- } else {
- scb->data_busaddr = 0L;
- scb->sg_len = 0;
- scb->data_len = 0;
- scb->dcdb.transfer_length = 0;
- }
-
+ scb->data_busaddr = 0L;
+ scb->sg_len = 0;
+ scb->data_len = 0;
+ scb->dcdb.transfer_length = 0;
}
scb->dcdb.cmd_attribute =
@@ -2919,7 +2792,7 @@ ips_next(ips_ha_t * ha, int intr)
scb->dcdb.transfer_length = 0;
}
if (intr == IPS_INTR_ON)
- IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+ spin_lock(host->host_lock);
ret = ips_send_cmd(ha, scb);
@@ -2958,7 +2831,7 @@ ips_next(ips_ha_t * ha, int intr)
} /* end while */
if (intr == IPS_INTR_ON)
- IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+ spin_unlock(host->host_lock);
}
/****************************************************************************/
@@ -3377,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
* the rest of the data and continue.
*/
if ((scb->breakup) || (scb->sg_break)) {
+ struct scatterlist *sg;
+ int sg_dma_index, ips_sg_index = 0;
+
/* we had a data breakup */
scb->data_len = 0;
- if (scb->sg_count) {
- /* S/G request */
- struct scatterlist *sg;
- int ips_sg_index = 0;
- int sg_dma_index;
-
- sg = scb->scsi_cmd->request_buffer;
-
- /* Spin forward to last dma chunk */
- sg_dma_index = scb->breakup;
-
- /* Take care of possible partial on last chunk */
- ips_fill_scb_sg_single(ha,
- sg_dma_address(&sg
- [sg_dma_index]),
- scb, ips_sg_index++,
- sg_dma_len(&sg
- [sg_dma_index]));
-
- for (; sg_dma_index < scb->sg_count;
- sg_dma_index++) {
- if (ips_fill_scb_sg_single
- (ha,
- sg_dma_address(&sg[sg_dma_index]),
- scb, ips_sg_index++,
- sg_dma_len(&sg[sg_dma_index])) < 0)
- break;
+ sg = scsi_sglist(scb->scsi_cmd);
- }
+ /* Spin forward to last dma chunk */
+ sg_dma_index = scb->breakup;
- } else {
- /* Non S/G Request */
- (void) ips_fill_scb_sg_single(ha,
- scb->
- data_busaddr +
- (scb->sg_break *
- ha->max_xfer),
- scb, 0,
- scb->scsi_cmd->
- request_bufflen -
- (scb->sg_break *
- ha->max_xfer));
- }
+ /* Take care of possible partial on last chunk */
+ ips_fill_scb_sg_single(ha,
+ sg_dma_address(&sg[sg_dma_index]),
+ scb, ips_sg_index++,
+ sg_dma_len(&sg[sg_dma_index]));
+
+ for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
+ sg_dma_index++) {
+ if (ips_fill_scb_sg_single
+ (ha,
+ sg_dma_address(&sg[sg_dma_index]),
+ scb, ips_sg_index++,
+ sg_dma_len(&sg[sg_dma_index])) < 0)
+ break;
+ }
scb->dcdb.transfer_length = scb->data_len;
scb->dcdb.cmd_attribute |=
@@ -3653,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
static void
ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
- if (scmd->use_sg) {
- int i;
- unsigned int min_cnt, xfer_cnt;
- char *cdata = (char *) data;
- unsigned char *buffer;
- unsigned long flags;
- struct scatterlist *sg = scmd->request_buffer;
- for (i = 0, xfer_cnt = 0;
- (i < scmd->use_sg) && (xfer_cnt < count); i++) {
- min_cnt = min(count - xfer_cnt, sg[i].length);
-
- /* kmap_atomic() ensures addressability of the data buffer.*/
- /* local_irq_save() protects the KM_IRQ0 address slot. */
- local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
- memcpy(buffer, &cdata[xfer_cnt], min_cnt);
- kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
- local_irq_restore(flags);
-
- xfer_cnt += min_cnt;
- }
-
- } else {
- unsigned int min_cnt = min(count, scmd->request_bufflen);
- memcpy(scmd->request_buffer, data, min_cnt);
- }
+ int i;
+ unsigned int min_cnt, xfer_cnt;
+ char *cdata = (char *) data;
+ unsigned char *buffer;
+ unsigned long flags;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ for (i = 0, xfer_cnt = 0;
+ (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+ min_cnt = min(count - xfer_cnt, sg[i].length);
+
+ /* kmap_atomic() ensures addressability of the data buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+ kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+ local_irq_restore(flags);
+
+ xfer_cnt += min_cnt;
+ }
}
/****************************************************************************/
@@ -3691,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
static void
ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
{
- if (scmd->use_sg) {
- int i;
- unsigned int min_cnt, xfer_cnt;
- char *cdata = (char *) data;
- unsigned char *buffer;
- unsigned long flags;
- struct scatterlist *sg = scmd->request_buffer;
- for (i = 0, xfer_cnt = 0;
- (i < scmd->use_sg) && (xfer_cnt < count); i++) {
- min_cnt = min(count - xfer_cnt, sg[i].length);
-
- /* kmap_atomic() ensures addressability of the data buffer.*/
- /* local_irq_save() protects the KM_IRQ0 address slot. */
- local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
- memcpy(&cdata[xfer_cnt], buffer, min_cnt);
- kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
- local_irq_restore(flags);
-
- xfer_cnt += min_cnt;
- }
-
- } else {
- unsigned int min_cnt = min(count, scmd->request_bufflen);
- memcpy(data, scmd->request_buffer, min_cnt);
- }
+ int i;
+ unsigned int min_cnt, xfer_cnt;
+ char *cdata = (char *) data;
+ unsigned char *buffer;
+ unsigned long flags;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ for (i = 0, xfer_cnt = 0;
+ (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
+ min_cnt = min(count - xfer_cnt, sg[i].length);
+
+ /* kmap_atomic() ensures addressability of the data buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+ kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+ local_irq_restore(flags);
+
+ xfer_cnt += min_cnt;
+ }
}
/****************************************************************************/
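
The ips_scmd_buf_write()/ips_scmd_buf_read() hunks above drop the non-scatter/gather branch because a command's data buffer is now always described by a scatterlist. Here is a sketch of the copy-out direction, assuming the pre-sg_virt() API visible in this patch (sg->page plus kmap_atomic); example_copy_from_cmd() is an illustrative name.

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <scsi/scsi_cmnd.h>

static void example_copy_from_cmd(struct scsi_cmnd *cmd, void *data,
				  unsigned int count)
{
	struct scatterlist *sg = scsi_sglist(cmd);
	unsigned int copied = 0, chunk;
	unsigned long flags;
	char *buf;
	int i;

	for (i = 0; i < scsi_sg_count(cmd) && copied < count; i++) {
		chunk = min(count - copied, sg[i].length);

		/* kmap_atomic() makes a highmem page addressable;
		 * local_irq_save() protects the KM_IRQ0 slot. */
		local_irq_save(flags);
		buf = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
		memcpy((char *)data + copied, buf, chunk);
		kunmap_atomic(buf - sg[i].offset, KM_IRQ0);
		local_irq_restore(flags);

		copied += chunk;
	}
}
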
@@ -4350,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ips_rdcap", 1);
- if (scb->scsi_cmd->request_bufflen < 8)
+ if (scsi_bufflen(scb->scsi_cmd) < 8)
return (0);
cap.lba =
@@ -4735,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ips_freescb", 1);
if (scb->flags & IPS_SCB_MAP_SG)
- pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer,
- scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
+ scsi_dma_unmap(scb->scsi_cmd);
else if (scb->flags & IPS_SCB_MAP_SINGLE)
pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
IPS_DMA_DIR(scb));
@@ -7004,7 +6846,6 @@ ips_register_scsi(int index)
kfree(oldha);
ips_sh[index] = sh;
ips_ha[index] = ha;
- IPS_SCSI_SET_DEVICE(sh, ha);
/* Store away needed values for later use */
sh->io_port = ha->io_addr;
@@ -7016,17 +6857,16 @@ ips_register_scsi(int index)
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
sh->use_clustering = sh->hostt->use_clustering;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
sh->max_sectors = 128;
-#endif
sh->max_id = ha->ntargets;
sh->max_lun = ha->nlun;
sh->max_channel = ha->nbus - 1;
sh->can_queue = ha->max_cmds - 1;
- IPS_ADD_HOST(sh, NULL);
+ scsi_add_host(sh, NULL);
+ scsi_scan_host(sh);
+
return 0;
}
@@ -7069,7 +6909,7 @@ ips_module_init(void)
return -ENODEV;
ips_driver_template.module = THIS_MODULE;
ips_order_controllers();
- if (IPS_REGISTER_HOSTS(&ips_driver_template)) {
+ if (!ips_detect(&ips_driver_template)) {
pci_unregister_driver(&ips_pci_driver);
return -ENODEV;
}
@@ -7087,7 +6927,6 @@ ips_module_init(void)
static void __exit
ips_module_exit(void)
{
- IPS_UNREGISTER_HOSTS(&ips_driver_template);
pci_unregister_driver(&ips_pci_driver);
unregister_reboot_notifier(&ips_notifier);
}
@@ -7148,7 +6987,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
uint32_t mem_addr;
uint32_t io_len;
uint32_t mem_len;
- uint8_t revision_id;
uint8_t bus;
uint8_t func;
uint8_t irq;
@@ -7227,23 +7065,16 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
}
}
- /* get the revision ID */
- if (pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id)) {
- IPS_PRINTK(KERN_WARNING, pci_dev, "Can't get revision id.\n");
- return -1;
- }
-
subdevice_id = pci_dev->subsystem_device;
/* found a controller */
- ha = kmalloc(sizeof (ips_ha_t), GFP_KERNEL);
+ ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
if (ha == NULL) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate temporary ha struct\n");
return -1;
}
- memset(ha, 0, sizeof (ips_ha_t));
ips_sh[index] = NULL;
ips_ha[index] = ha;
@@ -7258,7 +7089,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
ha->mem_ptr = mem_ptr;
ha->ioremap_ptr = ioremap_ptr;
ha->host_num = (uint32_t) index;
- ha->revision_id = revision_id;
+ ha->revision_id = pci_dev->revision;
ha->slot_num = PCI_SLOT(pci_dev->devfn);
ha->device_id = pci_dev->device;
ha->subdevice_id = subdevice_id;
@@ -7443,15 +7274,9 @@ ips_init_phase2(int index)
return SUCCESS;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
MODULE_LICENSE("GPL");
-#endif
-
MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
-
-#ifdef MODULE_VERSION
MODULE_VERSION(IPS_VER_STRING);
-#endif
/*
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b726dcc424b1..24123d537c58 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -58,10 +58,6 @@
/*
* Some handy macros
*/
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) || defined CONFIG_HIGHIO
- #define IPS_HIGHIO
- #endif
-
#define IPS_HA(x) ((ips_ha_t *) x->hostdata)
#define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
#define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
@@ -84,38 +80,8 @@
#define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \
sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST))
- #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
- #define pci_set_dma_mask(dev,mask) ( mask > 0xffffffff ? 1:0 )
- #define scsi_set_pci_device(sh,dev) (0)
- #endif
-
- #ifndef IRQ_NONE
- typedef void irqreturn_t;
- #define IRQ_NONE
- #define IRQ_HANDLED
- #define IRQ_RETVAL(x)
- #endif
-
- #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- #define IPS_REGISTER_HOSTS(SHT) scsi_register_module(MODULE_SCSI_HA,SHT)
- #define IPS_UNREGISTER_HOSTS(SHT) scsi_unregister_module(MODULE_SCSI_HA,SHT)
- #define IPS_ADD_HOST(shost,device)
- #define IPS_REMOVE_HOST(shost)
- #define IPS_SCSI_SET_DEVICE(sh,ha) scsi_set_pci_device(sh, (ha)->pcidev)
- #define IPS_PRINTK(level, pcidev, format, arg...) \
- printk(level "%s %s:" format , "ips" , \
- (pcidev)->slot_name , ## arg)
- #define scsi_host_alloc(sh,size) scsi_register(sh,size)
- #define scsi_host_put(sh) scsi_unregister(sh)
- #else
- #define IPS_REGISTER_HOSTS(SHT) (!ips_detect(SHT))
- #define IPS_UNREGISTER_HOSTS(SHT)
- #define IPS_ADD_HOST(shost,device) do { scsi_add_host(shost,device); scsi_scan_host(shost); } while (0)
- #define IPS_REMOVE_HOST(shost) scsi_remove_host(shost)
- #define IPS_SCSI_SET_DEVICE(sh,ha) do { } while (0)
- #define IPS_PRINTK(level, pcidev, format, arg...) \
+ #define IPS_PRINTK(level, pcidev, format, arg...) \
dev_printk(level , &((pcidev)->dev) , format , ## arg)
- #endif
#define MDELAY(n) \
do { \
@@ -134,7 +100,7 @@
#define pci_dma_hi32(a) ((a >> 16) >> 16)
#define pci_dma_lo32(a) (a & 0xffffffff)
- #if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO)
+ #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
#define IPS_ENABLE_DMA64 (1)
#else
#define IPS_ENABLE_DMA64 (0)
@@ -451,16 +417,10 @@
/*
* Scsi_Host Template
*/
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
- static int ips_proc24_info(char *, char **, off_t, int, int, int);
- static void ips_select_queue_depth(struct Scsi_Host *, struct scsi_device *);
- static int ips_biosparam(Disk *disk, kdev_t dev, int geom[]);
-#else
static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[]);
static int ips_slave_configure(struct scsi_device *SDptr);
-#endif
/*
* Raid Command Formats
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c9a3abf9e7b6..7829ab1e2fb4 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -29,14 +29,15 @@
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
+#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
-#include <linux/mutex.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -109,7 +110,7 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
- buf->sg.length = tcp_conn->hdr_size;
+ buf->sg.length += sizeof(u32);
}
static inline int
@@ -211,16 +212,14 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
- int rc;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
struct iscsi_session *session = conn->session;
+ struct scsi_cmnd *sc = ctask->sc;
int datasn = be32_to_cpu(rhdr->datasn);
- rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (rc)
- return rc;
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
/*
* setup Data-In byte counter (gets decremented..)
*/
@@ -229,31 +228,36 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (tcp_conn->in.datalen == 0)
return 0;
- if (ctask->datasn != datasn)
+ if (tcp_ctask->exp_datasn != datasn) {
+ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
+ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
return ISCSI_ERR_DATASN;
+ }
- ctask->datasn++;
+ tcp_ctask->exp_datasn++;
tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
- if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
+ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+ __FUNCTION__, tcp_ctask->data_offset,
+ tcp_conn->in.datalen, scsi_bufflen(sc));
return ISCSI_ERR_DATA_OFFSET;
+ }
if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
- struct scsi_cmnd *sc = ctask->sc;
-
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
int res_count = be32_to_cpu(rhdr->residual_count);
if (res_count > 0 &&
- res_count <= sc->request_bufflen) {
- sc->resid = res_count;
+ res_count <= scsi_bufflen(sc)) {
+ scsi_set_resid(sc, res_count);
sc->result = (DID_OK << 16) | rhdr->cmd_status;
} else
sc->result = (DID_BAD_TARGET << 16) |
rhdr->cmd_status;
} else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
- sc->resid = be32_to_cpu(rhdr->residual_count);
+ scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
sc->result = (DID_OK << 16) | rhdr->cmd_status;
} else
sc->result = (DID_OK << 16) | rhdr->cmd_status;
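
In the Data-In completion path above, residual byte counts now go through the scsi_set_resid()/scsi_bufflen() accessors instead of poking sc->resid directly. A small sketch of the underflow case, with an illustrative helper name:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static void example_handle_underflow(struct scsi_cmnd *sc, u32 res_count,
				     u8 cmd_status)
{
	if (res_count > 0 && res_count <= scsi_bufflen(sc)) {
		scsi_set_resid(sc, res_count);	/* bytes the target did not transfer */
		sc->result = (DID_OK << 16) | cmd_status;
	} else {
		sc->result = (DID_BAD_TARGET << 16) | cmd_status;
	}
}
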
@@ -281,6 +285,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
{
struct iscsi_data *hdr;
struct scsi_cmnd *sc = ctask->sc;
+ int i, sg_count = 0;
+ struct scatterlist *sg;
hdr = &r2t->dtask.hdr;
memset(hdr, 0, sizeof(struct iscsi_data));
@@ -308,39 +314,30 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
sizeof(struct iscsi_hdr));
- if (sc->use_sg) {
- int i, sg_count = 0;
- struct scatterlist *sg = sc->request_buffer;
-
- r2t->sg = NULL;
- for (i = 0; i < sc->use_sg; i++, sg += 1) {
- /* FIXME: prefetch ? */
- if (sg_count + sg->length > r2t->data_offset) {
- int page_offset;
+ sg = scsi_sglist(sc);
+ r2t->sg = NULL;
+ for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
+ /* FIXME: prefetch ? */
+ if (sg_count + sg->length > r2t->data_offset) {
+ int page_offset;
- /* sg page found! */
+ /* sg page found! */
- /* offset within this page */
- page_offset = r2t->data_offset - sg_count;
+ /* offset within this page */
+ page_offset = r2t->data_offset - sg_count;
- /* fill in this buffer */
- iscsi_buf_init_sg(&r2t->sendbuf, sg);
- r2t->sendbuf.sg.offset += page_offset;
- r2t->sendbuf.sg.length -= page_offset;
+ /* fill in this buffer */
+ iscsi_buf_init_sg(&r2t->sendbuf, sg);
+ r2t->sendbuf.sg.offset += page_offset;
+ r2t->sendbuf.sg.length -= page_offset;
- /* xmit logic will continue with next one */
- r2t->sg = sg + 1;
- break;
- }
- sg_count += sg->length;
+ /* xmit logic will continue with next one */
+ r2t->sg = sg + 1;
+ break;
}
- BUG_ON(r2t->sg == NULL);
- } else {
- iscsi_buf_init_iov(&r2t->sendbuf,
- (char*)sc->request_buffer + r2t->data_offset,
- r2t->data_count);
- r2t->sg = NULL;
+ sg_count += sg->length;
}
+ BUG_ON(r2t->sg == NULL);
}
/**
@@ -365,17 +362,16 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
return ISCSI_ERR_DATALEN;
}
- if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
+ if (tcp_ctask->exp_datasn != r2tsn){
+ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
return ISCSI_ERR_R2TSN;
-
- rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (rc)
- return rc;
-
- /* FIXME: use R2TSN to detect missing R2T */
+ }
/* fill-in new R2T associated with the task */
spin_lock(&session->lock);
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
if (!ctask->sc || ctask->mtask ||
session->state != ISCSI_STATE_LOGGED_IN) {
printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
@@ -401,11 +397,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_length, session->max_burst);
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
- if (r2t->data_offset + r2t->data_length > ctask->total_length) {
+ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
spin_unlock(&session->lock);
printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
"offset %u and total length %d\n", r2t->data_length,
- r2t->data_offset, ctask->total_length);
+ r2t->data_offset, scsi_bufflen(ctask->sc));
return ISCSI_ERR_DATALEN;
}
@@ -414,9 +410,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_solicit_data_init(conn, ctask, r2t);
- tcp_ctask->exp_r2tsn = r2tsn + 1;
+ tcp_ctask->exp_datasn = r2tsn + 1;
__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
- tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+ tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT;
list_move_tail(&ctask->running, &conn->xmitqueue);
scsi_queue_work(session->host, &conn->xmitwork);
@@ -600,7 +596,7 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
int buf_left = buf_size - (tcp_conn->data_copied + offset);
- int size = min(tcp_conn->in.copy, buf_left);
+ unsigned size = min(tcp_conn->in.copy, buf_left);
int rc;
size = min(size, ctask->data_count);
@@ -609,7 +605,7 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
size, tcp_conn->in.offset, tcp_conn->in.copied);
BUG_ON(size <= 0);
- BUG_ON(tcp_ctask->sent + size > ctask->total_length);
+ BUG_ON(tcp_ctask->sent + size > scsi_bufflen(ctask->sc));
rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
(char*)buf + (offset + tcp_conn->data_copied), size);
@@ -707,25 +703,8 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
BUG_ON((void*)ctask != sc->SCp.ptr);
- /*
- * copying Data-In into the Scsi_Cmnd
- */
- if (!sc->use_sg) {
- i = ctask->data_count;
- rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
- sc->request_bufflen,
- tcp_ctask->data_offset);
- if (rc == -EAGAIN)
- return rc;
- if (conn->datadgst_en)
- iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
- i);
- rc = 0;
- goto done;
- }
-
offset = tcp_ctask->data_offset;
- sg = sc->request_buffer;
+ sg = scsi_sglist(sc);
if (tcp_ctask->data_offset)
for (i = 0; i < tcp_ctask->sg_count; i++)
@@ -734,7 +713,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
if (offset < 0)
offset = 0;
- for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
+ for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
char *dest;
dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
@@ -779,7 +758,6 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
}
BUG_ON(ctask->data_count);
-done:
/* check for non-exceptional status */
if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
@@ -895,11 +873,27 @@ more:
}
}
- if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
+ if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV &&
+ tcp_conn->in.copy) {
uint32_t recv_digest;
debug_tcp("extra data_recv offset %d copy %d\n",
tcp_conn->in.offset, tcp_conn->in.copy);
+
+ if (!tcp_conn->data_copied) {
+ if (tcp_conn->in.padding) {
+ debug_tcp("padding -> %d\n",
+ tcp_conn->in.padding);
+ memset(pad, 0, tcp_conn->in.padding);
+ sg_init_one(&sg, pad, tcp_conn->in.padding);
+ crypto_hash_update(&tcp_conn->rx_hash,
+ &sg, sg.length);
+ }
+ crypto_hash_final(&tcp_conn->rx_hash,
+ (u8 *) &tcp_conn->in.datadgst);
+ debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
+ }
+
rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
if (rc) {
if (rc == -EAGAIN)
@@ -924,8 +918,7 @@ more:
}
if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
- tcp_conn->in.copy) {
-
+ tcp_conn->in.copy) {
debug_tcp("data_recv offset %d copy %d\n",
tcp_conn->in.offset, tcp_conn->in.copy);
@@ -936,24 +929,32 @@ more:
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return 0;
}
- tcp_conn->in.copy -= tcp_conn->in.padding;
- tcp_conn->in.offset += tcp_conn->in.padding;
- if (conn->datadgst_en) {
- if (tcp_conn->in.padding) {
- debug_tcp("padding -> %d\n",
- tcp_conn->in.padding);
- memset(pad, 0, tcp_conn->in.padding);
- sg_init_one(&sg, pad, tcp_conn->in.padding);
- crypto_hash_update(&tcp_conn->rx_hash,
- &sg, sg.length);
- }
- crypto_hash_final(&tcp_conn->rx_hash,
- (u8 *) &tcp_conn->in.datadgst);
- debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
+
+ if (tcp_conn->in.padding)
+ tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
+ else if (conn->datadgst_en)
tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
- tcp_conn->data_copied = 0;
- } else
+ else
+ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ tcp_conn->data_copied = 0;
+ }
+
+ if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV &&
+ tcp_conn->in.copy) {
+ int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied,
+ tcp_conn->in.copy);
+
+ tcp_conn->in.copy -= copylen;
+ tcp_conn->in.offset += copylen;
+ tcp_conn->data_copied += copylen;
+
+ if (tcp_conn->data_copied != tcp_conn->in.padding)
+ tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
+ else if (conn->datadgst_en)
+ tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
+ else
tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ tcp_conn->data_copied = 0;
}
debug_tcp("f, processed %d from out of %d padding %d\n",
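
The new IN_PROGRESS_PAD_RECV state above consumes the 0-3 pad bytes that round each iSCSI data segment up to a 4-byte boundary. A small sketch of how that pad count is derived from the data segment length (illustrative, not the driver's exact expression):

#include <stdio.h>

#define ISCSI_PAD_LEN 4

/* Padding needed to round a data segment up to the 4-byte boundary. */
static unsigned int iscsi_padding(unsigned int datalen)
{
        unsigned int partial = datalen & (ISCSI_PAD_LEN - 1);

        return partial ? ISCSI_PAD_LEN - partial : 0;
}

int main(void)
{
        unsigned int len;

        for (len = 0; len < 6; len++)
                printf("datalen %u -> pad %u\n", len, iscsi_padding(len));
        return 0;
}
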
@@ -1215,7 +1216,6 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
struct iscsi_r2t_info *r2t, int left)
{
struct iscsi_data *hdr;
- struct scsi_cmnd *sc = ctask->sc;
int new_offset;
hdr = &r2t->dtask.hdr;
@@ -1245,15 +1245,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
if (iscsi_buf_left(&r2t->sendbuf))
return;
- if (sc->use_sg) {
- iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
- r2t->sg += 1;
- } else {
- iscsi_buf_init_iov(&r2t->sendbuf,
- (char*)sc->request_buffer + new_offset,
- r2t->data_count);
- r2t->sg = NULL;
- }
+ iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
+ r2t->sg += 1;
}
static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
@@ -1277,41 +1270,10 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
static void
iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
{
- struct scsi_cmnd *sc = ctask->sc;
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
-
- tcp_ctask->sent = 0;
- tcp_ctask->sg_count = 0;
-
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- tcp_ctask->xmstate = XMSTATE_W_HDR;
- tcp_ctask->exp_r2tsn = 0;
- BUG_ON(ctask->total_length == 0);
-
- if (sc->use_sg) {
- struct scatterlist *sg = sc->request_buffer;
-
- iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
- tcp_ctask->sg = sg + 1;
- tcp_ctask->bad_sg = sg + sc->use_sg;
- } else {
- iscsi_buf_init_iov(&tcp_ctask->sendbuf,
- sc->request_buffer,
- sc->request_bufflen);
- tcp_ctask->sg = NULL;
- tcp_ctask->bad_sg = NULL;
- }
- debug_scsi("cmd [itt 0x%x total %d imm_data %d "
- "unsol count %d, unsol offset %d]\n",
- ctask->itt, ctask->total_length, ctask->imm_count,
- ctask->unsol_count, ctask->unsol_offset);
- } else
- tcp_ctask->xmstate = XMSTATE_R_HDR;
-
- iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
- sizeof(struct iscsi_hdr));
+ tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT;
}
/**
@@ -1324,9 +1286,11 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
* call it again later, or recover. '0' return code means successful
* xmit.
*
- * Management xmit state machine consists of two states:
- * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
- * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
+ * Management xmit state machine consists of these states:
+ * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header
+ * XMSTATE_IMM_HDR - PDU Header xmit in progress
+ * XMSTATE_IMM_DATA - PDU Data xmit in progress
+ * XMSTATE_IDLE - management PDU is done
**/
static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
@@ -1337,23 +1301,34 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
conn->id, tcp_mtask->xmstate, mtask->itt);
- if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
- tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
- if (mtask->data_count)
+ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) {
+ iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
+ sizeof(struct iscsi_hdr));
+
+ if (mtask->data_count) {
tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
+ iscsi_buf_init_iov(&tcp_mtask->sendbuf,
+ (char*)mtask->data,
+ mtask->data_count);
+ }
+
if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
conn->stop_stage != STOP_CONN_RECOVER &&
conn->hdrdgst_en)
iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
(u8*)tcp_mtask->hdrext);
+
+ tcp_mtask->sent = 0;
+ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT;
+ tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
+ }
+
+ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
mtask->data_count);
- if (rc) {
- tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
- if (mtask->data_count)
- tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
+ if (rc)
return rc;
- }
+ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
}
if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
@@ -1387,55 +1362,67 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
return 0;
}
-static inline int
-iscsi_send_read_hdr(struct iscsi_conn *conn,
- struct iscsi_tcp_cmd_task *tcp_ctask)
+static int
+iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
- int rc;
+ struct scsi_cmnd *sc = ctask->sc;
+ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ int rc = 0;
- tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
- (u8*)tcp_ctask->hdrext);
- rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
- if (!rc) {
- BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
- return 0; /* wait for Data-In */
- }
- tcp_ctask->xmstate |= XMSTATE_R_HDR;
- return rc;
-}
+ if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) {
+ tcp_ctask->sent = 0;
+ tcp_ctask->sg_count = 0;
+ tcp_ctask->exp_datasn = 0;
-static inline int
-iscsi_send_write_hdr(struct iscsi_conn *conn,
- struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- int rc;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ struct scatterlist *sg = scsi_sglist(sc);
- tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
- if (conn->hdrdgst_en)
- iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
- (u8*)tcp_ctask->hdrext);
- rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
- if (rc) {
- tcp_ctask->xmstate |= XMSTATE_W_HDR;
- return rc;
+ iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
+ tcp_ctask->sg = sg + 1;
+ tcp_ctask->bad_sg = sg + scsi_sg_count(sc);
+
+ debug_scsi("cmd [itt 0x%x total %d imm_data %d "
+ "unsol count %d, unsol offset %d]\n",
+ ctask->itt, scsi_bufflen(sc),
+ ctask->imm_count, ctask->unsol_count,
+ ctask->unsol_offset);
+ }
+
+ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
+ sizeof(struct iscsi_hdr));
+
+ if (conn->hdrdgst_en)
+ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
+ (u8*)tcp_ctask->hdrext);
+ tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT;
+ tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT;
}
- if (ctask->imm_count) {
- tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
- iscsi_set_padding(tcp_ctask, ctask->imm_count);
+ if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) {
+ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
+ if (rc)
+ return rc;
+ tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT;
+
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+ if (ctask->imm_count) {
+ tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
+ iscsi_set_padding(tcp_ctask, ctask->imm_count);
- if (ctask->conn->datadgst_en) {
- iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
- tcp_ctask->immdigest = 0;
+ if (ctask->conn->datadgst_en) {
+ iscsi_data_digest_init(ctask->conn->dd_data,
+ tcp_ctask);
+ tcp_ctask->immdigest = 0;
+ }
}
- }
- if (ctask->unsol_count)
- tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
- return 0;
+ if (ctask->unsol_count)
+ tcp_ctask->xmstate |=
+ XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+ }
+ return rc;
}
static int
@@ -1624,9 +1611,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
struct iscsi_data_task *dtask;
int left, rc;
- if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
- tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
- tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
+ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) {
if (!tcp_ctask->r2t) {
spin_lock_bh(&session->lock);
__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
@@ -1640,12 +1625,19 @@ send_hdr:
if (conn->hdrdgst_en)
iscsi_hdr_digest(conn, &r2t->headbuf,
(u8*)dtask->hdrext);
+ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT;
+ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+ }
+
+ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
+ r2t = tcp_ctask->r2t;
+ dtask = &r2t->dtask;
+
rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
- if (rc) {
- tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
- tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+ if (rc)
return rc;
- }
+ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
+ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
if (conn->datadgst_en) {
iscsi_data_digest_init(conn->dd_data, tcp_ctask);
@@ -1677,8 +1669,6 @@ send_hdr:
left = r2t->data_length - r2t->sent;
if (left) {
iscsi_solicit_data_cont(conn, ctask, r2t, left);
- tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
- tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
goto send_hdr;
}
@@ -1693,8 +1683,6 @@ send_hdr:
if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
sizeof(void*))) {
tcp_ctask->r2t = r2t;
- tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
- tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
spin_unlock_bh(&session->lock);
goto send_hdr;
}
@@ -1703,6 +1691,46 @@ send_hdr:
return 0;
}
+/**
+ * iscsi_tcp_ctask_xmit - xmit normal PDU task
+ * @conn: iscsi connection
+ * @ctask: iscsi command task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+ * call it again later, or recover. '0' return code means successful
+ * xmit.
+ * The function is divided into logical helpers (above) for the different
+ * xmit stages.
+ *
+ *iscsi_send_cmd_hdr()
+ * XMSTATE_CMD_HDR_INIT - prepare Header and Data buffers Calculate
+ * Header Digest
+ * XMSTATE_CMD_HDR_XMIT - Transmit header in progress
+ *
+ *iscsi_send_padding
+ * XMSTATE_W_PAD - Prepare and send padding
+ * XMSTATE_W_RESEND_PAD - retry sending padding
+ *
+ *iscsi_send_digest
+ * XMSTATE_W_RESEND_DATA_DIGEST - Finalize and send Data Digest
+ * XMSTATE_W_RESEND_DATA_DIGEST - retry sending digest
+ *
+ *iscsi_send_unsol_hdr
+ * XMSTATE_UNS_INIT - prepare unsolicited data header and digest
+ * XMSTATE_UNS_HDR - send unsolicited data header
+ *
+ *iscsi_send_unsol_pdu
+ * XMSTATE_UNS_DATA - unsolicited data xmit in progress
+ *
+ *iscsi_send_sol_pdu
+ * XMSTATE_SOL_HDR_INIT - initialize solicited data header and digest
+ * XMSTATE_SOL_HDR - send solicited data header
+ * XMSTATE_SOL_DATA - send solicited data
+ *
+ *iscsi_tcp_ctask_xmit
+ * XMSTATE_IMM_DATA - xmit management data (??)
+ **/
static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
@@ -1712,20 +1740,11 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
conn->id, tcp_ctask->xmstate, ctask->itt);
- /*
- * serialize with TMF AbortTask
- */
- if (ctask->mtask)
+ rc = iscsi_send_cmd_hdr(conn, ctask);
+ if (rc)
return rc;
-
- if (tcp_ctask->xmstate & XMSTATE_R_HDR)
- return iscsi_send_read_hdr(conn, tcp_ctask);
-
- if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
- rc = iscsi_send_write_hdr(conn, ctask);
- if (rc)
- return rc;
- }
+ if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
@@ -1810,18 +1829,22 @@ tcp_conn_alloc_fail:
static void
iscsi_tcp_release_conn(struct iscsi_conn *conn)
{
+ struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct socket *sock = tcp_conn->sock;
- if (!tcp_conn->sock)
+ if (!sock)
return;
- sock_hold(tcp_conn->sock->sk);
+ sock_hold(sock->sk);
iscsi_conn_restore_callbacks(tcp_conn);
- sock_put(tcp_conn->sock->sk);
+ sock_put(sock->sk);
- sock_release(tcp_conn->sock);
+ spin_lock_bh(&session->lock);
tcp_conn->sock = NULL;
conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
}
static void
@@ -1852,6 +1875,46 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
}
+static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+ char *buf, int *port,
+ int (*getname)(struct socket *, struct sockaddr *,
+ int *addrlen))
+{
+ struct sockaddr_storage *addr;
+ struct sockaddr_in6 *sin6;
+ struct sockaddr_in *sin;
+ int rc = 0, len;
+
+ addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ if (getname(sock, (struct sockaddr *) addr, &len)) {
+ rc = -ENODEV;
+ goto free_addr;
+ }
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ spin_lock_bh(&conn->session->lock);
+ sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+ *port = be16_to_cpu(sin->sin_port);
+ spin_unlock_bh(&conn->session->lock);
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ spin_lock_bh(&conn->session->lock);
+ sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+ *port = be16_to_cpu(sin6->sin6_port);
+ spin_unlock_bh(&conn->session->lock);
+ break;
+ }
+free_addr:
+ kfree(addr);
+ return rc;
+}
+
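
iscsi_tcp_get_addr() caches the peer and local endpoints as strings at bind time, via kernel_getpeername() and kernel_getsockname(), so userspace can still query them after the socket has been torn down for recovery. A userspace approximation of the same AF_INET/AF_INET6 switch, using inet_ntop() instead of the kernel's NIPQUAD/NIP6 helpers (illustrative only):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Turn a sockaddr_storage into an address string and a host-order port,
 * as the driver does for portal_address/portal_port and local_address. */
static int format_endpoint(const struct sockaddr_storage *ss,
                           char *buf, size_t buflen, unsigned short *port)
{
        if (ss->ss_family == AF_INET) {
                const struct sockaddr_in *sin = (const void *)ss;

                inet_ntop(AF_INET, &sin->sin_addr, buf, buflen);
                *port = ntohs(sin->sin_port);
                return 0;
        }
        if (ss->ss_family == AF_INET6) {
                const struct sockaddr_in6 *sin6 = (const void *)ss;

                inet_ntop(AF_INET6, &sin6->sin6_addr, buf, buflen);
                *port = ntohs(sin6->sin6_port);
                return 0;
        }
        return -1;
}

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
        char buf[INET6_ADDRSTRLEN];
        unsigned short port;

        memset(&ss, 0, sizeof(ss));
        sin->sin_family = AF_INET;
        sin->sin_port = htons(3260);            /* iSCSI well-known port */
        inet_pton(AF_INET, "192.168.0.10", &sin->sin_addr);

        if (!format_endpoint(&ss, buf, sizeof(buf), &port))
                printf("%s:%hu\n", buf, port);
        return 0;
}
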
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -1869,10 +1932,24 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
return -EEXIST;
}
+ /*
+ * copy these values now because if we drop the session
+ * userspace may still want to query the values since we will
+ * be using them for the reconnect
+ */
+ err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
+ &conn->portal_port, kernel_getpeername);
+ if (err)
+ goto free_socket;
+
+ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
+ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
- return err;
+ goto free_socket;
/* bind iSCSI connection and socket */
tcp_conn->sock = sock;
@@ -1896,25 +1973,19 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
* set receive state machine into initial state
*/
tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
-
return 0;
+
+free_socket:
+ sockfd_put(sock);
+ return err;
}
/* called with host lock */
static void
-iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
- char *data, uint32_t data_size)
+iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
-
- iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
- sizeof(struct iscsi_hdr));
- tcp_mtask->xmstate = XMSTATE_IMM_HDR;
- tcp_mtask->sent = 0;
-
- if (mtask->data_count)
- iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
- mtask->data_count);
+ tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
}
static int
@@ -2026,41 +2097,18 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct inet_sock *inet;
- struct ipv6_pinfo *np;
- struct sock *sk;
int len;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
- mutex_lock(&conn->xmitmutex);
- if (!tcp_conn->sock) {
- mutex_unlock(&conn->xmitmutex);
- return -EINVAL;
- }
-
- inet = inet_sk(tcp_conn->sock->sk);
- len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
- mutex_unlock(&conn->xmitmutex);
+ spin_lock_bh(&conn->session->lock);
+ len = sprintf(buf, "%hu\n", conn->portal_port);
+ spin_unlock_bh(&conn->session->lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- mutex_lock(&conn->xmitmutex);
- if (!tcp_conn->sock) {
- mutex_unlock(&conn->xmitmutex);
- return -EINVAL;
- }
-
- sk = tcp_conn->sock->sk;
- if (sk->sk_family == PF_INET) {
- inet = inet_sk(sk);
- len = sprintf(buf, NIPQUAD_FMT "\n",
- NIPQUAD(inet->daddr));
- } else {
- np = inet6_sk(sk);
- len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
- }
- mutex_unlock(&conn->xmitmutex);
+ spin_lock_bh(&conn->session->lock);
+ len = sprintf(buf, "%s\n", conn->portal_address);
+ spin_unlock_bh(&conn->session->lock);
break;
default:
return iscsi_conn_get_param(cls_conn, param, buf);
@@ -2069,6 +2117,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
return len;
}
+static int
+iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+{
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ spin_lock_bh(&session->lock);
+ if (!session->leadconn)
+ len = -ENODEV;
+ else
+ len = sprintf(buf, "%s\n",
+ session->leadconn->local_address);
+ spin_unlock_bh(&session->lock);
+ break;
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return len;
+}
+
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
@@ -2096,6 +2167,7 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
struct scsi_transport_template *scsit,
+ uint16_t cmds_max, uint16_t qdepth,
uint32_t initial_cmdsn, uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
@@ -2103,7 +2175,7 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit,
uint32_t hn;
int cmd_i;
- cls_session = iscsi_session_setup(iscsit, scsit,
+ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
sizeof(struct iscsi_tcp_cmd_task),
sizeof(struct iscsi_tcp_mgmt_task),
initial_cmdsn, &hn);
@@ -2142,17 +2214,24 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_session_teardown(cls_session);
}
+static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+{
+ blk_queue_dma_alignment(sdev->request_queue, 0);
+ return 0;
+}
+
static struct scsi_host_template iscsi_sht = {
.name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
- .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
.sg_tablesize = ISCSI_SG_TABLESIZE,
.max_sectors = 0xFFFF,
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_host_reset_handler = iscsi_eh_host_reset,
.use_clustering = DISABLE_CLUSTERING,
+ .slave_configure = iscsi_tcp_slave_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
};
@@ -2179,8 +2258,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
- ISCSI_TARGET_NAME |
- ISCSI_TPGT,
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
.host_template = &iscsi_sht,
.conndata_size = sizeof(struct iscsi_conn),
.max_conn = 1,
@@ -2197,6 +2280,9 @@ static struct iscsi_transport iscsi_tcp_transport = {
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 32736831790e..7eba44df0a7f 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -29,11 +29,12 @@
#define IN_PROGRESS_HEADER_GATHER 0x1
#define IN_PROGRESS_DATA_RECV 0x2
#define IN_PROGRESS_DDIGEST_RECV 0x3
+#define IN_PROGRESS_PAD_RECV 0x4
/* xmit state machine */
#define XMSTATE_IDLE 0x0
-#define XMSTATE_R_HDR 0x1
-#define XMSTATE_W_HDR 0x2
+#define XMSTATE_CMD_HDR_INIT 0x1
+#define XMSTATE_CMD_HDR_XMIT 0x2
#define XMSTATE_IMM_HDR 0x4
#define XMSTATE_IMM_DATA 0x8
#define XMSTATE_UNS_INIT 0x10
@@ -44,6 +45,8 @@
#define XMSTATE_W_PAD 0x200
#define XMSTATE_W_RESEND_PAD 0x400
#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
+#define XMSTATE_IMM_HDR_INIT 0x1000
+#define XMSTATE_SOL_HDR_INIT 0x2000
#define ISCSI_PAD_LEN 4
#define ISCSI_SG_TABLESIZE SG_ALL
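
These XMSTATE_* bits drive the transmit helpers in iscsi_tcp.c: each stage tests its bit, pushes as much as the socket accepts, and either clears the bit and sets the next one or returns -EAGAIN so a later call resumes at exactly the same stage. A toy userspace sketch of that resumable bit-flag pattern (simplified, not the driver's logic):

#include <stdio.h>
#include <errno.h>

#define ST_HDR_INIT 0x1
#define ST_HDR_XMIT 0x2
#define ST_DATA     0x4

struct task { unsigned int state; int budget; };

/* Each stage clears its flag only once it fully completes; 'budget'
 * stands in for a socket that accepts a limited amount per call. */
static int task_xmit(struct task *t)
{
        if (t->state & ST_HDR_INIT) {
                t->state &= ~ST_HDR_INIT;
                t->state |= ST_HDR_XMIT;
        }
        if (t->state & ST_HDR_XMIT) {
                if (t->budget-- <= 0)
                        return -EAGAIN; /* retry later, state preserved */
                t->state &= ~ST_HDR_XMIT;
                t->state |= ST_DATA;
        }
        if (t->state & ST_DATA) {
                if (t->budget-- <= 0)
                        return -EAGAIN;
                t->state &= ~ST_DATA;
        }
        return 0;
}

int main(void)
{
        struct task t = { ST_HDR_INIT, 1 };

        while (task_xmit(&t) == -EAGAIN) {
                printf("partial xmit, resuming with state 0x%x\n", t.state);
                t.budget = 1;   /* socket has room again */
        }
        printf("done, state 0x%x\n", t.state);
        return 0;
}
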
@@ -152,7 +155,7 @@ struct iscsi_tcp_cmd_task {
struct scatterlist *sg; /* per-cmd SG list */
struct scatterlist *bad_sg; /* assert statement */
int sg_count; /* SG's to process */
- uint32_t exp_r2tsn;
+ uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
int data_offset;
struct iscsi_r2t_info *r2t; /* in progress R2T */
struct iscsi_queue r2tpool;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 81e497d9eae0..5d231015bb20 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
*
- * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
*/
#include <linux/kernel.h>
@@ -143,7 +143,7 @@ static int __devinit esp_jazz_probe(struct platform_device *dev)
goto fail;
host->max_id = 8;
- esp = host_to_esp(host);
+ esp = shost_priv(host);
esp->host = host;
esp->dev = dev;
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 5c32a69e41ba..3126824da36d 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -101,13 +101,12 @@ lasi700_probe(struct parisc_device *dev)
struct NCR_700_Host_Parameters *hostdata;
struct Scsi_Host *host;
- hostdata = kmalloc(sizeof(*hostdata), GFP_KERNEL);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
if (!hostdata) {
printk(KERN_ERR "%s: Failed to allocate host data\n",
dev->dev.bus_id);
return -ENOMEM;
}
- memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_32BIT_MASK);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3f5b9b445b29..4d85ce100192 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -22,7 +22,6 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/types.h>
-#include <linux/mutex.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
@@ -46,27 +45,53 @@ class_to_transport_session(struct iscsi_cls_session *cls_session)
}
EXPORT_SYMBOL_GPL(class_to_transport_session);
-#define INVALID_SN_DELTA 0xffff
+/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+#define SNA32_CHECK 2147483648UL
-int
-iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+static int iscsi_sna_lt(u32 n1, u32 n2)
+{
+ return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+ (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+}
+
+/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+static int iscsi_sna_lte(u32 n1, u32 n2)
+{
+ return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+ (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+}
+
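
iscsi_sna_lt() and iscsi_sna_lte() follow RFC 1982 serial number arithmetic so CmdSN/MaxCmdSN comparisons keep working when the 32-bit counters wrap. A standalone check of the wraparound behaviour (userspace types, same expression as the function above):

#include <stdio.h>
#include <stdint.h>

#define SNA32_CHECK 2147483648UL

/* "less than" per RFC 1982; unsigned subtraction makes the n1 > n2
 * branch cover the case where n2 has wrapped past zero. */
static int sna_lt(uint32_t n1, uint32_t n2)
{
        return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
                            (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

int main(void)
{
        /* the plain case */
        printf("5 lt 10: %d\n", sna_lt(5, 10));
        /* 0xfffffff0 is "before" 0x10 once the counter wraps */
        printf("0xfffffff0 lt 0x10: %d\n", sna_lt(0xfffffff0u, 0x10));
        /* and not the other way around */
        printf("0x10 lt 0xfffffff0: %d\n", sna_lt(0x10, 0xfffffff0u));
        return 0;
}
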
+void
+iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
- if (max_cmdsn < exp_cmdsn -1 &&
- max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
- return ISCSI_ERR_MAX_CMDSN;
- if (max_cmdsn > session->max_cmdsn ||
- max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
- session->max_cmdsn = max_cmdsn;
- if (exp_cmdsn > session->exp_cmdsn ||
- exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+ /*
+ * standard specifies this check for when to update expected and
+ * max sequence numbers
+ */
+ if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+ return;
+
+ if (exp_cmdsn != session->exp_cmdsn &&
+ !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
session->exp_cmdsn = exp_cmdsn;
- return 0;
+ if (max_cmdsn != session->max_cmdsn &&
+ !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+ session->max_cmdsn = max_cmdsn;
+ /*
+ * if the window closed with IO queued, then kick the
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+ __kfifo_len(session->leadconn->mgmtqueue))
+ scsi_queue_work(session->host,
+ &session->leadconn->xmitwork);
+ }
}
-EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
+EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
struct iscsi_data *hdr)
@@ -115,14 +140,17 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
hdr->itt = build_itt(ctask->itt, conn->id, session->age);
- hdr->data_length = cpu_to_be32(sc->request_bufflen);
+ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
hdr->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
- memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
+ if (sc->cmd_len < MAX_COMMAND_SIZE)
+ memset(&hdr->cdb[sc->cmd_len], 0,
+ MAX_COMMAND_SIZE - sc->cmd_len);
ctask->data_count = 0;
+ ctask->imm_count = 0;
if (sc->sc_data_direction == DMA_TO_DEVICE) {
hdr->flags |= ISCSI_FLAG_CMD_WRITE;
/*
@@ -139,25 +167,24 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
*
* pad_count bytes to be sent as zero-padding
*/
- ctask->imm_count = 0;
ctask->unsol_count = 0;
ctask->unsol_offset = 0;
ctask->unsol_datasn = 0;
if (session->imm_data_en) {
- if (ctask->total_length >= session->first_burst)
+ if (scsi_bufflen(sc) >= session->first_burst)
ctask->imm_count = min(session->first_burst,
conn->max_xmit_dlength);
else
- ctask->imm_count = min(ctask->total_length,
+ ctask->imm_count = min(scsi_bufflen(sc),
conn->max_xmit_dlength);
hton24(ctask->hdr->dlength, ctask->imm_count);
} else
zero_data(ctask->hdr->dlength);
if (!session->initial_r2t_en) {
- ctask->unsol_count = min(session->first_burst,
- ctask->total_length) - ctask->imm_count;
+ ctask->unsol_count = min((session->first_burst),
+ (scsi_bufflen(sc))) - ctask->imm_count;
ctask->unsol_offset = ctask->imm_count;
}
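
The rewritten write setup splits the payload three ways: imm_count bytes ride in the command PDU itself (capped by the negotiated FirstBurstLength, here session->first_burst, and by conn->max_xmit_dlength), unsol_count bytes follow as unsolicited Data-Out, and the remainder waits for R2Ts. A worked sketch of that arithmetic (illustrative parameter names):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* Split a write of 'bufflen' bytes the way the prep code above does. */
static void split_write(unsigned int bufflen, unsigned int first_burst,
                        unsigned int max_xmit_dlength,
                        int imm_data_en, int initial_r2t_en)
{
        unsigned int imm = 0, unsol = 0;

        if (imm_data_en)
                imm = min_u(min_u(first_burst, bufflen), max_xmit_dlength);
        if (!initial_r2t_en)
                unsol = min_u(first_burst, bufflen) - imm;

        printf("bufflen %u: immediate %u, unsolicited %u, solicited %u\n",
               bufflen, imm, unsol, bufflen - imm - unsol);
}

int main(void)
{
        /* FirstBurstLength 64k, MaxXmitDataSegmentLength 8k */
        split_write(256 * 1024, 65536, 8192, 1, 0);
        /* small write with InitialR2T=Yes: everything fits as immediate data */
        split_write(4096, 65536, 8192, 1, 1);
        return 0;
}
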
@@ -165,7 +192,6 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
/* No unsolicit Data-Out's */
ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
} else {
- ctask->datasn = 0;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
zero_data(hdr->dlength);
@@ -174,8 +200,13 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
}
conn->scsicmd_pdus_cnt++;
+
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+ "cmdsn %d win %d]\n",
+ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
+ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
}
-EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
/**
* iscsi_complete_command - return command back to scsi-ml
@@ -204,26 +235,12 @@ static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
atomic_inc(&ctask->refcount);
}
-static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
-{
- spin_lock_bh(&ctask->conn->session->lock);
- __iscsi_get_ctask(ctask);
- spin_unlock_bh(&ctask->conn->session->lock);
-}
-
static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
{
if (atomic_dec_and_test(&ctask->refcount))
iscsi_complete_command(ctask);
}
-static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
-{
- spin_lock_bh(&ctask->conn->session->lock);
- __iscsi_put_ctask(ctask);
- spin_unlock_bh(&ctask->conn->session->lock);
-}
-
/**
* iscsi_cmd_rsp - SCSI Command Response processing
* @conn: iscsi connection
@@ -235,21 +252,15 @@ static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
* iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
* then completes the command and task.
**/
-static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- struct iscsi_cmd_task *ctask, char *data,
- int datalen)
+static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
{
- int rc;
struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = ctask->sc;
- rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (rc) {
- sc->result = DID_ERROR << 16;
- goto out;
- }
-
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
sc->result = (DID_OK << 16) | rhdr->cmd_status;
@@ -286,14 +297,14 @@ invalid_datalen:
if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
int res_count = be32_to_cpu(rhdr->residual_count);
- if (res_count > 0 && res_count <= sc->request_bufflen)
- sc->resid = res_count;
+ if (res_count > 0 && res_count <= scsi_bufflen(sc))
+ scsi_set_resid(sc, res_count);
else
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
} else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
- sc->resid = be32_to_cpu(rhdr->residual_count);
+ scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
out:
debug_scsi("done [sc %lx res %d itt 0x%x]\n",
@@ -301,7 +312,6 @@ out:
conn->scsirsp_pdus_cnt++;
__iscsi_put_ctask(ctask);
- return rc;
}
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -381,8 +391,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
switch(opcode) {
case ISCSI_OP_SCSI_CMD_RSP:
BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
- rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
- datalen);
+ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
+ datalen);
break;
case ISCSI_OP_SCSI_DATA_IN:
BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
@@ -405,11 +415,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
opcode, conn->id, mtask->itt, datalen);
- rc = iscsi_check_assign_cmdsn(session,
- (struct iscsi_nopin*)hdr);
- if (rc)
- goto done;
-
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
switch(opcode) {
case ISCSI_OP_LOGOUT_RSP:
if (datalen) {
@@ -458,10 +464,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
break;
}
} else if (itt == ~0U) {
- rc = iscsi_check_assign_cmdsn(session,
- (struct iscsi_nopin*)hdr);
- if (rc)
- goto done;
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
switch(opcode) {
case ISCSI_OP_NOOP_IN:
@@ -491,7 +494,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else
rc = ISCSI_ERR_BAD_ITT;
-done:
return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -578,17 +580,47 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+static void iscsi_prep_mtask(struct iscsi_conn *conn,
+ struct iscsi_mgmt_task *mtask)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_hdr *hdr = mtask->hdr;
+ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+
+ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ /*
+ * pre-format CmdSN for outgoing PDU.
+ */
+ nop->cmdsn = cpu_to_be32(session->cmdsn);
+ if (hdr->itt != RESERVED_ITT) {
+ hdr->itt = build_itt(mtask->itt, conn->id, session->age);
+ if (conn->c_stage == ISCSI_CONN_STARTED &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE))
+ session->cmdsn++;
+ }
+
+ if (session->tt->init_mgmt_task)
+ session->tt->init_mgmt_task(conn, mtask);
+
+ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+ hdr->opcode, hdr->itt, mtask->data_count);
+}
+
static int iscsi_xmit_mtask(struct iscsi_conn *conn)
{
struct iscsi_hdr *hdr = conn->mtask->hdr;
int rc, was_logout = 0;
+ spin_unlock_bh(&conn->session->lock);
if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
conn->session->state = ISCSI_STATE_IN_RECOVERY;
iscsi_block_session(session_to_cls(conn->session));
was_logout = 1;
}
rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
+ spin_lock_bh(&conn->session->lock);
if (rc)
return rc;
@@ -602,6 +634,45 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
return 0;
}
+static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+{
+ struct iscsi_session *session = conn->session;
+
+ /*
+ * Check for iSCSI window and take care of CmdSN wrap-around
+ */
+ if (!iscsi_sna_lte(session->cmdsn, session->max_cmdsn)) {
+ debug_scsi("iSCSI CmdSN closed. MaxCmdSN %u CmdSN %u\n",
+ session->max_cmdsn, session->cmdsn);
+ return -ENOSPC;
+ }
+ return 0;
+}
+
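
iscsi_check_cmdsn_window_closed() uses the same serial arithmetic to refuse new commands once CmdSN has passed MaxCmdSN. A small sketch of that decision, including the number of sequence numbers still usable while the window is open (illustrative, not the driver's code):

#include <stdio.h>
#include <stdint.h>

#define SNA32_CHECK 2147483648UL

static int sna_lte(uint32_t n1, uint32_t n2)
{
        return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
                            (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

/* Window is open while CmdSN <= MaxCmdSN in serial arithmetic; while
 * open, MaxCmdSN - CmdSN + 1 sequence numbers remain usable. */
static void check_window(uint32_t cmdsn, uint32_t max_cmdsn)
{
        if (!sna_lte(cmdsn, max_cmdsn))
                printf("CmdSN %u: window closed (MaxCmdSN %u)\n",
                       cmdsn, max_cmdsn);
        else
                printf("CmdSN %u: window open, %u slots left\n",
                       cmdsn, max_cmdsn - cmdsn + 1);
}

int main(void)
{
        check_window(100, 131);                 /* open, 32 slots */
        check_window(132, 131);                 /* target has not advanced */
        check_window(0xfffffffeu, 0x1e);        /* open across the wrap */
        return 0;
}
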
+static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc = 0;
+
+ /*
+ * serialize with TMF AbortTask
+ */
+ if (ctask->state == ISCSI_TASK_ABORTING)
+ goto done;
+
+ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+ __iscsi_put_ctask(ctask);
+
+done:
+ if (!rc)
+ /* done with this ctask */
+ conn->ctask = NULL;
+ return rc;
+}
+
/**
* iscsi_data_xmit - xmit any command into the scheduled connection
* @conn: iscsi connection
@@ -613,106 +684,79 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
**/
static int iscsi_data_xmit(struct iscsi_conn *conn)
{
- struct iscsi_transport *tt;
int rc = 0;
+ spin_lock_bh(&conn->session->lock);
if (unlikely(conn->suspend_tx)) {
debug_scsi("conn %d Tx suspended!\n", conn->id);
+ spin_unlock_bh(&conn->session->lock);
return -ENODATA;
}
- tt = conn->session->tt;
-
- /*
- * Transmit in the following order:
- *
- * 1) un-finished xmit (ctask or mtask)
- * 2) immediate control PDUs
- * 3) write data
- * 4) SCSI commands
- * 5) non-immediate control PDUs
- *
- * No need to lock around __kfifo_get as long as
- * there's one producer and one consumer.
- */
-
- BUG_ON(conn->ctask && conn->mtask);
if (conn->ctask) {
- iscsi_get_ctask(conn->ctask);
- rc = tt->xmit_cmd_task(conn, conn->ctask);
- iscsi_put_ctask(conn->ctask);
+ rc = iscsi_xmit_ctask(conn);
if (rc)
goto again;
- /* done with this in-progress ctask */
- conn->ctask = NULL;
}
+
if (conn->mtask) {
rc = iscsi_xmit_mtask(conn);
if (rc)
goto again;
}
- /* process immediate first */
- if (unlikely(__kfifo_len(conn->immqueue))) {
- while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
- sizeof(void*))) {
- spin_lock_bh(&conn->session->lock);
- list_add_tail(&conn->mtask->running,
- &conn->mgmt_run_list);
- spin_unlock_bh(&conn->session->lock);
- rc = iscsi_xmit_mtask(conn);
- if (rc)
- goto again;
- }
+ /*
+ * process mgmt pdus like nops before commands since we should
+ * only have one nop-out as a ping from us and targets should not
+ * overflow us with nop-ins
+ */
+check_mgmt:
+ while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
+ sizeof(void*))) {
+ iscsi_prep_mtask(conn, conn->mtask);
+ list_add_tail(&conn->mtask->running, &conn->mgmt_run_list);
+ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
}
/* process command queue */
- spin_lock_bh(&conn->session->lock);
while (!list_empty(&conn->xmitqueue)) {
+ rc = iscsi_check_cmdsn_window_closed(conn);
+ if (rc) {
+ spin_unlock_bh(&conn->session->lock);
+ return rc;
+ }
/*
* iscsi tcp may readd the task to the xmitqueue to send
* write data
*/
conn->ctask = list_entry(conn->xmitqueue.next,
struct iscsi_cmd_task, running);
+ if (conn->ctask->state == ISCSI_TASK_PENDING) {
+ iscsi_prep_scsi_cmd_pdu(conn->ctask);
+ conn->session->tt->init_cmd_task(conn->ctask);
+ }
conn->ctask->state = ISCSI_TASK_RUNNING;
list_move_tail(conn->xmitqueue.next, &conn->run_list);
- __iscsi_get_ctask(conn->ctask);
- spin_unlock_bh(&conn->session->lock);
-
- rc = tt->xmit_cmd_task(conn, conn->ctask);
-
- spin_lock_bh(&conn->session->lock);
- __iscsi_put_ctask(conn->ctask);
- if (rc) {
- spin_unlock_bh(&conn->session->lock);
+ rc = iscsi_xmit_ctask(conn);
+ if (rc)
goto again;
- }
+ /*
+ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to avoid starvation
+ */
+ if (__kfifo_len(conn->mgmtqueue))
+ goto check_mgmt;
}
spin_unlock_bh(&conn->session->lock);
- /* done with this ctask */
- conn->ctask = NULL;
-
- /* process the rest control plane PDUs, if any */
- if (unlikely(__kfifo_len(conn->mgmtqueue))) {
- while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
- sizeof(void*))) {
- spin_lock_bh(&conn->session->lock);
- list_add_tail(&conn->mtask->running,
- &conn->mgmt_run_list);
- spin_unlock_bh(&conn->session->lock);
- rc = iscsi_xmit_mtask(conn);
- if (rc)
- goto again;
- }
- }
-
return -ENODATA;
again:
if (unlikely(conn->suspend_tx))
- return -ENODATA;
-
+ rc = -ENODATA;
+ spin_unlock_bh(&conn->session->lock);
return rc;
}
@@ -724,11 +768,9 @@ static void iscsi_xmitworker(struct work_struct *work)
/*
* serialize Xmit worker on a per-connection basis.
*/
- mutex_lock(&conn->xmitmutex);
do {
rc = iscsi_data_xmit(conn);
} while (rc >= 0 || rc == -EAGAIN);
- mutex_unlock(&conn->xmitmutex);
}
enum {
@@ -786,20 +828,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto fault;
}
- /*
- * Check for iSCSI window and take care of CmdSN wrap-around
- */
- if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
- reason = FAILURE_WINDOW_CLOSED;
- goto reject;
- }
-
conn = session->leadconn;
if (!conn) {
reason = FAILURE_SESSION_FREED;
goto fault;
}
+ /*
+ * We check this here and in data xmit, because if we get to the point
+ * that this check is hitting the window, then we have enough IO in
+ * flight and enough IO waiting to be transmitted that it is better
+ * to let the scsi/block layer queue up.
+ */
+ if (iscsi_check_cmdsn_window_closed(conn)) {
+ reason = FAILURE_WINDOW_CLOSED;
+ goto reject;
+ }
+
if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
sizeof(void*))) {
reason = FAILURE_OOM;
@@ -814,18 +859,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
ctask->conn = conn;
ctask->sc = sc;
INIT_LIST_HEAD(&ctask->running);
- ctask->total_length = sc->request_bufflen;
- iscsi_prep_scsi_cmd_pdu(ctask);
-
- session->tt->init_cmd_task(ctask);
list_add_tail(&ctask->running, &conn->xmitqueue);
- debug_scsi(
- "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
- "win %d]\n",
- sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
- conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
spin_unlock(&session->lock);
scsi_queue_work(host, &conn->xmitwork);
@@ -841,7 +876,7 @@ fault:
printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
sc->cmnd[0], reason);
sc->result = (DID_NO_CONNECT << 16);
- sc->resid = sc->request_bufflen;
+ scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
return 0;
}
@@ -856,19 +891,16 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
}
EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
-static int
-iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *data, uint32_t data_size)
+static struct iscsi_mgmt_task *
+__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
struct iscsi_mgmt_task *mtask;
- spin_lock_bh(&session->lock);
- if (session->state == ISCSI_STATE_TERMINATE) {
- spin_unlock_bh(&session->lock);
- return -EPERM;
- }
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+
if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
/*
@@ -882,27 +914,11 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
if (!__kfifo_get(session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*))) {
- spin_unlock_bh(&session->lock);
- return -ENOSPC;
- }
+ (void*)&mtask, sizeof(void*)))
+ return NULL;
}
- /*
- * pre-format CmdSN for outgoing PDU.
- */
- if (hdr->itt != RESERVED_ITT) {
- hdr->itt = build_itt(mtask->itt, conn->id, session->age);
- nop->cmdsn = cpu_to_be32(session->cmdsn);
- if (conn->c_stage == ISCSI_CONN_STARTED &&
- !(hdr->opcode & ISCSI_OP_IMMEDIATE))
- session->cmdsn++;
- } else
- /* do not advance CmdSN */
- nop->cmdsn = cpu_to_be32(session->cmdsn);
-
if (data_size) {
memcpy(mtask->data, data, data_size);
mtask->data_count = data_size;
@@ -911,38 +927,23 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
INIT_LIST_HEAD(&mtask->running);
memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
- if (session->tt->init_mgmt_task)
- session->tt->init_mgmt_task(conn, mtask, data, data_size);
- spin_unlock_bh(&session->lock);
-
- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
- hdr->opcode, hdr->itt, data_size);
-
- /*
- * since send_pdu() could be called at least from two contexts,
- * we need to serialize __kfifo_put, so we don't have to take
- * additional lock on fast data-path
- */
- if (hdr->opcode & ISCSI_OP_IMMEDIATE)
- __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
- else
- __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
-
- scsi_queue_work(session->host, &conn->xmitwork);
- return 0;
+ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+ return mtask;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- int rc;
-
- mutex_lock(&conn->xmitmutex);
- rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
- mutex_unlock(&conn->xmitmutex);
+ struct iscsi_session *session = conn->session;
+ int err = 0;
- return rc;
+ spin_lock_bh(&session->lock);
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
+ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -1027,14 +1028,12 @@ static void iscsi_tmabort_timedout(unsigned long data)
spin_unlock(&session->lock);
}
-/* must be called with the mutex lock */
static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
struct iscsi_cmd_task *ctask)
{
struct iscsi_conn *conn = ctask->conn;
struct iscsi_session *session = conn->session;
struct iscsi_tm *hdr = &conn->tmhdr;
- int rc;
/*
* ctask timed out but session is OK requests must be serialized.
@@ -1047,32 +1046,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
hdr->rtt = ctask->hdr->itt;
hdr->refcmdsn = ctask->hdr->cmdsn;
- rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (rc) {
+ ctask->mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+ if (!ctask->mtask) {
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
- rc);
- return rc;
+ debug_scsi("abort sent failure [itt 0x%x]\n", ctask->itt);
+ return -EPERM;
}
+ ctask->state = ISCSI_TASK_ABORTING;
debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
- spin_lock_bh(&session->lock);
- ctask->mtask = (struct iscsi_mgmt_task *)
- session->mgmt_cmds[get_itt(hdr->itt) -
- ISCSI_MGMT_ITT_OFFSET];
-
if (conn->tmabort_state == TMABORT_INITIAL) {
conn->tmfcmd_pdus_cnt++;
- conn->tmabort_timer.expires = 10*HZ + jiffies;
+ conn->tmabort_timer.expires = 20*HZ + jiffies;
conn->tmabort_timer.function = iscsi_tmabort_timedout;
conn->tmabort_timer.data = (unsigned long)ctask;
add_timer(&conn->tmabort_timer);
debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
}
spin_unlock_bh(&session->lock);
- mutex_unlock(&conn->xmitmutex);
+ scsi_queue_work(session->host, &conn->xmitwork);
/*
* block eh thread until:
@@ -1089,13 +1083,12 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
if (signal_pending(current))
flush_signals(current);
del_timer_sync(&conn->tmabort_timer);
-
- mutex_lock(&conn->xmitmutex);
+ spin_lock_bh(&session->lock);
return 0;
}
/*
- * xmit mutex and session lock must be held
+ * session lock must be held
*/
static struct iscsi_mgmt_task *
iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
@@ -1127,7 +1120,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
if (!ctask->mtask)
return -EINVAL;
- if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
+ if (!iscsi_remove_mgmt_task(conn->mgmtqueue, ctask->mtask->itt))
list_del(&ctask->mtask->running);
__kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
sizeof(void*));
@@ -1136,7 +1129,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
}
/*
- * session lock and xmitmutex must be held
+ * session lock must be held
*/
static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
int err)
@@ -1147,11 +1140,14 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
if (!sc)
return;
- conn->session->tt->cleanup_cmd_task(conn, ctask);
+ if (ctask->state != ISCSI_TASK_PENDING)
+ conn->session->tt->cleanup_cmd_task(conn, ctask);
iscsi_ctask_mtask_cleanup(ctask);
sc->result = err;
- sc->resid = sc->request_bufflen;
+ scsi_set_resid(sc, scsi_bufflen(sc));
+ if (conn->ctask == ctask)
+ conn->ctask = NULL;
/* release ref from queuecommand */
__iscsi_put_ctask(ctask);
}
@@ -1179,7 +1175,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
conn->eh_abort_cnt++;
debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
- mutex_lock(&conn->xmitmutex);
spin_lock_bh(&session->lock);
/*
@@ -1192,9 +1187,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
/* ctask completed before time out */
if (!ctask->sc) {
- spin_unlock_bh(&session->lock);
debug_scsi("sc completed while abort in progress\n");
- goto success_rel_mutex;
+ goto success;
}
/* what should we do here ? */
@@ -1204,15 +1198,13 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto failed;
}
- if (ctask->state == ISCSI_TASK_PENDING)
- goto success_cleanup;
+ if (ctask->state == ISCSI_TASK_PENDING) {
+ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
conn->tmabort_state = TMABORT_INITIAL;
-
- spin_unlock_bh(&session->lock);
rc = iscsi_exec_abort_task(sc, ctask);
- spin_lock_bh(&session->lock);
-
if (rc || sc->SCp.phase != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
goto failed;
@@ -1220,45 +1212,44 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
switch (conn->tmabort_state) {
case TMABORT_SUCCESS:
- goto success_cleanup;
+ spin_unlock_bh(&session->lock);
+ /*
+ * clean up task if aborted. grab the recv lock as a writer
+ */
+ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_command(conn, ctask, DID_ABORT << 16);
+ spin_unlock(&session->lock);
+ write_unlock_bh(conn->recv_lock);
+ /*
+ * make sure xmit thread is not still touching the
+ * ctask/scsi_cmnd
+ */
+ scsi_flush_work(session->host);
+ goto success_unlocked;
case TMABORT_NOT_FOUND:
if (!ctask->sc) {
/* ctask completed before tmf abort response */
- spin_unlock_bh(&session->lock);
debug_scsi("sc completed while abort in progress\n");
- goto success_rel_mutex;
+ goto success;
}
/* fall through */
default:
/* timedout or failed */
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- spin_lock_bh(&session->lock);
- goto failed;
+ goto failed_unlocked;
}
-success_cleanup:
- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+success:
spin_unlock_bh(&session->lock);
-
- /*
- * clean up task if aborted. we have the xmitmutex so grab
- * the recv lock as a writer
- */
- write_lock_bh(conn->recv_lock);
- spin_lock(&session->lock);
- fail_command(conn, ctask, DID_ABORT << 16);
- spin_unlock(&session->lock);
- write_unlock_bh(conn->recv_lock);
-
-success_rel_mutex:
- mutex_unlock(&conn->xmitmutex);
+success_unlocked:
+ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
return SUCCESS;
failed:
spin_unlock_bh(&session->lock);
- mutex_unlock(&conn->xmitmutex);
-
+failed_unlocked:
debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
return FAILED;
}
@@ -1339,6 +1330,10 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
* iscsi_session_setup - create iscsi cls session and host and session
* @scsit: scsi transport template
* @iscsit: iscsi transport template
+ * @cmds_max: scsi host can queue
+ * @qdepth: scsi host cmds per lun
+ * @cmd_task_size: LLD ctask private data size
+ * @mgmt_task_size: LLD mtask private data size
* @initial_cmdsn: initial CmdSN
* @hostno: host no allocated
*
@@ -1348,6 +1343,7 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
struct iscsi_cls_session *
iscsi_session_setup(struct iscsi_transport *iscsit,
struct scsi_transport_template *scsit,
+ uint16_t cmds_max, uint16_t qdepth,
int cmd_task_size, int mgmt_task_size,
uint32_t initial_cmdsn, uint32_t *hostno)
{
@@ -1356,11 +1352,32 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
struct iscsi_cls_session *cls_session;
int cmd_i;
+ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+ if (qdepth != 0)
+ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+ "Queue depth must be between 1 and %d.\n",
+ qdepth, ISCSI_MAX_CMD_PER_LUN);
+ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+ if (cmds_max < 2 || (cmds_max & (cmds_max - 1)) ||
+ cmds_max >= ISCSI_MGMT_ITT_OFFSET) {
+ if (cmds_max != 0)
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
+ "can_queue must be a power of 2 and between "
+ "2 and %d - setting to %d.\n", cmds_max,
+ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
+ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+
shost = scsi_host_alloc(iscsit->host_template,
hostdata_privsize(sizeof(*session)));
if (!shost)
return NULL;
+ /* the iscsi layer keeps one task in reserve */
+ shost->can_queue = cmds_max - 1;
+ shost->cmd_per_lun = qdepth;
shost->max_id = 1;
shost->max_channel = 0;
shost->max_lun = iscsit->max_lun;
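
The validation added above requires can_queue (cmds_max) to be a power of two in [2, ISCSI_MGMT_ITT_OFFSET) so command ITTs stay below the range reserved for management tasks, and clamps anything else to the default. A minimal sketch of that check using the usual x & (x - 1) power-of-two test (the numeric limits here are illustrative, not the header values):

#include <stdio.h>

#define DEF_XMIT_CMDS_MAX 128   /* illustrative defaults, not the headers' */
#define MGMT_ITT_OFFSET   0xa00

/* A nonzero value is a power of two exactly when x & (x - 1) == 0. */
static int is_pow2(unsigned int x)
{
        return x && !(x & (x - 1));
}

static unsigned int sanitize_cmds_max(unsigned int cmds_max)
{
        if (cmds_max < 2 || !is_pow2(cmds_max) ||
            cmds_max >= MGMT_ITT_OFFSET) {
                if (cmds_max)
                        printf("invalid can_queue %u, using %u\n",
                               cmds_max, DEF_XMIT_CMDS_MAX);
                return DEF_XMIT_CMDS_MAX;       /* silent when unset (0) */
        }
        return cmds_max;
}

int main(void)
{
        printf("-> %u\n", sanitize_cmds_max(0));        /* unset */
        printf("-> %u\n", sanitize_cmds_max(100));      /* not a power of two */
        printf("-> %u\n", sanitize_cmds_max(256));      /* accepted */
        return 0;
}
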
@@ -1374,7 +1391,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
session->host = shost;
session->state = ISCSI_STATE_FREE;
session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
- session->cmds_max = ISCSI_XMIT_CMDS_MAX;
+ session->cmds_max = cmds_max;
session->cmdsn = initial_cmdsn;
session->exp_cmdsn = initial_cmdsn + 1;
session->max_cmdsn = initial_cmdsn + 1;
@@ -1461,7 +1478,14 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ kfree(session->password);
+ kfree(session->password_in);
+ kfree(session->username);
+ kfree(session->username_in);
kfree(session->targetname);
+ kfree(session->netdev);
+ kfree(session->hwaddress);
+ kfree(session->initiatorname);
iscsi_destroy_session(cls_session);
scsi_host_put(shost);
@@ -1499,11 +1523,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
INIT_LIST_HEAD(&conn->xmitqueue);
/* initialize general immediate & non-immediate PDU commands queue */
- conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (conn->immqueue == ERR_PTR(-ENOMEM))
- goto immqueue_alloc_fail;
-
conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
GFP_KERNEL, NULL);
if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
@@ -1527,7 +1546,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
conn->login_mtask->data = conn->data = data;
init_timer(&conn->tmabort_timer);
- mutex_init(&conn->xmitmutex);
init_waitqueue_head(&conn->ehwait);
return cls_conn;
@@ -1538,8 +1556,6 @@ login_mtask_data_alloc_fail:
login_mtask_alloc_fail:
kfifo_free(conn->mgmtqueue);
mgmtqueue_alloc_fail:
- kfifo_free(conn->immqueue);
-immqueue_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
}
@@ -1558,10 +1574,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *session = conn->session;
unsigned long flags;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- mutex_lock(&conn->xmitmutex);
-
spin_lock_bh(&session->lock);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
/*
@@ -1572,8 +1586,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
- mutex_unlock(&conn->xmitmutex);
-
/*
* Block until all in-progress commands for this connection
* time out or fail.
@@ -1610,7 +1622,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
- kfifo_free(conn->immqueue);
kfifo_free(conn->mgmtqueue);
iscsi_destroy_conn(cls_conn);
@@ -1671,8 +1682,7 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
struct iscsi_mgmt_task *mtask, *tmp;
/* handle pending */
- while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
- __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
+ while (__kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
if (mtask == conn->login_mtask)
continue;
debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
@@ -1742,12 +1752,12 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
conn->c_stage = ISCSI_CONN_STOPPED;
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
spin_unlock_bh(&session->lock);
+ scsi_flush_work(session->host);
write_lock_bh(conn->recv_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(conn->recv_lock);
- mutex_lock(&conn->xmitmutex);
/*
* for connection level recovery we should not calculate
* header digest. conn->hdr_size used for optimization
@@ -1771,8 +1781,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
fail_all_commands(conn);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
-
- mutex_unlock(&conn->xmitmutex);
}
void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
@@ -1867,6 +1875,30 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_EXP_STATSN:
sscanf(buf, "%u", &conn->exp_statsn);
break;
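+ /*
+ * CHAP authentication strings are duplicated with kstrdup() so the
+ * session owns its own copies; they are released in
+ * iscsi_session_teardown().
+ */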
+ case ISCSI_PARAM_USERNAME:
+ kfree(session->username);
+ session->username = kstrdup(buf, GFP_KERNEL);
+ if (!session->username)
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_USERNAME_IN:
+ kfree(session->username_in);
+ session->username_in = kstrdup(buf, GFP_KERNEL);
+ if (!session->username_in)
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_PASSWORD:
+ kfree(session->password);
+ session->password = kstrdup(buf, GFP_KERNEL);
+ if (!session->password)
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_PASSWORD_IN:
+ kfree(session->password_in);
+ session->password_in = kstrdup(buf, GFP_KERNEL);
+ if (!session->password_in)
+ return -ENOMEM;
+ break;
case ISCSI_PARAM_TARGET_NAME:
/* this should not change between logins */
if (session->targetname)
@@ -1940,6 +1972,18 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_TPGT:
len = sprintf(buf, "%d\n", session->tpgt);
break;
+ case ISCSI_PARAM_USERNAME:
+ len = sprintf(buf, "%s\n", session->username);
+ break;
+ case ISCSI_PARAM_USERNAME_IN:
+ len = sprintf(buf, "%s\n", session->username_in);
+ break;
+ case ISCSI_PARAM_PASSWORD:
+ len = sprintf(buf, "%s\n", session->password);
+ break;
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
default:
return -ENOSYS;
}
@@ -1990,6 +2034,66 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+{
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+
+int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+{
+ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
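+ /*
+ * Each host-level string is only set once; later writes from
+ * userspace are ignored. The strings are freed in
+ * iscsi_session_teardown().
+ */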
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ if (!session->netdev)
+ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ if (!session->hwaddress)
+ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ if (!session->initiatorname)
+ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+
MODULE_AUTHOR("Mike Christie");
MODULE_DESCRIPTION("iSCSI library functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index aafdc92f8312..3a3c1ac9c6cd 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -30,6 +30,13 @@ config SCSI_SAS_LIBSAS
This provides transport specific helpers for SAS drivers which
use the domain device construct (like the aic94xxx).
+config SCSI_SAS_ATA
+ bool "ATA support for libsas (requires libata)"
+ depends on SCSI_SAS_LIBSAS && ATA
+ help
 Builds ATA support into libsas. This will require libata
 to be loaded along with libsas.
+
config SCSI_SAS_LIBSAS_DEBUG
bool "Compile the SAS Domain Transport Attributes in debug mode"
default y
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 44d972a3b4bd..fd387b91856e 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -34,3 +34,4 @@ libsas-y += sas_init.o \
sas_discover.o \
sas_expander.o \
sas_scsi_host.o
+libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
new file mode 100644
index 000000000000..ced2de32c511
--- /dev/null
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -0,0 +1,817 @@
+/*
+ * Support for SATA devices on Serial Attached SCSI (SAS) controllers
+ *
+ * Copyright (C) 2006 IBM Corporation
+ *
+ * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/scatterlist.h>
+
+#include <scsi/sas_ata.h>
+#include "sas_internal.h"
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_sas.h>
+#include "../scsi_sas_internal.h"
+#include "../scsi_transport_api.h"
+#include <scsi/scsi_eh.h>
+
+static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
+{
+ /* Cheesy attempt to translate SAS errors into ATA. Hah! */
+
+ /* transport error */
+ if (ts->resp == SAS_TASK_UNDELIVERED)
+ return AC_ERR_ATA_BUS;
+
+ /* ts->resp == SAS_TASK_COMPLETE */
+ /* task delivered, what happened afterwards? */
+ switch (ts->stat) {
+ case SAS_DEV_NO_RESPONSE:
+ return AC_ERR_TIMEOUT;
+
+ case SAS_INTERRUPTED:
+ case SAS_PHY_DOWN:
+ case SAS_NAK_R_ERR:
+ return AC_ERR_ATA_BUS;
+
+
+ case SAS_DATA_UNDERRUN:
+ /*
+ * Some programs that use the taskfile interface
+ * (smartctl in particular) can cause underrun
+ * problems. Ignore these errors, perhaps at our
+ * peril.
+ */
+ return 0;
+
+ case SAS_DATA_OVERRUN:
+ case SAS_QUEUE_FULL:
+ case SAS_DEVICE_UNKNOWN:
+ case SAS_SG_ERR:
+ return AC_ERR_INVALID;
+
+ case SAM_CHECK_COND:
+ case SAS_OPEN_TO:
+ case SAS_OPEN_REJECT:
+ SAS_DPRINTK("%s: Saw error %d. What to do?\n",
+ __FUNCTION__, ts->stat);
+ return AC_ERR_OTHER;
+
+ case SAS_ABORTED_TASK:
+ return AC_ERR_DEV;
+
+ case SAS_PROTO_RESPONSE:
+ /* This means the ending_fis has the error
+ * value; return 0 here to collect it */
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void sas_ata_task_done(struct sas_task *task)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+ struct domain_device *dev;
+ struct task_status_struct *stat = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
+ struct sas_ha_struct *sas_ha;
+ enum ata_completion_errors ac;
+ unsigned long flags;
+
+ if (!qc)
+ goto qc_already_gone;
+
+ dev = qc->ap->private_data;
+ sas_ha = dev->port->ha;
+
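+ /*
+ * A good or PROTO_RESPONSE completion carries the ending D2H FIS;
+ * cache the taskfile and SCR values in the domain device so
+ * sas_ata_check_status()/sas_ata_tf_read() can report them to libata.
+ */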
+ spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_GOOD) {
+ ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+ qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ dev->sata_dev.sstatus = resp->sstatus;
+ dev->sata_dev.serror = resp->serror;
+ dev->sata_dev.scontrol = resp->scontrol;
+ } else if (stat->stat != SAM_STAT_GOOD) {
+ ac = sas_to_ata_err(stat);
+ if (ac) {
+ SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__,
+ stat->stat);
+ /* We saw a SAS error. Send a vague error. */
+ qc->err_mask = ac;
+ dev->sata_dev.tf.feature = 0x04; /* status err */
+ dev->sata_dev.tf.command = ATA_ERR;
+ }
+ }
+
+ qc->lldd_task = NULL;
+ if (qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, NULL);
+ ata_qc_complete(qc);
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+
+ /*
+ * If the sas_task has an ata qc, a scsi_cmnd and the aborted
+ * flag is set, then we must have come in via the libsas EH
+ * functions. When we exit this function, we need to put the
+ * scsi_cmnd on the list of finished errors. The ata_qc_complete
+ * call cleans up the libata side of things but we're protected
+ * from the scsi_cmnd going away because the scsi_cmnd is owned
+ * by the EH, making libata's call to scsi_done a NOP.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (qc->scsicmd && task->task_state_flags & SAS_TASK_STATE_ABORTED)
+ scsi_eh_finish_cmd(qc->scsicmd, &sas_ha->eh_done_q);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+qc_already_gone:
+ list_del_init(&task->list);
+ sas_free_task(task);
+}
+
+static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+{
+ int res;
+ struct sas_task *task;
+ struct domain_device *dev = qc->ap->private_data;
+ struct sas_ha_struct *sas_ha = dev->port->ha;
+ struct Scsi_Host *host = sas_ha->core.shost;
+ struct sas_internal *i = to_sas_internal(host->transportt);
+ struct scatterlist *sg;
+ unsigned int num = 0;
+ unsigned int xfer = 0;
+
+ task = sas_alloc_task(GFP_ATOMIC);
+ if (!task)
+ return AC_ERR_SYSTEM;
+ task->dev = dev;
+ task->task_proto = SAS_PROTOCOL_STP;
+ task->task_done = sas_ata_task_done;
+
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ /* Need to zero out the tag libata assigned us */
+ qc->tf.nsect = 0;
+ }
+
+ ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
+ task->uldd_task = qc;
+ if (is_atapi_taskfile(&qc->tf)) {
+ memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+ task->total_xfer_len = qc->nbytes + qc->pad_len;
+ task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+ } else {
+ ata_for_each_sg(sg, qc) {
+ num++;
+ xfer += sg->length;
+ }
+
+ task->total_xfer_len = xfer;
+ task->num_scatter = num;
+ }
+
+ task->data_dir = qc->dma_dir;
+ task->scatter = qc->__sg;
+ task->ata_task.retry_count = 1;
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ qc->lldd_task = task;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NCQ:
+ task->ata_task.use_ncq = 1;
+ /* fall through */
+ case ATA_PROT_ATAPI_DMA:
+ case ATA_PROT_DMA:
+ task->ata_task.dma_xfer = 1;
+ break;
+ }
+
+ if (qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, task);
+
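+ /* Hand the task to the LLDD directly, or via the libsas queue
+ * thread when the driver uses internal command queueing. */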
+ if (sas_ha->lldd_max_execute_num < 2)
+ res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
+ else
+ res = sas_queue_up(task);
+
+ /* Examine */
+ if (res) {
+ SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
+
+ if (qc->scsicmd)
+ ASSIGN_SAS_TASK(qc->scsicmd, NULL);
+ sas_free_task(task);
+ return AC_ERR_SYSTEM;
+ }
+
+ return 0;
+}
+
+static u8 sas_ata_check_status(struct ata_port *ap)
+{
+ struct domain_device *dev = ap->private_data;
+ return dev->sata_dev.tf.command;
+}
+
+static void sas_ata_phy_reset(struct ata_port *ap)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i =
+ to_sas_internal(dev->port->ha->core.shost->transportt);
+ int res = 0;
+
+ if (i->dft->lldd_I_T_nexus_reset)
+ res = i->dft->lldd_I_T_nexus_reset(dev);
+
+ if (res)
+ SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
+
+ switch (dev->sata_dev.command_set) {
+ case ATA_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__);
+ ap->device[0].class = ATA_DEV_ATA;
+ break;
+ case ATAPI_COMMAND_SET:
+ SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__);
+ ap->device[0].class = ATA_DEV_ATAPI;
+ break;
+ default:
+ SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
+ __FUNCTION__,
+ dev->sata_dev.command_set);
+ ap->device[0].class = ATA_DEV_UNKNOWN;
+ break;
+ }
+
+ ap->cbl = ATA_CBL_SATA;
+}
+
+static void sas_ata_post_internal(struct ata_queued_cmd *qc)
+{
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ qc->err_mask |= AC_ERR_OTHER;
+
+ if (qc->err_mask) {
+ /*
+ * Find the sas_task and kill it. By this point,
+ * libata has decided to kill the qc, so we needn't
+ * bother with sas_ata_task_done. But we still
+ * ought to abort the task.
+ */
+ struct sas_task *task = qc->lldd_task;
+ unsigned long flags;
+
+ qc->lldd_task = NULL;
+ if (task) {
+ /* Should this be an AT(API) device reset? */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ task->uldd_task = NULL;
+ __sas_task_abort(task);
+ }
+ }
+}
+
+static void sas_ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct domain_device *dev = ap->private_data;
+ memcpy(tf, &dev->sata_dev.tf, sizeof (*tf));
+}
+
+static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
+ u32 val)
+{
+ struct domain_device *dev = ap->private_data;
+
+ SAS_DPRINTK("STUB %s\n", __FUNCTION__);
+ switch (sc_reg_in) {
+ case SCR_STATUS:
+ dev->sata_dev.sstatus = val;
+ break;
+ case SCR_CONTROL:
+ dev->sata_dev.scontrol = val;
+ break;
+ case SCR_ERROR:
+ dev->sata_dev.serror = val;
+ break;
+ case SCR_ACTIVE:
+ dev->sata_dev.ap->sactive = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
+ u32 *val)
+{
+ struct domain_device *dev = ap->private_data;
+
+ SAS_DPRINTK("STUB %s\n", __FUNCTION__);
+ switch (sc_reg_in) {
+ case SCR_STATUS:
+ *val = dev->sata_dev.sstatus;
+ return 0;
+ case SCR_CONTROL:
+ *val = dev->sata_dev.scontrol;
+ return 0;
+ case SCR_ERROR:
+ *val = dev->sata_dev.serror;
+ return 0;
+ case SCR_ACTIVE:
+ *val = dev->sata_dev.ap->sactive;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct ata_port_operations sas_sata_ops = {
+ .port_disable = ata_port_disable,
+ .check_status = sas_ata_check_status,
+ .check_altstatus = sas_ata_check_status,
+ .dev_select = ata_noop_dev_select,
+ .phy_reset = sas_ata_phy_reset,
+ .post_internal_cmd = sas_ata_post_internal,
+ .tf_read = sas_ata_tf_read,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = sas_ata_qc_issue,
+ .port_start = ata_sas_port_start,
+ .port_stop = ata_sas_port_stop,
+ .scr_read = sas_ata_scr_read,
+ .scr_write = sas_ata_scr_write
+};
+
+static struct ata_port_info sata_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+ .pio_mask = 0x1f, /* PIO0-4 */
+ .mwdma_mask = 0x07, /* MWDMA0-2 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sas_sata_ops
+};
+
+int sas_ata_init_host_and_port(struct domain_device *found_dev,
+ struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct ata_port *ap;
+
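+ /* Each SATA domain device gets its own single-port ata_host,
+ * tied to the HA's PCI device and driven through sas_sata_ops. */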
+ ata_host_init(&found_dev->sata_dev.ata_host,
+ &ha->pcidev->dev,
+ sata_port_info.flags,
+ &sas_sata_ops);
+ ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
+ &sata_port_info,
+ shost);
+ if (!ap) {
+ SAS_DPRINTK("ata_sas_port_alloc failed.\n");
+ return -ENODEV;
+ }
+
+ ap->private_data = found_dev;
+ ap->cbl = ATA_CBL_SATA;
+ ap->scsi_host = shost;
+ found_dev->sata_dev.ap = ap;
+
+ return 0;
+}
+
+void sas_ata_task_abort(struct sas_task *task)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+ struct completion *waiting;
+
+ /* Bounce SCSI-initiated commands to the SCSI EH */
+ if (qc->scsicmd) {
+ scsi_req_abort_cmd(qc->scsicmd);
+ scsi_schedule_eh(qc->scsicmd->device->host);
+ return;
+ }
+
+ /* Internal command, fake a timeout and complete. */
+ qc->flags &= ~ATA_QCFLAG_ACTIVE;
+ qc->flags |= ATA_QCFLAG_FAILED;
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ waiting = qc->private_data;
+ complete(waiting);
+}
+
+static void sas_task_timedout(unsigned long _task)
+{
+ struct sas_task *task = (void *) _task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ complete(&task->completion);
+}
+
+static void sas_disc_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->timer))
+ return;
+ complete(&task->completion);
+}
+
+#define SAS_DEV_TIMEOUT 10
+
+/**
+ * sas_execute_task -- Basic task processing for discovery
+ * @task: the task to be executed
+ * @buffer: pointer to buffer to do I/O
+ * @size: size of @buffer
+ * @pci_dma_dir: PCI_DMA_...
+ */
+static int sas_execute_task(struct sas_task *task, void *buffer, int size,
+ int pci_dma_dir)
+{
+ int res = 0;
+ struct scatterlist *scatter = NULL;
+ struct task_status_struct *ts = &task->task_status;
+ int num_scatter = 0;
+ int retries = 0;
+ struct sas_internal *i =
+ to_sas_internal(task->dev->port->ha->core.shost->transportt);
+
+ if (pci_dma_dir != PCI_DMA_NONE) {
+ scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
+ if (!scatter)
+ goto out;
+
+ sg_init_one(scatter, buffer, size);
+ num_scatter = 1;
+ }
+
+ task->task_proto = task->dev->tproto;
+ task->scatter = scatter;
+ task->num_scatter = num_scatter;
+ task->total_xfer_len = size;
+ task->data_dir = pci_dma_dir;
+ task->task_done = sas_disc_task_done;
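+ /* ATA/STP discovery commands need the scatterlist DMA-mapped
+ * before the task is handed to the LLDD; it is unmapped again in
+ * the ex_err path below. */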
+ if (pci_dma_dir != PCI_DMA_NONE &&
+ sas_protocol_ata(task->task_proto)) {
+ task->num_scatter = pci_map_sg(task->dev->port->ha->pcidev,
+ task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ }
+
+ for (retries = 0; retries < 5; retries++) {
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ init_completion(&task->completion);
+
+ task->timer.data = (unsigned long) task;
+ task->timer.function = sas_task_timedout;
+ task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
+ add_timer(&task->timer);
+
+ res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
+ if (res) {
+ del_timer(&task->timer);
+ SAS_DPRINTK("executing SAS discovery task failed:%d\n",
+ res);
+ goto ex_err;
+ }
+ wait_for_completion(&task->completion);
+ res = -ETASK;
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ int res2;
+ SAS_DPRINTK("task aborted, flags:0x%x\n",
+ task->task_state_flags);
+ res2 = i->dft->lldd_abort_task(task);
+ SAS_DPRINTK("came back from abort task\n");
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ if (res2 == TMF_RESP_FUNC_COMPLETE)
+ continue; /* Retry the task */
+ else
+ goto ex_err;
+ }
+ }
+ if (task->task_status.stat == SAM_BUSY ||
+ task->task_status.stat == SAM_TASK_SET_FULL ||
+ task->task_status.stat == SAS_QUEUE_FULL) {
+ SAS_DPRINTK("task: q busy, sleeping...\n");
+ schedule_timeout_interruptible(HZ);
+ } else if (task->task_status.stat == SAM_CHECK_COND) {
+ struct scsi_sense_hdr shdr;
+
+ if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
+ &shdr)) {
+ SAS_DPRINTK("couldn't normalize sense\n");
+ continue;
+ }
+ if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
+ (shdr.sense_key == 2 && shdr.asc == 4 &&
+ shdr.ascq == 1)) {
+ SAS_DPRINTK("device %016llx LUN: %016llx "
+ "powering up or not ready yet, "
+ "sleeping...\n",
+ SAS_ADDR(task->dev->sas_addr),
+ SAS_ADDR(task->ssp_task.LUN));
+
+ schedule_timeout_interruptible(5*HZ);
+ } else if (shdr.sense_key == 1) {
+ res = 0;
+ break;
+ } else if (shdr.sense_key == 5) {
+ break;
+ } else {
+ SAS_DPRINTK("dev %016llx LUN: %016llx "
+ "sense key:0x%x ASC:0x%x ASCQ:0x%x"
+ "\n",
+ SAS_ADDR(task->dev->sas_addr),
+ SAS_ADDR(task->ssp_task.LUN),
+ shdr.sense_key,
+ shdr.asc, shdr.ascq);
+ }
+ } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
+ task->task_status.stat != SAM_GOOD) {
+ SAS_DPRINTK("task finished with resp:0x%x, "
+ "stat:0x%x\n",
+ task->task_status.resp,
+ task->task_status.stat);
+ goto ex_err;
+ } else {
+ res = 0;
+ break;
+ }
+ }
+ex_err:
+ if (pci_dma_dir != PCI_DMA_NONE) {
+ if (sas_protocol_ata(task->task_proto))
+ pci_unmap_sg(task->dev->port->ha->pcidev,
+ task->scatter, task->num_scatter,
+ task->data_dir);
+ kfree(scatter);
+ }
+out:
+ return res;
+}
+
+/* ---------- SATA ---------- */
+
+static void sas_get_ata_command_set(struct domain_device *dev)
+{
+ struct dev_to_host_fis *fis =
+ (struct dev_to_host_fis *) dev->frame_rcvd;
+
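+ /* Decode the signature in the initial D2H Register FIS to work out
+ * which command set (ATA, ATAPI/SEMB or port multiplier) the
+ * attached device speaks. */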
+ if ((fis->sector_count == 1 && /* ATA */
+ fis->lbal == 1 &&
+ fis->lbam == 0 &&
+ fis->lbah == 0 &&
+ fis->device == 0)
+ ||
+ (fis->sector_count == 0 && /* CE-ATA (mATA) */
+ fis->lbal == 0 &&
+ fis->lbam == 0xCE &&
+ fis->lbah == 0xAA &&
+ (fis->device & ~0x10) == 0))
+
+ dev->sata_dev.command_set = ATA_COMMAND_SET;
+
+ else if ((fis->interrupt_reason == 1 && /* ATAPI */
+ fis->lbal == 1 &&
+ fis->byte_count_low == 0x14 &&
+ fis->byte_count_high == 0xEB &&
+ (fis->device & ~0x10) == 0))
+
+ dev->sata_dev.command_set = ATAPI_COMMAND_SET;
+
+ else if ((fis->sector_count == 1 && /* SEMB */
+ fis->lbal == 1 &&
+ fis->lbam == 0x3C &&
+ fis->lbah == 0xC3 &&
+ fis->device == 0)
+ ||
+ (fis->interrupt_reason == 1 && /* SATA PM */
+ fis->lbal == 1 &&
+ fis->byte_count_low == 0x69 &&
+ fis->byte_count_high == 0x96 &&
+ (fis->device & ~0x10) == 0))
+
+ /* Treat it as a superset? */
+ dev->sata_dev.command_set = ATAPI_COMMAND_SET;
+}
+
+/**
+ * sas_issue_ata_cmd -- Basic SATA command processing for discovery
+ * @dev: the device to send the command to
+ * @command: the command register
+ * @features: the features register
+ * @buffer: pointer to buffer to do I/O
+ * @size: size of @buffer
+ * @pci_dma_dir: PCI_DMA_...
+ */
+static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
+ u8 features, void *buffer, int size,
+ int pci_dma_dir)
+{
+ int res = 0;
+ struct sas_task *task;
+ struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
+ &dev->frame_rcvd[0];
+
+ res = -ENOMEM;
+ task = sas_alloc_task(GFP_KERNEL);
+ if (!task)
+ goto out;
+
+ task->dev = dev;
+
+ task->ata_task.fis.fis_type = 0x27;
+ task->ata_task.fis.command = command;
+ task->ata_task.fis.features = features;
+ task->ata_task.fis.device = d2h_fis->device;
+ task->ata_task.retry_count = 1;
+
+ res = sas_execute_task(task, buffer, size, pci_dma_dir);
+
+ sas_free_task(task);
+out:
+ return res;
+}
+
+static void sas_sata_propagate_sas_addr(struct domain_device *dev)
+{
+ unsigned long flags;
+ struct asd_sas_port *port = dev->port;
+ struct asd_sas_phy *phy;
+
+ BUG_ON(dev->parent);
+
+ memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
+ spin_lock_irqsave(&port->phy_list_lock, flags);
+ list_for_each_entry(phy, &port->phy_list, port_phy_el)
+ memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
+ spin_unlock_irqrestore(&port->phy_list_lock, flags);
+}
+
+#define ATA_IDENTIFY_DEV 0xEC
+#define ATA_IDENTIFY_PACKET_DEV 0xA1
+#define ATA_SET_FEATURES 0xEF
+#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
+
+/**
+ * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
+ * @dev: STP/SATA device of interest (ATA/ATAPI)
+ *
+ * The LLDD has already been notified of this device, so that we can
+ * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
+ * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
+ * performance for this device.
+ */
+static int sas_discover_sata_dev(struct domain_device *dev)
+{
+ int res;
+ __le16 *identify_x;
+ u8 command;
+
+ identify_x = kzalloc(512, GFP_KERNEL);
+ if (!identify_x)
+ return -ENOMEM;
+
+ if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
+ dev->sata_dev.identify_device = identify_x;
+ command = ATA_IDENTIFY_DEV;
+ } else {
+ dev->sata_dev.identify_packet_device = identify_x;
+ command = ATA_IDENTIFY_PACKET_DEV;
+ }
+
+ res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
+ PCI_DMA_FROMDEVICE);
+ if (res)
+ goto out_err;
+
+ /* lives on the media? */
+ if (le16_to_cpu(identify_x[0]) & 4) {
+ /* incomplete response */
+ SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
+ "dev %llx\n", SAS_ADDR(dev->sas_addr));
+ if (!le16_to_cpu(identify_x[83] & (1<<6)))
+ goto cont1;
+ res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
+ ATA_FEATURE_PUP_STBY_SPIN_UP,
+ NULL, 0, PCI_DMA_NONE);
+ if (res)
+ goto cont1;
+
+ schedule_timeout_interruptible(5*HZ); /* More time? */
+ res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
+ PCI_DMA_FROMDEVICE);
+ if (res)
+ goto out_err;
+ }
+cont1:
+ /* Get WWN */
+ if (dev->port->oob_mode != SATA_OOB_MODE) {
+ memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
+ SAS_ADDR_SIZE);
+ } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
+ (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
+ == 0x5000) {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ dev->sas_addr[2*i] =
+ (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
+ dev->sas_addr[2*i+1] =
+ le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
+ }
+ }
+ sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
+ if (!dev->parent)
+ sas_sata_propagate_sas_addr(dev);
+
+ /* XXX Hint: register this SATA device with SATL.
+ When this returns, dev->sata_dev->lu is alive and
+ present.
+ sas_satl_register_dev(dev);
+ */
+
+ sas_fill_in_rphy(dev, dev->rphy);
+
+ return 0;
+out_err:
+ dev->sata_dev.identify_packet_device = NULL;
+ dev->sata_dev.identify_device = NULL;
+ kfree(identify_x);
+ return res;
+}
+
+static int sas_discover_sata_pm(struct domain_device *dev)
+{
+ return -ENODEV;
+}
+
+/**
+ * sas_discover_sata -- discover an STP/SATA domain device
+ * @dev: pointer to struct domain_device of interest
+ *
+ * First we notify the LLDD of this device, so we can send frames to
+ * it. Then depending on the type of device we call the appropriate
+ * discover functions. Once device discover is done, we notify the
+ * LLDD so that it can fine-tune its parameters for the device, by
+ * removing it and then adding it. That is, the second time around,
+ * the driver would have certain fields, that it is looking at, set.
+ * Finally we initialize the kobj so that the device can be added to
+ * the system at registration time. Devices directly attached to a HA
+ * port, have no parents. All other devices do, and should have their
+ * "parent" pointer set appropriately before calling this function.
+ */
+int sas_discover_sata(struct domain_device *dev)
+{
+ int res;
+
+ sas_get_ata_command_set(dev);
+
+ res = sas_notify_lldd_dev_found(dev);
+ if (res)
+ return res;
+
+ switch (dev->dev_type) {
+ case SATA_DEV:
+ res = sas_discover_sata_dev(dev);
+ break;
+ case SATA_PM:
+ res = sas_discover_sata_pm(dev);
+ break;
+ default:
+ break;
+ }
+ sas_notify_lldd_dev_gone(dev);
+ if (!res) {
+ sas_notify_lldd_dev_found(dev);
+ res = sas_rphy_add(dev->rphy);
+ }
+
+ return res;
+}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index a65598b1e536..6ac9f61d006a 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -55,149 +55,6 @@ void sas_init_dev(struct domain_device *dev)
}
}
-static void sas_task_timedout(unsigned long _task)
-{
- struct sas_task *task = (void *) _task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->completion);
-}
-
-static void sas_disc_task_done(struct sas_task *task)
-{
- if (!del_timer(&task->timer))
- return;
- complete(&task->completion);
-}
-
-#define SAS_DEV_TIMEOUT 10
-
-/**
- * sas_execute_task -- Basic task processing for discovery
- * @task: the task to be executed
- * @buffer: pointer to buffer to do I/O
- * @size: size of @buffer
- * @pci_dma_dir: PCI_DMA_...
- */
-static int sas_execute_task(struct sas_task *task, void *buffer, int size,
- int pci_dma_dir)
-{
- int res = 0;
- struct scatterlist *scatter = NULL;
- struct task_status_struct *ts = &task->task_status;
- int num_scatter = 0;
- int retries = 0;
- struct sas_internal *i =
- to_sas_internal(task->dev->port->ha->core.shost->transportt);
-
- if (pci_dma_dir != PCI_DMA_NONE) {
- scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
- if (!scatter)
- goto out;
-
- sg_init_one(scatter, buffer, size);
- num_scatter = 1;
- }
-
- task->task_proto = task->dev->tproto;
- task->scatter = scatter;
- task->num_scatter = num_scatter;
- task->total_xfer_len = size;
- task->data_dir = pci_dma_dir;
- task->task_done = sas_disc_task_done;
-
- for (retries = 0; retries < 5; retries++) {
- task->task_state_flags = SAS_TASK_STATE_PENDING;
- init_completion(&task->completion);
-
- task->timer.data = (unsigned long) task;
- task->timer.function = sas_task_timedout;
- task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
- add_timer(&task->timer);
-
- res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
- if (res) {
- del_timer(&task->timer);
- SAS_DPRINTK("executing SAS discovery task failed:%d\n",
- res);
- goto ex_err;
- }
- wait_for_completion(&task->completion);
- res = -ETASK;
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
- int res2;
- SAS_DPRINTK("task aborted, flags:0x%x\n",
- task->task_state_flags);
- res2 = i->dft->lldd_abort_task(task);
- SAS_DPRINTK("came back from abort task\n");
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- if (res2 == TMF_RESP_FUNC_COMPLETE)
- continue; /* Retry the task */
- else
- goto ex_err;
- }
- }
- if (task->task_status.stat == SAM_BUSY ||
- task->task_status.stat == SAM_TASK_SET_FULL ||
- task->task_status.stat == SAS_QUEUE_FULL) {
- SAS_DPRINTK("task: q busy, sleeping...\n");
- schedule_timeout_interruptible(HZ);
- } else if (task->task_status.stat == SAM_CHECK_COND) {
- struct scsi_sense_hdr shdr;
-
- if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
- &shdr)) {
- SAS_DPRINTK("couldn't normalize sense\n");
- continue;
- }
- if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
- (shdr.sense_key == 2 && shdr.asc == 4 &&
- shdr.ascq == 1)) {
- SAS_DPRINTK("device %016llx LUN: %016llx "
- "powering up or not ready yet, "
- "sleeping...\n",
- SAS_ADDR(task->dev->sas_addr),
- SAS_ADDR(task->ssp_task.LUN));
-
- schedule_timeout_interruptible(5*HZ);
- } else if (shdr.sense_key == 1) {
- res = 0;
- break;
- } else if (shdr.sense_key == 5) {
- break;
- } else {
- SAS_DPRINTK("dev %016llx LUN: %016llx "
- "sense key:0x%x ASC:0x%x ASCQ:0x%x"
- "\n",
- SAS_ADDR(task->dev->sas_addr),
- SAS_ADDR(task->ssp_task.LUN),
- shdr.sense_key,
- shdr.asc, shdr.ascq);
- }
- } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
- task->task_status.stat != SAM_GOOD) {
- SAS_DPRINTK("task finished with resp:0x%x, "
- "stat:0x%x\n",
- task->task_status.resp,
- task->task_status.stat);
- goto ex_err;
- } else {
- res = 0;
- break;
- }
- }
-ex_err:
- if (pci_dma_dir != PCI_DMA_NONE)
- kfree(scatter);
-out:
- return res;
-}
-
/* ---------- Domain device discovery ---------- */
/**
@@ -255,6 +112,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
switch (dev->dev_type) {
case SAS_END_DEV:
+ case SATA_DEV:
rphy = sas_end_device_alloc(port->port);
break;
case EDGE_DEV:
@@ -265,7 +123,6 @@ static int sas_get_port_device(struct asd_sas_port *port)
rphy = sas_expander_alloc(port->port,
SAS_FANOUT_EXPANDER_DEVICE);
break;
- case SATA_DEV:
default:
printk("ERROR: Unidentified device type %d\n", dev->dev_type);
rphy = NULL;
@@ -292,207 +149,15 @@ static int sas_get_port_device(struct asd_sas_port *port)
port->disc.max_level = 0;
dev->rphy = rphy;
- spin_lock(&port->dev_list_lock);
+ spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
- spin_unlock(&port->dev_list_lock);
+ spin_unlock_irq(&port->dev_list_lock);
return 0;
}
/* ---------- Discover and Revalidate ---------- */
-/* ---------- SATA ---------- */
-
-static void sas_get_ata_command_set(struct domain_device *dev)
-{
- struct dev_to_host_fis *fis =
- (struct dev_to_host_fis *) dev->frame_rcvd;
-
- if ((fis->sector_count == 1 && /* ATA */
- fis->lbal == 1 &&
- fis->lbam == 0 &&
- fis->lbah == 0 &&
- fis->device == 0)
- ||
- (fis->sector_count == 0 && /* CE-ATA (mATA) */
- fis->lbal == 0 &&
- fis->lbam == 0xCE &&
- fis->lbah == 0xAA &&
- (fis->device & ~0x10) == 0))
-
- dev->sata_dev.command_set = ATA_COMMAND_SET;
-
- else if ((fis->interrupt_reason == 1 && /* ATAPI */
- fis->lbal == 1 &&
- fis->byte_count_low == 0x14 &&
- fis->byte_count_high == 0xEB &&
- (fis->device & ~0x10) == 0))
-
- dev->sata_dev.command_set = ATAPI_COMMAND_SET;
-
- else if ((fis->sector_count == 1 && /* SEMB */
- fis->lbal == 1 &&
- fis->lbam == 0x3C &&
- fis->lbah == 0xC3 &&
- fis->device == 0)
- ||
- (fis->interrupt_reason == 1 && /* SATA PM */
- fis->lbal == 1 &&
- fis->byte_count_low == 0x69 &&
- fis->byte_count_high == 0x96 &&
- (fis->device & ~0x10) == 0))
-
- /* Treat it as a superset? */
- dev->sata_dev.command_set = ATAPI_COMMAND_SET;
-}
-
-/**
- * sas_issue_ata_cmd -- Basic SATA command processing for discovery
- * @dev: the device to send the command to
- * @command: the command register
- * @features: the features register
- * @buffer: pointer to buffer to do I/O
- * @size: size of @buffer
- * @pci_dma_dir: PCI_DMA_...
- */
-static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
- u8 features, void *buffer, int size,
- int pci_dma_dir)
-{
- int res = 0;
- struct sas_task *task;
- struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
- &dev->frame_rcvd[0];
-
- res = -ENOMEM;
- task = sas_alloc_task(GFP_KERNEL);
- if (!task)
- goto out;
-
- task->dev = dev;
-
- task->ata_task.fis.command = command;
- task->ata_task.fis.features = features;
- task->ata_task.fis.device = d2h_fis->device;
- task->ata_task.retry_count = 1;
-
- res = sas_execute_task(task, buffer, size, pci_dma_dir);
-
- sas_free_task(task);
-out:
- return res;
-}
-
-static void sas_sata_propagate_sas_addr(struct domain_device *dev)
-{
- unsigned long flags;
- struct asd_sas_port *port = dev->port;
- struct asd_sas_phy *phy;
-
- BUG_ON(dev->parent);
-
- memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
- spin_lock_irqsave(&port->phy_list_lock, flags);
- list_for_each_entry(phy, &port->phy_list, port_phy_el)
- memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
- spin_unlock_irqrestore(&port->phy_list_lock, flags);
-}
-
-#define ATA_IDENTIFY_DEV 0xEC
-#define ATA_IDENTIFY_PACKET_DEV 0xA1
-#define ATA_SET_FEATURES 0xEF
-#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
-
-/**
- * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
- * @dev: STP/SATA device of interest (ATA/ATAPI)
- *
- * The LLDD has already been notified of this device, so that we can
- * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
- * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
- * performance for this device.
- */
-static int sas_discover_sata_dev(struct domain_device *dev)
-{
- int res;
- __le16 *identify_x;
- u8 command;
-
- identify_x = kzalloc(512, GFP_KERNEL);
- if (!identify_x)
- return -ENOMEM;
-
- if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
- dev->sata_dev.identify_device = identify_x;
- command = ATA_IDENTIFY_DEV;
- } else {
- dev->sata_dev.identify_packet_device = identify_x;
- command = ATA_IDENTIFY_PACKET_DEV;
- }
-
- res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
- PCI_DMA_FROMDEVICE);
- if (res)
- goto out_err;
-
- /* lives on the media? */
- if (le16_to_cpu(identify_x[0]) & 4) {
- /* incomplete response */
- SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
- "dev %llx\n", SAS_ADDR(dev->sas_addr));
- if (!le16_to_cpu(identify_x[83] & (1<<6)))
- goto cont1;
- res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
- ATA_FEATURE_PUP_STBY_SPIN_UP,
- NULL, 0, PCI_DMA_NONE);
- if (res)
- goto cont1;
-
- schedule_timeout_interruptible(5*HZ); /* More time? */
- res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
- PCI_DMA_FROMDEVICE);
- if (res)
- goto out_err;
- }
-cont1:
- /* Get WWN */
- if (dev->port->oob_mode != SATA_OOB_MODE) {
- memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
- SAS_ADDR_SIZE);
- } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
- (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
- == 0x5000) {
- int i;
-
- for (i = 0; i < 4; i++) {
- dev->sas_addr[2*i] =
- (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
- dev->sas_addr[2*i+1] =
- le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
- }
- }
- sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
- if (!dev->parent)
- sas_sata_propagate_sas_addr(dev);
-
- /* XXX Hint: register this SATA device with SATL.
- When this returns, dev->sata_dev->lu is alive and
- present.
- sas_satl_register_dev(dev);
- */
- return 0;
-out_err:
- dev->sata_dev.identify_packet_device = NULL;
- dev->sata_dev.identify_device = NULL;
- kfree(identify_x);
- return res;
-}
-
-static int sas_discover_sata_pm(struct domain_device *dev)
-{
- return -ENODEV;
-}
-
int sas_notify_lldd_dev_found(struct domain_device *dev)
{
int res = 0;
@@ -525,60 +190,6 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
/* ---------- Common/dispatchers ---------- */
-/**
- * sas_discover_sata -- discover an STP/SATA domain device
- * @dev: pointer to struct domain_device of interest
- *
- * First we notify the LLDD of this device, so we can send frames to
- * it. Then depending on the type of device we call the appropriate
- * discover functions. Once device discover is done, we notify the
- * LLDD so that it can fine-tune its parameters for the device, by
- * removing it and then adding it. That is, the second time around,
- * the driver would have certain fields, that it is looking at, set.
- * Finally we initialize the kobj so that the device can be added to
- * the system at registration time. Devices directly attached to a HA
- * port, have no parents. All other devices do, and should have their
- * "parent" pointer set appropriately before calling this function.
- */
-int sas_discover_sata(struct domain_device *dev)
-{
- int res;
-
- sas_get_ata_command_set(dev);
-
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- goto out_err2;
-
- switch (dev->dev_type) {
- case SATA_DEV:
- res = sas_discover_sata_dev(dev);
- break;
- case SATA_PM:
- res = sas_discover_sata_pm(dev);
- break;
- default:
- break;
- }
- if (res)
- goto out_err;
-
- sas_notify_lldd_dev_gone(dev);
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- goto out_err2;
-
- res = sas_rphy_add(dev->rphy);
- if (res)
- goto out_err;
-
- return res;
-
-out_err:
- sas_notify_lldd_dev_gone(dev);
-out_err2:
- return res;
-}
/**
* sas_discover_end_dev -- discover an end device (SSP, etc)
@@ -685,11 +296,14 @@ static void sas_discover_domain(struct work_struct *work)
case FANOUT_DEV:
error = sas_discover_root_expander(dev);
break;
+#ifdef CONFIG_SCSI_SAS_ATA
case SATA_DEV:
case SATA_PM:
error = sas_discover_sata(dev);
break;
+#endif
default:
+ error = -ENXIO;
SAS_DPRINTK("unhandled device %d\n", dev->dev_type);
break;
}
@@ -698,9 +312,9 @@ static void sas_discover_domain(struct work_struct *work)
sas_rphy_free(dev->rphy);
dev->rphy = NULL;
- spin_lock(&port->dev_list_lock);
+ spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
- spin_unlock(&port->dev_list_lock);
+ spin_unlock_irq(&port->dev_list_lock);
kfree(dev); /* not kobject_register-ed yet */
port->port_dev = NULL;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e34442e405e8..b500f0c1449c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -23,6 +23,7 @@
*/
#include <linux/scatterlist.h>
+#include <linux/blkdev.h>
#include "sas_internal.h"
@@ -36,12 +37,6 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
u8 *sas_addr, int include);
static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
-#if 0
-/* FIXME: smp needs to migrate into the sas class */
-static ssize_t smp_portal_read(struct kobject *, char *, loff_t, size_t);
-static ssize_t smp_portal_write(struct kobject *, char *, loff_t, size_t);
-#endif
-
/* ---------- SMP task management ---------- */
static void smp_task_timedout(unsigned long _task)
@@ -218,6 +213,36 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
#define DISCOVER_REQ_SIZE 16
#define DISCOVER_RESP_SIZE 56
+static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
+ u8 *disc_resp, int single)
+{
+ int i, res;
+
+ disc_req[9] = single;
+ for (i = 1 ; i < 3; i++) {
+ struct discover_resp *dr;
+
+ res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (res)
+ return res;
+ /* This detects a failure to transmit the initial
+ * device-to-host FIS, as described in section G.5 of
+ * sas-2 r 04b */
+ dr = &((struct smp_resp *)disc_resp)->disc;
+ if (!(dr->attached_dev_type == 0 &&
+ dr->attached_sata_dev))
+ break;
+ /* In order to generate the dev to host FIS, we
+ * send a link reset to the expander port */
+ sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
+ /* Wait for the reset to trigger the negotiation */
+ msleep(500);
+ }
+ sas_set_ex_phy(dev, single, disc_resp);
+ return 0;
+}
+
static int sas_ex_phy_discover(struct domain_device *dev, int single)
{
struct expander_device *ex = &dev->ex_dev;
@@ -238,23 +263,15 @@ static int sas_ex_phy_discover(struct domain_device *dev, int single)
disc_req[1] = SMP_DISCOVER;
if (0 <= single && single < ex->num_phys) {
- disc_req[9] = single;
- res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
- disc_resp, DISCOVER_RESP_SIZE);
- if (res)
- goto out_err;
- sas_set_ex_phy(dev, single, disc_resp);
+ res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single);
} else {
int i;
for (i = 0; i < ex->num_phys; i++) {
- disc_req[9] = i;
- res = smp_execute_task(dev, disc_req,
- DISCOVER_REQ_SIZE, disc_resp,
- DISCOVER_RESP_SIZE);
+ res = sas_ex_phy_discover_helper(dev, disc_req,
+ disc_resp, i);
if (res)
goto out_err;
- sas_set_ex_phy(dev, i, disc_resp);
}
}
out_err:
@@ -518,6 +535,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
}
+#ifdef CONFIG_SCSI_SAS_ATA
+
#define RPS_REQ_SIZE 16
#define RPS_RESP_SIZE 60
@@ -527,6 +546,7 @@ static int sas_get_report_phy_sata(struct domain_device *dev,
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
+ u8 *resp = (u8 *)rps_resp;
if (!rps_req)
return -ENOMEM;
@@ -537,9 +557,30 @@ static int sas_get_report_phy_sata(struct domain_device *dev,
res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
rps_resp, RPS_RESP_SIZE);
+ /* 0x34 is the FIS type for the D2H fis. There's a potential
+ * standards cockup here. sas-2 explicitly specifies the FIS
+ * should be encoded so that FIS type is in resp[24].
+ * However, some expanders endian reverse this. Undo the
+ * reversal here */
+ if (!res && resp[27] == 0x34 && resp[24] != 0x34) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ int j = 24 + (i*4);
+ u8 a, b;
+ a = resp[j + 0];
+ b = resp[j + 1];
+ resp[j + 0] = resp[j + 3];
+ resp[j + 1] = resp[j + 2];
+ resp[j + 2] = b;
+ resp[j + 3] = a;
+ }
+ }
+
kfree(rps_req);
- return 0;
+ return res;
}
+#endif
static void sas_ex_get_linkrate(struct domain_device *parent,
struct domain_device *child,
@@ -607,6 +648,7 @@ static struct domain_device *sas_ex_discover_end_dev(
}
sas_ex_get_linkrate(parent, child, phy);
+#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
child->dev_type = SATA_DEV;
if (phy->attached_tproto & SAS_PROTO_STP)
@@ -623,16 +665,30 @@ static struct domain_device *sas_ex_discover_end_dev(
}
memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
+
+ rphy = sas_end_device_alloc(phy->port);
+ if (unlikely(!rphy))
+ goto out_free;
+
sas_init_dev(child);
+
+ child->rphy = rphy;
+
+ spin_lock_irq(&parent->port->dev_list_lock);
+ list_add_tail(&child->dev_list_node, &parent->port->dev_list);
+ spin_unlock_irq(&parent->port->dev_list_lock);
+
res = sas_discover_sata(child);
if (res) {
SAS_DPRINTK("sas_discover_sata() for device %16llx at "
"%016llx:0x%x returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
- goto out_free;
+ goto out_list_del;
}
- } else if (phy->attached_tproto & SAS_PROTO_SSP) {
+ } else
+#endif
+ if (phy->attached_tproto & SAS_PROTO_SSP) {
child->dev_type = SAS_END_DEV;
rphy = sas_end_device_alloc(phy->port);
/* FIXME: error handling */
@@ -644,9 +700,9 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
sas_fill_in_rphy(child, rphy);
- spin_lock(&parent->port->dev_list_lock);
+ spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
- spin_unlock(&parent->port->dev_list_lock);
+ spin_unlock_irq(&parent->port->dev_list_lock);
res = sas_discover_end_dev(child);
if (res) {
@@ -660,6 +716,7 @@ static struct domain_device *sas_ex_discover_end_dev(
SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
phy->attached_tproto, SAS_ADDR(parent->sas_addr),
phy_id);
+ goto out_free;
}
list_add_tail(&child->siblings, &parent_ex->children);
@@ -759,9 +816,9 @@ static struct domain_device *sas_ex_discover_expander(
sas_fill_in_rphy(child, rphy);
sas_rphy_add(rphy);
- spin_lock(&parent->port->dev_list_lock);
+ spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
- spin_unlock(&parent->port->dev_list_lock);
+ spin_unlock_irq(&parent->port->dev_list_lock);
res = sas_discover_expander(child);
if (res) {
@@ -1357,31 +1414,6 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
return 0;
}
-#if 0
-#define SMP_BIN_ATTR_NAME "smp_portal"
-
-static void sas_ex_smp_hook(struct domain_device *dev)
-{
- struct expander_device *ex_dev = &dev->ex_dev;
- struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr;
-
- memset(bin_attr, 0, sizeof(*bin_attr));
-
- bin_attr->attr.name = SMP_BIN_ATTR_NAME;
- bin_attr->attr.owner = THIS_MODULE;
- bin_attr->attr.mode = 0600;
-
- bin_attr->size = 0;
- bin_attr->private = NULL;
- bin_attr->read = smp_portal_read;
- bin_attr->write= smp_portal_write;
- bin_attr->mmap = NULL;
-
- ex_dev->smp_portal_pid = -1;
- init_MUTEX(&ex_dev->smp_sema);
-}
-#endif
-
/**
* sas_discover_expander -- expander discovery
* @ex: pointer to expander domain device
@@ -1843,74 +1875,49 @@ out:
return res;
}
-#if 0
-/* ---------- SMP portal ---------- */
-
-static ssize_t smp_portal_write(struct kobject *kobj, char *buf, loff_t offs,
- size_t size)
+int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
{
- struct domain_device *dev = to_dom_device(kobj);
- struct expander_device *ex = &dev->ex_dev;
-
- if (offs != 0)
- return -EFBIG;
- else if (size == 0)
- return 0;
+ struct domain_device *dev;
+ int ret, type = rphy->identify.device_type;
+ struct request *rsp = req->next_rq;
- down_interruptible(&ex->smp_sema);
- if (ex->smp_req)
- kfree(ex->smp_req);
- ex->smp_req = kzalloc(size, GFP_USER);
- if (!ex->smp_req) {
- up(&ex->smp_sema);
- return -ENOMEM;
+ if (!rsp) {
+ printk("%s: space for a smp response is missing\n",
+ __FUNCTION__);
+ return -EINVAL;
}
- memcpy(ex->smp_req, buf, size);
- ex->smp_req_size = size;
- ex->smp_portal_pid = current->pid;
- up(&ex->smp_sema);
- return size;
-}
-
-static ssize_t smp_portal_read(struct kobject *kobj, char *buf, loff_t offs,
- size_t size)
-{
- struct domain_device *dev = to_dom_device(kobj);
- struct expander_device *ex = &dev->ex_dev;
- u8 *smp_resp;
- int res = -EINVAL;
-
- /* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact
- * it should be 0.
- */
+ /* it seems aic94xx doesn't support SMP requests sent directly to the host */
+ if (!rphy) {
+ printk("%s: can we send a smp request to a host?\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
- down_interruptible(&ex->smp_sema);
- if (!ex->smp_req || ex->smp_portal_pid != current->pid)
- goto out;
+ if (type != SAS_EDGE_EXPANDER_DEVICE &&
+ type != SAS_FANOUT_EXPANDER_DEVICE) {
+ printk("%s: can we send a smp request to a device?\n",
+ __FUNCTION__);
+ return -EINVAL;
+ }
- res = 0;
- if (size == 0)
- goto out;
+ dev = sas_find_dev_by_rphy(rphy);
+ if (!dev) {
+ printk("%s: fail to find a domain_device?\n", __FUNCTION__);
+ return -EINVAL;
+ }
- res = -ENOMEM;
- smp_resp = alloc_smp_resp(size);
- if (!smp_resp)
- goto out;
- res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size,
- smp_resp, size);
- if (!res) {
- memcpy(buf, smp_resp, size);
- res = size;
+ /* do we need to support multiple segments? */
+ if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
+ printk("%s: multiple segments req %u %u, rsp %u %u\n",
+ __FUNCTION__, req->bio->bi_vcnt, req->data_len,
+ rsp->bio->bi_vcnt, rsp->data_len);
+ return -EINVAL;
}
- kfree(smp_resp);
-out:
- kfree(ex->smp_req);
- ex->smp_req = NULL;
- ex->smp_req_size = 0;
- ex->smp_portal_pid = -1;
- up(&ex->smp_sema);
- return res;
+ ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
+ bio_data(rsp->bio), rsp->data_len);
+
+ return ret;
}
-#endif
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 965698c8b7bf..9cd5abe9e714 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -259,6 +259,7 @@ static struct sas_function_template sft = {
.phy_reset = sas_phy_reset,
.set_phy_speed = sas_set_phy_speed,
.get_linkerrors = sas_get_linkerrors,
+ .smp_handler = sas_smp_handler,
};
struct scsi_transport_template *
@@ -292,7 +293,7 @@ EXPORT_SYMBOL_GPL(sas_domain_release_transport);
static int __init sas_class_init(void)
{
sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!sas_task_cache)
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index a78638df2018..2b8213b1832d 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -39,6 +39,9 @@
#define SAS_DPRINTK(fmt, ...)
#endif
+#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
+#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
+
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_show_class(enum sas_class class, char *buf);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b4b52694497c..7663841eb4cf 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -34,19 +34,19 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
+#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
+#include <linux/freezer.h>
#include <linux/scatterlist.h>
+#include <linux/libata.h>
/* ---------- SCSI Host glue ---------- */
-#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
-#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
-
static void sas_scsi_task_done(struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
@@ -76,8 +76,8 @@ static void sas_scsi_task_done(struct sas_task *task)
hs = DID_NO_CONNECT;
break;
case SAS_DATA_UNDERRUN:
- sc->resid = ts->residual;
- if (sc->request_bufflen - sc->resid < sc->underflow)
+ scsi_set_resid(sc, ts->residual);
+ if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
hs = DID_ERROR;
break;
case SAS_DATA_OVERRUN:
@@ -161,9 +161,9 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
- task->scatter = cmd->request_buffer;
- task->num_scatter = cmd->use_sg;
- task->total_xfer_len = cmd->request_bufflen;
+ task->scatter = scsi_sglist(cmd);
+ task->num_scatter = scsi_sg_count(cmd);
+ task->total_xfer_len = scsi_bufflen(cmd);
task->data_dir = cmd->sc_data_direction;
task->task_done = sas_scsi_task_done;
@@ -171,7 +171,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
return task;
}
-static int sas_queue_up(struct sas_task *task)
+int sas_queue_up(struct sas_task *task)
{
struct sas_ha_struct *sas_ha = task->dev->port->ha;
struct scsi_core *core = &sas_ha->core;
@@ -212,6 +212,16 @@ int sas_queuecommand(struct scsi_cmnd *cmd,
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
+ if (dev_is_sata(dev)) {
+ unsigned long flags;
+
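+ /* SATA/STP commands bypass the SAS task path and are handed
+ * straight to libata through the device's ata_port. */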
+ spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+ res = ata_sas_queuecmd(cmd, scsi_done,
+ dev->sata_dev.ap);
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+ goto out;
+ }
+
res = -ENOMEM;
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
@@ -683,6 +693,16 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
return EH_NOT_HANDLED;
}
+int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
+
+ if (dev_is_sata(dev))
+ return ata_scsi_ioctl(sdev, cmd, arg);
+
+ return -EINVAL;
+}
+
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
@@ -722,10 +742,17 @@ static inline struct domain_device *sas_find_target(struct scsi_target *starget)
int sas_target_alloc(struct scsi_target *starget)
{
struct domain_device *found_dev = sas_find_target(starget);
+ int res;
if (!found_dev)
return -ENODEV;
+ if (dev_is_sata(found_dev)) {
+ res = sas_ata_init_host_and_port(found_dev, starget);
+ if (res)
+ return res;
+ }
+
starget->hostdata = found_dev;
return 0;
}
@@ -740,6 +767,11 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
+ if (dev_is_sata(dev)) {
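+ /* let libata configure the ATA device; the SAS mode-page
+ * based setup below does not apply */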
+ ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
+ return 0;
+ }
+
sas_ha = dev->port->ha;
sas_read_port_mode_page(scsi_dev);
@@ -763,6 +795,10 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
void sas_slave_destroy(struct scsi_device *scsi_dev)
{
+ struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+
+ if (dev_is_sata(dev))
+ ata_port_disable(dev->sata_dev.ap);
}
int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
@@ -868,8 +904,6 @@ static int sas_queue_thread(void *_sas_ha)
{
struct sas_ha_struct *sas_ha = _sas_ha;
- current->flags |= PF_NOFREEZE;
-
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
@@ -981,10 +1015,38 @@ void sas_task_abort(struct sas_task *task)
return;
}
+ if (dev_is_sata(task->dev)) {
+ sas_ata_task_abort(task);
+ return;
+ }
+
scsi_req_abort_cmd(sc);
scsi_schedule_eh(sc->device->host);
}
+int sas_slave_alloc(struct scsi_device *scsi_dev)
+{
+ struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+
+ if (dev_is_sata(dev))
+ return ata_sas_port_init(dev->sata_dev.ap);
+
+ return 0;
+}
+
+void sas_target_destroy(struct scsi_target *starget)
+{
+ struct domain_device *found_dev = sas_find_target(starget);
+
+ if (!found_dev)
+ return;
+
+ if (dev_is_sata(found_dev))
+ ata_sas_port_destroy(found_dev->sata_dev.ap);
+
+ return;
+}
+
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
@@ -998,3 +1060,6 @@ EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_phy_enable);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
+EXPORT_SYMBOL_GPL(sas_slave_alloc);
+EXPORT_SYMBOL_GPL(sas_target_destroy);
+EXPORT_SYMBOL_GPL(sas_ioctl);
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index d1be465d5f55..1c286707dd5f 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
-# * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.emulex.com *
# * *
@@ -27,4 +27,5 @@ endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
- lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o
+ lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
+ lpfc_vport.o lpfc_debugfs.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 82e8f90c4617..f8f64d6485cd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -19,8 +19,9 @@
* included with this package. *
*******************************************************************/
-struct lpfc_sli2_slim;
+#include <scsi/scsi_host.h>
+struct lpfc_sli2_slim;
#define LPFC_MAX_TARGET 256 /* max number of targets supported */
#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
@@ -32,6 +33,20 @@ struct lpfc_sli2_slim;
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
+/*
+ * The following time intervals are used for adjusting SCSI device
+ * queue depths when there is a driver resource error or firmware
+ * resource error.
+ */
+#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
+#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
+
+/* Number of exchanges reserved for discovery to complete */
+#define LPFC_DISC_IOCB_BUFF_COUNT 20
+
+#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
+#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
+
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -61,6 +76,11 @@ struct lpfc_dma_pool {
uint32_t current_count;
};
+struct hbq_dmabuf {
+ struct lpfc_dmabuf dbuf;
+ uint32_t tag;
+};
+
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
#define MEM_PRI 0x100
@@ -90,6 +110,29 @@ typedef struct lpfc_vpd {
uint32_t sli2FwRev;
uint8_t sli2FwName[16];
} rev;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2 :24; /* Reserved */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t chbs : 1; /* Configure Host Backing store */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t chbs : 1; /* Configure Host Backing store */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t rsvd2 :24; /* Reserved */
+#endif
+ } sli3Feat;
} lpfc_vpd_t;
struct lpfc_scsi_buf;
@@ -122,6 +165,7 @@ struct lpfc_stats {
uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsXmitFLOGI;
+ uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
uint32_t elsXmitPRLI;
uint32_t elsXmitADISC;
@@ -165,50 +209,186 @@ struct lpfc_sysfs_mbox {
struct lpfcMboxq * mbox;
};
+struct lpfc_hba;
+
+
+enum discovery_state {
+ LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
+ LPFC_VPORT_FAILED = 1, /* vport has failed */
+ LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
+ LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
+ LPFC_FDISC = 8, /* FDISC sent for vport */
+ LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
+ * configured */
+ LPFC_NS_REG = 10, /* Register with NameServer */
+ LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
+ LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
+ * device authentication / discovery */
+ LPFC_DISC_AUTH = 13, /* Processing ADISC list */
+ LPFC_VPORT_READY = 32,
+};
+
+enum hba_state {
+ LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */
+ LPFC_WARM_START = 1, /* HBA state after selective reset */
+ LPFC_INIT_START = 2, /* Initial state after board reset */
+ LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
+ LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
+ LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
+ LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
+ * CLEAR_LA */
+ LPFC_HBA_READY = 32,
+ LPFC_HBA_ERROR = -1
+};
+
+struct lpfc_vport {
+ struct list_head listentry;
+ struct lpfc_hba *phba;
+ uint8_t port_type;
+#define LPFC_PHYSICAL_PORT 1
+#define LPFC_NPIV_PORT 2
+#define LPFC_FABRIC_PORT 3
+ enum discovery_state port_state;
+
+ uint16_t vpi;
+
+ uint32_t fc_flag; /* FC flags */
+/* Several of these flags are HBA centric and should be moved to
+ * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
+ */
+#define FC_PT2PT 0x1 /* pt2pt with no fabric */
+#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
+#define FC_DISC_TMO 0x4 /* Discovery timer running */
+#define FC_PUBLIC_LOOP 0x8 /* Public loop */
+#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
+#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
+#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
+#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
+#define FC_FABRIC 0x100 /* We are fabric attached */
+#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
+#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
+#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
+#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
+#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
+#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
+#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
+#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
+#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
+
+ struct list_head fc_nodes;
+
+ /* Keep counters for the number of entries in each list. */
+ uint16_t fc_plogi_cnt;
+ uint16_t fc_adisc_cnt;
+ uint16_t fc_reglogin_cnt;
+ uint16_t fc_prli_cnt;
+ uint16_t fc_unmap_cnt;
+ uint16_t fc_map_cnt;
+ uint16_t fc_npr_cnt;
+ uint16_t fc_unused_cnt;
+ struct serv_parm fc_sparam; /* buffer for our service parameters */
+
+ uint32_t fc_myDID; /* fibre channel S_ID */
+ uint32_t fc_prevDID; /* previous fibre channel S_ID */
+
+ int32_t stopped; /* HBA has not been restarted since last ERATT */
+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
+
+ uint32_t num_disc_nodes; /*in addition to hba_state */
+
+ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
+ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
+ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
+ struct lpfc_name fc_nodename; /* fc nodename */
+ struct lpfc_name fc_portname; /* fc portname */
+
+ struct lpfc_work_evt disc_timeout_evt;
+
+ struct timer_list fc_disctmo; /* Discovery rescue timer */
+ uint8_t fc_ns_retry; /* retries for fabric nameserver */
+ uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
+
+ spinlock_t work_port_lock;
+ uint32_t work_port_events; /* Timeout to be handled */
+#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
+#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
+#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+
+#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
+#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
+#define WORKER_FABRIC_BLOCK_TMO 0x400 /* hba: fabric block timeout */
+#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
+#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
+
+ struct timer_list fc_fdmitmo;
+ struct timer_list els_tmofunc;
+
+ int unreg_vpi_cmpl;
+
+ uint8_t load_flag;
+#define FC_LOADING 0x1 /* HBA in process of loading drvr */
+#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
+ char *vname; /* Application assigned name */
+ struct fc_vport *fc_vport;
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct dentry *debug_disc_trc;
+ struct dentry *debug_nodelist;
+ struct dentry *vport_debugfs_root;
+ struct lpfc_disc_trc *disc_trc;
+ atomic_t disc_trc_cnt;
+#endif
+};
+
+struct hbq_s {
+ uint16_t entry_count; /* Current number of HBQ slots */
+ uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
+ uint32_t hbqPutIdx; /* HBQ slot to use */
+ uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
+};
+
+#define LPFC_MAX_HBQS 16
+/* this matches the position in the lpfc_hbq_defs array */
+#define LPFC_ELS_HBQ 0
+
struct lpfc_hba {
struct lpfc_sli sli;
+ uint32_t sli_rev; /* SLI2 or SLI3 */
+ uint32_t sli3_options; /* Mask of enabled SLI3 options */
+#define LPFC_SLI3_ENABLED 0x01
+#define LPFC_SLI3_HBQ_ENABLED 0x02
+#define LPFC_SLI3_NPIV_ENABLED 0x04
+#define LPFC_SLI3_VPORT_TEARDOWN 0x08
+ uint32_t iocb_cmd_size;
+ uint32_t iocb_rsp_size;
+
+ enum hba_state link_state;
+ uint32_t link_flag; /* link state flags */
+#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
+ /* This flag is set while issuing */
+ /* INIT_LINK mailbox command */
+#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
+#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
+
struct lpfc_sli2_slim *slim2p;
+ struct lpfc_dmabuf hbqslimp;
+
dma_addr_t slim2p_mapping;
+
uint16_t pci_cfg_value;
- int32_t hba_state;
-
-#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */
-#define LPFC_WARM_START 1 /* HBA state after selective reset */
-#define LPFC_INIT_START 2 /* Initial state after board reset */
-#define LPFC_INIT_MBX_CMDS 3 /* Initialize HBA with mbox commands */
-#define LPFC_LINK_DOWN 4 /* HBA initialized, link is down */
-#define LPFC_LINK_UP 5 /* Link is up - issue READ_LA */
-#define LPFC_LOCAL_CFG_LINK 6 /* local NPORT Id configured */
-#define LPFC_FLOGI 7 /* FLOGI sent to Fabric */
-#define LPFC_FABRIC_CFG_LINK 8 /* Fabric assigned NPORT Id
- configured */
-#define LPFC_NS_REG 9 /* Register with NameServer */
-#define LPFC_NS_QRY 10 /* Query NameServer for NPort ID list */
-#define LPFC_BUILD_DISC_LIST 11 /* Build ADISC and PLOGI lists for
- * device authentication / discovery */
-#define LPFC_DISC_AUTH 12 /* Processing ADISC list */
-#define LPFC_CLEAR_LA 13 /* authentication cmplt - issue
- CLEAR_LA */
-#define LPFC_HBA_READY 32
-#define LPFC_HBA_ERROR -1
+ uint8_t work_found;
+#define LPFC_MAX_WORKER_ITERATION 4
- int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */
- uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
- uint32_t num_disc_nodes; /*in addition to hba_state */
struct timer_list fc_estabtmo; /* link establishment timer */
- struct timer_list fc_disctmo; /* Discovery rescue timer */
- struct timer_list fc_fdmitmo; /* fdmi timer */
/* These fields used to be binfo */
- struct lpfc_name fc_nodename; /* fc nodename */
- struct lpfc_name fc_portname; /* fc portname */
uint32_t fc_pref_DID; /* preferred D_ID */
- uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint8_t fc_pref_ALPA; /* preferred AL_PA */
uint32_t fc_edtov; /* E_D_TOV timer value */
uint32_t fc_arbtov; /* ARB_TOV timer value */
uint32_t fc_ratov; /* R_A_TOV timer value */
@@ -216,61 +396,21 @@ struct lpfc_hba {
uint32_t fc_altov; /* AL_TOV timer value */
uint32_t fc_crtov; /* C_R_TOV timer value */
uint32_t fc_citov; /* C_I_TOV timer value */
- uint32_t fc_myDID; /* fibre channel S_ID */
- uint32_t fc_prevDID; /* previous fibre channel S_ID */
- struct serv_parm fc_sparam; /* buffer for our service parameters */
struct serv_parm fc_fabparam; /* fabric service parameters buffer */
uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
- uint8_t fc_ns_retry; /* retries for fabric nameserver */
- uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
- uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
- struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
uint32_t lmt;
- uint32_t fc_flag; /* FC flags */
-#define FC_PT2PT 0x1 /* pt2pt with no fabric */
-#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
-#define FC_DISC_TMO 0x4 /* Discovery timer running */
-#define FC_PUBLIC_LOOP 0x8 /* Public loop */
-#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
-#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
-#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
-#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
-#define FC_FABRIC 0x100 /* We are fabric attached */
-#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
-#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
-#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
-#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
-#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
-#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
-#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
-#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
-#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
- /* This flag is set while issuing */
- /* INIT_LINK mailbox command */
-#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
uint32_t fc_topology; /* link topology, from LINK INIT */
struct lpfc_stats fc_stat;
- struct list_head fc_nodes;
-
- /* Keep counters for the number of entries in each list. */
- uint16_t fc_plogi_cnt;
- uint16_t fc_adisc_cnt;
- uint16_t fc_reglogin_cnt;
- uint16_t fc_prli_cnt;
- uint16_t fc_unmap_cnt;
- uint16_t fc_map_cnt;
- uint16_t fc_npr_cnt;
- uint16_t fc_unused_cnt;
struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
- uint32_t wwnn[2];
+ uint8_t wwnn[8];
+ uint8_t wwpn[8];
uint32_t RandomData[7];
uint32_t cfg_log_verbose;
@@ -278,6 +418,9 @@ struct lpfc_hba {
uint32_t cfg_nodev_tmo;
uint32_t cfg_devloss_tmo;
uint32_t cfg_hba_queue_depth;
+ uint32_t cfg_peer_port_login;
+ uint32_t cfg_vport_restrict_login;
+ uint32_t cfg_npiv_enable;
uint32_t cfg_fcp_class;
uint32_t cfg_use_adisc;
uint32_t cfg_ack0;
@@ -304,22 +447,20 @@ struct lpfc_hba {
lpfc_vpd_t vpd; /* vital product data */
- struct Scsi_Host *host;
struct pci_dev *pcidev;
struct list_head work_list;
uint32_t work_ha; /* Host Attention Bits for WT */
uint32_t work_ha_mask; /* HA Bits owned by WT */
uint32_t work_hs; /* HS stored in case of ERRAT */
uint32_t work_status[2]; /* Extra status from SLIM */
- uint32_t work_hba_events; /* Timeout to be handled */
-#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
-#define WORKER_ELS_TMO 0x2 /* ELS timeout */
-#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
-#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
wait_queue_head_t *work_wait;
struct task_struct *worker_thread;
+ struct list_head hbq_buffer_list;
+ uint32_t hbq_count; /* Count of configured HBQs */
+ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
+
unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
void __iomem *slim_memmap_p; /* Kernel memory mapped address for
@@ -334,6 +475,10 @@ struct lpfc_hba {
reg */
void __iomem *HCregaddr; /* virtual address for host ctl reg */
+ struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+ uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
+ uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
+
int brd_no; /* FC board number */
char SerialNumber[32]; /* adapter Serial Number */
@@ -353,7 +498,6 @@ struct lpfc_hba {
uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
- struct timer_list els_tmofunc;
/*
* stat counters
@@ -370,31 +514,69 @@ struct lpfc_hba {
uint32_t total_scsi_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
+ spinlock_t hbalock;
/* pci_mem_pools */
struct pci_pool *lpfc_scsi_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool;
+ struct pci_pool *lpfc_hbq_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
mempool_t *nlp_mem_pool;
struct fc_host_statistics link_stats;
+
+ struct list_head port_list;
+ struct lpfc_vport *pport; /* physical lpfc_vport pointer */
+ uint16_t max_vpi; /* Maximum virtual nports */
+#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
+ unsigned long *vpi_bmask; /* vpi allocation table */
+
+ /* Data structure used by fabric iocb scheduler */
+ struct list_head fabric_iocb_list;
+ atomic_t fabric_iocb_count;
+ struct timer_list fabric_block_timer;
+ unsigned long bit_flags;
+#define FABRIC_COMANDS_BLOCKED 0
+ atomic_t num_rsrc_err;
+ atomic_t num_cmd_success;
+ unsigned long last_rsrc_error_time;
+ unsigned long last_ramp_down_time;
+ unsigned long last_ramp_up_time;
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct dentry *hba_debugfs_root;
+ atomic_t debugfs_vport_count;
+#endif
+
+ /* Fields used for heart beat. */
+ unsigned long last_completion_time;
+ struct timer_list hb_tmofunc;
+ uint8_t hb_outstanding;
};
+static inline struct Scsi_Host *
+lpfc_shost_from_vport(struct lpfc_vport *vport)
+{
+ return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
+}
+
static inline void
-lpfc_set_loopback_flag(struct lpfc_hba *phba) {
+lpfc_set_loopback_flag(struct lpfc_hba *phba)
+{
if (phba->cfg_topology == FLAGS_LOCAL_LB)
- phba->fc_flag |= FC_LOOPBACK_MODE;
+ phba->link_flag |= LS_LOOPBACK_MODE;
else
- phba->fc_flag &= ~FC_LOOPBACK_MODE;
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
}
-struct rnidrsp {
- void *buf;
- uint32_t uniqueid;
- struct list_head list;
- uint32_t data;
-};
+static inline int
+lpfc_is_link_up(struct lpfc_hba *phba)
+{
+ return phba->link_state == LPFC_LINK_UP ||
+ phba->link_state == LPFC_CLEAR_LA ||
+ phba->link_state == LPFC_HBA_READY;
+}
#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
+
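For reviewers of the new lpfc_vport layout: lpfc_shost_from_vport() assumes the vport is embedded in its Scsi_Host's hostdata[], so container_of() can walk back from the hostdata pointer to the host. A minimal sketch of the allocation side, assuming the lpfc and scsi_host headers are included; the helper name "example_create_port" and its template argument are hypothetical (the real allocation lives elsewhere in this patch series).

	static struct lpfc_vport *
	example_create_port(struct lpfc_hba *phba, struct scsi_host_template *sht)
	{
		struct Scsi_Host *shost;
		struct lpfc_vport *vport;

		/* hostdata[] is sized to hold the vport itself */
		shost = scsi_host_alloc(sht, sizeof(struct lpfc_vport));
		if (!shost)
			return NULL;

		vport = (struct lpfc_vport *) shost->hostdata;
		vport->phba = phba;			/* back-pointer to the HBA */
		vport->port_type = LPFC_PHYSICAL_PORT;
		list_add_tail(&vport->listentry, &phba->port_list);

		/* container_of() in lpfc_shost_from_vport() undoes this layout */
		BUG_ON(lpfc_shost_from_vport(vport) != shost);
		return vport;
	}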
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 95fe77e816f8..860a52c090f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,7 @@
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
@@ -76,116 +77,156 @@ static ssize_t
lpfc_info_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
+
return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
}
static ssize_t
lpfc_serialnum_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
}
static ssize_t
lpfc_modeldesc_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
}
static ssize_t
lpfc_modelname_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
}
static ssize_t
lpfc_programtype_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
}
static ssize_t
-lpfc_portnum_show(struct class_device *cdev, char *buf)
+lpfc_vportnum_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
}
static ssize_t
lpfc_fwrev_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
char fwrev[32];
+
lpfc_decode_firmware_rev(phba, fwrev, 1);
- return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
+ return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
}
static ssize_t
lpfc_hdw_show(struct class_device *cdev, char *buf)
{
char hdw[9];
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
lpfc_vpd_t *vp = &phba->vpd;
+
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
static ssize_t
lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
static ssize_t
lpfc_state_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
- int len = 0;
- switch (phba->hba_state) {
- case LPFC_STATE_UNKNOWN:
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len = 0;
+
+ switch (phba->link_state) {
+ case LPFC_LINK_UNKNOWN:
case LPFC_WARM_START:
case LPFC_INIT_START:
case LPFC_INIT_MBX_CMDS:
case LPFC_LINK_DOWN:
+ case LPFC_HBA_ERROR:
len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
break;
case LPFC_LINK_UP:
- case LPFC_LOCAL_CFG_LINK:
- len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
- break;
- case LPFC_FLOGI:
- case LPFC_FABRIC_CFG_LINK:
- case LPFC_NS_REG:
- case LPFC_NS_QRY:
- case LPFC_BUILD_DISC_LIST:
- case LPFC_DISC_AUTH:
case LPFC_CLEAR_LA:
- len += snprintf(buf + len, PAGE_SIZE-len,
- "Link Up - Discovery\n");
- break;
case LPFC_HBA_READY:
- len += snprintf(buf + len, PAGE_SIZE-len,
- "Link Up - Ready:\n");
+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
+
+ switch (vport->port_state) {
+ default:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "initializing\n");
+ break;
+ case LPFC_LOCAL_CFG_LINK:
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Configuring Link\n");
+ break;
+ case LPFC_FDISC:
+ case LPFC_FLOGI:
+ case LPFC_FABRIC_CFG_LINK:
+ case LPFC_NS_REG:
+ case LPFC_NS_QRY:
+ case LPFC_BUILD_DISC_LIST:
+ case LPFC_DISC_AUTH:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Discovery\n");
+ break;
+ case LPFC_VPORT_READY:
+ len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
+ break;
+
+ case LPFC_VPORT_FAILED:
+ len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
+ break;
+
+ case LPFC_VPORT_UNKNOWN:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Unknown\n");
+ break;
+ }
+
if (phba->fc_topology == TOPOLOGY_LOOP) {
- if (phba->fc_flag & FC_PUBLIC_LOOP)
+ if (vport->fc_flag & FC_PUBLIC_LOOP)
len += snprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
else
len += snprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (phba->fc_flag & FC_FABRIC)
+ if (vport->fc_flag & FC_FABRIC)
len += snprintf(buf + len, PAGE_SIZE-len,
" Fabric\n");
else
@@ -193,29 +234,32 @@ lpfc_state_show(struct class_device *cdev, char *buf)
" Point-2-Point\n");
}
}
+
return len;
}
static ssize_t
lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
- phba->fc_unmap_cnt);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ vport->fc_map_cnt + vport->fc_unmap_cnt);
}
static int
-lpfc_issue_lip(struct Scsi_Host *host)
+lpfc_issue_lip(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmboxq;
int mbxstatus = MBXERR_ERROR;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
- (phba->fc_flag & FC_BLOCK_MGMT_IO) ||
- (phba->hba_state != LPFC_HBA_READY))
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (vport->port_state != LPFC_VPORT_READY))
return -EPERM;
pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
@@ -238,9 +282,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
}
lpfc_set_loopback_flag(phba);
- if (mbxstatus == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
if (mbxstatus == MBXERR_ERROR)
@@ -320,8 +362,10 @@ lpfc_selective_reset(struct lpfc_hba *phba)
static ssize_t
lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
int status = -EINVAL;
if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
@@ -336,23 +380,26 @@ lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
static ssize_t
lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
static ssize_t
lpfc_board_mode_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
char * state;
- if (phba->hba_state == LPFC_HBA_ERROR)
+ if (phba->link_state == LPFC_HBA_ERROR)
state = "error";
- else if (phba->hba_state == LPFC_WARM_START)
+ else if (phba->link_state == LPFC_WARM_START)
state = "warm start";
- else if (phba->hba_state == LPFC_INIT_START)
+ else if (phba->link_state == LPFC_INIT_START)
state = "offline";
else
state = "online";
@@ -363,8 +410,9 @@ lpfc_board_mode_show(struct class_device *cdev, char *buf)
static ssize_t
lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int status=0;
@@ -389,11 +437,166 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
return -EIO;
}
+int
+lpfc_get_hba_info(struct lpfc_hba *phba,
+ uint32_t *mxri, uint32_t *axri,
+ uint32_t *mrpi, uint32_t *arpi,
+ uint32_t *mvpi, uint32_t *avpi)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc = 0;
+
+ /*
+ * prevent udev from issuing mailbox commands until the port is
+ * configured.
+ */
+ if (phba->link_state < LPFC_LINK_DOWN ||
+ !phba->mbox_mem_pool ||
+ (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+ return 0;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
+ return 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return 0;
+ memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->mb;
+ pmb->mbxCommand = MBX_READ_CONFIG;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ rc = MBX_NOT_FINISHED;
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return 0;
+ }
+
+ if (mrpi)
+ *mrpi = pmb->un.varRdConfig.max_rpi;
+ if (arpi)
+ *arpi = pmb->un.varRdConfig.avail_rpi;
+ if (mxri)
+ *mxri = pmb->un.varRdConfig.max_xri;
+ if (axri)
+ *axri = pmb->un.varRdConfig.avail_xri;
+ if (mvpi)
+ *mvpi = pmb->un.varRdConfig.max_vpi;
+ if (avpi)
+ *avpi = pmb->un.varRdConfig.avail_vpi;
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return 1;
+}
+
+static ssize_t
+lpfc_max_rpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_used_rpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_max_xri_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_used_xri_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_max_vpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
+ return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_used_vpi_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t cnt, acnt;
+
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+ return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
+}
+
+static ssize_t
+lpfc_npiv_info_show(struct class_device *cdev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (!(phba->max_vpi))
+ return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
+ return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
+}
+
static ssize_t
lpfc_poll_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
@@ -402,8 +605,9 @@ static ssize_t
lpfc_poll_store(struct class_device *cdev, const char *buf,
size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
uint32_t creg_val;
uint32_t old_val;
int val=0;
@@ -417,7 +621,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
if ((val & 0x3) != val)
return -EINVAL;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
old_val = phba->cfg_poll;
@@ -432,16 +636,16 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
lpfc_poll_start_timer(phba);
}
} else if (val != 0x0) {
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EINVAL;
}
if (!(val & DISABLE_FCP_RING_INT) &&
(old_val & DISABLE_FCP_RING_INT))
{
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
del_timer(&phba->fcp_poll_timer);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
creg_val = readl(phba->HCregaddr);
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
@@ -450,7 +654,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
phba->cfg_poll = val;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return strlen(buf);
}
@@ -459,8 +663,9 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
static ssize_t \
lpfc_##attr##_show(struct class_device *cdev, char *buf) \
{ \
- struct Scsi_Host *host = class_to_shost(cdev);\
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\
+ struct Scsi_Host *shost = class_to_shost(cdev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
int val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
@@ -471,8 +676,9 @@ lpfc_##attr##_show(struct class_device *cdev, char *buf) \
static ssize_t \
lpfc_##attr##_show(struct class_device *cdev, char *buf) \
{ \
- struct Scsi_Host *host = class_to_shost(cdev);\
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\
+ struct Scsi_Host *shost = class_to_shost(cdev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
int val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n",\
@@ -514,8 +720,9 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
static ssize_t \
lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
{ \
- struct Scsi_Host *host = class_to_shost(cdev);\
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\
+ struct Scsi_Host *shost = class_to_shost(cdev);\
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
+ struct lpfc_hba *phba = vport->phba;\
int val=0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
@@ -576,7 +783,7 @@ static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
-static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL);
+static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
@@ -592,6 +799,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
+static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
+static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
+static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
+static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
+static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
+static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
+static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -600,8 +814,9 @@ static ssize_t
lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
unsigned int cnt = count;
/*
@@ -634,8 +849,10 @@ static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
static ssize_t
lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)phba->cfg_soft_wwpn);
}
@@ -644,8 +861,9 @@ lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
static ssize_t
lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int stat1=0, stat2=0;
unsigned int i, j, cnt=count;
@@ -680,9 +898,9 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
}
}
phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
- fc_host_port_name(host) = phba->cfg_soft_wwpn;
+ fc_host_port_name(shost) = phba->cfg_soft_wwpn;
if (phba->cfg_soft_wwnn)
- fc_host_node_name(host) = phba->cfg_soft_wwnn;
+ fc_host_node_name(shost) = phba->cfg_soft_wwnn;
dev_printk(KERN_NOTICE, &phba->pcidev->dev,
"lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -777,6 +995,15 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
+int lpfc_sli_mode = 0;
+module_param(lpfc_sli_mode, int, 0);
+MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
+ " 0 - auto (SLI-3 if supported),"
+ " 2 - select SLI-2 even on SLI-3 capable HBAs,"
+ " 3 - select SLI-3");
+
+LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV functionality");
+
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -790,8 +1017,9 @@ MODULE_PARM_DESC(lpfc_nodev_tmo,
static ssize_t
lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
int val = 0;
val = phba->cfg_devloss_tmo;
return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -832,13 +1060,19 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
static void
lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp)
- if (ndlp->rport)
- ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo;
- spin_unlock_irq(phba->host->host_lock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
+ if (ndlp->rport)
+ ndlp->rport->dev_loss_tmo =
+ phba->cfg_devloss_tmo;
+ spin_unlock_irq(shost->host_lock);
+ }
}
static int
@@ -946,6 +1180,33 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
"Max number of FCP commands we can queue to a lpfc HBA");
/*
+# peer_port_login: This parameter allows/prevents logins
+# between peer ports hosted on the same physical port.
+# When this parameter is set to 0, peer ports of the same physical port
+# are not allowed to log in to each other.
+# When this parameter is set to 1, peer ports of the same physical port
+# are allowed to log in to each other.
+# The default value of this parameter is 0.
+*/
+LPFC_ATTR_R(peer_port_login, 0, 0, 1,
+ "Allow peer ports on the same physical port to login to each "
+ "other.");
+
+/*
+# vport_restrict_login: This parameter allows/prevents logins
+# between Virtual Ports and remote initiators.
+# When this parameter is not set (0), Virtual Ports will accept PLOGIs from
+# other initiators and will attempt to PLOGI all remote ports.
+# When this parameter is set (1), Virtual Ports will reject PLOGIs from
+# remote ports and will not attempt to PLOGI other initiators.
+# This parameter does not restrict logins on the physical port.
+# This parameter does not restrict logins to Fabric resident remote ports.
+# The default value of this parameter is 1.
+*/
+LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
+ "Restrict virtual ports login to remote initiators.");
+
+/*
# Some disk devices have a "select ID" or "select Target" capability.
# From a protocol standpoint "select ID" usually means select the
# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
@@ -1088,7 +1349,8 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
-struct class_device_attribute *lpfc_host_attrs[] = {
+
+struct class_device_attribute *lpfc_hba_attrs[] = {
&class_device_attr_info,
&class_device_attr_serialnum,
&class_device_attr_modeldesc,
@@ -1104,6 +1366,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_log_verbose,
&class_device_attr_lpfc_lun_queue_depth,
&class_device_attr_lpfc_hba_queue_depth,
+ &class_device_attr_lpfc_peer_port_login,
+ &class_device_attr_lpfc_vport_restrict_login,
&class_device_attr_lpfc_nodev_tmo,
&class_device_attr_lpfc_devloss_tmo,
&class_device_attr_lpfc_fcp_class,
@@ -1119,9 +1383,17 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_multi_ring_type,
&class_device_attr_lpfc_fdmi_on,
&class_device_attr_lpfc_max_luns,
+ &class_device_attr_lpfc_npiv_enable,
&class_device_attr_nport_evt_cnt,
&class_device_attr_management_version,
&class_device_attr_board_mode,
+ &class_device_attr_max_vpi,
+ &class_device_attr_used_vpi,
+ &class_device_attr_max_rpi,
+ &class_device_attr_used_rpi,
+ &class_device_attr_max_xri,
+ &class_device_attr_used_xri,
+ &class_device_attr_npiv_info,
&class_device_attr_issue_reset,
&class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo,
@@ -1133,12 +1405,15 @@ struct class_device_attribute *lpfc_host_attrs[] = {
};
static ssize_t
-sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
size_t buf_off;
- struct Scsi_Host *host = class_to_shost(container_of(kobj,
- struct class_device, kobj));
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
if ((off + count) > FF_REG_AREA_SIZE)
return -ERANGE;
@@ -1148,30 +1423,31 @@ sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
- spin_lock_irq(phba->host->host_lock);
-
- if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
- spin_unlock_irq(phba->host->host_lock);
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
return -EPERM;
}
+ spin_lock_irq(&phba->hbalock);
for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
writel(*((uint32_t *)(buf + buf_off)),
phba->ctrl_regs_memmap_p + off + buf_off);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
static ssize_t
-sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
size_t buf_off;
uint32_t * tmp_ptr;
- struct Scsi_Host *host = class_to_shost(container_of(kobj,
- struct class_device, kobj));
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
if (off > FF_REG_AREA_SIZE)
return -ERANGE;
@@ -1184,14 +1460,14 @@ sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
if (off % 4 || count % 4 || (unsigned long)buf % 4)
return -EINVAL;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
tmp_ptr = (uint32_t *)(buf + buf_off);
*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
@@ -1200,7 +1476,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
.attr = {
.name = "ctlreg",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 256,
.read = sysfs_ctlreg_read,
@@ -1209,7 +1484,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
static void
-sysfs_mbox_idle (struct lpfc_hba * phba)
+sysfs_mbox_idle(struct lpfc_hba *phba)
{
phba->sysfs_mbox.state = SMBOX_IDLE;
phba->sysfs_mbox.offset = 0;
@@ -1222,12 +1497,15 @@ sysfs_mbox_idle (struct lpfc_hba * phba)
}
static ssize_t
-sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
- struct Scsi_Host * host =
- class_to_shost(container_of(kobj, struct class_device, kobj));
- struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata;
- struct lpfcMboxq * mbox = NULL;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfcMboxq *mbox = NULL;
if ((count + off) > MAILBOX_CMD_SIZE)
return -ERANGE;
@@ -1245,7 +1523,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
}
- spin_lock_irq(host->host_lock);
+ spin_lock_irq(&phba->hbalock);
if (off == 0) {
if (phba->sysfs_mbox.mbox)
@@ -1256,9 +1534,9 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
} else {
if (phba->sysfs_mbox.state != SMBOX_WRITING ||
phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.mbox == NULL ) {
+ phba->sysfs_mbox.mbox == NULL) {
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
}
}
@@ -1268,18 +1546,20 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
phba->sysfs_mbox.offset = off + count;
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
static ssize_t
-sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
- struct Scsi_Host *host =
- class_to_shost(container_of(kobj, struct class_device,
- kobj));
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct class_device *cdev = container_of(kobj, struct class_device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
int rc;
if (off > MAILBOX_CMD_SIZE)
@@ -1294,7 +1574,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
if (off && count == 0)
return 0;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
if (off == 0 &&
phba->sysfs_mbox.state == SMBOX_WRITING &&
@@ -1317,12 +1597,12 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
case MBX_SET_MASK:
case MBX_SET_SLIM:
case MBX_SET_DEBUG:
- if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
printk(KERN_WARNING "mbox_read:Command 0x%x "
"is illegal in on-line state\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
}
case MBX_LOAD_SM:
@@ -1352,48 +1632,48 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
default:
printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
phba->sysfs_mbox.mbox->mb.mbxCommand);
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
}
- if (phba->fc_flag & FC_BLOCK_MGMT_IO) {
+ phba->sysfs_mbox.mbox->vport = vport;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
}
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox (phba,
phba->sysfs_mbox.mbox,
MBX_POLL);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
} else {
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox_wait (phba,
phba->sysfs_mbox.mbox,
lpfc_mbox_tmo_val(phba,
phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
}
if (rc != MBX_SUCCESS) {
if (rc == MBX_TIMEOUT) {
- phba->sysfs_mbox.mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
phba->sysfs_mbox.mbox = NULL;
}
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
}
phba->sysfs_mbox.state = SMBOX_READING;
@@ -1402,7 +1682,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
phba->sysfs_mbox.state != SMBOX_READING) {
printk(KERN_WARNING "mbox_read: Bad State\n");
sysfs_mbox_idle(phba);
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
}
@@ -1413,7 +1693,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
sysfs_mbox_idle(phba);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return count;
}
@@ -1422,7 +1702,6 @@ static struct bin_attribute sysfs_mbox_attr = {
.attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = MAILBOX_CMD_SIZE,
.read = sysfs_mbox_read,
@@ -1430,35 +1709,35 @@ static struct bin_attribute sysfs_mbox_attr = {
};
int
-lpfc_alloc_sysfs_attr(struct lpfc_hba *phba)
+lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
{
- struct Scsi_Host *host = phba->host;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&host->shost_classdev.kobj,
- &sysfs_ctlreg_attr);
+ error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
+ &sysfs_ctlreg_attr);
if (error)
goto out;
- error = sysfs_create_bin_file(&host->shost_classdev.kobj,
- &sysfs_mbox_attr);
+ error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
+ &sysfs_mbox_attr);
if (error)
goto out_remove_ctlreg_attr;
return 0;
out_remove_ctlreg_attr:
- sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
out:
return error;
}
void
-lpfc_free_sysfs_attr(struct lpfc_hba *phba)
+lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
- struct Scsi_Host *host = phba->host;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
- sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr);
+ sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
}
@@ -1469,26 +1748,30 @@ lpfc_free_sysfs_attr(struct lpfc_hba *phba)
static void
lpfc_get_host_port_id(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+
/* note: fc_myDID already in cpu endianness */
- fc_host_port_id(shost) = phba->fc_myDID;
+ fc_host_port_id(shost) = vport->fc_myDID;
}
static void
lpfc_get_host_port_type(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
- if (phba->hba_state == LPFC_HBA_READY) {
+ if (vport->port_type == LPFC_NPIV_PORT) {
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ } else if (lpfc_is_link_up(phba)) {
if (phba->fc_topology == TOPOLOGY_LOOP) {
- if (phba->fc_flag & FC_PUBLIC_LOOP)
+ if (vport->fc_flag & FC_PUBLIC_LOOP)
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
} else {
- if (phba->fc_flag & FC_FABRIC)
+ if (vport->fc_flag & FC_FABRIC)
fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
else
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
@@ -1502,29 +1785,20 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
static void
lpfc_get_host_port_state(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
- if (phba->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else {
- switch (phba->hba_state) {
- case LPFC_STATE_UNKNOWN:
- case LPFC_WARM_START:
- case LPFC_INIT_START:
- case LPFC_INIT_MBX_CMDS:
+ switch (phba->link_state) {
+ case LPFC_LINK_UNKNOWN:
case LPFC_LINK_DOWN:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case LPFC_LINK_UP:
- case LPFC_LOCAL_CFG_LINK:
- case LPFC_FLOGI:
- case LPFC_FABRIC_CFG_LINK:
- case LPFC_NS_REG:
- case LPFC_NS_QRY:
- case LPFC_BUILD_DISC_LIST:
- case LPFC_DISC_AUTH:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
/* Links up, beyond this port_type reports state */
@@ -1545,11 +1819,12 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
static void
lpfc_get_host_speed(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
spin_lock_irq(shost->host_lock);
- if (phba->hba_state == LPFC_HBA_READY) {
+ if (lpfc_is_link_up(phba)) {
switch(phba->fc_linkspeed) {
case LA_1GHZ_LINK:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
@@ -1575,39 +1850,31 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
static void
lpfc_get_host_fabric_name (struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
u64 node_name;
spin_lock_irq(shost->host_lock);
- if ((phba->fc_flag & FC_FABRIC) ||
+ if ((vport->fc_flag & FC_FABRIC) ||
((phba->fc_topology == TOPOLOGY_LOOP) &&
- (phba->fc_flag & FC_PUBLIC_LOOP)))
+ (vport->fc_flag & FC_PUBLIC_LOOP)))
node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
else
/* fabric is local port if there is no F/FL_Port */
- node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
+ node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
spin_unlock_irq(shost->host_lock);
fc_host_fabric_name(shost) = node_name;
}
-static void
-lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
-{
- struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
-
- spin_lock_irq(shost->host_lock);
- lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
- spin_unlock_irq(shost->host_lock);
-}
-
static struct fc_host_statistics *
lpfc_get_stats(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct fc_host_statistics *hs = &phba->link_stats;
struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
@@ -1615,7 +1882,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
unsigned long seconds;
int rc = 0;
- if (phba->fc_flag & FC_BLOCK_MGMT_IO)
+ /*
+ * prevent udev from issuing mailbox commands until the port is
+ * configured.
+ */
+ if (phba->link_state < LPFC_LINK_DOWN ||
+ !phba->mbox_mem_pool ||
+ (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+ return NULL;
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return NULL;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1627,17 +1903,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_STATUS;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
@@ -1653,18 +1928,17 @@ lpfc_get_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
- mempool_free( pmboxq, phba->mbox_mem_pool);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
return NULL;
}
@@ -1711,14 +1985,15 @@ lpfc_get_stats(struct Scsi_Host *shost)
static void
lpfc_reset_stats(struct Scsi_Host *shost)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
- if (phba->fc_flag & FC_BLOCK_MGMT_IO)
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
return;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1731,17 +2006,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxOwner = OWN_HOST;
pmb->un.varWords[0] = 0x1; /* reset request */
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (rc != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
return;
}
@@ -1750,17 +2024,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
pmb->mbxCommand = MBX_READ_LNK_STAT;
pmb->mbxOwner = OWN_HOST;
pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
- if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
(!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT)
- pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- else
+ if (rc != MBX_TIMEOUT)
mempool_free( pmboxq, phba->mbox_mem_pool);
return;
}
@@ -1789,13 +2062,13 @@ lpfc_reset_stats(struct Scsi_Host *shost)
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata;
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_nodelist *ndlp;
spin_lock_irq(shost->host_lock);
/* Search for this, mapped, target ID */
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
spin_unlock_irq(shost->host_lock);
@@ -1885,8 +2158,66 @@ struct fc_function_template lpfc_transport_functions = {
.get_host_fabric_name = lpfc_get_host_fabric_name,
.show_host_fabric_name = 1,
- .get_host_symbolic_name = lpfc_get_host_symbolic_name,
- .show_host_symbolic_name = 1,
+ /*
+ * The LPFC driver treats linkdown handling as target loss events
+ * so there are no sysfs handlers for link_down_tmo.
+ */
+
+ .get_fc_host_stats = lpfc_get_stats,
+ .reset_fc_host_stats = lpfc_reset_stats,
+
+ .dd_fcrport_size = sizeof(struct lpfc_rport_data),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_port_id = lpfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_starget_node_name = lpfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = lpfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ .issue_fc_host_lip = lpfc_issue_lip,
+ .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
+ .terminate_rport_io = lpfc_terminate_rport_io,
+
+ .vport_create = lpfc_vport_create,
+ .vport_delete = lpfc_vport_delete,
+ .dd_fcvport_size = sizeof(struct lpfc_vport *),
+};
+
+struct fc_function_template lpfc_vport_transport_functions = {
+ /* fixed attributes the driver supports */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* dynamic attributes the driver supports */
+ .get_host_port_id = lpfc_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = lpfc_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = lpfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = lpfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = lpfc_get_host_fabric_name,
+ .show_host_fabric_name = 1,
/*
* The LPFC driver treats linkdown handling as target loss events
@@ -1915,6 +2246,8 @@ struct fc_function_template lpfc_transport_functions = {
.issue_fc_host_lip = lpfc_issue_lip,
.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
.terminate_rport_io = lpfc_terminate_rport_io,
+
+ .vport_disable = lpfc_vport_disable,
};
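With the split above, the driver now carries one fc_function_template for the physical port and a second, reduced template for NPIV vports. A minimal sketch of how both templates are typically attached to the FC transport at module init; the template-pointer names and the init routine below are illustrative only and are not taken from this patch:

	static struct scsi_transport_template *lpfc_transport_template;
	static struct scsi_transport_template *lpfc_vport_transport_template;

	static int __init lpfc_init(void)
	{
		/* Attach the physical-port template first ... */
		lpfc_transport_template =
			fc_attach_transport(&lpfc_transport_functions);
		if (lpfc_transport_template == NULL)
			return -ENOMEM;

		/* ... then the template used by NPIV vports. */
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
		return 0;
	}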
void
@@ -1937,6 +2270,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
lpfc_max_luns_init(phba, lpfc_max_luns);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+ lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
+ lpfc_npiv_enable_init(phba, lpfc_npiv_enable);
+ lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b8c2a8862d8c..e19d1a746586 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,92 +23,114 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
struct lpfc_dmabuf *mp);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
- uint32_t);
-void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
+int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+ LPFC_MBOXQ_t *, uint32_t);
+void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
-
+void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *);
-void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int);
-void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *);
-void lpfc_set_disctmo(struct lpfc_hba *);
-int lpfc_can_disctmo(struct lpfc_hba *);
-int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
+void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_set_disctmo(struct lpfc_vport *);
+int lpfc_can_disctmo(struct lpfc_vport *);
+int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
+void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_default_rpis(struct lpfc_vport *);
+void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
+
int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *, struct lpfc_nodelist *);
-void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
+ struct lpfc_iocbq *, struct lpfc_nodelist *);
+void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
-struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
-void lpfc_disc_list_loopmap(struct lpfc_hba *);
-void lpfc_disc_start(struct lpfc_hba *);
-void lpfc_disc_flush_list(struct lpfc_hba *);
+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
+void lpfc_disc_list_loopmap(struct lpfc_vport *);
+void lpfc_disc_start(struct lpfc_vport *);
+void lpfc_disc_flush_list(struct lpfc_vport *);
+void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
-struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
-struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
+struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
+void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
int lpfc_do_work(void *);
-int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
+int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
-int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+ struct lpfc_nodelist *);
+void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
+int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
struct serv_parm *, uint32_t);
-int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp);
+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+int lpfc_els_chk_latt(struct lpfc_vport *);
int lpfc_els_abort_flogi(struct lpfc_hba *);
-int lpfc_initial_flogi(struct lpfc_hba *);
-int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t);
-int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
+int lpfc_initial_flogi(struct lpfc_vport *);
+int lpfc_initial_fdisc(struct lpfc_vport *);
+int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
+int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
+int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
-int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
+int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
-int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
- struct lpfc_nodelist *);
-int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
+ struct lpfc_nodelist *, LPFC_MBOXQ_t *);
+int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
struct lpfc_nodelist *);
-int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
+int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
struct lpfc_nodelist *);
-void lpfc_cancel_retry_delay_tmo(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_els_retry_delay(unsigned long);
void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
+void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *);
void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-int lpfc_els_handle_rscn(struct lpfc_hba *);
-int lpfc_els_flush_rscn(struct lpfc_hba *);
-int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
-void lpfc_els_flush_cmd(struct lpfc_hba *);
-int lpfc_els_disc_adisc(struct lpfc_hba *);
-int lpfc_els_disc_plogi(struct lpfc_hba *);
+int lpfc_els_handle_rscn(struct lpfc_vport *);
+void lpfc_els_flush_rscn(struct lpfc_vport *);
+int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
+void lpfc_els_flush_cmd(struct lpfc_vport *);
+int lpfc_els_disc_adisc(struct lpfc_vport *);
+int lpfc_els_disc_plogi(struct lpfc_vport *);
void lpfc_els_timeout(unsigned long);
-void lpfc_els_timeout_handler(struct lpfc_hba *);
+void lpfc_els_timeout_handler(struct lpfc_vport *);
+void lpfc_hb_timeout(unsigned long);
+void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
-int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
+int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
+int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
-void lpfc_fdmi_tmo_handler(struct lpfc_hba *);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -136,16 +158,23 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
+ LPFC_MBOXQ_t *);
+struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
+
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
void lpfc_poll_start_timer(struct lpfc_hba * phba);
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
+void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
void lpfc_reset_barrier(struct lpfc_hba * phba);
@@ -154,6 +183,7 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
int lpfc_sli_brdrestart(struct lpfc_hba *);
int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
@@ -164,27 +194,36 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
-int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
struct lpfc_sli_ring *,
dma_addr_t);
+int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
+int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
+void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
+struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
+int lpfc_sli_hbq_size(void);
int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
- uint64_t, lpfc_ctx_cmd);
+ uint64_t, lpfc_ctx_cmd);
int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
- uint64_t, uint32_t, lpfc_ctx_cmd);
+ uint64_t, uint32_t, lpfc_ctx_cmd);
void lpfc_mbox_timeout(unsigned long);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
-struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t);
-struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *);
+struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
+ void *);
+struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
+struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
+ struct lpfc_name *);
int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
- uint32_t timeout);
+ uint32_t timeout);
int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring,
@@ -195,25 +234,56 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb);
+void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
+
void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
+void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
/* Function prototypes. */
const char* lpfc_info(struct Scsi_Host *);
-void lpfc_scan_start(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
void lpfc_get_cfgparam(struct lpfc_hba *);
-int lpfc_alloc_sysfs_attr(struct lpfc_hba *);
-void lpfc_free_sysfs_attr(struct lpfc_hba *);
-extern struct class_device_attribute *lpfc_host_attrs[];
+int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
+void lpfc_free_sysfs_attr(struct lpfc_vport *);
+extern struct class_device_attribute *lpfc_hba_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct fc_function_template lpfc_transport_functions;
+extern struct fc_function_template lpfc_vport_transport_functions;
+extern int lpfc_sli_mode;
-void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
+int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
void lpfc_terminate_rport_io(struct fc_rport *);
void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
+struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
+int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
+void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+void destroy_port(struct lpfc_vport *);
+int lpfc_get_instance(void);
+void lpfc_host_attrib_init(struct Scsi_Host *);
+
+extern void lpfc_debugfs_initialize(struct lpfc_vport *);
+extern void lpfc_debugfs_terminate(struct lpfc_vport *);
+extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
+ uint32_t, uint32_t);
+
+/* Interface exported by fabric iocb scheduler */
+int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
+void lpfc_fabric_abort_vport(struct lpfc_vport *);
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+void lpfc_fabric_abort_hba(struct lpfc_hba *);
+void lpfc_fabric_abort_flogi(struct lpfc_hba *);
+void lpfc_fabric_block_timeout(unsigned long);
+void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
+void lpfc_adjust_queue_depth(struct lpfc_hba *);
+void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 34a9e3bb2614..ae9d6f385a6c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -40,6 +40,8 @@
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
* incapable of reporting */
@@ -58,25 +60,69 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION;
/*
* lpfc_ct_unsol_event
*/
+static void
+lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
+{
+ if (!mp) {
+ printk(KERN_ERR "%s (%d): Unsolited CT, no buffer, "
+ "piocbq = %p, status = x%x, mp = %p, size = %d\n",
+ __FUNCTION__, __LINE__,
+ piocbq, piocbq->iocb.ulpStatus, mp, size);
+ }
+
+ printk(KERN_ERR "%s (%d): Ignoring unsolicted CT piocbq = %p, "
+ "buffer = %p, size = %d, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ piocbq, mp, size,
+ piocbq->iocb.ulpStatus);
+
+}
+
+static void
+lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+ struct lpfc_dmabuf *mp, uint32_t size)
+{
+ if (!mp) {
+ printk(KERN_ERR "%s (%d): Unsolited CT, no "
+ "HBQ buffer, piocbq = %p, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ piocbq, piocbq->iocb.ulpStatus);
+ } else {
+ lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+ printk(KERN_ERR "%s (%d): Ignoring unsolicted CT "
+ "piocbq = %p, buffer = %p, size = %d, "
+ "status = x%x\n",
+ __FUNCTION__, __LINE__,
+ piocbq, mp, size, piocbq->iocb.ulpStatus);
+ }
+}
+
void
-lpfc_ct_unsol_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
+lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocbq)
{
- struct lpfc_iocbq *next_piocbq;
- struct lpfc_dmabuf *pmbuf = NULL;
- struct lpfc_dmabuf *matp, *next_matp;
- uint32_t ctx = 0, size = 0, cnt = 0;
+ struct lpfc_dmabuf *mp = NULL;
IOCB_t *icmd = &piocbq->iocb;
- IOCB_t *save_icmd = icmd;
- int i, go_exit = 0;
- struct list_head head;
+ int i;
+ struct lpfc_iocbq *iocbq;
+ dma_addr_t paddr;
+ uint32_t size;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+
+ piocbq->context2 = NULL;
+ piocbq->context3 = NULL;
- if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
- lpfc_post_buffer(phba, pring, 0, 1);
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba, pring, 0, 1);
return;
}
@@ -86,66 +132,56 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
if (icmd->ulpBdeCount == 0)
return;
- INIT_LIST_HEAD(&head);
- list_add_tail(&head, &piocbq->list);
-
- list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
- icmd = &piocbq->iocb;
- if (ctx == 0)
- ctx = (uint32_t) (icmd->ulpContext);
- if (icmd->ulpBdeCount == 0)
- continue;
-
- for (i = 0; i < icmd->ulpBdeCount; i++) {
- matp = lpfc_sli_ringpostbuf_get(phba, pring,
- getPaddr(icmd->un.
- cont64[i].
- addrHigh,
- icmd->un.
- cont64[i].
- addrLow));
- if (!matp) {
- /* Insert lpfc log message here */
- lpfc_post_buffer(phba, pring, cnt, 1);
- go_exit = 1;
- goto ct_unsol_event_exit_piocbq;
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ list_for_each_entry(iocbq, &piocbq->list, list) {
+ icmd = &iocbq->iocb;
+ if (icmd->ulpBdeCount == 0) {
+ printk(KERN_ERR "%s (%d): Unsolited CT, no "
+ "BDE, iocbq = %p, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ iocbq, iocbq->iocb.ulpStatus);
+ continue;
}
- /* Typically for Unsolicited CT requests */
- if (!pmbuf) {
- pmbuf = matp;
- INIT_LIST_HEAD(&pmbuf->list);
- } else
- list_add_tail(&matp->list, &pmbuf->list);
-
- size += icmd->un.cont64[i].tus.f.bdeSize;
- cnt++;
+ size = icmd->un.cont64[0].tus.f.bdeSize;
+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
+ lpfc_in_buf_free(phba, bdeBuf1);
+ if (icmd->ulpBdeCount == 2) {
+ lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
+ size);
+ lpfc_in_buf_free(phba, bdeBuf2);
+ }
}
+ } else {
+ struct lpfc_iocbq *next;
+
+ list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
+ icmd = &iocbq->iocb;
+ if (icmd->ulpBdeCount == 0) {
+ printk(KERN_ERR "%s (%d): Unsolited CT, no "
+ "BDE, iocbq = %p, status = x%x\n",
+ __FUNCTION__, __LINE__,
+ iocbq, iocbq->iocb.ulpStatus);
+ continue;
+ }
- icmd->ulpBdeCount = 0;
- }
-
- lpfc_post_buffer(phba, pring, cnt, 1);
- if (save_icmd->ulpStatus) {
- go_exit = 1;
- }
-
-ct_unsol_event_exit_piocbq:
- list_del(&head);
- if (pmbuf) {
- list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
- lpfc_mbuf_free(phba, matp->virt, matp->phys);
- list_del(&matp->list);
- kfree(matp);
+ for (i = 0; i < icmd->ulpBdeCount; i++) {
+ paddr = getPaddr(icmd->un.cont64[i].addrHigh,
+ icmd->un.cont64[i].addrLow);
+ mp = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ size = icmd->un.cont64[i].tus.f.bdeSize;
+ lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+ lpfc_in_buf_free(phba, mp);
+ }
+ list_del(&iocbq->list);
+ lpfc_sli_release_iocbq(phba, iocbq);
}
- lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
- kfree(pmbuf);
}
- return;
}
static void
-lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
+lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
struct lpfc_dmabuf *mlast, *next_mlast;
@@ -160,7 +196,7 @@ lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
}
static struct lpfc_dmabuf *
-lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
+lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
uint32_t size, int *entries)
{
struct lpfc_dmabuf *mlist = NULL;
@@ -181,7 +217,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
INIT_LIST_HEAD(&mp->list);
- if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
+ if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
+ cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
else
mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
@@ -201,8 +238,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
/* build buffer ptr list for IOCB */
- bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
- bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
bpl->tus.f.bdeSize = (uint16_t) cnt;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
@@ -215,24 +252,49 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
return mlist;
}
+int
+lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
+{
+ struct lpfc_dmabuf *buf_ptr;
+
+ if (ctiocb->context1) {
+ buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ ctiocb->context1 = NULL;
+ }
+ if (ctiocb->context2) {
+ lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
+ ctiocb->context2 = NULL;
+ }
+
+ if (ctiocb->context3) {
+ buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ ctiocb->context1 = NULL;
+ }
+ lpfc_sli_release_iocbq(phba, ctiocb);
+ return 0;
+}
+
static int
-lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
+lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
- uint32_t tmo)
+ uint32_t tmo, uint8_t retry)
{
-
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
+ int rc;
/* Allocate buffer for command iocb */
- spin_lock_irq(phba->host->host_lock);
geniocb = lpfc_sli_get_iocbq(phba);
- spin_unlock_irq(phba->host->host_lock);
if (geniocb == NULL)
return 1;
@@ -272,31 +334,40 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
icmd->ulpClass = CLASS3;
icmd->ulpContext = ndlp->nlp_rpi;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ /* For GEN_REQUEST64_CR, use the RPI */
+ icmd->ulpCt_h = 0;
+ icmd->ulpCt_l = 0;
+ }
+
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0119 Issue GEN REQ IOCB for NPORT x%x "
- "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
- icmd->ulpIoTag, phba->hba_state);
+ "%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
+ "Data: x%x x%x\n", phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, icmd->ulpIoTag,
+ vport->port_state);
geniocb->iocb_cmpl = cmpl;
geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
- spin_lock_irq(phba->host->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
+ geniocb->vport = vport;
+ geniocb->retry = retry;
+ rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+
+ if (rc == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, geniocb);
- spin_unlock_irq(phba->host->host_lock);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static int
-lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
+lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
- uint32_t rsp_size)
+ uint32_t rsp_size, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
struct lpfc_dmabuf *outmp;
int cnt = 0, status;
@@ -310,8 +381,8 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
if (!outmp)
return -ENOMEM;
- status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
- cnt+1, 0);
+ status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
+ cnt+1, 0, retry);
if (status) {
lpfc_free_ct_rsp(phba, outmp);
return -ENOMEM;
@@ -319,20 +390,35 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
return 0;
}
+static struct lpfc_vport *
+lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
+
+ struct lpfc_vport *vport_curr;
+
+ list_for_each_entry(vport_curr, &phba->port_list, listentry) {
+ if ((vport_curr->fc_myDID) &&
+ (vport_curr->fc_myDID == did))
+ return vport_curr;
+ }
+
+ return NULL;
+}
+
static int
-lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
+lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ct_request *Response =
(struct lpfc_sli_ct_request *) mp->virt;
struct lpfc_nodelist *ndlp = NULL;
struct lpfc_dmabuf *mlast, *next_mp;
uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
- uint32_t Did;
- uint32_t CTentry;
+ uint32_t Did, CTentry;
int Cnt;
struct list_head head;
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
+ vport->num_disc_nodes = 0;
list_add_tail(&head, &mp->list);
@@ -350,39 +436,96 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
/* Loop through entire NameServer list of DIDs */
while (Cnt >= sizeof (uint32_t)) {
-
/* Get next DID from NameServer List */
CTentry = *ctptr++;
Did = ((be32_to_cpu(CTentry)) & Mask_DID);
ndlp = NULL;
- if (Did != phba->fc_myDID) {
- /* Check for rscn processing or not */
- ndlp = lpfc_setup_disc_node(phba, Did);
- }
- /* Mark all node table entries that are in the
- Nameserver */
- if (ndlp) {
- /* NameServer Rsp */
- lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0238 Process x%x NameServer"
- " Rsp Data: x%x x%x x%x\n",
- phba->brd_no,
+
+ /*
+ * Check for RSCN processing or not.
+ * To conserve RPIs, filter out addresses for other
+ * vports on the same physical HBA.
+ */
+ if ((Did != vport->fc_myDID) &&
+ ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
+ phba->cfg_peer_port_login)) {
+ if ((vport->port_type != LPFC_NPIV_PORT) ||
+ (vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
+ (!phba->cfg_vport_restrict_login)) {
+ ndlp = lpfc_setup_disc_node(vport, Did);
+ if (ndlp) {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Parse GID_FTrsp: "
+ "did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag,
- phba->fc_flag,
- phba->fc_rscn_id_cnt);
- } else {
- /* NameServer Rsp */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0239 Skip x%x NameServer "
- "Rsp Data: x%x x%x x%x\n",
- phba->brd_no,
- Did, Size, phba->fc_flag,
- phba->fc_rscn_id_cnt);
+ vport->fc_flag);
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY,
+ "%d (%d):0238 Process "
+ "x%x NameServer Rsp"
+ "Data: x%x x%x x%x\n",
+ phba->brd_no,
+ vport->vpi, Did,
+ ndlp->nlp_flag,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Skip1 GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY,
+ "%d (%d):0239 Skip x%x "
+ "NameServer Rsp Data: "
+ "x%x x%x\n",
+ phba->brd_no,
+ vport->vpi, Did,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+
+ } else {
+ if (!(vport->fc_flag & FC_RSCN_MODE) ||
+ (lpfc_rscn_payload_check(vport, Did))) {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Query GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ if (lpfc_ns_cmd(vport,
+ SLI_CTNS_GFF_ID,
+ 0, Did) == 0)
+ vport->num_disc_nodes++;
+ }
+ else {
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_CT,
+ "Skip2 GID_FTrsp: "
+ "did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY,
+ "%d (%d):0245 Skip x%x "
+ "NameServer Rsp Data: "
+ "x%x x%x\n",
+ phba->brd_no,
+ vport->vpi, Did,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+ }
}
-
if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
goto nsout1;
Cnt -= sizeof (uint32_t);
@@ -393,190 +536,369 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
nsout1:
list_del(&head);
-
- /*
- * The driver has cycled through all Nports in the RSCN payload.
- * Complete the handling by cleaning up and marking the
- * current driver state.
- */
- if (phba->hba_state == LPFC_HBA_READY) {
- lpfc_els_flush_rscn(phba);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
- spin_unlock_irq(phba->host->host_lock);
- }
return 0;
}
-
-
-
static void
-lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_sli *psli;
struct lpfc_dmabuf *bmp;
- struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
- struct lpfc_nodelist *ndlp;
struct lpfc_sli_ct_request *CTrsp;
+ int rc;
- psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
-
irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
- goto out;
- }
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT cmpl: status:x%x/x%x rtry:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+
+ /* Don't bother processing response if vport is being torn down. */
+ if (vport->load_flag & FC_UNLOADING)
+ goto out;
+
+
+ if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0216 Link event during NS query\n",
+ phba->brd_no, vport->vpi);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ goto out;
+ }
+
+ if (irsp->ulpStatus) {
/* Check for retry */
- if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- phba->fc_ns_retry++;
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES))
+ vport->fc_ns_retry++;
/* CT command is being retried */
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
- if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
- 0) {
- goto out;
- }
- }
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+ vport->fc_ns_retry, 0);
+ if (rc == 0)
+ goto out;
}
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
+ phba->brd_no, vport->vpi, irsp->ulpStatus,
+ vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0208 NameServer Rsp "
+ "%d (%d):0208 NameServer Rsp "
"Data: x%x\n",
- phba->brd_no,
- phba->fc_flag);
- lpfc_ns_rsp(phba, outp,
+ phba->brd_no, vport->vpi,
+ vport->fc_flag);
+ lpfc_ns_rsp(vport, outp,
(uint32_t) (irsp->un.genreq64.bdl.bdeSize));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0240 NameServer Rsp Error "
+ "%d (%d):0240 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
- phba->fc_flag);
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
+
} else {
/* NameServer Rsp Error */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0241 NameServer Rsp Error "
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0241 NameServer Rsp Error "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
CTrsp->CommandResponse.bits.CmdRsp,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation,
- phba->fc_flag);
+ vport->fc_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x",
+ (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
+ (uint32_t) CTrsp->ReasonCode,
+ (uint32_t) CTrsp->Explanation);
}
}
/* Link up / RSCN discovery */
- lpfc_disc_start(phba);
+ if (vport->num_disc_nodes == 0) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ }
+ else
+ lpfc_els_flush_rscn(vport);
+ }
+
+ lpfc_disc_start(vport);
+ }
out:
- lpfc_free_ct_rsp(phba, outp);
- lpfc_mbuf_free(phba, inp->virt, inp->phys);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(inp);
- kfree(bmp);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
+void
+lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
+ struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp;
+ int did;
+ uint8_t fbits;
+ struct lpfc_nodelist *ndlp;
+
+ did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
+ did = be32_to_cpu(did);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GFF_ID cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ /* Good status, continue checking */
+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
+
+ if (CTrsp->CommandResponse.bits.CmdRsp ==
+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
+ if ((fbits & FC4_FEATURE_INIT) &&
+ !(fbits & FC4_FEATURE_TARGET)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0245 Skip x%x GFF "
+ "NameServer Rsp Data: (init) "
+ "x%x x%x\n", phba->brd_no,
+ vport->vpi, did, fbits,
+ vport->fc_rscn_id_cnt);
+ goto out;
+ }
+ }
+ }
+ else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0267 NameServer GFF Rsp"
+ " x%x Error (%d %d) Data: x%x x%x\n",
+ phba->brd_no, vport->vpi, did,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->fc_flag, vport->fc_rscn_id_cnt)
+ }
+
+ /* This is a target port, unregistered port, or the GFF_ID failed */
+ ndlp = lpfc_setup_disc_node(vport, did);
+ if (ndlp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0242 Process x%x GFF "
+ "NameServer Rsp Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ did, ndlp->nlp_flag, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0243 Skip x%x GFF "
+ "NameServer Rsp Data: x%x x%x\n",
+ phba->brd_no, vport->vpi, did,
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ }
+out:
+ /* Link up / RSCN discovery */
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
+ if (vport->num_disc_nodes == 0) {
+ /*
+ * The driver has cycled through all Nports in the RSCN payload.
+ * Complete the handling by cleaning up and marking the
+ * current driver state.
+ */
+ if (vport->port_state >= LPFC_DISC_AUTH) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_els_flush_rscn(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
+ spin_unlock_irq(shost->host_lock);
+ }
+ else
+ lpfc_els_flush_rscn(vport);
+ }
+ lpfc_disc_start(vport);
+ }
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+}
+
+
static void
-lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- struct lpfc_sli *psli;
- struct lpfc_dmabuf *bmp;
+ struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
+ int cmdcode, rc;
+ uint8_t retry;
+ uint32_t latt;
- psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
irsp = &rspiocb->iocb;
+ cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
+ CommandResponse.bits.CmdRsp);
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ latt = lpfc_els_chk_latt(vport);
+
/* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0209 RFT request completes ulpStatus x%x "
- "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
- CTrsp->CommandResponse.bits.CmdRsp);
+ "%d (%d):0209 RFT request completes, latt %d, "
+ "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ phba->brd_no, vport->vpi, latt, irsp->ulpStatus,
+ CTrsp->CommandResponse.bits.CmdRsp,
+ cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
- lpfc_free_ct_rsp(phba, outp);
- lpfc_mbuf_free(phba, inp->virt, inp->phys);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(inp);
- kfree(bmp);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "CT cmd cmpl: status:x%x/x%x cmd:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+
+ if (irsp->ulpStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0268 NS cmd %x Error (%d %d)\n",
+ phba->brd_no, vport->vpi, cmdcode,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
+ goto out;
+
+ retry = cmdiocb->retry;
+ if (retry >= LPFC_MAX_NS_RETRY)
+ goto out;
+
+ retry++;
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0216 Retrying NS cmd %x\n",
+ phba->brd_no, vport->vpi, cmdcode);
+ rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
+ if (rc == 0)
+ goto out;
+ }
+
+out:
+ lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
static void
-lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
static void
-lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
static void
-lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
-void
-lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- char fwrev[16];
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_vport *vport = cmdiocb->vport;
- lpfc_decode_firmware_rev(phba, fwrev, 0);
+ if (irsp->ulpStatus != IOSTAT_SUCCESS)
+ vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
- sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
- fwrev, lpfc_release_version);
+ lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
return;
}
+int
+lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
+ size_t size)
+{
+ int n;
+ uint8_t *wwn = vport->phba->wwpn;
+
+ n = snprintf(symbol, size,
+ "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ wwn[0], wwn[1], wwn[2], wwn[3],
+ wwn[4], wwn[5], wwn[6], wwn[7]);
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ return n;
+
+ if (n < size)
+ n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
+
+ if (n < size && vport->vname)
+ n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
+ return n;
+}
+
+int
+lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
+ size_t size)
+{
+ char fwrev[16];
+ int n;
+
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+
+ n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
+ vport->phba->ModelName, fwrev, lpfc_release_version);
+ return n;
+}
+
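Both helpers above follow snprintf() semantics: the returned length is the size the full symbolic name would need, which can exceed the destination buffer when the string is truncated. A minimal usage sketch; the buffer size and the clamping of the returned length are illustrative assumptions, not taken from this patch:

	char symbname[256];
	int len;

	len = lpfc_vport_symbolic_port_name(vport, symbname, sizeof(symbname));
	/* Clamp to what actually fits so a length reported elsewhere
	 * (e.g. in a name-server registration) never exceeds the buffer.
	 */
	if (len >= sizeof(symbname))
		len = sizeof(symbname) - 1;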
/*
* lpfc_ns_cmd
* Description:
@@ -585,55 +907,76 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
* LI_CTNS_RFT_ID
*/
int
-lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
+ uint8_t retry, uint32_t context)
{
+ struct lpfc_nodelist * ndlp;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *mp, *bmp;
struct lpfc_sli_ct_request *CtReq;
struct ulp_bde64 *bpl;
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *) = NULL;
uint32_t rsp_size = 1024;
+ size_t size;
+ int rc = 0;
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
+ rc = 1;
+ goto ns_cmd_exit;
+ }
/* fill in BDEs for command */
/* Allocate buffer for command payload */
mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ if (!mp) {
+ rc = 2;
goto ns_cmd_exit;
+ }
INIT_LIST_HEAD(&mp->list);
mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
- if (!mp->virt)
+ if (!mp->virt) {
+ rc = 3;
goto ns_cmd_free_mp;
+ }
/* Allocate buffer for Buffer ptr list */
bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
+ if (!bmp) {
+ rc = 4;
goto ns_cmd_free_mpvirt;
+ }
INIT_LIST_HEAD(&bmp->list);
bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
- if (!bmp->virt)
+ if (!bmp->virt) {
+ rc = 5;
goto ns_cmd_free_bmp;
+ }
/* NameServer Req */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0236 NameServer Req Data: x%x x%x x%x\n",
- phba->brd_no, cmdcode, phba->fc_flag,
- phba->fc_rscn_id_cnt);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
bpl = (struct ulp_bde64 *) bmp->virt;
memset(bpl, 0, sizeof(struct ulp_bde64));
- bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
- bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
bpl->tus.f.bdeFlags = 0;
if (cmdcode == SLI_CTNS_GID_FT)
bpl->tus.f.bdeSize = GID_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_GFF_ID)
+ bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFT_ID)
bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RNN_ID)
bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_RSPN_ID)
+ bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RSNN_NN)
bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -654,56 +997,78 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_GID_FT);
CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
- if (phba->hba_state < LPFC_HBA_READY)
- phba->hba_state = LPFC_NS_QRY;
- lpfc_set_disctmo(phba);
+ if (vport->port_state < LPFC_NS_QRY)
+ vport->port_state = LPFC_NS_QRY;
+ lpfc_set_disctmo(vport);
cmpl = lpfc_cmpl_ct_cmd_gid_ft;
rsp_size = FC_MAX_NS_RSP;
break;
+ case SLI_CTNS_GFF_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_GFF_ID);
+ CtReq->un.gff.PortId = be32_to_cpu(context);
+ cmpl = lpfc_cmpl_ct_cmd_gff_id;
+ break;
+
case SLI_CTNS_RFT_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RFT_ID);
- CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
+ CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID);
CtReq->un.rft.fcpReg = 1;
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
- case SLI_CTNS_RFF_ID:
- CtReq->CommandResponse.bits.CmdRsp =
- be16_to_cpu(SLI_CTNS_RFF_ID);
- CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
- CtReq->un.rff.feature_res = 0;
- CtReq->un.rff.feature_tgt = 0;
- CtReq->un.rff.type_code = FC_FCP_DATA;
- CtReq->un.rff.feature_init = 1;
- cmpl = lpfc_cmpl_ct_cmd_rff_id;
- break;
-
case SLI_CTNS_RNN_ID:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RNN_ID);
- CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
- memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename,
+ CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID);
+ memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
cmpl = lpfc_cmpl_ct_cmd_rnn_id;
break;
+ case SLI_CTNS_RSPN_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RSPN_ID);
+ CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
+ size = sizeof(CtReq->un.rspn.symbname);
+ CtReq->un.rspn.len =
+ lpfc_vport_symbolic_port_name(vport,
+ CtReq->un.rspn.symbname, size);
+ cmpl = lpfc_cmpl_ct_cmd_rspn_id;
+ break;
case SLI_CTNS_RSNN_NN:
CtReq->CommandResponse.bits.CmdRsp =
be16_to_cpu(SLI_CTNS_RSNN_NN);
- memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
+ memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
sizeof (struct lpfc_name));
- lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
- CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
+ size = sizeof(CtReq->un.rsnn.symbname);
+ CtReq->un.rsnn.len =
+ lpfc_vport_symbolic_node_name(vport,
+ CtReq->un.rsnn.symbname, size);
cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
break;
+ case SLI_CTNS_RFF_ID:
+ vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
+ CtReq->CommandResponse.bits.CmdRsp =
+ be16_to_cpu(SLI_CTNS_RFF_ID);
+ CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);;
+ CtReq->un.rff.fbits = FC4_FEATURE_INIT;
+ CtReq->un.rff.type_code = FC_FCP_DATA;
+ cmpl = lpfc_cmpl_ct_cmd_rff_id;
+ break;
}
- if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
/* On success, The cmpl function will free the buffers */
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Issue CT cmd: cmd:x%x did:x%x",
+ cmdcode, ndlp->nlp_DID, 0);
return 0;
+ }
+ rc = 6;
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
kfree(bmp);
@@ -712,14 +1077,17 @@ ns_cmd_free_mpvirt:
ns_cmd_free_mp:
kfree(mp);
ns_cmd_exit:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
+ phba->brd_no, vport->vpi, cmdcode, rc, vport->fc_flag,
+ vport->fc_rscn_id_cnt);
return 1;
}
static void
-lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq * rspiocb)
{
- struct lpfc_dmabuf *bmp = cmdiocb->context3;
struct lpfc_dmabuf *inp = cmdiocb->context1;
struct lpfc_dmabuf *outp = cmdiocb->context2;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
@@ -727,48 +1095,60 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
struct lpfc_nodelist *ndlp;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp = &rspiocb->iocb;
+ uint32_t latt;
+
+ latt = lpfc_els_chk_latt(vport);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "FDMI cmpl: status:x%x/x%x latt:%d",
+ irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+
+ if (latt || irsp->ulpStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0229 FDMI cmd %04x failed, latt = %d "
+ "ulpStatus: x%x, rid x%x\n",
+ phba->brd_no, vport->vpi,
+ be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ return;
+ }
- ndlp = lpfc_findnode_did(phba, FDMI_DID);
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0220 FDMI rsp failed Data: x%x\n",
- phba->brd_no,
- be16_to_cpu(fdmi_cmd));
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0220 FDMI rsp failed Data: x%x\n",
+ phba->brd_no, vport->vpi,
+ be16_to_cpu(fdmi_cmd));
}
switch (be16_to_cpu(fdmi_cmd)) {
case SLI_MGMT_RHBA:
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
break;
case SLI_MGMT_RPA:
break;
case SLI_MGMT_DHBA:
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
break;
case SLI_MGMT_DPRT:
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
break;
}
-
- lpfc_free_ct_rsp(phba, outp);
- lpfc_mbuf_free(phba, inp->virt, inp->phys);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(inp);
- kfree(bmp);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_ct_free_iocb(phba, cmdiocb);
return;
}
+
int
-lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
+lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *mp, *bmp;
struct lpfc_sli_ct_request *CtReq;
struct ulp_bde64 *bpl;
@@ -805,12 +1185,10 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
INIT_LIST_HEAD(&bmp->list);
/* FDMI request */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0218 FDMI Request Data: x%x x%x x%x\n",
- phba->brd_no,
- phba->fc_flag, phba->hba_state, cmdcode);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, vport->fc_flag,
+ vport->port_state, cmdcode);
CtReq = (struct lpfc_sli_ct_request *) mp->virt;
@@ -833,11 +1211,11 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
be16_to_cpu(SLI_MGMT_RHBA);
CtReq->CommandResponse.bits.Size = 0;
rh = (REG_HBA *) & CtReq->un.PortID;
- memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
+ memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
/* One entry (port) per adapter */
rh->rpl.EntryCnt = be32_to_cpu(1);
- memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
+ memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
/* point to the HBA attribute block */
@@ -853,7 +1231,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
+ sizeof (struct lpfc_name));
- memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
+ memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
ab->EntryCnt++;
size += FOURBYTES + sizeof (struct lpfc_name);
@@ -991,7 +1369,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
size = sizeof (struct lpfc_name) + FOURBYTES;
memcpy((uint8_t *) & pab->PortName,
- (uint8_t *) & phba->fc_sparam.portName,
+ (uint8_t *) & vport->fc_sparam.portName,
sizeof (struct lpfc_name));
pab->ab.EntryCnt = 0;
@@ -1053,7 +1431,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
- hsp = (struct serv_parm *) & phba->fc_sparam;
+ hsp = (struct serv_parm *) & vport->fc_sparam;
ae->un.MaxFrameSize =
(((uint32_t) hsp->cmn.
bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
@@ -1097,7 +1475,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.Size = 0;
pe = (PORT_ENTRY *) & CtReq->un.PortID;
memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & phba->fc_sparam.portName,
+ (uint8_t *) & vport->fc_sparam.portName,
sizeof (struct lpfc_name));
size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
break;
@@ -1107,22 +1485,22 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
CtReq->CommandResponse.bits.Size = 0;
pe = (PORT_ENTRY *) & CtReq->un.PortID;
memcpy((uint8_t *) & pe->PortName,
- (uint8_t *) & phba->fc_sparam.portName,
+ (uint8_t *) & vport->fc_sparam.portName,
sizeof (struct lpfc_name));
size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
break;
}
bpl = (struct ulp_bde64 *) bmp->virt;
- bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
- bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
bpl->tus.f.bdeFlags = 0;
bpl->tus.f.bdeSize = size;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
cmpl = lpfc_cmpl_ct_cmd_fdmi;
- if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
return 0;
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -1134,49 +1512,50 @@ fdmi_cmd_free_mp:
kfree(mp);
fdmi_cmd_exit:
/* Issue FDMI request failed */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0244 Issue FDMI request failed Data: x%x\n",
- phba->brd_no,
- cmdcode);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0244 Issue FDMI request failed Data: x%x\n",
+ phba->brd_no, vport->vpi, cmdcode);
return 1;
}
void
lpfc_fdmi_tmo(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
unsigned long iflag;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
- phba->work_hba_events |= WORKER_FDMI_TMO;
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+ vport->work_port_events |= WORKER_FDMI_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
- spin_unlock_irqrestore(phba->host->host_lock,iflag);
+ else
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
}
void
-lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
+lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
- ndlp = lpfc_findnode_did(phba, FDMI_DID);
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (ndlp) {
- if (init_utsname()->nodename[0] != '\0') {
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
- } else {
- mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
- }
+ if (init_utsname()->nodename[0] != '\0')
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+ else
+ mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
}
return;
}
-
void
-lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
+lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
{
struct lpfc_sli *psli = &phba->sli;
lpfc_vpd_t *vp = &phba->vpd;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 000000000000..673cfe11cc2b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,508 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2007 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/ctype.h>
+#include <linux/version.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+/* debugfs interface
+ *
+ * To access this interface the user should:
+ * # mkdir /debug
+ * # mount -t debugfs none /debug
+ *
+ * The lpfc debugfs directory hierarchy is:
+ * lpfc/lpfcX/vportY
+ * where X is the lpfc hba unique_id
+ * where Y is the vport VPI on that hba
+ *
+ * Debugging services available per vport:
+ * discovery_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
+ * See lpfc_debugfs.h for different categories of
+ * discovery events. To enable the discovery trace, the following
+ * module parameters must be set:
+ * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
+ * EACH vport. X MUST also be a power of 2.
+ * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
+ * lpfc_debugfs.h.
+ */
+static int lpfc_debugfs_enable = 0;
+module_param(lpfc_debugfs_enable, int, 0);
+MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
+
+static int lpfc_debugfs_max_disc_trc = 0; /* This MUST be a power of 2 */
+module_param(lpfc_debugfs_max_disc_trc, int, 0);
+MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
+ "Set debugfs discovery trace depth");
+
+static int lpfc_debugfs_mask_disc_trc = 0;
+module_param(lpfc_debugfs_mask_disc_trc, int, 0);
+MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
+ "Set debugfs discovery trace mask");
+
+#include <linux/debugfs.h>
+
+/* size of discovery_trace output line */
+#define LPFC_DISC_TRC_ENTRY_SIZE 80
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+struct lpfc_debug {
+ char *buffer;
+ int len;
+};
+
+atomic_t lpfc_debugfs_disc_trc_cnt = ATOMIC_INIT(0);
+unsigned long lpfc_debugfs_start_time = 0L;
+
+static int
+lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int i, index, len, enable;
+ uint32_t ms;
+ struct lpfc_disc_trc *dtp;
+ char buffer[80];
+
+
+ enable = lpfc_debugfs_enable;
+ lpfc_debugfs_enable = 0;
+
+ len = 0;
+ index = (atomic_read(&vport->disc_trc_cnt) + 1) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
+ dtp = vport->disc_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer, 80, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+ for (i = 0; i < index; i++) {
+ dtp = vport->disc_trc + i;
+ if (!dtp->fmt)
+ continue;
+ ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
+ snprintf(buffer, 80, "%010d:%010d ms:%s\n",
+ dtp->seq_cnt, ms, dtp->fmt);
+ len += snprintf(buf+len, size-len, buffer,
+ dtp->data1, dtp->data2, dtp->data3);
+ }
+
+ lpfc_debugfs_enable = enable;
+ return len;
+}
+
+static int
+lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ int len = 0;
+ int cnt;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+ unsigned char *statep, *name;
+
+ cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!cnt) {
+ len += snprintf(buf+len, size-len,
+ "Missing Nodelist Entries\n");
+ break;
+ }
+ cnt--;
+ switch (ndlp->nlp_state) {
+ case NLP_STE_UNUSED_NODE:
+ statep = "UNUSED";
+ break;
+ case NLP_STE_PLOGI_ISSUE:
+ statep = "PLOGI ";
+ break;
+ case NLP_STE_ADISC_ISSUE:
+ statep = "ADISC ";
+ break;
+ case NLP_STE_REG_LOGIN_ISSUE:
+ statep = "REGLOG";
+ break;
+ case NLP_STE_PRLI_ISSUE:
+ statep = "PRLI ";
+ break;
+ case NLP_STE_UNMAPPED_NODE:
+ statep = "UNMAP ";
+ break;
+ case NLP_STE_MAPPED_NODE:
+ statep = "MAPPED";
+ break;
+ case NLP_STE_NPR_NODE:
+ statep = "NPR ";
+ break;
+ default:
+ statep = "UNKNOWN";
+ }
+ len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+ statep, ndlp->nlp_DID);
+ name = (unsigned char *)&ndlp->nlp_portname;
+ len += snprintf(buf+len, size-len,
+ "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7));
+ name = (unsigned char *)&ndlp->nlp_nodename;
+ len += snprintf(buf+len, size-len,
+ "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7));
+ len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
+ ndlp->nlp_rpi, ndlp->nlp_flag);
+ if (!ndlp->nlp_type)
+ len += snprintf(buf+len, size-len, "UNKNOWN_TYPE");
+ if (ndlp->nlp_type & NLP_FC_NODE)
+ len += snprintf(buf+len, size-len, "FC_NODE ");
+ if (ndlp->nlp_type & NLP_FABRIC)
+ len += snprintf(buf+len, size-len, "FABRIC ");
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+ ndlp->nlp_sid);
+ if (ndlp->nlp_type & NLP_FCP_INITIATOR)
+ len += snprintf(buf+len, size-len, "FCP_INITIATOR");
+ len += snprintf(buf+len, size-len, "\n");
+ }
+ spin_unlock_irq(shost->host_lock);
+ return len;
+}
+#endif
+
+
+inline void
+lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
+ uint32_t data1, uint32_t data2, uint32_t data3)
+{
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct lpfc_disc_trc *dtp;
+ int index;
+
+ if (!(lpfc_debugfs_mask_disc_trc & mask))
+ return;
+
+ if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
+ !vport || !vport->disc_trc)
+ return;
+
+ index = atomic_inc_return(&vport->disc_trc_cnt) &
+ (lpfc_debugfs_max_disc_trc - 1);
+ dtp = vport->disc_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+ dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_disc_trc_cnt);
+ dtp->jif = jiffies;
+#endif
+ return;
+}
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+static int
+lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ if (!lpfc_debugfs_max_disc_trc) {
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ size = (lpfc_debugfs_max_disc_trc * LPFC_DISC_TRC_ENTRY_SIZE);
+ size = PAGE_ALIGN(size);
+
+ debug->buffer = kmalloc(size, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int
+lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Buffer for the nodelist snapshot (LPFC_NODELIST_SIZE bytes) */
+ debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
+ LPFC_NODELIST_SIZE);
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static loff_t
+lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
+{
+ struct lpfc_debug *debug;
+ loff_t pos = -1;
+
+ debug = file->private_data;
+
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = debug->len - off;
+ }
+ return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
+}
+
+static ssize_t
+lpfc_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
+ debug->len);
+}
+
+static int
+lpfc_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+#undef lpfc_debugfs_op_disc_trc
+static struct file_operations lpfc_debugfs_op_disc_trc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_disc_trc_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nodelist
+static struct file_operations lpfc_debugfs_op_nodelist = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_nodelist_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_release,
+};
+
+static struct dentry *lpfc_debugfs_root = NULL;
+static atomic_t lpfc_debugfs_hba_count;
+#endif
+
+inline void
+lpfc_debugfs_initialize(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct lpfc_hba *phba = vport->phba;
+ char name[64];
+ uint32_t num, i;
+
+ if (!lpfc_debugfs_enable)
+ return;
+
+ if (lpfc_debugfs_max_disc_trc) {
+ num = lpfc_debugfs_max_disc_trc - 1;
+ if (num & lpfc_debugfs_max_disc_trc) {
+ /* Change to be a power of 2 */
+ num = lpfc_debugfs_max_disc_trc;
+ i = 0;
+ while (num > 1) {
+ num = num >> 1;
+ i++;
+ }
+ lpfc_debugfs_max_disc_trc = (1 << i);
+ printk(KERN_ERR
+ "lpfc_debugfs_max_disc_trc changed to %d\n",
+ lpfc_debugfs_max_disc_trc);
+ }
+ }
+
+ if (!lpfc_debugfs_root) {
+ lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
+ atomic_set(&lpfc_debugfs_hba_count, 0);
+ if (!lpfc_debugfs_root)
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
+ if (!phba->hba_debugfs_root) {
+ phba->hba_debugfs_root =
+ debugfs_create_dir(name, lpfc_debugfs_root);
+ if (!phba->hba_debugfs_root)
+ goto debug_failed;
+ atomic_inc(&lpfc_debugfs_hba_count);
+ atomic_set(&phba->debugfs_vport_count, 0);
+ }
+
+ snprintf(name, sizeof(name), "vport%d", vport->vpi);
+ if (!vport->vport_debugfs_root) {
+ vport->vport_debugfs_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!vport->vport_debugfs_root)
+ goto debug_failed;
+ atomic_inc(&phba->debugfs_vport_count);
+ }
+
+ if (!lpfc_debugfs_start_time)
+ lpfc_debugfs_start_time = jiffies;
+
+ vport->disc_trc = kmalloc(
+ (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc),
+ GFP_KERNEL);
+
+ if (!vport->disc_trc)
+ goto debug_failed;
+ memset(vport->disc_trc, 0,
+ (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc));
+
+ snprintf(name, sizeof(name), "discovery_trace");
+ vport->debug_disc_trc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_disc_trc);
+ if (!vport->debug_disc_trc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0409 Cannot create debugfs",
+ phba->brd_no);
+ goto debug_failed;
+ }
+ snprintf(name, sizeof(name), "nodelist");
+ vport->debug_nodelist =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_nodelist);
+ if (!vport->debug_nodelist) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0409 Cannot create debugfs",
+ phba->brd_no);
+ goto debug_failed;
+ }
+debug_failed:
+ return;
+#endif
+}
+
+
+inline void
+lpfc_debugfs_terminate(struct lpfc_vport *vport)
+{
+#ifdef CONFIG_LPFC_DEBUG_FS
+ struct lpfc_hba *phba = vport->phba;
+
+ if (vport->disc_trc) {
+ kfree(vport->disc_trc);
+ vport->disc_trc = NULL;
+ }
+ if (vport->debug_disc_trc) {
+ debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
+ vport->debug_disc_trc = NULL;
+ }
+ if (vport->debug_nodelist) {
+ debugfs_remove(vport->debug_nodelist); /* nodelist */
+ vport->debug_nodelist = NULL;
+ }
+ if (vport->vport_debugfs_root) {
+ debugfs_remove(vport->vport_debugfs_root); /* vportX */
+ vport->vport_debugfs_root = NULL;
+ atomic_dec(&phba->debugfs_vport_count);
+ }
+ if (atomic_read(&phba->debugfs_vport_count) == 0) {
+ debugfs_remove(vport->phba->hba_debugfs_root); /* lpfcX */
+ vport->phba->hba_debugfs_root = NULL;
+ atomic_dec(&lpfc_debugfs_hba_count);
+ if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+ debugfs_remove(lpfc_debugfs_root); /* lpfc */
+ lpfc_debugfs_root = NULL;
+ }
+ }
+#endif
+}
+
+
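[Editor's note] The discovery-trace machinery above keeps lpfc_debugfs_max_disc_trc entries in a ring: lpfc_debugfs_initialize() rounds the module parameter down to a power of two, and lpfc_debugfs_disc_trc() turns the free-running disc_trc_cnt counter into a slot number by masking it with (depth - 1). A minimal userspace sketch of those two steps follows; it is illustrative only (names and values are hypothetical) and is not part of the patch.

	/* Sketch of the power-of-two rounding and ring-buffer masking. */
	#include <stdio.h>

	static unsigned int round_down_pow2(unsigned int depth)
	{
		unsigned int num = depth, i = 0;

		if (!(depth & (depth - 1)))	/* already a power of two */
			return depth;
		while (num > 1) {		/* same loop as lpfc_debugfs_initialize() */
			num >>= 1;
			i++;
		}
		return 1U << i;
	}

	int main(void)
	{
		unsigned int depth = round_down_pow2(100);	/* 100 -> 64 */
		unsigned int seq;

		for (seq = 62; seq < 67; seq++)
			printf("seq %u -> slot %u of %u\n",
			       seq, seq & (depth - 1), depth);	/* wraps 63 -> 0 */
		return 0;
	}

As in the driver, the mask trick only works because the depth is forced to a power of two first.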
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 000000000000..fffb678426a4
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,50 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2007 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#ifndef _H_LPFC_DEBUG_FS
+#define _H_LPFC_DEBUG_FS
+
+#ifdef CONFIG_LPFC_DEBUG_FS
+struct lpfc_disc_trc {
+ char *fmt;
+ uint32_t data1;
+ uint32_t data2;
+ uint32_t data3;
+ uint32_t seq_cnt;
+ unsigned long jif;
+};
+#endif
+
+/* Mask for discovery_trace */
+#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
+#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
+#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */
+#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */
+#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */
+#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */
+#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */
+#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */
+#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */
+#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */
+#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */
+
+#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
+ * discovery */
+#endif /* H_LPFC_DEBUG_FS */
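[Editor's note] The mask bits above gate which discovery events land in the trace buffer: lpfc_debugfs_disc_trc() returns early unless the event's category bit is set in the lpfc_debugfs_mask_disc_trc module parameter. A small standalone sketch of that filter, with mask values copied from the header (illustrative only, not part of the patch):

	#include <stdio.h>

	#define LPFC_DISC_TRC_ELS_CMD	0x1
	#define LPFC_DISC_TRC_ELS_RSP	0x2
	#define LPFC_DISC_TRC_ELS_UNSOL	0x4
	#define LPFC_DISC_TRC_ELS_ALL	0x7
	#define LPFC_DISC_TRC_MBOX	0x10
	#define LPFC_DISC_TRC_CT	0x20

	/* Mirrors the early-return check in lpfc_debugfs_disc_trc(). */
	static int trace_wanted(unsigned int mask_param, unsigned int event)
	{
		return (mask_param & event) != 0;
	}

	int main(void)
	{
		/* e.g. a module-load setting: trace all ELS plus CT events */
		unsigned int mask_param = LPFC_DISC_TRC_ELS_ALL | LPFC_DISC_TRC_CT;

		printf("ELS cmd traced: %d\n", trace_wanted(mask_param, LPFC_DISC_TRC_ELS_CMD));
		printf("CT traced:      %d\n", trace_wanted(mask_param, LPFC_DISC_TRC_CT));
		printf("MBOX traced:    %d\n", trace_wanted(mask_param, LPFC_DISC_TRC_MBOX));
		return 0;
	}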
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 498059f3f7f4..aacac9ac5381 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,21 +36,23 @@ enum lpfc_work_type {
LPFC_EVT_WARM_START,
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
+ LPFC_EVT_DEV_LOSS_DELAY,
+ LPFC_EVT_DEV_LOSS,
};
/* structure used to queue event to the discovery tasklet */
struct lpfc_work_evt {
struct list_head evt_listp;
- void * evt_arg1;
- void * evt_arg2;
+ void *evt_arg1;
+ void *evt_arg2;
enum lpfc_work_type evt;
};
struct lpfc_nodelist {
struct list_head nlp_listp;
- struct lpfc_name nlp_portname; /* port name */
- struct lpfc_name nlp_nodename; /* node name */
+ struct lpfc_name nlp_portname;
+ struct lpfc_name nlp_nodename;
uint32_t nlp_flag; /* entry flags */
uint32_t nlp_DID; /* FC D_ID of entry */
uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -75,8 +77,9 @@ struct lpfc_nodelist {
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct fc_rport *rport; /* Corresponding FC transport
port structure */
- struct lpfc_hba *nlp_phba;
+ struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
+ struct lpfc_work_evt dev_loss_evt;
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
@@ -98,7 +101,9 @@ struct lpfc_nodelist {
ACC */
#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
NPR list */
+#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */
#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
+#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
/* There are 4 different double linked lists nodelist entries can reside on.
* The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 638b3cd677bd..33fbc1666946 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -35,38 +35,38 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+
static int lpfc_max_els_tries = 3;
-static int
-lpfc_els_chk_latt(struct lpfc_hba * phba)
+int
+lpfc_els_chk_latt(struct lpfc_vport *vport)
{
- struct lpfc_sli *psli;
- LPFC_MBOXQ_t *mbox;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
uint32_t ha_copy;
- int rc;
- psli = &phba->sli;
-
- if ((phba->hba_state >= LPFC_HBA_READY) ||
- (phba->hba_state == LPFC_LINK_DOWN))
+ if (vport->port_state >= LPFC_VPORT_READY ||
+ phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Read the HBA Host Attention Register */
- spin_lock_irq(phba->host->host_lock);
ha_copy = readl(phba->HAregaddr);
- spin_unlock_irq(phba->host->host_lock);
if (!(ha_copy & HA_LATT))
return 0;
/* Pending Link Event during Discovery */
- lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
- "%d:0237 Pending Link Event during "
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0237 Pending Link Event during "
"Discovery: State x%x\n",
- phba->brd_no, phba->hba_state);
+ phba->brd_no, vport->vpi, phba->pport->port_state);
/* CLEAR_LA should re-enable link attention events and
* we should then imediately take a LATT event. The
@@ -74,48 +74,34 @@ lpfc_els_chk_latt(struct lpfc_hba * phba)
* will cleanup any left over in-progress discovery
* events.
*/
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_ABORT_DISCOVERY;
- spin_unlock_irq(phba->host->host_lock);
-
- if (phba->hba_state != LPFC_CLEAR_LA) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, mbox);
- mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- phba->hba_state = LPFC_HBA_ERROR;
- }
- }
- }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_ABORT_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
- return 1;
+ if (phba->link_state != LPFC_CLEAR_LA)
+ lpfc_issue_clear_la(phba, vport);
+ return 1;
}
static struct lpfc_iocbq *
-lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp,
- uint32_t did, uint32_t elscmd)
+lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+ uint16_t cmdSize, uint8_t retry,
+ struct lpfc_nodelist *ndlp, uint32_t did,
+ uint32_t elscmd)
{
- struct lpfc_sli_ring *pring;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
struct ulp_bde64 *bpl;
IOCB_t *icmd;
- pring = &phba->sli.ring[LPFC_ELS_RING];
- if (phba->hba_state < LPFC_LINK_UP)
- return NULL;
+ if (!lpfc_is_link_up(phba))
+ return NULL;
/* Allocate buffer for command iocb */
- spin_lock_irq(phba->host->host_lock);
elsiocb = lpfc_sli_get_iocbq(phba);
- spin_unlock_irq(phba->host->host_lock);
if (elsiocb == NULL)
return NULL;
@@ -123,14 +109,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
+ if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
((pcmd->virt = lpfc_mbuf_alloc(phba,
MEM_PRI, &(pcmd->phys))) == 0)) {
kfree(pcmd);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
return NULL;
}
@@ -138,7 +122,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
/* Allocate buffer for response payload */
if (expectRsp) {
- prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
@@ -146,9 +130,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
kfree(prsp);
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
return NULL;
}
INIT_LIST_HEAD(&prsp->list);
@@ -157,14 +139,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
}
/* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
+ pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (pbuflist)
- pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
- &pbuflist->phys);
+ pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
+ &pbuflist->phys);
if (pbuflist == 0 || pbuflist->virt == 0) {
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pcmd);
@@ -178,20 +158,28 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.elsreq64.remoteID = did; /* DID */
if (expectRsp) {
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- icmd->un.elsreq64.remoteID = did; /* DID */
+ icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
- icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
+ icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
}
-
icmd->ulpBdeCount = 1;
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ icmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ icmd->ulpContext = vport->vpi;
+ icmd->ulpCt_h = 0;
+ icmd->ulpCt_l = 1;
+ }
+
bpl = (struct ulp_bde64 *) pbuflist->virt;
bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
@@ -209,10 +197,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
}
/* Save for completion so we can release these resources */
- elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (elscmd != ELS_CMD_LS_RJT)
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->context2 = pcmd;
elsiocb->context3 = pbuflist;
elsiocb->retry = retry;
+ elsiocb->vport = vport;
elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
if (prsp) {
@@ -222,16 +212,16 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
if (expectRsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0116 Xmit ELS command x%x to remote "
- "NPORT x%x I/O tag: x%x, HBA state: x%x\n",
- phba->brd_no, elscmd,
- did, elsiocb->iotag, phba->hba_state);
+ "%d (%d):0116 Xmit ELS command x%x to remote "
+ "NPORT x%x I/O tag: x%x, port state: x%x\n",
+ phba->brd_no, vport->vpi, elscmd, did,
+ elsiocb->iotag, vport->port_state);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0117 Xmit ELS response x%x to remote "
+ "%d (%d):0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x\n",
- phba->brd_no, elscmd,
+ phba->brd_no, vport->vpi, elscmd,
ndlp->nlp_DID, elsiocb->iotag, cmdSize);
}
@@ -240,16 +230,79 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
static int
-lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp, IOCB_t *irsp)
+lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ndlp;
+ struct serv_parm *sp;
int rc;
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_FABRIC;
- spin_unlock_irq(phba->host->host_lock);
+ sp = &phba->fc_fabparam;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ goto fail;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ vport->port_state = LPFC_FABRIC_CFG_LINK;
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED)
+ goto fail_free_mbox;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+ rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+ 0);
+ if (rc)
+ goto fail_free_mbox;
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
+ mbox->vport = vport;
+ mbox->context2 = lpfc_nlp_get(ndlp);
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
+ if (rc == MBX_NOT_FINISHED)
+ goto fail_issue_reg_login;
+
+ return 0;
+
+fail_issue_reg_login:
+ lpfc_nlp_put(ndlp);
+ mp = (struct lpfc_dmabuf *) mbox->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+fail_free_mbox:
+ mempool_free(mbox, phba->mbox_mem_pool);
+
+fail:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0249 Cannot issue Register Fabric login\n",
+ phba->brd_no, vport->vpi);
+ return -ENXIO;
+}
+
+static int
+lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp, IOCB_t *irsp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *np;
+ struct lpfc_nodelist *next_np;
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ spin_unlock_irq(shost->host_lock);
phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
@@ -258,20 +311,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
if (phba->fc_topology == TOPOLOGY_LOOP) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
} else {
/*
* If we are a N-port connected to a Fabric, fixup sparam's so
* logins to devices on remote loops work.
*/
- phba->fc_sparam.cmn.altBbCredit = 1;
+ vport->fc_sparam.cmn.altBbCredit = 1;
}
- phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
- memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -285,68 +338,85 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sp->cmn.bbRcvSizeLsb;
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- phba->hba_state = LPFC_FABRIC_CFG_LINK;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (sp->cmn.response_multiple_NPort) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
+ "%d:1816 FLOGI NPIV supported, "
+ "response data 0x%x\n",
+ phba->brd_no,
+ sp->cmn.response_multiple_NPort);
+ phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
- if (rc == MBX_NOT_FINISHED)
- goto fail_free_mbox;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
+ } else {
+ /* Because we asked f/w for NPIV, it still expects us
+ to call reg_vnpid at least for the physical host */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
+ "%d:1817 Fabric does not support NPIV "
+ "- configuring single port mode.\n",
+ phba->brd_no);
+ phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ }
+ }
- if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
- goto fail_free_mbox;
+ if ((vport->fc_prevDID != vport->fc_myDID) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
- mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
- mbox->context2 = lpfc_nlp_get(ndlp);
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if ((np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
+ spin_lock_irq(shost->host_lock);
+ np->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
+ }
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ }
+ }
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
- if (rc == MBX_NOT_FINISHED)
- goto fail_issue_reg_login;
+ ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+ vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
+ lpfc_register_new_vport(phba, vport, ndlp);
+ return 0;
+ }
+ lpfc_issue_fabric_reglogin(vport);
return 0;
-
- fail_issue_reg_login:
- lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *) mbox->context1;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- fail_free_mbox:
- mempool_free(mbox, phba->mbox_mem_pool);
- fail:
- return -ENXIO;
}
/*
* We FLOGIed into an NPort, initiate pt2pt protocol
*/
static int
-lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp)
+lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm *sp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
int rc;
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
phba->fc_edtov = FF_DEF_EDTOV;
phba->fc_ratov = FF_DEF_RATOV;
- rc = memcmp(&phba->fc_portname, &sp->portName,
- sizeof(struct lpfc_name));
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(vport->fc_portname));
if (rc >= 0) {
/* This side will initiate the PLOGI */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
/*
* N_Port ID cannot be 0, set our to LocalID the other
@@ -355,7 +425,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* not equal */
if (rc)
- phba->fc_myDID = PT2PT_LocalID;
+ vport->fc_myDID = PT2PT_LocalID;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -364,15 +434,16 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mbox,
- MBX_NOWAIT | MBX_STOP_IOCB);
+ MBX_NOWAIT | MBX_STOP_IOCB);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
goto fail;
}
lpfc_nlp_put(ndlp);
- ndlp = lpfc_findnode_did(phba, PT2PT_RemoteID);
+ ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
if (!ndlp) {
/*
* Cannot find existing Fabric ndlp, so allocate a
@@ -382,28 +453,30 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (!ndlp)
goto fail;
- lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
+ lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
}
memcpy(&ndlp->nlp_portname, &sp->portName,
- sizeof(struct lpfc_name));
+ sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName,
- sizeof(struct lpfc_name));
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ sizeof(struct lpfc_name));
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
} else {
/* This side will wait for the PLOGI */
lpfc_nlp_put(ndlp);
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PT2PT;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT;
+ spin_unlock_irq(shost->host_lock);
/* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
return 0;
- fail:
+fail:
return -ENXIO;
}
@@ -411,6 +484,8 @@ static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp = cmdiocb->context1;
struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
@@ -418,21 +493,25 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int rc;
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
+ if (lpfc_els_chk_latt(vport)) {
lpfc_nlp_put(ndlp);
goto out;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "FLOGI cmpl: status:x%x/x%x state:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ vport->port_state);
+
if (irsp->ulpStatus) {
/* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
- /* ELS command is being retried */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- }
+
/* FLOGI failed, so there is no fabric */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
@@ -443,11 +522,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* FLOGI failure */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0100 FLOGI failure Data: x%x x%x x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0100 FLOGI failure Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
goto flogifail;
@@ -463,21 +541,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FLOGI completes successfully */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0101 FLOGI completes sucessfully "
+ "%d (%d):0101 FLOGI completes sucessfully "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
- if (phba->hba_state == LPFC_FLOGI) {
+ if (vport->port_state == LPFC_FLOGI) {
/*
* If Common Service Parameters indicate Nport
* we are point to point, if Fport we are Fabric.
*/
if (sp->cmn.fPort)
- rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
else
- rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);
+ rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
if (!rc)
goto out;
@@ -486,14 +564,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
flogifail:
lpfc_nlp_put(ndlp);
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
- irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
+ if (!lpfc_error_lost_link(irsp)) {
/* FLOGI failed, so just use loop map to make discovery list */
- lpfc_disc_list_loopmap(phba);
+ lpfc_disc_list_loopmap(vport);
/* Start discovery */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
}
out:
@@ -501,9 +577,10 @@ out:
}
static int
-lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
@@ -515,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
pring = &phba->sli.ring[LPFC_ELS_RING];
- cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_FLOGI);
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_FLOGI);
+
if (!elsiocb)
return 1;
@@ -526,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
/* Setup CSPs accordingly for Fabric */
@@ -541,16 +619,32 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ sp->cmn.request_multiple_Nport = 1;
+
+ /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ }
+
+ if (phba->fc_topology != TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
+
tmo = phba->fc_ratov;
phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
phba->fc_ratov = tmo;
phba->fc_stat.elsXmitFLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
- spin_lock_irq(phba->host->host_lock);
- rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FLOGI: opt:x%x",
+ phba->sli3_options, 0, 0);
+
+ rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -559,7 +653,7 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
}
int
-lpfc_els_abort_flogi(struct lpfc_hba * phba)
+lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
@@ -577,73 +671,99 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
* Check the txcmplq for an iocb that matches the nport the driver is
* searching for.
*/
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
+ icmd->un.elsreq64.bdl.ulpIoTag32) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
- if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+ if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
int
-lpfc_initial_flogi(struct lpfc_hba *phba)
+lpfc_initial_flogi(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
/* First look for the Fabric ndlp */
- ndlp = lpfc_findnode_did(phba, Fabric_DID);
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 0;
- lpfc_nlp_init(phba, ndlp, Fabric_DID);
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
} else {
- lpfc_dequeue_node(phba, ndlp);
+ lpfc_dequeue_node(vport, ndlp);
}
- if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
+ if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
lpfc_nlp_put(ndlp);
}
return 1;
}
+int
+lpfc_initial_fdisc(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, so allocate a new one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ return 0;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ } else {
+ lpfc_dequeue_node(vport, ndlp);
+ }
+ if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+ lpfc_nlp_put(ndlp);
+ }
+ return 1;
+}
static void
-lpfc_more_plogi(struct lpfc_hba * phba)
+lpfc_more_plogi(struct lpfc_vport *vport)
{
int sentplogi;
+ struct lpfc_hba *phba = vport->phba;
- if (phba->num_disc_nodes)
- phba->num_disc_nodes--;
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
/* Continue discovery with <num_disc_nodes> PLOGIs to go */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0232 Continue discovery with %d PLOGIs to go "
+ "%d (%d):0232 Continue discovery with %d PLOGIs to go "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi, vport->num_disc_nodes,
+ vport->fc_plogi_cnt, vport->fc_flag, vport->port_state);
/* Check to see if there are more PLOGIs to be sent */
- if (phba->fc_flag & FC_NLP_MORE) {
- /* go thru NPR list and issue any remaining ELS PLOGIs */
- sentplogi = lpfc_els_disc_plogi(phba);
- }
+ if (vport->fc_flag & FC_NLP_MORE)
+ /* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ sentplogi = lpfc_els_disc_plogi(vport);
+
return;
}
static struct lpfc_nodelist *
-lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
+lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
+ struct lpfc_vport *vport = ndlp->vport;
struct lpfc_nodelist *new_ndlp;
- uint32_t *lp;
struct serv_parm *sp;
- uint8_t name[sizeof (struct lpfc_name)];
+ uint8_t name[sizeof(struct lpfc_name)];
uint32_t rc;
/* Fabric nodes can have the same WWPN so we don't bother searching
@@ -652,50 +772,51 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
if (ndlp->nlp_type & NLP_FABRIC)
return ndlp;
- lp = (uint32_t *) prsp->virt;
- sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
memset(name, 0, sizeof(struct lpfc_name));
/* Now we find out if the NPort we are logging into, matches the WWPN
* we have for that ndlp. If not, we have some work to do.
*/
- new_ndlp = lpfc_findnode_wwpn(phba, &sp->portName);
+ new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
if (new_ndlp == ndlp)
return ndlp;
if (!new_ndlp) {
- rc =
- memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
+ rc = memcmp(&ndlp->nlp_portname, name,
+ sizeof(struct lpfc_name));
if (!rc)
return ndlp;
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
if (!new_ndlp)
return ndlp;
- lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID);
+ lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
}
- lpfc_unreg_rpi(phba, new_ndlp);
+ lpfc_unreg_rpi(vport, new_ndlp);
new_ndlp->nlp_DID = ndlp->nlp_DID;
new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
- lpfc_nlp_set_state(phba, new_ndlp, ndlp->nlp_state);
+ lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
- /* Move this back to NPR list */
+ /* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
else {
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
return new_ndlp;
}
static void
-lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *prsp;
@@ -705,32 +826,43 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
cmdiocb->context_un.rsp_iocb = rspiocb;
irsp = &rspiocb->iocb;
- ndlp = lpfc_findnode_did(phba, irsp->un.elsreq64.remoteID);
- if (!ndlp)
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "PLOGI cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
+ ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0136 PLOGI completes to NPort x%x "
+ "with no ndlp. Data: x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag);
goto out;
+ }
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
+ spin_lock_irq(shost->host_lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
- spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
rc = 0;
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0102 PLOGI completes to NPort x%x "
+ "%d (%d):0102 PLOGI completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
- spin_lock_irq(phba->host->host_lock);
+ if (lpfc_els_chk_latt(vport)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
goto out;
}
@@ -743,56 +875,62 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
}
goto out;
}
/* PLOGI failed */
+ if (ndlp->nlp_DID == NameServer_DID) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0250 Nameserver login error: "
+ "0x%x / 0x%x\n",
+ phba->brd_no, vport->vpi,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ }
+
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ if (lpfc_error_lost_link(irsp)) {
rc = NLP_STE_FREED_NODE;
} else {
- rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
}
} else {
/* Good status, call state machine */
prsp = list_entry(((struct lpfc_dmabuf *)
- cmdiocb->context2)->list.next,
- struct lpfc_dmabuf, list);
- ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
- rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_PLOGI);
+ cmdiocb->context2)->list.next,
+ struct lpfc_dmabuf, list);
+ ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+ rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PLOGI);
}
- if (disc && phba->num_disc_nodes) {
+ if (disc && vport->num_disc_nodes) {
/* Check to see if there are more PLOGIs to be sent */
- lpfc_more_plogi(phba);
+ lpfc_more_plogi(vport);
- if (phba->num_disc_nodes == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(phba);
- if (phba->fc_flag & FC_RSCN_MODE) {
+ lpfc_can_disctmo(vport);
+ if (vport->fc_flag & FC_RSCN_MODE) {
/*
* Check to see if more RSCNs came in while
* we were processing this one.
*/
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
} else {
- lpfc_els_handle_rscn(phba);
+ lpfc_els_handle_rscn(vport);
}
}
}
@@ -804,8 +942,9 @@ out:
}
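
lpfc_error_lost_link() is not defined in this file; judging from the open-coded tests it replaces in these hunks, it is presumably equivalent to the following check (sketch only, inferred from the removed lines):

/* Sketch inferred from the open-coded tests removed above; the real
 * helper lives elsewhere in the lpfc sources. */
static inline int lpfc_error_lost_link(IOCB_t *irsp)
{
	return irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
	       (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
		irsp->un.ulpWord[4] == IOERR_LINK_DOWN ||
		irsp->un.ulpWord[4] == IOERR_SLI_DOWN);
}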
int
-lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
+lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
@@ -813,13 +952,14 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
+ int ret;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did,
- ELS_CMD_PLOGI);
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
+ ELS_CMD_PLOGI);
if (!elsiocb)
return 1;
@@ -828,8 +968,8 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
/* For PLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -838,22 +978,27 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PLOGI: did:x%x",
+ did, 0, 0);
+
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
- spin_lock_irq(phba->host->host_lock);
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
- spin_unlock_irq(phba->host->host_lock);
+ ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+
+ if (ret == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
@@ -864,21 +1009,26 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "PRLI cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0103 PRLI completes to NPort x%x "
+ "%d (%d):0103 PRLI completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+ vport->num_disc_nodes);
- phba->fc_prli_sent--;
+ vport->fc_prli_sent--;
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba))
+ if (lpfc_els_chk_latt(vport))
goto out;
if (irsp->ulpStatus) {
@@ -889,18 +1039,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
/* PRLI failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ if (lpfc_error_lost_link(irsp)) {
goto out;
} else {
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_PRLI);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
}
} else {
/* Good status, call state machine */
- lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_PRLI);
}
out:
@@ -909,9 +1057,11 @@ out:
}
int
-lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
PRLI *npr;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
@@ -923,9 +1073,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_PRLI);
+ cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_PRLI);
if (!elsiocb)
return 1;
@@ -933,9 +1083,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PRLI request, remainder of payload is service parameters */
- memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
+ memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
npr = (PRLI *) pcmd;
@@ -955,81 +1105,88 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue PRLI: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
phba->fc_stat.elsXmitPRLI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+ spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
- phba->fc_prli_sent++;
+ vport->fc_prli_sent++;
return 0;
}
static void
-lpfc_more_adisc(struct lpfc_hba * phba)
+lpfc_more_adisc(struct lpfc_vport *vport)
{
int sentadisc;
+ struct lpfc_hba *phba = vport->phba;
- if (phba->num_disc_nodes)
- phba->num_disc_nodes--;
+ if (vport->num_disc_nodes)
+ vport->num_disc_nodes--;
/* Continue discovery with <num_disc_nodes> ADISCs to go */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0210 Continue discovery with %d ADISCs to go "
+ "%d (%d):0210 Continue discovery with %d ADISCs to go "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi, vport->num_disc_nodes,
+ vport->fc_adisc_cnt, vport->fc_flag, vport->port_state);
/* Check to see if there are more ADISCs to be sent */
- if (phba->fc_flag & FC_NLP_MORE) {
- lpfc_set_disctmo(phba);
-
- /* go thru NPR list and issue any remaining ELS ADISCs */
- sentadisc = lpfc_els_disc_adisc(phba);
+ if (vport->fc_flag & FC_NLP_MORE) {
+ lpfc_set_disctmo(vport);
+ /* go thru NPR nodes and issue any remaining ELS ADISCs */
+ sentadisc = lpfc_els_disc_adisc(vport);
}
return;
}
static void
-lpfc_rscn_disc(struct lpfc_hba * phba)
+lpfc_rscn_disc(struct lpfc_vport *vport)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ lpfc_can_disctmo(vport);
+
/* RSCN discovery */
- /* go thru NPR list and issue ELS PLOGIs */
- if (phba->fc_npr_cnt) {
- if (lpfc_els_disc_plogi(phba))
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ if (lpfc_els_disc_plogi(vport))
return;
- }
- if (phba->fc_flag & FC_RSCN_MODE) {
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
/* Check to see if more RSCNs came in while we were
* processing this one.
*/
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
} else {
- lpfc_els_handle_rscn(phba);
+ lpfc_els_handle_rscn(vport);
}
}
}
static void
-lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
- struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
- LPFC_MBOXQ_t *mbox;
- int disc, rc;
-
- psli = &phba->sli;
+ int disc;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1037,27 +1194,32 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
irsp = &(rspiocb->iocb);
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ADISC cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
+
/* Since ndlp can be freed in the disc state machine, note if this node
* is being used during discovery.
*/
+ spin_lock_irq(shost->host_lock);
disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
- spin_lock_irq(phba->host->host_lock);
ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
/* ADISC completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0104 ADISC completes to NPort x%x "
+ "%d (%d):0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+ disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
- spin_lock_irq(phba->host->host_lock);
+ if (lpfc_els_chk_latt(vport)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
goto out;
}
@@ -1066,67 +1228,68 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
if (disc) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
- spin_unlock_irq(phba->host->host_lock);
- lpfc_set_disctmo(phba);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_set_disctmo(vport);
}
goto out;
}
/* ADISC failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
- (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) &&
- (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+ if (!lpfc_error_lost_link(irsp)) {
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_ADISC);
}
} else {
/* Good status, call state machine */
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
}
- if (disc && phba->num_disc_nodes) {
+ if (disc && vport->num_disc_nodes) {
/* Check to see if there are more ADISCs to be sent */
- lpfc_more_adisc(phba);
+ lpfc_more_adisc(vport);
/* Check to see if we are done with ADISC authentication */
- if (phba->num_disc_nodes == 0) {
- lpfc_can_disctmo(phba);
- /* If we get here, there is nothing left to wait for */
- if ((phba->hba_state < LPFC_HBA_READY) &&
- (phba->hba_state != LPFC_CLEAR_LA)) {
- /* Link up discovery */
- if ((mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL))) {
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, mbox);
- mbox->mbox_cmpl =
- lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox
- (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox,
- phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].
- flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].
- flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].
- flag &=
- ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state =
- LPFC_HBA_READY;
+ if (vport->num_disc_nodes == 0) {
+ /* If we get here, there is nothing left to ADISC */
+ /*
+ * For NPIV, cmpl_reg_vpi will set port_state to READY,
+ * and continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE)) {
+ lpfc_issue_reg_vpi(phba, vport);
+ goto out;
+ }
+ /*
+ * For SLI2, we need to set port_state to READY
+ * and continue discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* If we get here, there is nothing to ADISC */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_issue_clear_la(phba, vport);
+
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list, issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &=
+ ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(
+ shost->host_lock);
+ lpfc_can_disctmo(vport);
}
}
+ vport->port_state = LPFC_VPORT_READY;
} else {
- lpfc_rscn_disc(phba);
+ lpfc_rscn_disc(vport);
}
}
}
@@ -1136,23 +1299,22 @@ out:
}
int
-lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
ADISC *ap;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
uint16_t cmdsize;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-
- cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_ADISC);
+ cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ADISC);
if (!elsiocb)
return 1;
@@ -1161,81 +1323,97 @@ lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
/* For ADISC request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* Fill in ADISC payload */
ap = (ADISC *) pcmd;
ap->hardAL_PA = phba->fc_pref_ALPA;
- memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
- ap->DID = be32_to_cpu(phba->fc_myDID);
+ memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ap->DID = be32_to_cpu(vport->fc_myDID);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue ADISC: did:x%x",
+ ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitADISC++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
+ spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_ADISC_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
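
One behavioral change visible in the ADISC (and PRLI/LOGO) issue paths above: the NLP_*_SND flag is still set before submission, but the Scsi_Host lock is now dropped before lpfc_sli_issue_iocb() and only re-taken to clear the flag on failure, where the old code held the lock across the submit. A condensed sketch of the new sequence, using the names from the hunk:

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_ADISC_SND;		/* mark command outstanding */
	spin_unlock_irq(shost->host_lock);		/* lock not held across submit */

	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_ADISC_SND;	/* undo on failure */
		spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);	/* issuer owns cleanup on error */
		return 1;
	}
	return 0;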
static void
-lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
- struct lpfc_nodelist *ndlp;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0105 LOGO completes to NPort x%x "
+ "%d (%d):0105 LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- phba->num_disc_nodes);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+ vport->num_disc_nodes);
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba))
+ if (lpfc_els_chk_latt(vport))
goto out;
+ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ /* NLP_EVT_DEVICE_RM should unregister the RPI
+ * which should abort all outstanding IOs.
+ */
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_DEVICE_RM);
+ goto out;
+ }
+
if (irsp->ulpStatus) {
/* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
/* ELS command is being retried */
goto out;
- }
/* LOGO failed */
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+ if (lpfc_error_lost_link(irsp))
goto out;
- } else {
- lpfc_disc_state_machine(phba, ndlp, cmdiocb,
- NLP_EVT_CMPL_LOGO);
- }
+ else
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_LOGO);
} else {
/* Good status, call state machine.
* This will unregister the rpi if needed.
*/
- lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+ lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+ NLP_EVT_CMPL_LOGO);
}
out:
@@ -1244,75 +1422,91 @@ out:
}
int
-lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
+ int rc;
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
- cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_LOGO);
+ cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LOGO);
if (!elsiocb)
return 1;
icmd = &elsiocb->iocb;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* Fill in LOGO payload */
- *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
+ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO: did:x%x",
+ ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_unlock_irq(shost->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+
+ if (rc == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
IOCB_t *irsp;
irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "ELS cmd cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.elsreq64.remoteID);
+
/* ELS cmd tag <ulpIoTag> completes */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
/* Check to see if link went down during discovery */
- lpfc_els_chk_latt(phba);
+ lpfc_els_chk_latt(vport);
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
-lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
@@ -1323,15 +1517,16 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (SCR));
+ cmdsize = (sizeof(uint32_t) + sizeof(SCR));
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 1;
- lpfc_nlp_init(phba, ndlp, nportid);
+ lpfc_nlp_init(vport, ndlp, nportid);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_SCR);
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_SCR);
if (!elsiocb) {
lpfc_nlp_put(ndlp);
return 1;
@@ -1341,29 +1536,31 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* For SCR, remainder of payload is SCR parameter page */
- memset(pcmd, 0, sizeof (SCR));
+ memset(pcmd, 0, sizeof(SCR));
((SCR *) pcmd)->Function = SCR_FUNC_FULL;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue SCR: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
phba->fc_stat.elsXmitSCR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- spin_lock_irq(phba->host->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
return 0;
}
static int
-lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
@@ -1377,14 +1574,15 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = (sizeof (uint32_t) + sizeof (FARP));
+ cmdsize = (sizeof(uint32_t) + sizeof(FARP));
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return 1;
- lpfc_nlp_init(phba, ndlp, nportid);
- elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_RNID);
+ lpfc_nlp_init(vport, ndlp, nportid);
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_RNID);
if (!elsiocb) {
lpfc_nlp_put(ndlp);
return 1;
@@ -1394,44 +1592,71 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* Fill in FARPR payload */
fp = (FARP *) (pcmd);
- memset(fp, 0, sizeof (FARP));
+ memset(fp, 0, sizeof(FARP));
lp = (uint32_t *) pcmd;
*lp++ = be32_to_cpu(nportid);
- *lp++ = be32_to_cpu(phba->fc_myDID);
+ *lp++ = be32_to_cpu(vport->fc_myDID);
fp->Rflags = 0;
fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
- memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
- if ((ondlp = lpfc_findnode_did(phba, nportid))) {
+ memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ondlp = lpfc_findnode_did(vport, nportid);
+ if (ondlp) {
memcpy(&fp->OportName, &ondlp->nlp_portname,
- sizeof (struct lpfc_name));
+ sizeof(struct lpfc_name));
memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
- sizeof (struct lpfc_name));
+ sizeof(struct lpfc_name));
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FARPR: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
phba->fc_stat.elsXmitFARPR++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- spin_lock_irq(phba->host->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
- spin_unlock_irq(phba->host->host_lock);
lpfc_nlp_put(ndlp);
return 0;
}
+static void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ /*
+ * Check to see if more RSCNs came in while we were
+ * processing this one.
+ */
+ if (vport->fc_rscn_id_cnt ||
+ (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+ lpfc_els_handle_rscn(vport);
+ else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
+ }
+}
+
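
lpfc_end_rscn() factors out the RSCN epilogue that several completion paths previously open-coded: if RSCN mode is active it either re-runs RSCN handling (more RSCNs arrived or RSCN discovery is pending) or clears FC_RSCN_MODE. A typical caller, mirroring lpfc_cancel_retry_delay_tmo() later in this diff, looks like:

	/* usage sketch only */
	if (vport->num_disc_nodes == 0) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);	/* discovery timer no longer needed */
		lpfc_end_rscn(vport);		/* continue or leave RSCN mode */
	}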
void
-lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
+lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
@@ -1439,30 +1664,21 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
list_del_init(&nlp->els_retry_evt.evt_listp);
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- if (phba->num_disc_nodes) {
+ spin_unlock_irq(shost->host_lock);
+ if (vport->num_disc_nodes) {
/* Check to see if there are more
* PLOGIs to be sent
*/
- lpfc_more_plogi(phba);
-
- if (phba->num_disc_nodes == 0) {
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- lpfc_can_disctmo(phba);
- if (phba->fc_flag & FC_RSCN_MODE) {
- /*
- * Check to see if more RSCNs
- * came in while we were
- * processing this one.
- */
- if((phba->fc_rscn_id_cnt==0) &&
- !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
- phba->fc_flag &= ~FC_RSCN_MODE;
- }
- else {
- lpfc_els_handle_rscn(phba);
- }
- }
+ lpfc_more_plogi(vport);
+
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
}
}
}
@@ -1472,18 +1688,19 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
void
lpfc_els_retry_delay(unsigned long ptr)
{
- struct lpfc_nodelist *ndlp;
- struct lpfc_hba *phba;
- unsigned long iflag;
- struct lpfc_work_evt *evtp;
-
- ndlp = (struct lpfc_nodelist *)ptr;
- phba = ndlp->nlp_phba;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long flags;
+ struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+
+ ndlp = (struct lpfc_nodelist *) ptr;
+ phba = ndlp->vport->phba;
evtp = &ndlp->els_retry_evt;
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
@@ -1491,33 +1708,31 @@ lpfc_els_retry_delay(unsigned long ptr)
evtp->evt = LPFC_EVT_ELS_RETRY;
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
- struct lpfc_hba *phba;
- uint32_t cmd;
- uint32_t did;
- uint8_t retry;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ uint32_t cmd, did, retry;
- phba = ndlp->nlp_phba;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
did = ndlp->nlp_DID;
cmd = ndlp->nlp_last_elscmd;
ndlp->nlp_last_elscmd = 0;
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
return;
}
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
/*
* If a discovery event readded nlp_delayfunc after timer
* firing and before processing the timer, cancel the
@@ -1528,57 +1743,54 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
switch (cmd) {
case ELS_CMD_FLOGI:
- lpfc_issue_els_flogi(phba, ndlp, retry);
+ lpfc_issue_els_flogi(vport, ndlp, retry);
break;
case ELS_CMD_PLOGI:
- if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) {
+ if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
}
break;
case ELS_CMD_ADISC:
- if (!lpfc_issue_els_adisc(phba, ndlp, retry)) {
+ if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
}
break;
case ELS_CMD_PRLI:
- if (!lpfc_issue_els_prli(phba, ndlp, retry)) {
+ if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
}
break;
case ELS_CMD_LOGO:
- if (!lpfc_issue_els_logo(phba, ndlp, retry)) {
+ if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
break;
+ case ELS_CMD_FDISC:
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
+ break;
}
return;
}
static int
-lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
- struct lpfc_dmabuf *pcmd;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
uint32_t *elscmd;
struct ls_rjt stat;
- int retry, maxretry;
- int delay;
- uint32_t cmd;
+ int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
+ uint32_t cmd = 0;
uint32_t did;
- retry = 0;
- delay = 0;
- maxretry = lpfc_max_els_tries;
- irsp = &rspiocb->iocb;
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- cmd = 0;
/* Note: context2 may be 0 for internal driver abort
* of a delayed ELS command.
* of a delayed ELS command.
@@ -1594,11 +1806,15 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
else {
/* We should only hit this case for retrying PLOGI */
did = irsp->un.elsreq64.remoteID;
- ndlp = lpfc_findnode_did(phba, did);
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 1;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Retry ELS: wd7:x%x wd4:x%x did:x%x",
+ *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
+
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
case IOSTAT_REMOTE_STOP:
@@ -1607,25 +1823,37 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_PLOGI) {
- if (cmdiocb->retry == 0) {
- delay = 1;
- }
- }
+ if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
+ delay = 1000;
retry = 1;
break;
- case IOERR_SEQUENCE_TIMEOUT:
- retry = 1;
+ case IOERR_ILLEGAL_COMMAND:
+ if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
+ (cmd == ELS_CMD_FDISC)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0124 FDISC failed (3/6) retrying...\n",
+ phba->brd_no, vport->vpi);
+ lpfc_mbx_unreg_vpi(vport);
+ retry = 1;
+ /* Always retry for this case */
+ cmdiocb->retry = 0;
+ }
break;
case IOERR_NO_RESOURCES:
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
- }
+ retry = 1;
+ if (cmdiocb->retry > 100)
+ delay = 100;
+ maxretry = 250;
+ break;
+
+ case IOERR_ILLEGAL_FRAME:
+ delay = 100;
retry = 1;
break;
+ case IOERR_SEQUENCE_TIMEOUT:
case IOERR_INVALID_RPI:
retry = 1;
break;
@@ -1655,27 +1883,57 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_CMD_IN_PROGRESS) {
if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
+ delay = 1000;
maxretry = 48;
}
retry = 1;
break;
}
if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
+ delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
break;
}
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (cmd == ELS_CMD_FDISC) &&
+ (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0125 FDISC Failed (x%x)."
+ " Fabric out of resources\n",
+ phba->brd_no, vport->vpi, stat.un.lsRjtError);
+ lpfc_vport_set_state(vport,
+ FC_VPORT_NO_FABRIC_RSCS);
+ }
break;
case LSRJT_LOGICAL_BSY:
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1;
+ if ((cmd == ELS_CMD_PLOGI) ||
+ (cmd == ELS_CMD_PRLI)) {
+ delay = 1000;
maxretry = 48;
+ } else if (cmd == ELS_CMD_FDISC) {
+ /* Always retry for this case */
+ cmdiocb->retry = 0;
}
retry = 1;
break;
+
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_PROTOCOL_ERR:
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (cmd == ELS_CMD_FDISC) &&
+ ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
+ (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
+ ) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0123 FDISC Failed (x%x)."
+ " Fabric Detected Bad WWN\n",
+ phba->brd_no, vport->vpi, stat.un.lsRjtError);
+ lpfc_vport_set_state(vport,
+ FC_VPORT_FABRIC_REJ_WWN);
+ }
+ break;
}
break;
@@ -1695,21 +1953,27 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
retry = 0;
}
+ if ((vport->load_flag & FC_UNLOADING) != 0)
+ retry = 0;
+
if (retry) {
/* Retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0107 Retry ELS command x%x to remote "
+ "%d (%d):0107 Retry ELS command x%x to remote "
"NPORT x%x Data: x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
cmd, did, cmdiocb->retry, delay);
- if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
+ if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
+ ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
+ ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
+ /* Don't reset timer for no resources */
+
/* If discovery / RSCN timer is running, reset it */
- if (timer_pending(&phba->fc_disctmo) ||
- (phba->fc_flag & FC_RSCN_MODE)) {
- lpfc_set_disctmo(phba);
- }
+ if (timer_pending(&vport->fc_disctmo) ||
+ (vport->fc_flag & FC_RSCN_MODE))
+ lpfc_set_disctmo(vport);
}
phba->fc_stat.elsXmitRetry++;
@@ -1717,50 +1981,62 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
phba->fc_stat.elsDelayRetry++;
ndlp->nlp_retry = cmdiocb->retry;
- mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ /* delay is specified in milliseconds */
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(delay));
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ if (cmd == ELS_CMD_PRLI)
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ else
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
ndlp->nlp_last_elscmd = cmd;
return 1;
}
switch (cmd) {
case ELS_CMD_FLOGI:
- lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
+ lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
+ return 1;
+ case ELS_CMD_FDISC:
+ lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PLOGI:
if (ndlp) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
}
- lpfc_issue_els_plogi(phba, did, cmdiocb->retry);
+ lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
return 1;
case ELS_CMD_ADISC:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PRLI:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_LOGO:
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
return 1;
}
}
/* No retry ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0108 No retry ELS command x%x to remote NPORT x%x "
- "Data: x%x\n",
- phba->brd_no,
+ "%d (%d):0108 No retry ELS command x%x to remote "
+ "NPORT x%x Data: x%x\n",
+ phba->brd_no, vport->vpi,
cmd, did, cmdiocb->retry);
return 0;
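
The retry path now treats delay as milliseconds (1000 for the one-second PLOGI back-offs, 100 for the resource and illegal-frame cases) and arms the node's delay timer with msecs_to_jiffies(), where the old code always used a fixed jiffies + HZ. In sketch form, using the names from the hunk above:

	if (delay) {
		phba->fc_stat.elsDelayRetry++;
		ndlp->nlp_retry = cmdiocb->retry;
		/* delay carries milliseconds, not jiffies */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay));

		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = cmd;	/* replayed by lpfc_els_retry_delay_handler() */
	}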
@@ -1795,33 +2071,36 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
}
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, elsiocb);
- spin_unlock_irq(phba->host->host_lock);
return 0;
}
static void
-lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "ACC LOGO cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0109 ACC to LOGO completes to NPort x%x "
+ "%d (%d):0109 ACC to LOGO completes to NPort x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ phba->brd_no, vport->vpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
switch (ndlp->nlp_state) {
case NLP_STE_UNUSED_NODE: /* node is just allocated */
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case NLP_STE_NPR_NODE: /* NPort Recovery mode */
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
break;
default:
break;
@@ -1830,24 +2109,38 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
return;
}
+void
+lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+
+ pmb->context1 = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_nlp_put(ndlp);
+ return;
+}
+
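
lpfc_mbx_cmpl_dflt_rpi() is the mailbox completion used when an ACC response only carried a default RPI that is to be unregistered immediately; it frees the mailbox buffer and drops the node reference taken when the mailbox was queued. The wiring, as set up in lpfc_cmpl_els_rsp() further below, is roughly:

	/* sketch of the setup done in lpfc_cmpl_els_rsp() */
	mbox->context2 = lpfc_nlp_get(ndlp);		/* reference released in the completion */
	mbox->vport = vport;
	if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
		mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;	/* unregister the default RPI right away */
		mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
	}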
static void
-lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
+ struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
IOCB_t *irsp;
- struct lpfc_nodelist *ndlp;
LPFC_MBOXQ_t *mbox = NULL;
- struct lpfc_dmabuf *mp;
+ struct lpfc_dmabuf *mp = NULL;
irsp = &rspiocb->iocb;
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
-
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba) || !ndlp) {
+ if (!ndlp || lpfc_els_chk_latt(vport)) {
if (mbox) {
mp = (struct lpfc_dmabuf *) mbox->context1;
if (mp) {
@@ -1859,24 +2152,37 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "ACC cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->un.rcvels.remoteID);
+
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0110 ELS response tag x%x completes "
+ "%d (%d):0110 ELS response tag x%x completes "
"Data: x%x x%x x%x x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0)
&& (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
- lpfc_unreg_rpi(phba, ndlp);
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ lpfc_unreg_rpi(vport, ndlp);
mbox->context2 = lpfc_nlp_get(ndlp);
- ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ mbox->vport = vport;
+ if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
+ mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ }
+ else {
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ }
if (lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB))
!= MBX_NOT_FINISHED) {
@@ -1886,15 +2192,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NOTE: we should have messages for unsuccessful
reglogin */
} else {
- /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
- if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
- (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
- (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
- if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- lpfc_drop_node(phba, ndlp);
- ndlp = NULL;
- }
+ /* Do not drop node for lpfc_els_abort'ed ELS cmds */
+ if (!lpfc_error_lost_link(irsp) &&
+ ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ lpfc_drop_node(vport, ndlp);
+ ndlp = NULL;
}
}
mp = (struct lpfc_dmabuf *) mbox->context1;
@@ -1906,19 +2208,21 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out:
if (ndlp) {
- spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+ spin_unlock_irq(shost->host_lock);
}
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
-lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
- LPFC_MBOXQ_t * mbox, uint8_t newnode)
+lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+ LPFC_MBOXQ_t *mbox, uint8_t newnode)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
@@ -1935,23 +2239,30 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
switch (flag) {
case ELS_CMD_ACC:
- cmdsize = sizeof (uint32_t);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ cmdsize = sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
return 1;
}
+
icmd = &elsiocb->iocb;
icmd->ulpContext = oldcmd->ulpContext; /* Xri */
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
case ELS_CMD_PLOGI:
- cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -1963,12 +2274,16 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
elsiocb->context_un.mbox = mbox;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
- memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PLOGI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
case ELS_CMD_PRLO:
- cmdsize = sizeof (uint32_t) + sizeof (PRLO);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
+ cmdsize = sizeof(uint32_t) + sizeof(PRLO);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
if (!elsiocb)
return 1;
@@ -1978,10 +2293,14 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
- sizeof (uint32_t) + sizeof (PRLO));
+ sizeof(uint32_t) + sizeof(PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PRLO: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
default:
return 1;
@@ -1994,25 +2313,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
/* Xmit ELS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
+ "%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
"DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
if (ndlp->nlp_flag & NLP_LOGO_ACC) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
} else {
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
- spin_lock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2021,9 +2338,11 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
}
int
-lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
+ LPFC_MBOXQ_t *mbox)
{
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
@@ -2036,9 +2355,9 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = 2 * sizeof (uint32_t);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ cmdsize = 2 * sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
if (!elsiocb)
return 1;
@@ -2048,22 +2367,30 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
*((uint32_t *) (pcmd)) = rejectError;
+ if (mbox) {
+ elsiocb->context_un.mbox = mbox;
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ }
+
/* Xmit ELS RJT <err> response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, rejectError, elsiocb->iotag,
+ "%d (%d):0129 Xmit ELS RJT x%x response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ phba->brd_no, vport->vpi, rejectError, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue LS_RJT: did:x%x flg:x%x err:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
+
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
- spin_lock_irq(phba->host->host_lock);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2072,25 +2399,22 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
}
int
-lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
+ struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
ADISC *ap;
- IOCB_t *icmd;
- IOCB_t *oldcmd;
+ IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-
- cmdsize = sizeof (uint32_t) + sizeof (ADISC);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ cmdsize = sizeof(uint32_t) + sizeof(ADISC);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -2100,28 +2424,30 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0130 Xmit ADISC ACC response iotag x%x xri: "
+ "%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
ap = (ADISC *) (pcmd);
ap->hardAL_PA = phba->fc_pref_ALPA;
- memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
- ap->DID = be32_to_cpu(phba->fc_myDID);
+ memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+ ap->DID = be32_to_cpu(vport->fc_myDID);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC ADISC: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
- spin_lock_irq(phba->host->host_lock);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2130,9 +2456,10 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
}
int
-lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
+lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
PRLI *npr;
lpfc_vpd_t *vpd;
IOCB_t *icmd;
@@ -2147,8 +2474,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
- cmdsize = sizeof (uint32_t) + sizeof (PRLI);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp,
+ cmdsize = sizeof(uint32_t) + sizeof(PRLI);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
if (!elsiocb)
return 1;
@@ -2159,19 +2486,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, "
+ "%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
- memset(pcmd, 0, sizeof (PRLI));
+ memset(pcmd, 0, sizeof(PRLI));
npr = (PRLI *) pcmd;
vpd = &phba->vpd;
@@ -2193,12 +2520,14 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC PRLI: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
- spin_lock_irq(phba->host->host_lock);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2207,12 +2536,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
}
static int
-lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
+lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
RNID *rn;
- IOCB_t *icmd;
- IOCB_t *oldcmd;
+ IOCB_t *icmd, *oldcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
@@ -2223,13 +2552,13 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
psli = &phba->sli;
pring = &psli->ring[LPFC_ELS_RING];
- cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
- + (2 * sizeof (struct lpfc_name));
+ cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ + (2 * sizeof(struct lpfc_name));
if (format)
- cmdsize += sizeof (RNID_TOP_DISC);
+ cmdsize += sizeof(RNID_TOP_DISC);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -2239,30 +2568,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0132 Xmit RNID ACC response tag x%x "
+ "%d (%d):0132 Xmit RNID ACC response tag x%x "
"xri x%x\n",
- phba->brd_no, elsiocb->iotag,
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t);
+ pcmd += sizeof(uint32_t);
- memset(pcmd, 0, sizeof (RNID));
+ memset(pcmd, 0, sizeof(RNID));
rn = (RNID *) (pcmd);
rn->Format = format;
- rn->CommonLen = (2 * sizeof (struct lpfc_name));
- memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
- memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
+ rn->CommonLen = (2 * sizeof(struct lpfc_name));
+ memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
switch (format) {
case 0:
rn->SpecificLen = 0;
break;
case RNID_TOPOLOGY_DISC:
- rn->SpecificLen = sizeof (RNID_TOP_DISC);
+ rn->SpecificLen = sizeof(RNID_TOP_DISC);
memcpy(&rn->un.topologyDisc.portName,
- &phba->fc_portname, sizeof (struct lpfc_name));
+ &vport->fc_portname, sizeof(struct lpfc_name));
rn->un.topologyDisc.unitType = RNID_HBA;
rn->un.topologyDisc.physPort = 0;
rn->un.topologyDisc.attachedNodes = 0;
@@ -2273,15 +2602,17 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
break;
}
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC RNID: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
lpfc_nlp_put(ndlp);
elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
* it could be freed */
- spin_lock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
- spin_unlock_irq(phba->host->host_lock);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
@@ -2290,168 +2621,153 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
}
int
-lpfc_els_disc_adisc(struct lpfc_hba *phba)
+lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
- int sentadisc;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ int sentadisc = 0;
- sentadisc = 0;
/* go thru NPR nodes and issue any remaining ELS ADISCs */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
sentadisc++;
- phba->num_disc_nodes++;
- if (phba->num_disc_nodes >=
- phba->cfg_discovery_threads) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->phba->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
break;
}
}
}
if (sentadisc == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
}
return sentadisc;
}
int
-lpfc_els_disc_plogi(struct lpfc_hba * phba)
+lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
- int sentplogi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ int sentplogi = 0;
- sentplogi = 0;
- /* go thru NPR list and issue any remaining ELS PLOGIs */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
+ /* go thru NPR nodes and issue any remaining ELS PLOGIs */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
sentplogi++;
- phba->num_disc_nodes++;
- if (phba->num_disc_nodes >=
- phba->cfg_discovery_threads) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ vport->num_disc_nodes++;
+ if (vport->num_disc_nodes >=
+ vport->phba->cfg_discovery_threads) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
break;
}
}
}
if (sentplogi == 0) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NLP_MORE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NLP_MORE;
+ spin_unlock_irq(shost->host_lock);
}
return sentplogi;
}
-int
-lpfc_els_flush_rscn(struct lpfc_hba * phba)
+void
+lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
- struct lpfc_dmabuf *mp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
int i;
- for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
- mp = phba->fc_rscn_id_list[i];
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- phba->fc_rscn_id_list[i] = NULL;
- }
- phba->fc_rscn_id_cnt = 0;
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
- spin_unlock_irq(phba->host->host_lock);
- lpfc_can_disctmo(phba);
- return 0;
+ for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+ lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
+ vport->fc_rscn_id_list[i] = NULL;
+ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_rscn_id_cnt = 0;
+ vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
}
int
-lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
+lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
D_ID ns_did;
D_ID rscn_did;
- struct lpfc_dmabuf *mp;
uint32_t *lp;
- uint32_t payload_len, cmd, i, match;
+ uint32_t payload_len, i;
+ struct lpfc_hba *phba = vport->phba;
ns_did.un.word = did;
- match = 0;
/* Never match fabric nodes for RSCNs */
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
- return(0);
+ return 0;
/* If we are doing a FULL RSCN rediscovery, match everything */
- if (phba->fc_flag & FC_RSCN_DISCOVERY) {
+ if (vport->fc_flag & FC_RSCN_DISCOVERY)
return did;
- }
- for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
- mp = phba->fc_rscn_id_list[i];
- lp = (uint32_t *) mp->virt;
- cmd = *lp++;
- payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
- payload_len -= sizeof (uint32_t); /* take off word 0 */
+ for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
+ lp = vport->fc_rscn_id_list[i]->virt;
+ payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+ payload_len -= sizeof(uint32_t); /* take off word 0 */
while (payload_len) {
- rscn_did.un.word = *lp++;
- rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
- payload_len -= sizeof (uint32_t);
+ rscn_did.un.word = be32_to_cpu(*lp++);
+ payload_len -= sizeof(uint32_t);
switch (rscn_did.un.b.resv) {
case 0: /* Single N_Port ID effected */
- if (ns_did.un.word == rscn_did.un.word) {
- match = did;
- }
+ if (ns_did.un.word == rscn_did.un.word)
+ return did;
break;
case 1: /* Whole N_Port Area effected */
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area))
- {
- match = did;
- }
+ return did;
break;
case 2: /* Whole N_Port Domain effected */
if (ns_did.un.b.domain == rscn_did.un.b.domain)
- {
- match = did;
- }
- break;
- case 3: /* Whole Fabric effected */
- match = did;
+ return did;
break;
default:
- /* Unknown Identifier in RSCN list */
+ /* Unknown Identifier in RSCN node */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0217 Unknown Identifier in "
- "RSCN payload Data: x%x\n",
- phba->brd_no, rscn_did.un.word);
- break;
- }
- if (match) {
- break;
+ "%d (%d):0217 Unknown "
+ "Identifier in RSCN payload "
+ "Data: x%x\n",
+ phba->brd_no, vport->vpi,
+ rscn_did.un.word);
+ case 3: /* Whole Fabric effected */
+ return did;
}
}
}
- return match;
+ return 0;
}
static int
-lpfc_rscn_recovery_check(struct lpfc_hba *phba)
+lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL;
@@ -2459,188 +2775,261 @@ lpfc_rscn_recovery_check(struct lpfc_hba *phba)
* them to NPR state.
*/
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
- lpfc_rscn_payload_check(phba, ndlp->nlp_DID) == 0)
+ lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
continue;
- lpfc_disc_state_machine(phba, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
/*
* Make sure NLP_DELAY_TMO is NOT running after a device
* recovery event.
*/
if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
return 0;
}
static int
-lpfc_els_rcv_rscn(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp, uint8_t newnode)
+lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp, uint8_t newnode)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
- uint32_t *lp;
+ struct lpfc_vport *next_vport;
+ uint32_t *lp, *datap;
IOCB_t *icmd;
- uint32_t payload_len, cmd;
+ uint32_t payload_len, length, nportid, *cmd;
+ int rscn_cnt = vport->fc_rscn_id_cnt;
+ int rscn_id = 0, hba_id = 0;
int i;
icmd = &cmdiocb->iocb;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
- cmd = *lp++;
- payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
- payload_len -= sizeof (uint32_t); /* take off word 0 */
- cmd &= ELS_CMD_MASK;
+ payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+ payload_len -= sizeof(uint32_t); /* take off word 0 */
/* RSCN received */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
+ *lp, rscn_cnt);
for (i = 0; i < payload_len/sizeof(uint32_t); i++)
- fc_host_post_event(phba->host, fc_get_event_number(),
+ fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_RSCN, lp[i]);
/* If we are about to begin discovery, just ACC the RSCN.
* Discovery processing will satisfy it.
*/
- if (phba->hba_state <= LPFC_NS_QRY) {
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
- newnode);
+ if (vport->port_state <= LPFC_NS_QRY) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ newnode);
return 0;
}
+ /* If this RSCN just contains NPortIDs for other vports on this HBA,
+ * just ACC and ignore it.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(phba->cfg_peer_port_login)) {
+ i = payload_len;
+ datap = lp;
+ while (i > 0) {
+ nportid = *datap++;
+ nportid = ((be32_to_cpu(nportid)) & Mask_DID);
+ i -= sizeof(uint32_t);
+ rscn_id++;
+ list_for_each_entry(next_vport, &phba->port_list,
+ listentry) {
+ if (nportid == next_vport->fc_myDID) {
+ hba_id++;
+ break;
+ }
+ }
+ }
+ if (rscn_id == hba_id) {
+ /* ALL NPortIDs in RSCN are on HBA */
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
+ *lp, rscn_cnt);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state,
+ ndlp->nlp_flag);
+
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
+ ndlp, NULL, newnode);
+ return 0;
+ }
+ }
+
/* If we are already processing an RSCN, save the received
* RSCN payload buffer, cmdiocb->context2 to process later.
*/
- if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
- if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
- !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
- phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
-
- /* If we zero, cmdiocb->context2, the calling
- * routine will not try to free it.
- */
- cmdiocb->context2 = NULL;
+ if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ vport->fc_flag |= FC_RSCN_DEFERRED;
+ if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
+ !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ if (rscn_cnt) {
+ cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
+ length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
+ }
+ if ((rscn_cnt) &&
+ (payload_len + length <= LPFC_BPL_SIZE)) {
+ *cmd &= ELS_CMD_MASK;
+ *cmd |= be32_to_cpu(payload_len + length);
+ memcpy(((uint8_t *)cmd) + length, lp,
+ payload_len);
+ } else {
+ vport->fc_rscn_id_list[rscn_cnt] = pcmd;
+ vport->fc_rscn_id_cnt++;
+ /* If we zero, cmdiocb->context2, the calling
+ * routine will not try to free it.
+ */
+ cmdiocb->context2 = NULL;
+ }
/* Deferred RSCN */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0235 Deferred RSCN "
+ "%d (%d):0235 Deferred RSCN "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->fc_rscn_id_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi,
+ vport->fc_rscn_id_cnt, vport->fc_flag,
+ vport->port_state);
} else {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_RSCN_DISCOVERY;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
/* ReDiscovery RSCN */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0234 ReDiscovery RSCN "
+ "%d (%d):0234 ReDiscovery RSCN "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->fc_rscn_id_cnt,
- phba->fc_flag, phba->hba_state);
+ phba->brd_no, vport->vpi,
+ vport->fc_rscn_id_cnt, vport->fc_flag,
+ vport->port_state);
}
/* Send back ACC */
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
newnode);
/* send RECOVERY event for ALL nodes that match RSCN payload */
- lpfc_rscn_recovery_check(phba);
+ lpfc_rscn_recovery_check(vport);
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
return 0;
}
- phba->fc_flag |= FC_RSCN_MODE;
- phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RSCN: did:x%x/ste:x%x flg:x%x",
+ ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
/*
* If we zero, cmdiocb->context2, the calling routine will
* not try to free it.
*/
cmdiocb->context2 = NULL;
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
/* Send back ACC */
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
/* send RECOVERY event for ALL nodes that match RSCN payload */
- lpfc_rscn_recovery_check(phba);
+ lpfc_rscn_recovery_check(vport);
- return lpfc_els_handle_rscn(phba);
+ return lpfc_els_handle_rscn(vport);
}
int
-lpfc_els_handle_rscn(struct lpfc_hba * phba)
+lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Ignore RSCN if the port is being torn down. */
+ if (vport->load_flag & FC_UNLOADING) {
+ lpfc_els_flush_rscn(vport);
+ return 0;
+ }
/* Start timer for RSCN processing */
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
/* RSCN processed */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- phba->fc_flag, 0, phba->fc_rscn_id_cnt,
- phba->hba_state);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ vport->fc_flag, 0, vport->fc_rscn_id_cnt,
+ vport->port_state);
/* To process RSCN, first compare RSCN data with NameServer */
- phba->fc_ns_retry = 0;
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
+ vport->fc_ns_retry = 0;
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
- if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
/* Wait for NameServer query cmpl before we can
continue */
return 1;
- }
} else {
/* If login to NameServer does not exist, issue one */
/* Good status, issue PLOGI to NameServer */
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (ndlp) {
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ndlp)
/* Wait for NameServer login cmpl before we can
continue */
return 1;
- }
+
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp) {
- lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_rscn(vport);
return 0;
} else {
- lpfc_nlp_init(phba, ndlp, NameServer_DID);
+ lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_type |= NLP_FABRIC;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, NameServer_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, NameServer_DID, 0);
/* Wait for NameServer login cmpl before we can
continue */
return 1;
}
}
- lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_rscn(vport);
return 0;
}
static int
-lpfc_els_rcv_flogi(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp, uint8_t newnode)
+lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp, uint8_t newnode)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
uint32_t *lp = (uint32_t *) pcmd->virt;
IOCB_t *icmd = &cmdiocb->iocb;
@@ -2655,7 +3044,7 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
/* FLOGI received */
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
if (phba->fc_topology == TOPOLOGY_LOOP) {
/* We should never receive a FLOGI in loop mode, ignore it */
@@ -2664,33 +3053,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "%d:0113 An FLOGI ELS command x%x was received "
- "from DID x%x in Loop Mode\n",
- phba->brd_no, cmd, did);
+ "%d (%d):0113 An FLOGI ELS command x%x was "
+ "received from DID x%x in Loop Mode\n",
+ phba->brd_no, vport->vpi, cmd, did);
return 1;
}
did = Fabric_DID;
- if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
/* For a FLOGI we accept, then if our portname is greater
* than the remote portname we initiate Nport login.
*/
- rc = memcmp(&phba->fc_portname, &sp->portName,
- sizeof (struct lpfc_name));
+ rc = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
if (!rc) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL)) == 0) {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
return 1;
- }
+
lpfc_linkdown(phba);
lpfc_init_link(phba, mbox,
phba->cfg_topology,
phba->cfg_link_speed);
mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
rc = lpfc_sli_issue_mbox
(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
lpfc_set_loopback_flag(phba);
@@ -2699,31 +3089,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
}
return 1;
} else if (rc > 0) { /* greater than */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_PT2PT_PLOGI;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT_PLOGI;
+ spin_unlock_irq(shost->host_lock);
}
- phba->fc_flag |= FC_PT2PT;
- phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_PT2PT;
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
} else {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
return 1;
}
/* Send back ACC */
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
return 0;
}
static int
-lpfc_els_rcv_rnid(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -2746,7 +3139,7 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
case 0:
case RNID_TOPOLOGY_DISC:
/* Send back ACC */
- lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
+ lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
break;
default:
/* Reject this request because format not supported */
@@ -2754,14 +3147,15 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
}
return 0;
}
static int
-lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_nodelist *ndlp)
+lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct ls_rjt stat;
@@ -2770,15 +3164,15 @@ lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return 0;
}
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
MAILBOX_t *mb;
IOCB_t *icmd;
RPS_RSP *rps_rsp;
@@ -2788,8 +3182,6 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t xri, status;
uint32_t cmdsize;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
mb = &pmb->mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
@@ -2804,8 +3196,9 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
mempool_free(pmb, phba->mbox_mem_pool);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_ACC);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
lpfc_nlp_put(ndlp);
if (!elsiocb)
return;
@@ -2815,14 +3208,14 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint32_t); /* Skip past command */
+ pcmd += sizeof(uint32_t); /* Skip past command */
rps_rsp = (RPS_RSP *)pcmd;
if (phba->fc_topology != TOPOLOGY_LOOP)
status = 0x10;
else
status = 0x8;
- if (phba->fc_flag & FC_FABRIC)
+ if (phba->pport->fc_flag & FC_FABRIC)
status |= 0x4;
rps_rsp->rsvd1 = 0;
@@ -2836,25 +3229,25 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Xmit ELS RPS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ "%d (%d):0118 Xmit ELS RPS ACC response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ phba->brd_no, ndlp->vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
-
- if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
lpfc_els_free_iocb(phba, elsiocb);
- }
return;
}
static int
-lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
uint32_t *lp;
uint8_t flag;
LPFC_MBOXQ_t *mbox;
@@ -2868,7 +3261,8 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2878,19 +3272,24 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if ((flag == 0) ||
((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
- ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname,
- sizeof (struct lpfc_name)) == 0))) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+ ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
+ sizeof(struct lpfc_name)) == 0))) {
+
+ printk("Fix me....\n");
+ dump_stack();
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->context1 =
- (void *)((unsigned long)cmdiocb->iocb.ulpContext);
+ (void *)((unsigned long) cmdiocb->iocb.ulpContext);
mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
if (lpfc_sli_issue_mbox (phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) {
+ (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
/* Mbox completion will send ELS Response */
return 0;
- }
+
lpfc_nlp_put(ndlp);
mempool_free(mbox, phba->mbox_mem_pool);
}
@@ -2899,27 +3298,25 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return 0;
}
static int
-lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
- struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
- IOCB_t *icmd;
- IOCB_t *oldcmd;
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
- struct lpfc_sli_ring *pring;
- struct lpfc_sli *psli;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
uint8_t *pcmd;
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
- elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
- ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
@@ -2929,7 +3326,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
- pcmd += sizeof (uint16_t);
+ pcmd += sizeof(uint16_t);
*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
pcmd += sizeof(uint16_t);
@@ -2937,8 +3334,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
rpl_rsp.listLen = be32_to_cpu(1);
rpl_rsp.index = 0;
rpl_rsp.port_num_blk.portNum = 0;
- rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID);
- memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname,
+ rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
+ memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
sizeof(struct lpfc_name));
memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
@@ -2946,13 +3343,14 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
/* Xmit ELS RPL ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, "
- "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- phba->brd_no, elsiocb->iotag,
+ "%d (%d):0120 Xmit ELS RPL ACC response tag x%x "
+ "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+ "rpi x%x\n",
+ phba->brd_no, vport->vpi, elsiocb->iotag,
elsiocb->iocb.ulpContext, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
@@ -2963,8 +3361,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
}
static int
-lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -2979,7 +3377,8 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2996,15 +3395,16 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
} else {
cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
}
- lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp);
+ lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
return 0;
}
static int
-lpfc_els_rcv_farp(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
@@ -3020,11 +3420,9 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
fp = (FARP *) lp;
/* FARP-REQ received from DID <did> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0601 FARP-REQ received from DID x%x\n",
- phba->brd_no, did);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0601 FARP-REQ received from DID x%x\n",
+ phba->brd_no, vport->vpi, did);
/* We will only support match on WWPN or WWNN */
if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
@@ -3034,15 +3432,15 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
cnt = 0;
/* If this FARP command is searching for my portname */
if (fp->Mflags & FARP_MATCH_PORT) {
- if (memcmp(&fp->RportName, &phba->fc_portname,
- sizeof (struct lpfc_name)) == 0)
+ if (memcmp(&fp->RportName, &vport->fc_portname,
+ sizeof(struct lpfc_name)) == 0)
cnt = 1;
}
/* If this FARP command is searching for my nodename */
if (fp->Mflags & FARP_MATCH_NODE) {
- if (memcmp(&fp->RnodeName, &phba->fc_nodename,
- sizeof (struct lpfc_name)) == 0)
+ if (memcmp(&fp->RnodeName, &vport->fc_nodename,
+ sizeof(struct lpfc_name)) == 0)
cnt = 1;
}
@@ -3052,28 +3450,28 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
/* Log back into the node before sending the FARP. */
if (fp->Rflags & FARP_REQUEST_PLOGI) {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
/* Send a FARP response to that node */
- if (fp->Rflags & FARP_REQUEST_FARPR) {
- lpfc_issue_els_farpr(phba, did, 0);
- }
+ if (fp->Rflags & FARP_REQUEST_FARPR)
+ lpfc_issue_els_farpr(vport, did, 0);
}
}
return 0;
}
static int
-lpfc_els_rcv_farpr(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
uint32_t cmd, did;
+ struct lpfc_hba *phba = vport->phba;
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
@@ -3082,21 +3480,18 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
cmd = *lp++;
/* FARP-RSP received from DID <did> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0600 FARP-RSP received from DID x%x\n",
- phba->brd_no, did);
-
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0600 FARP-RSP received from DID x%x\n",
+ phba->brd_no, vport->vpi, did);
/* ACCEPT the Farp resp request */
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
return 0;
}
static int
-lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_nodelist * fan_ndlp)
+lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *fan_ndlp)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -3104,10 +3499,12 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
uint32_t cmd, did;
FAN *fp;
struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
/* FAN received */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
- phba->brd_no);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0265 FAN received\n",
+ phba->brd_no, vport->vpi);
icmd = &cmdiocb->iocb;
did = icmd->un.elsreq64.remoteID;
@@ -3115,11 +3512,11 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
lp = (uint32_t *)pcmd->virt;
cmd = *lp++;
- fp = (FAN *)lp;
+ fp = (FAN *) lp;
/* FAN received; Fan does not have a reply sequence */
- if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+ if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
sizeof(struct lpfc_name)) != 0) ||
(memcmp(&phba->fc_fabparam.portName, &fp->FportName,
@@ -3130,7 +3527,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
*/
list_for_each_entry_safe(ndlp, next_ndlp,
- &phba->fc_nodes, nlp_listp) {
+ &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3138,24 +3535,24 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
* Clean up old Fabric, Nameserver and
* other NLP_FABRIC logins
*/
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding I/O now since this
* device is marked for PLOGI
*/
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
}
}
- phba->hba_state = LPFC_FLOGI;
- lpfc_set_disctmo(phba);
- lpfc_initial_flogi(phba);
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+ lpfc_initial_flogi(vport);
return 0;
}
/* Discovery not needed,
* move the nodes to their original state.
*/
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
@@ -3163,13 +3560,13 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
switch (ndlp->nlp_prev_state) {
case NLP_STE_UNMAPPED_NODE:
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_UNMAPPED_NODE);
break;
case NLP_STE_MAPPED_NODE:
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp,
+ lpfc_nlp_set_state(vport, ndlp,
NLP_STE_MAPPED_NODE);
break;
@@ -3179,7 +3576,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
/* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
}
return 0;
}
@@ -3187,42 +3584,42 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
void
lpfc_els_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba;
+ struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_hba *phba = vport->phba;
unsigned long iflag;
- phba = (struct lpfc_hba *)ptr;
- if (phba == 0)
- return;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
- phba->work_hba_events |= WORKER_ELS_TMO;
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+ vport->work_port_events |= WORKER_ELS_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ else
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
return;
}
void
-lpfc_els_timeout_handler(struct lpfc_hba *phba)
+lpfc_els_timeout_handler(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
struct lpfc_dmabuf *pcmd;
- uint32_t *elscmd;
- uint32_t els_command=0;
+ uint32_t els_command = 0;
uint32_t timeout;
- uint32_t remote_ID;
+ uint32_t remote_ID = 0xffffffff;
- if (phba == 0)
- return;
- spin_lock_irq(phba->host->host_lock);
/* If the timer is already canceled do nothing */
- if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
return;
}
+ spin_lock_irq(&phba->hbalock);
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = &phba->sli.ring[LPFC_ELS_RING];
@@ -3230,63 +3627,70 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
- if ((piocb->iocb_flag & LPFC_IO_LIBDFC) ||
- (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN) ||
- (piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)) {
+ if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
+ piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+ piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
continue;
- }
+
+ if (piocb->vport != vport)
+ continue;
+
pcmd = (struct lpfc_dmabuf *) piocb->context2;
- if (pcmd) {
- elscmd = (uint32_t *) (pcmd->virt);
- els_command = *elscmd;
- }
+ if (pcmd)
+ els_command = *(uint32_t *) (pcmd->virt);
- if ((els_command == ELS_CMD_FARP)
- || (els_command == ELS_CMD_FARPR)) {
+ if (els_command == ELS_CMD_FARP ||
+ els_command == ELS_CMD_FARPR ||
+ els_command == ELS_CMD_FDISC)
+ continue;
+
+ if (vport != piocb->vport)
continue;
- }
if (piocb->drvrTimeout > 0) {
- if (piocb->drvrTimeout >= timeout) {
+ if (piocb->drvrTimeout >= timeout)
piocb->drvrTimeout -= timeout;
- } else {
+ else
piocb->drvrTimeout = 0;
- }
continue;
}
- if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
- struct lpfc_nodelist *ndlp;
- ndlp = __lpfc_findnode_rpi(phba, cmd->ulpContext);
- remote_ID = ndlp->nlp_DID;
- } else {
+ remote_ID = 0xffffffff;
+ if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
remote_ID = cmd->un.elsreq64.remoteID;
+ else {
+ struct lpfc_nodelist *ndlp;
+ ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+ if (ndlp)
+ remote_ID = ndlp->nlp_DID;
}
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_ELS,
- "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
- phba->brd_no, els_command,
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0127 ELS timeout Data: x%x x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi, els_command,
remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
}
- if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
- mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+ spin_unlock_irq(&phba->hbalock);
- spin_unlock_irq(phba->host->host_lock);
+ if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+ mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
void
-lpfc_els_flush_cmd(struct lpfc_hba *phba)
+lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
- spin_lock_irq(phba->host->host_lock);
+ lpfc_fabric_abort_vport(vport);
+
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -3301,271 +3705,1042 @@ lpfc_els_flush_cmd(struct lpfc_hba *phba)
cmd->ulpCommand == CMD_ABORT_XRI_CN)
continue;
+ if (piocb->vport != vport)
+ continue;
+
list_move_tail(&piocb->list, &completions);
pring->txq_cnt--;
-
}
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- cmd = &piocb->iocb;
-
if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
continue;
}
+ if (piocb->vport != vport)
+ continue;
+
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
- while(!list_empty(&completions)) {
+ while (!list_empty(&completions)) {
piocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &piocb->iocb;
- list_del(&piocb->list);
+ list_del_init(&piocb->list);
- if (piocb->iocb_cmpl) {
+ if (!piocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, piocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(piocb->iocb_cmpl) (phba, piocb, piocb);
- } else
- lpfc_sli_release_iocbq(phba, piocb);
+ }
}
return;
}
-void
-lpfc_els_unsol_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
+static void
+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
{
- struct lpfc_sli *psli;
struct lpfc_nodelist *ndlp;
- struct lpfc_dmabuf *mp;
- uint32_t *lp;
- IOCB_t *icmd;
struct ls_rjt stat;
- uint32_t cmd;
- uint32_t did;
- uint32_t newnode;
- uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
- uint32_t rjt_err = 0;
-
- psli = &phba->sli;
- icmd = &elsiocb->iocb;
-
- if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
- /* Not enough posted buffers; Try posting more buffers */
- phba->fc_stat.NoRcvBuf++;
- lpfc_post_buffer(phba, pring, 0, 1);
- return;
- }
-
- /* If there are no BDEs associated with this IOCB,
- * there is nothing to do.
- */
- if (icmd->ulpBdeCount == 0)
- return;
+ uint32_t *payload;
+ uint32_t cmd, did, newnode, rjt_err = 0;
+ IOCB_t *icmd = &elsiocb->iocb;
- /* type of ELS cmd is first 32bit word in packet */
- mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
- cont64[0].
- addrHigh,
- icmd->un.
- cont64[0].addrLow));
- if (mp == 0) {
- drop_cmd = 1;
+ if (vport == NULL || elsiocb->context2 == NULL)
goto dropit;
- }
newnode = 0;
- lp = (uint32_t *) mp->virt;
- cmd = *lp++;
- lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
+ payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ cmd = *payload;
+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+ lpfc_post_buffer(phba, pring, 1, 1);
+ did = icmd->un.rcvels.remoteID;
if (icmd->ulpStatus) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- drop_cmd = 1;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV Unsol ELS: status:x%x/x%x did:x%x",
+ icmd->ulpStatus, icmd->un.ulpWord[4], did);
goto dropit;
}
/* Check to see if link went down during discovery */
- if (lpfc_els_chk_latt(phba)) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- drop_cmd = 1;
+ if (lpfc_els_chk_latt(vport))
goto dropit;
- }
- did = icmd->un.rcvels.remoteID;
- ndlp = lpfc_findnode_did(phba, did);
+ /* Ignore traffic received during vport shutdown. */
+ if (vport->load_flag & FC_UNLOADING)
+ goto dropit;
+
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
- if (!ndlp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- drop_cmd = 1;
+ if (!ndlp)
goto dropit;
- }
- lpfc_nlp_init(phba, ndlp, did);
+ lpfc_nlp_init(vport, ndlp, did);
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
ndlp->nlp_type |= NLP_FABRIC;
}
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
}
phba->fc_stat.elsRcvFrame++;
if (elsiocb->context1)
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = lpfc_nlp_get(ndlp);
- elsiocb->context2 = mp;
+ elsiocb->vport = vport;
if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
cmd &= ELS_CMD_MASK;
}
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0112 ELS command x%x received from NPORT x%x "
- "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
+ "%d (%d):0112 ELS command x%x received from NPORT x%x "
+ "Data: x%x\n", phba->brd_no, vport->vpi, cmd, did,
+ vport->port_state);
switch (cmd) {
case ELS_CMD_PLOGI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPLOGI++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_PLOGI);
+
break;
case ELS_CMD_FLOGI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFLOGI++;
- lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
+ lpfc_els_rcv_flogi(vport, elsiocb, ndlp, newnode);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_LOGO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV LOGO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvLOGO++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
break;
case ELS_CMD_PRLO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PRLO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPRLO++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
break;
case ELS_CMD_RSCN:
phba->fc_stat.elsRcvRSCN++;
- lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
+ lpfc_els_rcv_rscn(vport, elsiocb, ndlp, newnode);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_ADISC:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ADISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvADISC++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_ADISC);
break;
case ELS_CMD_PDISC:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PDISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPDISC++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb,
+ NLP_EVT_RCV_PDISC);
break;
case ELS_CMD_FARPR:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FARPR: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFARPR++;
- lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
+ lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
break;
case ELS_CMD_FARP:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FARP: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFARP++;
- lpfc_els_rcv_farp(phba, elsiocb, ndlp);
+ lpfc_els_rcv_farp(vport, elsiocb, ndlp);
break;
case ELS_CMD_FAN:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV FAN: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvFAN++;
- lpfc_els_rcv_fan(phba, elsiocb, ndlp);
+ lpfc_els_rcv_fan(vport, elsiocb, ndlp);
break;
case ELS_CMD_PRLI:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV PRLI: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvPRLI++;
- if (phba->hba_state < LPFC_DISC_AUTH) {
- rjt_err = 1;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
break;
}
- lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+ lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV LIRR: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvLIRR++;
- lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
+ lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_RPS:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RPS: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvRPS++;
- lpfc_els_rcv_rps(phba, elsiocb, ndlp);
+ lpfc_els_rcv_rps(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_RPL:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RPL: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvRPL++;
- lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
+ lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
case ELS_CMD_RNID:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RNID: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
phba->fc_stat.elsRcvRNID++;
- lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
+ lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
default:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
+ cmd, did, vport->port_state);
+
/* Unsupported ELS command, reject */
- rjt_err = 1;
+ rjt_err = LSRJT_INVALID_CMD;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "%d:0115 Unknown ELS command x%x received from "
- "NPORT x%x\n", phba->brd_no, cmd, did);
+ "%d (%d):0115 Unknown ELS command x%x "
+ "received from NPORT x%x\n",
+ phba->brd_no, vport->vpi, cmd, did);
if (newnode)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
break;
}
/* check if need to LS_RJT received ELS cmd */
if (rjt_err) {
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ memset(&stat, 0, sizeof(stat));
+ stat.un.b.lsRjtRsnCode = rjt_err;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
+ NULL);
+ if (newnode)
+ lpfc_drop_node(vport, ndlp);
+ }
+
+ return;
+
+dropit:
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0111 Dropping received ELS cmd "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, vport ? vport->vpi : 0xffff,
+ icmd->ulpStatus, icmd->un.ulpWord[4],
+ icmd->ulpTimeout);
+ phba->fc_stat.elsRcvDrop++;
+}
+
+static struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+ struct lpfc_vport *vport;
+
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->vpi == vpi)
+ return vport;
+ }
+ return NULL;
+}
+
+void
+lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *elsiocb)
+{
+ struct lpfc_vport *vport = phba->pport;
+ IOCB_t *icmd = &elsiocb->iocb;
+ dma_addr_t paddr;
+ struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
+ struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+
+ elsiocb->context2 = NULL;
+ elsiocb->context3 = NULL;
+
+ if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+ } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
+ phba->fc_stat.NoRcvBuf++;
+ /* Not enough posted buffers; Try posting more buffers */
+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+ lpfc_post_buffer(phba, pring, 0, 1);
+ return;
+ }
+
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else {
+ uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ }
+ }
+ /* If there are no BDEs associated
+ * with this IOCB, there is nothing to do.
+ */
+ if (icmd->ulpBdeCount == 0)
+ return;
+
+ /* type of ELS cmd is first 32bit word
+ * in packet
+ */
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ elsiocb->context2 = bdeBuf1;
+ } else {
+ paddr = getPaddr(icmd->un.cont64[0].addrHigh,
+ icmd->un.cont64[0].addrLow);
+ elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
}
+ lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+ /*
+ * The different unsolicited event handlers would tell us
+ * if they are done with "mp" by setting context2 to NULL.
+ */
lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = NULL;
if (elsiocb->context2) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
+ elsiocb->context2 = NULL;
}
-dropit:
- /* check if need to drop received ELS cmd */
- if (drop_cmd == 1) {
+
+ /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */

+ if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
+ icmd->ulpBdeCount == 2) {
+ elsiocb->context2 = bdeBuf2;
+ lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+ /* free mp if we are done with it */
+ if (elsiocb->context2) {
+ lpfc_in_buf_free(phba, elsiocb->context2);
+ elsiocb->context2 = NULL;
+ }
+ }
+}
+
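+/*
+ * Kick off NameServer (and optionally FDMI) PLOGI for this vport,
+ * allocating a NameServer node if one does not already exist.
+ */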
+void
+lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ndlp) {
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0251 NameServer login: no memory\n",
+ phba->brd_no, vport->vpi);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, NameServer_DID);
+ ndlp->nlp_type |= NLP_FABRIC;
+ }
+
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+
+ if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "%d:0111 Dropping received ELS cmd "
- "Data: x%x x%x x%x\n", phba->brd_no,
- icmd->ulpStatus, icmd->un.ulpWord[4],
- icmd->ulpTimeout);
- phba->fc_stat.elsRcvDrop++;
+ "%d (%d):0252 Cannot issue NameServer login\n",
+ phba->brd_no, vport->vpi);
+ return;
+ }
+
+ if (phba->cfg_fdmi_on) {
+ ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+ GFP_KERNEL);
+ if (ndlp_fdmi) {
+ lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+ ndlp_fdmi->nlp_type |= NLP_FABRIC;
+ ndlp_fdmi->nlp_state =
+ NLP_STE_PLOGI_ISSUE;
+ lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
+ 0);
+ }
+ }
+ return;
+}
+
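+/*
+ * REG_VPI mailbox completion: on success continue discovery (fabric
+ * reg_login for the physical port, NameServer PLOGI for NPIV ports);
+ * on failure either fail the vport or retry via UNREG_VPI and FDISC.
+ */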
+static void
+lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ MAILBOX_t *mb = &pmb->mb;
+
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ lpfc_nlp_put(ndlp);
+
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0915 Register VPI failed: 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
+
+ switch (mb->mbxStatus) {
+ case 0x11: /* unsupported feature */
+ case 0x9603: /* max_vpi exceeded */
+ /* giving up on vport registration */
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ break;
+ default:
+ /* Try to recover from this error */
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ lpfc_initial_fdisc(vport);
+ break;
+ }
+
+ } else {
+ if (vport == phba->pport)
+ lpfc_issue_fabric_reglogin(vport);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
}
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
+
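+/*
+ * Issue a REG_VPI mailbox command to register this vport's VPI with the
+ * fabric-assigned NPort ID; mark the vport FAILED if the mailbox cannot
+ * be allocated or sent.
+ */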
+void
+lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ LPFC_MBOXQ_t *mbox;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+ mbox->vport = vport;
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
+ if (lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT | MBX_STOP_IOCB)
+ == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0253 Register VPI: Cannot send mbox\n",
+ phba->brd_no, vport->vpi);
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0254 Register VPI: no memory\n",
+ phba->brd_no, vport->vpi);
+
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ lpfc_nlp_put(ndlp);
+ }
+}
+
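+/*
+ * FDISC completion: restart the discovery timer for all vports queued
+ * behind the single-threaded fabric path, then on success either register
+ * the new VPI or proceed straight to NameServer PLOGI.
+ */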
+static void
+lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *np;
+ struct lpfc_nodelist *next_np;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_iocbq *piocb;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n",
+ phba->brd_no, vport->vpi,
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+ /* Since all FDISCs are being single threaded, we
+ * must reset the discovery timer for ALL vports
+ * waiting to send FDISC when one completes.
+ */
+ list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
+ lpfc_set_disctmo(piocb->vport);
+ }
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "FDISC cmpl: status:x%x/x%x prevdid:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+
+ if (irsp->ulpStatus) {
+ /* Check for retry */
+ if (lpfc_els_retry(phba, cmdiocb, rspiocb))
+ goto out;
+
+ /* FDISC failed */
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0124 FDISC failed. (%d/%d)\n",
+ phba->brd_no, vport->vpi,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+
+ if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_nlp_put(ndlp);
+ /* giving up on FDISC. Cancel discovery timer */
+ lpfc_can_disctmo(vport);
+ } else {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
+
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+ if ((vport->fc_prevDID != vport->fc_myDID) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed so we can
+ * issue unreg_vpi.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if (np->nlp_state != NLP_STE_NPR_NODE
+ || !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
+ spin_lock_irq(shost->host_lock);
+ np->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
+ }
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ }
+
+ if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
+
+ lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
+ }
+
+out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
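+/*
+ * Build and send an FDISC for an NPIV vport through the single-threaded
+ * fabric IOCB path; returns 0 on success, 1 if the IOCB could not be
+ * allocated or issued.
+ */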
+int
+lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ struct serv_parm *sp;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int did = ndlp->nlp_DID;
+ int rc;
+
+ cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
+ ELS_CMD_FDISC);
+ if (!elsiocb) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0255 Issue FDISC: no IOCB\n",
+ phba->brd_no, vport->vpi);
+ return 1;
+ }
+
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+
+ /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
+ pcmd += sizeof(uint32_t); /* CSP Word 1 */
+ memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
+ sp = (struct serv_parm *) pcmd;
+ /* Setup CSPs accordingly for Fabric */
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cls1.classValid = 0;
+ sp->cls2.seqDelivery = 1;
+ sp->cls3.seqDelivery = 1;
+
+ pcmd += sizeof(uint32_t); /* CSP Word 2 */
+ pcmd += sizeof(uint32_t); /* CSP Word 3 */
+ pcmd += sizeof(uint32_t); /* CSP Word 4 */
+ pcmd += sizeof(uint32_t); /* Port Name */
+ memcpy(pcmd, &vport->fc_portname, 8);
+ pcmd += sizeof(uint32_t); /* Node Name */
+ pcmd += sizeof(uint32_t); /* Node Name */
+ memcpy(pcmd, &vport->fc_nodename, 8);
+
+ lpfc_set_disctmo(vport);
+
+ phba->fc_stat.elsXmitFDISC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue FDISC: did:x%x",
+ did, 0, 0);
+
+ rc = lpfc_issue_fabric_iocb(phba, elsiocb);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0256 Issue FDISC: Cannot send IOCB\n",
+ phba->brd_no, vport->vpi);
+
+ return 1;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+ vport->port_state = LPFC_FDISC;
+ return 0;
+}
+
+static void
+lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp;
+
+ irsp = &rspiocb->iocb;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "LOGO npiv cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
+}
+
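+/*
+ * Send a LOGO on behalf of an NPIV vport (the payload carries the vport's
+ * NPort ID and WWPN); returns 0 on success, 1 on failure.
+ */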
+int
+lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ IOCB_t *icmd;
+ struct lpfc_iocbq *elsiocb;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+
+ cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
+ elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
+ ELS_CMD_LOGO);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
+ pcmd += sizeof(uint32_t);
+
+ /* Fill in LOGO payload */
+ *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Issue LOGO npiv did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
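+/*
+ * Fabric block timer handler: post WORKER_FABRIC_BLOCK_TMO to the
+ * physical port and wake the worker thread if the event was not
+ * already pending.
+ */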
+void
+lpfc_fabric_block_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ unsigned long iflags;
+ uint32_t tmo_posted;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+ tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+ if (!tmo_posted) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (phba->work_wait)
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+}
+
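+/*
+ * If no fabric IOCB is currently outstanding, pull the next one from
+ * fabric_iocb_list and issue it; if the issue fails, complete it with a
+ * local-reject status and try the next queued IOCB.
+ */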
+static void
+lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq *iocb;
+ unsigned long iflags;
+ int ret;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ IOCB_t *cmd;
+
+repeat:
+ iocb = NULL;
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Post any pending iocb to the SLI layer */
+ if (atomic_read(&phba->fabric_iocb_count) == 0) {
+ list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
+ list);
+ if (iocb)
+ atomic_inc(&phba->fabric_iocb_count);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb) {
+ iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+ iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+ lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
+
+ ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+ iocb->fabric_iocb_cmpl = NULL;
+ iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ cmd = &iocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ iocb->iocb_cmpl(phba, iocb, iocb);
+
+ atomic_dec(&phba->fabric_iocb_count);
+ goto repeat;
+ }
+ }
+
+ return;
+}
+
+void
+lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
+{
+ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+ lpfc_resume_fabric_iocbs(phba);
+ return;
+}
+
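+/*
+ * Mark fabric commands blocked and, on the first transition, start a
+ * 100ms timer that will unblock them.
+ */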
+static void
+lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
+{
+ int blocked;
+
+ blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ /* Start a timer to unblock fabric
+ * iocbs after 100ms
+ */
+ if (!blocked)
+ mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+
+ return;
+}
+
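+/*
+ * Wrapper completion for fabric IOCBs: block further fabric traffic on
+ * busy/reject statuses, restore the original completion handler, invoke
+ * it, and resume any queued fabric IOCBs if commands are not blocked.
+ */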
+static void
+lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct ls_rjt stat;
+
+ if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
+ BUG();
+
+ switch (rspiocb->iocb.ulpStatus) {
+ case IOSTAT_NPORT_RJT:
+ case IOSTAT_FABRIC_RJT:
+ if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ lpfc_block_fabric_iocbs(phba);
+ }
+ break;
+
+ case IOSTAT_NPORT_BSY:
+ case IOSTAT_FABRIC_BSY:
+ lpfc_block_fabric_iocbs(phba);
+ break;
+
+ case IOSTAT_LS_RJT:
+ stat.un.lsRjtError =
+ be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
+ (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
+ lpfc_block_fabric_iocbs(phba);
+ break;
+ }
+
+ if (atomic_read(&phba->fabric_iocb_count) == 0)
+ BUG();
+
+ cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
+ cmdiocb->fabric_iocb_cmpl = NULL;
+ cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+
+ atomic_dec(&phba->fabric_iocb_count);
+ if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
+ /* Post any pending iocbs to HBA */
+ lpfc_resume_fabric_iocbs(phba);
+ }
+}
+
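+/*
+ * Single-threaded fabric IOCB issue: send immediately when no fabric
+ * IOCB is outstanding and commands are not blocked, otherwise queue the
+ * IOCB on fabric_iocb_list for later resumption.
+ */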
+int
+lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
+{
+ unsigned long iflags;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ int ready;
+ int ret;
+
+ if (atomic_read(&phba->fabric_iocb_count) > 1)
+ BUG();
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
+ !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (ready) {
+ iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
+ iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->iocb_flag |= LPFC_IO_FABRIC;
+
+ lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
+
+ atomic_inc(&phba->fabric_iocb_count);
+ ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+
+ if (ret == IOCB_ERROR) {
+ iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
+ iocb->fabric_iocb_cmpl = NULL;
+ iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ atomic_dec(&phba->fabric_iocb_count);
+ }
+ } else {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&iocb->list, &phba->fabric_iocb_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ ret = IOCB_SUCCESS;
+ }
+ return ret;
+}
+
+
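+/*
+ * Abort all queued fabric IOCBs belonging to the given vport, completing
+ * each with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
+ */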
+void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+
+ if (piocb->vport != vport)
+ continue;
+
+ list_move_tail(&piocb->list, &completions);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
+void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
+{
+ LIST_HEAD(completions);
+ struct lpfc_hba *phba = ndlp->vport->phba;
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ IOCB_t *cmd;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+ if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
+
+ list_move_tail(&piocb->list, &completions);
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
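+/*
+ * Abort every queued fabric IOCB on the HBA, completing each with
+ * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
+ */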
+void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *piocb;
+ IOCB_t *cmd;
+
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->fabric_iocb_list, &completions);
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
+
+void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *tmp_iocb, *piocb;
+ IOCB_t *cmd;
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
+ list) {
+
+ cmd = &piocb->iocb;
+ ndlp = (struct lpfc_nodelist *) piocb->context1;
+ if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
+ ndlp != NULL &&
+ ndlp->nlp_DID == Fabric_DID)
+ list_move_tail(&piocb->list, &completions);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ while (!list_empty(&completions)) {
+ piocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_del_init(&piocb->list);
+
+ cmd = &piocb->iocb;
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+}
+
+
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 61caa8d379e2..f2f4639eab59 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,8 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
@@ -54,7 +56,7 @@ static uint8_t lpfcAlpaArray[] = {
0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
-static void lpfc_disc_timeout_handler(struct lpfc_hba *);
+static void lpfc_disc_timeout_handler(struct lpfc_vport *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -74,14 +76,16 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
return;
}
- phba = ndlp->nlp_phba;
+ phba = ndlp->vport->phba;
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ "rport terminate: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
- spin_lock_irq(phba->host->host_lock);
if (ndlp->nlp_sid != NLP_NO_SID) {
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
- spin_unlock_irq(phba->host->host_lock);
return;
}
@@ -94,105 +98,213 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
struct lpfc_nodelist * ndlp;
- uint8_t *name;
- int warn_on = 0;
- struct lpfc_hba *phba;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct completion devloss_compl;
+ struct lpfc_work_evt *evtp;
rdata = rport->dd_data;
ndlp = rdata->pnode;
if (!ndlp) {
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+ if (rport->scsi_target_id != -1) {
printk(KERN_ERR "Cannot find remote node"
- " for rport in dev_loss_tmo_callbk x%x\n",
- rport->port_id);
+ " for rport in dev_loss_tmo_callbk x%x\n",
+ rport->port_id);
+ }
return;
}
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ vport = ndlp->vport;
+ phba = vport->phba;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlosscb: sid:x%x did:x%x flg:x%x",
+ ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ init_completion(&devloss_compl);
+ evtp = &ndlp->dev_loss_evt;
+
+ if (!list_empty(&evtp->evt_listp))
+ return;
+
+ spin_lock_irq(&phba->hbalock);
+ evtp->evt_arg1 = ndlp;
+ evtp->evt_arg2 = &devloss_compl;
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ wait_for_completion(&devloss_compl);
+
+ return;
+}
+
+/*
+ * This function is called from the worker thread when dev_loss_tmo
+ * expires.
+ */
+void
+lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_rport_data *rdata;
+ struct fc_rport *rport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ uint8_t *name;
+ int warn_on = 0;
+
+ rport = ndlp->rport;
+
+ if (!rport)
+ return;
+
+ rdata = rport->dd_data;
+ name = (uint8_t *) &ndlp->nlp_portname;
+ vport = ndlp->vport;
+ phba = vport->phba;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlosstmo:did:x%x type:x%x id:x%x",
+ ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
+
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- name = (uint8_t *)&ndlp->nlp_portname;
- phba = ndlp->nlp_phba;
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ int put_node;
+ int put_rport;
- spin_lock_irq(phba->host->host_lock);
+ /* We will clean up these Nodes in linkup */
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
+ rdata->pnode = NULL;
+ ndlp->rport = NULL;
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
+ return;
+ }
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
/* flush the target */
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
+ ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
}
- if (phba->fc_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING)
warn_on = 0;
- spin_unlock_irq(phba->host->host_lock);
-
if (warn_on) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0203 Devloss timeout on "
+ "%d (%d):0203 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0204 Devloss timeout on "
+ "%d (%d):0204 Devloss timeout on "
"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
"NPort x%x Data: x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(phba->fc_flag & FC_UNLOADING) &&
+ if (!(vport->load_flag & FC_UNLOADING) &&
!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
- lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
else {
+ int put_node;
+ int put_rport;
+
+ put_node = rdata->pnode != NULL;
+ put_rport = ndlp->rport != NULL;
rdata->pnode = NULL;
ndlp->rport = NULL;
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
+ if (put_node)
+ lpfc_nlp_put(ndlp);
+ if (put_rport)
+ put_device(&rport->dev);
}
+}
+
+void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+ wake_up(phba->work_wait);
return;
}
static void
-lpfc_work_list_done(struct lpfc_hba * phba)
+lpfc_work_list_done(struct lpfc_hba *phba)
{
struct lpfc_work_evt *evtp = NULL;
struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *vport;
int free_evt;
- spin_lock_irq(phba->host->host_lock);
- while(!list_empty(&phba->work_list)) {
+ spin_lock_irq(&phba->hbalock);
+ while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
free_evt = 1;
switch (evtp->evt) {
+ case LPFC_EVT_DEV_LOSS_DELAY:
+ free_evt = 0; /* evt is part of ndlp */
+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
+ vport = ndlp->vport;
+ if (!vport)
+ break;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport devlossdly:did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ if (!(vport->load_flag & FC_UNLOADING) &&
+ !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+ break;
case LPFC_EVT_ELS_RETRY:
- ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ break;
+ case LPFC_EVT_DEV_LOSS:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_nlp_get(ndlp);
+ lpfc_dev_loss_tmo_handler(ndlp);
free_evt = 0;
+ complete((struct completion *)(evtp->evt_arg2));
+ lpfc_nlp_put(ndlp);
break;
case LPFC_EVT_ONLINE:
- if (phba->hba_state < LPFC_LINK_DOWN)
- *(int *)(evtp->evt_arg1) = lpfc_online(phba);
+ if (phba->link_state < LPFC_LINK_DOWN)
+ *(int *) (evtp->evt_arg1) = lpfc_online(phba);
else
- *(int *)(evtp->evt_arg1) = 0;
+ *(int *) (evtp->evt_arg1) = 0;
complete((struct completion *)(evtp->evt_arg2));
break;
case LPFC_EVT_OFFLINE_PREP:
- if (phba->hba_state >= LPFC_LINK_DOWN)
+ if (phba->link_state >= LPFC_LINK_DOWN)
lpfc_offline_prep(phba);
*(int *)(evtp->evt_arg1) = 0;
complete((struct completion *)(evtp->evt_arg2));
@@ -218,33 +330,31 @@ lpfc_work_list_done(struct lpfc_hba * phba)
case LPFC_EVT_KILL:
lpfc_offline(phba);
*(int *)(evtp->evt_arg1)
- = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
+ = (phba->pport->stopped)
+ ? 0 : lpfc_sli_brdkill(phba);
lpfc_unblock_mgmt_io(phba);
complete((struct completion *)(evtp->evt_arg2));
break;
}
if (free_evt)
kfree(evtp);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
}
-static void
-lpfc_work_done(struct lpfc_hba * phba)
+void
+lpfc_work_done(struct lpfc_hba *phba)
{
struct lpfc_sli_ring *pring;
- int i;
- uint32_t ha_copy;
- uint32_t control;
- uint32_t work_hba_events;
+ uint32_t ha_copy, status, control, work_port_events;
+ struct lpfc_vport *vport;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
- work_hba_events=phba->work_hba_events;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
if (ha_copy & HA_ERATT)
lpfc_handle_eratt(phba);
@@ -255,66 +365,111 @@ lpfc_work_done(struct lpfc_hba * phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
- if (work_hba_events & WORKER_DISC_TMO)
- lpfc_disc_timeout_handler(phba);
-
- if (work_hba_events & WORKER_ELS_TMO)
- lpfc_els_timeout_handler(phba);
-
- if (work_hba_events & WORKER_MBOX_TMO)
- lpfc_mbox_timeout_handler(phba);
-
- if (work_hba_events & WORKER_FDMI_TMO)
- lpfc_fdmi_tmo_handler(phba);
-
- spin_lock_irq(phba->host->host_lock);
- phba->work_hba_events &= ~work_hba_events;
- spin_unlock_irq(phba->host->host_lock);
-
- for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
- pring = &phba->sli.ring[i];
- if ((ha_copy & HA_RXATT)
- || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
- if (pring->flag & LPFC_STOP_IOCB_MASK) {
- pring->flag |= LPFC_DEFERRED_RING_EVENT;
- } else {
- lpfc_sli_handle_slow_ring_event(phba, pring,
- (ha_copy &
- HA_RXMASK));
- pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
- }
- /*
- * Turn on Ring interrupts
- */
- spin_lock_irq(phba->host->host_lock);
- control = readl(phba->HCregaddr);
- control |= (HC_R0INT_ENA << i);
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (!scsi_host_get(shost)) {
+ continue;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ work_port_events = vport->work_port_events;
+
+ if (work_port_events & WORKER_DISC_TMO)
+ lpfc_disc_timeout_handler(vport);
+
+ if (work_port_events & WORKER_ELS_TMO)
+ lpfc_els_timeout_handler(vport);
+
+ if (work_port_events & WORKER_HB_TMO)
+ lpfc_hb_timeout_handler(phba);
+
+ if (work_port_events & WORKER_MBOX_TMO)
+ lpfc_mbox_timeout_handler(phba);
+
+ if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+ lpfc_unblock_fabric_iocbs(phba);
+
+ if (work_port_events & WORKER_FDMI_TMO)
+ lpfc_fdmi_timeout_handler(vport);
+
+ if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+ lpfc_ramp_down_queue_handler(phba);
+
+ if (work_port_events & WORKER_RAMP_UP_QUEUE)
+ lpfc_ramp_up_queue_handler(phba);
+
+ spin_lock_irq(&vport->work_port_lock);
+ vport->work_port_events &= ~work_port_events;
+ spin_unlock_irq(&vport->work_port_lock);
+ scsi_host_put(shost);
+ spin_lock_irq(&phba->hbalock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status >>= (4*LPFC_ELS_RING);
+ if ((status & HA_RXMASK)
+ || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+ if (pring->flag & LPFC_STOP_IOCB_MASK) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ } else {
+ lpfc_sli_handle_slow_ring_event(phba, pring,
+ (status &
+ HA_RXMASK));
+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+ }
+ /*
+ * Turn on Ring interrupts
+ */
+ spin_lock_irq(&phba->hbalock);
+ control = readl(phba->HCregaddr);
+ if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+ control |= (HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
}
+ spin_unlock_irq(&phba->hbalock);
}
-
- lpfc_work_list_done (phba);
-
+ lpfc_work_list_done(phba);
}
static int
-check_work_wait_done(struct lpfc_hba *phba) {
+check_work_wait_done(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_sli_ring *pring;
+ int rc = 0;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->work_port_events) {
+ rc = 1;
+ goto exit;
+ }
+ }
- spin_lock_irq(phba->host->host_lock);
- if (phba->work_ha ||
- phba->work_hba_events ||
- (!list_empty(&phba->work_list)) ||
+ if (phba->work_ha || (!list_empty(&phba->work_list)) ||
kthread_should_stop()) {
- spin_unlock_irq(phba->host->host_lock);
- return 1;
- } else {
- spin_unlock_irq(phba->host->host_lock);
- return 0;
+ rc = 1;
+ goto exit;
}
+
+ pring = &phba->sli.ring[LPFC_ELS_RING];
+ if (pring->flag & LPFC_DEFERRED_RING_EVENT)
+ rc = 1;
+exit:
+ if (rc)
+ phba->work_found++;
+ else
+ phba->work_found = 0;
+
+ spin_unlock_irq(&phba->hbalock);
+ return rc;
}
+
int
lpfc_do_work(void *p)
{
@@ -324,11 +479,13 @@ lpfc_do_work(void *p)
set_user_nice(current, -20);
phba->work_wait = &work_waitq;
+ phba->work_found = 0;
while (1) {
rc = wait_event_interruptible(work_waitq,
- check_work_wait_done(phba));
+ check_work_wait_done(phba));
+
BUG_ON(rc);
if (kthread_should_stop())
@@ -336,6 +493,17 @@ lpfc_do_work(void *p)
lpfc_work_done(phba);
+ /* If there is a lot of slow ring work, like during link up,
+ * check_work_wait_done() may cause this thread to not give
+ * up the CPU for very long periods of time. This may cause
+ * soft lockups or other problems. To avoid these situations
+ * give up the CPU here after LPFC_MAX_WORKER_ITERATION
+ * consecutive iterations.
+ */
+ if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
+ phba->work_found = 0;
+ schedule();
+ }
}
phba->work_wait = NULL;
return 0;
@@ -347,16 +515,17 @@ lpfc_do_work(void *p)
* embedding it in the IOCB.
*/
int
-lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
+lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
uint32_t evt)
{
struct lpfc_work_evt *evtp;
+ unsigned long flags;
/*
* All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
* be queued to worker thread for processing
*/
- evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
+ evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
if (!evtp)
return 0;
@@ -364,136 +533,210 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
evtp->evt_arg2 = arg2;
evtp->evt = evt;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&evtp->evt_listp, &phba->work_list);
if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock_irq(phba->host->host_lock);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
return 1;
}
-int
-lpfc_linkdown(struct lpfc_hba *phba)
+void
+lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
- struct lpfc_sli *psli;
- struct lpfc_nodelist *ndlp, *next_ndlp;
- LPFC_MBOXQ_t *mb;
- int rc;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ int rc;
- psli = &phba->sli;
- /* sysfs or selective reset may call this routine to clean up */
- if (phba->hba_state >= LPFC_LINK_DOWN) {
- if (phba->hba_state == LPFC_LINK_DOWN)
- return 0;
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
- spin_lock_irq(phba->host->host_lock);
- phba->hba_state = LPFC_LINK_DOWN;
- spin_unlock_irq(phba->host->host_lock);
+ if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
+ lpfc_unreg_rpi(vport, ndlp);
+
+ /* Leave Fabric nodes alone on link down */
+ if (!remove && ndlp->nlp_type & NLP_FABRIC)
+ continue;
+ rc = lpfc_disc_state_machine(vport, ndlp, NULL,
+ remove
+ ? NLP_EVT_DEVICE_RM
+ : NLP_EVT_DEVICE_RECOVERY);
}
+ if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+ lpfc_mbx_unreg_vpi(vport);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ }
+}
+
+static void
+lpfc_linkdown_port(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- fc_host_post_event(phba->host, fc_get_event_number(),
- FCH_EVT_LINKDOWN, 0);
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
- /* Clean up any firmware default rpi's */
- if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- lpfc_unreg_did(phba, 0xffffffff, mb);
- mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
- if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
- == MBX_NOT_FINISHED) {
- mempool_free( mb, phba->mbox_mem_pool);
- }
- }
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Link Down: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Cleanup any outstanding RSCN activity */
- lpfc_els_flush_rscn(phba);
+ lpfc_els_flush_rscn(vport);
/* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(phba);
+ lpfc_els_flush_cmd(vport);
- /*
- * Issue a LINK DOWN event to all nodes.
- */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
- /* free any ndlp's on unused list */
+ lpfc_cleanup_rpis(vport, 0);
+
+ /* free any ndlp's on unused list */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
+ /* free any ndlp's in unused state */
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_drop_node(phba, ndlp);
- else /* otherwise, force node recovery. */
- rc = lpfc_disc_state_machine(phba, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ lpfc_drop_node(vport, ndlp);
+
+ /* Turn off discovery timer if its running */
+ lpfc_can_disctmo(vport);
+}
+
+int
+lpfc_linkdown(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_vport *port_iterator;
+ LPFC_MBOXQ_t *mb;
+
+ if (phba->link_state == LPFC_LINK_DOWN) {
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ if (phba->link_state > LPFC_LINK_DOWN) {
+ phba->link_state = LPFC_LINK_DOWN;
+ phba->pport->fc_flag &= ~FC_LBIT;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+
+ /* Issue a LINK DOWN event to all nodes */
+ lpfc_linkdown_port(port_iterator);
+ }
+
+ /* Clean up any firmware default rpi's */
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
+ lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
+ mb->vport = vport;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
}
/* Setup myDID for link up if we are in pt2pt mode */
- if (phba->fc_flag & FC_PT2PT) {
- phba->fc_myDID = 0;
- if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
+ if (phba->pport->fc_flag & FC_PT2PT) {
+ phba->pport->fc_myDID = 0;
+ mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mb) {
lpfc_config_link(phba, mb);
- mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
- if (lpfc_sli_issue_mbox
- (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mb->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mb,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
== MBX_NOT_FINISHED) {
- mempool_free( mb, phba->mbox_mem_pool);
+ mempool_free(mb, phba->mbox_mem_pool);
}
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
+ spin_unlock_irq(shost->host_lock);
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_LBIT;
- spin_unlock_irq(phba->host->host_lock);
-
- /* Turn off discovery timer if its running */
- lpfc_can_disctmo(phba);
- /* Must process IOCBs on all rings to handle ABORTed I/Os */
return 0;
}
-static int
-lpfc_linkup(struct lpfc_hba *phba)
+static void
+lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
- struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_nodelist *ndlp;
- fc_host_post_event(phba->host, fc_get_event_number(),
- FCH_EVT_LINKUP, 0);
-
- spin_lock_irq(phba->host->host_lock);
- phba->hba_state = LPFC_LINK_UP;
- phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
- phba->fc_flag |= FC_NDISC_ACTIVE;
- phba->fc_ns_retry = 0;
- spin_unlock_irq(phba->host->host_lock);
-
-
- if (phba->fc_flag & FC_LBIT) {
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
- if (ndlp->nlp_type & NLP_FABRIC) {
- /*
- * On Linkup its safe to clean up the
- * ndlp from Fabric connections.
- */
- lpfc_nlp_set_state(phba, ndlp,
- NLP_STE_UNUSED_NODE);
- } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
- /*
- * Fail outstanding IO now since
- * device is marked for PLOGI.
- */
- lpfc_unreg_rpi(phba, ndlp);
- }
- }
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ /* On Linkup it's safe to clean up the ndlp
+ * from Fabric connections.
+ */
+ if (ndlp->nlp_DID != Fabric_DID)
+ lpfc_unreg_rpi(vport, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
+ /* Fail outstanding IO now since device is
+ * marked for PLOGI.
+ */
+ lpfc_unreg_rpi(vport, ndlp);
}
}
+}
- /* free any ndlp's on unused list */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
- nlp_listp) {
+static void
+lpfc_linkup_port(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
+
+ if ((vport->load_flag & FC_UNLOADING) != 0)
+ return;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "Link Up: top:x%x speed:x%x flg:x%x",
+ phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
+
+ /* If NPIV is not enabled, only bring the physical port up */
+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (vport != phba->pport))
+ return;
+
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ vport->fc_flag |= FC_NDISC_ACTIVE;
+ vport->fc_ns_retry = 0;
+ spin_unlock_irq(shost->host_lock);
+
+ if (vport->fc_flag & FC_LBIT)
+ lpfc_linkup_cleanup_nodes(vport);
+
+ /* free any ndlp's in unused state */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
+}
+
+static int
+lpfc_linkup(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+
+ phba->link_state = LPFC_LINK_UP;
+
+ /* Unblock fabric iocbs if they are blocked */
+ clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
+ del_timer_sync(&phba->fabric_block_timer);
+
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ lpfc_linkup_port(vport);
}
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_clear_la(phba, phba->pport);
return 0;
}
@@ -505,14 +748,14 @@ lpfc_linkup(struct lpfc_hba *phba)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_sli *psli = &phba->sli;
+ MAILBOX_t *mb = &pmb->mb;
uint32_t control;
- psli = &phba->sli;
- mb = &pmb->mb;
/* Since we don't do discovery right now, turn these off here */
psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
@@ -522,69 +765,74 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "%d:0320 CLEAR_LA mbxStatus error x%x hba "
+ "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
"state x%x\n",
- phba->brd_no, mb->mbxStatus, phba->hba_state);
+ phba->brd_no, vport->vpi, mb->mbxStatus,
+ vport->port_state);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
goto out;
}
- if (phba->fc_flag & FC_ABORT_DISCOVERY)
- goto out;
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ phba->link_state = LPFC_HBA_READY;
- phba->num_disc_nodes = 0;
- /* go thru NPR list and issue ELS PLOGIs */
- if (phba->fc_npr_cnt) {
- lpfc_els_disc_plogi(phba);
- }
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag |= LPFC_PROCESS_LA;
+ control = readl(phba->HCregaddr);
+ control |= HC_LAINT_ENA;
+ writel(control, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+
+ vport->num_disc_nodes = 0;
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
- if (!phba->num_disc_nodes) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
}
- phba->hba_state = LPFC_HBA_READY;
+ vport->port_state = LPFC_VPORT_READY;
out:
/* Device Discovery completes */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0225 Device Discovery completes\n",
- phba->brd_no);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0225 Device Discovery completes\n",
+ phba->brd_no, vport->vpi);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_ABORT_DISCOVERY;
- if (phba->fc_flag & FC_ESTABLISH_LINK) {
- phba->fc_flag &= ~FC_ESTABLISH_LINK;
- }
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
+ spin_unlock_irq(shost->host_lock);
del_timer_sync(&phba->fc_estabtmo);
- lpfc_can_disctmo(phba);
+ lpfc_can_disctmo(vport);
/* turn on Link Attention interrupts */
- spin_lock_irq(phba->host->host_lock);
+
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return;
}
+
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli = &phba->sli;
- int rc;
+ struct lpfc_vport *vport = pmb->vport;
if (pmb->mb.mbxStatus)
goto out;
@@ -592,154 +840,139 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (phba->fc_topology == TOPOLOGY_LOOP &&
- phba->fc_flag & FC_PUBLIC_LOOP &&
- !(phba->fc_flag & FC_LBIT)) {
+ vport->fc_flag & FC_PUBLIC_LOOP &&
+ !(vport->fc_flag & FC_LBIT)) {
/* Need to wait for FAN - use discovery timer
- * for timeout. hba_state is identically
+ * for timeout. port_state is identically
* LPFC_LOCAL_CFG_LINK while waiting for FAN
*/
- lpfc_set_disctmo(phba);
+ lpfc_set_disctmo(vport);
return;
- }
+ }
- /* Start discovery by sending a FLOGI. hba_state is identically
+ /* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- phba->hba_state = LPFC_FLOGI;
- lpfc_set_disctmo(phba);
- lpfc_initial_flogi(phba);
+ if (vport->port_state != LPFC_FLOGI) {
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+ lpfc_initial_flogi(vport);
+ }
return;
out:
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "%d:0306 CONFIG_LINK mbxStatus error x%x "
+ "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
"HBA state x%x\n",
- phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);
+ phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
+ vport->port_state);
- lpfc_linkdown(phba);
+ mempool_free(pmb, phba->mbox_mem_pool);
- phba->hba_state = LPFC_HBA_ERROR;
+ lpfc_linkdown(phba);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0200 CONFIG_LINK bad hba state x%x\n",
- phba->brd_no, phba->hba_state);
+ "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
+ phba->brd_no, vport->vpi, vport->port_state);
- lpfc_clear_la(phba, pmb);
- pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
- }
+ lpfc_issue_clear_la(phba, vport);
return;
}
static void
-lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli = &phba->sli;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
+ struct lpfc_vport *vport = pmb->vport;
/* Check for error */
if (mb->mbxStatus) {
/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "%d:0319 READ_SPARAM mbxStatus error x%x "
+ "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
"hba state x%x>\n",
- phba->brd_no, mb->mbxStatus, phba->hba_state);
+ phba->brd_no, vport->vpi, mb->mbxStatus,
+ vport->port_state);
lpfc_linkdown(phba);
- phba->hba_state = LPFC_HBA_ERROR;
goto out;
}
- memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
+ memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
sizeof (struct serv_parm));
if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
+ u64_to_wwn(phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
- memcpy((uint8_t *) & phba->fc_nodename,
- (uint8_t *) & phba->fc_sparam.nodeName,
- sizeof (struct lpfc_name));
- memcpy((uint8_t *) & phba->fc_portname,
- (uint8_t *) & phba->fc_sparam.portName,
- sizeof (struct lpfc_name));
+ u64_to_wwn(phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof(vport->fc_nodename));
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(vport->fc_portname));
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
+ memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
+ }
+
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
out:
pmb->context1 = NULL;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- if (phba->hba_state != LPFC_CLEAR_LA) {
- lpfc_clear_la(phba, pmb);
- pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
- == MBX_NOT_FINISHED) {
- mempool_free( pmb, phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
- }
- } else {
- mempool_free( pmb, phba->mbox_mem_pool);
- }
+ lpfc_issue_clear_la(phba, vport);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
- int i;
+ struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+ int i;
struct lpfc_dmabuf *mp;
int rc;
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
switch (la->UlnkSpeed) {
- case LA_1GHZ_LINK:
- phba->fc_linkspeed = LA_1GHZ_LINK;
- break;
- case LA_2GHZ_LINK:
- phba->fc_linkspeed = LA_2GHZ_LINK;
- break;
- case LA_4GHZ_LINK:
- phba->fc_linkspeed = LA_4GHZ_LINK;
- break;
- case LA_8GHZ_LINK:
- phba->fc_linkspeed = LA_8GHZ_LINK;
- break;
- default:
- phba->fc_linkspeed = LA_UNKNW_LINK;
- break;
+ case LA_1GHZ_LINK:
+ phba->fc_linkspeed = LA_1GHZ_LINK;
+ break;
+ case LA_2GHZ_LINK:
+ phba->fc_linkspeed = LA_2GHZ_LINK;
+ break;
+ case LA_4GHZ_LINK:
+ phba->fc_linkspeed = LA_4GHZ_LINK;
+ break;
+ case LA_8GHZ_LINK:
+ phba->fc_linkspeed = LA_8GHZ_LINK;
+ break;
+ default:
+ phba->fc_linkspeed = LA_UNKNW_LINK;
+ break;
}
phba->fc_topology = la->topology;
+ phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
if (phba->fc_topology == TOPOLOGY_LOOP) {
- /* Get Loop Map information */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+ /* Get Loop Map information */
if (la->il)
- phba->fc_flag |= FC_LBIT;
+ vport->fc_flag |= FC_LBIT;
- phba->fc_myDID = la->granted_AL_PA;
+ vport->fc_myDID = la->granted_AL_PA;
i = la->un.lilpBde64.tus.f.bdeSize;
if (i == 0) {
@@ -769,29 +1002,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
}
/* Link Up Event ALPA map */
lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_LINK_EVENT,
- "%d:1304 Link Up Event "
- "ALPA map Data: x%x "
- "x%x x%x x%x\n",
- phba->brd_no,
- un.pa.wd1, un.pa.wd2,
- un.pa.wd3, un.pa.wd4);
+ KERN_WARNING,
+ LOG_LINK_EVENT,
+ "%d:1304 Link Up Event "
+ "ALPA map Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ un.pa.wd1, un.pa.wd2,
+ un.pa.wd3, un.pa.wd4);
}
}
}
} else {
- phba->fc_myDID = phba->fc_pref_DID;
- phba->fc_flag |= FC_LBIT;
+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+ if (phba->max_vpi && phba->cfg_npiv_enable &&
+ (phba->sli_rev == 3))
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ }
+ vport->fc_myDID = phba->fc_pref_DID;
+ vport->fc_flag |= FC_LBIT;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_linkup(phba);
if (sparam_mbox) {
- lpfc_read_sparam(phba, sparam_mbox);
+ lpfc_read_sparam(phba, sparam_mbox, 0);
+ sparam_mbox->vport = vport;
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
+ (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -799,36 +1038,48 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
mempool_free(sparam_mbox, phba->mbox_mem_pool);
if (cfglink_mbox)
mempool_free(cfglink_mbox, phba->mbox_mem_pool);
- return;
+ goto out;
}
}
if (cfglink_mbox) {
- phba->hba_state = LPFC_LOCAL_CFG_LINK;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED)
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc != MBX_NOT_FINISHED)
+ return;
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
}
+out:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ phba->brd_no, vport->vpi,
+ vport->port_state, sparam_mbox, cfglink_mbox);
+
+ lpfc_issue_clear_la(phba, vport);
+ return;
}
static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
uint32_t control;
struct lpfc_sli *psli = &phba->sli;
lpfc_linkdown(phba);
/* turn on Link Attention interrupts - no CLEAR_LA needed */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
}
/*
@@ -838,22 +1089,21 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
READ_LA_VAR *la;
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Check for error */
if (mb->mbxStatus) {
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1307 READ_LA mbox error x%x state x%x\n",
- phba->brd_no,
- mb->mbxStatus, phba->hba_state);
+ phba->brd_no, mb->mbxStatus, vport->port_state);
lpfc_mbx_issue_link_down(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
goto lpfc_mbx_cmpl_read_la_free_mbuf;
}
@@ -861,27 +1111,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
if (la->pb)
- phba->fc_flag |= FC_BYPASSED_MODE;
+ vport->fc_flag |= FC_BYPASSED_MODE;
else
- phba->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ vport->fc_flag &= ~FC_BYPASSED_MODE;
+ spin_unlock_irq(shost->host_lock);
if (((phba->fc_eventTag + 1) < la->eventTag) ||
- (phba->fc_eventTag == la->eventTag)) {
+ (phba->fc_eventTag == la->eventTag)) {
phba->fc_stat.LinkMultiEvent++;
- if (la->attType == AT_LINK_UP) {
+ if (la->attType == AT_LINK_UP)
if (phba->fc_eventTag != 0)
lpfc_linkdown(phba);
- }
}
phba->fc_eventTag = la->eventTag;
if (la->attType == AT_LINK_UP) {
phba->fc_stat.LinkUp++;
- if (phba->fc_flag & FC_LOOPBACK_MODE) {
+ if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"%d:1306 Link Up Event in loop back mode "
"x%x received Data: x%x x%x x%x x%x\n",
@@ -903,7 +1152,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
"%d:1305 Link Down Event x%x received "
"Data: x%x x%x x%x\n",
phba->brd_no, la->eventTag, phba->fc_eventTag,
- phba->hba_state, phba->fc_flag);
+ phba->pport->port_state, vport->fc_flag);
lpfc_mbx_issue_link_down(phba);
}
@@ -921,31 +1170,115 @@ lpfc_mbx_cmpl_read_la_free_mbuf:
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
- struct lpfc_nodelist *ndlp;
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
/* Good status, call state machine */
- lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
lpfc_nlp_put(ndlp);
return;
}
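+/*
+ * Completion handler for the UNREG_VPI mailbox command. It marks the
+ * vport's VPI unregistration as complete and, if the vport is being
+ * unloaded, drops the shost reference taken in lpfc_vport_delete().
+ */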
+static void
+lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ switch (mb->mbxStatus) {
+ case 0x0011:
+ case 0x0020:
+ case 0x9700:
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d (%d):0911 cmpl_unreg_vpi, "
+ "mb status = 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
+ break;
+ }
+ vport->unreg_vpi_cmpl = VPORT_OK;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ /*
+ * This shost reference might have been taken at the beginning of
+ * lpfc_vport_delete()
+ */
+ if (vport->load_flag & FC_UNLOADING)
+ scsi_host_put(shost);
+}
+
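+/*
+ * Issue an UNREG_VPI mailbox command for this vport. If the command
+ * cannot be issued, the mailbox is freed and unreg_vpi_cmpl is set to
+ * VPORT_ERROR so the caller knows the unregistration never started.
+ */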
+void
+lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return;
+
+ lpfc_unreg_vpi(phba, vport->vpi, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
+ rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "%d (%d):1800 Could not issue unreg_vpi\n",
+ phba->brd_no, vport->vpi);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ vport->unreg_vpi_cmpl = VPORT_ERROR;
+ }
+}
+
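+/*
+ * Completion handler for the REG_VPI mailbox command. On error the vport
+ * is marked FC_VPORT_FAILED; otherwise PLOGIs are issued to any NPR nodes
+ * and the vport is moved to the READY state.
+ */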
+static void
+lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ MAILBOX_t *mb = &pmb->mb;
+
+ switch (mb->mbxStatus) {
+ case 0x0011:
+ case 0x9601:
+ case 0x9602:
+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
+ "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ vport->fc_myDID = 0;
+ goto out;
+ }
+
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ }
+ vport->port_state = LPFC_VPORT_READY;
+
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+}
+
/*
* This routine handles processing a Fabric REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -953,20 +1286,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
+ struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_vport *next_vport;
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
- struct lpfc_nodelist *ndlp_fdmi;
-
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
pmb->context1 = NULL;
pmb->context2 = NULL;
@@ -977,60 +1304,46 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_nlp_put(ndlp);
- /* FLOGI failed, so just use loop map to make discovery list */
- lpfc_disc_list_loopmap(phba);
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /* FLOGI failed, use loop map to make discovery list */
+ lpfc_disc_list_loopmap(vport);
+
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ return;
+ }
+
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "%d (%d):0258 Register Fabric login error: 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
- /* Start discovery */
- lpfc_disc_start(phba);
return;
}
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
- if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
- /* This NPort has been assigned an NPort_ID by the fabric as a
- * result of the completed fabric login. Issue a State Change
- * Registration (SCR) ELS request to the fabric controller
- * (SCR_DID) so that this NPort gets RSCN events from the
- * fabric.
- */
- lpfc_issue_els_scr(phba, SCR_DID, 0);
-
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (!ndlp) {
- /* Allocate a new node instance. If the pool is empty,
- * start the discovery process and skip the Nameserver
- * login process. This is attempted again later on.
- * Otherwise, issue a Port Login (PLOGI) to NameServer.
- */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
- if (!ndlp) {
- lpfc_disc_start(phba);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
- } else {
- lpfc_nlp_init(phba, ndlp, NameServer_DID);
- ndlp->nlp_type |= NLP_FABRIC;
- }
- }
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, NameServer_DID, 0);
- if (phba->cfg_fdmi_on) {
- ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
- GFP_KERNEL);
- if (ndlp_fdmi) {
- lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
- ndlp_fdmi->nlp_type |= NLP_FABRIC;
- ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
- lpfc_issue_els_plogi(phba, FDMI_DID, 0);
+ if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+ list_for_each_entry(next_vport, &phba->port_list, listentry) {
+ if (next_vport->port_type == LPFC_PHYSICAL_PORT)
+ continue;
+
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+ lpfc_initial_fdisc(next_vport);
+ else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ lpfc_vport_set_state(vport,
+ FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0259 No NPIV Fabric "
+ "support\n",
+ phba->brd_no, vport->vpi);
}
}
+ lpfc_do_scr_ns_plogi(phba, vport);
}
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1046,32 +1359,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
- struct lpfc_nodelist *ndlp;
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_vport *vport = pmb->vport;
if (mb->mbxStatus) {
+out:
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
- /* RegLogin failed, so just use loop map to make discovery
- list */
- lpfc_disc_list_loopmap(phba);
+ if (phba->fc_topology == TOPOLOGY_LOOP) {
+ /*
+ * RegLogin failed, use loop map to make discovery
+ * list
+ */
+ lpfc_disc_list_loopmap(vport);
- /* Start discovery */
- lpfc_disc_start(phba);
+ /* Start discovery */
+ lpfc_disc_start(vport);
+ return;
+ }
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0260 Register NameServer error: 0x%x\n",
+ phba->brd_no, vport->vpi, mb->mbxStatus);
return;
}
@@ -1079,37 +1396,43 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- if (phba->hba_state < LPFC_HBA_READY) {
- /* Link up discovery requires Fabrib registration. */
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
- lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* Link up discovery requires Fabric registration. */
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
+ lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+ lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
+
+ /* Issue SCR just before NameServer GID_FT Query */
+ lpfc_issue_els_scr(vport, SCR_DID, 0);
}
- phba->fc_ns_retry = 0;
+ vport->fc_ns_retry = 0;
/* Good status, issue CT Request to NameServer */
- if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
/* Cannot issue NameServer Query, so finish up discovery */
- lpfc_disc_start(phba);
+ goto out;
}
lpfc_nlp_put(ndlp);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
static void
-lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- struct fc_rport *rport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct fc_rport *rport;
struct lpfc_rport_data *rdata;
struct fc_rport_identifiers rport_ids;
+ struct lpfc_hba *phba = vport->phba;
/* Remote port has reappeared. Re-register w/ FC transport */
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
@@ -1125,10 +1448,15 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* registered the port.
*/
if (ndlp->rport && ndlp->rport->dd_data &&
- *(struct lpfc_rport_data **) ndlp->rport->dd_data) {
+ ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
lpfc_nlp_put(ndlp);
}
- ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport add: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
if (!rport || !get_device(&rport->dev)) {
dev_printk(KERN_WARNING, &phba->pcidev->dev,
"Warning: fc_remote_port_add failed\n");
@@ -1151,25 +1479,20 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
- (rport->scsi_target_id < LPFC_MAX_TARGET)) {
+ (rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
-
return;
}
static void
-lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
- struct lpfc_rport_data *rdata = rport->dd_data;
- if (rport->scsi_target_id == -1) {
- ndlp->rport = NULL;
- rdata->pnode = NULL;
- lpfc_nlp_put(ndlp);
- put_device(&rport->dev);
- }
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ "rport delete: did:x%x flg:x%x type x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
fc_remote_port_delete(rport);
@@ -1177,42 +1500,46 @@ lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
static void
-lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count)
+lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
- spin_lock_irq(phba->host->host_lock);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
switch (state) {
case NLP_STE_UNUSED_NODE:
- phba->fc_unused_cnt += count;
+ vport->fc_unused_cnt += count;
break;
case NLP_STE_PLOGI_ISSUE:
- phba->fc_plogi_cnt += count;
+ vport->fc_plogi_cnt += count;
break;
case NLP_STE_ADISC_ISSUE:
- phba->fc_adisc_cnt += count;
+ vport->fc_adisc_cnt += count;
break;
case NLP_STE_REG_LOGIN_ISSUE:
- phba->fc_reglogin_cnt += count;
+ vport->fc_reglogin_cnt += count;
break;
case NLP_STE_PRLI_ISSUE:
- phba->fc_prli_cnt += count;
+ vport->fc_prli_cnt += count;
break;
case NLP_STE_UNMAPPED_NODE:
- phba->fc_unmap_cnt += count;
+ vport->fc_unmap_cnt += count;
break;
case NLP_STE_MAPPED_NODE:
- phba->fc_map_cnt += count;
+ vport->fc_map_cnt += count;
break;
case NLP_STE_NPR_NODE:
- phba->fc_npr_cnt += count;
+ vport->fc_npr_cnt += count;
break;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
}
static void
-lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -1226,35 +1553,34 @@ lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Transport interface */
if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- phba->nport_event_cnt++;
- lpfc_unregister_remote_port(phba, ndlp);
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
}
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- * sure to unblock any attached scsi devices
- */
- lpfc_register_remote_port(phba, ndlp);
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ lpfc_register_remote_port(vport, ndlp);
}
-
- /*
- * if we added to Mapped list, but the remote port
- * registration failed or assigned a target id outside
- * our presentable range - move the node to the
- * Unmapped List
- */
+ /*
+ * if we added to Mapped list, but the remote port
+ * registration failed or assigned a target id outside
+ * our presentable range - move the node to the
+ * Unmapped List
+ */
if (new_state == NLP_STE_MAPPED_NODE &&
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
- spin_unlock_irq(phba->host->host_lock);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
}
@@ -1280,61 +1606,74 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
}
void
-lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state)
+lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ int state)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int old_state = ndlp->nlp_state;
char name1[16], name2[16];
- lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0904 NPort state transition x%06x, %s -> %s\n",
- phba->brd_no,
+ lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
+ "%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
+ vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID,
lpfc_nlp_state_name(name1, sizeof(name1), old_state),
lpfc_nlp_state_name(name2, sizeof(name2), state));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node statechg did:x%x old:%d ste:%d",
+ ndlp->nlp_DID, old_state, state);
+
if (old_state == NLP_STE_NPR_NODE &&
(ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
state != NLP_STE_NPR_NODE)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (old_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
ndlp->nlp_type &= ~NLP_FC_NODE;
}
if (list_empty(&ndlp->nlp_listp)) {
- spin_lock_irq(phba->host->host_lock);
- list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
+ spin_unlock_irq(shost->host_lock);
} else if (old_state)
- lpfc_nlp_counters(phba, old_state, -1);
+ lpfc_nlp_counters(vport, old_state, -1);
ndlp->nlp_state = state;
- lpfc_nlp_counters(phba, state, 1);
- lpfc_nlp_state_cleanup(phba, ndlp, old_state, state);
+ lpfc_nlp_counters(vport, state, 1);
+ lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
-lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ spin_lock_irq(shost->host_lock);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(phba->host->host_lock);
- lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+ NLP_STE_UNUSED_NODE);
}
void
-lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
- lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
+ spin_lock_irq(shost->host_lock);
list_del_init(&ndlp->nlp_listp);
- spin_unlock_irq(phba->host->host_lock);
+ ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
lpfc_nlp_put(ndlp);
}
@@ -1342,11 +1681,13 @@ lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* Start / ReStart rescue timer for Discovery / RSCN handling
*/
void
-lpfc_set_disctmo(struct lpfc_hba * phba)
+lpfc_set_disctmo(struct lpfc_vport *vport)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
uint32_t tmo;
- if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+ if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
/* For FAN, timeout should be greater then edtov */
tmo = (((phba->fc_edtov + 999) / 1000) + 1);
} else {
@@ -1356,18 +1697,25 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
tmo = ((phba->fc_ratov * 3) + 3);
}
- mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_DISC_TMO;
- spin_unlock_irq(phba->host->host_lock);
+
+ if (!timer_pending(&vport->fc_disctmo)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "set disc timer: tmo:x%x state:x%x flg:x%x",
+ tmo, vport->port_state, vport->fc_flag);
+ }
+
+ mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_TMO;
+ spin_unlock_irq(shost->host_lock);
/* Start Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0247 Start Discovery Timer state x%x "
+ "%d (%d):0247 Start Discovery Timer state x%x "
"Data: x%x x%lx x%x x%x\n",
- phba->brd_no,
- phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
- phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+ phba->brd_no, vport->vpi, vport->port_state, tmo,
+ (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
return;
}
@@ -1376,23 +1724,34 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
* Cancel rescue timer for Discovery / RSCN handling
*/
int
-lpfc_can_disctmo(struct lpfc_hba * phba)
+lpfc_can_disctmo(struct lpfc_vport *vport)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ unsigned long iflags;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "can disc timer: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
+
/* Turn off discovery timer if its running */
- if (phba->fc_flag & FC_DISC_TMO) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(phba->host->host_lock);
- del_timer_sync(&phba->fc_disctmo);
- phba->work_hba_events &= ~WORKER_DISC_TMO;
+ if (vport->fc_flag & FC_DISC_TMO) {
+ spin_lock_irqsave(shost->host_lock, iflags);
+ vport->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ del_timer_sync(&vport->fc_disctmo);
+ spin_lock_irqsave(&vport->work_port_lock, iflags);
+ vport->work_port_events &= ~WORKER_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflags);
}
/* Cancel Discovery Timer state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0248 Cancel Discovery Timer state x%x "
+ "%d (%d):0248 Cancel Discovery Timer state x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->hba_state, phba->fc_flag,
- phba->fc_plogi_cnt, phba->fc_adisc_cnt);
+ phba->brd_no, vport->vpi, vport->port_state,
+ vport->fc_flag, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
return 0;
}
@@ -1402,15 +1761,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba)
* Return true if iocb matches the specified nport
*/
int
-lpfc_check_sli_ndlp(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
+lpfc_check_sli_ndlp(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *iocb,
+ struct lpfc_nodelist *ndlp)
{
- struct lpfc_sli *psli;
- IOCB_t *icmd;
+ struct lpfc_sli *psli = &phba->sli;
+ IOCB_t *icmd = &iocb->iocb;
+ struct lpfc_vport *vport = ndlp->vport;
+
+ if (iocb->vport != vport)
+ return 0;
- psli = &phba->sli;
- icmd = &iocb->iocb;
if (pring->ringno == LPFC_ELS_RING) {
switch (icmd->ulpCommand) {
case CMD_GEN_REQUEST64_CR:
@@ -1428,7 +1790,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
} else if (pring->ringno == psli->fcp_ring) {
/* Skip match check if waiting to relogin to FCP target */
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
- (ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ (ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1445,7 +1807,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
* associated with nlp_rpi in the LPFC_NODELIST entry.
*/
static int
-lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
@@ -1454,6 +1816,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
IOCB_t *icmd;
uint32_t rpi, i;
+ lpfc_fabric_abort_nport(ndlp);
+
/*
* Everything that matches on txcmplq will be returned
* by firmware with a no rpi error.
@@ -1465,15 +1829,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
- list) {
+ list) {
/*
* Check to see if iocb matches the nport we are
* looking for
*/
- if ((lpfc_check_sli_ndlp
- (phba, pring, iocb, ndlp))) {
+ if ((lpfc_check_sli_ndlp(phba, pring, iocb,
+ ndlp))) {
/* It matches, so dequeue and call compl
with an error */
list_move_tail(&iocb->list,
@@ -1481,22 +1845,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
pring->txq_cnt--;
}
}
- spin_unlock_irq(phba->host->host_lock);
-
+ spin_unlock_irq(&phba->hbalock);
}
}
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- (iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ (iocb->iocb_cmpl)(phba, iocb, iocb);
+ }
}
return 0;
@@ -1512,19 +1876,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
* we are waiting to PLOGI back to the remote NPort.
*/
int
-lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- LPFC_MBOXQ_t *mbox;
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
int rc;
if (ndlp->nlp_rpi) {
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
- mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox
- (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
- mempool_free( mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
}
lpfc_no_rpi(phba, ndlp);
ndlp->nlp_rpi = 0;
@@ -1533,25 +1900,70 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
return 0;
}
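+/*
+ * Unregister all RPIs on this vport by issuing UNREG_LOGIN with the
+ * 0xffff wildcard RPI.
+ */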
+void
+lpfc_unreg_all_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
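+/*
+ * Unregister the default RPIs on this vport by issuing UNREG_DID with
+ * the 0xffffffff wildcard DID.
+ */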
+void
+lpfc_unreg_default_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+ "%d (%d):1815 Could not issue "
+ "unreg_did (default rpis)\n",
+ phba->brd_no, vport->vpi);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
/*
* Free resources associated with LPFC_NODELIST entry
* so it can be freed.
*/
static int
-lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- LPFC_MBOXQ_t *mb;
- LPFC_MBOXQ_t *nextmb;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0900 Cleanup node for NPort x%x "
+ "%d (%d):0900 Cleanup node for NPort x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
+ phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
- lpfc_dequeue_node(phba, ndlp);
+ lpfc_dequeue_node(vport, ndlp);
/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
if ((mb = phba->sli.mbox_active)) {
@@ -1562,13 +1974,13 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
}
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
- (ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
list_del(&mb->list);
@@ -1576,20 +1988,27 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
lpfc_nlp_put(ndlp);
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_els_abort(phba,ndlp);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
if (!list_empty(&ndlp->els_retry_evt.evt_listp))
list_del_init(&ndlp->els_retry_evt.evt_listp);
+ if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
+ list_del_init(&ndlp->dev_loss_evt.evt_listp);
+
+ if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
+ list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
+ }
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
return 0;
}
@@ -1600,18 +2019,22 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
* machine, defer the free till we reach the end of the state machine.
*/
static void
-lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_rport_data *rdata;
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
- lpfc_cleanup_node(phba, ndlp);
+ lpfc_cleanup_node(vport, ndlp);
- if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
- put_device(&ndlp->rport->dev);
+ /*
+ * We can get here with a non-NULL ndlp->rport because when we
+ * unregister a rport we don't break the rport/node linkage. So if we
+ * do, make sure we don't leave any dangling pointers behind.
+ */
+ if (ndlp->rport) {
rdata = ndlp->rport->dd_data;
rdata->pnode = NULL;
ndlp->rport = NULL;
@@ -1619,11 +2042,10 @@ lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
static int
-lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
+lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
{
- D_ID mydid;
- D_ID ndlpdid;
- D_ID matchdid;
+ D_ID mydid, ndlpdid, matchdid;
if (did == Bcast_DID)
return 0;
@@ -1637,7 +2059,7 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
return 1;
/* Next check for area/domain identically equals 0 match */
- mydid.un.word = phba->fc_myDID;
+ mydid.un.word = vport->fc_myDID;
if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
return 0;
}
@@ -1669,101 +2091,116 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
}
/* Search for a nodelist entry */
-struct lpfc_nodelist *
-lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did)
+static struct lpfc_nodelist *
+__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
uint32_t data1;
- spin_lock_irq(phba->host->host_lock);
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
- if (lpfc_matchdid(phba, ndlp, did)) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (lpfc_matchdid(vport, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0929 FIND node DID "
+ "%d (%d):0929 FIND node DID "
" Data: x%p x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1);
- spin_unlock_irq(phba->host->host_lock);
return ndlp;
}
}
- spin_unlock_irq(phba->host->host_lock);
/* FIND node did <did> NOT FOUND */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0932 FIND node did x%x NOT FOUND.\n",
- phba->brd_no, did);
+ "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
+ phba->brd_no, vport->vpi, did);
return NULL;
}
struct lpfc_nodelist *
-lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
+lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- ndlp = lpfc_findnode_did(phba, did);
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_did(vport, did);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
+
+struct lpfc_nodelist *
+lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
- if ((phba->fc_flag & FC_RSCN_MODE) &&
- ((lpfc_rscn_payload_check(phba, did) == 0)))
+ if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
+ lpfc_rscn_payload_check(vport, did) == 0)
return NULL;
ndlp = (struct lpfc_nodelist *)
- mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
if (!ndlp)
return NULL;
- lpfc_nlp_init(phba, ndlp, did);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_init(vport, ndlp, did);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
return ndlp;
}
- if (phba->fc_flag & FC_RSCN_MODE) {
- if (lpfc_rscn_payload_check(phba, did)) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ if (lpfc_rscn_payload_check(vport, did)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
/* Since this node is marked for discovery,
* delay timeout is not needed.
*/
if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
} else
ndlp = NULL;
} else {
if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
return NULL;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
}
return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
-lpfc_disc_list_loopmap(struct lpfc_hba * phba)
+lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
int j;
uint32_t alpa, index;
- if (phba->hba_state <= LPFC_LINK_DOWN) {
+ if (!lpfc_is_link_up(phba))
return;
- }
- if (phba->fc_topology != TOPOLOGY_LOOP) {
+
+ if (phba->fc_topology != TOPOLOGY_LOOP)
return;
- }
/* Check for loop map present or not */
if (phba->alpa_map[0]) {
for (j = 1; j <= phba->alpa_map[0]; j++) {
alpa = phba->alpa_map[j];
-
- if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
+ if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
continue;
- }
- lpfc_setup_disc_node(phba, alpa);
+ lpfc_setup_disc_node(vport, alpa);
}
} else {
/* No alpamap, so try all alpa's */
@@ -1776,113 +2213,167 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba)
else
index = FC_MAXLOOP - j - 1;
alpa = lpfcAlpaArray[index];
- if ((phba->fc_myDID & 0xff) == alpa) {
+ if ((vport->fc_myDID & 0xff) == alpa)
continue;
- }
-
- lpfc_setup_disc_node(phba, alpa);
+ lpfc_setup_disc_node(vport, alpa);
}
}
return;
}
-/* Start Link up / RSCN discovery on NPR list */
void
-lpfc_disc_start(struct lpfc_hba * phba)
+lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
- struct lpfc_sli *psli;
LPFC_MBOXQ_t *mbox;
- struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
+ struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
+ struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
+ int rc;
+
+ /*
+ * If it's not a physical port, or if we have already sent
+ * clear_la, don't send it again.
+ */
+ if ((phba->link_state >= LPFC_CLEAR_LA) ||
+ (vport->port_type != LPFC_PHYSICAL_PORT))
+ return;
+
+ /* Link up discovery */
+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
+ phba->link_state = LPFC_CLEAR_LA;
+ lpfc_clear_la(phba, mbox);
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
+ MBX_STOP_IOCB));
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_disc_flush_list(vport);
+ extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ phba->link_state = LPFC_HBA_ERROR;
+ }
+ }
+}
+
+/* Reg_vpi to tell firmware to resume normal operations */
+void
+lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *regvpimbox;
+
+ regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (regvpimbox) {
+ lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
+ regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
+ regvpimbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, regvpimbox,
+ (MBX_NOWAIT | MBX_STOP_IOCB))
+ == MBX_NOT_FINISHED) {
+ mempool_free(regvpimbox, phba->mbox_mem_pool);
+ }
+ }
+}
+
+/* Start Link up / RSCN discovery on NPR nodes */
+void
+lpfc_disc_start(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
uint32_t num_sent;
uint32_t clear_la_pending;
int did_changed;
- int rc;
- psli = &phba->sli;
-
- if (phba->hba_state <= LPFC_LINK_DOWN) {
+ if (!lpfc_is_link_up(phba))
return;
- }
- if (phba->hba_state == LPFC_CLEAR_LA)
+
+ if (phba->link_state == LPFC_CLEAR_LA)
clear_la_pending = 1;
else
clear_la_pending = 0;
- if (phba->hba_state < LPFC_HBA_READY) {
- phba->hba_state = LPFC_DISC_AUTH;
- }
- lpfc_set_disctmo(phba);
+ if (vport->port_state < LPFC_VPORT_READY)
+ vport->port_state = LPFC_DISC_AUTH;
- if (phba->fc_prevDID == phba->fc_myDID) {
+ lpfc_set_disctmo(vport);
+
+ if (vport->fc_prevDID == vport->fc_myDID)
did_changed = 0;
- } else {
+ else
did_changed = 1;
- }
- phba->fc_prevDID = phba->fc_myDID;
- phba->num_disc_nodes = 0;
+
+ vport->fc_prevDID = vport->fc_myDID;
+ vport->num_disc_nodes = 0;
/* Start Discovery state <hba_state> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0202 Start Discovery hba state x%x "
+ "%d (%d):0202 Start Discovery hba state x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, phba->hba_state, phba->fc_flag,
- phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-
- /* If our did changed, we MUST do PLOGI */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
- did_changed) {
- spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
- }
- }
+ phba->brd_no, vport->vpi, vport->port_state,
+ vport->fc_flag, vport->fc_plogi_cnt,
+ vport->fc_adisc_cnt);
/* First do ADISCs - if any */
- num_sent = lpfc_els_disc_adisc(phba);
+ num_sent = lpfc_els_disc_adisc(vport);
if (num_sent)
return;
- if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
+ /*
+ * For SLI3, cmpl_reg_vpi will set port_state to READY, and
+ * continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE)) {
+ lpfc_issue_reg_vpi(phba, vport);
+ return;
+ }
+
+ /*
+ * For SLI2, we need to set port_state to READY and continue
+ * discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
/* If we get here, there is nothing to ADISC */
- if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, mbox);
- mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, mbox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free( mbox, phba->mbox_mem_pool);
- lpfc_disc_flush_list(phba);
- psli->ring[(psli->extra_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &=
- ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_issue_clear_la(phba, vport);
+
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
}
}
+ vport->port_state = LPFC_VPORT_READY;
} else {
/* Next do PLOGIs - if any */
- num_sent = lpfc_els_disc_plogi(phba);
+ num_sent = lpfc_els_disc_plogi(vport);
if (num_sent)
return;
- if (phba->fc_flag & FC_RSCN_MODE) {
+ if (vport->fc_flag & FC_RSCN_MODE) {
/* Check to see if more RSCNs came in while we
* were processing this one.
*/
- if ((phba->fc_rscn_id_cnt == 0) &&
- (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_RSCN_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ if ((vport->fc_rscn_id_cnt == 0) &&
+ (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_RSCN_MODE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
} else
- lpfc_els_handle_rscn(phba);
+ lpfc_els_handle_rscn(vport);
}
}
return;
@@ -1893,7 +2384,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
* ring that match the specified nodelist.
*/
static void
-lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
struct lpfc_sli *psli;
@@ -1907,7 +2398,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
/* Error matching iocb on txq or txcmplq
* First check the txq.
*/
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
if (iocb->context1 != ndlp) {
continue;
@@ -1927,36 +2418,36 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
continue;
}
icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
+ icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
icmd = &iocb->iocb;
icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
-
- return;
}
void
-lpfc_disc_flush_list(struct lpfc_hba * phba)
+lpfc_disc_flush_list(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
+ struct lpfc_hba *phba = vport->phba;
- if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) {
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+ if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
@@ -1967,6 +2458,14 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
}
}
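+/*
+ * Tear down all discovery state for a vport: flush pending RSCNs, abort
+ * outstanding ELS commands and flush the discovery node list.
+ */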
+void
+lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
+{
+ lpfc_els_flush_rscn(vport);
+ lpfc_els_flush_cmd(vport);
+ lpfc_disc_flush_list(vport);
+}
+
/*****************************************************************************/
/*
* NAME: lpfc_disc_timeout
@@ -1985,158 +2484,154 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
void
lpfc_disc_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_hba *phba = vport->phba;
unsigned long flags = 0;
if (unlikely(!phba))
return;
- spin_lock_irqsave(phba->host->host_lock, flags);
- if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
- phba->work_hba_events |= WORKER_DISC_TMO;
+ if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
+ spin_lock_irqsave(&vport->work_port_lock, flags);
+ vport->work_port_events |= WORKER_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
}
- spin_unlock_irqrestore(phba->host->host_lock, flags);
return;
}
static void
-lpfc_disc_timeout_handler(struct lpfc_hba *phba)
+lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
- struct lpfc_sli *psli;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_nodelist *ndlp, *next_ndlp;
- LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
+ LPFC_MBOXQ_t *initlinkmbox;
int rc, clrlaerr = 0;
- if (unlikely(!phba))
+ if (!(vport->fc_flag & FC_DISC_TMO))
return;
- if (!(phba->fc_flag & FC_DISC_TMO))
- return;
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_TMO;
+ spin_unlock_irq(shost->host_lock);
- psli = &phba->sli;
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ "disc timeout: state:x%x rtry:x%x flg:x%x",
+ vport->port_state, vport->fc_ns_retry, vport->fc_flag);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_DISC_TMO;
- spin_unlock_irq(phba->host->host_lock);
-
- switch (phba->hba_state) {
+ switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
- /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
- /* FAN timeout */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_DISCOVERY,
- "%d:0221 FAN timeout\n",
- phba->brd_no);
+ /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
+ * FAN
+ */
+ /* FAN timeout */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
+ "%d (%d):0221 FAN timeout\n",
+ phba->brd_no, vport->vpi);
/* Start discovery by sending FLOGI, clean up old rpis */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
if (ndlp->nlp_state != NLP_STE_NPR_NODE)
continue;
if (ndlp->nlp_type & NLP_FABRIC) {
/* Clean up the ndlp on Fabric connections */
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
/* Fail outstanding IO now since device
* is marked for PLOGI.
*/
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
}
}
- phba->hba_state = LPFC_FLOGI;
- lpfc_set_disctmo(phba);
- lpfc_initial_flogi(phba);
+ if (vport->port_state != LPFC_FLOGI) {
+ vport->port_state = LPFC_FLOGI;
+ lpfc_set_disctmo(vport);
+ lpfc_initial_flogi(vport);
+ }
break;
+ case LPFC_FDISC:
case LPFC_FLOGI:
- /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
+ /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
/* Initial FLOGI timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0222 Initial FLOGI timeout\n",
- phba->brd_no);
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0222 Initial %s timeout\n",
+ phba->brd_no, vport->vpi,
+ vport->vpi ? "FLOGI" : "FDISC");
/* Assume no Fabric and go on with discovery.
* Check for outstanding ELS FLOGI to abort.
*/
/* FLOGI failed, so just use loop map to make discovery list */
- lpfc_disc_list_loopmap(phba);
+ lpfc_disc_list_loopmap(vport);
/* Start discovery */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
break;
case LPFC_FABRIC_CFG_LINK:
/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
NameServer login */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0223 Timeout while waiting for NameServer "
- "login\n", phba->brd_no);
+ "%d (%d):0223 Timeout while waiting for "
+ "NameServer login\n",
+ phba->brd_no, vport->vpi);
/* Next look for NameServer ndlp */
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
+ ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp)
lpfc_nlp_put(ndlp);
/* Start discovery */
- lpfc_disc_start(phba);
+ lpfc_disc_start(vport);
break;
case LPFC_NS_QRY:
/* Check for wait for NameServer Rsp timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0224 NameServer Query timeout "
+ "%d (%d):0224 NameServer Query timeout "
"Data: x%x x%x\n",
- phba->brd_no,
- phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-
- ndlp = lpfc_findnode_did(phba, NameServer_DID);
- if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
- if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- /* Try it one more time */
- rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
- if (rc == 0)
- break;
- }
- phba->fc_ns_retry = 0;
+ phba->brd_no, vport->vpi,
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
+ /* Try it one more time */
+ vport->fc_ns_retry++;
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
+ vport->fc_ns_retry, 0);
+ if (rc == 0)
+ break;
}
+ vport->fc_ns_retry = 0;
- /* Nothing to authenticate, so CLEAR_LA right now */
- clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!clearlambox) {
- clrlaerr = 1;
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0226 Device Discovery "
- "completion error\n",
- phba->brd_no);
- phba->hba_state = LPFC_HBA_ERROR;
- break;
- }
-
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, clearlambox);
- clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, clearlambox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(clearlambox, phba->mbox_mem_pool);
- clrlaerr = 1;
- break;
+ /*
+ * Discovery is over.
+ * set port_state to PORT_READY if SLI2.
+ * cmpl_reg_vpi will set port_state to READY for SLI3.
+ */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
}
/* Setup and issue mailbox INITIALIZE LINK command */
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0206 Device Discovery "
+ "%d (%d):0206 Device Discovery "
"completion error\n",
- phba->brd_no);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->brd_no, vport->vpi);
+ phba->link_state = LPFC_HBA_ERROR;
break;
}
@@ -2144,6 +2639,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
phba->cfg_link_speed);
initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+ initlinkmbox->vport = vport;
+ initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
(MBX_NOWAIT | MBX_STOP_IOCB));
lpfc_set_loopback_flag(phba);
@@ -2154,67 +2651,81 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
case LPFC_DISC_AUTH:
/* Node Authentication timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0227 Node Authentication timeout\n",
- phba->brd_no);
- lpfc_disc_flush_list(phba);
- clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!clearlambox) {
- clrlaerr = 1;
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0207 Device Discovery "
- "completion error\n",
- phba->brd_no);
- phba->hba_state = LPFC_HBA_ERROR;
- break;
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0227 Node Authentication timeout\n",
+ phba->brd_no, vport->vpi);
+ lpfc_disc_flush_list(vport);
+
+ /*
+ * set port_state to PORT_READY if SLI2.
+ * cmpl_reg_vpi will set port_state to READY for SLI3.
+ */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ lpfc_issue_reg_vpi(phba, vport);
+ else { /* NPIV Not enabled */
+ lpfc_issue_clear_la(phba, vport);
+ vport->port_state = LPFC_VPORT_READY;
}
- phba->hba_state = LPFC_CLEAR_LA;
- lpfc_clear_la(phba, clearlambox);
- clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
- rc = lpfc_sli_issue_mbox(phba, clearlambox,
- (MBX_NOWAIT | MBX_STOP_IOCB));
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(clearlambox, phba->mbox_mem_pool);
- clrlaerr = 1;
+ break;
+
+ case LPFC_VPORT_READY:
+ if (vport->fc_flag & FC_RSCN_MODE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0231 RSCN timeout Data: x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_cmd(vport);
+
+ lpfc_els_flush_rscn(vport);
+ lpfc_disc_flush_list(vport);
}
break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0229 Unexpected discovery timeout, "
+ "vport State x%x\n",
+ phba->brd_no, vport->vpi, vport->port_state);
+
+ break;
+ }
+
+ switch (phba->link_state) {
case LPFC_CLEAR_LA:
- /* CLEAR LA timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0228 CLEAR LA timeout\n",
- phba->brd_no);
+ /* CLEAR LA timeout */
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0228 CLEAR LA timeout\n",
+ phba->brd_no, vport->vpi);
clrlaerr = 1;
break;
- case LPFC_HBA_READY:
- if (phba->fc_flag & FC_RSCN_MODE) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0231 RSCN timeout Data: x%x x%x\n",
- phba->brd_no,
- phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-
- /* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(phba);
+ case LPFC_LINK_UNKNOWN:
+ case LPFC_WARM_START:
+ case LPFC_INIT_START:
+ case LPFC_INIT_MBX_CMDS:
+ case LPFC_LINK_DOWN:
+ case LPFC_LINK_UP:
+ case LPFC_HBA_ERROR:
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0230 Unexpected timeout, hba link "
+ "state x%x\n",
+ phba->brd_no, vport->vpi, phba->link_state);
+ clrlaerr = 1;
+ break;
- lpfc_els_flush_rscn(phba);
- lpfc_disc_flush_list(phba);
- }
+ case LPFC_HBA_READY:
break;
}
if (clrlaerr) {
- lpfc_disc_flush_list(phba);
+ lpfc_disc_flush_list(vport);
psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- phba->hba_state = LPFC_HBA_READY;
+ vport->port_state = LPFC_VPORT_READY;
}
return;
@@ -2227,37 +2738,29 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
* handed off to the SLI layer.
*/
void
-lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_sli *psli;
- MAILBOX_t *mb;
- struct lpfc_dmabuf *mp;
- struct lpfc_nodelist *ndlp;
-
- psli = &phba->sli;
- mb = &pmb->mb;
-
- ndlp = (struct lpfc_nodelist *) pmb->context2;
- mp = (struct lpfc_dmabuf *) (pmb->context1);
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
+ struct lpfc_vport *vport = pmb->vport;
pmb->context1 = NULL;
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_type |= NLP_FABRIC;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- /* Start issuing Fabric-Device Management Interface (FDMI)
- * command to 0xfffffa (FDMI well known port)
+ /*
+ * Start issuing Fabric-Device Management Interface (FDMI) command to
+ * 0xfffffa (FDMI well known port) or delay issuing FDMI command if
+ * fdmi-on=2 (supporting RPA/hostname)
*/
- if (phba->cfg_fdmi_on == 1) {
- lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
- } else {
- /*
- * Delay issuing FDMI command if fdmi-on=2
- * (supporting RPA/hostnmae)
- */
- mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
- }
+
+ if (phba->cfg_fdmi_on == 1)
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
+ else
+ mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
/* Mailbox took a reference to the node */
lpfc_nlp_put(ndlp);
@@ -2283,16 +2786,12 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
sizeof(ndlp->nlp_portname)) == 0;
}
-/*
- * Search node lists for a remote port matching filter criteria
- * Caller needs to hold host_lock before calling this routine.
- */
struct lpfc_nodelist *
-__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
+__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
struct lpfc_nodelist *ndlp;
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
filter(ndlp, param))
return ndlp;
@@ -2302,68 +2801,104 @@ __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
/*
* Search node lists for a remote port matching filter criteria
- * This routine is used when the caller does NOT have host_lock.
+ * This routine acquires and releases host_lock itself, so callers must
+ * not hold the lock.
*/
struct lpfc_nodelist *
-lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
+lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- ndlp = __lpfc_find_node(phba, filter, param);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_find_node(vport, filter, param);
+ spin_unlock_irq(shost->host_lock);
return ndlp;
}
/*
* This routine looks up the ndlp lists for the given RPI. If rpi found it
- * returns the node list pointer else return NULL.
+ * returns the node list element pointer else return NULL.
*/
struct lpfc_nodelist *
-__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi)
+__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
- return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi);
+ return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
struct lpfc_nodelist *
-lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- ndlp = __lpfc_findnode_rpi(phba, rpi);
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_rpi(vport, rpi);
+ spin_unlock_irq(shost->host_lock);
return ndlp;
}
/*
* This routine looks up the ndlp lists for the given WWPN. If WWPN found it
- * returns the node list pointer else return NULL.
+ * returns the node element list pointer else return NULL.
*/
struct lpfc_nodelist *
-lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn)
+lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
- spin_lock_irq(phba->host->host_lock);
- ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn);
- spin_unlock_irq(phba->host->host_lock);
- return NULL;
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
}
void
-lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
+lpfc_dev_loss_delay(unsigned long ptr)
+{
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (!list_empty(&evtp->evt_listp)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ evtp->evt_arg1 = ndlp;
+ evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ if (phba->work_wait)
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+}
+
+void
+lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
{
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
init_timer(&ndlp->nlp_delayfunc);
ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
ndlp->nlp_DID = did;
- ndlp->nlp_phba = phba;
+ ndlp->vport = vport;
ndlp->nlp_sid = NLP_NO_SID;
INIT_LIST_HEAD(&ndlp->nlp_listp);
kref_init(&ndlp->kref);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node init: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
return;
}
@@ -2372,8 +2907,13 @@ lpfc_nlp_release(struct kref *kref)
{
struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
kref);
- lpfc_nlp_remove(ndlp->nlp_phba, ndlp);
- mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool);
+
+ lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
+ "node release: did:x%x flg:x%x type:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
+
+ lpfc_nlp_remove(ndlp->vport, ndlp);
+ mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
struct lpfc_nodelist *
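For orientation, a minimal sketch (not part of this patch; the exact bodies are assumptions) of how the reference taken by kref_init() in lpfc_nlp_init() and dropped into lpfc_nlp_release() above is expected to be managed by the node get/put helpers:

	struct lpfc_nodelist *
	lpfc_nlp_get(struct lpfc_nodelist *ndlp)
	{
		if (ndlp)
			kref_get(&ndlp->kref);	/* take an extra reference */
		return ndlp;
	}

	int
	lpfc_nlp_put(struct lpfc_nodelist *ndlp)
	{
		/* the final put invokes lpfc_nlp_release(), which frees the node */
		return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
	}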
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2623a9bc7775..c2fb59f595f3 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -59,6 +59,12 @@
#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
+#define SLI2_IOCB_CMD_SIZE 32
+#define SLI2_IOCB_RSP_SIZE 32
+#define SLI3_IOCB_CMD_SIZE 128
+#define SLI3_IOCB_RSP_SIZE 64
+
+
/* Common Transport structures and definitions */
union CtRevisionId {
@@ -79,6 +85,9 @@ union CtCommandResponse {
uint32_t word;
};
+#define FC4_FEATURE_INIT 0x2
+#define FC4_FEATURE_TARGET 0x1
+
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
union CtRevisionId RevisionId;
@@ -121,20 +130,6 @@ struct lpfc_sli_ct_request {
uint32_t rsvd[7];
} rft;
- struct rff {
- uint32_t PortId;
- uint8_t reserved[2];
-#ifdef __BIG_ENDIAN_BITFIELD
- uint8_t feature_res:6;
- uint8_t feature_init:1;
- uint8_t feature_tgt:1;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t feature_tgt:1;
- uint8_t feature_init:1;
- uint8_t feature_res:6;
-#endif
- uint8_t type_code; /* type=8 for FCP */
- } rff;
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
uint8_t wwnn[8];
@@ -144,15 +139,42 @@ struct lpfc_sli_ct_request {
uint8_t len;
uint8_t symbname[255];
} rsnn;
+ struct rspn { /* For RSPN_ID requests */
+ uint32_t PortId;
+ uint8_t len;
+ uint8_t symbname[255];
+ } rspn;
+ struct gff {
+ uint32_t PortId;
+ } gff;
+ struct gff_acc {
+ uint8_t fbits[128];
+ } gff_acc;
+#define FCP_TYPE_FEATURE_OFFSET 4
+ struct rff {
+ uint32_t PortId;
+ uint8_t reserved[2];
+ uint8_t fbits;
+ uint8_t type_code; /* type=8 for FCP */
+ } rff;
} un;
};
#define SLI_CT_REVISION 1
-#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
-#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
-#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235)
-#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
-#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
+#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gid))
+#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gff))
+#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rft))
+#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rff))
+#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rnn))
+#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rsnn))
+#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct rspn))
/*
* FsType Definitions
@@ -227,6 +249,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_GFT_ID 0x0117
#define SLI_CTNS_GSPN_ID 0x0118
#define SLI_CTNS_GPT_ID 0x011A
+#define SLI_CTNS_GFF_ID 0x011F
#define SLI_CTNS_GID_PN 0x0121
#define SLI_CTNS_GID_NN 0x0131
#define SLI_CTNS_GIP_NN 0x0135
@@ -240,9 +263,9 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_RNN_ID 0x0213
#define SLI_CTNS_RCS_ID 0x0214
#define SLI_CTNS_RFT_ID 0x0217
-#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RSPN_ID 0x0218
#define SLI_CTNS_RPT_ID 0x021A
+#define SLI_CTNS_RFF_ID 0x021F
#define SLI_CTNS_RIP_NN 0x0235
#define SLI_CTNS_RIPA_NN 0x0236
#define SLI_CTNS_RSNN_NN 0x0239
@@ -311,9 +334,9 @@ struct csp {
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+ uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
- uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
uint16_t fPort:1; /* FC Word 1, bit 28 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
@@ -332,9 +355,9 @@ struct csp {
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t fPort:1; /* FC Word 1, bit 28 */
- uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
+ uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
- uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
+ uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t payloadlength:1; /* FC Word 1, bit 16 */
uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
@@ -1255,7 +1278,9 @@ typedef struct { /* FireFly BIU registers */
#define MBX_KILL_BOARD 0x24
#define MBX_CONFIG_FARP 0x25
#define MBX_BEACON 0x2A
+#define MBX_HEARTBEAT 0x31
+#define MBX_CONFIG_HBQ 0x7C
#define MBX_LOAD_AREA 0x81
#define MBX_RUN_BIU_DIAG64 0x84
#define MBX_CONFIG_PORT 0x88
@@ -1263,6 +1288,10 @@ typedef struct { /* FireFly BIU registers */
#define MBX_READ_RPI64 0x8F
#define MBX_REG_LOGIN64 0x93
#define MBX_READ_LA64 0x95
+#define MBX_REG_VPI 0x96
+#define MBX_UNREG_VPI 0x97
+#define MBX_REG_VNPID 0x96
+#define MBX_UNREG_VNPID 0x97
#define MBX_FLASH_WR_ULA 0x98
#define MBX_SET_DEBUG 0x99
@@ -1335,6 +1364,10 @@ typedef struct { /* FireFly BIU registers */
#define CMD_FCP_TRECEIVE64_CX 0xA1
#define CMD_FCP_TRSP64_CX 0xA3
+#define CMD_IOCB_RCV_SEQ64_CX 0xB5
+#define CMD_IOCB_RCV_ELS64_CX 0xB7
+#define CMD_IOCB_RCV_CONT64_CX 0xBB
+
#define CMD_GEN_REQUEST64_CR 0xC2
#define CMD_GEN_REQUEST64_CX 0xC3
@@ -1561,6 +1594,7 @@ typedef struct {
#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
+#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
@@ -1744,8 +1778,6 @@ typedef struct {
#define LMT_4Gb 0x040
#define LMT_8Gb 0x080
#define LMT_10Gb 0x100
-
-
uint32_t rsvd2;
uint32_t rsvd3;
uint32_t max_xri;
@@ -1754,7 +1786,10 @@ typedef struct {
uint32_t avail_xri;
uint32_t avail_iocb;
uint32_t avail_rpi;
- uint32_t default_rpi;
+ uint32_t max_vpi;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint32_t avail_vpi;
} READ_CONFIG_VAR;
/* Structure for MB Command READ_RCONFIG (12) */
@@ -1818,6 +1853,13 @@ typedef struct {
structure */
struct ulp_bde64 sp64;
} un;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd3;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t vpi;
+ uint16_t rsvd3;
+#endif
} READ_SPARM_VAR;
/* Structure for MB Command READ_STATUS (14) */
@@ -1918,11 +1960,17 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t cv:1;
uint32_t rr:1;
- uint32_t rsvd1:29;
+ uint32_t rsvd2:2;
+ uint32_t v3req:1;
+ uint32_t v3rsp:1;
+ uint32_t rsvd1:25;
uint32_t rv:1;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t rv:1;
- uint32_t rsvd1:29;
+ uint32_t rsvd1:25;
+ uint32_t v3rsp:1;
+ uint32_t v3req:1;
+ uint32_t rsvd2:2;
uint32_t rr:1;
uint32_t cv:1;
#endif
@@ -1972,8 +2020,8 @@ typedef struct {
uint8_t sli1FwName[16];
uint32_t sli2FwRev;
uint8_t sli2FwName[16];
- uint32_t rsvd2;
- uint32_t RandomData[7];
+ uint32_t sli3Feat;
+ uint32_t RandomData[6];
} READ_REV_VAR;
/* Structure for MB Command READ_LINK_STAT (18) */
@@ -2013,6 +2061,14 @@ typedef struct {
struct ulp_bde64 sp64;
} un;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+
} REG_LOGIN_VAR;
/* Word 30 contents for REG_LOGIN */
@@ -2037,16 +2093,78 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t rsvd1;
uint16_t rpi;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t rsvd6;
+ uint16_t vpi;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint16_t rpi;
uint16_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t vpi;
+ uint16_t rsvd6;
#endif
} UNREG_LOGIN_VAR;
+/* Structure for MB Command REG_VPI (0x96) */
+typedef struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1;
+ uint32_t rsvd2:8;
+ uint32_t sid:24;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd1;
+ uint32_t sid:24;
+ uint32_t rsvd2:8;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} REG_VPI_VAR;
+
+/* Structure for MB Command UNREG_VPI (0x97) */
+typedef struct {
+ uint32_t rsvd1;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else /* __LITTLE_ENDIAN */
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
+} UNREG_VPI_VAR;
+
/* Structure for MB Command UNREG_D_ID (0x23) */
typedef struct {
uint32_t did;
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+ uint32_t rsvd4;
+ uint32_t rsvd5;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t rsvd6;
+ uint16_t vpi;
+#else
+ uint16_t vpi;
+ uint16_t rsvd6;
+#endif
} UNREG_D_ID_VAR;
/* Structure for MB Command READ_LA (21) */
@@ -2178,13 +2296,240 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
-/* Structure for MB Command CONFIG_PORT (0x88) */
+struct hbq_mask {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t tmatch;
+ uint8_t tmask;
+ uint8_t rctlmatch;
+ uint8_t rctlmask;
+#else /* __LITTLE_ENDIAN */
+ uint8_t rctlmask;
+ uint8_t rctlmatch;
+ uint8_t tmask;
+ uint8_t tmatch;
+#endif
+};
+
+
+/* Structure for MB Command CONFIG_HBQ (7c) */
+
+struct config_hbq_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 :7;
+ uint32_t recvNotify :1; /* Receive Notification */
+ uint32_t numMask :8; /* # Mask Entries */
+ uint32_t profile :8; /* Selection Profile */
+ uint32_t rsvd2 :8;
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd2 :8;
+ uint32_t profile :8; /* Selection Profile */
+ uint32_t numMask :8; /* # Mask Entries */
+ uint32_t recvNotify :1; /* Receive Notification */
+ uint32_t rsvd1 :7;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hbqId :16;
+ uint32_t rsvd3 :12;
+ uint32_t ringMask :4;
+#else /* __LITTLE_ENDIAN */
+ uint32_t ringMask :4;
+ uint32_t rsvd3 :12;
+ uint32_t hbqId :16;
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t entry_count :16;
+ uint32_t rsvd4 :8;
+ uint32_t headerLen :8;
+#else /* __LITTLE_ENDIAN */
+ uint32_t headerLen :8;
+ uint32_t rsvd4 :8;
+ uint32_t entry_count :16;
+#endif
+
+ uint32_t hbqaddrLow;
+ uint32_t hbqaddrHigh;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd5 :31;
+ uint32_t logEntry :1;
+#else /* __LITTLE_ENDIAN */
+ uint32_t logEntry :1;
+ uint32_t rsvd5 :31;
+#endif
+
+ uint32_t rsvd6; /* w7 */
+ uint32_t rsvd7; /* w8 */
+ uint32_t rsvd8; /* w9 */
+
+ struct hbq_mask hbqMasks[6];
+
+
+ union {
+ uint32_t allprofiles[12];
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd1 :28;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :28;
+ #endif
+ uint32_t rsvd[10];
+ } profile2;
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cmdcodeoff :28;
+ uint32_t rsvd1 :12;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :12;
+ uint32_t cmdcodeoff :28;
+ #endif
+ uint32_t cmdmatch[8];
+
+ uint32_t rsvd[2];
+ } profile3;
+
+ struct {
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t seqlenoff :16;
+ uint32_t maxlen :16;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t maxlen :16;
+ uint32_t seqlenoff :16;
+ #endif
+ #ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cmdcodeoff :28;
+ uint32_t rsvd1 :12;
+ uint32_t seqlenbcnt :4;
+ #else /* __LITTLE_ENDIAN */
+ uint32_t seqlenbcnt :4;
+ uint32_t rsvd1 :12;
+ uint32_t cmdcodeoff :28;
+ #endif
+ uint32_t cmdmatch[8];
+
+ uint32_t rsvd[2];
+ } profile5;
+
+ } profiles;
+};
+
+
+
+/* Structure for MB Command CONFIG_PORT (0x88) */
typedef struct {
- uint32_t pcbLen;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t cBE : 1;
+ uint32_t cET : 1;
+ uint32_t cHpcb : 1;
+ uint32_t cMA : 1;
+ uint32_t sli_mode : 4;
+ uint32_t pcbLen : 24; /* bit 23:0 of memory based port
+ * config block */
+#else /* __LITTLE_ENDIAN */
+ uint32_t pcbLen : 24; /* bit 23:0 of memory based port
+ * config block */
+ uint32_t sli_mode : 4;
+ uint32_t cMA : 1;
+ uint32_t cHpcb : 1;
+ uint32_t cET : 1;
+ uint32_t cBE : 1;
+#endif
+
uint32_t pcbLow; /* bit 31:0 of memory based port config block */
uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
- uint32_t hbainit[5];
+ uint32_t hbainit[6];
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd : 24; /* Reserved */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t chbs : 1; /* Configure Host Backing Store */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t cmr : 1; /* Configure Max RPIs */
+ uint32_t cmx : 1; /* Configure Max XRIs */
+ uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
+ uint32_t cinb : 1; /* Enable Interrupt Notification Block */
+ uint32_t chbs : 1; /* Configure Host Backing Store */
+ uint32_t csah : 1; /* Configure Synchronous Abort Handling */
+ uint32_t ccrp : 1; /* Config Command Ring Polling */
+ uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t rsvd : 24; /* Reserved */
+#endif
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2 : 24; /* Reserved */
+ uint32_t gmv : 1; /* Grant Max VPIs */
+ uint32_t gcrp : 1; /* Grant Command Ring Polling */
+ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
+ uint32_t ghbs : 1; /* Grant Host Backing Store */
+ uint32_t ginb : 1; /* Grant Interrupt Notification Block */
+ uint32_t gerbm : 1; /* Grant ERBM Request */
+ uint32_t gmx : 1; /* Grant Max XRIs */
+ uint32_t gmr : 1; /* Grant Max RPIs */
+#else /* __LITTLE_ENDIAN */
+ uint32_t gmr : 1; /* Grant Max RPIs */
+ uint32_t gmx : 1; /* Grant Max XRIs */
+ uint32_t gerbm : 1; /* Grant ERBM Request */
+ uint32_t ginb : 1; /* Grant Interrupt Notification Block */
+ uint32_t ghbs : 1; /* Grant Host Backing Store */
+ uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
+ uint32_t gcrp : 1; /* Grant Command Ring Polling */
+ uint32_t gmv : 1; /* Grant Max VPIs */
+ uint32_t rsvd2 : 24; /* Reserved */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t max_rpi : 16; /* Max RPIs Port should configure */
+ uint32_t max_xri : 16; /* Max XRIs Port should configure */
+#else /* __LITTLE_ENDIAN */
+ uint32_t max_xri : 16; /* Max XRIs Port should configure */
+ uint32_t max_rpi : 16; /* Max RPIs Port should configure */
+#endif
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
+ uint32_t rsvd3 : 16; /* Reserved */
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd3 : 16; /* Reserved */
+ uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
+#endif
+
+ uint32_t rsvd4; /* Reserved */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd5 : 16; /* Reserved */
+ uint32_t max_vpi : 16; /* Max number of virt N-Ports */
+#else /* __LITTLE_ENDIAN */
+ uint32_t max_vpi : 16; /* Max number of virt N-Ports */
+ uint32_t rsvd5 : 16; /* Reserved */
+#endif
+
} CONFIG_PORT_VAR;
/* SLI-2 Port Control Block */
@@ -2262,33 +2607,40 @@ typedef struct {
#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
typedef union {
- uint32_t varWords[MAILBOX_CMD_WSIZE - 1];
- LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
- READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
- WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
- BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
- INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
+ * feature/max ring number
+ */
+ LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
+ READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
+ WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
+ BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
+ INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
- CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
- PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
+ CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
+ PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
- READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
- READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
- READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
- READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
+ READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
+ READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
+ READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
+ READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
- READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
+ READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
- DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
- UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
- CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */
- CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+ DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
+ UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
+ CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP)
+ * NEW_FEATURE
+ */
+ struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
+ CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
+ REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
+ UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
} MAILVARIANTS;
/*
@@ -2305,14 +2657,27 @@ struct lpfc_pgp {
__le32 rspPutInx;
};
-typedef struct _SLI2_DESC {
- struct lpfc_hgp host[MAX_RINGS];
+struct sli2_desc {
uint32_t unused1[16];
+ struct lpfc_hgp host[MAX_RINGS];
+ struct lpfc_pgp port[MAX_RINGS];
+};
+
+struct sli3_desc {
+ struct lpfc_hgp host[MAX_RINGS];
+ uint32_t reserved[8];
+ uint32_t hbq_put[16];
+};
+
+struct sli3_pgp {
struct lpfc_pgp port[MAX_RINGS];
-} SLI2_DESC;
+ uint32_t hbq_get[16];
+};
typedef union {
- SLI2_DESC s2;
+ struct sli2_desc s2;
+ struct sli3_desc s3;
+ struct sli3_pgp s3_pgp;
} SLI_VAR;
typedef struct {
@@ -2618,6 +2983,25 @@ typedef struct {
uint32_t fcpt_Length; /* transfer ready for IWRITE */
} FCPT_FIELDS64;
+/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
+ or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
+
+struct rcv_sli3 {
+ uint32_t word8Rsvd;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t vpi;
+ uint16_t word9Rsvd;
+#else /* __LITTLE_ENDIAN */
+ uint16_t word9Rsvd;
+ uint16_t vpi;
+#endif
+ uint32_t word10Rsvd;
+ uint32_t acc_len; /* accumulated length */
+ struct ulp_bde64 bde2;
+};
+
+
+
typedef struct _IOCB { /* IOCB structure */
union {
GENERIC_RSP grsp; /* Generic response */
@@ -2632,8 +3016,8 @@ typedef struct _IOCB { /* IOCB structure */
/* SLI-2 structures */
- struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
- bde_64s */
+ struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
+ * bde_64s */
ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
@@ -2695,9 +3079,20 @@ typedef struct _IOCB { /* IOCB structure */
uint32_t ulpTimeout:8;
#endif
+ union {
+ struct rcv_sli3 rcvsli3; /* words 8 - 15 */
+ uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+ } unsli3;
+
+#define ulpCt_h ulpXS
+#define ulpCt_l ulpFCP2Rcvy
+
+#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */
+#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */
#define PARM_UNUSED 0 /* PU field (Word 4) not used */
#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
+#define PARM_NPIV_DID 3
#define CLASS1 0 /* Class 1 */
#define CLASS2 1 /* Class 2 */
#define CLASS3 2 /* Class 3 */
@@ -2718,39 +3113,51 @@ typedef struct _IOCB { /* IOCB structure */
#define IOSTAT_RSVD2 0xC
#define IOSTAT_RSVD3 0xD
#define IOSTAT_RSVD4 0xE
-#define IOSTAT_RSVD5 0xF
+#define IOSTAT_NEED_BUFFER 0xF
#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
#define IOSTAT_CNT 0x11
} IOCB_t;
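One way the new words 8-15 might be consumed on the receive side (a sketch only; lpfc_find_vport_by_vpid() is an assumed helper and is not defined by this patch):

	/* Sketch: route an unsolicited SLI-3 receive to the owning vport using
	 * the VPI carried in the extended IOCB words 8-15.
	 */
	static struct lpfc_vport *
	example_vport_for_rcv(struct lpfc_hba *phba, IOCB_t *icmd)
	{
		uint16_t vpi = icmd->unsli3.rcvsli3.vpi;

		return lpfc_find_vport_by_vpid(phba, vpi);
	}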
+/* Structure used for a single HBQ entry */
+struct lpfc_hbq_entry {
+ struct ulp_bde64 bde;
+ uint32_t buffer_tag;
+};
+
#define SLI1_SLIM_SIZE (4 * 1024)
/* Up to 498 IOCBs will fit into 16k
* 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
*/
-#define SLI2_SLIM_SIZE (16 * 1024)
+#define SLI2_SLIM_SIZE (64 * 1024)
/* Maximum IOCBs that will fit in SLI2 slim */
#define MAX_SLI2_IOCB 498
+#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
+ (sizeof(MAILBOX_t) + sizeof(PCB_t)))
+
+/* HBQ entries are 4 words each = 4k */
+#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
+ lpfc_sli_hbq_count())
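A quick size check on the macro above (my arithmetic, not from the patch): struct lpfc_hbq_entry is a 3-word ulp_bde64 plus a 1-word buffer_tag, i.e. 4 words = 16 bytes per entry, so LPFC_TOTAL_HBQ_SIZE is simply 16 bytes times lpfc_sli_hbq_count() entries; with, say, 256 entries that comes to 4 KiB, consistent with the "4 words each = 4k" note.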
struct lpfc_sli2_slim {
MAILBOX_t mbx;
PCB_t pcb;
- IOCB_t IOCBs[MAX_SLI2_IOCB];
+ IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
};
-/*******************************************************************
-This macro check PCI device to allow special handling for LC HBAs.
-
-Parameters:
-device : struct pci_dev 's device field
-
-return 1 => TRUE
- 0 => FALSE
- *******************************************************************/
+/*
+ * This function checks PCI device to allow special handling for LC HBAs.
+ *
+ * Parameters:
+ * device : struct pci_dev 's device field
+ *
+ * return 1 => TRUE
+ * 0 => FALSE
+ */
static inline int
lpfc_is_LC_HBA(unsigned short device)
{
@@ -2766,3 +3173,16 @@ lpfc_is_LC_HBA(unsigned short device)
else
return 0;
}
+
+/*
+ * Determine if an IOCB failed because of a link event or firmware reset.
+ */
+
+static inline int
+lpfc_error_lost_link(IOCB_t *iocbp)
+{
+ return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
+ iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
+ iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+}
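To make the intent of the helper concrete, a usage sketch (illustrative only; the handler name and its iocb arguments are assumptions, not part of this patch):

	static void
	example_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 struct lpfc_iocbq *rspiocb)
	{
		IOCB_t *irsp = &rspiocb->iocb;

		if (lpfc_error_lost_link(irsp))
			return;	/* link down / SLI aborted: retrying is pointless */

		/* ... normal completion and retry handling ... */
	}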
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index dcb4ba0ecee1..07bd0dcdf0d6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/ctype.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -40,15 +41,20 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
#include "lpfc_version.h"
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
+static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
+
+
/************************************************************************/
/* */
/* lpfc_config_port_prep */
@@ -61,7 +67,7 @@ static DEFINE_IDR(lpfc_hba_index);
/* */
/************************************************************************/
int
-lpfc_config_port_prep(struct lpfc_hba * phba)
+lpfc_config_port_prep(struct lpfc_hba *phba)
{
lpfc_vpd_t *vp = &phba->vpd;
int i = 0, rc;
@@ -75,12 +81,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
mb = &pmb->mb;
- phba->hba_state = LPFC_INIT_MBX_CMDS;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
if (lpfc_is_LC_HBA(phba->pcidev->device)) {
if (init_key) {
@@ -100,9 +106,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"%d:0324 Config Port initialization "
"error, mbxCmd x%x READ_NVPARM, "
"mbxStatus x%x\n",
@@ -112,16 +116,18 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
- sizeof (mb->un.varRDnvp.nodename));
+ sizeof(phba->wwnn));
+ memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
+ sizeof(phba->wwpn));
}
+ phba->sli3_options = 0x0;
+
/* Setup and issue mailbox READ REV command */
lpfc_read_rev(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0439 Adapter failed to init, mbxCmd x%x "
"READ_REV, mbxStatus x%x\n",
phba->brd_no,
@@ -130,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
+
/*
* The value of rr must be 1 since the driver set the cv field to 1.
* This setting requires the FW to set all revision fields.
@@ -144,8 +151,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
return -ERESTART;
}
+ if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+ return -EINVAL;
+
/* Save information as VPD data */
vp->rev.rBit = 1;
+ memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
@@ -161,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
+ /* If the sli feature level is less than 9, we must
+ * tear down all RPIs and VPIs on link down if NPIV
+ * is enabled.
+ */
+ if (vp->rev.feaLevelHigh < 9)
+ phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
+
if (lpfc_is_LC_HBA(phba->pcidev->device))
memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
sizeof (phba->RandomData));
@@ -188,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
- mb->un.varDmp.word_cnt);
+ mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
@@ -212,48 +230,34 @@ out_free_mbox:
/* */
/************************************************************************/
int
-lpfc_config_port_post(struct lpfc_hba * phba)
+lpfc_config_port_post(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, timeout;
- int i, j, rc;
+ int i, j;
+ int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
mb = &pmb->mb;
- lpfc_config_link(phba, pmb);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
- "%d:0447 Adapter failed init, mbxCmd x%x "
- "CONFIG_LINK mbxStatus x%x\n",
- phba->brd_no,
- mb->mbxCommand, mb->mbxStatus);
- phba->hba_state = LPFC_HBA_ERROR;
- mempool_free( pmb, phba->mbox_mem_pool);
- return -EIO;
- }
-
/* Get login parameters for NID. */
- lpfc_read_sparam(phba, pmb);
+ lpfc_read_sparam(phba, pmb, 0);
+ pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0448 Adapter failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
mp = (struct lpfc_dmabuf *) pmb->context1;
mempool_free( pmb, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -263,25 +267,27 @@ lpfc_config_port_post(struct lpfc_hba * phba)
mp = (struct lpfc_dmabuf *) pmb->context1;
- memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
pmb->context1 = NULL;
if (phba->cfg_soft_wwnn)
- u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
+ u64_to_wwn(phba->cfg_soft_wwnn,
+ vport->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
- u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
- memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
+ u64_to_wwn(phba->cfg_soft_wwpn,
+ vport->fc_sparam.portName.u.wwn);
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
- memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
/* If no serial number in VPD data, use low 6 bytes of WWNN */
/* This should be consolidated into parse_vpd ? - mr */
if (phba->SerialNumber[0] == 0) {
uint8_t *outptr;
- outptr = &phba->fc_nodename.u.s.IEEE[0];
+ outptr = &vport->fc_nodename.u.s.IEEE[0];
for (i = 0; i < 12; i++) {
status = *outptr++;
j = ((status & 0xf0) >> 4);
@@ -303,15 +309,14 @@ lpfc_config_port_post(struct lpfc_hba * phba)
}
lpfc_read_config(phba, pmb);
+ pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0453 Adapter failed to init, mbxCmd x%x "
"READ_CONFIG, mbxStatus x%x\n",
phba->brd_no,
mb->mbxCommand, mb->mbxStatus);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
mempool_free( pmb, phba->mbox_mem_pool);
return -EIO;
}
@@ -338,9 +343,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
|| ((phba->cfg_link_speed == LINK_SPEED_10G)
&& !(phba->lmt & LMT_10Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
"%d:1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->brd_no,
@@ -348,7 +351,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
phba->cfg_link_speed = LINK_SPEED_AUTO;
}
- phba->hba_state = LPFC_LINK_DOWN;
+ phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ring 0 till hba_state is READY */
if (psli->ring[psli->extra_ring].cmdringaddr)
@@ -359,10 +362,11 @@ lpfc_config_port_post(struct lpfc_hba * phba)
psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
/* Post receive buffers for desired rings */
- lpfc_post_rcv_buf(phba);
+ if (phba->sli_rev != 3)
+ lpfc_post_rcv_buf(phba);
/* Enable appropriate host interrupts */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
if (psli->num_rings > 0)
@@ -380,22 +384,24 @@ lpfc_config_port_post(struct lpfc_hba * phba)
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
/*
* Setup the ring 0 (els) timeout handler
*/
timeout = phba->fc_ratov << 1;
- mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+ mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+ mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ phba->hb_outstanding = 0;
+ phba->last_completion_time = jiffies;
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0454 Adapter failed to init, mbxCmd x%x "
"INIT_LINK, mbxStatus x%x\n",
phba->brd_no,
@@ -408,7 +414,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
if (rc != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
@@ -429,18 +435,19 @@ lpfc_config_port_post(struct lpfc_hba * phba)
/* */
/************************************************************************/
int
-lpfc_hba_down_prep(struct lpfc_hba * phba)
+lpfc_hba_down_prep(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport = phba->pport;
+
/* Disable interrupts */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- /* Cleanup potential discovery resources */
- lpfc_els_flush_rscn(phba);
- lpfc_els_flush_cmd(phba);
- lpfc_disc_flush_list(phba);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ lpfc_cleanup_discovery_resources(vport);
+ }
- return (0);
+ return 0;
}
/************************************************************************/
@@ -453,20 +460,24 @@ lpfc_hba_down_prep(struct lpfc_hba * phba)
/* */
/************************************************************************/
int
-lpfc_hba_down_post(struct lpfc_hba * phba)
+lpfc_hba_down_post(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *mp, *next_mp;
int i;
- /* Cleanup preposted buffers on the ELS ring */
- pring = &psli->ring[LPFC_ELS_RING];
- list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
- list_del(&mp->list);
- pring->postbufq_cnt--;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+ lpfc_sli_hbqbuf_free_all(phba);
+ else {
+ /* Cleanup preposted buffers on the ELS ring */
+ pring = &psli->ring[LPFC_ELS_RING];
+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+ list_del(&mp->list);
+ pring->postbufq_cnt--;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
}
for (i = 0; i < psli->num_rings; i++) {
@@ -477,6 +488,119 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
return 0;
}
+/* HBA heart beat timeout handler */
+void
+lpfc_hb_timeout(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ unsigned long iflag;
+
+ phba = (struct lpfc_hba *)ptr;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+ phba->pport->work_port_events |= WORKER_HB_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ return;
+}
+
+static void
+lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+{
+ unsigned long drvr_flag;
+
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
+ phba->hb_outstanding = 0;
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
+ !(phba->link_state == LPFC_HBA_ERROR) &&
+ !(phba->pport->fc_flag & FC_UNLOADING))
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+}
+
+void
+lpfc_hb_timeout_handler(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ int retval;
+ struct lpfc_sli *psli = &phba->sli;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (phba->pport->fc_flag & FC_UNLOADING) ||
+ (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ return;
+
+ spin_lock_irq(&phba->pport->work_port_lock);
+ /* If the timer is already canceled do nothing */
+ if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ return;
+ }
+
+ if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
+ jiffies)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ if (!phba->hb_outstanding)
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ else
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+ return;
+ }
+ spin_unlock_irq(&phba->pport->work_port_lock);
+
+ /* If there is no heart beat outstanding, issue a heartbeat command */
+ if (!phba->hb_outstanding) {
+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
+ if (!pmboxq) {
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
+
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+ if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+ return;
+ }
+ mod_timer(&phba->hb_tmofunc,
+ jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+ phba->hb_outstanding = 1;
+ return;
+ } else {
+ /*
+ * If heart beat timeout called with hb_outstanding set we
+ * need to take the HBA offline.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0459 Adapter heartbeat failure, taking "
+ "this port offline.\n", phba->brd_no);
+
+ spin_lock_irq(&phba->hbalock);
+ psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ lpfc_unblock_mgmt_io(phba);
+ phba->link_state = LPFC_HBA_ERROR;
+ lpfc_hba_down_post(phba);
+ }
+}
+
/************************************************************************/
/* */
/* lpfc_handle_eratt */
@@ -486,11 +610,15 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
/* */
/************************************************************************/
void
-lpfc_handle_eratt(struct lpfc_hba * phba)
+lpfc_handle_eratt(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
+ struct lpfc_vport *port_iterator;
uint32_t event_data;
+ struct Scsi_Host *shost;
+
/* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */
if (pci_channel_offline(phba->pcidev))
@@ -504,10 +632,17 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
"Data: x%x x%x x%x\n",
phba->brd_no, phba->work_hs,
phba->work_status[0], phba->work_status[1]);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_ESTABLISH_LINK;
+ list_for_each_entry(port_iterator, &phba->port_list,
+ listentry) {
+ shost = lpfc_shost_from_vport(port_iterator);
+
+ spin_lock_irq(shost->host_lock);
+ port_iterator->fc_flag |= FC_ESTABLISH_LINK;
+ spin_unlock_irq(shost->host_lock);
+ }
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
/*
* Firmware stops when it triggled erratt with HS_FFER6.
@@ -544,15 +679,18 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
phba->work_status[0], phba->work_status[1]);
event_data = FC_REG_DUMP_EVENT;
- fc_host_post_vendor_event(phba->host, fc_get_event_number(),
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(event_data), (char *) &event_data,
SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_unblock_mgmt_io(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
lpfc_hba_down_post(phba);
}
}
@@ -566,9 +704,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
/* */
/************************************************************************/
void
-lpfc_handle_latt(struct lpfc_hba * phba)
+lpfc_handle_latt(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_vport *port_iterator;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
struct lpfc_dmabuf *mp;
@@ -589,20 +729,22 @@ lpfc_handle_latt(struct lpfc_hba * phba)
rc = -EIO;
/* Cleanup any outstanding ELS commands */
- lpfc_els_flush_cmd(phba);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry)
+ lpfc_els_flush_cmd(port_iterator);
psli->slistat.link_event++;
lpfc_read_la(phba, pmb, mp);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
+ pmb->vport = vport;
rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED)
goto lpfc_handle_latt_free_mbuf;
/* Clear Link Attention in HA REG */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
writel(HA_LATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return;
@@ -614,7 +756,7 @@ lpfc_handle_latt_free_pmb:
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
/* Enable Link attention interrupts */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control |= HC_LAINT_ENA;
@@ -624,15 +766,13 @@ lpfc_handle_latt_err_exit:
/* Clear Link Attention in HA REG */
writel(HA_LATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_linkdown(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/* The other case is an error from issue_mbox */
if (rc == -ENOMEM)
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_MBOX,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"%d:0300 READ_LA: no buffers\n",
phba->brd_no);
@@ -646,7 +786,7 @@ lpfc_handle_latt_err_exit:
/* */
/************************************************************************/
static int
-lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
+lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
@@ -658,9 +798,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
return 0;
/* Vital Product */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
phba->brd_no,
(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
@@ -785,7 +923,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
}
static void
-lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
+lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
lpfc_vpd_t *vp;
uint16_t dev_id = phba->pcidev->device;
@@ -943,7 +1081,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
/* Returns the number of buffers NOT posted. */
/**************************************************/
int
-lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
int type)
{
IOCB_t *icmd;
@@ -955,9 +1093,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
/* While there are buffers to post */
while (cnt > 0) {
/* Allocate buffer for command iocb */
- spin_lock_irq(phba->host->host_lock);
iocb = lpfc_sli_get_iocbq(phba);
- spin_unlock_irq(phba->host->host_lock);
if (iocb == NULL) {
pring->missbufcnt = cnt;
return cnt;
@@ -972,9 +1108,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
&mp1->phys);
if (mp1 == 0 || mp1->virt == 0) {
kfree(mp1);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, iocb);
- spin_unlock_irq(phba->host->host_lock);
pring->missbufcnt = cnt;
return cnt;
}
@@ -990,9 +1124,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
kfree(mp2);
lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
kfree(mp1);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_release_iocbq(phba, iocb);
- spin_unlock_irq(phba->host->host_lock);
pring->missbufcnt = cnt;
return cnt;
}
@@ -1018,7 +1150,6 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
icmd->ulpLe = 1;
- spin_lock_irq(phba->host->host_lock);
if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
kfree(mp1);
@@ -1030,14 +1161,11 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
}
lpfc_sli_release_iocbq(phba, iocb);
pring->missbufcnt = cnt;
- spin_unlock_irq(phba->host->host_lock);
return cnt;
}
- spin_unlock_irq(phba->host->host_lock);
lpfc_sli_ringpostbuf_put(phba, pring, mp1);
- if (mp2) {
+ if (mp2)
lpfc_sli_ringpostbuf_put(phba, pring, mp2);
- }
}
pring->missbufcnt = 0;
return 0;
@@ -1050,7 +1178,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
/* */
/************************************************************************/
static int
-lpfc_post_rcv_buf(struct lpfc_hba * phba)
+lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
@@ -1151,7 +1279,7 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
int t;
uint32_t *HashWorking;
- uint32_t *pwwnn = phba->wwnn;
+ uint32_t *pwwnn = (uint32_t *) phba->wwnn;
HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
if (!HashWorking)
@@ -1170,64 +1298,76 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
}
static void
-lpfc_cleanup(struct lpfc_hba * phba)
+lpfc_cleanup(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
/* clean up phba - lpfc specific */
- lpfc_can_disctmo(phba);
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
+ lpfc_can_disctmo(vport);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
lpfc_nlp_put(ndlp);
-
- INIT_LIST_HEAD(&phba->fc_nodes);
-
return;
}
static void
lpfc_establish_link_tmo(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_vport *vport = phba->pport;
unsigned long iflag;
-
/* Re-establishing Link, timer expired */
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"%d:1300 Re-establishing Link, timer expired "
"Data: x%x x%x\n",
- phba->brd_no, phba->fc_flag, phba->hba_state);
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag &= ~FC_ESTABLISH_LINK;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ phba->brd_no, vport->fc_flag,
+ vport->port_state);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irqsave(shost->host_lock, iflag);
+ vport->fc_flag &= ~FC_ESTABLISH_LINK;
+ spin_unlock_irqrestore(shost->host_lock, iflag);
+ }
}
-static int
-lpfc_stop_timer(struct lpfc_hba * phba)
+void
+lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
- struct lpfc_sli *psli = &phba->sli;
+ del_timer_sync(&vport->els_tmofunc);
+ del_timer_sync(&vport->fc_fdmitmo);
+ lpfc_can_disctmo(vport);
+ return;
+}
+
+static void
+lpfc_stop_phba_timers(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
del_timer_sync(&phba->fcp_poll_timer);
del_timer_sync(&phba->fc_estabtmo);
- del_timer_sync(&phba->fc_disctmo);
- del_timer_sync(&phba->fc_fdmitmo);
- del_timer_sync(&phba->els_tmofunc);
- psli = &phba->sli;
- del_timer_sync(&psli->mbox_tmo);
- return(1);
+ list_for_each_entry(vport, &phba->port_list, listentry)
+ lpfc_stop_vport_timers(vport);
+ del_timer_sync(&phba->sli.mbox_tmo);
+ del_timer_sync(&phba->fabric_block_timer);
+ phba->hb_outstanding = 0;
+ del_timer_sync(&phba->hb_tmofunc);
+ return;
}
int
-lpfc_online(struct lpfc_hba * phba)
+lpfc_online(struct lpfc_hba *phba)
{
+ struct lpfc_vport *vport = phba->pport;
+
if (!phba)
return 0;
- if (!(phba->fc_flag & FC_OFFLINE_MODE))
+ if (!(vport->fc_flag & FC_OFFLINE_MODE))
return 0;
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0458 Bring Adapter online\n",
phba->brd_no);
@@ -1243,9 +1383,14 @@ lpfc_online(struct lpfc_hba * phba)
return 1;
}
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_OFFLINE_MODE;
- spin_unlock_irq(phba->host->host_lock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_OFFLINE_MODE;
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_unblock_mgmt_io(phba);
return 0;
@@ -1256,9 +1401,9 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
unsigned long iflag;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag |= FC_BLOCK_MGMT_IO;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
void
@@ -1266,17 +1411,18 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
unsigned long iflag;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag &= ~FC_BLOCK_MGMT_IO;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
+ struct lpfc_vport *vport = phba->pport;
struct lpfc_nodelist *ndlp, *next_ndlp;
- if (phba->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE)
return;
lpfc_block_mgmt_io(phba);
@@ -1284,39 +1430,49 @@ lpfc_offline_prep(struct lpfc_hba * phba)
lpfc_linkdown(phba);
/* Issue an unreg_login to all nodes */
- list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp)
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
lpfc_sli_flush_mbox_queue(phba);
}
void
-lpfc_offline(struct lpfc_hba * phba)
+lpfc_offline(struct lpfc_hba *phba)
{
- unsigned long iflag;
+ struct lpfc_vport *vport = phba->pport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_vport *port_iterator;
- if (phba->fc_flag & FC_OFFLINE_MODE)
+ if (vport->fc_flag & FC_OFFLINE_MODE)
return;
/* stop all timers associated with this hba */
- lpfc_stop_timer(phba);
+ lpfc_stop_phba_timers(phba);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ port_iterator->work_port_events = 0;
+ }
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"%d:0460 Bring Adapter offline\n",
phba->brd_no);
/* Bring down the SLI Layer and cleanup. The HBA is offline
now. */
lpfc_sli_hba_down(phba);
- lpfc_cleanup(phba);
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->work_hba_events = 0;
+ spin_lock_irq(&phba->hbalock);
phba->work_ha = 0;
- phba->fc_flag |= FC_OFFLINE_MODE;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ vport->fc_flag |= FC_OFFLINE_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ shost = lpfc_shost_from_vport(port_iterator);
+
+ lpfc_cleanup(port_iterator);
+ spin_lock_irq(shost->host_lock);
+ vport->work_port_events = 0;
+ vport->fc_flag |= FC_OFFLINE_MODE;
+ spin_unlock_irq(shost->host_lock);
+ }
}
/******************************************************************************
@@ -1326,17 +1482,17 @@ lpfc_offline(struct lpfc_hba * phba)
*
******************************************************************************/
static int
-lpfc_scsi_free(struct lpfc_hba * phba)
+lpfc_scsi_free(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *sb, *sb_next;
struct lpfc_iocbq *io, *io_next;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
/* Release all the lpfc_scsi_bufs maintained by this host. */
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
list_del(&sb->list);
pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
- sb->dma_handle);
+ sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
@@ -1348,134 +1504,183 @@ lpfc_scsi_free(struct lpfc_hba * phba)
phba->total_iocbq_bufs--;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
-void lpfc_remove_device(struct lpfc_hba *phba)
-{
- unsigned long iflag;
- lpfc_free_sysfs_attr(phba);
-
- spin_lock_irqsave(phba->host->host_lock, iflag);
- phba->fc_flag |= FC_UNLOADING;
+struct lpfc_vport *
+lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *shost;
+ int error = 0;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
+ if (!shost)
+ goto out;
- fc_remove_host(phba->host);
- scsi_remove_host(phba->host);
+ vport = (struct lpfc_vport *) shost->hostdata;
+ vport->phba = phba;
- kthread_stop(phba->worker_thread);
+ vport->load_flag |= FC_LOADING;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ shost->unique_id = instance;
+ shost->max_id = LPFC_MAX_TARGET;
+ shost->max_lun = phba->cfg_max_luns;
+ shost->this_id = -1;
+ shost->max_cmd_len = 16;
/*
- * Bring down the SLI Layer. This step disable all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA.
+ * Set initial can_queue value since 0 is no longer supported and
+ * scsi_add_host will fail. This will be adjusted later based on the
+ * max xri value determined in hba setup.
*/
- lpfc_sli_hba_down(phba);
- lpfc_sli_brdrestart(phba);
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+ if (fc_vport != NULL) {
+ shost->transportt = lpfc_vport_transport_template;
+ vport->port_type = LPFC_NPIV_PORT;
+ } else {
+ shost->transportt = lpfc_transport_template;
+ vport->port_type = LPFC_PHYSICAL_PORT;
+ }
- /* Release the irq reservation */
- free_irq(phba->pcidev->irq, phba);
- pci_disable_msi(phba->pcidev);
+ /* Initialize all internally managed lists. */
+ INIT_LIST_HEAD(&vport->fc_nodes);
+ spin_lock_init(&vport->work_port_lock);
- lpfc_cleanup(phba);
- lpfc_stop_timer(phba);
- phba->work_hba_events = 0;
+ init_timer(&vport->fc_disctmo);
+ vport->fc_disctmo.function = lpfc_disc_timeout;
+ vport->fc_disctmo.data = (unsigned long)vport;
- /*
- * Call scsi_free before mem_free since scsi bufs are released to their
- * corresponding pools here.
- */
- lpfc_scsi_free(phba);
- lpfc_mem_free(phba);
+ init_timer(&vport->fc_fdmitmo);
+ vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
+ vport->fc_fdmitmo.data = (unsigned long)vport;
- /* Free resources associated with SLI2 interface */
- dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
- phba->slim2p, phba->slim2p_mapping);
+ init_timer(&vport->els_tmofunc);
+ vport->els_tmofunc.function = lpfc_els_timeout;
+ vport->els_tmofunc.data = (unsigned long)vport;
- /* unmap adapter SLIM and Control Registers */
- iounmap(phba->ctrl_regs_memmap_p);
- iounmap(phba->slim_memmap_p);
+ if (fc_vport != NULL) {
+ error = scsi_add_host(shost, &fc_vport->dev);
+ } else {
+ error = scsi_add_host(shost, &phba->pcidev->dev);
+ }
+ if (error)
+ goto out_put_shost;
- pci_release_regions(phba->pcidev);
- pci_disable_device(phba->pcidev);
+ list_add_tail(&vport->listentry, &phba->port_list);
+ return vport;
- idr_remove(&lpfc_hba_index, phba->brd_no);
- scsi_host_put(phba->host);
+out_put_shost:
+ scsi_host_put(shost);
+out:
+ return NULL;
}
-void lpfc_scan_start(struct Scsi_Host *host)
+void
+destroy_port(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
- if (lpfc_alloc_sysfs_attr(phba))
- goto error;
+ kfree(vport->vname);
- phba->MBslimaddr = phba->slim_memmap_p;
- phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
- phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
- phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
- phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+ lpfc_debugfs_terminate(vport);
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
- if (lpfc_sli_hba_setup(phba))
- goto error;
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
- /*
- * hba setup may have changed the hba_queue_depth so we need to adjust
- * the value of can_queue.
- */
- host->can_queue = phba->cfg_hba_queue_depth - 10;
+ lpfc_cleanup(vport);
return;
+}
+
+int
+lpfc_get_instance(void)
+{
+ int instance = 0;
-error:
- lpfc_remove_device(phba);
+ /* Assign an unused number */
+ if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
+ return -1;
+ if (idr_get_new(&lpfc_hba_index, NULL, &instance))
+ return -1;
+ return instance;
}
+/*
+ * Note: there is no scan_start function as adapter initialization
+ * will have asynchronously kicked off the link initialization.
+ */
+
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int stat = 0;
- if (!phba->host)
- return 1;
- if (time >= 30 * HZ)
+ spin_lock_irq(shost->host_lock);
+
+ if (vport->fc_flag & FC_UNLOADING) {
+ stat = 1;
+ goto finished;
+ }
+ if (time >= 30 * HZ) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0461 Scanning longer than 30 "
+ "seconds. Continuing initialization\n",
+ phba->brd_no);
+ stat = 1;
+ goto finished;
+ }
+ if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0465 Link down longer than 15 "
+ "seconds. Continuing initialization\n",
+ phba->brd_no);
+ stat = 1;
goto finished;
+ }
- if (phba->hba_state != LPFC_HBA_READY)
- return 0;
- if (phba->num_disc_nodes || phba->fc_prli_sent)
- return 0;
- if ((phba->fc_map_cnt == 0) && (time < 2 * HZ))
- return 0;
- if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)
- return 0;
- if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ))
- return 0;
+ if (vport->port_state != LPFC_VPORT_READY)
+ goto finished;
+ if (vport->num_disc_nodes || vport->fc_prli_sent)
+ goto finished;
+ if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+ goto finished;
+ if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
+ goto finished;
+
+ stat = 1;
finished:
- if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
- spin_lock_irq(shost->host_lock);
- lpfc_poll_start_timer(phba);
- spin_unlock_irq(shost->host_lock);
- }
+ spin_unlock_irq(shost->host_lock);
+ return stat;
+}
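As the earlier note says, there is deliberately no scan_start hook; the midlayer only polls scan_finished during asynchronous scanning. A minimal sketch of how the host template is assumed to wire this up (only the scan-related members are shown, the remaining lpfc_template fields are omitted):

static struct scsi_host_template lpfc_template_sketch = {
	.module		= THIS_MODULE,
	.name		= LPFC_DRIVER_NAME,
	/* no .scan_start: probe already kicked off link initialization */
	.scan_finished	= lpfc_scan_finished,
	/* ... remaining template fields omitted ... */
};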
+void lpfc_host_attrib_init(struct Scsi_Host *shost)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
/*
- * set fixed host attributes
- * Must done after lpfc_sli_hba_setup()
+ * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
*/
- fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn);
- fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn);
+ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
fc_host_supported_classes(shost) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(shost), 0,
- sizeof(fc_host_supported_fc4s(shost)));
+ sizeof(fc_host_supported_fc4s(shost)));
fc_host_supported_fc4s(shost)[2] = 1;
fc_host_supported_fc4s(shost)[7] = 1;
- lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
+ lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+ sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
if (phba->lmt & LMT_10Gb)
@@ -1488,31 +1693,31 @@ finished:
fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
fc_host_maxframe_size(shost) =
- ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb);
+ (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
/* This value is also unchanging */
memset(fc_host_active_fc4s(shost), 0,
- sizeof(fc_host_active_fc4s(shost)));
+ sizeof(fc_host_active_fc4s(shost)));
fc_host_active_fc4s(shost)[2] = 1;
fc_host_active_fc4s(shost)[7] = 1;
+ fc_host_max_npiv_vports(shost) = phba->max_vpi;
spin_lock_irq(shost->host_lock);
- phba->fc_flag &= ~FC_LOADING;
+ vport->fc_flag &= ~FC_LOADING;
spin_unlock_irq(shost->host_lock);
-
- return 1;
}
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
- struct Scsi_Host *host;
- struct lpfc_hba *phba;
- struct lpfc_sli *psli;
+ struct lpfc_vport *vport = NULL;
+ struct lpfc_hba *phba;
+ struct lpfc_sli *psli;
struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
+ struct Scsi_Host *shost = NULL;
unsigned long bar0map_len, bar2map_len;
- int error = -ENODEV, retval;
+ int error = -ENODEV;
int i;
uint16_t iotag;
@@ -1521,67 +1726,49 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
goto out_disable_device;
- host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
- if (!host)
+ phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
+ if (!phba)
goto out_release_regions;
- phba = (struct lpfc_hba*)host->hostdata;
- memset(phba, 0, sizeof (struct lpfc_hba));
- phba->host = host;
+ spin_lock_init(&phba->hbalock);
- phba->fc_flag |= FC_LOADING;
phba->pcidev = pdev;
/* Assign an unused board number */
- if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
- goto out_put_host;
+ if ((phba->brd_no = lpfc_get_instance()) < 0)
+ goto out_free_phba;
- error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no);
- if (error)
- goto out_put_host;
-
- host->unique_id = phba->brd_no;
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->hbq_buffer_list);
+ /*
+ * Get all the module params for configuring this host and then
+ * establish the host.
+ */
+ lpfc_get_cfgparam(phba);
+ phba->max_vpi = LPFC_MAX_VPI;
/* Initialize timers used by driver */
init_timer(&phba->fc_estabtmo);
phba->fc_estabtmo.function = lpfc_establish_link_tmo;
phba->fc_estabtmo.data = (unsigned long)phba;
- init_timer(&phba->fc_disctmo);
- phba->fc_disctmo.function = lpfc_disc_timeout;
- phba->fc_disctmo.data = (unsigned long)phba;
-
- init_timer(&phba->fc_fdmitmo);
- phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
- phba->fc_fdmitmo.data = (unsigned long)phba;
- init_timer(&phba->els_tmofunc);
- phba->els_tmofunc.function = lpfc_els_timeout;
- phba->els_tmofunc.data = (unsigned long)phba;
+
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
psli = &phba->sli;
init_timer(&psli->mbox_tmo);
psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long)phba;
-
+ psli->mbox_tmo.data = (unsigned long) phba;
init_timer(&phba->fcp_poll_timer);
phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long)phba;
-
- /*
- * Get all the module params for configuring this host and then
- * establish the host parameters.
- */
- lpfc_get_cfgparam(phba);
-
- host->max_id = LPFC_MAX_TARGET;
- host->max_lun = phba->cfg_max_luns;
- host->this_id = -1;
-
- INIT_LIST_HEAD(&phba->fc_nodes);
+ phba->fcp_poll_timer.data = (unsigned long) phba;
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
pci_set_master(pdev);
- retval = pci_set_mwi(pdev);
- if (retval)
- dev_printk(KERN_WARNING, &pdev->dev,
- "Warning: pci_set_mwi returned %d\n", retval);
+ pci_try_set_mwi(pdev);
if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
@@ -1623,18 +1810,27 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
+ phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ lpfc_sli_hbq_size(),
+ &phba->hbqslimp.phys,
+ GFP_KERNEL);
+ if (!phba->hbqslimp.virt)
+ goto out_free_slim;
+
+ memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
/* Initialize the SLI Layer to run with lpfc HBAs. */
lpfc_sli_setup(phba);
lpfc_sli_queue_setup(phba);
error = lpfc_mem_alloc(phba);
if (error)
- goto out_free_slim;
+ goto out_free_hbqslimp;
/* Initialize and populate the iocb list per host. */
INIT_LIST_HEAD(&phba->lpfc_iocb_list);
for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
- iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+ iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
@@ -1643,7 +1839,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocbq;
}
- memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq));
iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
if (iotag == 0) {
kfree (iocbq_entry);
@@ -1653,10 +1848,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENOMEM;
goto out_free_iocbq;
}
- spin_lock_irq(phba->host->host_lock);
+
+ spin_lock_irq(&phba->hbalock);
list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
phba->total_iocbq_bufs++;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
}
/* Initialize HBA structure */
@@ -1677,22 +1873,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocbq;
}
- /*
- * Set initial can_queue value since 0 is no longer supported and
- * scsi_add_host will fail. This will be adjusted later based on the
- * max xri value determined in hba setup.
- */
- host->can_queue = phba->cfg_hba_queue_depth - 10;
-
- /* Tell the midlayer we support 16 byte commands */
- host->max_cmd_len = 16;
-
/* Initialize the list of scsi buffers used by driver for scsi IO. */
spin_lock_init(&phba->scsi_buf_list_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
- host->transportt = lpfc_transport_template;
- pci_set_drvdata(pdev, host);
+ /* Initialize list of fabric iocbs */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ vport = lpfc_create_port(phba, phba->brd_no, NULL);
+ if (!vport)
+ goto out_kthread_stop;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba->pport = vport;
+ lpfc_debugfs_initialize(vport);
+
+ pci_set_drvdata(pdev, shost);
if (phba->cfg_use_msi) {
error = pci_enable_msi(phba->pcidev);
@@ -1703,38 +1899,68 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
}
error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
- LPFC_DRIVER_NAME, phba);
+ LPFC_DRIVER_NAME, phba);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0451 Enable interrupt handler failed\n",
phba->brd_no);
- goto out_kthread_stop;
+ goto out_disable_msi;
}
- error = scsi_add_host(host, &pdev->dev);
- if (error)
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+ phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+ phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ if (lpfc_alloc_sysfs_attr(vport))
goto out_free_irq;
- scsi_scan_host(host);
+ if (lpfc_sli_hba_setup(phba))
+ goto out_remove_device;
+
+ /*
+ * hba setup may have changed the hba_queue_depth so we need to adjust
+ * the value of can_queue.
+ */
+ shost->can_queue = phba->cfg_hba_queue_depth - 10;
+
+ lpfc_host_attrib_init(shost);
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ spin_lock_irq(shost->host_lock);
+ lpfc_poll_start_timer(phba);
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ scsi_scan_host(shost);
return 0;
+out_remove_device:
+ lpfc_free_sysfs_attr(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_UNLOADING;
+ spin_unlock_irq(shost->host_lock);
out_free_irq:
- lpfc_stop_timer(phba);
- phba->work_hba_events = 0;
+ lpfc_stop_phba_timers(phba);
+ phba->pport->work_port_events = 0;
free_irq(phba->pcidev->irq, phba);
+out_disable_msi:
pci_disable_msi(phba->pcidev);
+ destroy_port(vport);
out_kthread_stop:
kthread_stop(phba->worker_thread);
out_free_iocbq:
list_for_each_entry_safe(iocbq_entry, iocbq_next,
&phba->lpfc_iocb_list, list) {
- spin_lock_irq(phba->host->host_lock);
kfree(iocbq_entry);
phba->total_iocbq_bufs--;
- spin_unlock_irq(phba->host->host_lock);
}
lpfc_mem_free(phba);
+out_free_hbqslimp:
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
+ phba->hbqslimp.phys);
out_free_slim:
dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
phba->slim2p_mapping);
@@ -1744,27 +1970,85 @@ out_iounmap_slim:
iounmap(phba->slim_memmap_p);
out_idr_remove:
idr_remove(&lpfc_hba_index, phba->brd_no);
-out_put_host:
- phba->host = NULL;
- scsi_host_put(host);
+out_free_phba:
+ kfree(phba);
out_release_regions:
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
out:
pci_set_drvdata(pdev, NULL);
+ if (shost)
+ scsi_host_put(shost);
return error;
}
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
- struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata;
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_vport *port_iterator;
+ list_for_each_entry(port_iterator, &phba->port_list, listentry)
+ port_iterator->load_flag |= FC_UNLOADING;
+
+ kfree(vport->vname);
+ lpfc_free_sysfs_attr(vport);
+
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA.
+ */
+ lpfc_sli_hba_down(phba);
+ lpfc_sli_brdrestart(phba);
+
+ lpfc_stop_phba_timers(phba);
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
- lpfc_remove_device(phba);
+ lpfc_debugfs_terminate(vport);
+ lpfc_cleanup(vport);
+
+ kthread_stop(phba->worker_thread);
+
+ /* Release the irq reservation */
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
pci_set_drvdata(pdev, NULL);
+ scsi_host_put(shost);
+
+ /*
+ * Call scsi_free before mem_free since scsi bufs are released to their
+ * corresponding pools here.
+ */
+ lpfc_scsi_free(phba);
+ lpfc_mem_free(phba);
+
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
+ phba->hbqslimp.phys);
+
+ /* Free resources associated with SLI2 interface */
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p, phba->slim2p_mapping);
+
+ /* unmap adapter SLIM and Control Registers */
+ iounmap(phba->ctrl_regs_memmap_p);
+ iounmap(phba->slim_memmap_p);
+
+ idr_remove(&lpfc_hba_index, phba->brd_no);
+
+ kfree(phba);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
}
/**
@@ -1822,10 +2106,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Re-establishing Link */
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag |= FC_ESTABLISH_LINK;
+ spin_lock_irq(host->host_lock);
+ phba->pport->fc_flag |= FC_ESTABLISH_LINK;
+ spin_unlock_irq(host->host_lock);
+
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
/* Take device offline; this will perform cleanup */
@@ -1935,7 +2222,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
- .err_handler = &lpfc_err_handler,
+ .err_handler = &lpfc_err_handler,
};
static int __init
@@ -1948,11 +2235,15 @@ lpfc_init(void)
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
- if (!lpfc_transport_template)
+ lpfc_vport_transport_template =
+ fc_attach_transport(&lpfc_vport_transport_functions);
+ if (!lpfc_transport_template || !lpfc_vport_transport_template)
return -ENOMEM;
error = pci_register_driver(&lpfc_driver);
- if (error)
+ if (error) {
fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
+ }
return error;
}
@@ -1962,6 +2253,7 @@ lpfc_exit(void)
{
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
}
module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 438cbcd9eb13..8a6ceffeabcf 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -30,6 +30,7 @@
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
#define LOG_LIBDFC 0x2000 /* Libdfc events */
+#define LOG_VPORT 0x4000 /* NPIV events */
#define LOG_ALL_MSG 0xffff /* LOG all messages */
#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8041c3f06f7b..8f42fbfdd29e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -82,6 +82,22 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
}
/**********************************************/
+/* lpfc_heart_beat Issue a HEART_BEAT */
+/* mailbox command */
+/**********************************************/
+void
+lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+{
+ MAILBOX_t *mb;
+
+ mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_HEARTBEAT;
+ mb->mbxOwner = OWN_HOST;
+ return;
+}
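A hedged sketch of how this mailbox builder might be used from process context (for example the driver's worker thread). The wrapper name is hypothetical; only mempool_alloc(), lpfc_heart_beat(), lpfc_sli_def_mbox_cmpl, lpfc_sli_issue_mbox() and mempool_free() are calls visible elsewhere in this patch:

static void example_issue_heart_beat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return;

	lpfc_heart_beat(phba, pmb);			/* build MBX_HEARTBEAT */
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;	/* assumed completion handler */
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(pmb, phba->mbox_mem_pool);
}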
+
+/**********************************************/
/* lpfc_read_la Issue a READ LA */
/* mailbox command */
/**********************************************/
@@ -134,6 +150,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
+ struct lpfc_vport *vport = phba->pport;
MAILBOX_t *mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -147,7 +164,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
}
- mb->un.varCfgLnk.myId = phba->fc_myDID;
+ mb->un.varCfgLnk.myId = vport->fc_myDID;
mb->un.varCfgLnk.edtov = phba->fc_edtov;
mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
mb->un.varCfgLnk.ratov = phba->fc_ratov;
@@ -239,7 +256,7 @@ lpfc_init_link(struct lpfc_hba * phba,
/* mailbox command */
/**********************************************/
int
-lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
@@ -270,6 +287,7 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
+ mb->un.varRdSparm.vpi = vpi;
/* save address for completion */
pmb->context1 = mp;
@@ -282,7 +300,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* mailbox command */
/********************************************/
void
-lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
+lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
+ LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
@@ -290,6 +309,7 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varUnregDID.did = did;
+ mb->un.varUnregDID.vpi = vpi;
mb->mbxCommand = MBX_UNREG_D_ID;
mb->mbxOwner = OWN_HOST;
@@ -335,19 +355,17 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* mailbox command */
/********************************************/
int
-lpfc_reg_login(struct lpfc_hba * phba,
- uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
+lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+ uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
{
+ MAILBOX_t *mb = &pmb->mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
- MAILBOX_t *mb;
- struct lpfc_sli *psli;
- psli = &phba->sli;
- mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRegLogin.rpi = 0;
+ mb->un.varRegLogin.vpi = vpi;
mb->un.varRegLogin.did = did;
mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
@@ -359,12 +377,10 @@ lpfc_reg_login(struct lpfc_hba * phba,
kfree(mp);
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_MBOX,
- "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
- phba->brd_no,
- (uint32_t) did, (uint32_t) flag);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
+ "flag x%x\n",
+ phba->brd_no, vpi, did, flag);
return (1);
}
INIT_LIST_HEAD(&mp->list);
@@ -389,7 +405,8 @@ lpfc_reg_login(struct lpfc_hba * phba,
/* mailbox command */
/**********************************************/
void
-lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
+lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
+ LPFC_MBOXQ_t * pmb)
{
MAILBOX_t *mb;
@@ -398,12 +415,52 @@ lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
mb->un.varUnregLogin.rpi = (uint16_t) rpi;
mb->un.varUnregLogin.rsvd1 = 0;
+ mb->un.varUnregLogin.vpi = vpi;
mb->mbxCommand = MBX_UNREG_LOGIN;
mb->mbxOwner = OWN_HOST;
return;
}
+/**************************************************/
+/* lpfc_reg_vpi Issue a REG_VPI */
+/* mailbox command */
+/**************************************************/
+void
+lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
+ LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varRegVpi.vpi = vpi;
+ mb->un.varRegVpi.sid = sid;
+
+ mb->mbxCommand = MBX_REG_VPI;
+ mb->mbxOwner = OWN_HOST;
+ return;
+
+}
+
+/**************************************************/
+/* lpfc_unreg_vpi Issue a UNREG_VPI */
+/* mailbox command */
+/**************************************************/
+void
+lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+
+ mb->un.varUnregVpi.vpi = vpi;
+
+ mb->mbxCommand = MBX_UNREG_VPI;
+ mb->mbxOwner = OWN_HOST;
+ return;
+
+}
+
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
@@ -412,14 +469,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
PCB_t *pcbp = &phba->slim2p->pcb;
dma_addr_t pdma_addr;
uint32_t offset;
- uint32_t iocbCnt;
+ uint32_t iocbCnt = 0;
int i;
pcbp->maxRing = (psli->num_rings - 1);
- iocbCnt = 0;
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
+
+ pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
+ SLI2_IOCB_RSP_SIZE;
/* A ring MUST have both cmd and rsp entries defined to be
valid */
if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
@@ -434,20 +495,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
continue;
}
/* Command ring setup for ring */
- pring->cmdringaddr =
- (void *)&phba->slim2p->IOCBs[iocbCnt];
+ pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
- offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
- (uint8_t *)phba->slim2p;
+ offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
+ (uint8_t *) phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
- pring->rspringaddr =
- (void *)&phba->slim2p->IOCBs[iocbCnt];
+ pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
@@ -462,16 +521,108 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
- MAILBOX_t *mb;
-
- mb = &pmb->mb;
+ MAILBOX_t *mb = &pmb->mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
mb->un.varRdRev.cv = 1;
+ mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
mb->mbxCommand = MBX_READ_REV;
mb->mbxOwner = OWN_HOST;
return;
}
+static void
+lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
+}
+
+static void
+lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
+ hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
+ memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
+ sizeof(hbqmb->profiles.profile3.cmdmatch));
+}
+
+static void
+lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
+ struct lpfc_hbq_init *hbq_desc)
+{
+ hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
+ hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
+ hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
+ hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
+ memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
+ sizeof(hbqmb->profiles.profile5.cmdmatch));
+}
+
+void
+lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
+ uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
+{
+ int i;
+ MAILBOX_t *mb = &pmb->mb;
+ struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
+
+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
+ hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
+ hbqmb->recvNotify = hbq_desc->rn; /* Receive
+ * Notification */
+ hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
+ * # in words 0-19 */
+ hbqmb->profile = hbq_desc->profile; /* Selection profile:
+ * 0 = all,
+ * 7 = logentry */
+ hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
+ * e.g. Ring0=b0001,
+ * ring2=b0100 */
+ hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
+ * or 5 */
+ hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
+ * HBQ will be used
+ * for LogEntry
+ * buffers */
+ hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
+ hbq_entry_index * sizeof(struct lpfc_hbq_entry);
+ hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
+
+ mb->mbxCommand = MBX_CONFIG_HBQ;
+ mb->mbxOwner = OWN_HOST;
+
+ /* Copy info for profiles 2, 3 and 5. For other
+ * profiles this area is reserved.
+ */
+ if (hbq_desc->profile == 2)
+ lpfc_build_hbq_profile2(hbqmb, hbq_desc);
+ else if (hbq_desc->profile == 3)
+ lpfc_build_hbq_profile3(hbqmb, hbq_desc);
+ else if (hbq_desc->profile == 5)
+ lpfc_build_hbq_profile5(hbqmb, hbq_desc);
+
+ /* Return if no rctl / type masks for this HBQ */
+ if (!hbq_desc->mask_count)
+ return;
+
+ /* Otherwise we setup specific rctl / type masks for this HBQ */
+ for (i = 0; i < hbq_desc->mask_count; i++) {
+ hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
+ hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
+ hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
+ hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
+ }
+
+ return;
+}
+
+
+
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
@@ -514,15 +665,16 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
}
void
-lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
+ MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
MAILBOX_t *mb = &pmb->mb;
dma_addr_t pdma_addr;
uint32_t bar_low, bar_high;
size_t offset;
struct lpfc_hgp hgp;
- void __iomem *to_slim;
int i;
+ uint32_t pgp_offset;
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
mb->mbxCommand = MBX_CONFIG_PORT;
@@ -535,12 +687,29 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+ /* If the HBA supports SLI-3, ask for it */
+
+ if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+ mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
+ if (phba->max_vpi && phba->cfg_npiv_enable &&
+ phba->vpd.sli3Feat.cmv) {
+ mb->un.varCfgPort.max_vpi = phba->max_vpi;
+ mb->un.varCfgPort.cmv = 1;
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ } else
+ mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
+ } else
+ phba->sli_rev = 2;
+ mb->un.varCfgPort.sli_mode = phba->sli_rev;
+
/* Now setup pcb */
phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
+ phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
+ sizeof(struct sli2_desc);
offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
pdma_addr = phba->slim2p_mapping + offset;
phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -568,29 +737,70 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
+ /*
+ * Set up HGP - Port Memory
+ *
+ * The port expects the host get/put pointers to reside in memory
+ * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
+ * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
+ * words (0x40 bytes). This area is not reserved if HBQs are
+ * configured in SLI-3.
+ *
+ * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
+ * RR0Get 0xc4 0x84
+ * CR1Put 0xc8 0x88
+ * RR1Get 0xcc 0x8c
+ * CR2Put 0xd0 0x90
+ * RR2Get 0xd4 0x94
+ * CR3Put 0xd8 0x98
+ * RR3Get 0xdc 0x9c
+ *
+ * Reserved 0xa0-0xbf
+ * If HBQs configured:
+ * HBQ 0 Put ptr 0xc0
+ * HBQ 1 Put ptr 0xc4
+ * HBQ 2 Put ptr 0xc8
+ * ......
+ * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
+ *
+ */
+
+ if (phba->sli_rev == 3) {
+ phba->host_gp = &mb_slim->us.s3.host[0];
+ phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
+ } else {
+ phba->host_gp = &mb_slim->us.s2.host[0];
+ phba->hbq_put = NULL;
+ }
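	/*
	 * Hedged worked example of the layout described above (all offsets
	 * relative to the start of SLIM): the 32-word mailbox occupies
	 * 0x00-0x7f.  In SLI-2 mode an extra 16 reserved words follow, so
	 * the host get/put array starts at 0x80 + 0x40 = 0xc0.  With HBQs
	 * configured (SLI-3) that reserved area is dropped, the host array
	 * starts at 0x80, and HBQ put pointer n lives at 0xc0 + n * 4.
	 */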
/* mask off BAR0's flag bits 0 - 3 */
phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (SLIMOFF*sizeof(uint32_t));
+ (void __iomem *) phba->host_gp -
+ (void __iomem *)phba->MBslimaddr;
if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
phba->slim2p->pcb.hgpAddrHigh = bar_high;
else
phba->slim2p->pcb.hgpAddrHigh = 0;
/* write HGP data to SLIM at the required longword offset */
memset(&hgp, 0, sizeof(struct lpfc_hgp));
- to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
for (i=0; i < phba->sli.num_rings; i++) {
- lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp));
- to_slim += sizeof (struct lpfc_hgp);
+ lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
+ sizeof(*phba->host_gp));
}
/* Setup Port Group ring pointer */
- offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
- (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ if (phba->sli_rev == 3)
+ pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
+ (uint8_t *)phba->slim2p;
+ else
+ pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
+ (uint8_t *)phba->slim2p;
+
+ pdma_addr = phba->slim2p_mapping + pgp_offset;
phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
+ phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
 /* Use callback routine to set up rings in the pcb */
lpfc_config_pcb_setup(phba);
@@ -606,11 +816,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* Swap PCB if needed */
lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
- sizeof (PCB_t));
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "%d:0405 Service Level Interface (SLI) 2 selected\n",
- phba->brd_no);
+ sizeof(PCB_t));
}
void
@@ -644,15 +850,23 @@ lpfc_mbox_get(struct lpfc_hba * phba)
LPFC_MBOXQ_t *mbq = NULL;
struct lpfc_sli *psli = &phba->sli;
- list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t,
- list);
- if (mbq) {
+ list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
+ if (mbq)
psli->mboxq_cnt--;
- }
return mbq;
}
+void
+lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+{
+ /* This function expects to be called from interrupt context */
+ spin_lock(&phba->hbalock);
+ list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+ spin_unlock(&phba->hbalock);
+ return;
+}
+
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ec3bbbde6f7a..3594c469494f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -38,10 +38,13 @@
#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
+
+
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ int longs;
int i;
phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
@@ -80,10 +83,27 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
if (!phba->nlp_mem_pool)
goto fail_free_mbox_pool;
+ phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool", phba->pcidev,
+ LPFC_BPL_SIZE, 8, 0);
+ if (!phba->lpfc_hbq_pool)
+ goto fail_free_nlp_mem_pool;
+
+ /* vpi zero is reserved for the physical port so add 1 to max */
+ longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+ phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
+ if (!phba->vpi_bmask)
+ goto fail_free_hbq_pool;
+
return 0;
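The rounding used for vpi_bmask above is the usual bits-to-longs idiom; a small, self-contained illustration (the max_vpi value is made up for the example):

#include <stdio.h>

int main(void)
{
	unsigned int max_vpi = 100;		/* illustrative value only */
	unsigned int bits = max_vpi + 1;	/* vpi 0 is the physical port */
	unsigned int bits_per_long = 8 * sizeof(unsigned long);
	unsigned int longs = (bits + bits_per_long - 1) / bits_per_long;

	/* On a 64-bit host: 101 bits -> 2 longs; on 32-bit: 4 longs. */
	printf("%u bits -> %u longs\n", bits, longs);
	return 0;
}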
+ fail_free_hbq_pool:
+ lpfc_sli_hbqbuf_free_all(phba);
+ fail_free_nlp_mem_pool:
+ mempool_destroy(phba->nlp_mem_pool);
+ phba->nlp_mem_pool = NULL;
fail_free_mbox_pool:
mempool_destroy(phba->mbox_mem_pool);
+ phba->mbox_mem_pool = NULL;
fail_free_mbuf_pool:
while (i--)
pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
@@ -91,8 +111,10 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
kfree(pool->elements);
fail_free_lpfc_mbuf_pool:
pci_pool_destroy(phba->lpfc_mbuf_pool);
+ phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+ phba->lpfc_scsi_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -106,6 +128,9 @@ lpfc_mem_free(struct lpfc_hba * phba)
struct lpfc_dmabuf *mp;
int i;
+ kfree(phba->vpi_bmask);
+ lpfc_sli_hbqbuf_free_all(phba);
+
list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
mp = (struct lpfc_dmabuf *) (mbox->context1);
if (mp) {
@@ -115,6 +140,15 @@ lpfc_mem_free(struct lpfc_hba * phba)
list_del(&mbox->list);
mempool_free(mbox, phba->mbox_mem_pool);
}
+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
+ mp = (struct lpfc_dmabuf *) (mbox->context1);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ list_del(&mbox->list);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
if (psli->mbox_active) {
@@ -132,13 +166,21 @@ lpfc_mem_free(struct lpfc_hba * phba)
pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
pool->elements[i].phys);
kfree(pool->elements);
+
+ pci_pool_destroy(phba->lpfc_hbq_pool);
mempool_destroy(phba->nlp_mem_pool);
mempool_destroy(phba->mbox_mem_pool);
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
pci_pool_destroy(phba->lpfc_mbuf_pool);
- /* Free the iocb lookup array */
+ phba->lpfc_hbq_pool = NULL;
+ phba->nlp_mem_pool = NULL;
+ phba->mbox_mem_pool = NULL;
+ phba->lpfc_scsi_dma_buf_pool = NULL;
+ phba->lpfc_mbuf_pool = NULL;
+
+ /* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
@@ -148,20 +190,23 @@ void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
+ unsigned long iflags;
void *ret;
ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
- if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
pool->current_count--;
ret = pool->elements[pool->current_count].virt;
*handle = pool->elements[pool->current_count].phys;
}
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
}
void
-lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
@@ -174,3 +219,51 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
}
return;
}
+
+void
+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
+{
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_mbuf_free(phba, virt, dma);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+}
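The split into __lpfc_mbuf_free() and lpfc_mbuf_free() above follows the common locked/unlocked naming convention. A hedged sketch of the intended calling rule (phba and mp are assumed to be in scope; the caller code is illustrative, not from the driver):

	unsigned long flags;

	/* Already under hbalock: use the double-underscore variant. */
	spin_lock_irqsave(&phba->hbalock, flags);
	__lpfc_mbuf_free(phba, mp->virt, mp->phys);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* No lock held: the plain variant takes hbalock internally. */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);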
+
+void *
+lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+ void *ret;
+ ret = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, handle);
+ return ret;
+}
+
+void
+lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+ pci_pool_free(phba->lpfc_hbq_pool, virt, dma);
+ return;
+}
+
+void
+lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+ struct hbq_dmabuf *hbq_entry;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
+ if (hbq_entry->tag == -1) {
+ lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
+ hbq_entry->dbuf.phys);
+ kfree(hbq_entry);
+ } else {
+ lpfc_sli_free_hbq(phba, hbq_entry);
+ }
+ } else {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return;
+}
+
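lpfc_in_buf_free() above recovers the enclosing hbq_dmabuf from the embedded lpfc_dmabuf with container_of(); a small self-contained illustration of that pattern (the struct layouts and field types here are simplified assumptions, not the driver's definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lpfc_dmabuf { void *virt; unsigned long phys; };
struct hbq_dmabuf  { struct lpfc_dmabuf dbuf; unsigned int tag; };

int main(void)
{
	struct hbq_dmabuf hbq = { .tag = 7 };
	struct lpfc_dmabuf *mp = &hbq.dbuf;	/* what the free path is handed */
	struct hbq_dmabuf *wrapper = container_of(mp, struct hbq_dmabuf, dbuf);

	printf("recovered tag = %u\n", wrapper->tag);	/* prints 7 */
	return 0;
}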
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b309841e3846..bca2f5c9b4ba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,4 +1,4 @@
-/*******************************************************************
+ /*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2007 Emulex. All rights reserved. *
@@ -35,20 +35,22 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
/* Called to verify a rcv'ed ADISC was intended for us. */
static int
-lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
- struct lpfc_name * nn, struct lpfc_name * pn)
+lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_name *nn, struct lpfc_name *pn)
{
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
*/
- if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
+ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
return 0;
- if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
+ if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
return 0;
/* we match, return success */
@@ -56,11 +58,10 @@ lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
}
int
-lpfc_check_sparm(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, struct serv_parm * sp,
- uint32_t class)
+lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct serv_parm * sp, uint32_t class)
{
- volatile struct serv_parm *hsp = &phba->fc_sparam;
+ volatile struct serv_parm *hsp = &vport->fc_sparam;
uint16_t hsp_value, ssp_value = 0;
/*
@@ -75,12 +76,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
hsp->cls1.rcvDataSizeLsb;
ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
sp->cls1.rcvDataSizeLsb;
+ if (!ssp_value)
+ goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
}
} else if (class == CLASS1) {
- return 0;
+ goto bad_service_param;
}
if (sp->cls2.classValid) {
@@ -88,12 +91,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
hsp->cls2.rcvDataSizeLsb;
ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
sp->cls2.rcvDataSizeLsb;
+ if (!ssp_value)
+ goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
}
} else if (class == CLASS2) {
- return 0;
+ goto bad_service_param;
}
if (sp->cls3.classValid) {
@@ -101,12 +106,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
hsp->cls3.rcvDataSizeLsb;
ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
sp->cls3.rcvDataSizeLsb;
+ if (!ssp_value)
+ goto bad_service_param;
if (ssp_value > hsp_value) {
sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
}
} else if (class == CLASS3) {
- return 0;
+ goto bad_service_param;
}
/*
@@ -125,12 +132,22 @@ lpfc_check_sparm(struct lpfc_hba * phba,
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
return 1;
+bad_service_param:
+ lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0207 Device %x "
+ "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
+ "invalid service parameters. Ignoring device.\n",
+ vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
+ sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
+ sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
+ sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
+ sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
+ return 0;
}
static void *
-lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
- struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
@@ -168,32 +185,29 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
* routine effectively results in a "software abort".
*/
int
-lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
+lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
- struct lpfc_sli_ring *pring;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *cmd;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0205 Abort outstanding I/O on NPort x%x "
+ "%d (%d):0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
- phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
- psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
+ lpfc_fabric_abort_nport(ndlp);
/* First check the txq */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- /* Check to see if iocb matches the nport we are looking
- for */
+ /* Check to see if iocb matches the nport we are looking for */
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
- /* It matches, so deque and call compl with an
- error */
+ /* It matches, so dequeue and call compl with an error */
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
@@ -201,37 +215,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- /* Check to see if iocb matches the nport we are looking
- for */
- if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+ /* Check to see if iocb matches the nport we are looking for */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
/* If we are delaying issuing an ELS command, cancel it */
if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
}
static int
-lpfc_rcv_plogi(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb)
+lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
IOCB_t *icmd;
@@ -241,14 +257,14 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
- if (phba->hba_state <= LPFC_FLOGI) {
+ if (vport->port_state <= LPFC_FLOGI) {
/* Before responding to PLOGI, check for pt2pt mode.
* If we are pt2pt, with an outstanding FLOGI, abort
* the FLOGI and resend it first.
*/
- if (phba->fc_flag & FC_PT2PT) {
- lpfc_els_abort_flogi(phba);
- if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ if (vport->fc_flag & FC_PT2PT) {
+ lpfc_els_abort_flogi(phba);
+ if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* If the other side is supposed to initiate
* the PLOGI anyway, just ACC it now and
* move on with discovery.
@@ -257,45 +273,42 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
phba->fc_ratov = FF_DEF_RATOV;
/* Start discovery - this should just do
CLEAR_LA */
- lpfc_disc_start(phba);
- } else {
- lpfc_initial_flogi(phba);
- }
+ lpfc_disc_start(vport);
+ } else
+ lpfc_initial_flogi(vport);
} else {
stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
- ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
return 0;
}
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
- if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
return 0;
}
icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi);
- if ((phba->cfg_fcp_class == 2) &&
- (sp->cls2.classValid)) {
+ if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
ndlp->nlp_fcp_info |= CLASS2;
- } else {
+ else
ndlp->nlp_fcp_info |= CLASS3;
- }
+
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -317,35 +330,37 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
return 1;
}
- if ((phba->fc_flag & FC_PT2PT)
- && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
- phba->fc_myDID = icmd->un.rcvels.parmRo;
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox == NULL)
goto out;
lpfc_config_link(phba, mbox);
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
rc = lpfc_sli_issue_mbox
(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
if (rc == MBX_NOT_FINISHED) {
- mempool_free( mbox, phba->mbox_mem_pool);
+ mempool_free(mbox, phba->mbox_mem_pool);
goto out;
}
- lpfc_can_disctmo(phba);
+ lpfc_can_disctmo(vport);
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ if (!mbox)
goto out;
- if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, 0)) {
- mempool_free( mbox, phba->mbox_mem_pool);
+ rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ (uint8_t *) sp, mbox, 0);
+ if (rc) {
+ mempool_free(mbox, phba->mbox_mem_pool);
goto out;
}
@@ -357,7 +372,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
* mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
+ mbox->vport = vport;
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ spin_unlock_irq(shost->host_lock);
/*
* If there is an outstanding PLOGI issued, abort it before
@@ -373,24 +391,41 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
lpfc_els_abort(phba, ndlp);
}
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
+ if ((vport->port_type == LPFC_NPIV_PORT &&
+ phba->cfg_vport_restrict_login)) {
+
+ /* In order to preserve RPIs, we want to clean up
+ * the default RPI the firmware created to receive
+ * this ELS request. The only way to do this is
+ * to register, then unregister the RPI.
+ */
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
+ spin_unlock_irq(shost->host_lock);
+ stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, mbox);
+ return 1;
+ }
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
return 1;
out:
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
return 0;
}
static int
-lpfc_rcv_padisc(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
+lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_dmabuf *pcmd;
- struct serv_parm *sp;
- struct lpfc_name *pnn, *ppn;
+ struct serv_parm *sp;
+ struct lpfc_name *pnn, *ppn;
struct ls_rjt stat;
ADISC *ap;
IOCB_t *icmd;
@@ -412,13 +447,12 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
}
icmd = &cmdiocb->iocb;
- if ((icmd->ulpStatus == 0) &&
- (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
+ if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
if (cmd == ELS_CMD_ADISC) {
- lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
- lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
- NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
+ NULL, 0);
}
return 1;
}
@@ -427,55 +461,57 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return 0;
}
static int
-lpfc_rcv_logo(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb,
- uint32_t els_cmd)
+lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
- /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
* PLOGIs during LOGO storms from a device.
*/
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
+ spin_unlock_irq(shost->host_lock);
if (els_cmd == ELS_CMD_PRLO)
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
else
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
if (!(ndlp->nlp_type & NLP_FABRIC) ||
- (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+ (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
} else {
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
/* The driver has to wait until the ACC completes before it continues
* processing the LOGO. The action will resume in
* lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -485,9 +521,8 @@ lpfc_rcv_logo(struct lpfc_hba * phba,
}
static void
-lpfc_rcv_prli(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- struct lpfc_iocbq *cmdiocb)
+lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
@@ -501,8 +536,7 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
- (npr->prliType == PRLI_FCP_TYPE)) {
+ if (npr->prliType == PRLI_FCP_TYPE) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if (npr->targetFunc)
@@ -517,36 +551,42 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (ndlp->nlp_type & NLP_FCP_TARGET)
roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
+ "rport rolechg: role:x%x did:x%x flg:x%x",
+ roles, ndlp->nlp_DID, ndlp->nlp_flag);
+
fc_remote_port_rolechg(rport, roles);
}
}
static uint32_t
-lpfc_disc_set_adisc(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp)
+lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
/* Check config parameter use-adisc or FCP-2 */
- if ((phba->cfg_use_adisc == 0) &&
- !(phba->fc_flag & FC_RSCN_MODE)) {
- if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
- return 0;
+ if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
+ return 1;
}
- spin_lock_irq(phba->host->host_lock);
- ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
- return 1;
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ lpfc_unreg_rpi(vport, ndlp);
+ return 0;
}
static uint32_t
-lpfc_disc_illegal(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-{
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0253 Illegal State Transition: node x%x event x%x, "
- "state x%x Data: x%x x%x\n",
- phba->brd_no,
+lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0253 Illegal State Transition: node x%x "
+ "event x%x, state x%x Data: x%x x%x\n",
+ vport->phba->brd_no, vport->vpi,
ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
ndlp->nlp_flag);
return ndlp->nlp_state;
@@ -555,151 +595,162 @@ lpfc_disc_illegal(struct lpfc_hba * phba,
/* Start of Discovery State Machine routines */
static uint32_t
-lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_issue_els_logo(phba, ndlp, 0);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(phba->host->host_lock);
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_device_rm_unused_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
- struct lpfc_dmabuf *pcmd;
- struct serv_parm *sp;
- uint32_t *lp;
+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ uint32_t *lp = (uint32_t *) pcmd->virt;
+ struct serv_parm *sp = (struct serv_parm *) (lp + 1);
struct ls_rjt stat;
int port_cmp;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-
memset(&stat, 0, sizeof (struct ls_rjt));
/* For a PLOGI, we only accept if our portname is less
* than the remote portname.
*/
phba->fc_stat.elsLogiCol++;
- port_cmp = memcmp(&phba->fc_portname, &sp->portName,
- sizeof (struct lpfc_name));
+ port_cmp = memcmp(&vport->fc_portname, &sp->portName,
+ sizeof(struct lpfc_name));
if (port_cmp >= 0) {
/* Reject this request because the remote node will accept
ours */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+ NULL);
} else {
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
- } /* if our portname was less */
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ } /* If our portname was less */
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
- cmdiocb = (struct lpfc_iocbq *) arg;
+ memset(&stat, 0, sizeof (struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return ndlp->nlp_state;
+}
- /* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp);
+static uint32_t
+lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
if (evt == NLP_EVT_RCV_LOGO) {
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
} else {
- lpfc_issue_els_logo(phba, ndlp, 0);
+ lpfc_issue_els_logo(vport, ndlp, 0);
}
- /* Put ndlp in npr list set plogi timer for 1 sec */
+ /* Put ndlp in npr state, set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
IOCB_t *irsp;
@@ -721,31 +772,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- prsp = list_get_first(&pcmd->list,
- struct lpfc_dmabuf,
- list);
- lp = (uint32_t *) prsp->virt;
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
- if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
+ if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
goto out;
/* PLOGI chkparm OK */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_ELS,
- "%d:0121 PLOGI chkparm OK "
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (%d):0121 PLOGI chkparm OK "
"Data: x%x x%x x%x x%x\n",
- phba->brd_no,
+ phba->brd_no, vport->vpi,
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
- if ((phba->cfg_fcp_class == 2) &&
- (sp->cls2.classValid)) {
+ if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
ndlp->nlp_fcp_info |= CLASS2;
- } else {
+ else
ndlp->nlp_fcp_info |= CLASS3;
- }
+
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -756,16 +802,23 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
if (sp->cls4.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
- ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- sp->cmn.bbRcvSizeLsb;
+ ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- if (!(mbox = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL)))
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0133 PLOGI: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
goto out;
+ }
- lpfc_unreg_rpi(phba, ndlp);
- if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp,
- mbox, 0) == 0) {
+ lpfc_unreg_rpi(vport, ndlp);
+
+ if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ (uint8_t *) sp, mbox, 0) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -777,68 +830,104 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
}
mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
if (lpfc_sli_issue_mbox(phba, mbox,
(MBX_NOWAIT | MBX_STOP_IOCB))
!= MBX_NOT_FINISHED) {
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
return ndlp->nlp_state;
}
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *)mbox->context1;
+ mp = (struct lpfc_dmabuf *) mbox->context1;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0134 PLOGI: cannot issue reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
} else {
mempool_free(mbox, phba->mbox_mem_pool);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0135 PLOGI: cannot format reg_login "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vport->vpi,
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag, ndlp->nlp_rpi);
}
- out:
+out:
+ if (ndlp->nlp_DID == NameServer_DID) {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0261 Cannot Register NameServer login\n",
+ phba->brd_no, vport->vpi);
+ }
+
/* Free this node since the driver cannot login or has the wrong
sparm */
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
+ } else {
/* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
-lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
/* software abort outstanding ADISC */
@@ -846,34 +935,31 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
cmdiocb = (struct lpfc_iocbq *) arg;
- if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
return ndlp->nlp_state;
- }
+
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -881,42 +967,43 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
/* Treat like rcv logo */
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
ADISC *ap;
@@ -928,101 +1015,112 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if ((irsp->ulpStatus) ||
- (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
+ (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
- memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
+ memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
+ memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_unreg_rpi(vport, ndlp);
return ndlp->nlp_state;
}
if (ndlp->nlp_type & NLP_FCP_TARGET) {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
} else {
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
+ } else {
/* software abort outstanding ADISC */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
-lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
/* software abort outstanding ADISC */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- ndlp->nlp_flag |= NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
-
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
@@ -1033,12 +1131,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
if ((mb = phba->sli.mbox_active)) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ lpfc_nlp_put(ndlp);
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1047,61 +1146,61 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+ lpfc_nlp_put(ndlp);
list_del(&mb->list);
mempool_free(mb, phba->mbox_mem_pool);
}
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp,
- void *arg, uint32_t evt)
+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
- LPFC_MBOXQ_t *pmb;
- MAILBOX_t *mb;
- uint32_t did;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->mb;
+ uint32_t did = mb->un.varWords[1];
- pmb = (LPFC_MBOXQ_t *) arg;
- mb = &pmb->mb;
- did = mb->un.varWords[1];
if (mb->mbxStatus) {
/* RegLogin failed */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_DISCOVERY,
- "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
- phba->brd_no,
- did, mb->mbxStatus, phba->hba_state);
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "%d (%d):0246 RegLogin failed Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, vport->vpi,
+ did, mb->mbxStatus, vport->port_state);
/*
* If RegLogin failed due to lack of HBA resources do not
@@ -1109,20 +1208,20 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
*/
if (mb->mbxStatus == MBXERR_RPI_FULL) {
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
return ndlp->nlp_state;
}
- /* Put ndlp in npr list set plogi timer for 1 sec */
+ /* Put ndlp in npr state, set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
- lpfc_issue_els_logo(phba, ndlp, 0);
+ lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -1131,91 +1230,99 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
/* Only if we are not a fabric nport do we issue PRLI */
if (!(ndlp->nlp_type & NLP_FABRIC)) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(phba, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
+lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
- lpfc_drop_node(phba, ndlp);
+ } else {
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
static uint32_t
-lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb;
cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Software abort outstanding PRLI before sending acc */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
@@ -1225,21 +1332,22 @@ lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
* NEXT STATE = PRLI_ISSUE
*/
static uint32_t
-lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *cmdiocb, *rspiocb;
+ struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
PRLI *npr;
@@ -1249,8 +1357,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
+ if ((vport->port_type == LPFC_NPIV_PORT) &&
+ phba->cfg_vport_restrict_login) {
+ goto out;
+ }
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
@@ -1266,319 +1378,329 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
+ if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (vport->port_type == LPFC_NPIV_PORT) &&
+ phba->cfg_vport_restrict_login) {
+out:
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_TARGET_REMOVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ return ndlp->nlp_state;
+ }
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ else
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
/*! lpfc_device_rm_prli_issue
- *
- * \pre
- * \post
- * \param phba
- * \param ndlp
- * \param arg
- * \param evt
- * \return uint32_t
- *
- * \b Description:
- * This routine is envoked when we a request to remove a nport we are in the
- * process of PRLIing. We should software abort outstanding prli, unreg
- * login, send a logout. We will change node state to UNUSED_NODE, put it
- * on plogi list so it can be freed when LOGO completes.
- *
- */
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * This routine is invoked when we receive a request to remove an nport we are in the
+ * process of PRLIing. We should software abort outstanding prli, unreg
+ * login, send a logout. We will change node state to UNUSED_NODE, put it
+ * on plogi list so it can be freed when LOGO completes.
+ *
+ */
+
static uint32_t
-lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
- }
- else {
+ } else {
/* software abort outstanding PLOGI */
- lpfc_els_abort(phba, ndlp);
+ lpfc_els_abort(vport->phba, ndlp);
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
/*! lpfc_device_recov_prli_issue
- *
- * \pre
- * \post
- * \param phba
- * \param ndlp
- * \param arg
- * \param evt
- * \return uint32_t
- *
- * \b Description:
- * The routine is envoked when the state of a device is unknown, like
- * during a link down. We should remove the nodelist entry from the
- * unmapped list, issue a UNREG_LOGIN, do a software abort of the
- * outstanding PRLI command, then free the node entry.
- */
+ *
+ * \pre
+ * \post
+ * \param phba
+ * \param ndlp
+ * \param arg
+ * \param evt
+ * \return uint32_t
+ *
+ * \b Description:
+ * The routine is invoked when the state of a device is unknown, like
+ * during a link down. We should remove the nodelist entry from the
+ * unmapped list, issue a UNREG_LOGIN, do a software abort of the
+ * outstanding PRLI command, then free the node entry.
+ */
static uint32_t
-lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
/* software abort outstanding PRLI */
lpfc_els_abort(phba, ndlp);
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_prli(phba, ndlp, cmdiocb);
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_rcv_prli(vport, ndlp, cmdiocb);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- lpfc_disc_set_adisc(phba, ndlp);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_plogi(phba, ndlp, cmdiocb);
+ lpfc_rcv_plogi(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
- spin_unlock_irq(phba->host->host_lock);
+ ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg,
+ uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
- spin_lock_irq(phba->host->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
- lpfc_disc_set_adisc(phba, ndlp);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Ignore PLOGI if we have an outstanding LOGO */
- if (ndlp->nlp_flag & NLP_LOGO_SND) {
+ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
return ndlp->nlp_state;
}
- if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
- spin_lock_irq(phba->host->host_lock);
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
/* send PLOGI immediately, move to PLOGI issue state */
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
- struct ls_rjt stat;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
memset(&stat, 0, sizeof (struct ls_rjt));
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
- lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, 0);
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- cmdiocb = (struct lpfc_iocbq *) arg;
-
- lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
+ lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_rcv_padisc(phba, ndlp, cmdiocb);
+ lpfc_rcv_padisc(vport, ndlp, cmdiocb);
/*
* Do not start discovery if discovery is about to start
@@ -1586,53 +1708,52 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
* here will affect the counting of discovery threads.
*/
if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
- !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
+ !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
- lpfc_issue_els_adisc(phba, ndlp, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
+ lpfc_issue_els_adisc(vport, ndlp, 0);
} else {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
}
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- struct lpfc_iocbq *cmdiocb;
-
- cmdiocb = (struct lpfc_iocbq *) arg;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_LOGO_ACC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
- lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
} else {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1642,15 +1763,15 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1660,25 +1781,24 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- lpfc_unreg_rpi(phba, ndlp);
+ lpfc_unreg_rpi(vport, ndlp);
/* This routine does nothing, just return the current state */
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
@@ -1688,28 +1808,25 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
irsp = &rspiocb->iocb;
if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
}
static uint32_t
-lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- LPFC_MBOXQ_t *pmb;
- MAILBOX_t *mb;
-
- pmb = (LPFC_MBOXQ_t *) arg;
- mb = &pmb->mb;
+ LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
+ MAILBOX_t *mb = &pmb->mb;
if (!mb->mbxStatus)
ndlp->nlp_rpi = mb->un.varWords[0];
else {
if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
}
@@ -1717,28 +1834,38 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
}
static uint32_t
-lpfc_device_rm_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NODEV_REMOVE;
+ spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
- lpfc_drop_node(phba, ndlp);
+ lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
static uint32_t
-lpfc_device_recov_npr_node(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg,
- uint32_t evt)
+lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
- spin_lock_irq(phba->host->host_lock);
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /* Don't do anything that will mess up processing of the
+ * previous RSCN.
+ */
+ if (vport->fc_flag & FC_RSCN_DEFERRED)
+ return ndlp->nlp_state;
+
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_DELAY_TMO) {
- lpfc_cancel_retry_delay_tmo(phba, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
return ndlp->nlp_state;
}
@@ -1801,7 +1928,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
*/
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
- (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
+ (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
/* Action routine Event Current State */
lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
lpfc_rcv_els_unused_node, /* RCV_PRLI */
@@ -1818,7 +1945,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
- lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
+ lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
@@ -1917,35 +2044,41 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
};
int
-lpfc_disc_state_machine(struct lpfc_hba * phba,
- struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
+lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
uint32_t cur_state, rc;
- uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
+ uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
lpfc_nlp_get(ndlp);
cur_state = ndlp->nlp_state;
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0211 DSM in event x%x on NPort x%x in state %d "
- "Data: x%x\n",
- phba->brd_no,
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0211 DSM in event x%x on NPort x%x in "
+ "state %d Data: x%x\n",
+ phba->brd_no, vport->vpi,
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM in: evt:%d ste:%d did:x%x",
+ evt, cur_state, ndlp->nlp_DID);
+
func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
- rc = (func) (phba, ndlp, arg, evt);
+ rc = (func) (vport, ndlp, arg, evt);
/* DSM out state <rc> on NPort <nlp_DID> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_DISCOVERY,
- "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
- phba->brd_no,
- rc, ndlp->nlp_DID, ndlp->nlp_flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "%d (%d):0212 DSM out state %d on NPort x%x "
+ "Data: x%x\n",
+ phba->brd_no, vport->vpi,
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
+ "DSM out: ste:%d did:x%x flg:x%x",
+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_nlp_put(ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9a12d05e99e4..8f45bbc42126 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,10 +37,158 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
+/*
+ * This function is called with no lock held when there is a resource
+ * error in the driver or in firmware.
+ */
+void
+lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ atomic_inc(&phba->num_rsrc_err);
+ phba->last_rsrc_error_time = jiffies;
+
+ if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ phba->last_ramp_down_time = jiffies;
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+ if ((phba->pport->work_port_events &
+ WORKER_RAMP_DOWN_QUEUE) == 0) {
+ phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
+ }
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ return;
+}
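The helper above only records the event and nudges the worker thread; the actual ramp-down runs later in lpfc_ramp_down_queue_handler(). A minimal sketch of that defer-to-worker pattern, with purely illustrative names (evt_lock, worker_wq, EXAMPLE_RAMP_DOWN are not lpfc symbols):

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(evt_lock);
static DECLARE_WAIT_QUEUE_HEAD(worker_wq);
static unsigned long pending_events;
#define EXAMPLE_RAMP_DOWN 0x1

/* Record the event under the lock, then wake whoever sleeps on the
 * wait queue; the worker re-checks pending_events when it runs. */
static void example_signal_ramp_down(void)
{
	unsigned long flags;

	spin_lock_irqsave(&evt_lock, flags);
	pending_events |= EXAMPLE_RAMP_DOWN;
	spin_unlock_irqrestore(&evt_lock, flags);

	wake_up(&worker_wq);
}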
+
+/*
+ * This function is called with no lock held when there is a successful
+ * SCSI command completion.
+ */
+static inline void
+lpfc_rampup_queue_depth(struct lpfc_hba *phba,
+ struct scsi_device *sdev)
+{
+ unsigned long flags;
+ atomic_inc(&phba->num_cmd_success);
+
+ if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
+ return;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
+ ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return;
+ }
+
+ phba->last_ramp_up_time = jiffies;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ spin_lock_irqsave(&phba->pport->work_port_lock, flags);
+ if ((phba->pport->work_port_events &
+ WORKER_RAMP_UP_QUEUE) == 0) {
+ phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
+ }
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ if (phba->work_wait)
+ wake_up(phba->work_wait);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+}
+
+void
+lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev;
+ unsigned long new_queue_depth;
+ unsigned long num_rsrc_err, num_cmd_success;
+
+ num_rsrc_err = atomic_read(&phba->num_rsrc_err);
+ num_cmd_success = atomic_read(&phba->num_cmd_success);
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ host = lpfc_shost_from_vport(vport);
+ if (!scsi_host_get(host))
+ continue;
+
+ spin_unlock_irq(&phba->hbalock);
+
+ shost_for_each_device(sdev, host) {
+ new_queue_depth = sdev->queue_depth * num_rsrc_err /
+ (num_rsrc_err + num_cmd_success);
+ if (!new_queue_depth)
+ new_queue_depth = sdev->queue_depth - 1;
+ else
+ new_queue_depth =
+ sdev->queue_depth - new_queue_depth;
+
+ if (sdev->ordered_tags)
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+ new_queue_depth);
+ else
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+ new_queue_depth);
+ }
+ spin_lock_irq(&phba->hbalock);
+ scsi_host_put(host);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ atomic_set(&phba->num_rsrc_err, 0);
+ atomic_set(&phba->num_cmd_success, 0);
+}
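The per-LUN cut applied above is proportional to the share of completions that were resource errors, and is always at least one. A standalone sketch of that arithmetic with made-up counter values (the driver only reaches this after at least one resource error, so the divisor is never zero):

#include <stdio.h>

static unsigned long ramp_down_depth(unsigned long cur_depth,
				     unsigned long num_rsrc_err,
				     unsigned long num_cmd_success)
{
	/* Scale the reduction by the error ratio, as in the handler above. */
	unsigned long cut = cur_depth * num_rsrc_err /
			    (num_rsrc_err + num_cmd_success);

	if (!cut)
		return cur_depth - 1;	/* always step down by at least one */
	return cur_depth - cut;
}

int main(void)
{
	/* depth 30 with 5 errors vs 95 successes -> cut 1 -> new depth 29 */
	printf("%lu\n", ramp_down_depth(30, 5, 95));
	return 0;
}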
+
+void
+lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ host = lpfc_shost_from_vport(vport);
+ if (!scsi_host_get(host))
+ continue;
+
+ spin_unlock_irq(&phba->hbalock);
+ shost_for_each_device(sdev, host) {
+ if (sdev->ordered_tags)
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
+ sdev->queue_depth+1);
+ else
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
+ sdev->queue_depth+1);
+ }
+ spin_lock_irq(&phba->hbalock);
+ scsi_host_put(host);
+ }
+ spin_unlock_irq(&phba->hbalock);
+ atomic_set(&phba->num_rsrc_err, 0);
+ atomic_set(&phba->num_cmd_success, 0);
+}
/*
* This routine allocates a scsi buffer, which contains all the necessary
@@ -51,8 +199,9 @@
* and the BPL BDE is setup in the IOCB.
*/
static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_hba * phba)
+lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
@@ -63,7 +212,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
if (!psb)
return NULL;
memset(psb, 0, sizeof (struct lpfc_scsi_buf));
- psb->scsi_hba = phba;
/*
* Get memory from the pci pool to map the virt space to pci bus space
@@ -155,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
}
static void
-lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
unsigned long iflag = 0;
@@ -166,7 +314,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
}
static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
@@ -175,8 +323,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
uint32_t i, num_bde = 0;
- int datadir = scsi_cmnd->sc_data_direction;
- int dma_error;
+ int nseg, datadir = scsi_cmnd->sc_data_direction;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -185,26 +332,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
* data bde entry.
*/
bpl += 2;
- if (scsi_cmnd->use_sg) {
+ if (scsi_sg_count(scsi_cmnd)) {
/*
* The driver stores the segment count returned from pci_map_sg
* because this is a count of dma-mappings used to map the use_sg
* pages. They are not guaranteed to be the same for those
* architectures that implement an IOMMU.
*/
- sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
- lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
- scsi_cmnd->use_sg, datadir);
- if (lpfc_cmd->seg_cnt == 0)
+
+ nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!nseg))
return 1;
+ lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d",
__FUNCTION__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
- dma_unmap_sg(&phba->pcidev->dev, sgel,
- lpfc_cmd->seg_cnt, datadir);
+ scsi_dma_unmap(scsi_cmnd);
return 1;
}
@@ -214,7 +361,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
* single scsi command. Just run through the seg_cnt and format
* the bde's.
*/
- for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
physaddr = sg_dma_address(sgel);
bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -225,34 +372,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
bpl++;
- sgel++;
num_bde++;
}
- } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
- physaddr = dma_map_single(&phba->pcidev->dev,
- scsi_cmnd->request_buffer,
- scsi_cmnd->request_bufflen,
- datadir);
- dma_error = dma_mapping_error(physaddr);
- if (dma_error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0718 Unable to dma_map_single "
- "request_buffer: x%x\n",
- phba->brd_no, dma_error);
- return 1;
- }
-
- lpfc_cmd->nonsg_phys = physaddr;
- bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
- bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
- if (datadir == DMA_TO_DEVICE)
- bpl->tus.f.bdeFlags = 0;
- else
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- num_bde = 1;
- bpl++;
}
/*
@@ -266,7 +387,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
(num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
iocb_cmd->ulpLe = 1;
- fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
+ fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
return 0;
}
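The hunk above replaces direct use of use_sg/request_buffer with the scatterlist accessors, so the single-buffer branch disappears entirely. A condensed sketch of that accessor pattern (not the driver's own routine; error paths and BPL setup are omitted):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

static int example_map_cmd(struct device *dev, struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	if (!scsi_sg_count(cmd))
		return 0;			/* no data phase */

	nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
			  cmd->sc_data_direction);
	if (!nseg)
		return -EIO;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* sg_dma_address(sg) / sg_dma_len(sg) would be handed
		 * to the hardware here. */
	}

	scsi_dma_unmap(cmd);			/* single-call teardown */
	return 0;
}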
@@ -279,26 +400,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
* a request buffer, but did not request use_sg. There is a third
* case, but it does not require resource deallocation.
*/
- if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
- dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
- psb->seg_cnt, psb->pCmd->sc_data_direction);
- } else {
- if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
- dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
- psb->pCmd->request_bufflen,
- psb->pCmd->sc_data_direction);
- }
- }
+ if (psb->seg_cnt > 0)
+ scsi_dma_unmap(psb->pCmd);
}
static void
-lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
+lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_iocbq *rsp_iocb)
{
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
- struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
+ struct lpfc_hba *phba = vport->phba;
uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ uint32_t vpi = vport->vpi;
uint32_t resp_info = fcprsp->rspStatus2;
uint32_t scsi_status = fcprsp->rspStatus3;
uint32_t *lp;
@@ -331,9 +446,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
logit = LOG_FCP;
lpfc_printf_log(phba, KERN_WARNING, logit,
- "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+ "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
- phba->brd_no, cmnd->cmnd[0], scsi_status,
+ phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
be32_to_cpu(fcprsp->rspResId),
be32_to_cpu(fcprsp->rspSnsLen),
@@ -349,15 +464,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
}
}
- cmnd->resid = 0;
+ scsi_set_resid(cmnd, 0);
if (resp_info & RESID_UNDER) {
- cmnd->resid = be32_to_cpu(fcprsp->rspResId);
+ scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0716 FCP Read Underrun, expected %d, "
- "residual %d Data: x%x x%x x%x\n", phba->brd_no,
- be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
- fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
+ "%d (%d):0716 FCP Read Underrun, expected %d, "
+ "residual %d Data: x%x x%x x%x\n",
+ phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
+ scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
+ cmnd->underflow);
/*
* If there is an under run check if under run reported by
@@ -366,15 +482,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
*/
if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
fcpi_parm &&
- (cmnd->resid != fcpi_parm)) {
+ (scsi_get_resid(cmnd) != fcpi_parm)) {
lpfc_printf_log(phba, KERN_WARNING,
- LOG_FCP | LOG_FCP_ERROR,
- "%d:0735 FCP Read Check Error and Underrun "
- "Data: x%x x%x x%x x%x\n", phba->brd_no,
- be32_to_cpu(fcpcmd->fcpDl),
- cmnd->resid,
- fcpi_parm, cmnd->cmnd[0]);
- cmnd->resid = cmnd->request_bufflen;
+ LOG_FCP | LOG_FCP_ERROR,
+ "%d (%d):0735 FCP Read Check Error "
+ "and Underrun Data: x%x x%x x%x x%x\n",
+ phba->brd_no, vpi,
+ be32_to_cpu(fcpcmd->fcpDl),
+ scsi_get_resid(cmnd), fcpi_parm,
+ cmnd->cmnd[0]);
+ scsi_set_resid(cmnd, scsi_bufflen(cmnd));
host_status = DID_ERROR;
}
/*
@@ -385,22 +502,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
*/
if (!(resp_info & SNS_LEN_VALID) &&
(scsi_status == SAM_STAT_GOOD) &&
- (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
+ (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
+ < cmnd->underflow)) {
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0717 FCP command x%x residual "
+ "%d (%d):0717 FCP command x%x residual "
"underrun converted to error "
- "Data: x%x x%x x%x\n", phba->brd_no,
- cmnd->cmnd[0], cmnd->request_bufflen,
- cmnd->resid, cmnd->underflow);
-
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, vpi, cmnd->cmnd[0],
+ scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->underflow);
host_status = DID_ERROR;
}
} else if (resp_info & RESID_OVER) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0720 FCP command x%x residual "
+ "%d (%d):0720 FCP command x%x residual "
"overrun error. Data: x%x x%x \n",
- phba->brd_no, cmnd->cmnd[0],
- cmnd->request_bufflen, cmnd->resid);
+ phba->brd_no, vpi, cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
/*
@@ -410,13 +528,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "%d:0734 FCP Read Check Error Data: "
- "x%x x%x x%x x%x\n", phba->brd_no,
- be32_to_cpu(fcpcmd->fcpDl),
- be32_to_cpu(fcprsp->rspResId),
- fcpi_parm, cmnd->cmnd[0]);
+ "%d (%d):0734 FCP Read Check Error Data: "
+ "x%x x%x x%x x%x\n",
+ phba->brd_no, vpi,
+ be32_to_cpu(fcpcmd->fcpDl),
+ be32_to_cpu(fcprsp->rspResId),
+ fcpi_parm, cmnd->cmnd[0]);
host_status = DID_ERROR;
- cmnd->resid = cmnd->request_bufflen;
+ scsi_set_resid(cmnd, scsi_bufflen(cmnd));
}
out:
@@ -429,9 +548,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
{
struct lpfc_scsi_buf *lpfc_cmd =
(struct lpfc_scsi_buf *) pIocbIn->context1;
+ struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
+ ? lpfc_cmd->cur_iocbq.vport->vpi
+ : 0);
int result;
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
@@ -447,22 +570,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DEFAULT;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0729 FCP cmd x%x failed <%d/%d> status: "
- "x%x result: x%x Data: x%x x%x\n",
- phba->brd_no, cmd->cmnd[0], cmd->device->id,
- cmd->device->lun, lpfc_cmd->status,
- lpfc_cmd->result, pIocbOut->iocb.ulpContext,
+ "%d (%d):0729 FCP cmd x%x failed <%d/%d> "
+ "status: x%x result: x%x Data: x%x x%x\n",
+ phba->brd_no, vpi, cmd->cmnd[0],
+ cmd->device ? cmd->device->id : 0xffff,
+ cmd->device ? cmd->device->lun : 0xffff,
+ lpfc_cmd->status, lpfc_cmd->result,
+ pIocbOut->iocb.ulpContext,
lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
switch (lpfc_cmd->status) {
case IOSTAT_FCP_RSP_ERROR:
/* Call FCP RSP handler to determine result */
- lpfc_handle_fcp_err(lpfc_cmd,pIocbOut);
+ lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
cmd->result = ScsiResult(DID_BUS_BUSY, 0);
break;
+ case IOSTAT_LOCAL_REJECT:
+ if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+ lpfc_cmd->result == IOERR_NO_RESOURCES ||
+ lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+ cmd->result = ScsiResult(DID_REQUEUE, 0);
+ break;
+ } /* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
break;
@@ -479,11 +611,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
- "SNS x%x x%x Data: x%x x%x\n",
- phba->brd_no, cmd->device->id,
+ "%d (%d):0710 Iodone <%d/%d> cmd %p, error "
+ "x%x SNS x%x x%x Data: x%x x%x\n",
+ phba->brd_no, vpi, cmd->device->id,
cmd->device->lun, cmd, cmd->result,
- *lp, *(lp + 3), cmd->retries, cmd->resid);
+ *lp, *(lp + 3), cmd->retries,
+ scsi_get_resid(cmd));
}
result = cmd->result;
@@ -496,6 +629,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
return;
}
+
+ if (!result)
+ lpfc_rampup_queue_depth(phba, sdev);
+
if (!result && pnode != NULL &&
((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -534,7 +671,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
tmp_sdev->queue_depth - 1);
}
/*
- * The queue depth cannot be lowered any more.
+ * The queue depth cannot be lowered any more.
* Modify the returned error code to store
* the final depth value set by
* scsi_track_queue_full.
@@ -544,8 +681,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (depth) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0711 detected queue full - lun queue depth "
- " adjusted to %d.\n", phba->brd_no, depth);
+ "%d (%d):0711 detected queue full - "
+ "lun queue depth adjusted to %d.\n",
+ phba->brd_no, vpi, depth);
}
}
@@ -553,9 +691,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
static void
-lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
- struct lpfc_nodelist *pnode)
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_nodelist *pnode)
{
+ struct lpfc_hba *phba = vport->phba;
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
@@ -592,7 +731,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
* bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
* data bde entry.
*/
- if (scsi_cmnd->use_sg) {
+ if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -602,23 +741,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
- iocb_cmd->un.fcpi.fcpi_parm =
- scsi_cmnd->request_bufflen;
- fcp_cmnd->fcpCntl3 = READ_DATA;
- phba->fc4InputRequests++;
- }
- } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
- if (datadir == DMA_TO_DEVICE) {
- iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
- iocb_cmd->un.fcpi.fcpi_parm = 0;
- iocb_cmd->ulpPU = 0;
- fcp_cmnd->fcpCntl3 = WRITE_DATA;
- phba->fc4OutputRequests++;
- } else {
- iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
- iocb_cmd->ulpPU = PARM_READ_CHECK;
- iocb_cmd->un.fcpi.fcpi_parm =
- scsi_cmnd->request_bufflen;
+ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4InputRequests++;
}
@@ -642,15 +765,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
piocbq->context1 = lpfc_cmd;
piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
+ piocbq->vport = vport;
}
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
unsigned int lun,
uint8_t task_mgmt_cmd)
{
- struct lpfc_sli *psli;
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
struct fcp_cmnd *fcp_cmnd;
@@ -661,8 +784,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
return 0;
}
- psli = &phba->sli;
piocbq = &(lpfc_cmd->cur_iocbq);
+ piocbq->vport = vport;
+
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
@@ -688,7 +812,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
piocb->ulpTimeout = lpfc_cmd->timeout;
}
- return (1);
+ return 1;
}
static void
@@ -704,10 +828,11 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
}
static int
-lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
+lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
unsigned tgt_id, unsigned int lun,
struct lpfc_rport_data *rdata)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
@@ -716,12 +841,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
return FAILED;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
+ ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
FCP_TARGET_RESET);
if (!ret)
return FAILED;
- lpfc_cmd->scsi_hba = phba;
iocbq = &lpfc_cmd->cur_iocbq;
iocbqrsp = lpfc_sli_get_iocbq(phba);
@@ -730,10 +854,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
/* Issue Target Reset to TGT <num> */
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0702 Issue Target Reset to TGT %d "
+ "%d (%d):0702 Issue Target Reset to TGT %d "
"Data: x%x x%x\n",
- phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
- rdata->pnode->nlp_flag);
+ phba->brd_no, vport->vpi, tgt_id,
+ rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
ret = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
@@ -758,7 +882,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
const char *
lpfc_info(struct Scsi_Host *host)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
int len;
static char lpfcinfobuf[384];
@@ -800,26 +925,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
void lpfc_poll_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
- unsigned long iflag;
-
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring (phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
-
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
- struct lpfc_hba *phba =
- (struct lpfc_hba *) cmnd->device->host->hostdata;
- struct lpfc_sli *psli = &phba->sli;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct lpfc_scsi_buf *lpfc_cmd;
@@ -840,11 +961,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
goto out_fail_command;
}
- lpfc_cmd = lpfc_get_scsi_buf (phba);
+ lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
+ lpfc_adjust_queue_depth(phba);
+
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0707 driver's buffer pool is empty, "
- "IO busied\n", phba->brd_no);
+ "%d (%d):0707 driver's buffer pool is empty, "
+ "IO busied\n",
+ phba->brd_no, vport->vpi);
goto out_host_busy;
}
@@ -862,10 +986,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
if (err)
goto out_host_busy_free_buf;
- lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);
+ lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
- &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+ &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
if (err)
goto out_host_busy_free_buf;
@@ -907,8 +1031,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
struct lpfc_iocbq *iocb;
struct lpfc_iocbq *abtsiocb;
@@ -918,8 +1043,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS;
lpfc_block_error_handler(cmnd);
- spin_lock_irq(shost->host_lock);
-
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
BUG_ON(!lpfc_cmd);
@@ -956,12 +1079,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
icmd->ulpLe = 1;
icmd->ulpClass = cmd->ulpClass;
- if (phba->hba_state >= LPFC_LINK_UP)
+ if (lpfc_is_link_up(phba))
icmd->ulpCommand = CMD_ABORT_XRI_CN;
else
icmd->ulpCommand = CMD_CLOSE_XRI_CN;
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocb->vport = vport;
if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
@@ -977,9 +1101,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_poll_fcp_ring (phba);
- spin_unlock_irq(phba->host->host_lock);
- schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
- spin_lock_irq(phba->host->host_lock);
+ schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
if (++loop_count
> (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
break;
@@ -988,30 +1110,30 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (lpfc_cmd->pCmd == cmnd) {
ret = FAILED;
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0748 abort handler timed out waiting for "
- "abort to complete: ret %#x, ID %d, LUN %d, "
- "snum %#lx\n",
- phba->brd_no, ret, cmnd->device->id,
- cmnd->device->lun, cmnd->serial_number);
+ "%d (%d):0748 abort handler timed out waiting "
+ "for abort to complete: ret %#x, ID %d, "
+ "LUN %d, snum %#lx\n",
+ phba->brd_no, vport->vpi, ret,
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
}
out:
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0749 SCSI Layer I/O Abort Request "
+ "%d (%d):0749 SCSI Layer I/O Abort Request "
"Status x%x ID %d LUN %d snum %#lx\n",
- phba->brd_no, ret, cmnd->device->id,
+ phba->brd_no, vport->vpi, ret, cmnd->device->id,
cmnd->device->lun, cmnd->serial_number);
- spin_unlock_irq(shost->host_lock);
-
return ret;
}
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq, *iocbqrsp;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -1022,28 +1144,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int cnt, loopcnt;
lpfc_block_error_handler(cmnd);
- spin_lock_irq(shost->host_lock);
loopcnt = 0;
/*
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires.
*/
- while ( 1 ) {
+ while (1) {
if (!pnode)
goto out;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
- spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- spin_lock_irq(phba->host->host_lock);
loopcnt++;
rdata = cmnd->device->hostdata;
if (!rdata ||
(loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0721 LUN Reset rport failure:"
- " cnt x%x rdata x%p\n",
- phba->brd_no, loopcnt, rdata);
+ "%d (%d):0721 LUN Reset rport "
+ "failure: cnt x%x rdata x%p\n",
+ phba->brd_no, vport->vpi,
+ loopcnt, rdata);
goto out;
}
pnode = rdata->pnode;
@@ -1054,15 +1174,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
break;
}
- lpfc_cmd = lpfc_get_scsi_buf (phba);
+ lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
goto out;
lpfc_cmd->timeout = 60;
- lpfc_cmd->scsi_hba = phba;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
+ ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
FCP_TARGET_RESET);
if (!ret)
goto out_free_scsi_buf;
@@ -1075,8 +1194,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
goto out_free_scsi_buf;
lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
- "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
- "nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
+ "%d (%d):0703 Issue target reset to TGT %d LUN %d "
+ "rpi x%x nlp_flag x%x\n",
+ phba->brd_no, vport->vpi, cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1111,9 +1231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
0, LPFC_CTX_LUN);
loopcnt = 0;
while(cnt) {
- spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
- spin_lock_irq(phba->host->host_lock);
if (++loopcnt
> (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1127,8 +1245,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0719 device reset I/O flush failure: cnt x%x\n",
- phba->brd_no, cnt);
+ "%d (%d):0719 device reset I/O flush failure: "
+ "cnt x%x\n",
+ phba->brd_no, vport->vpi, cnt);
ret = FAILED;
}
@@ -1137,21 +1256,21 @@ out_free_scsi_buf:
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0713 SCSI layer issued device reset (%d, %d) "
+ "%d (%d):0713 SCSI layer issued device reset (%d, %d) "
"return x%x status x%x result x%x\n",
- phba->brd_no, cmnd->device->id, cmnd->device->lun,
- ret, cmd_status, cmd_result);
+ phba->brd_no, vport->vpi, cmnd->device->id,
+ cmnd->device->lun, ret, cmd_status, cmd_result);
out:
- spin_unlock_irq(shost->host_lock);
return ret;
}
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
int ret = FAILED, i, err_count = 0;
@@ -1159,7 +1278,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_buf * lpfc_cmd;
lpfc_block_error_handler(cmnd);
- spin_lock_irq(shost->host_lock);
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
@@ -1167,7 +1285,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
/* The lpfc_cmd storage is reused. Set all loop invariants. */
lpfc_cmd->timeout = 60;
- lpfc_cmd->scsi_hba = phba;
/*
* Since the driver manages a single bus device, reset all
@@ -1177,7 +1294,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
for (i = 0; i < LPFC_MAX_TARGET; i++) {
/* Search for mapped node by target ID */
match = 0;
- list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
i == ndlp->nlp_sid &&
ndlp->rport) {
@@ -1185,15 +1303,18 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
break;
}
}
+ spin_unlock_irq(shost->host_lock);
if (!match)
continue;
- ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
+ ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+ cmnd->device->lun,
ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0700 Bus Reset on target %d failed\n",
- phba->brd_no, i);
+ "%d (%d):0700 Bus Reset on target %d "
+ "failed\n",
+ phba->brd_no, vport->vpi, i);
err_count++;
break;
}
@@ -1219,9 +1340,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
0, 0, 0, LPFC_CTX_HOST);
loopcnt = 0;
while(cnt) {
- spin_unlock_irq(phba->host->host_lock);
schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
- spin_lock_irq(phba->host->host_lock);
if (++loopcnt
> (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1234,25 +1353,24 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
- phba->brd_no, cnt, i);
+ "%d (%d):0715 Bus Reset I/O flush failure: "
+ "cnt x%x left x%x\n",
+ phba->brd_no, vport->vpi, cnt, i);
ret = FAILED;
}
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_FCP,
- "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
- phba->brd_no, ret);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
+ phba->brd_no, vport->vpi, ret);
out:
- spin_unlock_irq(shost->host_lock);
return ret;
}
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *scsi_buf = NULL;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
uint32_t total = 0, i;
@@ -1273,27 +1391,35 @@ lpfc_slave_alloc(struct scsi_device *sdev)
*/
total = phba->total_scsi_bufs;
num_to_alloc = phba->cfg_lun_queue_depth + 2;
- if (total >= phba->cfg_hba_queue_depth) {
+
+ /* Always keep some exchanges available to complete discovery */
+ if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0704 At limitation of %d preallocated "
- "command buffers\n", phba->brd_no, total);
+ "%d (%d):0704 At limitation of %d "
+ "preallocated command buffers\n",
+ phba->brd_no, vport->vpi, total);
return 0;
- } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
+
+ /* Always keep some exchanges available to complete discovery */
+ } else if (total + num_to_alloc >
+ phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
- "%d:0705 Allocation request of %d command "
- "buffers will exceed max of %d. Reducing "
- "allocation request to %d.\n", phba->brd_no,
- num_to_alloc, phba->cfg_hba_queue_depth,
+ "%d (%d):0705 Allocation request of %d "
+ "command buffers will exceed max of %d. "
+ "Reducing allocation request to %d.\n",
+ phba->brd_no, vport->vpi, num_to_alloc,
+ phba->cfg_hba_queue_depth,
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
for (i = 0; i < num_to_alloc; i++) {
- scsi_buf = lpfc_new_scsi_buf(phba);
+ scsi_buf = lpfc_new_scsi_buf(vport);
if (!scsi_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0706 Failed to allocate command "
- "buffer\n", phba->brd_no);
+ "%d (%d):0706 Failed to allocate "
+ "command buffer\n",
+ phba->brd_no, vport->vpi);
break;
}
@@ -1308,8 +1434,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
- struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+ struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
if (sdev->tagged_supported)
scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
@@ -1340,6 +1467,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
return;
}
+
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
@@ -1352,11 +1480,10 @@ struct scsi_host_template lpfc_template = {
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
- .scan_start = lpfc_scan_start,
.this_id = -1,
.sg_tablesize = LPFC_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
- .shost_attrs = lpfc_host_attrs,
+ .shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cdcd2535803f..31787bb6d53e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -110,7 +110,6 @@ struct fcp_cmnd {
struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
- struct lpfc_hba *scsi_hba;
struct lpfc_rport_data *rdata;
uint32_t timeout;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a1e721459e2b..f4d5a6b00fde 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -38,23 +38,25 @@
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
+#include "lpfc_debugfs.h"
/*
* Define macro to log: Mailbox command x%x cannot issue Data
* This allows multiple uses of lpfc_msgBlk0311
* w/o perturbing log msg utility.
*/
-#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
+#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
lpfc_printf_log(phba, \
KERN_INFO, \
LOG_MBOX | LOG_SLI, \
- "%d:0311 Mailbox command x%x cannot issue " \
- "Data: x%x x%x x%x\n", \
+ "%d (%d):0311 Mailbox command x%x cannot " \
+ "issue Data: x%x x%x x%x\n", \
phba->brd_no, \
- mb->mbxCommand, \
- phba->hba_state, \
+ pmbox->vport ? pmbox->vport->vpi : 0, \
+ pmbox->mb.mbxCommand, \
+ phba->pport->port_state, \
psli->sli_flag, \
- flag);
+ flag)
/* There are only four IOCB completion types. */
@@ -65,8 +67,26 @@ typedef enum _lpfc_iocb_type {
LPFC_ABORT_IOCB
} lpfc_iocb_type;
-struct lpfc_iocbq *
-lpfc_sli_get_iocbq(struct lpfc_hba * phba)
+ /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
+ * to the start of the ring, and the slot number of the
+ * desired iocb entry, calc a pointer to that entry.
+ */
+static inline IOCB_t *
+lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ return (IOCB_t *) (((char *) pring->cmdringaddr) +
+ pring->cmdidx * phba->iocb_cmd_size);
+}
+
+static inline IOCB_t *
+lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ return (IOCB_t *) (((char *) pring->rspringaddr) +
+ pring->rspidx * phba->iocb_rsp_size);
+}
+
+static struct lpfc_iocbq *
+__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
struct lpfc_iocbq * iocbq = NULL;
@@ -75,10 +95,22 @@ lpfc_sli_get_iocbq(struct lpfc_hba * phba)
return iocbq;
}
+struct lpfc_iocbq *
+lpfc_sli_get_iocbq(struct lpfc_hba *phba)
+{
+ struct lpfc_iocbq * iocbq = NULL;
+ unsigned long iflags;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ iocbq = __lpfc_sli_get_iocbq(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return iocbq;
+}
+
void
-lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
- size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);
+ size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
/*
* Clean all volatile data fields, preserve iotag and node struct.
@@ -87,6 +119,19 @@ lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
+void
+lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ unsigned long iflags;
+
+ /*
+ * Clean all volatile data fields, preserve iotag and node struct.
+ */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ __lpfc_sli_release_iocbq(phba, iocbq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
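The split above into __lpfc_sli_get_iocbq/__lpfc_sli_release_iocbq and their locking wrappers follows the usual kernel convention: the double-underscore variant assumes hbalock is already held, the plain one acquires it. A small sketch of the same idiom with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_count;

/* Caller must hold example_lock. */
static int __example_get(void)
{
	return example_count++;
}

/* Takes the lock itself, then defers to the __ variant. */
static int example_get(void)
{
	unsigned long flags;
	int v;

	spin_lock_irqsave(&example_lock, flags);
	v = __example_get();
	spin_unlock_irqrestore(&example_lock, flags);
	return v;
}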
+
/*
* Translate the iocb command to an iocb command type used to decide the final
* disposition of each completed IOCB.
@@ -155,6 +200,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case CMD_RCV_ELS_REQ_CX:
case CMD_RCV_SEQUENCE64_CX:
case CMD_RCV_ELS_REQ64_CX:
+ case CMD_IOCB_RCV_SEQ64_CX:
+ case CMD_IOCB_RCV_ELS64_CX:
+ case CMD_IOCB_RCV_CONT64_CX:
type = LPFC_UNSOL_IOCB;
break;
default:
@@ -166,73 +214,77 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
}
static int
-lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
+lpfc_sli_ring_map(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
- MAILBOX_t *pmbox = &pmb->mb;
- int i, rc;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *pmbox;
+ int i, rc, ret = 0;
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb)
+ return -ENOMEM;
+ pmbox = &pmb->mb;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
for (i = 0; i < psli->num_rings; i++) {
- phba->hba_state = LPFC_INIT_MBX_CMDS;
lpfc_config_ring(phba, i, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
- "%d:0446 Adapter failed to init, "
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "%d:0446 Adapter failed to init (%d), "
"mbxCmd x%x CFG_RING, mbxStatus x%x, "
"ring %d\n",
- phba->brd_no,
+ phba->brd_no, rc,
pmbox->mbxCommand,
pmbox->mbxStatus,
i);
- phba->hba_state = LPFC_HBA_ERROR;
- return -ENXIO;
+ phba->link_state = LPFC_HBA_ERROR;
+ ret = -ENXIO;
+ break;
}
}
- return 0;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return ret;
}
static int
-lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
+lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
{
list_add_tail(&piocb->list, &pring->txcmplq);
pring->txcmplq_cnt++;
- if (unlikely(pring->ringno == LPFC_ELS_RING))
- mod_timer(&phba->els_tmofunc,
- jiffies + HZ * (phba->fc_ratov << 1));
+ if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
+ (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ if (!piocb->vport)
+ BUG();
+ else
+ mod_timer(&piocb->vport->els_tmofunc,
+ jiffies + HZ * (phba->fc_ratov << 1));
+ }
- return (0);
+
+ return 0;
}
static struct lpfc_iocbq *
-lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct list_head *dlp;
struct lpfc_iocbq *cmd_iocb;
- dlp = &pring->txq;
- cmd_iocb = NULL;
- list_remove_head((&pring->txq), cmd_iocb,
- struct lpfc_iocbq,
- list);
- if (cmd_iocb) {
- /* If the first ptr is not equal to the list header,
- * deque the IOCBQ_t and return it.
- */
+ list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
+ if (cmd_iocb != NULL)
pring->txq_cnt--;
- }
- return (cmd_iocb);
+ return cmd_iocb;
}
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
uint32_t max_cmd_idx = pring->numCiocb;
- IOCB_t *iocb = NULL;
if ((pring->next_cmdidx == pring->cmdidx) &&
(++pring->next_cmdidx >= max_cmd_idx))
@@ -249,15 +301,17 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
phba->brd_no, pring->ringno,
pring->local_getidx, max_cmd_idx);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
* worker thread
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
+
+ /* hbalock should already be held */
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
return NULL;
}
@@ -266,39 +320,34 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return NULL;
}
- iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
-
- return iocb;
+ return lpfc_cmd_iocb(phba, pring);
}
uint16_t
-lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
+lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
- struct lpfc_iocbq ** new_arr;
- struct lpfc_iocbq ** old_arr;
+ struct lpfc_iocbq **new_arr;
+ struct lpfc_iocbq **old_arr;
size_t new_len;
struct lpfc_sli *psli = &phba->sli;
uint16_t iotag;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
iotag = psli->last_iotag;
if(++iotag < psli->iocbq_lookup_len) {
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
return iotag;
- }
- else if (psli->iocbq_lookup_len < (0xffff
+ } else if (psli->iocbq_lookup_len < (0xffff
- LPFC_IOCBQ_LOOKUP_INCREMENT)) {
new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
- spin_unlock_irq(phba->host->host_lock);
- new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
+ spin_unlock_irq(&phba->hbalock);
+ new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
GFP_KERNEL);
if (new_arr) {
- memset((char *)new_arr, 0,
- new_len * sizeof (struct lpfc_iocbq *));
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
old_arr = psli->iocbq_lookup;
if (new_len <= psli->iocbq_lookup_len) {
/* highly improbable case */
@@ -307,11 +356,11 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
if(++iotag < psli->iocbq_lookup_len) {
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
return iotag;
}
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
if (psli->iocbq_lookup)
@@ -322,13 +371,13 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
psli->iocbq_lookup_len = new_len;
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
kfree(old_arr);
return iotag;
}
} else
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -349,7 +398,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Issue iocb command to adapter
*/
- lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
+ lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
wmb();
pring->stats.iocb_cmd++;
@@ -361,20 +410,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (nextiocb->iocb_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
- lpfc_sli_release_iocbq(phba, nextiocb);
+ __lpfc_sli_release_iocbq(phba, nextiocb);
/*
* Let the HBA know what IOCB slot will be the next one the
* driver will put a command into.
*/
pring->cmdidx = pring->next_cmdidx;
- writel(pring->cmdidx, phba->MBslimaddr
- + (SLIMOFF + (pring->ringno * 2)) * 4);
+ writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
static void
-lpfc_sli_update_full_ring(struct lpfc_hba * phba,
- struct lpfc_sli_ring *pring)
+lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
int ringno = pring->ringno;
@@ -393,8 +440,7 @@ lpfc_sli_update_full_ring(struct lpfc_hba * phba,
}
static void
-lpfc_sli_update_ring(struct lpfc_hba * phba,
- struct lpfc_sli_ring *pring)
+lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
int ringno = pring->ringno;
@@ -407,7 +453,7 @@ lpfc_sli_update_ring(struct lpfc_hba * phba,
}
static void
-lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
+lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
IOCB_t *iocb;
struct lpfc_iocbq *nextiocb;
@@ -420,7 +466,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
if (pring->txq_cnt &&
- (phba->hba_state > LPFC_LINK_DOWN) &&
+ lpfc_is_link_up(phba) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA) &&
!(pring->flag & LPFC_STOP_IOCB_MBX)) {
@@ -440,11 +486,15 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
-lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
+lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
+ &phba->slim2p->mbx.us.s2.port[ringno];
+ unsigned long iflags;
/* If the ring is active, flag it */
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli.ring[ringno].cmdringaddr) {
if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
@@ -453,11 +503,176 @@ lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
*/
phba->sli.ring[ringno].local_getidx
= le32_to_cpu(pgp->cmdGetInx);
- spin_lock_irq(phba->host->host_lock);
lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
- spin_unlock_irq(phba->host->host_lock);
}
}
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+struct lpfc_hbq_entry *
+lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
+{
+ struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+ if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
+ ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
+ hbqp->next_hbqPutIdx = 0;
+
+ if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
+ uint32_t raw_index = phba->hbq_get[hbqno];
+ uint32_t getidx = le32_to_cpu(raw_index);
+
+ hbqp->local_hbqGetIdx = getidx;
+
+ if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "%d:1802 HBQ %d: local_hbqGetIdx "
+ "%u is > than hbqp->entry_count %u\n",
+ phba->brd_no, hbqno,
+ hbqp->local_hbqGetIdx,
+ hbqp->entry_count);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ return NULL;
+ }
+
+ if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
+ return NULL;
+ }
+
+ return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
+}
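The put/get index handling above is the standard "keep one slot empty" circular-buffer scheme: advancing the put index into the get index means the queue is full, and the cached get index is refreshed from the adapter-updated pointer before giving up. A tiny standalone sketch of that occupancy test (size and names are illustrative):

#include <stdio.h>

#define RING_ENTRIES 8

struct ring {
	unsigned int put;	/* next slot the producer would fill */
	unsigned int get;	/* next slot the consumer will drain */
};

/* Full when advancing put (with wrap) would land on get. */
static int ring_can_put(const struct ring *r)
{
	unsigned int next = (r->put + 1) % RING_ENTRIES;

	return next != r->get;
}

int main(void)
{
	struct ring r = { .put = 6, .get = 7 };

	printf("can_put=%d\n", ring_can_put(&r));	/* prints 0: full */
	return 0;
}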
+
+void
+lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
+{
+ struct lpfc_dmabuf *dmabuf, *next_dmabuf;
+ struct hbq_dmabuf *hbq_buf;
+
+ /* Return all memory used by all HBQs */
+ list_for_each_entry_safe(dmabuf, next_dmabuf,
+ &phba->hbq_buffer_list, list) {
+ hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
+ list_del(&hbq_buf->dbuf.list);
+ lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
+ kfree(hbq_buf);
+ }
+}
+
+static void
+lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
+ struct hbq_dmabuf *hbq_buf)
+{
+ struct lpfc_hbq_entry *hbqe;
+ dma_addr_t physaddr = hbq_buf->dbuf.phys;
+
+ /* Get next HBQ entry slot to use */
+ hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
+ if (hbqe) {
+ struct hbq_s *hbqp = &phba->hbqs[hbqno];
+
+ hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ hbqe->bde.tus.f.bdeSize = FCELSSIZE;
+ hbqe->bde.tus.f.bdeFlags = 0;
+ hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
+ hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
+ /* Sync SLIM */
+ hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
+ writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
+ /* flush */
+ readl(phba->hbq_put + hbqno);
+ list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
+ }
+}
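lpfc_sli_hbq_to_firmware() ends with a writel() of the new put index followed by a readl() of the same register; the read forces the posted write out to the adapter before the driver continues. The same flush idiom in isolation (the register pointer is illustrative):

#include <linux/io.h>
#include <linux/types.h>

static void example_post_put_index(void __iomem *put_reg, u32 putidx)
{
	writel(putidx, put_reg);	/* tell the HBA about the new slot */
	readl(put_reg);			/* flush the posted write */
}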
+
+static struct lpfc_hbq_init lpfc_els_hbq = {
+ .rn = 1,
+ .entry_count = 200,
+ .mask_count = 0,
+ .profile = 0,
+ .ring_mask = 1 << LPFC_ELS_RING,
+ .buffer_count = 0,
+ .init_count = 20,
+ .add_count = 5,
+};
+
+static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
+ &lpfc_els_hbq,
+};
+
+int
+lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
+{
+ uint32_t i, start, end;
+ struct hbq_dmabuf *hbq_buffer;
+
+ start = lpfc_hbq_defs[hbqno]->buffer_count;
+ end = count + lpfc_hbq_defs[hbqno]->buffer_count;
+ if (end > lpfc_hbq_defs[hbqno]->entry_count) {
+ end = lpfc_hbq_defs[hbqno]->entry_count;
+ }
+
+ /* Populate HBQ entries */
+ for (i = start; i < end; i++) {
+ hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
+ GFP_KERNEL);
+ if (!hbq_buffer)
+ return 1;
+ hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
+ &hbq_buffer->dbuf.phys);
+ if (hbq_buffer->dbuf.virt == NULL)
+ return 1;
+ hbq_buffer->tag = (i | (hbqno << 16));
+ lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
+ lpfc_hbq_defs[hbqno]->buffer_count++;
+ }
+ return 0;
+}
+
+int
+lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+ return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count));
+}
+
+int
+lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
+{
+ return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count));
+}
+
+struct hbq_dmabuf *
+lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
+{
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *hbq_buf;
+
+ list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if ((hbq_buf->tag & 0xffff) == tag) {
+ return hbq_buf;
+ }
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
+ "%d:1803 Bad hbq tag. Data: x%x x%x\n",
+ phba->brd_no, tag,
+ lpfc_hbq_defs[tag >> 16]->buffer_count);
+ return NULL;
+}
+
+void
+lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
+{
+ uint32_t hbqno;
+
+ if (sp) {
+ hbqno = sp->tag >> 16;
+ lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
+ }
}
static int
@@ -511,32 +726,38 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_FLASH_WR_ULA:
case MBX_SET_DEBUG:
case MBX_LOAD_EXP_ROM:
+ case MBX_REG_VPI:
+ case MBX_UNREG_VPI:
+ case MBX_HEARTBEAT:
ret = mbxCommand;
break;
default:
ret = MBX_SHUTDOWN;
break;
}
- return (ret);
+ return ret;
}
static void
-lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
+lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
wait_queue_head_t *pdone_q;
+ unsigned long drvr_flag;
/*
* If pdone_q is empty, the driver thread gave up waiting and
* continued running.
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
pdone_q = (wait_queue_head_t *) pmboxq->context1;
if (pdone_q)
wake_up_interruptible(pdone_q);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return;
}
void
-lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
+lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_dmabuf *mp;
uint16_t rpi;
@@ -553,79 +774,64 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
*/
- if (!(phba->fc_flag & FC_UNLOADING) &&
- (pmb->mb.mbxCommand == MBX_REG_LOGIN64) &&
- (!pmb->mb.mbxStatus)) {
+ if (!(phba->pport->load_flag & FC_UNLOADING) &&
+ pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
+ !pmb->mb.mbxStatus) {
rpi = pmb->mb.un.varWords[0];
- lpfc_unreg_login(phba, rpi, pmb);
- pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
+ lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
int
-lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
+lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
- MAILBOX_t *mbox;
MAILBOX_t *pmbox;
LPFC_MBOXQ_t *pmb;
- struct lpfc_sli *psli;
- int i, rc;
- uint32_t process_next;
-
- psli = &phba->sli;
- /* We should only get here if we are in SLI2 mode */
- if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
- return (1);
- }
+ int rc;
+ LIST_HEAD(cmplq);
phba->sli.slistat.mbox_event++;
+ /* Get all completed mailbox buffers into the cmplq */
+ spin_lock_irq(&phba->hbalock);
+ list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
+ spin_unlock_irq(&phba->hbalock);
+
/* Get a Mailbox buffer to setup mailbox commands for callback */
- if ((pmb = phba->sli.mbox_active)) {
- pmbox = &pmb->mb;
- mbox = &phba->slim2p->mbx;
+ do {
+ list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
+ if (pmb == NULL)
+ break;
- /* First check out the status word */
- lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
+ pmbox = &pmb->mb;
- /* Sanity check to ensure the host owns the mailbox */
- if (pmbox->mbxOwner != OWN_HOST) {
- /* Lets try for a while */
- for (i = 0; i < 10240; i++) {
- /* First copy command data */
- lpfc_sli_pcimem_bcopy(mbox, pmbox,
- sizeof (uint32_t));
- if (pmbox->mbxOwner == OWN_HOST)
- goto mbout;
+ if (pmbox->mbxCommand != MBX_HEARTBEAT) {
+ if (pmb->vport) {
+ lpfc_debugfs_disc_trc(pmb->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)pmbox->mbxCommand,
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX cmpl: cmd:x%x mb:x%x x%x",
+ (uint32_t)pmbox->mbxCommand,
+ pmbox->un.varWords[0],
+ pmbox->un.varWords[1]);
}
- /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
- <status> */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_MBOX | LOG_SLI,
- "%d:0304 Stray Mailbox Interrupt "
- "mbxCommand x%x mbxStatus x%x\n",
- phba->brd_no,
- pmbox->mbxCommand,
- pmbox->mbxStatus);
-
- spin_lock_irq(phba->host->host_lock);
- phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
- return (1);
}
- mbout:
- del_timer_sync(&phba->sli.mbox_tmo);
- phba->work_hba_events &= ~WORKER_MBOX_TMO;
-
/*
* It is a fatal error if unknown mbox command completion.
*/
@@ -633,51 +839,50 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
MBX_SHUTDOWN) {
/* Unknown mailbox command compl */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_MBOX | LOG_SLI,
- "%d:0323 Unknown Mailbox command %x Cmpl\n",
- phba->brd_no,
- pmbox->mbxCommand);
- phba->hba_state = LPFC_HBA_ERROR;
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "%d (%d):0323 Unknown Mailbox command "
+ "%x Cmpl\n",
+ phba->brd_no,
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand);
+ phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
- return (0);
+ continue;
}
- phba->sli.mbox_active = NULL;
if (pmbox->mbxStatus) {
phba->sli.slistat.mbox_stat_err++;
if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
/* Mbox cmd cmpl error - RETRYing */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0305 Mbox cmd cmpl error - "
- "RETRYing Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- pmbox->mbxCommand,
- pmbox->mbxStatus,
- pmbox->un.varWords[0],
- phba->hba_state);
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_MBOX | LOG_SLI,
+ "%d (%d):0305 Mbox cmd cmpl "
+ "error - RETRYing Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand,
+ pmbox->mbxStatus,
+ pmbox->un.varWords[0],
+ pmb->vport ? pmb->vport->port_state : 0);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_SUCCESS)
- return (0);
+ continue;
}
}
/* Mailbox cmd <cmd> Cmpl <cmpl> */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0307 Mailbox cmd x%x Cmpl x%p "
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no,
+ pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
@@ -690,39 +895,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
pmbox->un.varWords[6],
pmbox->un.varWords[7]);
- if (pmb->mbox_cmpl) {
- lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
+ if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba,pmb);
- }
- }
-
-
- do {
- process_next = 0; /* by default don't loop */
- spin_lock_irq(phba->host->host_lock);
- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-
- /* Process next mailbox command if there is one */
- if ((pmb = lpfc_mbox_get(phba))) {
- spin_unlock_irq(phba->host->host_lock);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- pmb->mb.mbxStatus = MBX_NOT_FINISHED;
- pmb->mbox_cmpl(phba,pmb);
- process_next = 1;
- continue; /* loop back */
- }
- } else {
- spin_unlock_irq(phba->host->host_lock);
- /* Turn on IOCB processing */
- for (i = 0; i < phba->sli.num_rings; i++)
- lpfc_sli_turn_on_ring(phba, i);
- }
-
- } while (process_next);
+ } while (1);
+ return 0;
+}
- return (0);
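+/*
+ * Hand a received HBQ buffer up to the caller and replenish the slot it
+ * came from: the payload is moved into a freshly allocated hbq_dmabuf,
+ * a new DMA buffer is attached to the old entry and the old entry is
+ * reposted to the firmware. If either allocation fails the received
+ * buffer itself is returned and the slot is not replenished.
+ */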
+static struct lpfc_dmabuf *
+lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
+{
+ struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
+
+ hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
+ if (hbq_entry == NULL)
+ return NULL;
+ list_del(&hbq_entry->dbuf.list);
+ new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
+ if (new_hbq_entry == NULL)
+ return &hbq_entry->dbuf;
+ new_hbq_entry->dbuf = hbq_entry->dbuf;
+ new_hbq_entry->tag = -1;
+ hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
+ if (hbq_entry->dbuf.virt == NULL) {
+ kfree(new_hbq_entry);
+ return &hbq_entry->dbuf;
+ }
+ lpfc_sli_free_hbq(phba, hbq_entry);
+ return &new_hbq_entry->dbuf;
}
+
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -735,7 +936,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
match = 0;
irsp = &(saveq->iocb);
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
- || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
+ || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
+ || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
+ || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
} else {
@@ -747,13 +950,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Firmware Workaround */
if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
- (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
+ (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
w5p->hcsw.Rctl = Rctl;
w5p->hcsw.Type = Type;
}
}
+
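+ /*
+ * With HBQs enabled the BDE words of an unsolicited response carry
+ * HBQ buffer tags rather than addresses, so swap the tagged buffers
+ * out of the HBQs before handing the sequence up.
+ */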
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (irsp->ulpBdeCount != 0)
+ saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
+ irsp->un.ulpWord[3]);
+ if (irsp->ulpBdeCount == 2)
+ saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
+ irsp->un.ulpWord[15]);
+ }
+
/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -781,23 +995,21 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Unexpected Rctl / Type received */
/* Ring <ringno> handler: unexpected
Rctl <Rctl> Type <Type> received */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"%d:0313 Ring %d handler: unexpected Rctl x%x "
- "Type x%x received \n",
+ "Type x%x received\n",
phba->brd_no,
pring->ringno,
Rctl,
Type);
}
- return(1);
+ return 1;
}
static struct lpfc_iocbq *
-lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * prspiocb)
+lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
uint16_t iotag;
@@ -806,7 +1018,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- list_del(&cmd_iocb->list);
+ list_del_init(&cmd_iocb->list);
pring->txcmplq_cnt--;
return cmd_iocb;
}
@@ -821,16 +1033,18 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
}
static int
-lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
+lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
- struct lpfc_iocbq * cmdiocbp;
+ struct lpfc_iocbq *cmdiocbp;
int rc = 1;
unsigned long iflag;
/* Based on the iotag field, get the cmd IOCB from the txcmplq */
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
/*
@@ -846,17 +1060,8 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
saveq->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
}
- spin_unlock_irqrestore(phba->host->host_lock,
- iflag);
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
- }
- else {
- spin_unlock_irqrestore(phba->host->host_lock,
- iflag);
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
}
+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
@@ -870,29 +1075,30 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
* Ring <ringno> handler: unexpected completion IoTag
* <IoTag>
*/
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_SLI,
- "%d:0322 Ring %d handler: unexpected "
- "completion IoTag x%x Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- pring->ringno,
- saveq->iocb.ulpIoTag,
- saveq->iocb.ulpStatus,
- saveq->iocb.un.ulpWord[4],
- saveq->iocb.ulpCommand,
- saveq->iocb.ulpContext);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "%d (%d):0322 Ring %d handler: "
+ "unexpected completion IoTag x%x "
+ "Data: x%x x%x x%x x%x\n",
+ phba->brd_no,
+ 0,
+ pring->ringno,
+ saveq->iocb.ulpIoTag,
+ saveq->iocb.ulpStatus,
+ saveq->iocb.un.ulpWord[4],
+ saveq->iocb.ulpCommand,
+ saveq->iocb.ulpContext);
}
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
return rc;
}
-static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring)
+static void
+lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
@@ -904,7 +1110,7 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
le32_to_cpu(pgp->rspPutInx),
pring->numRiocb);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
@@ -912,16 +1118,18 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
+
+ /* hbalock should already be held */
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
return;
}
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
- struct lpfc_sli * psli = &phba->sli;
- struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -931,13 +1139,15 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
uint32_t portRspPut, portRspMax;
int type;
uint32_t rsp_cmpl = 0;
- void __iomem *to_slim;
uint32_t ha_copy;
+ unsigned long iflags;
pring->stats.iocb_event++;
- /* The driver assumes SLI-2 mode */
- pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
+
/*
* The next available response entry should never exceed the maximum
@@ -952,15 +1162,13 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
rmb();
while (pring->rspidx != portRspPut) {
-
- entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
-
+ entry = lpfc_resp_iocb(phba, pring);
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
- sizeof (IOCB_t));
+ phba->iocb_rsp_size);
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
pring->stats.iocb_rsp++;
@@ -998,8 +1206,10 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
break;
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
@@ -1033,9 +1243,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
- to_slim = phba->MBslimaddr +
- (SLIMOFF + (pring->ringno * 2) + 1) * 4;
- writeb(pring->rspidx, to_slim);
+ writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1045,13 +1253,16 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
ha_copy >>= (LPFC_FCP_RING * 4);
if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring->stats.iocb_rsp_full++;
status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
writel(status, phba->CAregaddr);
readl(phba->CAregaddr);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if ((ha_copy & HA_R0CE_RSP) &&
(pring->flag & LPFC_CALL_RING_AVAILABLE)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
pring->stats.iocb_cmd_empty++;
@@ -1062,6 +1273,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
if ((pring->lpfc_sli_cmd_available))
(pring->lpfc_sli_cmd_available) (phba, pring);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
return;
@@ -1072,10 +1284,12 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
* to check it explicitly.
*/
static int
-lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, uint32_t mask)
+lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
{
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1086,9 +1300,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
lpfc_iocb_type type;
unsigned long iflag;
uint32_t rsp_cmpl = 0;
- void __iomem *to_slim;
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
/*
@@ -1099,7 +1312,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
lpfc_sli_rsp_pointers_error(phba, pring);
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return 1;
}
@@ -1110,14 +1323,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* structure. The copy involves a byte-swap since the
* network byte order and pci byte orders are different.
*/
- entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
+ entry = lpfc_resp_iocb(phba, pring);
+ phba->last_completion_time = jiffies;
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
- sizeof (IOCB_t));
+ phba->iocb_rsp_size);
INIT_LIST_HEAD(&(rspiocbq.list));
irsp = &rspiocbq.iocb;
@@ -1126,16 +1340,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
rsp_cmpl++;
if (unlikely(irsp->ulpStatus)) {
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_adjust_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "%d:0336 Rsp Ring %d error: IOCB Data: "
- "x%x x%x x%x x%x x%x x%x x%x x%x\n",
- phba->brd_no, pring->ringno,
- irsp->un.ulpWord[0], irsp->un.ulpWord[1],
- irsp->un.ulpWord[2], irsp->un.ulpWord[3],
- irsp->un.ulpWord[4], irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ "%d:0336 Rsp Ring %d error: IOCB Data: "
+ "x%x x%x x%x x%x x%x x%x x%x x%x\n",
+ phba->brd_no, pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7));
}
switch (type) {
@@ -1149,7 +1377,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0333 IOCB cmd 0x%x"
" processed. Skipping"
- " completion\n", phba->brd_no,
+ " completion\n",
+ phba->brd_no,
irsp->ulpCommand);
break;
}
@@ -1161,19 +1390,19 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
} else {
- spin_unlock_irqrestore(
- phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
- spin_lock_irqsave(phba->host->host_lock,
+ spin_lock_irqsave(&phba->hbalock,
iflag);
}
}
break;
case LPFC_UNSOL_IOCB:
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
break;
default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
@@ -1186,11 +1415,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0334 Unknown IOCB command "
- "Data: x%x, x%x x%x x%x x%x\n",
- phba->brd_no, type, irsp->ulpCommand,
- irsp->ulpStatus, irsp->ulpIoTag,
- irsp->ulpContext);
+ "%d:0334 Unknown IOCB command "
+ "Data: x%x, x%x x%x x%x x%x\n",
+ phba->brd_no, type,
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
}
break;
}
@@ -1201,9 +1432,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
- to_slim = phba->MBslimaddr +
- (SLIMOFF + (pring->ringno * 2) + 1) * 4;
- writel(pring->rspidx, to_slim);
+ writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1228,31 +1457,31 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
-
int
-lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring, uint32_t mask)
+lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, uint32_t mask)
{
+ struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
+ &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
+ &phba->slim2p->mbx.us.s2.port[pring->ringno];
IOCB_t *entry;
IOCB_t *irsp = NULL;
struct lpfc_iocbq *rspiocbp = NULL;
struct lpfc_iocbq *next_iocb;
struct lpfc_iocbq *cmdiocbp;
struct lpfc_iocbq *saveq;
- struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
uint8_t iocb_cmd_type;
lpfc_iocb_type type;
uint32_t status, free_saveq;
uint32_t portRspPut, portRspMax;
int rc = 1;
unsigned long iflag;
- void __iomem *to_slim;
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
/*
@@ -1266,16 +1495,14 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"%d:0303 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
- phba->brd_no,
- pring->ringno, portRspPut, portRspMax);
+ phba->brd_no, pring->ringno, portRspPut,
+ portRspMax);
- phba->hba_state = LPFC_HBA_ERROR;
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ phba->link_state = LPFC_HBA_ERROR;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
@@ -1298,23 +1525,24 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
* the ulpLe field is set, the entire Command has been
* received.
*/
- entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
- rspiocbp = lpfc_sli_get_iocbq(phba);
+ entry = lpfc_resp_iocb(phba, pring);
+
+ phba->last_completion_time = jiffies;
+ rspiocbp = __lpfc_sli_get_iocbq(phba);
if (rspiocbp == NULL) {
printk(KERN_ERR "%s: out of buffers! Failing "
"completion.\n", __FUNCTION__);
break;
}
- lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
+ lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
+ phba->iocb_rsp_size);
irsp = &rspiocbp->iocb;
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
- to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
- + 1) * 4;
- writel(pring->rspidx, to_slim);
+ writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (list_empty(&(pring->iocb_continueq))) {
list_add(&rspiocbp->list, &(pring->iocb_continueq));
@@ -1338,23 +1566,44 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
pring->stats.iocb_rsp++;
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_adjust_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
if (irsp->ulpStatus) {
/* Rsp ring <ringno> error: IOCB */
- lpfc_printf_log(phba,
- KERN_WARNING,
- LOG_SLI,
- "%d:0328 Rsp Ring %d error: IOCB Data: "
- "x%x x%x x%x x%x x%x x%x x%x x%x\n",
- phba->brd_no,
- pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "%d:0328 Rsp Ring %d error: "
+ "IOCB Data: "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x "
+ "x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pring->ringno,
+ irsp->un.ulpWord[0],
+ irsp->un.ulpWord[1],
+ irsp->un.ulpWord[2],
+ irsp->un.ulpWord[3],
+ irsp->un.ulpWord[4],
+ irsp->un.ulpWord[5],
+ *(((uint32_t *) irsp) + 6),
+ *(((uint32_t *) irsp) + 7),
+ *(((uint32_t *) irsp) + 8),
+ *(((uint32_t *) irsp) + 9),
+ *(((uint32_t *) irsp) + 10),
+ *(((uint32_t *) irsp) + 11),
+ *(((uint32_t *) irsp) + 12),
+ *(((uint32_t *) irsp) + 13),
+ *(((uint32_t *) irsp) + 14),
+ *(((uint32_t *) irsp) + 15));
}
/*
@@ -1366,17 +1615,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
if (type == LPFC_SOL_IOCB) {
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
iflag);
rc = lpfc_sli_process_sol_iocb(phba, pring,
- saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
} else if (type == LPFC_UNSOL_IOCB) {
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
iflag);
rc = lpfc_sli_process_unsol_iocb(phba, pring,
- saveq);
- spin_lock_irqsave(phba->host->host_lock, iflag);
+ saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
} else if (type == LPFC_ABORT_IOCB) {
if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
((cmdiocbp =
@@ -1386,15 +1635,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
routine */
if (cmdiocbp->iocb_cmpl) {
spin_unlock_irqrestore(
- phba->host->host_lock,
+ &phba->hbalock,
iflag);
(cmdiocbp->iocb_cmpl) (phba,
cmdiocbp, saveq);
spin_lock_irqsave(
- phba->host->host_lock,
+ &phba->hbalock,
iflag);
} else
- lpfc_sli_release_iocbq(phba,
+ __lpfc_sli_release_iocbq(phba,
cmdiocbp);
}
} else if (type == LPFC_UNKNOWN_IOCB) {
@@ -1411,32 +1660,28 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
- "%d:0335 Unknown IOCB command "
- "Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "%d:0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ phba->brd_no,
+ irsp->ulpCommand,
+ irsp->ulpStatus,
+ irsp->ulpIoTag,
+ irsp->ulpContext);
}
}
if (free_saveq) {
- if (!list_empty(&saveq->list)) {
- list_for_each_entry_safe(rspiocbp,
- next_iocb,
- &saveq->list,
- list) {
- list_del(&rspiocbp->list);
- lpfc_sli_release_iocbq(phba,
- rspiocbp);
- }
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba,
+ rspiocbp);
}
- lpfc_sli_release_iocbq(phba, saveq);
+ __lpfc_sli_release_iocbq(phba, saveq);
}
+ rspiocbp = NULL;
}
/*
@@ -1449,7 +1694,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
}
} /* while (pring->rspidx != portRspPut) */
- if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
+ if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
/* At least one response entry has been freed */
pring->stats.iocb_rsp_full++;
/* SET RxRE_RSP in Chip Att register */
@@ -1470,24 +1715,25 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
-int
+void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
LIST_HEAD(completions);
struct lpfc_iocbq *iocb, *next_iocb;
IOCB_t *cmd = NULL;
- int errcnt;
- errcnt = 0;
+ if (pring->ringno == LPFC_ELS_RING) {
+ lpfc_fabric_abort_hba(phba);
+ }
/* Error everything on txq and txcmplq
* First do the txq.
*/
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
@@ -1495,26 +1741,25 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
while (!list_empty(&completions)) {
iocb = list_get_first(&completions, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
- list_del(&iocb->list);
+ list_del_init(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
-
- return errcnt;
}
int
-lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
uint32_t status;
int i = 0;
@@ -1541,7 +1786,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
msleep(2500);
if (i == 15) {
- phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
+ /* Do post */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
@@ -1550,7 +1796,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
retval = 1;
}
@@ -1559,7 +1805,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
#define BARRIER_TEST_PATTERN (0xdeadbeef)
-void lpfc_reset_barrier(struct lpfc_hba * phba)
+void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
@@ -1584,12 +1830,12 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
hc_copy = readl(phba->HCregaddr);
writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- phba->fc_flag |= FC_IGNORE_ERATT;
+ phba->link_flag |= LS_IGNORE_ERATT;
if (readl(phba->HAregaddr) & HA_ERATT) {
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
- phba->stopped = 1;
+ phba->pport->stopped = 1;
}
mbox = 0;
@@ -1606,7 +1852,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
- phba->stopped)
+ phba->pport->stopped)
goto restore_hc;
else
goto clear_errat;
@@ -1623,17 +1869,17 @@ clear_errat:
if (readl(phba->HAregaddr) & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
- phba->stopped = 1;
+ phba->pport->stopped = 1;
}
restore_hc:
- phba->fc_flag &= ~FC_IGNORE_ERATT;
+ phba->link_flag &= ~LS_IGNORE_ERATT;
writel(hc_copy, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
int
-lpfc_sli_brdkill(struct lpfc_hba * phba)
+lpfc_sli_brdkill(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
LPFC_MBOXQ_t *pmb;
@@ -1645,26 +1891,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
psli = &phba->sli;
/* Kill HBA */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_SLI,
- "%d:0329 Kill HBA Data: x%x x%x\n",
- phba->brd_no,
- phba->hba_state,
- psli->sli_flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "%d:0329 Kill HBA Data: x%x x%x\n",
+ phba->brd_no, phba->pport->port_state, psli->sli_flag);
if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL)) == 0)
return 1;
/* Disable the error attention */
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
status &= ~HC_ERINT_ENA;
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- phba->fc_flag |= FC_IGNORE_ERATT;
- spin_unlock_irq(phba->host->host_lock);
+ phba->link_flag |= LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
lpfc_kill_board(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -1673,9 +1915,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
- spin_lock_irq(phba->host->host_lock);
- phba->fc_flag &= ~FC_IGNORE_ERATT;
- spin_unlock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
return 1;
}
@@ -1698,22 +1940,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
del_timer_sync(&psli->mbox_tmo);
if (ha_copy & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
- phba->stopped = 1;
+ phba->pport->stopped = 1;
}
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- phba->fc_flag &= ~FC_IGNORE_ERATT;
- spin_unlock_irq(phba->host->host_lock);
+ phba->link_flag &= ~LS_IGNORE_ERATT;
+ spin_unlock_irq(&phba->hbalock);
psli->mbox_active = NULL;
lpfc_hba_down_post(phba);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
- return (ha_copy & HA_ERATT ? 0 : 1);
+ return ha_copy & HA_ERATT ? 0 : 1;
}
int
-lpfc_sli_brdreset(struct lpfc_hba * phba)
+lpfc_sli_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
@@ -1725,12 +1967,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
- phba->hba_state, psli->sli_flag);
+ phba->pport->port_state, psli->sli_flag);
/* perform board reset */
phba->fc_eventTag = 0;
- phba->fc_myDID = 0;
- phba->fc_prevDID = 0;
+ phba->pport->fc_myDID = 0;
+ phba->pport->fc_prevDID = 0;
/* Turn off parity checking and serr during the physical reset */
pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
@@ -1760,12 +2002,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
pring->missbufcnt = 0;
}
- phba->hba_state = LPFC_WARM_START;
+ phba->link_state = LPFC_WARM_START;
return 0;
}
int
-lpfc_sli_brdrestart(struct lpfc_hba * phba)
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
@@ -1773,14 +2015,14 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
volatile uint32_t word0;
void __iomem *to_slim;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
psli = &phba->sli;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
- phba->hba_state, psli->sli_flag);
+ phba->pport->port_state, psli->sli_flag);
word0 = 0;
mb = (MAILBOX_t *) &word0;
@@ -1794,7 +2036,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
- if (phba->hba_state) {
+ if (phba->pport->port_state) {
skip_post = 1;
word0 = 1; /* This is really setting up word1 */
} else {
@@ -1806,10 +2048,10 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
- phba->stopped = 0;
- phba->hba_state = LPFC_INIT_START;
+ phba->pport->stopped = 0;
+ phba->link_state = LPFC_INIT_START;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
@@ -1843,14 +2085,11 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (i++ >= 20) {
/* Adapter failed to init, timeout, status reg
<status> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0436 Adapter failed to init, "
"timeout, status reg x%x\n",
- phba->brd_no,
- status);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->brd_no, status);
+ phba->link_state = LPFC_HBA_ERROR;
return -ETIMEDOUT;
}
@@ -1859,14 +2098,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg
<status> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0437 Adapter failed to init, "
"chipset, status reg x%x\n",
phba->brd_no,
status);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@@ -1879,7 +2116,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
}
if (i == 15) {
- phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
+ /* Do post */
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
@@ -1890,14 +2128,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
if (status & HS_FFERM) {
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg <status> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0438 Adapter failed to init, chipset, "
"status reg x%x\n",
phba->brd_no,
status);
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
@@ -1911,80 +2147,253 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return 0;
}
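+/*
+ * HBQ geometry helpers: number of HBQs defined in lpfc_hbq_defs, the
+ * total entry count across them and the total size in bytes.
+ */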
+static int
+lpfc_sli_hbq_count(void)
+{
+ return ARRAY_SIZE(lpfc_hbq_defs);
+}
+
+static int
+lpfc_sli_hbq_entry_count(void)
+{
+ int hbq_count = lpfc_sli_hbq_count();
+ int count = 0;
+ int i;
+
+ for (i = 0; i < hbq_count; ++i)
+ count += lpfc_hbq_defs[i]->entry_count;
+ return count;
+}
+
int
-lpfc_sli_hba_setup(struct lpfc_hba * phba)
+lpfc_sli_hbq_size(void)
+{
+ return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
+}
+
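+/*
+ * Configure every HBQ with the firmware: build a mailbox command for
+ * each HBQ via lpfc_config_hbq() and issue it in polled mode, then post
+ * the initial buffers with lpfc_sli_hbqbuf_init_hbqs().
+ */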
+static int
+lpfc_sli_hbq_setup(struct lpfc_hba *phba)
+{
+ int hbq_count = lpfc_sli_hbq_count();
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *pmbox;
+ uint32_t hbqno;
+ uint32_t hbq_entry_index;
+
+ /* Get a mailbox buffer to set up the mailbox
+ * commands for HBA initialization
+ */
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!pmb)
+ return -ENOMEM;
+
+ pmbox = &pmb->mb;
+
+ /* Initialize the struct lpfc_sli_hbq structure for each hbq */
+ phba->link_state = LPFC_INIT_MBX_CMDS;
+
+ hbq_entry_index = 0;
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+ phba->hbqs[hbqno].next_hbqPutIdx = 0;
+ phba->hbqs[hbqno].hbqPutIdx = 0;
+ phba->hbqs[hbqno].local_hbqGetIdx = 0;
+ phba->hbqs[hbqno].entry_count =
+ lpfc_hbq_defs[hbqno]->entry_count;
+ lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
+ pmb);
+ hbq_entry_index += phba->hbqs[hbqno].entry_count;
+
+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
+ /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
+ mbxStatus <status>, ring <num> */
+
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "%d:1805 Adapter failed to init. "
+ "Data: x%x x%x x%x\n",
+ phba->brd_no, pmbox->mbxCommand,
+ pmbox->mbxStatus, hbqno);
+
+ phba->link_state = LPFC_HBA_ERROR;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ENXIO;
+ }
+ }
+ phba->hbq_count = hbq_count;
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Initially populate or replenish the HBQs */
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
+ if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
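+/*
+ * Issue CONFIG_PORT in the requested SLI mode, restarting the board and
+ * retrying once on failure. On success the adapter's max_vpi is
+ * recorded when NPIV support (gmv) is reported.
+ */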
+static int
+lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
{
LPFC_MBOXQ_t *pmb;
uint32_t resetcount = 0, rc = 0, done = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
+ phba->sli_rev = sli_mode;
while (resetcount < 2 && !done) {
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
- phba->hba_state = LPFC_STATE_UNKNOWN;
+ spin_unlock_irq(&phba->hbalock);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
msleep(2500);
rc = lpfc_sli_chipset_init(phba);
if (rc)
break;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
resetcount++;
- /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
- * means the call was successful. Any other nonzero value is a failure,
- * but if ERESTART is returned, the driver may reset the HBA and try
- * again.
- */
+ /* Call pre CONFIG_PORT mailbox command initialization. A
+ * value of 0 means the call was successful. Any other
+ * nonzero value is a failure, but if ERESTART is returned,
+ * the driver may reset the HBA and try again.
+ */
rc = lpfc_config_port_prep(phba);
if (rc == -ERESTART) {
- phba->hba_state = 0;
+ phba->link_state = LPFC_LINK_UNKNOWN;
continue;
} else if (rc) {
break;
}
- phba->hba_state = LPFC_INIT_MBX_CMDS;
+ phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
- if (rc == MBX_SUCCESS)
- done = 1;
- else {
+ if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
phba->brd_no, pmb->mb.mbxCommand,
pmb->mb.mbxStatus, 0);
+ spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ rc = -ENXIO;
+ } else {
+ done = 1;
+ phba->max_vpi = (phba->max_vpi &&
+ pmb->mb.un.varCfgPort.gmv) != 0
+ ? pmb->mb.un.varCfgPort.max_vpi
+ : 0;
+ }
+ }
+
+ if (!done) {
+ rc = -EINVAL;
+ goto do_prep_failed;
+ }
+
+ if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
+ (!pmb->mb.un.varCfgPort.cMA)) {
+ rc = -ENXIO;
+ goto do_prep_failed;
+ }
+ return rc;
+
+do_prep_failed:
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+}
+
+int
+lpfc_sli_hba_setup(struct lpfc_hba *phba)
+{
+ uint32_t rc;
+ int mode = 3;
+
+ switch (lpfc_sli_mode) {
+ case 2:
+ if (phba->cfg_npiv_enable) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d:1824 NPIV enabled: Override lpfc_sli_mode "
+ "parameter (%d) to auto (0).\n",
+ phba->brd_no, lpfc_sli_mode);
+ break;
}
+ mode = 2;
+ break;
+ case 0:
+ case 3:
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d:1819 Unrecognized lpfc_sli_mode "
+ "parameter: %d.\n",
+ phba->brd_no, lpfc_sli_mode);
+
+ break;
}
- if (!done)
+
+ rc = lpfc_do_config_port(phba, mode);
+ if (rc && lpfc_sli_mode == 3)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d:1820 Unable to select SLI-3. "
+ "Not supported by adapter.\n",
+ phba->brd_no);
+ if (rc && mode != 2)
+ rc = lpfc_do_config_port(phba, 2);
+ if (rc)
goto lpfc_sli_hba_setup_error;
- rc = lpfc_sli_ring_map(phba, pmb);
+ if (phba->sli_rev == 3) {
+ phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
+ phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
+ phba->sli3_options |= LPFC_SLI3_ENABLED;
+ phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+
+ } else {
+ phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
+ phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
+ phba->sli3_options = 0;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
+ phba->brd_no, phba->sli_rev, phba->max_vpi);
+ rc = lpfc_sli_ring_map(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
+ /* Init HBQs */
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ rc = lpfc_sli_hbq_setup(phba);
+ if (rc)
+ goto lpfc_sli_hba_setup_error;
+ }
+
phba->sli.sli_flag |= LPFC_PROCESS_LA;
rc = lpfc_config_port_post(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
- goto lpfc_sli_hba_setup_exit;
+ return rc;
+
lpfc_sli_hba_setup_error:
- phba->hba_state = LPFC_HBA_ERROR;
-lpfc_sli_hba_setup_exit:
- mempool_free(pmb, phba->mbox_mem_pool);
+ phba->link_state = LPFC_HBA_ERROR;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "%d:0445 Firmware initialization failed\n",
+ phba->brd_no);
return rc;
}
@@ -2004,56 +2413,58 @@ lpfc_sli_hba_setup_exit:
void
lpfc_mbox_timeout(unsigned long ptr)
{
- struct lpfc_hba *phba;
+ struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
unsigned long iflag;
+ uint32_t tmo_posted;
- phba = (struct lpfc_hba *)ptr;
- spin_lock_irqsave(phba->host->host_lock, iflag);
- if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
- phba->work_hba_events |= WORKER_MBOX_TMO;
+ spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+ tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
+ if (!tmo_posted)
+ phba->pport->work_port_events |= WORKER_MBOX_TMO;
+ spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+
+ if (!tmo_posted) {
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->work_wait)
- wake_up(phba->work_wait);
+ lpfc_worker_wake_up(phba);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
- spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
- LPFC_MBOXQ_t *pmbox;
- MAILBOX_t *mb;
+ LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
+ MAILBOX_t *mb = &pmbox->mb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- spin_lock_irq(phba->host->host_lock);
- if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
- spin_unlock_irq(phba->host->host_lock);
+ if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
return;
}
- pmbox = phba->sli.mbox_active;
- mb = &pmbox->mb;
-
/* Mbox cmd <mbxCommand> timeout */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_MBOX | LOG_SLI,
- "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
- phba->brd_no,
- mb->mbxCommand,
- phba->hba_state,
- phba->sli.sli_flag,
- phba->sli.mbox_active);
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
+ "x%p\n",
+ phba->brd_no,
+ mb->mbxCommand,
+ phba->pport->port_state,
+ phba->sli.sli_flag,
+ phba->sli.mbox_active);
/* Setting state unknown so lpfc_sli_abort_iocb_ring
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
* it to fail all outstanding SCSI IO.
*/
- phba->hba_state = LPFC_STATE_UNKNOWN;
- phba->work_hba_events &= ~WORKER_MBOX_TMO;
- phba->fc_flag |= FC_ESTABLISH_LINK;
+ spin_lock_irq(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ spin_lock_irq(&phba->hbalock);
+ phba->link_state = LPFC_LINK_UNKNOWN;
+ phba->pport->fc_flag |= FC_ESTABLISH_LINK;
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
- spin_unlock_irq(phba->host->host_lock);
+ spin_unlock_irq(&phba->hbalock);
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
@@ -2075,10 +2486,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
}
int
-lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
MAILBOX_t *mb;
- struct lpfc_sli *psli;
+ struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy;
int i;
@@ -2086,31 +2497,44 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
volatile uint32_t word0, ldata;
void __iomem *to_slim;
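+ /*
+ * Mailbox commands with a completion handler other than the default
+ * or the synchronous wake handler must carry a vport; catch callers
+ * that forgot to set one.
+ */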
+ if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+ pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+ if (!pmbox->vport) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_MBOX | LOG_VPORT,
+ "%d:1806 Mbox x%x failed. No vport\n",
+ phba->brd_no,
+ pmbox->mb.mbxCommand);
+ dump_stack();
+ return MBXERR_ERROR;
+ }
+ }
+
/* If the PCI channel is in offline state, do not post mbox. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return MBX_NOT_FINISHED;
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
psli = &phba->sli;
- spin_lock_irqsave(phba->host->host_lock, drvr_flag);
-
mb = &pmbox->mb;
status = MBX_SUCCESS;
- if (phba->hba_state == LPFC_HBA_ERROR) {
- spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
+ if (phba->link_state == LPFC_HBA_ERROR) {
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
+ return MBX_NOT_FINISHED;
}
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
- spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
+ return MBX_NOT_FINISHED;
}
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -2120,20 +2544,18 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
*/
if (flag & MBX_POLL) {
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ return MBX_NOT_FINISHED;
}
if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ return MBX_NOT_FINISHED;
}
/* Handle STOP IOCB processing flag. This is only meaningful
@@ -2157,21 +2579,33 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
lpfc_mbox_put(phba, pmbox);
/* Mbox cmd issue - BUSY */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
- phba->brd_no,
- mb->mbxCommand,
- phba->hba_state,
- psli->sli_flag,
- flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "%d (%d):0308 Mbox cmd issue - BUSY Data: "
+ "x%x x%x x%x x%x\n",
+ phba->brd_no,
+ pmbox->vport ? pmbox->vport->vpi : 0xffffff,
+ mb->mbxCommand, phba->pport->port_state,
+ psli->sli_flag, flag);
psli->slistat.mbox_busy++;
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+
+ if (pmbox->vport) {
+ lpfc_debugfs_disc_trc(pmbox->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Bsy: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
- return (MBX_BUSY);
+ return MBX_BUSY;
}
/* Handle STOP IOCB processing flag. This is only meaningful
@@ -2198,11 +2632,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
(mb->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irqrestore(phba->host->host_lock,
- drvr_flag);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
- LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
- return (MBX_NOT_FINISHED);
+ LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+ return MBX_NOT_FINISHED;
}
/* timeout active mbox command */
mod_timer(&psli->mbox_tmo, (jiffies +
@@ -2210,15 +2643,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
}
/* Mailbox cmd <cmd> issue */
- lpfc_printf_log(phba,
- KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
- phba->brd_no,
- mb->mbxCommand,
- phba->hba_state,
- psli->sli_flag,
- flag);
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
+ "x%x\n",
+ phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
+ mb->mbxCommand, phba->pport->port_state,
+ psli->sli_flag, flag);
+
+ if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (pmbox->vport) {
+ lpfc_debugfs_disc_trc(pmbox->vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX Send vport: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
+ else {
+ lpfc_debugfs_disc_trc(phba->pport,
+ LPFC_DISC_TRC_MBOX,
+ "MBOX Send: cmd:x%x mb:x%x x%x",
+ (uint32_t)mb->mbxCommand,
+ mb->un.varWords[0], mb->un.varWords[1]);
+ }
+ }
psli->slistat.mbox_cmd++;
evtctr = psli->slistat.mbox_event;
@@ -2233,7 +2680,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
- MAILBOX_CMD_SIZE);
+ MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
@@ -2285,12 +2732,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
(!(ha_copy & HA_MBATT) &&
- (phba->hba_state > LPFC_WARM_START))) {
+ (phba->link_state > LPFC_WARM_START))) {
if (i-- <= 0) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
- return (MBX_NOT_FINISHED);
+ return MBX_NOT_FINISHED;
}
/* Check if we took a mbox interrupt while we were
@@ -2299,12 +2746,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
&& (evtctr != psli->slistat.mbox_event))
break;
- spin_unlock_irqrestore(phba->host->host_lock,
+ spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
msleep(1);
- spin_lock_irqsave(phba->host->host_lock, drvr_flag);
+ spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data */
@@ -2335,7 +2782,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
- MAILBOX_CMD_SIZE);
+ MAILBOX_CMD_SIZE);
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2355,23 +2802,25 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
status = mb->mbxStatus;
}
- spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
- return (status);
+ spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ return status;
}
-static int
-lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * piocb)
+/*
+ * Caller needs to hold lock.
+ */
+static void
+__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
pring->txq_cnt++;
- return (0);
}
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- struct lpfc_iocbq ** piocb)
+ struct lpfc_iocbq **piocb)
{
struct lpfc_iocbq * nextiocb;
@@ -2384,13 +2833,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return nextiocb;
}
+/*
+ * Lockless version of lpfc_sli_issue_iocb.
+ */
int
-lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_iocbq *nextiocb;
IOCB_t *iocb;
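+ /*
+ * IOCBs with a completion handler must reference a vport, except for
+ * abort and close requests which may be issued without one.
+ */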
+ if (piocb->iocb_cmpl && (!piocb->vport) &&
+ (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_SLI | LOG_VPORT,
+ "%d:1807 IOCB x%x failed. No vport\n",
+ phba->brd_no,
+ piocb->iocb.ulpCommand);
+ dump_stack();
+ return IOCB_ERROR;
+ }
+
/* If the PCI channel is in offline state, do not post iocbs. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return IOCB_ERROR;
@@ -2398,7 +2863,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* We should never get an IOCB if we are in a < LINK_DOWN state
*/
- if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IOCB_ERROR;
/*
@@ -2408,7 +2873,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
goto iocb_busy;
- if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
+ if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
/*
* Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
* can be issued if the link is not up.
@@ -2436,8 +2901,9 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* attention events.
*/
} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
- !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
+ !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
goto iocb_busy;
+ }
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
(nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
@@ -2459,13 +2925,28 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
out_busy:
if (!(flag & SLI_IOCB_RET_IOCB)) {
- lpfc_sli_ringtx_put(phba, pring, piocb);
+ __lpfc_sli_ringtx_put(phba, pring, piocb);
return IOCB_SUCCESS;
}
return IOCB_BUSY;
}
+
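+/*
+ * Locking wrapper around __lpfc_sli_issue_iocb(): takes hbalock for the
+ * duration of the ring submission.
+ */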
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb, uint32_t flag)
+{
+ unsigned long iflags;
+ int rc;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ return rc;
+}
+
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
@@ -2504,7 +2985,7 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
- int i, totiocb = 0;
+ int i, totiocbsize = 0;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
@@ -2529,6 +3010,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+ pring->sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
pring->iotag_ctr = 0;
pring->iotag_max =
(phba->cfg_hba_queue_depth * 2);
@@ -2539,12 +3026,25 @@ lpfc_sli_setup(struct lpfc_hba *phba)
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
+ pring->sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
+ pring->iotag_max = phba->cfg_hba_queue_depth;
pring->num_mask = 0;
break;
case LPFC_ELS_RING: /* ring 2 - ELS / CT */
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
+ pring->sizeCiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_CMD_SIZE :
+ SLI2_IOCB_CMD_SIZE;
+ pring->sizeRiocb = (phba->sli_rev == 3) ?
+ SLI3_IOCB_RSP_SIZE :
+ SLI2_IOCB_RSP_SIZE;
pring->fast_iotag = 0;
pring->iotag_ctr = 0;
pring->iotag_max = 4096;
@@ -2575,14 +3075,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
lpfc_ct_unsol_event;
break;
}
- totiocb += (pring->numCiocb + pring->numRiocb);
+ totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
+ (pring->numRiocb * pring->sizeRiocb);
}
- if (totiocb > MAX_SLI2_IOCB) {
+ if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0462 Too many cmd / rsp ring entries in "
- "SLI2 SLIM Data: x%x x%x\n",
- phba->brd_no, totiocb, MAX_SLI2_IOCB);
+ "SLI2 SLIM Data: x%x x%lx\n",
+ phba->brd_no, totiocbsize,
+ (unsigned long) MAX_SLIM_IOCB_SIZE);
}
if (phba->cfg_multi_ring_support == 2)
lpfc_extra_ring_setup(phba);
@@ -2591,15 +3093,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
}
int
-lpfc_sli_queue_setup(struct lpfc_hba * phba)
+lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
int i;
psli = &phba->sli;
- spin_lock_irq(phba->host->host_lock);
+ spin_lock_irq(&phba->hbalock);
INIT_LIST_HEAD(&psli->mboxq);
+ INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
@@ -2612,15 +3115,73 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
INIT_LIST_HEAD(&pring->iocb_continueq);
INIT_LIST_HEAD(&pring->postbufq);
}
- spin_unlock_irq(phba->host->host_lock);
- return (1);
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
}
int
-lpfc_sli_hba_down(struct lpfc_hba * phba)
+lpfc_sli_host_down(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *iocb, *next_iocb;
+ int i;
+ unsigned long flags = 0;
+ uint16_t prev_pring_flag;
+
+ lpfc_cleanup_discovery_resources(vport);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ prev_pring_flag = pring->flag;
+ if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /*
+ * Error everything on the txq since these iocbs have not been
+ * given to the FW yet.
+ */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ if (iocb->vport != vport)
+ continue;
+ list_move_tail(&iocb->list, &completions);
+ pring->txq_cnt--;
+ }
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
+ list) {
+ if (iocb->vport != vport)
+ continue;
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+
+ pring->flag = prev_pring_flag;
+ }
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
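+ /* Cancel the flushed txq iocbs: free them or complete them
+ * with a LOCAL_REJECT/SLI_DOWN status, outside the lock.
+ */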
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+ return 1;
+}
+
+int
+lpfc_sli_hba_down(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
LPFC_MBOXQ_t *pmb;
struct lpfc_iocbq *iocb;
@@ -2628,13 +3189,15 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
int i;
unsigned long flags = 0;
- psli = &phba->sli;
lpfc_hba_down_prep(phba);
- spin_lock_irqsave(phba->host->host_lock, flags);
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
/*
* Error everything on the txq since these iocbs have not been
@@ -2644,51 +3207,50 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
pring->txq_cnt = 0;
}
- spin_unlock_irqrestore(phba->host->host_lock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
while (!list_empty(&completions)) {
- iocb = list_get_first(&completions, struct lpfc_iocbq, list);
+ list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
cmd = &iocb->iocb;
- list_del(&iocb->list);
- if (iocb->iocb_cmpl) {
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
(iocb->iocb_cmpl) (phba, iocb, iocb);
- } else
- lpfc_sli_release_iocbq(phba, iocb);
+ }
}
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
- spin_lock_irqsave(phba->host->host_lock, flags);
- phba->work_hba_events &= ~WORKER_MBOX_TMO;
+ spin_lock_irqsave(&phba->hbalock, flags);
+
+ spin_lock(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock(&phba->pport->work_port_lock);
+
if (psli->mbox_active) {
- pmb = psli->mbox_active;
- pmb->mb.mbxStatus = MBX_NOT_FINISHED;
- if (pmb->mbox_cmpl) {
- spin_unlock_irqrestore(phba->host->host_lock, flags);
- pmb->mbox_cmpl(phba,pmb);
- spin_lock_irqsave(phba->host->host_lock, flags);
- }
+ list_add_tail(&psli->mbox_active->list, &completions);
+ psli->mbox_active = NULL;
+ psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}
- psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- psli->mbox_active = NULL;
- /* Return any pending mbox cmds */
- while ((pmb = lpfc_mbox_get(phba)) != NULL) {
+ /* Return any pending or completed mbox cmds */
+ list_splice_init(&phba->sli.mboxq, &completions);
+ list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+ INIT_LIST_HEAD(&psli->mboxq);
+ INIT_LIST_HEAD(&psli->mboxq_cmpl);
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ while (!list_empty(&completions)) {
+ list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
pmb->mb.mbxStatus = MBX_NOT_FINISHED;
if (pmb->mbox_cmpl) {
- spin_unlock_irqrestore(phba->host->host_lock, flags);
pmb->mbox_cmpl(phba,pmb);
- spin_lock_irqsave(phba->host->host_lock, flags);
}
}
-
- INIT_LIST_HEAD(&psli->mboxq);
-
- spin_unlock_irqrestore(phba->host->host_lock, flags);
-
return 1;
}
@@ -2710,14 +3272,15 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
}
int
-lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
- struct lpfc_dmabuf * mp)
+lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_dmabuf *mp)
{
/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
later */
+ spin_lock_irq(&phba->hbalock);
list_add_tail(&mp->list, &pring->postbufq);
-
pring->postbufq_cnt++;
+ spin_unlock_irq(&phba->hbalock);
return 0;
}
@@ -2730,14 +3293,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct list_head *slp = &pring->postbufq;
/* Search postbufq, from the beginning, looking for a match on phys */
+ spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
if (mp->phys == phys) {
list_del_init(&mp->list);
pring->postbufq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
return mp;
}
}
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"%d:0410 Cannot find virtual addr for mapped buf on "
"ring %d Data x%llx x%p x%p x%x\n",
@@ -2747,92 +3313,110 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
static void
-lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
+ IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
+ struct lpfc_iocbq *abort_iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
abort_iocb = NULL;
- irsp = &rspiocb->iocb;
-
- spin_lock_irq(phba->host->host_lock);
if (irsp->ulpStatus) {
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
+ spin_lock_irq(&phba->hbalock);
if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0327 Cannot abort els iocb %p"
- " with tag %x context %x\n",
- phba->brd_no, abort_iocb,
- abort_iotag, abort_context);
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
+ "%d:0327 Cannot abort els iocb %p "
+ "with tag %x context %x, abort status %x, "
+ "abort code %x\n",
+ phba->brd_no, abort_iocb, abort_iotag,
+ abort_context, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
/*
* make sure we have the right iocbq before taking it
* off the txcmplq and try to call completion routine.
*/
- if (abort_iocb &&
- abort_iocb->iocb.ulpContext == abort_context &&
- abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
- list_del(&abort_iocb->list);
+ if (!abort_iocb ||
+ abort_iocb->iocb.ulpContext != abort_context ||
+ (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
+ spin_unlock_irq(&phba->hbalock);
+ else {
+ list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
+ spin_unlock_irq(&phba->hbalock);
- rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
- if (rsp_ab_iocb == NULL)
- lpfc_sli_release_iocbq(phba, abort_iocb);
- else {
- abort_iocb->iocb_flag &=
- ~LPFC_DRIVER_ABORTED;
- rsp_ab_iocb->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- rsp_ab_iocb->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
- spin_unlock_irq(phba->host->host_lock);
- (abort_iocb->iocb_cmpl)
- (phba, abort_iocb, rsp_ab_iocb);
- spin_lock_irq(phba->host->host_lock);
- lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
- }
+ abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irq(phba->host->host_lock);
+ return;
+}
+
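+/*
+ * Completion handler used while the port is unloading: log the
+ * completion and free the CT or ELS iocb without running the
+ * normal completion path.
+ */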
+static void
+lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp = &rspiocb->iocb;
+
+ /* ELS cmd tag <ulpIoTag> completes */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
+ "x%x x%x x%x\n",
+ phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
+ irsp->un.ulpWord[4], irsp->ulpTimeout);
+ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ else
+ lpfc_els_free_iocb(phba, cmdiocb);
return;
}
int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * cmdiocb)
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
{
+ struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
- /* There are certain command types we don't want
- * to abort.
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
*/
icmd = &cmdiocb->iocb;
- if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) ||
- (icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
- /* If we're unloading, interrupts are disabled so we
- * need to cleanup the iocb here.
+ /* If we're unloading, don't abort iocb on the ELS ring, but change the
+ * callback so that nothing happens when it finishes.
*/
- if (phba->fc_flag & FC_UNLOADING)
+ if ((vport->load_flag & FC_UNLOADING) &&
+ (pring->ringno == LPFC_ELS_RING)) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
goto abort_iotag_exit;
+ }
/* issue ABTS for this IOCB based on iotag */
- abtsiocbp = lpfc_sli_get_iocbq(phba);
+ abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return 0;
@@ -2848,7 +3432,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
- if (phba->hba_state >= LPFC_LINK_UP)
+ if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
else
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
@@ -2856,32 +3440,20 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "%d:0339 Abort xri x%x, original iotag x%x, abort "
- "cmd iotag x%x\n",
- phba->brd_no, iabt->un.acxri.abortContextTag,
+ "%d (%d):0339 Abort xri x%x, original iotag x%x, "
+ "abort cmd iotag x%x\n",
+ phba->brd_no, vport->vpi,
+ iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
- retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+ retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
abort_iotag_exit:
-
- /* If we could not issue an abort dequeue the iocb and handle
- * the completion here.
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. This routine no longer removes
+ * iocb off txcmplq and call compl in case of IOCB_ERROR.
*/
- if (retval == IOCB_ERROR) {
- list_del(&cmdiocb->list);
- pring->txcmplq_cnt--;
-
- if (cmdiocb->iocb_cmpl) {
- icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
- icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- spin_unlock_irq(phba->host->host_lock);
- (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
- spin_lock_irq(phba->host->host_lock);
- } else
- lpfc_sli_release_iocbq(phba, cmdiocb);
- }
-
- return 1;
+ return retval;
}
static int
@@ -2930,7 +3502,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
- uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
+ uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_iocbq *iocbq;
int sum, i;
@@ -2947,14 +3519,10 @@ lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
void
-lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb)
+lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
{
- unsigned long iflags;
-
- spin_lock_irqsave(phba->host->host_lock, iflags);
lpfc_sli_release_iocbq(phba, cmdiocb);
- spin_unlock_irqrestore(phba->host->host_lock, iflags);
return;
}
@@ -2972,8 +3540,8 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
- 0, abort_cmd) != 0)
+ if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
+ abort_cmd) != 0)
continue;
/* issue ABTS for this IOCB based on iotag */
@@ -2989,8 +3557,9 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
+ abtsiocb->vport = phba->pport;
- if (phba->hba_state >= LPFC_LINK_UP)
+ if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
else
abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -3016,16 +3585,16 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
wait_queue_head_t *pdone_q;
unsigned long iflags;
- spin_lock_irqsave(phba->host->host_lock, iflags);
+ spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
pdone_q = cmdiocbq->context_un.wait_queue;
- spin_unlock_irqrestore(phba->host->host_lock, iflags);
if (pdone_q)
wake_up(pdone_q);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -3035,11 +3604,12 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 * lpfc_sli_issue_iocb call since the wake routine sets a unique value and by
* definition this is a wait function.
*/
+
int
-lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * piocb,
- struct lpfc_iocbq * prspiocbq,
+lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb,
+ struct lpfc_iocbq *prspiocbq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
@@ -3071,11 +3641,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
- spin_unlock_irq(phba->host->host_lock);
timeleft = wait_event_timeout(done_q,
piocb->iocb_flag & LPFC_IO_WAKE,
timeout_req);
- spin_lock_irq(phba->host->host_lock);
if (piocb->iocb_flag & LPFC_IO_WAKE) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3117,16 +3685,16 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
}
int
-lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
+lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
int retval;
+ unsigned long flag;
/* The caller must leave context1 empty. */
- if (pmboxq->context1 != 0) {
- return (MBX_NOT_FINISHED);
- }
+ if (pmboxq->context1 != 0)
+ return MBX_NOT_FINISHED;
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
@@ -3141,6 +3709,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
timeout * HZ);
+ spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->context1 = NULL;
/*
* if LPFC_MBX_WAKE flag is set the mailbox is completed
@@ -3148,8 +3717,11 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
retval = MBX_SUCCESS;
- else
+ else {
retval = MBX_TIMEOUT;
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flag);
}
return retval;
@@ -3158,14 +3730,27 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
+ struct lpfc_vport *vport = phba->pport;
int i = 0;
+ uint32_t ha_copy;
- while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
+ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
if (i++ > LPFC_MBOX_TMO * 1000)
return 1;
- if (lpfc_sli_handle_mb_event(phba) == 0)
- i = 0;
+ /*
+ * Call lpfc_sli_handle_mb_event only if a mailbox cmd
+ * did finish. This way we won't get the misleading
+ * "Stray Mailbox Interrupt" message.
+ */
+ spin_lock_irq(&phba->hbalock);
+ ha_copy = phba->work_ha;
+ phba->work_ha &= ~HA_MBATT;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (ha_copy & HA_MBATT)
+ if (lpfc_sli_handle_mb_event(phba) == 0)
+ i = 0;
msleep(1);
}
@@ -3176,13 +3761,20 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
- struct lpfc_hba *phba;
+ struct lpfc_hba *phba;
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
int i;
uint32_t control;
+ MAILBOX_t *mbox, *pmbox;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *mp;
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
/*
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
@@ -3204,7 +3796,7 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
/* Ignore all interrupts during initialization. */
- if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IRQ_NONE;
/*
@@ -3212,16 +3804,16 @@ lpfc_intr_handler(int irq, void *dev_id)
* Clear Attention Sources, except Error Attention (to
* preserve status) and Link Attention
*/
- spin_lock(phba->host->host_lock);
+ spin_lock(&phba->hbalock);
ha_copy = readl(phba->HAregaddr);
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
*/
- if (phba->fc_flag & FC_IGNORE_ERATT)
+ if (phba->link_flag & LS_IGNORE_ERATT)
ha_copy &= ~HA_ERATT;
writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- spin_unlock(phba->host->host_lock);
+ spin_unlock(&phba->hbalock);
if (unlikely(!ha_copy))
return IRQ_NONE;
@@ -3235,36 +3827,41 @@ lpfc_intr_handler(int irq, void *dev_id)
* Turn off Link Attention interrupts
* until CLEAR_LA done
*/
- spin_lock(phba->host->host_lock);
+ spin_lock(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock(phba->host->host_lock);
+ spin_unlock(&phba->hbalock);
}
else
work_ha_copy &= ~HA_LATT;
}
if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
- for (i = 0; i < phba->sli.num_rings; i++) {
- if (work_ha_copy & (HA_RXATT << (4*i))) {
- /*
- * Turn off Slow Rings interrupts
- */
- spin_lock(phba->host->host_lock);
- control = readl(phba->HCregaddr);
- control &= ~(HC_R0INT_ENA << i);
+ /*
+ * Turn off Slow Rings interrupts, LPFC_ELS_RING is
+ * the only slow ring.
+ */
+ status = (work_ha_copy &
+ (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status >>= (4*LPFC_ELS_RING);
+ if (status & HA_RXMASK) {
+ spin_lock(&phba->hbalock);
+ control = readl(phba->HCregaddr);
+ if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
+ control &=
+ ~(HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock(phba->host->host_lock);
}
+ spin_unlock(&phba->hbalock);
}
}
if (work_ha_copy & HA_ERATT) {
- phba->hba_state = LPFC_HBA_ERROR;
+ phba->link_state = LPFC_HBA_ERROR;
/*
* There was a link/board error. Read the
* status register to retrieve the error event
@@ -3279,14 +3876,108 @@ lpfc_intr_handler(int irq, void *dev_id)
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
- phba->stopped = 1;
+ phba->pport->stopped = 1;
+ }
+
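+ /* Mailbox attention: harvest the completion of the active
+ * mailbox command.
+ */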
+ if ((work_ha_copy & HA_MBATT) &&
+ (phba->sli.mbox_active)) {
+ pmb = phba->sli.mbox_active;
+ pmbox = &pmb->mb;
+ mbox = &phba->slim2p->mbx;
+ vport = pmb->vport;
+
+ /* First check out the status word */
+ lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
+ if (pmbox->mbxOwner != OWN_HOST) {
+ /*
+ * Stray Mailbox Interrupt, mbxCommand <cmd>
+ * mbxStatus <status>
+ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
+ LOG_SLI,
+ "%d (%d):0304 Stray Mailbox "
+ "Interrupt mbxCommand x%x "
+ "mbxStatus x%x\n",
+ phba->brd_no,
+ (vport
+ ? vport->vpi : 0),
+ pmbox->mbxCommand,
+ pmbox->mbxStatus);
+ }
+ phba->last_completion_time = jiffies;
+ del_timer_sync(&phba->sli.mbox_tmo);
+
+ phba->sli.mbox_active = NULL;
+ if (pmb->mbox_cmpl) {
+ lpfc_sli_pcimem_bcopy(mbox, pmbox,
+ MAILBOX_CMD_SIZE);
+ }
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+
+ lpfc_debugfs_disc_trc(vport,
+ LPFC_DISC_TRC_MBOX_VPORT,
+ "MBOX dflt rpi: : status:x%x rpi:x%x",
+ (uint32_t)pmbox->mbxStatus,
+ pmbox->un.varWords[0], 0);
+
+ if (!pmbox->mbxStatus) {
+ mp = (struct lpfc_dmabuf *)
+ (pmb->context1);
+ ndlp = (struct lpfc_nodelist *)
+ pmb->context2;
+
+ /* Reg_LOGIN of dflt RPI was successful.
+ * now let's get rid of the RPI using the
+ * same mbox buffer.
+ */
+ lpfc_unreg_login(phba, vport->vpi,
+ pmbox->un.varWords[0], pmb);
+ pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+ pmb->context1 = mp;
+ pmb->context2 = ndlp;
+ pmb->vport = vport;
+ spin_lock(&phba->hbalock);
+ phba->sli.sli_flag &=
+ ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock(&phba->hbalock);
+ goto send_current_mbox;
+ }
+ }
+ spin_lock(&phba->pport->work_port_lock);
+ phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+ spin_unlock(&phba->pport->work_port_lock);
+ lpfc_mbox_cmpl_put(phba, pmb);
+ }
+ if ((work_ha_copy & HA_MBATT) &&
+ (phba->sli.mbox_active == NULL)) {
+send_next_mbox:
+ spin_lock(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ pmb = lpfc_mbox_get(phba);
+ spin_unlock(&phba->hbalock);
+send_current_mbox:
+ /* Process next mailbox command if there is one */
+ if (pmb != NULL) {
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
+ lpfc_mbox_cmpl_put(phba, pmb);
+ goto send_next_mbox;
+ }
+ } else {
+ /* Turn on IOCB processing */
+ for (i = 0; i < phba->sli.num_rings; i++)
+ lpfc_sli_turn_on_ring(phba, i);
+ }
+
}
- spin_lock(phba->host->host_lock);
+ spin_lock(&phba->hbalock);
phba->work_ha |= work_ha_copy;
if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock(phba->host->host_lock);
+ lpfc_worker_wake_up(phba);
+ spin_unlock(&phba->hbalock);
}
ha_copy &= ~(phba->work_ha_mask);
@@ -3298,7 +3989,7 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status >>= (4*LPFC_FCP_RING);
- if (status & HA_RXATT)
+ if (status & HA_RXMASK)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING],
status);
@@ -3311,7 +4002,7 @@ lpfc_intr_handler(int irq, void *dev_id)
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
- if (status & HA_RXATT) {
+ if (status & HA_RXMASK) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_EXTRA_RING],
status);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 41c38d324ab0..76058505795e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -20,6 +20,7 @@
/* forward declaration for LPFC_IOCB_t's use */
struct lpfc_hba;
+struct lpfc_vport;
/* Define the context types that SLI handles for abort and sums. */
typedef enum _lpfc_ctx_cmd {
@@ -43,10 +44,12 @@ struct lpfc_iocbq {
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
+#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
uint8_t abort_count;
uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */
+ struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void *context3; /* caller context information */
@@ -56,6 +59,8 @@ struct lpfc_iocbq {
struct lpfcMboxq *mbox;
} context_un;
+ void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
@@ -68,12 +73,14 @@ struct lpfc_iocbq {
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
-#define LPFC_MBX_WAKE 1
+#define LPFC_MBX_WAKE 1
+#define LPFC_MBX_IMED_UNREG 2
typedef struct lpfcMboxq {
/* MBOXQs are used in single linked lists */
struct list_head list; /* ptr to next mailbox command */
MAILBOX_t mb; /* Mailbox cmd */
+ struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
@@ -135,6 +142,8 @@ struct lpfc_sli_ring {
uint8_t ringno; /* ring number */
uint16_t numCiocb; /* number of command iocb's per ring */
uint16_t numRiocb; /* number of rsp iocb's per ring */
+ uint16_t sizeCiocb; /* Size of command iocb's in this ring */
+ uint16_t sizeRiocb; /* Size of response iocb's in this ring */
uint32_t fast_iotag; /* max fastlookup based iotag */
uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@@ -165,6 +174,34 @@ struct lpfc_sli_ring {
struct lpfc_sli_ring *);
};
+/* Structure used for configuring rings to a specific profile or rctl / type */
+struct lpfc_hbq_init {
+ uint32_t rn; /* Receive buffer notification */
+ uint32_t entry_count; /* max # of entries in HBQ */
+ uint32_t headerLen; /* 0 if not profile 4 or 5 */
+ uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
+ uint32_t profile; /* Selection profile 0=all, 7=logentry */
+ uint32_t ring_mask; /* Binds HBQ to a ring e.g. Ring0=b0001,
+ * ring2=b0100 */
+ uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */
+
+ uint32_t seqlenoff;
+ uint32_t maxlen;
+ uint32_t seqlenbcnt;
+ uint32_t cmdcodeoff;
+ uint32_t cmdmatch[8];
+ uint32_t mask_count; /* number of mask entries in prt array */
+ struct hbq_mask hbqMasks[6];
+
+ /* Non-config rings fields to keep track of buffer allocations */
+ uint32_t buffer_count; /* number of buffers allocated */
+ uint32_t init_count; /* number to allocate when initialized */
+ uint32_t add_count; /* number to allocate when starved */
+};
+
+#define LPFC_MAX_HBQ 16
+
+
/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_stat {
uint64_t mbox_stat_err; /* Mbox cmds completed status error */
@@ -197,6 +234,7 @@ struct lpfc_sli {
#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
+#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */
@@ -209,6 +247,7 @@ struct lpfc_sli {
uint16_t mboxq_cnt; /* current length of queue */
uint16_t mboxq_max; /* max length */
LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
+ struct list_head mboxq_cmpl;
struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
cmd */
@@ -221,12 +260,6 @@ struct lpfc_sli {
struct lpfc_lnk_stat lnk_stat_offsets;
};
-/* Given a pointer to the start of the ring, and the slot number of
- * the desired iocb entry, calc a pointer to that entry.
- * (assume iocb entry size is 32 bytes, or 8 words)
- */
-#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
-
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 92a9107019d2..a5bc79eef052 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.1.12"
+#define LPFC_DRIVER_VERSION "8.2.1"
#define LPFC_DRIVER_NAME "lpfc"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 000000000000..85797dbf5478
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,523 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_version.h"
+#include "lpfc_vport.h"
+
+inline void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state)
+{
+ struct fc_vport *fc_vport = vport->fc_vport;
+
+ if (fc_vport) {
+ /*
+ * When the transport defines fc_vport_set_state() we will replace
+ * this code with the following line
+ */
+ /* fc_vport_set_state(fc_vport, new_state); */
+ if (new_state != FC_VPORT_INITIALIZING)
+ fc_vport->vport_last_state = fc_vport->vport_state;
+ fc_vport->vport_state = new_state;
+ }
+
+ /* For all the error states we will set the internal state to FAILED */
+ switch (new_state) {
+ case FC_VPORT_NO_FABRIC_SUPP:
+ case FC_VPORT_NO_FABRIC_RSCS:
+ case FC_VPORT_FABRIC_LOGOUT:
+ case FC_VPORT_FABRIC_REJ_WWN:
+ case FC_VPORT_FAILED:
+ vport->port_state = LPFC_VPORT_FAILED;
+ break;
+ case FC_VPORT_LINKDOWN:
+ vport->port_state = LPFC_VPORT_UNKNOWN;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
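+/*
+ * Allocate the next free VPI from the HBA bitmap. VPI zero is reserved
+ * for the physical port; returns 0 when no VPI is available.
+ */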
+static int
+lpfc_alloc_vpi(struct lpfc_hba *phba)
+{
+ int vpi;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Start at bit 1 because vpi zero is reserved for the physical port */
+ vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
+ if (vpi > phba->max_vpi)
+ vpi = 0;
+ else
+ set_bit(vpi, phba->vpi_bmask);
+ spin_unlock_irq(&phba->hbalock);
+ return vpi;
+}
+
+static void
+lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
+{
+ spin_lock_irq(&phba->hbalock);
+ clear_bit(vpi, phba->vpi_bmask);
+ spin_unlock_irq(&phba->hbalock);
+}
+
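+/*
+ * Issue a READ_SPARAM mailbox command for this vport and cache the
+ * returned service parameters, node name and port name.
+ */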
+static int
+lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_dmabuf *mp;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ return -ENOMEM;
+ }
+ mb = &pmb->mb;
+
+ lpfc_read_sparam(phba, pmb, vport->vpi);
+ /*
+ * Grab buffer pointer and clear context1 so we can use
+ * lpfc_sli_issue_mbox_wait()
+ */
+ mp = (struct lpfc_dmabuf *) pmb->context1;
+ pmb->context1 = NULL;
+
+ pmb->vport = vport;
+ rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
+ "%d (%d):1818 VPort failed init, mbxCmd x%x "
+ "READ_SPARM mbxStatus x%x, rc = x%x\n",
+ phba->brd_no, vport->vpi,
+ mb->mbxCommand, mb->mbxStatus, rc);
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
+ memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+ sizeof (struct lpfc_name));
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof (struct lpfc_name));
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return 0;
+}
+
+static int
+lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
+ const char *name_type)
+{
+ /* ensure that IEEE format 1 addresses
+ * contain zeros in bits 59-48
+ */
+ if (!((wwn->u.wwn[0] >> 4) == 1 &&
+ ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
+ return 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x\n",
+ phba->brd_no, name_type,
+ wwn->u.wwn[0], wwn->u.wwn[1],
+ wwn->u.wwn[2], wwn->u.wwn[3],
+ wwn->u.wwn[4], wwn->u.wwn[5],
+ wwn->u.wwn[6], wwn->u.wwn[7]);
+ return 0;
+}
+
+static int
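+/* Return 1 if the new vport's WWPN is not already in use on this HBA. */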
+lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
+{
+ struct lpfc_vport *vport;
+
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport == new_vport)
+ continue;
+ /* If they match, return not unique */
+ if (memcmp(&vport->fc_sparam.portName,
+ &new_vport->fc_sparam.portName,
+ sizeof(struct lpfc_name)) == 0)
+ return 0;
+ }
+ return 1;
+}
+
+int
+lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_vport *pport =
+ (struct lpfc_vport *) fc_vport->shost->hostdata;
+ struct lpfc_hba *phba = pport->phba;
+ struct lpfc_vport *vport = NULL;
+ int instance;
+ int vpi;
+ int rc = VPORT_ERROR;
+
+ if ((phba->sli_rev < 3) ||
+ !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1808 Create VPORT failed: "
+ "NPIV is not enabled: SLImode:%d\n",
+ phba->brd_no, phba->sli_rev);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ vpi = lpfc_alloc_vpi(phba);
+ if (vpi == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1809 Create VPORT failed: "
+ "Max VPORTs (%d) exceeded\n",
+ phba->brd_no, phba->max_vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ /* Assign an unused board number */
+ if ((instance = lpfc_get_instance()) < 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1810 Create VPORT failed: Cannot get "
+ "instance number\n", phba->brd_no);
+ lpfc_free_vpi(phba, vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ vport = lpfc_create_port(phba, instance, fc_vport);
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1811 Create VPORT failed: vpi x%x\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ vport->vpi = vpi;
+ lpfc_debugfs_initialize(vport);
+
+ if (lpfc_vport_sparm(phba, vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1813 Create VPORT failed: vpi:%d "
+ "Cannot get sparam\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_NORESOURCES;
+ goto error_out;
+ }
+
+ memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
+ memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
+
+ if (fc_vport->node_name != 0)
+ u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+ if (fc_vport->port_name != 0)
+ u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+
+ memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
+ memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
+
+ if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
+ !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1821 Create VPORT failed: vpi:%d "
+ "Invalid WWN format\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ if (!lpfc_unique_wwpn(phba, vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1823 Create VPORT failed: vpi:%d "
+ "Duplicate WWN on HBA\n",
+ phba->brd_no, vpi);
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
+ *(struct lpfc_vport **)fc_vport->dd_data = vport;
+ vport->fc_vport = fc_vport;
+
+ if ((phba->link_state < LPFC_LINK_UP) ||
+ (phba->fc_topology == TOPOLOGY_LOOP)) {
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ if (disable) {
+ rc = VPORT_OK;
+ goto out;
+ }
+
+ /* Use the physical node's Fabric NDLP to determine if the link is
+ * up and ready to FDISC.
+ */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+ lpfc_set_disctmo(vport);
+ lpfc_initial_fdisc(vport);
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0262 No NPIV Fabric "
+ "support\n",
+ phba->brd_no, vport->vpi);
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ }
+ rc = VPORT_OK;
+
+out:
+ lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
+error_out:
+ return rc;
+}
+
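+/*
+ * Log the vport out of the fabric, tear down its discovery state and
+ * unregister its VPI, leaving the vport disabled.
+ */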
+int
+disable_vport(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+ long timeout;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (ndlp && phba->link_state >= LPFC_LINK_UP) {
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);
+ }
+
+ lpfc_sli_host_down(vport);
+
+ /* Mark all nodes for discovery so we can remove them by
+ * calling lpfc_cleanup_rpis(vport, 1)
+ */
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
+ lpfc_cleanup_rpis(vport, 1);
+
+ lpfc_stop_vport_timers(vport);
+ lpfc_unreg_all_rpis(vport);
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+ * scsi_host_put() to release the vport.
+ */
+ lpfc_mbx_unreg_vpi(vport);
+
+ lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
+ return VPORT_OK;
+}
+
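+/*
+ * Bring a disabled vport back up: start FDISC discovery if the fabric
+ * link is up, otherwise mark the vport LINKDOWN or FAILED.
+ */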
+int
+enable_vport(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL;
+
+ if ((phba->link_state < LPFC_LINK_UP) ||
+ (phba->fc_topology == TOPOLOGY_LOOP)) {
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ return VPORT_OK;
+ }
+
+ vport->load_flag |= FC_LOADING;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+
+ /* Use the physical node's Fabric NDLP to determine if the link is
+ * up and ready to FDISC.
+ */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
+ lpfc_set_disctmo(vport);
+ lpfc_initial_fdisc(vport);
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "%d (%d):0264 No NPIV Fabric "
+ "support\n",
+ phba->brd_no, vport->vpi);
+ }
+ } else {
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ }
+
+ return VPORT_OK;
+}
+
+int
+lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ if (disable)
+ return disable_vport(fc_vport);
+ else
+ return enable_vport(fc_vport);
+}
+
+
+int
+lpfc_vport_delete(struct fc_vport *fc_vport)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *next_ndlp;
+ struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+ struct lpfc_hba *phba = vport->phba;
+ long timeout;
+ int rc = VPORT_ERROR;
+
+ /*
+ * This is a bit of a mess. We want to ensure the shost doesn't get
+ * torn down until we're done with the embedded lpfc_vport structure.
+ *
+ * Beyond holding a reference for this function, we also need a
+ * reference for outstanding I/O requests we schedule during delete
+ * processing. But once we scsi_remove_host() we can no longer obtain
+ * a reference through scsi_host_get().
+ *
+ * So we take two references here. We release one reference at the
+ * bottom of the function -- after delinking the vport. And we
+ * release the other at the completion of the unreg_vpi that gets
+ * initiated after we've disposed of all other resources associated
+ * with the port.
+ */
+ if (!scsi_host_get(shost) || !scsi_host_get(shost))
+ return VPORT_INVAL;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "%d:1812 vport_delete failed: Cannot delete "
+ "physical host\n", phba->brd_no);
+ goto out;
+ }
+
+ vport->load_flag |= FC_UNLOADING;
+
+ kfree(vport->vname);
+ lpfc_debugfs_terminate(vport);
+ fc_remove_host(lpfc_shost_from_vport(vport));
+ scsi_remove_host(lpfc_shost_from_vport(vport));
+
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
+ phba->link_state >= LPFC_LINK_UP) {
+
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+ /* Cannot find existing Fabric ndlp, allocate one */
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp)
+ goto skip_logo;
+ lpfc_nlp_init(vport, ndlp, Fabric_DID);
+ } else {
+ lpfc_dequeue_node(vport, ndlp);
+ }
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);
+ }
+
+skip_logo:
+ lpfc_sli_host_down(vport);
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
+
+ lpfc_stop_vport_timers(vport);
+ lpfc_unreg_all_rpis(vport);
+ lpfc_unreg_default_rpis(vport);
+ /*
+ * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
+ * scsi_host_put() to release the vport.
+ */
+ lpfc_mbx_unreg_vpi(vport);
+
+ lpfc_free_vpi(phba, vport->vpi);
+ vport->work_port_events = 0;
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&vport->listentry);
+ spin_unlock_irq(&phba->hbalock);
+
+ rc = VPORT_OK;
+out:
+ scsi_host_put(shost);
+ return rc;
+}
+
+
+EXPORT_SYMBOL(lpfc_vport_create);
+EXPORT_SYMBOL(lpfc_vport_delete);
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 000000000000..f223550f8cba
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,113 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#ifndef _H_LPFC_VPORT
+#define _H_LPFC_VPORT
+
+/* API version values (each will be an individual bit) */
+#define VPORT_API_VERSION_1 0x01
+
+/* Values returned via lpfc_vport_getinfo() */
+struct vport_info {
+
+ uint32_t api_versions;
+ uint8_t linktype;
+#define VPORT_TYPE_PHYSICAL 0
+#define VPORT_TYPE_VIRTUAL 1
+
+ uint8_t state;
+#define VPORT_STATE_OFFLINE 0
+#define VPORT_STATE_ACTIVE 1
+#define VPORT_STATE_FAILED 2
+
+ uint8_t fail_reason;
+ uint8_t prev_fail_reason;
+#define VPORT_FAIL_UNKNOWN 0
+#define VPORT_FAIL_LINKDOWN 1
+#define VPORT_FAIL_FAB_UNSUPPORTED 2
+#define VPORT_FAIL_FAB_NORESOURCES 3
+#define VPORT_FAIL_FAB_LOGOUT 4
+#define VPORT_FAIL_ADAP_NORESOURCES 5
+
+ uint8_t node_name[8]; /* WWNN */
+ uint8_t port_name[8]; /* WWPN */
+
+ struct Scsi_Host *shost;
+
+/* Following values are valid only on physical links */
+ uint32_t vports_max;
+ uint32_t vports_inuse;
+ uint32_t rpi_max;
+ uint32_t rpi_inuse;
+#define VPORT_CNT_INVALID 0xFFFFFFFF
+};
+
+/* data used in link creation */
+struct vport_data {
+ uint32_t api_version;
+
+ uint32_t options;
+#define VPORT_OPT_AUTORETRY 0x01
+
+ uint8_t node_name[8]; /* WWNN */
+ uint8_t port_name[8]; /* WWPN */
+
+/*
+ * Upon successful creation, vport_shost will point to the new Scsi_Host
+ * structure for the new virtual link.
+ */
+ struct Scsi_Host *vport_shost;
+};
+
+/* API function return codes */
+#define VPORT_OK 0
+#define VPORT_ERROR -1
+#define VPORT_INVAL -2
+#define VPORT_NOMEM -3
+#define VPORT_NORESOURCES -4
+
+int lpfc_vport_create(struct fc_vport *, bool);
+int lpfc_vport_delete(struct fc_vport *);
+int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
+int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
+
+/*
+ * queuecommand VPORT-specific return codes. Specified in the host byte code.
+ * Returned when the virtual link has failed or is not active.
+ */
+#define DID_VPORT_ERROR 0x0f
+
+#define VPORT_INFO 0x1
+#define VPORT_CREATE 0x2
+#define VPORT_DELETE 0x4
+
+struct vport_cmd_tag {
+ uint32_t cmd;
+ struct vport_data cdata;
+ struct vport_info cinfo;
+ void *vport;
+ int vport_num;
+};
+
+void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state);
+
+#endif /* _H_LPFC_VPORT */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 5806ede120a4..b12ad7c7c673 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -77,7 +77,7 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
for (i = 0; i < cmd->cmd_len; ++i)
printk(" %.2x", cmd->cmnd[i]);
printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
- cmd->use_sg, cmd->request_bufflen, cmd->request_buffer);
+ scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
}
#endif
@@ -173,8 +173,7 @@ static void mac53c94_start(struct fsc_state *state)
writeb(CMD_SELECT, &regs->command);
state->phase = selecting;
- if (cmd->use_sg > 0 || cmd->request_bufflen != 0)
- set_dma_cmds(state, cmd);
+ set_dma_cmds(state, cmd);
}
static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
@@ -262,7 +261,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
writeb(CMD_NOP, &regs->command);
/* set DMA controller going if any data to transfer */
if ((stat & (STAT_MSG|STAT_CD)) == 0
- && (cmd->use_sg > 0 || cmd->request_bufflen != 0)) {
+ && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
nb = cmd->SCp.this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
@@ -310,14 +309,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
}
writel(RUN << 16, &dma->control); /* stop dma */
- if (cmd->use_sg != 0) {
- pci_unmap_sg(state->pdev,
- (struct scatterlist *)cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else {
- pci_unmap_single(state->pdev, state->dma_addr,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
/* should check dma status */
writeb(CMD_I_COMPLETE, &regs->command);
state->phase = completing;
@@ -365,47 +357,35 @@ static void cmd_done(struct fsc_state *state, int result)
*/
static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
{
- int i, dma_cmd, total;
+ int i, dma_cmd, total, nseg;
struct scatterlist *scl;
struct dbdma_cmd *dcmds;
dma_addr_t dma_addr;
u32 dma_len;
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+ if (!nseg)
+ return;
+
dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
OUTPUT_MORE : INPUT_MORE;
dcmds = state->dma_cmds;
- if (cmd->use_sg > 0) {
- int nseg;
-
- total = 0;
- scl = (struct scatterlist *) cmd->request_buffer;
- nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
- cmd->sc_data_direction);
- for (i = 0; i < nseg; ++i) {
- dma_addr = sg_dma_address(scl);
- dma_len = sg_dma_len(scl);
- if (dma_len > 0xffff)
- panic("mac53c94: scatterlist element >= 64k");
- total += dma_len;
- st_le16(&dcmds->req_count, dma_len);
- st_le16(&dcmds->command, dma_cmd);
- st_le32(&dcmds->phy_addr, dma_addr);
- dcmds->xfer_status = 0;
- ++scl;
- ++dcmds;
- }
- } else {
- total = cmd->request_bufflen;
- if (total > 0xffff)
- panic("mac53c94: transfer size >= 64k");
- dma_addr = pci_map_single(state->pdev, cmd->request_buffer,
- total, cmd->sc_data_direction);
- state->dma_addr = dma_addr;
- st_le16(&dcmds->req_count, total);
+ total = 0;
+
+ scsi_for_each_sg(cmd, scl, nseg, i) {
+ dma_addr = sg_dma_address(scl);
+ dma_len = sg_dma_len(scl);
+ if (dma_len > 0xffff)
+ panic("mac53c94: scatterlist element >= 64k");
+ total += dma_len;
+ st_le16(&dcmds->req_count, dma_len);
+ st_le16(&dcmds->command, dma_cmd);
st_le32(&dcmds->phy_addr, dma_addr);
dcmds->xfer_status = 0;
++dcmds;
}
+
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
st_le16(&dcmds[-1].command, dma_cmd);
st_le16(&dcmds->command, DBDMA_STOP);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3cce75d70263..3907f6718ede 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -523,10 +523,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
/*
* filter the internal and ioctl commands
*/
- if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) {
- return cmd->request_buffer;
- }
-
+ if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
+ return (scb_t *)cmd->host_scribble;
/*
* We know what channels our logical drives are on - mega_find_card()
@@ -657,22 +655,14 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
case MODE_SENSE: {
char *buf;
+ struct scatterlist *sg;
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ sg = scsi_sglist(cmd);
+ buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
- sg = (struct scatterlist *)cmd->request_buffer;
- buf = kmap_atomic(sg->page, KM_IRQ0) +
- sg->offset;
- } else
- buf = cmd->request_buffer;
memset(buf, 0, cmd->cmnd[4]);
- if (cmd->use_sg) {
- struct scatterlist *sg;
+ kunmap_atomic(buf - sg->offset, KM_IRQ0);
- sg = (struct scatterlist *)cmd->request_buffer;
- kunmap_atomic(buf - sg->offset, KM_IRQ0);
- }
cmd->result = (DID_OK << 16);
cmd->scsi_done(cmd);
return NULL;
@@ -1551,23 +1541,15 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
islogical = adapter->logdrv_chan[cmd->device->channel];
if( cmd->cmnd[0] == INQUIRY && !islogical ) {
- if( cmd->use_sg ) {
- sgl = (struct scatterlist *)
- cmd->request_buffer;
-
- if( sgl->page ) {
- c = *(unsigned char *)
+ sgl = scsi_sglist(cmd);
+ if( sgl->page ) {
+ c = *(unsigned char *)
page_address((&sgl[0])->page) +
(&sgl[0])->offset;
- }
- else {
- printk(KERN_WARNING
- "megaraid: invalid sg.\n");
- c = 0;
- }
- }
- else {
- c = *(u8 *)cmd->request_buffer;
+ } else {
+ printk(KERN_WARNING
+ "megaraid: invalid sg.\n");
+ c = 0;
}
if(IS_RAID_CH(adapter, cmd->device->channel) &&
@@ -1704,30 +1686,14 @@ mega_rundoneq (adapter_t *adapter)
static void
mega_free_scb(adapter_t *adapter, scb_t *scb)
{
- unsigned long length;
-
switch( scb->dma_type ) {
case MEGA_DMA_TYPE_NONE:
break;
- case MEGA_BULK_DATA:
- if (scb->cmd->use_sg == 0)
- length = scb->cmd->request_bufflen;
- else {
- struct scatterlist *sgl =
- (struct scatterlist *)scb->cmd->request_buffer;
- length = sgl->length;
- }
- pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
- length, scb->dma_direction);
- break;
-
case MEGA_SGLIST:
- pci_unmap_sg(adapter->dev, scb->cmd->request_buffer,
- scb->cmd->use_sg, scb->dma_direction);
+ scsi_dma_unmap(scb->cmd);
break;
-
default:
break;
}
@@ -1767,80 +1733,33 @@ __mega_busywait_mbox (adapter_t *adapter)
static int
mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
{
- struct scatterlist *sgl;
- struct page *page;
- unsigned long offset;
- unsigned int length;
+ struct scatterlist *sg;
Scsi_Cmnd *cmd;
int sgcnt;
int idx;
cmd = scb->cmd;
- /* Scatter-gather not used */
- if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
- !adapter->has_64bit_addr)) {
-
- if (cmd->use_sg == 0) {
- page = virt_to_page(cmd->request_buffer);
- offset = offset_in_page(cmd->request_buffer);
- length = cmd->request_bufflen;
- } else {
- sgl = (struct scatterlist *)cmd->request_buffer;
- page = sgl->page;
- offset = sgl->offset;
- length = sgl->length;
- }
-
- scb->dma_h_bulkdata = pci_map_page(adapter->dev,
- page, offset,
- length,
- scb->dma_direction);
- scb->dma_type = MEGA_BULK_DATA;
-
- /*
- * We need to handle special 64-bit commands that need a
- * minimum of 1 SG
- */
- if( adapter->has_64bit_addr ) {
- scb->sgl64[0].address = scb->dma_h_bulkdata;
- scb->sgl64[0].length = length;
- *buf = (u32)scb->sgl_dma_addr;
- *len = (u32)length;
- return 1;
- }
- else {
- *buf = (u32)scb->dma_h_bulkdata;
- *len = (u32)length;
- }
- return 0;
- }
-
- sgl = (struct scatterlist *)cmd->request_buffer;
-
/*
* Copy Scatter-Gather list info into controller structure.
*
* The number of sg elements returned must not exceed our limit
*/
- sgcnt = pci_map_sg(adapter->dev, sgl, cmd->use_sg,
- scb->dma_direction);
+ sgcnt = scsi_dma_map(cmd);
scb->dma_type = MEGA_SGLIST;
- BUG_ON(sgcnt > adapter->sglen);
+ BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
*len = 0;
- for( idx = 0; idx < sgcnt; idx++, sgl++ ) {
-
- if( adapter->has_64bit_addr ) {
- scb->sgl64[idx].address = sg_dma_address(sgl);
- *len += scb->sgl64[idx].length = sg_dma_len(sgl);
- }
- else {
- scb->sgl[idx].address = sg_dma_address(sgl);
- *len += scb->sgl[idx].length = sg_dma_len(sgl);
+ scsi_for_each_sg(cmd, sg, sgcnt, idx) {
+ if (adapter->has_64bit_addr) {
+ scb->sgl64[idx].address = sg_dma_address(sg);
+ *len += scb->sgl64[idx].length = sg_dma_len(sg);
+ } else {
+ scb->sgl[idx].address = sg_dma_address(sg);
+ *len += scb->sgl[idx].length = sg_dma_len(sg);
}
}
@@ -3571,7 +3490,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
/*
* The user passthru structure
*/
- upthru = (mega_passthru __user *)MBOX(uioc)->xferaddr;
+ upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
/*
* Copy in the user passthru here.
@@ -3623,7 +3542,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
/*
* Get the user data
*/
- if( copy_from_user(data, (char __user *)uxferaddr,
+ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
pthru->dataxferlen) ) {
rval = (-EFAULT);
goto freemem_and_return;
@@ -3649,7 +3568,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
* Is data going up-stream
*/
if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
- if( copy_to_user((char __user *)uxferaddr, data,
+ if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
pthru->dataxferlen) ) {
rval = (-EFAULT);
}
@@ -3702,7 +3621,7 @@ freemem_and_return:
/*
* Get the user data
*/
- if( copy_from_user(data, (char __user *)uxferaddr,
+ if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
uioc.xferlen) ) {
pci_free_consistent(pdev,
@@ -3742,7 +3661,7 @@ freemem_and_return:
* Is data going up-stream
*/
if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
- if( copy_to_user((char __user *)uxferaddr, data,
+ if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
uioc.xferlen) ) {
rval = (-EFAULT);
@@ -4494,7 +4413,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scmd->device = sdev;
scmd->device->host = adapter->host;
- scmd->request_buffer = (void *)scb;
+ scmd->host_scribble = (void *)scb;
scmd->cmnd[0] = MEGA_INTERNAL_CMD;
scb->state |= SCB_ACTIVE;
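Editor's note: the megaraid.c hunks above all follow one conversion. The driver stops special-casing single-buffer commands, because the midlayer now hands every command an sg list that scsi_dma_map() maps in one call and scsi_dma_unmap() tears down in one call. A minimal sketch of that accessor-based pattern follows, assuming a hypothetical hardware SG descriptor layout; the struct and helper names are illustrative, only the scsi_* accessors are the real midlayer API.

/* Sketch of the scsi_dma_map()/scsi_for_each_sg() pattern used in the
 * hunks above.  example_sge, example_build_sgl() and example_complete()
 * are hypothetical; the scsi_* calls are the real 2.6.23-era accessors.
 */
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

struct example_sge {            /* hypothetical hardware SG descriptor */
	u64 addr;
	u32 len;
};

static int example_build_sgl(struct scsi_cmnd *cmd, struct example_sge *sge)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);   /* 0 = no data, <0 = mapping failure */
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		sge[i].addr = sg_dma_address(sg);
		sge[i].len  = sg_dma_len(sg);
	}
	return nseg;
}

static void example_complete(struct scsi_cmnd *cmd)
{
	scsi_dma_unmap(cmd);        /* single unmap path, no use_sg checks */
	cmd->scsi_done(cmd);
}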
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 26e1e6c55654..fef9ac958754 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 04d0b6918c61..c6a53dccc16a 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -454,7 +454,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
// Allocate the per driver initialization structure
- adapter = kmalloc(sizeof(adapter_t), GFP_KERNEL);
+ adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
if (adapter == NULL) {
con_log(CL_ANN, (KERN_WARNING
@@ -462,7 +462,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_probe_one;
}
- memset(adapter, 0, sizeof(adapter_t));
// set up PCI related soft state and other pre-known parameters
@@ -746,10 +745,9 @@ megaraid_init_mbox(adapter_t *adapter)
* Allocate and initialize the init data structure for mailbox
* controllers
*/
- raid_dev = kmalloc(sizeof(mraid_device_t), GFP_KERNEL);
+ raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
if (raid_dev == NULL) return -1;
- memset(raid_dev, 0, sizeof(mraid_device_t));
/*
* Attach the adapter soft state to raid device soft state
@@ -1050,8 +1048,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* since the calling routine does not yet know the number of available
* commands.
*/
- adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS,
- GFP_KERNEL);
+ adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
if (adapter->kscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
@@ -1059,7 +1056,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
__LINE__));
goto out_free_ibuf;
}
- memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS);
// memory allocation for our command packets
if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
@@ -1378,8 +1374,6 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
struct scatterlist *sgl;
mbox_ccb_t *ccb;
- struct page *page;
- unsigned long offset;
struct scsi_cmnd *scp;
int sgcnt;
int i;
@@ -1388,48 +1382,16 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
scp = scb->scp;
ccb = (mbox_ccb_t *)scb->ccb;
+ sgcnt = scsi_dma_map(scp);
+ BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
+
// no mapping required if no data to be transferred
- if (!scp->request_buffer || !scp->request_bufflen)
+ if (!sgcnt)
return 0;
- if (!scp->use_sg) { /* scatter-gather list not used */
-
- page = virt_to_page(scp->request_buffer);
-
- offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
-
- ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
- scp->request_bufflen,
- scb->dma_direction);
- scb->dma_type = MRAID_DMA_WBUF;
-
- /*
- * We need to handle special 64-bit commands that need a
- * minimum of 1 SG
- */
- sgcnt = 1;
- ccb->sgl64[0].address = ccb->buf_dma_h;
- ccb->sgl64[0].length = scp->request_bufflen;
-
- return sgcnt;
- }
-
- sgl = (struct scatterlist *)scp->request_buffer;
-
- // The number of sg elements returned must not exceed our limit
- sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
- scb->dma_direction);
-
- if (sgcnt > adapter->sglen) {
- con_log(CL_ANN, (KERN_CRIT
- "megaraid critical: too many sg elements:%d\n",
- sgcnt));
- BUG();
- }
-
scb->dma_type = MRAID_DMA_WSG;
- for (i = 0; i < sgcnt; i++, sgl++) {
+ scsi_for_each_sg(scp, sgl, sgcnt, i) {
ccb->sgl64[i].address = sg_dma_address(sgl);
ccb->sgl64[i].length = sg_dma_len(sgl);
}
@@ -1489,19 +1451,11 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
adapter->outstanding_cmds++;
- if (scb->dma_direction == PCI_DMA_TODEVICE) {
- if (!scb->scp->use_sg) { // sg list not used
- pci_dma_sync_single_for_device(adapter->pdev,
- ccb->buf_dma_h,
- scb->scp->request_bufflen,
- PCI_DMA_TODEVICE);
- }
- else {
- pci_dma_sync_sg_for_device(adapter->pdev,
- scb->scp->request_buffer,
- scb->scp->use_sg, PCI_DMA_TODEVICE);
- }
- }
+ if (scb->dma_direction == PCI_DMA_TODEVICE)
+ pci_dma_sync_sg_for_device(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
+ PCI_DMA_TODEVICE);
mbox->busy = 1; // Set busy
mbox->poll = 0;
@@ -1624,29 +1578,26 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
case MODE_SENSE:
- if (scp->use_sg) {
- struct scatterlist *sgl;
- caddr_t vaddr;
+ {
+ struct scatterlist *sgl;
+ caddr_t vaddr;
- sgl = (struct scatterlist *)scp->request_buffer;
- if (sgl->page) {
- vaddr = (caddr_t)
- (page_address((&sgl[0])->page)
- + (&sgl[0])->offset);
+ sgl = scsi_sglist(scp);
+ if (sgl->page) {
+ vaddr = (caddr_t)
+ (page_address((&sgl[0])->page)
+ + (&sgl[0])->offset);
- memset(vaddr, 0, scp->cmnd[4]);
- }
- else {
- con_log(CL_ANN, (KERN_WARNING
- "megaraid mailbox: invalid sg:%d\n",
- __LINE__));
- }
+ memset(vaddr, 0, scp->cmnd[4]);
}
else {
- memset(scp->request_buffer, 0, scp->cmnd[4]);
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
}
- scp->result = (DID_OK << 16);
- return NULL;
+ }
+ scp->result = (DID_OK << 16);
+ return NULL;
case INQUIRY:
/*
@@ -1716,7 +1667,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
mbox->cmd = MBOXCMD_PASSTHRU64;
scb->dma_direction = scp->sc_data_direction;
- pthru->dataxferlen = scp->request_bufflen;
+ pthru->dataxferlen = scsi_bufflen(scp);
pthru->dataxferaddr = ccb->sgl_dma_h;
pthru->numsge = megaraid_mbox_mksgl(adapter,
scb);
@@ -2050,8 +2001,8 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
- if (scp->request_bufflen) {
- pthru->dataxferlen = scp->request_bufflen;
+ if (scsi_bufflen(scp)) {
+ pthru->dataxferlen = scsi_bufflen(scp);
pthru->dataxferaddr = ccb->sgl_dma_h;
pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
}
@@ -2099,8 +2050,8 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
- if (scp->request_bufflen) {
- epthru->dataxferlen = scp->request_bufflen;
+ if (scsi_bufflen(scp)) {
+ epthru->dataxferlen = scsi_bufflen(scp);
epthru->dataxferaddr = ccb->sgl_dma_h;
epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
}
@@ -2266,37 +2217,13 @@ megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
ccb = (mbox_ccb_t *)scb->ccb;
- switch (scb->dma_type) {
-
- case MRAID_DMA_WBUF:
- if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
- pci_dma_sync_single_for_cpu(adapter->pdev,
- ccb->buf_dma_h,
- scb->scp->request_bufflen,
+ if (scb->dma_direction == PCI_DMA_FROMDEVICE)
+ pci_dma_sync_sg_for_cpu(adapter->pdev,
+ scsi_sglist(scb->scp),
+ scsi_sg_count(scb->scp),
PCI_DMA_FROMDEVICE);
- }
-
- pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
- scb->scp->request_bufflen, scb->dma_direction);
-
- break;
-
- case MRAID_DMA_WSG:
- if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
- pci_dma_sync_sg_for_cpu(adapter->pdev,
- scb->scp->request_buffer,
- scb->scp->use_sg, PCI_DMA_FROMDEVICE);
- }
-
- pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
- scb->scp->use_sg, scb->dma_direction);
-
- break;
-
- default:
- break;
- }
+ scsi_dma_unmap(scb->scp);
return;
}
@@ -2399,24 +2326,16 @@ megaraid_mbox_dpc(unsigned long devp)
if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
- if (scp->use_sg) {
- sgl = (struct scatterlist *)
- scp->request_buffer;
-
- if (sgl->page) {
- c = *(unsigned char *)
+ sgl = scsi_sglist(scp);
+ if (sgl->page) {
+ c = *(unsigned char *)
(page_address((&sgl[0])->page) +
- (&sgl[0])->offset);
- }
- else {
- con_log(CL_ANN, (KERN_WARNING
- "megaraid mailbox: invalid sg:%d\n",
- __LINE__));
- c = 0;
- }
- }
- else {
- c = *(uint8_t *)scp->request_buffer;
+ (&sgl[0])->offset);
+ } else {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid mailbox: invalid sg:%d\n",
+ __LINE__));
+ c = 0;
}
if ((c & 0x1F ) == TYPE_DISK) {
@@ -3572,8 +3491,7 @@ megaraid_cmm_register(adapter_t *adapter)
int i;
// Allocate memory for the base list of scb for management module.
- adapter->uscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_USER_CMDS,
- GFP_KERNEL);
+ adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
if (adapter->uscb_list == NULL) {
con_log(CL_ANN, (KERN_WARNING
@@ -3581,7 +3499,6 @@ megaraid_cmm_register(adapter_t *adapter)
__LINE__));
return -1;
}
- memset(adapter->uscb_list, 0, sizeof(scb_t) * MBOX_MAX_USER_CMDS);
// Initialize the synchronization parameters for resources for
@@ -3957,7 +3874,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
megaraid_sysfs_free_resources(adapter);
}
- sema_init(&raid_dev->sysfs_sem, 1);
+ mutex_init(&raid_dev->sysfs_mtx);
init_waitqueue_head(&raid_dev->sysfs_wait_q);
@@ -4058,7 +3975,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/*
* Allow only one read at a time to go through the sysfs attributes
*/
- down(&raid_dev->sysfs_sem);
+ mutex_lock(&raid_dev->sysfs_mtx);
uioc = raid_dev->sysfs_uioc;
mbox64 = raid_dev->sysfs_mbox64;
@@ -4134,7 +4051,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
del_timer_sync(timerp);
- up(&raid_dev->sysfs_sem);
+ mutex_unlock(&raid_dev->sysfs_mtx);
return rval;
}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 9de803cebd4b..626459d1e902 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -168,7 +168,7 @@ typedef struct {
* @hw_error : set if FW not responding
* @fast_load : If set, skip physical device scanning
* @channel_class : channel class, RAID or SCSI
- * @sysfs_sem : semaphore to serialize access to sysfs res.
+ * @sysfs_mtx : mutex to serialize access to sysfs res.
* @sysfs_uioc : management packet to issue FW calls from sysfs
* @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs
* @sysfs_buffer : data buffer for FW commands issued from sysfs
@@ -208,7 +208,7 @@ typedef struct {
int hw_error;
int fast_load;
uint8_t channel_class;
- struct semaphore sysfs_sem;
+ struct mutex sysfs_mtx;
uioc_t *sysfs_uioc;
mbox64_t *sysfs_mbox64;
caddr_t sysfs_buffer;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 84d9c27133d4..b6587a6d8486 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -890,12 +890,11 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
if (lld_adp->drvr_type != DRVRTYPE_MBOX)
return (-EINVAL);
- adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
+ adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
if (!adapter)
return -ENOMEM;
- memset(adapter, 0, sizeof(mraid_mmadp_t));
adapter->unique_id = lld_adp->unique_id;
adapter->drvr_type = lld_adp->drvr_type;
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index e2cf12ef3688..ebb948c016bb 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -433,34 +433,15 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
int sge_count;
struct scatterlist *os_sgl;
- /*
- * Return 0 if there is no data transfer
- */
- if (!scp->request_buffer || !scp->request_bufflen)
- return 0;
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
- if (!scp->use_sg) {
- mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
- scp->
- request_buffer,
- scp->
- request_bufflen,
- scp->
- sc_data_direction);
- mfi_sgl->sge32[0].length = scp->request_bufflen;
-
- return 1;
- }
-
- os_sgl = (struct scatterlist *)scp->request_buffer;
- sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
- scp->sc_data_direction);
-
- for (i = 0; i < sge_count; i++, os_sgl++) {
- mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ }
}
-
return sge_count;
}
@@ -481,35 +462,15 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
int sge_count;
struct scatterlist *os_sgl;
- /*
- * Return 0 if there is no data transfer
- */
- if (!scp->request_buffer || !scp->request_bufflen)
- return 0;
-
- if (!scp->use_sg) {
- mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
- scp->
- request_buffer,
- scp->
- request_bufflen,
- scp->
- sc_data_direction);
-
- mfi_sgl->sge64[0].length = scp->request_bufflen;
-
- return 1;
- }
-
- os_sgl = (struct scatterlist *)scp->request_buffer;
- sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
- scp->sc_data_direction);
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
- for (i = 0; i < sge_count; i++, os_sgl++) {
- mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ }
}
-
return sge_count;
}
@@ -593,7 +554,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
pthru->flags = flags;
- pthru->data_xfer_len = scp->request_bufflen;
+ pthru->data_xfer_len = scsi_bufflen(scp);
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
@@ -1195,45 +1156,6 @@ megasas_complete_abort(struct megasas_instance *instance,
}
/**
- * megasas_unmap_sgbuf - Unmap SG buffers
- * @instance: Adapter soft state
- * @cmd: Completed command
- */
-static void
-megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
- dma_addr_t buf_h;
- u8 opcode;
-
- if (cmd->scmd->use_sg) {
- pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
- cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
- return;
- }
-
- if (!cmd->scmd->request_bufflen)
- return;
-
- opcode = cmd->frame->hdr.cmd;
-
- if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
- if (IS_DMA64)
- buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
- else
- buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
- } else {
- if (IS_DMA64)
- buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
- else
- buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
- }
-
- pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
- cmd->scmd->sc_data_direction);
- return;
-}
-
-/**
* megasas_complete_cmd - Completes a command
* @instance: Adapter soft state
* @cmd: Command to be completed
@@ -1281,7 +1203,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
- megasas_unmap_sgbuf(instance, cmd);
+ scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
@@ -1329,7 +1251,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
atomic_dec(&instance->fw_outstanding);
- megasas_unmap_sgbuf(instance, cmd);
+ scsi_dma_unmap(cmd->scmd);
cmd->scmd->scsi_done(cmd->scmd);
megasas_return_cmd(instance, cmd);
@@ -1714,15 +1636,13 @@ static int megasas_alloc_cmds(struct megasas_instance *instance)
* Allocate the dynamic array first and then allocate individual
* commands.
*/
- instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
- GFP_KERNEL);
+ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
if (!instance->cmd_list) {
printk(KERN_DEBUG "megasas: out of memory\n");
return -ENOMEM;
}
- memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
for (i = 0; i < max_cmd; i++) {
instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
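Editor's note: megaraid_sas makes the new return-value contract explicit. scsi_dma_map() returns the mapped segment count, 0 for a command that carries no data, or a negative value if the DMA mapping failed, and the hunks above BUG() on the negative case. A driver that would rather requeue than crash could do something like the sketch below; the queuecommand fragment is illustrative and not part of this patch, only the scsi_* calls and SCSI_MLQUEUE_HOST_BUSY are real midlayer API.

/* Hypothetical queuecommand fragment: back off instead of BUG() when
 * the DMA mapping fails.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	int nseg = scsi_dma_map(cmd);

	if (nseg < 0)               /* mapping failed, let the midlayer retry */
		return SCSI_MLQUEUE_HOST_BUSY;

	/* ... build the firmware frame from the nseg mapped entries ... */

	cmd->scsi_done = done;
	return 0;
}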
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index e64d1a19d8d7..651d09b08f2a 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -421,7 +421,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
for (i = 0; i < cmd->cmd_len; ++i)
printk(" %x", cmd->cmnd[i]);
printk(" use_sg=%d buffer=%p bufflen=%u\n",
- cmd->use_sg, cmd->request_buffer, cmd->request_bufflen);
+ scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
}
#endif
if (ms->dma_started)
@@ -602,13 +602,16 @@ static void mesh_done(struct mesh_state *ms, int start_next)
cmd->result += (cmd->SCp.Message << 8);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
- cmd->result, ms->data_ptr, cmd->request_bufflen);
+ cmd->result, ms->data_ptr, scsi_bufflen(cmd));
+#if 0
+ /* needs to use sg? */
if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
&& cmd->request_buffer != 0) {
unsigned char *b = cmd->request_buffer;
printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}
+#endif
}
cmd->SCp.this_residual -= ms->data_ptr;
mesh_completed(ms, cmd);
@@ -1265,15 +1268,18 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
dcmds = ms->dma_cmds;
dtot = 0;
if (cmd) {
- cmd->SCp.this_residual = cmd->request_bufflen;
- if (cmd->use_sg > 0) {
- int nseg;
+ int nseg;
+
+ cmd->SCp.this_residual = scsi_bufflen(cmd);
+
+ nseg = scsi_dma_map(cmd);
+ BUG_ON(nseg < 0);
+
+ if (nseg) {
total = 0;
- scl = (struct scatterlist *) cmd->request_buffer;
off = ms->data_ptr;
- nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
- cmd->sc_data_direction);
- for (i = 0; i <nseg; ++i, ++scl) {
+
+ scsi_for_each_sg(cmd, scl, nseg, i) {
u32 dma_addr = sg_dma_address(scl);
u32 dma_len = sg_dma_len(scl);
@@ -1292,16 +1298,6 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
dtot += dma_len - off;
off = 0;
}
- } else if (ms->data_ptr < cmd->request_bufflen) {
- dtot = cmd->request_bufflen - ms->data_ptr;
- if (dtot > 0xffff)
- panic("mesh: transfer size >= 64k");
- st_le16(&dcmds->req_count, dtot);
- /* XXX Use pci DMA API here ... */
- st_le32(&dcmds->phy_addr,
- virt_to_phys(cmd->request_buffer) + ms->data_ptr);
- dcmds->xfer_status = 0;
- ++dcmds;
}
}
if (dtot == 0) {
@@ -1356,18 +1352,14 @@ static void halt_dma(struct mesh_state *ms)
dumplog(ms, ms->conn_tgt);
dumpslog(ms);
#endif /* MESH_DBG */
- } else if (cmd && cmd->request_bufflen != 0 &&
- ms->data_ptr > cmd->request_bufflen) {
+ } else if (cmd && scsi_bufflen(cmd) &&
+ ms->data_ptr > scsi_bufflen(cmd)) {
printk(KERN_DEBUG "mesh: target %d overrun, "
"data_ptr=%x total=%x goes_out=%d\n",
- ms->conn_tgt, ms->data_ptr, cmd->request_bufflen,
+ ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
ms->tgts[ms->conn_tgt].data_goes_out);
}
- if (cmd->use_sg != 0) {
- struct scatterlist *sg;
- sg = (struct scatterlist *)cmd->request_buffer;
- pci_unmap_sg(ms->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
ms->dma_started = 0;
}
diff --git a/drivers/scsi/mvme16x.c b/drivers/scsi/mvme16x.c
deleted file mode 100644
index 575fe6f7e0ec..000000000000
--- a/drivers/scsi/mvme16x.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
- *
- * Based on work by Alan Hourihane
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/blkdev.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/mvme16xhw.h>
-#include <asm/irq.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "53c7xx.h"
-#include "mvme16x.h"
-
-#include<linux/stat.h>
-
-
-int mvme16x_scsi_detect(struct scsi_host_template *tpnt)
-{
- static unsigned char called = 0;
- int clock;
- long long options;
-
- if (!MACH_IS_MVME16x)
- return 0;
- if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
- printk ("SCSI detection disabled, SCSI chip not present\n");
- return 0;
- }
- if (called)
- return 0;
-
- tpnt->proc_name = "MVME16x";
-
- options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
-
- clock = 66000000; /* 66MHz SCSI Clock */
-
- ncr53c7xx_init(tpnt, 0, 710, (unsigned long)0xfff47000,
- 0, MVME16x_IRQ_SCSI, DMA_NONE,
- options, clock);
- called = 1;
- return 1;
-}
-
-static int mvme16x_scsi_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-static struct scsi_host_template driver_template = {
- .name = "MVME16x NCR53c710 SCSI",
- .detect = mvme16x_scsi_detect,
- .release = mvme16x_scsi_release,
- .queuecommand = NCR53c7xx_queue_command,
- .abort = NCR53c7xx_abort,
- .reset = NCR53c7xx_reset,
- .can_queue = 24,
- .this_id = 7,
- .sg_tablesize = 63,
- .cmd_per_lun = 3,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
diff --git a/drivers/scsi/mvme16x.h b/drivers/scsi/mvme16x.h
deleted file mode 100644
index 73e33b37a3f8..000000000000
--- a/drivers/scsi/mvme16x.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef MVME16x_SCSI_H
-#define MVME16x_SCSI_H
-
-#include <linux/types.h>
-
-int mvme16x_scsi_detect(struct scsi_host_template *);
-const char *NCR53c7x0_info(void);
-int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int NCR53c7xx_abort(Scsi_Cmnd *);
-int NCR53c7x0_release (struct Scsi_Host *);
-int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
-void NCR53c7x0_intr(int irq, void *dev_id);
-
-#ifndef CMD_PER_LUN
-#define CMD_PER_LUN 3
-#endif
-
-#ifndef CAN_QUEUE
-#define CAN_QUEUE 24
-#endif
-
-#include <scsi/scsicam.h>
-
-#endif /* MVME16x_SCSI_H */
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
new file mode 100644
index 000000000000..1bdddad48571
--- /dev/null
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -0,0 +1,159 @@
+/*
+ * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
+ *
+ * Based on work by Alan Hourihane
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/mvme16xhw.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("MVME16x NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+static struct scsi_host_template mvme16x_scsi_driver_template = {
+ .name = "MVME16x NCR53c710 SCSI",
+ .proc_name = "MVME16x",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct platform_device *mvme16x_scsi_device;
+
+static __devinit int
+mvme16x_probe(struct device *dev)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+
+ if (!MACH_IS_MVME16x)
+ goto out;
+
+ if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
+ printk(KERN_INFO "mvme16x-scsi: detection disabled, "
+ "SCSI chip not present\n");
+ goto out;
+ }
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "mvme16x-scsi: "
+ "Failed to allocate host data\n");
+ goto out;
+ }
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ hostdata->base = (void __iomem *)0xfff47000UL;
+ hostdata->clock = 50; /* XXX - depends on the CPU clock! */
+ hostdata->chip710 = 1;
+ hostdata->dmode_extra = DMODE_FC2;
+ hostdata->dcntl_extra = EA_710;
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ /* and register the chip */
+ host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, dev);
+ if (!host) {
+ printk(KERN_ERR "mvme16x-scsi: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+ host->this_id = 7;
+ host->base = 0xfff47000UL;
+ host->irq = MVME16x_IRQ_SCSI;
+ if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) {
+ printk(KERN_ERR "mvme16x-scsi: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ /* Enable scsi chip ints */
+ {
+ volatile unsigned long v;
+
+ /* Enable scsi interrupts at level 4 in PCCchip2 */
+ v = in_be32(0xfff4202c);
+ v = (v & ~0xff) | 0x10 | 4;
+ out_be32(0xfff4202c, v);
+ }
+
+ dev_set_drvdata(dev, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ kfree(hostdata);
+ out:
+ return -ENODEV;
+}
+
+static __devexit int
+mvme16x_device_remove(struct device *dev)
+{
+ struct Scsi_Host *host = dev_get_drvdata(dev);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ /* Disable scsi chip ints */
+ {
+ volatile unsigned long v;
+
+ v = in_be32(0xfff4202c);
+ v &= ~0x10;
+ out_be32(0xfff4202c, v);
+ }
+ scsi_remove_host(host);
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+
+ return 0;
+}
+
+static struct device_driver mvme16x_scsi_driver = {
+ .name = "mvme16x-scsi",
+ .bus = &platform_bus_type,
+ .probe = mvme16x_probe,
+ .remove = __devexit_p(mvme16x_device_remove),
+};
+
+static int __init mvme16x_scsi_init(void)
+{
+ int err;
+
+ err = driver_register(&mvme16x_scsi_driver);
+ if (err)
+ return err;
+
+ mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi",
+ -1, NULL, 0);
+ if (IS_ERR(mvme16x_scsi_device)) {
+ driver_unregister(&mvme16x_scsi_driver);
+ return PTR_ERR(mvme16x_scsi_device);
+ }
+
+ return 0;
+}
+
+static void __exit mvme16x_scsi_exit(void)
+{
+ platform_device_unregister(mvme16x_scsi_device);
+ driver_unregister(&mvme16x_scsi_driver);
+}
+
+module_init(mvme16x_scsi_init);
+module_exit(mvme16x_scsi_exit);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index bbf521cbc55d..030ba49f33ff 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -529,43 +529,20 @@ static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
switch(cmd->__data_mapped) {
case 2:
- dma_unmap_sg(dev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
- break;
- case 1:
- dma_unmap_single(dev, cmd->__data_mapping,
- cmd->request_bufflen,
- cmd->sc_data_direction);
+ scsi_dma_unmap(cmd);
break;
}
cmd->__data_mapped = 0;
}
-static u_long __map_scsi_single_data(struct device *dev, struct scsi_cmnd *cmd)
-{
- dma_addr_t mapping;
-
- if (cmd->request_bufflen == 0)
- return 0;
-
- mapping = dma_map_single(dev, cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- cmd->__data_mapped = 1;
- cmd->__data_mapping = mapping;
-
- return mapping;
-}
-
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
int use_sg;
- if (cmd->use_sg == 0)
+ use_sg = scsi_dma_map(cmd);
+ if (!use_sg)
return 0;
- use_sg = dma_map_sg(dev, cmd->request_buffer, cmd->use_sg,
- cmd->sc_data_direction);
cmd->__data_mapped = 2;
cmd->__data_mapping = use_sg;
@@ -573,7 +550,6 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
}
#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
-#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->dev, cmd)
#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
/*==========================================================
@@ -7667,39 +7643,16 @@ fail:
** sizes to the data segment array.
*/
-static int ncr_scatter_no_sglist(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
-{
- struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER - 1];
- int segment;
-
- cp->data_len = cmd->request_bufflen;
-
- if (cmd->request_bufflen) {
- dma_addr_t baddr = map_scsi_single_data(np, cmd);
- if (baddr) {
- ncr_build_sge(np, data, baddr, cmd->request_bufflen);
- segment = 1;
- } else {
- segment = -2;
- }
- } else {
- segment = 0;
- }
-
- return segment;
-}
-
static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
{
int segment = 0;
- int use_sg = (int) cmd->use_sg;
+ int use_sg = scsi_sg_count(cmd);
cp->data_len = 0;
- if (!use_sg)
- segment = ncr_scatter_no_sglist(np, cp, cmd);
- else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
- struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
+ use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > 0) {
+ struct scatterlist *sg;
struct scr_tblmove *data;
if (use_sg > MAX_SCATTER) {
@@ -7709,16 +7662,15 @@ static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
data = &cp->phys.data[MAX_SCATTER - use_sg];
- for (segment = 0; segment < use_sg; segment++) {
- dma_addr_t baddr = sg_dma_address(&scatter[segment]);
- unsigned int len = sg_dma_len(&scatter[segment]);
+ scsi_for_each_sg(cmd, sg, use_sg, segment) {
+ dma_addr_t baddr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
ncr_build_sge(np, &data[segment], baddr, len);
cp->data_len += len;
}
- } else {
+ } else
segment = -2;
- }
return segment;
}
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index f6f561d26bf0..7fed35372150 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -49,10 +49,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
-# include <linux/blk.h>
-#endif
-
#include "nsp32.h"
@@ -199,17 +195,9 @@ static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
-#else
-static int nsp32_proc_info (char *, char **, off_t, int, int, int);
-#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
static int nsp32_detect (struct pci_dev *pdev);
-#else
-static int nsp32_detect (struct scsi_host_template *);
-#endif
static int nsp32_queuecommand(struct scsi_cmnd *,
void (*done)(struct scsi_cmnd *));
static const char *nsp32_info (struct Scsi_Host *);
@@ -296,15 +284,7 @@ static struct scsi_host_template nsp32_template = {
.eh_abort_handler = nsp32_eh_abort,
.eh_bus_reset_handler = nsp32_eh_bus_reset,
.eh_host_reset_handler = nsp32_eh_host_reset,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,74))
- .detect = nsp32_detect,
- .release = nsp32_release,
-#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,2))
- .use_new_eh_code = 1,
-#else
/* .highmem_io = 1, */
-#endif
};
#include "nsp32_io.h"
@@ -739,7 +719,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
command = 0;
command |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
- if (SCpnt->request_bufflen > 0) {
+ if (scsi_bufflen(SCpnt) > 0) {
command |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -888,31 +868,28 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- struct scatterlist *sgl;
+ struct scatterlist *sg;
nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
int num, i;
u32_le l;
- if (SCpnt->request_bufflen == 0) {
- return TRUE;
- }
-
if (sgt == NULL) {
nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
return FALSE;
}
- if (SCpnt->use_sg) {
- sgl = (struct scatterlist *)SCpnt->request_buffer;
- num = pci_map_sg(data->Pci, sgl, SCpnt->use_sg,
- SCpnt->sc_data_direction);
- for (i = 0; i < num; i++) {
+ num = scsi_dma_map(SCpnt);
+ if (!num)
+ return TRUE;
+ else if (num < 0)
+ return FALSE;
+ else {
+ scsi_for_each_sg(SCpnt, sg, num, i) {
/*
* Build nsp32_sglist, substitute sg dma addresses.
*/
- sgt[i].addr = cpu_to_le32(sg_dma_address(sgl));
- sgt[i].len = cpu_to_le32(sg_dma_len(sgl));
- sgl++;
+ sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
+ sgt[i].len = cpu_to_le32(sg_dma_len(sg));
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
@@ -929,23 +906,6 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
/* set end mark */
l = le32_to_cpu(sgt[num-1].len);
sgt[num-1].len = cpu_to_le32(l | SGTEND);
-
- } else {
- SCpnt->SCp.have_data_in = pci_map_single(data->Pci,
- SCpnt->request_buffer, SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
-
- sgt[0].addr = cpu_to_le32(SCpnt->SCp.have_data_in);
- sgt[0].len = cpu_to_le32(SCpnt->request_bufflen | SGTEND); /* set end mark */
-
- if (SCpnt->request_bufflen > 0x10000) {
- nsp32_msg(KERN_ERR,
- "can't transfer over 64KB at a time, size=0x%lx", SCpnt->request_bufflen);
- return FALSE;
- }
- nsp32_dbg(NSP32_DEBUG_SGLIST, "single : addr 0x%lx len=0x%lx",
- le32_to_cpu(sgt[0].addr),
- le32_to_cpu(sgt[0].len ));
}
return TRUE;
@@ -962,7 +922,7 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
"enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
- SCpnt->use_sg, SCpnt->request_buffer, SCpnt->request_bufflen);
+ scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -994,10 +954,10 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
data->CurrentSC = SCpnt;
SCpnt->SCp.Status = CHECK_CONDITION;
SCpnt->SCp.Message = 0;
- SCpnt->resid = SCpnt->request_bufflen;
+ scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.ptr = (char *) SCpnt->request_buffer;
- SCpnt->SCp.this_residual = SCpnt->request_bufflen;
+ SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
+ SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
SCpnt->SCp.buffer = NULL;
SCpnt->SCp.buffers_residual = 0;
@@ -1210,13 +1170,9 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
unsigned long flags;
int ret;
int handled = 0;
-
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
struct Scsi_Host *host = data->Host;
+
spin_lock_irqsave(host->host_lock, flags);
-#else
- spin_lock_irqsave(&io_request_lock, flags);
-#endif
/*
* IRQ check, then enable IRQ mask
@@ -1312,7 +1268,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
}
if ((auto_stat & DATA_IN_PHASE) &&
- (SCpnt->resid > 0) &&
+ (scsi_get_resid(SCpnt) > 0) &&
((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
printk( "auto+fifo\n");
//nsp32_pio_read(SCpnt);
@@ -1333,7 +1289,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
nsp32_read4(base, SAVED_SACK_CNT));
- SCpnt->resid = 0; /* all data transfered! */
+ scsi_set_resid(SCpnt, 0); /* all data transferred! */
}
/*
@@ -1480,11 +1436,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
nsp32_write2(base, IRQ_CONTROL, 0);
out2:
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
spin_unlock_irqrestore(host->host_lock, flags);
-#else
- spin_unlock_irqrestore(&io_request_lock, flags);
-#endif
nsp32_dbg(NSP32_DEBUG_INTR, "exit");
@@ -1499,28 +1451,15 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
} \
} while(0)
-static int nsp32_proc_info(
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
- struct Scsi_Host *host,
-#endif
- char *buffer,
- char **start,
- off_t offset,
- int length,
-#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
- int hostno,
-#endif
- int inout)
+
+static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
+ off_t offset, int length, int inout)
{
char *pos = buffer;
int thislength;
unsigned long flags;
nsp32_hw_data *data;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
int hostno;
-#else
- struct Scsi_Host *host;
-#endif
unsigned int base;
unsigned char mode_reg;
int id, speed;
@@ -1531,15 +1470,7 @@ static int nsp32_proc_info(
return -EINVAL;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
hostno = host->host_no;
-#else
- /* search this HBA host */
- host = scsi_host_hn_get(hostno);
- if (host == NULL) {
- return -ESRCH;
- }
-#endif
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
@@ -1626,25 +1557,8 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
- /*
- * unmap pci
- */
- if (SCpnt->request_bufflen == 0) {
- goto skip;
- }
+ scsi_dma_unmap(SCpnt);
- if (SCpnt->use_sg) {
- pci_unmap_sg(data->Pci,
- (struct scatterlist *)SCpnt->request_buffer,
- SCpnt->use_sg, SCpnt->sc_data_direction);
- } else {
- pci_unmap_single(data->Pci,
- (u32)SCpnt->SCp.have_data_in,
- SCpnt->request_bufflen,
- SCpnt->sc_data_direction);
- }
-
- skip:
/*
* clear TRANSFERCONTROL_BM_START
*/
@@ -1800,7 +1714,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
SCpnt->SCp.Message = 0;
nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
- SCpnt->SCp.Status, SCpnt->resid);
+ SCpnt->SCp.Status, scsi_get_resid(SCpnt));
SCpnt->result = (DID_OK << 16) |
(SCpnt->SCp.Message << 8) |
(SCpnt->SCp.Status << 0);
@@ -1844,7 +1758,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
unsigned int restlen, sentlen;
u32_le len, addr;
- nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", SCpnt->resid);
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
/* adjust saved SACK count with 4 byte start address boundary */
s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
@@ -1888,12 +1802,12 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
return;
last:
- if (SCpnt->resid < sentlen) {
+ if (scsi_get_resid(SCpnt) < sentlen) {
nsp32_msg(KERN_ERR, "resid underflow");
}
- SCpnt->resid -= sentlen;
- nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", SCpnt->resid);
+ scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
+ nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
/* update hostdata and lun */
@@ -2022,7 +1936,7 @@ static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short comma
transfer = 0;
transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
- if (SCpnt->request_bufflen > 0) {
+ if (scsi_bufflen(SCpnt) > 0) {
transfer |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -2674,17 +2588,7 @@ static void nsp32_sack_negate(nsp32_hw_data *data)
* 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
* 0xc00-0xfff: CardBus status registers
*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
-#define DETECT_OK 0
-#define DETECT_NG 1
-#define PCIDEV pdev
static int nsp32_detect(struct pci_dev *pdev)
-#else
-#define DETECT_OK 1
-#define DETECT_NG 0
-#define PCIDEV (data->Pci)
-static int nsp32_detect(struct scsi_host_template *sht)
-#endif
{
struct Scsi_Host *host; /* registered host structure */
struct resource *res;
@@ -2697,11 +2601,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* register this HBA as SCSI device
*/
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
-#else
- host = scsi_register(sht, sizeof(nsp32_hw_data));
-#endif
if (host == NULL) {
nsp32_msg (KERN_ERR, "failed to scsi register");
goto err;
@@ -2719,9 +2619,6 @@ static int nsp32_detect(struct scsi_host_template *sht)
host->unique_id = data->BaseAddress;
host->n_io_port = data->NumAddress;
host->base = (unsigned long)data->MmioAddress;
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,63))
- scsi_set_pci_device(host, PCIDEV);
-#endif
data->Host = host;
spin_lock_init(&(data->Lock));
@@ -2776,7 +2673,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* setup DMA
*/
- if (pci_set_dma_mask(PCIDEV, DMA_32BIT_MASK) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
goto scsi_unregister;
}
@@ -2784,7 +2681,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* allocate autoparam DMA resource.
*/
- data->autoparam = pci_alloc_consistent(PCIDEV, sizeof(nsp32_autoparam), &(data->auto_paddr));
+ data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
if (data->autoparam == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto scsi_unregister;
@@ -2793,7 +2690,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
/*
* allocate scatter-gather DMA resource.
*/
- data->sg_list = pci_alloc_consistent(PCIDEV, NSP32_SG_TABLE_SIZE,
+ data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
&(data->sg_paddr));
if (data->sg_list == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
@@ -2883,16 +2780,14 @@ static int nsp32_detect(struct scsi_host_template *sht)
goto free_irq;
}
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
- ret = scsi_add_host(host, &PCIDEV->dev);
+ ret = scsi_add_host(host, &pdev->dev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to add scsi host");
goto free_region;
}
scsi_scan_host(host);
-#endif
- pci_set_drvdata(PCIDEV, host);
- return DETECT_OK;
+ pci_set_drvdata(pdev, host);
+ return 0;
free_region:
release_region(host->io_port, host->n_io_port);
@@ -2901,22 +2796,19 @@ static int nsp32_detect(struct scsi_host_template *sht)
free_irq(host->irq, data);
free_sg_list:
- pci_free_consistent(PCIDEV, NSP32_SG_TABLE_SIZE,
+ pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
free_autoparam:
- pci_free_consistent(PCIDEV, sizeof(nsp32_autoparam),
+ pci_free_consistent(pdev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
scsi_unregister:
scsi_host_put(host);
err:
- return DETECT_NG;
+ return 1;
}
-#undef DETECT_OK
-#undef DETECT_NG
-#undef PCIDEV
static int nsp32_release(struct Scsi_Host *host)
{
@@ -3487,15 +3379,6 @@ static int nsp32_resume(struct pci_dev *pdev)
return 0;
}
-/* Enable wake event */
-static int nsp32_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable)
-{
- struct Scsi_Host *host = pci_get_drvdata(pdev);
-
- nsp32_msg(KERN_INFO, "pci-enable_wake: stub, pdev=0x%p, enable=%d, slot=%s, host=0x%p", pdev, enable, pci_name(pdev), host);
-
- return 0;
-}
#endif
/************************************************************************
@@ -3525,11 +3408,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
pci_set_master(pdev);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
ret = nsp32_detect(pdev);
-#else
- ret = scsi_register_host(&nsp32_template);
-#endif
nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
pdev->irq,
@@ -3544,25 +3423,17 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
static void __devexit nsp32_remove(struct pci_dev *pdev)
{
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
struct Scsi_Host *host = pci_get_drvdata(pdev);
-#endif
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
scsi_remove_host(host);
nsp32_release(host);
scsi_host_put(host);
-#else
- scsi_unregister_host(&nsp32_template);
-#endif
}
-
-
static struct pci_driver nsp32_driver = {
.name = "nsp32",
.id_table = nsp32_pci_table,
@@ -3571,7 +3442,6 @@ static struct pci_driver nsp32_driver = {
#ifdef CONFIG_PM
.suspend = nsp32_suspend,
.resume = nsp32_resume,
- .enable_wake = nsp32_enable_wake,
#endif
};
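Editor's note: besides the buffer accessors, the nsp32 hunks replace direct pokes at cmd->resid and cmd->request_bufflen with scsi_set_resid()/scsi_get_resid() and scsi_bufflen(). A small sketch of a completion path accounting for a partial transfer with those accessors is below; example_finish() and the transferred count are illustrative, only the accessors and result macros are real API.

/* Sketch: record how much of the request actually completed, using the
 * resid accessors instead of touching cmd->resid directly.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static void example_finish(struct scsi_cmnd *cmd, unsigned int transferred)
{
	if (transferred > scsi_bufflen(cmd))
		transferred = scsi_bufflen(cmd);    /* clamp target overruns */

	scsi_set_resid(cmd, scsi_bufflen(cmd) - transferred);
	cmd->result = DID_OK << 16;
}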
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 7dd787f6ab27..fa481b515ead 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -2,9 +2,12 @@
# PCMCIA SCSI adapter configuration
#
-menu "PCMCIA SCSI adapter support"
+menuconfig SCSI_LOWLEVEL_PCMCIA
+ bool "PCMCIA SCSI adapter support"
depends on SCSI!=n && PCMCIA!=n
+if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA
+
config PCMCIA_AHA152X
tristate "Adaptec AHA152X PCMCIA support"
depends on !64BIT
@@ -77,4 +80,4 @@ config PCMCIA_SYM53C500
To compile this driver as a module, choose M here: the
module will be called sym53c500_cs.
-endmenu
+endif # SCSI_LOWLEVEL_PCMCIA
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 370802d24acd..2dd0dc9a9aed 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -106,9 +106,8 @@ static int aha152x_probe(struct pcmcia_device *link)
DEBUG(0, "aha152x_attach()\n");
/* Create new SCSI device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) return -ENOMEM;
- memset(info, 0, sizeof(*info));
info->p_dev = link;
link->priv = info;
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index c6f8c6e65e05..445cfbbca9b3 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1602,9 +1602,8 @@ static int nsp_cs_probe(struct pcmcia_device *link)
nsp_dbg(NSP_DEBUG_INIT, "in");
/* Create new SCSI device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) { return -ENOMEM; }
- memset(info, 0, sizeof(*info));
info->p_dev = link;
link->priv = info;
data->ScsiInfo = info;
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 697cfb76c3a4..67c5a58d17df 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -162,10 +162,9 @@ static int qlogic_probe(struct pcmcia_device *link)
DEBUG(0, "qlogic_attach()\n");
/* Create new SCSI device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- memset(info, 0, sizeof(*info));
info->p_dev = link;
link->priv = info;
link->io.NumPorts1 = 16;
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index ffe75c431b25..961839ecfe86 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -370,8 +370,6 @@ SYM53C500_intr(int irq, void *dev_id)
DEB(unsigned char seq_reg;)
unsigned char status, int_reg;
unsigned char pio_status;
- struct scatterlist *sglist;
- unsigned int sgcount;
int port_base = dev->io_port;
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
@@ -434,20 +432,19 @@ SYM53C500_intr(int irq, void *dev_id)
switch (status & 0x07) { /* scsi phase */
case 0x00: /* DATA-OUT */
if (int_reg & 0x10) { /* Target requesting info transfer */
+ struct scatterlist *sg;
+ int i;
+
curSC->SCp.phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
- LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
- if (!curSC->use_sg) /* Don't use scatter-gather */
- SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen);
- else { /* use scatter-gather */
- sgcount = curSC->use_sg;
- sglist = curSC->request_buffer;
- while (sgcount--) {
- SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
+ SYM53C500_pio_write(fast_pio, port_base,
+ page_address(sg->page) + sg->offset,
+ sg->length);
}
REG0(port_base);
}
@@ -455,20 +452,19 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x01: /* DATA-IN */
if (int_reg & 0x10) { /* Target requesting info transfer */
+ struct scatterlist *sg;
+ int i;
+
curSC->SCp.phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
- LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */
+ LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
- if (!curSC->use_sg) /* Don't use scatter-gather */
- SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen);
- else { /* Use scatter-gather */
- sgcount = curSC->use_sg;
- sglist = curSC->request_buffer;
- while (sgcount--) {
- SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
+ SYM53C500_pio_read(fast_pio, port_base,
+ page_address(sg->page) + sg->offset,
+ sg->length);
}
REG0(port_base);
}
@@ -578,7 +574,7 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id,
- SCpnt->device->lun, SCpnt->request_bufflen));
+ SCpnt->device->lun, scsi_bufflen(SCpnt)));
VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
@@ -879,10 +875,9 @@ SYM53C500_probe(struct pcmcia_device *link)
DEBUG(0, "SYM53C500_attach()\n");
/* Create new SCSI device */
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- memset(info, 0, sizeof(*info));
info->p_dev = link;
link->priv = info;
link->io.NumPorts1 = 16;
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 584ba4d6e038..67b6d76a6c8d 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -129,11 +129,11 @@ static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length)
if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
x = simple_strtoul(buffer + 10, NULL, 0);
dev->recon_tmo = x;
- printk("ppa: recon_tmo set to %ld\n", x);
+ printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x);
return length;
}
- printk("ppa /proc: invalid variable\n");
- return (-EINVAL);
+ printk(KERN_WARNING "ppa /proc: invalid variable\n");
+ return -EINVAL;
}
static int ppa_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout)
@@ -216,7 +216,7 @@ static unsigned char ppa_wait(ppa_struct *dev)
/* Counter expired - Time out occurred */
ppa_fail(dev, DID_TIME_OUT);
- printk("ppa timeout in ppa_wait\n");
+ printk(KERN_WARNING "ppa timeout in ppa_wait\n");
return 0; /* command timed out */
}
@@ -248,7 +248,7 @@ static inline void ecp_sync(ppa_struct *dev)
return;
udelay(5);
}
- printk("ppa: ECP sync failed as data still present in FIFO.\n");
+ printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n");
}
}
@@ -328,7 +328,7 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
break;
default:
- printk("PPA: bug in ppa_out()\n");
+ printk(KERN_ERR "PPA: bug in ppa_out()\n");
r = 0;
}
return r;
@@ -381,7 +381,7 @@ static int ppa_in(ppa_struct *dev, char *buffer, int len)
break;
default:
- printk("PPA: bug in ppa_ins()\n");
+ printk(KERN_ERR "PPA: bug in ppa_ins()\n");
r = 0;
break;
}
@@ -633,7 +633,7 @@ static void ppa_interrupt(struct work_struct *work)
struct scsi_cmnd *cmd = dev->cur_cmd;
if (!cmd) {
- printk("PPA: bug in ppa_interrupt\n");
+ printk(KERN_ERR "PPA: bug in ppa_interrupt\n");
return;
}
if (ppa_engine(dev, cmd)) {
@@ -646,31 +646,31 @@ static void ppa_interrupt(struct work_struct *work)
case DID_OK:
break;
case DID_NO_CONNECT:
- printk("ppa: no device at SCSI ID %i\n", cmd->device->target);
+ printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
break;
case DID_BUS_BUSY:
- printk("ppa: BUS BUSY - EPP timeout detected\n");
+ printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
break;
case DID_TIME_OUT:
- printk("ppa: unknown timeout\n");
+ printk(KERN_DEBUG "ppa: unknown timeout\n");
break;
case DID_ABORT:
- printk("ppa: told to abort\n");
+ printk(KERN_DEBUG "ppa: told to abort\n");
break;
case DID_PARITY:
- printk("ppa: parity error (???)\n");
+ printk(KERN_DEBUG "ppa: parity error (???)\n");
break;
case DID_ERROR:
- printk("ppa: internal driver error\n");
+ printk(KERN_DEBUG "ppa: internal driver error\n");
break;
case DID_RESET:
- printk("ppa: told to reset device\n");
+ printk(KERN_DEBUG "ppa: told to reset device\n");
break;
case DID_BAD_INTR:
- printk("ppa: bad interrupt (???)\n");
+ printk(KERN_WARNING "ppa: bad interrupt (???)\n");
break;
default:
- printk("ppa: bad return code (%02x)\n",
+ printk(KERN_WARNING "ppa: bad return code (%02x)\n",
(cmd->result >> 16) & 0xff);
}
#endif
@@ -724,8 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv) {
if (time_after(jiffies, dev->jstart + (1 * HZ))) {
- printk
- ("ppa: Parallel port cable is unplugged!!\n");
+ printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n");
ppa_fail(dev, DID_BUS_BUSY);
return 0;
} else {
@@ -755,11 +754,9 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (cmd->use_sg) {
/* if many buffers are available, start filling the first */
- cmd->SCp.buffer =
- (struct scatterlist *) cmd->request_buffer;
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
+ cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
cmd->SCp.buffer->offset;
} else {
/* else fill the only available buffer */
@@ -800,7 +797,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
break;
default:
- printk("ppa: Invalid scsi phase\n");
+ printk(KERN_ERR "ppa: Invalid scsi phase\n");
}
return 0;
}
@@ -811,7 +808,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
ppa_struct *dev = ppa_dev(cmd->device->host);
if (dev->cur_cmd) {
- printk("PPA: bug in ppa_queuecommand\n");
+ printk(KERN_ERR "PPA: bug in ppa_queuecommand\n");
return 0;
}
dev->failed = 0;
@@ -899,7 +896,7 @@ static int device_check(ppa_struct *dev)
/* This routine looks for a device and then attempts to use EPP
to send a command. If all goes as planned then EPP is available. */
- static char cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
int loop, old_mode, status, k, ppb = dev->base;
unsigned char l;
@@ -909,14 +906,14 @@ static int device_check(ppa_struct *dev)
if ((ppb & 0x0007) == 0x0000)
dev->mode = PPA_EPP_32;
- second_pass:
+second_pass:
ppa_connect(dev, CONNECT_EPP_MAYBE);
/* Select SCSI device */
if (!ppa_select(dev, loop)) {
ppa_disconnect(dev);
continue;
}
- printk("ppa: Found device at ID %i, Attempting to use %s\n",
+ printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n",
loop, PPA_MODE_STRING[dev->mode]);
/* Send SCSI command */
@@ -965,7 +962,7 @@ static int device_check(ppa_struct *dev)
return -EIO;
}
ppa_disconnect(dev);
- printk("ppa: Communication established with ID %i using %s\n",
+ printk(KERN_INFO "ppa: Communication established with ID %i using %s\n",
loop, PPA_MODE_STRING[dev->mode]);
ppa_connect(dev, CONNECT_EPP_MAYBE);
ppa_reset_pulse(ppb);
@@ -1017,10 +1014,9 @@ static int __ppa_attach(struct parport *pb)
int modes, ppb, ppb_hi;
int err = -ENOMEM;
- dev = kmalloc(sizeof(ppa_struct), GFP_KERNEL);
+ dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- memset(dev, 0, sizeof(ppa_struct));
dev->base = -1;
dev->mode = PPA_AUTODETECT;
dev->recon_tmo = PPA_RECON_TMO;
@@ -1140,7 +1136,7 @@ static struct parport_driver ppa_driver = {
static int __init ppa_driver_init(void)
{
- printk("ppa: Version %s\n", PPA_VERSION);
+ printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
return parport_register_driver(&ppa_driver);
}
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
new file mode 100644
index 000000000000..b50f1e14f2a5
--- /dev/null
+++ b/drivers/scsi/ps3rom.c
@@ -0,0 +1,533 @@
+/*
+ * PS3 BD/DVD/CD-ROM Storage Driver
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2007 Sony Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/cdrom.h>
+#include <linux/highmem.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3stor.h>
+
+
+#define DEVICE_NAME "ps3rom"
+
+#define BOUNCE_SIZE (64*1024)
+
+#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE)
+
+
+struct ps3rom_private {
+ struct ps3_storage_device *dev;
+ struct scsi_cmnd *curr_cmd;
+};
+
+
+#define LV1_STORAGE_SEND_ATAPI_COMMAND (1)
+
+struct lv1_atapi_cmnd_block {
+ u8 pkt[32]; /* packet command block */
+ u32 pktlen; /* should be 12 for ATAPI 8020 */
+ u32 blocks;
+ u32 block_size;
+ u32 proto; /* transfer mode */
+ u32 in_out; /* transfer direction */
+ u64 buffer; /* parameter except command block */
+ u32 arglen; /* length above */
+};
+
+enum lv1_atapi_proto {
+ NON_DATA_PROTO = 0,
+ PIO_DATA_IN_PROTO = 1,
+ PIO_DATA_OUT_PROTO = 2,
+ DMA_PROTO = 3
+};
+
+enum lv1_atapi_in_out {
+ DIR_WRITE = 0, /* memory -> device */
+ DIR_READ = 1 /* device -> memory */
+};
+
+
+static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
+{
+ struct ps3rom_private *priv = shost_priv(scsi_dev->host);
+ struct ps3_storage_device *dev = priv->dev;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %u, channel %u\n", __func__,
+ __LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel);
+
+ /*
+ * ATAPI SFF8020 devices use MODE_SENSE_10,
+ * so we can prohibit MODE_SENSE_6
+ */
+ scsi_dev->use_10_for_ms = 1;
+
+ /* we don't support {READ,WRITE}_6 */
+ scsi_dev->use_10_for_rw = 1;
+
+ return 0;
+}
+
+/*
+ * copy data from device into scatter/gather buffer
+ */
+static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
+{
+ int k, req_len, act_len, len, active;
+ void *kaddr;
+ struct scatterlist *sgpnt;
+ unsigned int buflen;
+
+ buflen = cmd->request_bufflen;
+ if (!buflen)
+ return 0;
+
+ if (!cmd->request_buffer)
+ return -1;
+
+ sgpnt = cmd->request_buffer;
+ active = 1;
+ for (k = 0, req_len = 0, act_len = 0; k < cmd->use_sg; ++k, ++sgpnt) {
+ if (active) {
+ kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+ len = sgpnt->length;
+ if ((req_len + len) > buflen) {
+ active = 0;
+ len = buflen - req_len;
+ }
+ memcpy(kaddr + sgpnt->offset, buf + req_len, len);
+ flush_kernel_dcache_page(sgpnt->page);
+ kunmap_atomic(kaddr, KM_IRQ0);
+ act_len += len;
+ }
+ req_len += sgpnt->length;
+ }
+ cmd->resid = req_len - act_len;
+ return 0;
+}
+
+/*
+ * copy data from scatter/gather into device's buffer
+ */
+static int fetch_to_dev_buffer(struct scsi_cmnd *cmd, void *buf)
+{
+ int k, req_len, len, fin;
+ void *kaddr;
+ struct scatterlist *sgpnt;
+ unsigned int buflen;
+
+ buflen = cmd->request_bufflen;
+ if (!buflen)
+ return 0;
+
+ if (!cmd->request_buffer)
+ return -1;
+
+ sgpnt = cmd->request_buffer;
+ for (k = 0, req_len = 0, fin = 0; k < cmd->use_sg; ++k, ++sgpnt) {
+ kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+ len = sgpnt->length;
+ if ((req_len + len) > buflen) {
+ len = buflen - req_len;
+ fin = 1;
+ }
+ memcpy(buf + req_len, kaddr + sgpnt->offset, len);
+ kunmap_atomic(kaddr, KM_IRQ0);
+ if (fin)
+ return req_len + len;
+ req_len += sgpnt->length;
+ }
+ return req_len;
+}
+
+static int ps3rom_atapi_request(struct ps3_storage_device *dev,
+ struct scsi_cmnd *cmd)
+{
+ struct lv1_atapi_cmnd_block atapi_cmnd;
+ unsigned char opcode = cmd->cmnd[0];
+ int res;
+ u64 lpar;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: send ATAPI command 0x%02x\n", __func__,
+ __LINE__, opcode);
+
+ memset(&atapi_cmnd, 0, sizeof(struct lv1_atapi_cmnd_block));
+ memcpy(&atapi_cmnd.pkt, cmd->cmnd, 12);
+ atapi_cmnd.pktlen = 12;
+ atapi_cmnd.block_size = 1; /* transfer size is block_size * blocks */
+ atapi_cmnd.blocks = atapi_cmnd.arglen = cmd->request_bufflen;
+ atapi_cmnd.buffer = dev->bounce_lpar;
+
+ switch (cmd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ if (cmd->request_bufflen >= CD_FRAMESIZE)
+ atapi_cmnd.proto = DMA_PROTO;
+ else
+ atapi_cmnd.proto = PIO_DATA_IN_PROTO;
+ atapi_cmnd.in_out = DIR_READ;
+ break;
+
+ case DMA_TO_DEVICE:
+ if (cmd->request_bufflen >= CD_FRAMESIZE)
+ atapi_cmnd.proto = DMA_PROTO;
+ else
+ atapi_cmnd.proto = PIO_DATA_OUT_PROTO;
+ atapi_cmnd.in_out = DIR_WRITE;
+ res = fetch_to_dev_buffer(cmd, dev->bounce_buf);
+ if (res < 0)
+ return DID_ERROR << 16;
+ break;
+
+ default:
+ atapi_cmnd.proto = NON_DATA_PROTO;
+ break;
+ }
+
+ lpar = ps3_mm_phys_to_lpar(__pa(&atapi_cmnd));
+ res = lv1_storage_send_device_command(dev->sbd.dev_id,
+ LV1_STORAGE_SEND_ATAPI_COMMAND,
+ lpar, sizeof(atapi_cmnd),
+ atapi_cmnd.buffer,
+ atapi_cmnd.arglen, &dev->tag);
+ if (res == LV1_DENIED_BY_POLICY) {
+ dev_dbg(&dev->sbd.core,
+ "%s:%u: ATAPI command 0x%02x denied by policy\n",
+ __func__, __LINE__, opcode);
+ return DID_ERROR << 16;
+ }
+
+ if (res) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: ATAPI command 0x%02x failed %d\n", __func__,
+ __LINE__, opcode, res);
+ return DID_ERROR << 16;
+ }
+
+ return 0;
+}
+
+static inline unsigned int srb10_lba(const struct scsi_cmnd *cmd)
+{
+ return cmd->cmnd[2] << 24 | cmd->cmnd[3] << 16 | cmd->cmnd[4] << 8 |
+ cmd->cmnd[5];
+}
+
+static inline unsigned int srb10_len(const struct scsi_cmnd *cmd)
+{
+ return cmd->cmnd[7] << 8 | cmd->cmnd[8];
+}
+
+static int ps3rom_read_request(struct ps3_storage_device *dev,
+ struct scsi_cmnd *cmd, u32 start_sector,
+ u32 sectors)
+{
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: read %u sectors starting at %u\n",
+ __func__, __LINE__, sectors, start_sector);
+
+ res = lv1_storage_read(dev->sbd.dev_id,
+ dev->regions[dev->region_idx].id, start_sector,
+ sectors, 0, dev->bounce_lpar, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: read failed %d\n", __func__,
+ __LINE__, res);
+ return DID_ERROR << 16;
+ }
+
+ return 0;
+}
+
+static int ps3rom_write_request(struct ps3_storage_device *dev,
+ struct scsi_cmnd *cmd, u32 start_sector,
+ u32 sectors)
+{
+ int res;
+
+ dev_dbg(&dev->sbd.core, "%s:%u: write %u sectors starting at %u\n",
+ __func__, __LINE__, sectors, start_sector);
+
+ res = fetch_to_dev_buffer(cmd, dev->bounce_buf);
+ if (res < 0)
+ return DID_ERROR << 16;
+
+ res = lv1_storage_write(dev->sbd.dev_id,
+ dev->regions[dev->region_idx].id, start_sector,
+ sectors, 0, dev->bounce_lpar, &dev->tag);
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: write failed %d\n", __func__,
+ __LINE__, res);
+ return DID_ERROR << 16;
+ }
+
+ return 0;
+}
+
+static int ps3rom_queuecommand(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct ps3rom_private *priv = shost_priv(cmd->device->host);
+ struct ps3_storage_device *dev = priv->dev;
+ unsigned char opcode;
+ int res;
+
+#ifdef DEBUG
+ scsi_print_command(cmd);
+#endif
+
+ priv->curr_cmd = cmd;
+ cmd->scsi_done = done;
+
+ opcode = cmd->cmnd[0];
+ /*
+ * While we can submit READ/WRITE SCSI commands as ATAPI commands,
+ * it's recommended for various reasons (performance, error handling,
+ * ...) to use lv1_storage_{read,write}() instead
+ */
+ switch (opcode) {
+ case READ_10:
+ res = ps3rom_read_request(dev, cmd, srb10_lba(cmd),
+ srb10_len(cmd));
+ break;
+
+ case WRITE_10:
+ res = ps3rom_write_request(dev, cmd, srb10_lba(cmd),
+ srb10_len(cmd));
+ break;
+
+ default:
+ res = ps3rom_atapi_request(dev, cmd);
+ break;
+ }
+
+ if (res) {
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ cmd->result = res;
+ cmd->sense_buffer[0] = 0x70;
+ cmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ priv->curr_cmd = NULL;
+ cmd->scsi_done(cmd);
+ }
+
+ return 0;
+}
+
+static int decode_lv1_status(u64 status, unsigned char *sense_key,
+ unsigned char *asc, unsigned char *ascq)
+{
+ if (((status >> 24) & 0xff) != SAM_STAT_CHECK_CONDITION)
+ return -1;
+
+ *sense_key = (status >> 16) & 0xff;
+ *asc = (status >> 8) & 0xff;
+ *ascq = status & 0xff;
+ return 0;
+}
+
+static irqreturn_t ps3rom_interrupt(int irq, void *data)
+{
+ struct ps3_storage_device *dev = data;
+ struct Scsi_Host *host;
+ struct ps3rom_private *priv;
+ struct scsi_cmnd *cmd;
+ int res;
+ u64 tag, status;
+ unsigned char sense_key, asc, ascq;
+
+ res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
+ /*
+ * status = -1 may mean that the ATAPI transport completed OK, but the
+ * ATAPI command itself resulted in CHECK CONDITION, so the upper layer
+ * should issue REQUEST_SENSE to check the sense data
+ */
+
+ if (tag != dev->tag)
+ dev_err(&dev->sbd.core,
+ "%s:%u: tag mismatch, got %lx, expected %lx\n",
+ __func__, __LINE__, tag, dev->tag);
+
+ if (res) {
+ dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n",
+ __func__, __LINE__, res, status);
+ return IRQ_HANDLED;
+ }
+
+ host = dev->sbd.core.driver_data;
+ priv = shost_priv(host);
+ cmd = priv->curr_cmd;
+
+ if (!status) {
+ /* OK, completed */
+ if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ res = fill_from_dev_buffer(cmd, dev->bounce_buf);
+ if (res) {
+ cmd->result = DID_ERROR << 16;
+ goto done;
+ }
+ }
+ cmd->result = DID_OK << 16;
+ goto done;
+ }
+
+ if (cmd->cmnd[0] == REQUEST_SENSE) {
+ /* SCSI spec says REQUEST SENSE should never get an error */
+ dev_err(&dev->sbd.core, "%s:%u: end error without autosense\n",
+ __func__, __LINE__);
+ cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
+ goto done;
+ }
+
+ if (decode_lv1_status(status, &sense_key, &asc, &ascq)) {
+ cmd->result = DID_ERROR << 16;
+ goto done;
+ }
+
+ cmd->sense_buffer[0] = 0x70;
+ cmd->sense_buffer[2] = sense_key;
+ cmd->sense_buffer[7] = 16 - 6;
+ cmd->sense_buffer[12] = asc;
+ cmd->sense_buffer[13] = ascq;
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+
+done:
+ priv->curr_cmd = NULL;
+ cmd->scsi_done(cmd);
+ return IRQ_HANDLED;
+}
+
+static struct scsi_host_template ps3rom_host_template = {
+ .name = DEVICE_NAME,
+ .slave_configure = ps3rom_slave_configure,
+ .queuecommand = ps3rom_queuecommand,
+ .can_queue = 1,
+ .this_id = 7,
+ .sg_tablesize = SG_ALL,
+ .cmd_per_lun = 1,
+ .emulated = 1, /* only sg driver uses this */
+ .max_sectors = PS3ROM_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .module = THIS_MODULE,
+};
+
+
+static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ int error;
+ struct Scsi_Host *host;
+ struct ps3rom_private *priv;
+
+ if (dev->blk_size != CD_FRAMESIZE) {
+ dev_err(&dev->sbd.core,
+ "%s:%u: cannot handle block size %lu\n", __func__,
+ __LINE__, dev->blk_size);
+ return -EINVAL;
+ }
+
+ dev->bounce_size = BOUNCE_SIZE;
+ dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA);
+ if (!dev->bounce_buf)
+ return -ENOMEM;
+
+ error = ps3stor_setup(dev, ps3rom_interrupt);
+ if (error)
+ goto fail_free_bounce;
+
+ host = scsi_host_alloc(&ps3rom_host_template,
+ sizeof(struct ps3rom_private));
+ if (!host) {
+ dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n",
+ __func__, __LINE__);
+ goto fail_teardown;
+ }
+
+ priv = shost_priv(host);
+ dev->sbd.core.driver_data = host;
+ priv->dev = dev;
+
+ /* One device/LUN per SCSI bus */
+ host->max_id = 1;
+ host->max_lun = 1;
+
+ error = scsi_add_host(host, &dev->sbd.core);
+ if (error) {
+ dev_err(&dev->sbd.core, "%s:%u: scsi_add_host failed %d\n",
+ __func__, __LINE__, error);
+ error = -ENODEV;
+ goto fail_host_put;
+ }
+
+ scsi_scan_host(host);
+ return 0;
+
+fail_host_put:
+ scsi_host_put(host);
+ dev->sbd.core.driver_data = NULL;
+fail_teardown:
+ ps3stor_teardown(dev);
+fail_free_bounce:
+ kfree(dev->bounce_buf);
+ return error;
+}
+
+static int ps3rom_remove(struct ps3_system_bus_device *_dev)
+{
+ struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
+ struct Scsi_Host *host = dev->sbd.core.driver_data;
+
+ scsi_remove_host(host);
+ ps3stor_teardown(dev);
+ scsi_host_put(host);
+ dev->sbd.core.driver_data = NULL;
+ kfree(dev->bounce_buf);
+ return 0;
+}
+
+static struct ps3_system_bus_driver ps3rom = {
+ .match_id = PS3_MATCH_ID_STOR_ROM,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3rom_probe,
+ .remove = ps3rom_remove
+};
+
+
+static int __init ps3rom_init(void)
+{
+ return ps3_system_bus_driver_register(&ps3rom);
+}
+
+static void __exit ps3rom_exit(void)
+{
+ ps3_system_bus_driver_unregister(&ps3rom);
+}
+
+module_init(ps3rom_init);
+module_exit(ps3rom_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 BD/DVD/CD-ROM Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_ROM);
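
For reference, a stand-alone user-space sketch (illustrative only, not part of
the patch) of how the srb10_lba()/srb10_len() helpers above decode a READ(10)
CDB, and how decode_lv1_status() splits the hypervisor status word into SAM
status, sense key, ASC and ASCQ; the sample CDB and status values are made up:

#include <stdint.h>
#include <stdio.h>

/* READ(10): bytes 2-5 hold a big-endian LBA, bytes 7-8 the sector count */
static unsigned int cdb10_lba(const uint8_t *c)
{
	return c[2] << 24 | c[3] << 16 | c[4] << 8 | c[5];
}

static unsigned int cdb10_len(const uint8_t *c)
{
	return c[7] << 8 | c[8];
}

/* lv1 status word layout: 0xSSKKAAQQ (SAM status, sense key, ASC, ASCQ) */
static void decode_status(uint64_t st)
{
	printf("sam=%02x key=%02x asc=%02x ascq=%02x\n",
	       (unsigned int)(st >> 24) & 0xff, (unsigned int)(st >> 16) & 0xff,
	       (unsigned int)(st >> 8) & 0xff, (unsigned int)st & 0xff);
}

int main(void)
{
	/* READ(10) of 8 sectors starting at LBA 0x12345 */
	uint8_t cdb[10] = { 0x28, 0, 0x00, 0x01, 0x23, 0x45, 0, 0x00, 0x08, 0 };

	printf("lba=%#x len=%u\n", cdb10_lba(cdb), cdb10_len(cdb));

	/* CHECK CONDITION (0x02), NOT READY (0x02), MEDIUM NOT PRESENT (0x3a) */
	decode_status(0x02023a00);
	return 0;
}
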
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 411663af7bb7..71ddb5db4944 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,4 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
- qla_dbg.o qla_sup.o qla_attr.o
+ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8081b637d97e..1612f9200a52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -6,13 +6,17 @@
*/
#include "qla_def.h"
+#include <linux/kthread.h>
#include <linux/vmalloc.h>
+int qla24xx_vport_disable(struct fc_vport *, bool);
+
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
-qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -31,8 +35,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj, char *buf, loff_t off,
}
static ssize_t
-qla2x00_sysfs_write_fw_dump(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -73,7 +78,6 @@ static struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 0,
.read = qla2x00_sysfs_read_fw_dump,
@@ -81,8 +85,9 @@ static struct bin_attribute sysfs_fw_dump_attr = {
};
static ssize_t
-qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_read_nvram(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -93,7 +98,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
/* Read NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->nvram_base,
+ ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->nvram_base,
ha->nvram_size);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -101,8 +106,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, char *buf, loff_t off,
}
static ssize_t
-qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_write_nvram(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -113,7 +119,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
return 0;
/* Checksum NVRAM. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
uint32_t *iter;
uint32_t chksum;
@@ -137,7 +143,7 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj, char *buf, loff_t off,
/* Write NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
+ ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
@@ -149,7 +155,6 @@ static struct bin_attribute sysfs_nvram_attr = {
.attr = {
.name = "nvram",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 512,
.read = qla2x00_sysfs_read_nvram,
@@ -157,8 +162,9 @@ static struct bin_attribute sysfs_nvram_attr = {
};
static ssize_t
-qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_read_optrom(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -176,8 +182,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj, char *buf, loff_t off,
}
static ssize_t
-qla2x00_sysfs_write_optrom(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_write_optrom(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -198,16 +205,16 @@ static struct bin_attribute sysfs_optrom_attr = {
.attr = {
.name = "optrom",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
- .size = OPTROM_SIZE_24XX,
+ .size = 0,
.read = qla2x00_sysfs_read_optrom,
.write = qla2x00_sysfs_write_optrom,
};
static ssize_t
-qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -245,7 +252,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
}
memset(ha->optrom_buffer, 0, ha->optrom_size);
- ha->isp_ops.read_optrom(ha, ha->optrom_buffer, 0,
+ ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 0,
ha->optrom_size);
break;
case 2:
@@ -268,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj, char *buf, loff_t off,
if (ha->optrom_state != QLA_SWRITING)
break;
- ha->isp_ops.write_optrom(ha, ha->optrom_buffer, 0,
+ ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 0,
ha->optrom_size);
break;
}
@@ -279,15 +286,15 @@ static struct bin_attribute sysfs_optrom_ctl_attr = {
.attr = {
.name = "optrom_ctl",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 0,
.write = qla2x00_sysfs_write_optrom_ctl,
};
static ssize_t
-qla2x00_sysfs_read_vpd(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_read_vpd(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -298,15 +305,17 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj, char *buf, loff_t off,
/* Read NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->isp_ops.read_nvram(ha, (uint8_t *)buf, ha->vpd_base, ha->vpd_size);
+ ha->isp_ops->read_nvram(ha, (uint8_t *)buf, ha->vpd_base,
+ ha->vpd_size);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return ha->vpd_size;
}
static ssize_t
-qla2x00_sysfs_write_vpd(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_write_vpd(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -317,7 +326,7 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj, char *buf, loff_t off,
/* Write NVRAM. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->isp_ops.write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
+ ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return count;
@@ -327,7 +336,6 @@ static struct bin_attribute sysfs_vpd_attr = {
.attr = {
.name = "vpd",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 0,
.read = qla2x00_sysfs_read_vpd,
@@ -335,8 +343,9 @@ static struct bin_attribute sysfs_vpd_attr = {
};
static ssize_t
-qla2x00_sysfs_read_sfp(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+qla2x00_sysfs_read_sfp(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -375,7 +384,6 @@ static struct bin_attribute sysfs_sfp_attr = {
.attr = {
.name = "sfp",
.mode = S_IRUSR | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = SFP_DEV_SIZE * 2,
.read = qla2x00_sysfs_read_sfp,
@@ -403,7 +411,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
int ret;
for (iter = bin_file_entries; iter->name; iter++) {
- if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)))
+ if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -422,7 +430,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
struct sysfs_entry *iter;
for (iter = bin_file_entries; iter->name; iter++) {
- if (iter->is4GBp_only && (!IS_QLA24XX(ha) && !IS_QLA54XX(ha)))
+ if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -430,7 +438,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
}
if (ha->beacon_blink_led == 1)
- ha->isp_ops.beacon_off(ha);
+ ha->isp_ops->beacon_off(ha);
}
/* Scsi_Host attributes. */
@@ -448,7 +456,7 @@ qla2x00_fw_version_show(struct class_device *cdev, char *buf)
char fw_str[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
- ha->isp_ops.fw_version_str(ha, fw_str));
+ ha->isp_ops->fw_version_str(ha, fw_str));
}
static ssize_t
@@ -500,7 +508,7 @@ qla2x00_pci_info_show(struct class_device *cdev, char *buf)
char pci_info[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
- ha->isp_ops.pci_info_str(ha, pci_info));
+ ha->isp_ops->pci_info_str(ha, pci_info));
}
static ssize_t
@@ -645,9 +653,9 @@ qla2x00_beacon_store(struct class_device *cdev, const char *buf,
return -EINVAL;
if (val)
- rval = ha->isp_ops.beacon_on(ha);
+ rval = ha->isp_ops->beacon_on(ha);
else
- rval = ha->isp_ops.beacon_off(ha);
+ rval = ha->isp_ops->beacon_off(ha);
if (rval != QLA_SUCCESS)
count = 0;
@@ -891,7 +899,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
rval = qla24xx_get_isp_stats(ha, (uint32_t *)&stat_buf,
sizeof(stat_buf) / 4, mb_stat);
} else if (atomic_read(&ha->loop_state) == LOOP_READY &&
@@ -959,6 +967,122 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
}
+static int
+qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ int ret = 0;
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha;
+
+ ret = qla24xx_vport_create_req_sanity_check(fc_vport);
+ if (ret) {
+ DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
+ "status %x\n", ret));
+ return (ret);
+ }
+
+ vha = qla24xx_create_vhost(fc_vport);
+ if (vha == NULL) {
+ DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
+ vha));
+ return FC_VPORT_FAILED;
+ }
+ if (disable) {
+ atomic_set(&vha->vp_state, VP_OFFLINE);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ } else
+ atomic_set(&vha->vp_state, VP_FAILED);
+
+ /* ready to create vport */
+ qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx);
+
+ /* initialize vport states */
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->vp_err_state = VP_ERR_PORTDWN;
+ vha->vp_prev_err_state = VP_ERR_UNKWN;
+ /* Check if physical ha port is Up */
+ if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
+ atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ /* Don't retry or attempt login of this virtual port */
+ DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
+ vha->host_no));
+ atomic_set(&vha->loop_state, LOOP_DEAD);
+ if (!disable)
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ }
+
+ if (scsi_add_host(vha->host, &fc_vport->dev)) {
+ DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
+ vha->host_no, vha->vp_idx));
+ goto vport_create_failed_2;
+ }
+
+ /* initialize attributes */
+ fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+ fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+ fc_host_supported_classes(vha->host) =
+ fc_host_supported_classes(ha->host);
+ fc_host_supported_speeds(vha->host) =
+ fc_host_supported_speeds(ha->host);
+
+ qla24xx_vport_disable(fc_vport, disable);
+
+ return 0;
+vport_create_failed_2:
+ qla24xx_disable_vp(vha);
+ qla24xx_deallocate_vp_id(vha);
+ kfree(vha->port_name);
+ kfree(vha->node_name);
+ scsi_host_put(vha->host);
+ return FC_VPORT_FAILED;
+}
+
+int
+qla24xx_vport_delete(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha = fc_vport->dd_data;
+
+ qla24xx_disable_vp(vha);
+ qla24xx_deallocate_vp_id(vha);
+
+ down(&ha->vport_sem);
+ ha->cur_vport_count--;
+ clear_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
+ up(&ha->vport_sem);
+
+ kfree(vha->node_name);
+ kfree(vha->port_name);
+
+ if (vha->timer_active) {
+ qla2x00_vp_stop_timer(vha);
+ DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
+ "has stopped\n",
+ vha->host_no, vha->vp_idx, vha));
+ }
+
+ fc_remove_host(vha->host);
+
+ scsi_remove_host(vha->host);
+
+ scsi_host_put(vha->host);
+
+ return 0;
+}
+
+int
+qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ scsi_qla_host_t *vha = fc_vport->dd_data;
+
+ if (disable)
+ qla24xx_disable_vp(vha);
+ else
+ qla24xx_enable_vp(vha);
+
+ return 0;
+}
+
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -996,6 +1120,49 @@ struct fc_function_template qla2xxx_transport_functions = {
.issue_fc_host_lip = qla2x00_issue_lip,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+
+ .vport_create = qla24xx_vport_create,
+ .vport_disable = qla24xx_vport_disable,
+ .vport_delete = qla24xx_vport_delete,
+};
+
+struct fc_function_template qla2xxx_transport_vport_functions = {
+
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+
+ .get_host_port_id = qla2x00_get_host_port_id,
+ .show_host_port_id = 1,
+ .get_host_speed = qla2x00_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_port_type = qla2x00_get_host_port_type,
+ .show_host_port_type = 1,
+ .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
+ .show_host_symbolic_name = 1,
+ .set_host_system_hostname = qla2x00_set_host_system_hostname,
+ .show_host_system_hostname = 1,
+ .get_host_fabric_name = qla2x00_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+ .get_host_port_state = qla2x00_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_port *),
+ .show_rport_supported_classes = 1,
+
+ .get_starget_node_name = qla2x00_get_starget_node_name,
+ .show_starget_node_name = 1,
+ .get_starget_port_name = qla2x00_get_starget_port_name,
+ .show_starget_port_name = 1,
+ .get_starget_port_id = qla2x00_get_starget_port_id,
+ .show_starget_port_id = 1,
+
+ .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
+ .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .issue_fc_host_lip = qla2x00_issue_lip,
+ .get_fc_host_stats = qla2x00_get_fc_host_stats,
};
void
@@ -1004,4 +1171,6 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(ha->host) = MAX_NUM_VPORT_FABRIC;
+ fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
}
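
The qla_attr.c hunks above track the updated binary sysfs attribute API: the
read/write callbacks now also receive the struct bin_attribute pointer, and
the per-attribute .owner field is gone. A minimal sketch of an attribute using
the new prototype (illustrative only; the "demo" attribute and its backing
buffer are hypothetical, not part of the qla2xxx patch):

#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static char demo_data[64];		/* hypothetical backing store */

static ssize_t demo_read(struct kobject *kobj, struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t count)
{
	if (off >= sizeof(demo_data))
		return 0;
	if (off + count > sizeof(demo_data))
		count = sizeof(demo_data) - off;
	memcpy(buf, demo_data + off, count);
	return count;
}

static struct bin_attribute demo_attr = {
	.attr	= { .name = "demo", .mode = S_IRUSR },
	.size	= sizeof(demo_data),
	.read	= demo_read,
};

/* registered against a kobject, e.g.:
 *	sysfs_create_bin_file(kobj, &demo_attr);
 * and removed again with sysfs_remove_bin_file(kobj, &demo_attr);
 */
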
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6ed6962bc2b..563d18f4ff50 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -37,6 +37,121 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
return ptr + (ha->response_q_length * sizeof(response_t));
}
+static int
+qla2xxx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
+ uint32_t cram_size, uint32_t *ext_mem, void **nxt)
+{
+ int rval;
+ uint32_t cnt, stat, timer, risc_address, ext_mem_cnt;
+ uint16_t mb[4];
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ rval = QLA_SUCCESS;
+ risc_address = ext_mem_cnt = 0;
+ memset(mb, 0, sizeof(mb));
+
+ /* Code RAM. */
+ risc_address = 0x20000;
+ WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS;
+ cnt++, risc_address++) {
+ WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
+ WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
+ RD_REG_WORD(&reg->mailbox8);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb[0] = RD_REG_WORD(&reg->mailbox0);
+ mb[2] = RD_REG_WORD(&reg->mailbox2);
+ mb[3] = RD_REG_WORD(&reg->mailbox3);
+
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+
+ /* Clear this intr; it wasn't a mailbox intr */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb[0] & MBS_MASK;
+ code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ if (rval == QLA_SUCCESS) {
+ /* External Memory. */
+ risc_address = 0x100000;
+ ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
+ WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ }
+ for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
+ cnt++, risc_address++) {
+ WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
+ WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
+ RD_REG_WORD(&reg->mailbox8);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+
+ for (timer = 6000000; timer; timer--) {
+ /* Check for pending interrupts. */
+ stat = RD_REG_DWORD(&reg->host_status);
+ if (stat & HSRX_RISC_INT) {
+ stat &= 0xff;
+
+ if (stat == 0x1 || stat == 0x2 ||
+ stat == 0x10 || stat == 0x11) {
+ set_bit(MBX_INTERRUPT,
+ &ha->mbx_cmd_flags);
+
+ mb[0] = RD_REG_WORD(&reg->mailbox0);
+ mb[2] = RD_REG_WORD(&reg->mailbox2);
+ mb[3] = RD_REG_WORD(&reg->mailbox3);
+
+ WRT_REG_DWORD(&reg->hccr,
+ HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ break;
+ }
+
+ /* Clear this intr; it wasn't a mailbox intr */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD(&reg->hccr);
+ }
+ udelay(5);
+ }
+
+ if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+ rval = mb[0] & MBS_MASK;
+ ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
+ } else {
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
+
+ *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt] : NULL;
+ return rval;
+}
+
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
* @ha: HA context
@@ -633,11 +748,10 @@ void
qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
int rval;
- uint32_t cnt, timer;
+ uint32_t cnt;
uint32_t risc_address;
- uint16_t mb[4], wd;
+ uint16_t mb0, wd;
- uint32_t stat;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
uint32_t *iter_reg;
@@ -645,10 +759,9 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
unsigned long flags;
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
- void *eft;
+ void *nxt;
risc_address = ext_mem_cnt = 0;
- memset(mb, 0, sizeof(mb));
flags = 0;
if (!hardware_locked)
@@ -701,250 +814,236 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
/* Shadow registers. */
WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
RD_REG_DWORD(&reg->iobase_addr);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0000000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(dmp_reg));
-
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0100000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(dmp_reg));
-
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0200000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(dmp_reg));
-
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0300000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(dmp_reg));
-
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0400000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(dmp_reg));
-
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0500000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(dmp_reg));
-
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xF0);
- WRT_REG_DWORD(dmp_reg, 0xB0600000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xFC);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(dmp_reg));
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
/* Mailbox registers. */
- mbx_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
+ mbx_reg = &reg->mailbox0;
for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->xseq_0_reg) / 4; cnt++)
fw->xseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
/* Receive sequence registers. */
iter_reg = fw->rseq_gp_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->rseq_0_reg) / 4; cnt++)
fw->rseq_0_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
/* Command DMA registers. */
WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
/* Queues. */
iter_reg = fw->req0_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 8; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
+ dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->resp0_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 8; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
+ dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->req1_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 8; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xE4);
+ dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->xmt1_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->xmt2_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->xmt3_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->xmt4_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
fw->xmt_data_dma_reg[cnt] =
htonl(RD_REG_DWORD(dmp_reg++));
@@ -952,221 +1051,221 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
/* Receive DMA registers. */
iter_reg = fw->rcvt0_data_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->rcvt1_data_dma_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
/* RISC registers. */
iter_reg = fw->risc_gp_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
/* Local memory controller registers. */
iter_reg = fw->lmc_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
/* Fibre Protocol Module registers. */
iter_reg = fw->fpm_hdw_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
/* Frame Buffer registers. */
iter_reg = fw->fb_hdw_reg;
WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
- dmp_reg = (uint32_t __iomem *)((uint8_t __iomem *)reg + 0xC0);
+ dmp_reg = &reg->iobase_window;
for (cnt = 0; cnt < 16; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
@@ -1187,10 +1286,10 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
- mb[0] = (uint32_t) RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000 ; cnt && mb[0]; cnt--) {
+ mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 10000 ; cnt && mb0; cnt--) {
udelay(5);
- mb[0] = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
barrier();
}
@@ -1214,110 +1313,717 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
rval = QLA_FUNCTION_TIMEOUT;
}
- /* Memory. */
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_memory(ha, fw->code_ram,
+ sizeof(fw->code_ram), fw->ext_mem, &nxt);
+
if (rval == QLA_SUCCESS) {
- /* Code RAM. */
- risc_address = 0x20000;
- WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ nxt = qla2xxx_copy_queues(ha, nxt);
+ if (ha->eft)
+ memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
}
- for (cnt = 0; cnt < sizeof(fw->code_ram) / 4 && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
- WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
- RD_REG_WORD(&reg->mailbox8);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
- if (stat & HSRX_RISC_INT) {
- stat &= 0xff;
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Failed to dump firmware (%x)!!!\n", rval);
+ ha->fw_dumped = 0;
- if (stat == 0x1 || stat == 0x2 ||
- stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "Firmware dump saved to temp buffer (%ld/%p).\n",
+ ha->host_no, ha->fw_dump);
+ ha->fw_dumped = 1;
+ }
- mb[0] = RD_REG_WORD(&reg->mailbox0);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+qla24xx_fw_dump_failed:
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
- WRT_REG_DWORD(&reg->hccr,
- HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- break;
- }
+void
+qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+{
+ int rval;
+ uint32_t cnt;
+ uint32_t risc_address;
+ uint16_t mb0, wd;
- /* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- }
- udelay(5);
- }
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ uint32_t __iomem *dmp_reg;
+ uint32_t *iter_reg;
+ uint16_t __iomem *mbx_reg;
+ unsigned long flags;
+ struct qla25xx_fw_dump *fw;
+ uint32_t ext_mem_cnt;
+ void *nxt;
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb[0] & MBS_MASK;
- fw->code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
- } else {
- rval = QLA_FUNCTION_FAILED;
+ risc_address = ext_mem_cnt = 0;
+ flags = 0;
+
+ if (!hardware_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (!ha->fw_dump) {
+ qla_printk(KERN_WARNING, ha,
+ "No buffer available for dump!!!\n");
+ goto qla25xx_fw_dump_failed;
+ }
+
+ if (ha->fw_dumped) {
+ qla_printk(KERN_WARNING, ha,
+ "Firmware has been previously dumped (%p) -- ignoring "
+ "request...\n", ha->fw_dump);
+ goto qla25xx_fw_dump_failed;
+ }
+ fw = &ha->fw_dump->isp.isp25;
+ qla2xxx_prep_dump(ha, ha->fw_dump);
+
+ rval = QLA_SUCCESS;
+ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+
+ /* Pause RISC. */
+ if ((RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0) {
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET |
+ HCCRX_CLR_HOST_INT);
+ RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
+ WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+ for (cnt = 30000;
+ (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
}
}
if (rval == QLA_SUCCESS) {
- /* External Memory. */
- risc_address = 0x100000;
- ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
- WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
- clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- }
- for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
- cnt++, risc_address++) {
- WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
- WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
- RD_REG_WORD(&reg->mailbox8);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ /* Host interface registers. */
+ dmp_reg = (uint32_t __iomem *)(reg + 0);
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
- for (timer = 6000000; timer; timer--) {
- /* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
- if (stat & HSRX_RISC_INT) {
- stat &= 0xff;
+ /* Disable interrupts. */
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
- if (stat == 0x1 || stat == 0x2 ||
- stat == 0x10 || stat == 0x11) {
- set_bit(MBX_INTERRUPT,
- &ha->mbx_cmd_flags);
+ /* Shadow registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ RD_REG_DWORD(&reg->iobase_addr);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
- mb[0] = RD_REG_WORD(&reg->mailbox0);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->hccr,
- HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- break;
- }
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
- /* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
- }
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+
+ /* RISC I/O register. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
+ RD_REG_DWORD(&reg->iobase_addr);
+ fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Mailbox registers. */
+ mbx_reg = &reg->mailbox0;
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+
+ /* Transfer sequence registers. */
+ iter_reg = fw->xseq_gp_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF00);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF10);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF20);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF30);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF40);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF50);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF60);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBF70);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->xseq_0_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBFC0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBFD0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBFE0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xBFF0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->xseq_1_reg) / 4; cnt++)
+ fw->xseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Receive sequence registers. */
+ iter_reg = fw->rseq_gp_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF00);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF10);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF20);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF30);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF40);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF50);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF60);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFF70);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->rseq_0_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFFC0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFFD0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFFE0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->rseq_1_reg) / 4; cnt++)
+ fw->rseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xFFF0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->rseq_2_reg) / 4; cnt++)
+ fw->rseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Auxiliary sequence registers. */
+ iter_reg = fw->aseq_gp_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB000);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB010);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB020);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB030);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB040);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB050);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB060);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB070);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->aseq_0_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB0C0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB0D0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB0E0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->aseq_1_reg) / 4; cnt++)
+ fw->aseq_1_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0xB0F0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->aseq_2_reg) / 4; cnt++)
+ fw->aseq_2_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Command DMA registers. */
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7100);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->cmd_dma_reg) / 4; cnt++)
+ fw->cmd_dma_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Queues. */
+ iter_reg = fw->req0_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7200);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 8; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->resp0_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7300);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 8; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->req1_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7400);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 8; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ dmp_reg = &reg->iobase_q;
+ for (cnt = 0; cnt < 7; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Transmit DMA registers. */
+ iter_reg = fw->xmt0_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7600);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7610);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->xmt1_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7620);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7630);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->xmt2_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7640);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7650);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->xmt3_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7660);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7670);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->xmt4_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7680);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7690);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x76A0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < sizeof(fw->xmt_data_dma_reg) / 4; cnt++)
+ fw->xmt_data_dma_reg[cnt] =
+ htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Receive DMA registers. */
+ iter_reg = fw->rcvt0_data_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7700);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7710);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ iter_reg = fw->rcvt1_data_dma_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7720);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x7730);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* RISC registers. */
+ iter_reg = fw->risc_gp_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F00);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F10);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F20);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F30);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F40);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F50);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F60);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Local memory controller registers. */
+ iter_reg = fw->lmc_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3000);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3010);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3020);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3030);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3040);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3050);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3060);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x3070);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Fibre Protocol Module registers. */
+ iter_reg = fw->fpm_hdw_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4000);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4010);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4020);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4030);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4040);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4050);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4060);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4070);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4080);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x4090);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x40A0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x40B0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Frame Buffer registers. */
+ iter_reg = fw->fb_hdw_reg;
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6020);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6030);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6040);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6100);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6130);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6150);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6170);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6190);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x61B0);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ WRT_REG_DWORD(&reg->iobase_addr, 0x6F00);
+ dmp_reg = &reg->iobase_window;
+ for (cnt = 0; cnt < 16; cnt++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+
+ /* Reset RISC. */
+ WRT_REG_DWORD(&reg->ctrl_status,
+ CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_DWORD(&reg->ctrl_status) &
+ CSRX_DMA_ACTIVE) == 0)
+ break;
+
+ udelay(10);
+ }
+
+ WRT_REG_DWORD(&reg->ctrl_status,
+ CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
+
+ udelay(100);
+ /* Wait for firmware to complete NVRAM accesses. */
+ mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ for (cnt = 10000 ; cnt && mb0; cnt--) {
udelay(5);
+ mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+ barrier();
}
- if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
- rval = mb[0] & MBS_MASK;
- fw->ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
- } else {
- rval = QLA_FUNCTION_FAILED;
+ /* Wait for soft-reset to complete. */
+ for (cnt = 0; cnt < 30000; cnt++) {
+ if ((RD_REG_DWORD(&reg->ctrl_status) &
+ CSRX_ISP_SOFT_RESET) == 0)
+ break;
+
+ udelay(10);
}
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
}
+ for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rval == QLA_SUCCESS; cnt--) {
+ if (cnt)
+ udelay(100);
+ else
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (rval == QLA_SUCCESS)
+ rval = qla2xxx_dump_memory(ha, fw->code_ram,
+ sizeof(fw->code_ram), fw->ext_mem, &nxt);
+
if (rval == QLA_SUCCESS) {
- eft = qla2xxx_copy_queues(ha, &fw->ext_mem[cnt]);
+ nxt = qla2xxx_copy_queues(ha, nxt);
if (ha->eft)
- memcpy(eft, ha->eft, ntohl(ha->fw_dump->eft_size));
+ memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size));
}
if (rval != QLA_SUCCESS) {
@@ -1332,7 +2038,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
ha->fw_dumped = 1;
}
-qla24xx_fw_dump_failed:
+qla25xx_fw_dump_failed:
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1411,9 +2117,9 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
printk("0x%02x ", cmd->cmnd[i]);
}
printk("\n seg_cnt=%d, allowed=%d, retries=%d\n",
- cmd->use_sg, cmd->allowed, cmd->retries);
+ scsi_sg_count(cmd), cmd->allowed, cmd->retries);
printk(" request buffer=0x%p, request buffer len=0x%x\n",
- cmd->request_buffer, cmd->request_bufflen);
+ scsi_sglist(cmd), scsi_bufflen(cmd));
printk(" tag=%d, transfersize=0x%x\n",
cmd->tag, cmd->transfersize);
printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5b12278968e0..cca4b0d8253e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -21,6 +21,7 @@
/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
+/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/*
* Local Macro Definitions.
*/
@@ -30,7 +31,8 @@
defined(QL_DEBUG_LEVEL_7) || defined(QL_DEBUG_LEVEL_8) || \
defined(QL_DEBUG_LEVEL_9) || defined(QL_DEBUG_LEVEL_10) || \
defined(QL_DEBUG_LEVEL_11) || defined(QL_DEBUG_LEVEL_12) || \
- defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14)
+ defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14) || \
+ defined(QL_DEBUG_LEVEL_15)
#define QL_DEBUG_ROUTINES
#endif
@@ -125,6 +127,12 @@
#define DEBUG14(x) do {} while (0)
#endif
+#if defined(QL_DEBUG_LEVEL_15)
+#define DEBUG15(x) do {x;} while (0)
+#else
+#define DEBUG15(x) do {} while (0)
+#endif
+
/*
* Firmware Dump structure definition
*/
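
DEBUG15() completes the existing DEBUGn() ladder: with QL_DEBUG_LEVEL_15 defined it expands to its argument, otherwise to an empty do-while, so NPIV tracing compiles away entirely. A hypothetical call site in the same style as the other levels:

	DEBUG15(printk("scsi(%ld): %s: vp_idx %d acquired.\n",
	    ha->host_no, __func__, ha->vp_idx));
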
@@ -205,6 +213,43 @@ struct qla24xx_fw_dump {
uint32_t ext_mem[1];
};
+struct qla25xx_fw_dump {
+ uint32_t host_status;
+ uint32_t host_reg[32];
+ uint32_t shadow_reg[11];
+ uint32_t risc_io_reg;
+ uint16_t mailbox_reg[32];
+ uint32_t xseq_gp_reg[128];
+ uint32_t xseq_0_reg[48];
+ uint32_t xseq_1_reg[16];
+ uint32_t rseq_gp_reg[128];
+ uint32_t rseq_0_reg[32];
+ uint32_t rseq_1_reg[16];
+ uint32_t rseq_2_reg[16];
+ uint32_t aseq_gp_reg[128];
+ uint32_t aseq_0_reg[32];
+ uint32_t aseq_1_reg[16];
+ uint32_t aseq_2_reg[16];
+ uint32_t cmd_dma_reg[16];
+ uint32_t req0_dma_reg[15];
+ uint32_t resp0_dma_reg[15];
+ uint32_t req1_dma_reg[15];
+ uint32_t xmt0_dma_reg[32];
+ uint32_t xmt1_dma_reg[32];
+ uint32_t xmt2_dma_reg[32];
+ uint32_t xmt3_dma_reg[32];
+ uint32_t xmt4_dma_reg[32];
+ uint32_t xmt_data_dma_reg[16];
+ uint32_t rcvt0_data_dma_reg[32];
+ uint32_t rcvt1_data_dma_reg[32];
+ uint32_t risc_gp_reg[128];
+ uint32_t lmc_reg[128];
+ uint32_t fpm_hdw_reg[192];
+ uint32_t fb_hdw_reg[192];
+ uint32_t code_ram[0x2000];
+ uint32_t ext_mem[1];
+};
+
#define EFT_NUM_BUFFERS 4
#define EFT_BYTES_PER_BUFFER 0x4000
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -238,5 +283,6 @@ struct qla2xxx_fw_dump {
struct qla2100_fw_dump isp21;
struct qla2300_fw_dump isp23;
struct qla24xx_fw_dump isp24;
+ struct qla25xx_fw_dump isp25;
} isp;
};
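
Like the isp24 layout, struct qla25xx_fw_dump ends in ext_mem[1] as a flexible trailer: the fixed register and code-RAM area runs up to offsetof(..., ext_mem) and external RISC memory is appended behind it. The qla_init.c hunk later in this diff sizes the dump on exactly that split; a compressed sketch of the arithmetic (the full allocation also accounts for the qla2xxx_fw_dump header and any extended-trace buffer space):

	uint32_t fixed_size, mem_size;

	fixed_size = IS_QLA25XX(ha) ?
	    offsetof(struct qla25xx_fw_dump, ext_mem) :
	    offsetof(struct qla24xx_fw_dump, ext_mem);
	/* External memory starts at 0x100000; one dword per word reported. */
	mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t);
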
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e8948b679f5b..0c9f36c8a248 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1551,6 +1551,9 @@ typedef struct fc_port {
unsigned long last_queue_full;
unsigned long last_ramp_up;
+
+ struct list_head vp_fcport;
+ uint16_t vp_idx;
} fc_port_t;
/*
@@ -1708,6 +1711,14 @@ struct ct_fdmi_hba_attributes {
#define FDMI_PORT_OS_DEVICE_NAME 5
#define FDMI_PORT_HOST_NAME 6
+#define FDMI_PORT_SPEED_1GB 0x1
+#define FDMI_PORT_SPEED_2GB 0x2
+#define FDMI_PORT_SPEED_10GB 0x4
+#define FDMI_PORT_SPEED_4GB 0x8
+#define FDMI_PORT_SPEED_8GB 0x10
+#define FDMI_PORT_SPEED_16GB 0x20
+#define FDMI_PORT_SPEED_UNKNOWN 0x8000
+
struct ct_fdmi_port_attr {
uint16_t type;
uint16_t len;
@@ -1999,6 +2010,36 @@ struct gid_list_info {
};
#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
+/* NPIV */
+typedef struct vport_info {
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ int vp_id;
+ uint16_t loop_id;
+ unsigned long host_no;
+ uint8_t port_id[3];
+ int loop_state;
+} vport_info_t;
+
+typedef struct vport_params {
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ uint32_t options;
+#define VP_OPTS_RETRY_ENABLE BIT_0
+#define VP_OPTS_VP_DISABLE BIT_1
+} vport_params_t;
+
+/* NPIV - return codes of VP create and modify */
+#define VP_RET_CODE_OK 0
+#define VP_RET_CODE_FATAL 1
+#define VP_RET_CODE_WRONG_ID 2
+#define VP_RET_CODE_WWPN 3
+#define VP_RET_CODE_RESOURCES 4
+#define VP_RET_CODE_NO_MEM 5
+#define VP_RET_CODE_NOT_FOUND 6
+
+#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
+
/*
* ISP operations
*/
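
to_qla_parent() gives NPIV code a uniform way to reach the physical HBA: a vport carries a non-NULL ->parent while the physical port's is NULL, so the macro can be applied to either kind of handle. An illustrative use (not taken from this patch):

	scsi_qla_host_t *pha = to_qla_parent(ha);

	/* Hardware state always lives on the physical HBA. */
	spin_lock_irqsave(&pha->hardware_lock, flags);
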
@@ -2073,6 +2114,16 @@ struct qla_msix_entry {
uint16_t msix_entry;
};
+#define WATCH_INTERVAL 1 /* number of seconds */
+
+/* NPIV */
+#define MAX_MULTI_ID_LOOP 126
+#define MAX_MULTI_ID_FABRIC 64
+#define MAX_NUM_VPORT_LOOP (MAX_MULTI_ID_LOOP - 1)
+#define MAX_NUM_VPORT_FABRIC (MAX_MULTI_ID_FABRIC - 1)
+#define MAX_NUM_VHBA_LOOP (MAX_MULTI_ID_LOOP - 1)
+#define MAX_NUM_VHBA_FABRIC (MAX_MULTI_ID_FABRIC - 1)
+
/*
* Linux Host Adapter structure
*/
@@ -2108,6 +2159,8 @@ typedef struct scsi_qla_host {
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
uint32_t gpsc_supported :1;
+ uint32_t vsan_enabled :1;
+ uint32_t npiv_supported :1;
} flags;
atomic_t loop_state;
@@ -2147,6 +2200,7 @@ typedef struct scsi_qla_host {
#define BEACON_BLINK_NEEDED 25
#define REGISTER_FDMI_NEEDED 26
#define FCPORT_UPDATE_NEEDED 27
+#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0
@@ -2155,6 +2209,7 @@ typedef struct scsi_qla_host {
#define SWITCH_FOUND BIT_3
#define DFLG_NO_CABLE BIT_4
+#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
uint32_t device_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
@@ -2167,8 +2222,11 @@ typedef struct scsi_qla_host {
#define DT_ISP2432 BIT_8
#define DT_ISP5422 BIT_9
#define DT_ISP5432 BIT_10
-#define DT_ISP_LAST (DT_ISP5432 << 1)
+#define DT_ISP2532 BIT_11
+#define DT_ISP_LAST (DT_ISP2532 << 1)
+#define DT_IIDMA BIT_26
+#define DT_FWI2 BIT_27
#define DT_ZIO_SUPPORTED BIT_28
#define DT_OEM_001 BIT_29
#define DT_ISP2200A BIT_30
@@ -2186,12 +2244,16 @@ typedef struct scsi_qla_host {
#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
+#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
+#define IS_QLA25XX(ha) (IS_QLA2532(ha))
+#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
+#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
@@ -2228,7 +2290,7 @@ typedef struct scsi_qla_host {
uint16_t rsp_ring_index; /* Current index. */
uint16_t response_q_length;
- struct isp_operations isp_ops;
+ struct isp_operations *isp_ops;
/* Outstandings ISP commands. */
srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
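
Turning isp_ops into a pointer lets every adapter of a family share one static method table instead of copying the whole struct into each scsi_qla_host; the ha->isp_ops.x to ha->isp_ops->x churn in the qla_gs.c and qla_init.c hunks below follows directly from this. A sketch of the intended shape; the table itself is purely illustrative, and the fw_dump member name is an assumption not shown in this diff:

static struct isp_operations qla25xx_isp_ops = {
	.pci_config	= qla25xx_pci_config,	/* added by this patch */
	.reset_chip	= qla24xx_reset_chip,
	.fw_dump	= qla25xx_fw_dump,	/* member name assumed */
	/* ... remaining methods elided ... */
};

	/* Hypothetically selected once at probe time: */
	ha->isp_ops = &qla25xx_isp_ops;
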
@@ -2237,6 +2299,11 @@ typedef struct scsi_qla_host {
/* ISP configuration data. */
uint16_t loop_id; /* Host adapter loop id */
+ uint16_t switch_cap;
+#define FLOGI_SEQ_DEL BIT_8
+#define FLOGI_MID_SUPPORT BIT_10
+#define FLOGI_VSAN_SUPPORT BIT_12
+#define FLOGI_SP_SUPPORT BIT_13
uint16_t fb_rev;
port_id_t d_id; /* Host adapter port id */
@@ -2247,6 +2314,7 @@ typedef struct scsi_qla_host {
#define PORT_SPEED_1GB 0x00
#define PORT_SPEED_2GB 0x01
#define PORT_SPEED_4GB 0x03
+#define PORT_SPEED_8GB 0x04
uint16_t link_data_rate; /* F/W operating speed */
uint8_t current_topology;
@@ -2344,6 +2412,7 @@ typedef struct scsi_qla_host {
#define MBX_UPDATE_FLASH_ACTIVE 3
struct semaphore mbx_cmd_sem; /* Serialialize mbx access */
+ struct semaphore vport_sem; /* Virtual port synchronization */
struct semaphore mbx_intr_sem; /* Used for completion notification */
uint32_t mbx_flags;
@@ -2428,6 +2497,37 @@ typedef struct scsi_qla_host {
struct fc_host_statistics fc_host_stat;
struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
+
+ struct list_head vp_list; /* list of VP */
+ struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
+ uint8_t vp_idx_map[16];
+ uint16_t num_vhosts; /* number of vports created */
+ uint16_t num_vsans; /* number of vsan created */
+ uint16_t vp_idx; /* vport ID */
+
+ struct scsi_qla_host *parent; /* holds pport */
+ unsigned long vp_flags;
+ struct list_head vp_fcports; /* list of fcports */
+#define VP_IDX_ACQUIRED 0 /* bit no 0 */
+#define VP_CREATE_NEEDED 1
+#define VP_BIND_NEEDED 2
+#define VP_DELETE_NEEDED 3
+#define VP_SCR_NEEDED 4 /* State Change Request registration */
+ atomic_t vp_state;
+#define VP_OFFLINE 0
+#define VP_ACTIVE 1
+#define VP_FAILED 2
+// #define VP_DISABLE 3
+ uint16_t vp_err_state;
+ uint16_t vp_prev_err_state;
+#define VP_ERR_UNKWN 0
+#define VP_ERR_PORTDWN 1
+#define VP_ERR_FAB_UNSUPPORTED 2
+#define VP_ERR_FAB_NORESOURCES 3
+#define VP_ERR_FAB_LOGOUT 4
+#define VP_ERR_ADAP_NORESOURCES 5
+ int max_npiv_vports; /* 63 or 125 per topology */
+ int cur_vport_count;
} scsi_qla_host_t;
@@ -2481,6 +2581,7 @@ typedef struct scsi_qla_host {
#define OPTROM_SIZE_2300 0x20000
#define OPTROM_SIZE_2322 0x100000
#define OPTROM_SIZE_24XX 0x100000
+#define OPTROM_SIZE_25XX 0x200000
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index a0a722cf4237..99fe49618d61 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -8,14 +8,17 @@
#define __QLA_FW_H
#define MBS_CHECKSUM_ERROR 0x4010
+#define MBS_INVALID_PRODUCT_KEY 0x4020
/*
* Firmware Options.
*/
#define FO1_ENABLE_PUREX BIT_10
#define FO1_DISABLE_LED_CTRL BIT_6
+#define FO1_ENABLE_8016 BIT_0
#define FO2_ENABLE_SEL_CLASS2 BIT_5
#define FO3_NO_ABTS_ON_LINKDOWN BIT_14
+#define FO3_HOLD_STS_IOCB BIT_12
/*
* Port Database structure definition for ISP 24xx.
@@ -69,6 +72,16 @@ struct port_database_24xx {
uint8_t reserved_3[24];
};
+struct vp_database_24xx {
+ uint16_t vp_status;
+ uint8_t options;
+ uint8_t id;
+ uint8_t port_name[WWN_SIZE];
+ uint8_t node_name[WWN_SIZE];
+ uint16_t port_id_low;
+ uint16_t port_id_high;
+};
+
struct nvram_24xx {
/* NVRAM header. */
uint8_t id[4];
@@ -331,7 +344,9 @@ struct init_cb_24xx {
* BIT 10 = Reserved
* BIT 11 = Enable FC-SP Security
* BIT 12 = FC Tape Enable
- * BIT 13-31 = Reserved
+ * BIT 13 = Reserved
+ * BIT 14 = Enable Target PRLI Control
+ * BIT 15-31 = Reserved
*/
uint32_t firmware_options_2;
@@ -353,7 +368,8 @@ struct init_cb_24xx {
* BIT 13 = Data Rate bit 0
* BIT 14 = Data Rate bit 1
* BIT 15 = Data Rate bit 2
- * BIT 16-31 = Reserved
+ * BIT 16 = Enable 75 ohm Termination Select
+ * BIT 17-31 = Reserved
*/
uint32_t firmware_options_3;
@@ -425,6 +441,7 @@ struct cmd_type_7 {
#define TMF_LUN_RESET BIT_12
#define TMF_CLEAR_TASK_SET BIT_10
#define TMF_ABORT_TASK_SET BIT_9
+#define TMF_DSD_LIST_ENABLE BIT_2
#define TMF_READ_DATA BIT_1
#define TMF_WRITE_DATA BIT_0
@@ -579,7 +596,7 @@ struct els_entry_24xx {
#define EST_SOFI3 (1 << 4)
#define EST_SOFI2 (3 << 4)
- uint32_t rx_xchg_address[2]; /* Receive exchange address. */
+ uint32_t rx_xchg_address; /* Receive exchange address. */
uint16_t rx_dsd_count;
uint8_t opcode;
@@ -640,6 +657,7 @@ struct logio_entry_24xx {
uint16_t control_flags; /* Control flags. */
/* Modifiers. */
+#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
@@ -769,6 +787,15 @@ struct device_reg_24xx {
#define FA_RISC_CODE_ADDR 0x20000
#define FA_RISC_CODE_SEGMENTS 2
+#define FA_FW_AREA_ADDR 0x40000
+#define FA_VPD_NVRAM_ADDR 0x48000
+#define FA_FEATURE_ADDR 0x4C000
+#define FA_FLASH_DESCR_ADDR 0x50000
+#define FA_HW_EVENT_ADDR 0x54000
+#define FA_BOOT_LOG_ADDR 0x58000
+#define FA_FW_DUMP0_ADDR 0x60000
+#define FA_FW_DUMP1_ADDR 0x70000
+
uint32_t flash_data; /* Flash/NVRAM BIOS data. */
uint32_t ctrl_status; /* Control/Status. */
@@ -849,10 +876,13 @@ struct device_reg_24xx {
#define HCCRX_CLR_RISC_INT 0xA0000000
uint32_t gpiod; /* GPIO Data register. */
+
/* LED update mask. */
#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
/* Data update mask. */
#define GPDX_DATA_UPDATE_MASK (BIT_17|BIT_16)
+ /* Data update mask. */
+#define GPDX_DATA_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
/* LED control mask. */
#define GPDX_LED_COLOR_MASK (BIT_4|BIT_3|BIT_2)
/* LED bit values. Color names as
@@ -867,6 +897,8 @@ struct device_reg_24xx {
uint32_t gpioe; /* GPIO Enable register. */
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
+ /* Enable update mask. */
+#define GPEX_ENABLE_UPDATE_2_MASK (BIT_28|BIT_27|BIT_26|BIT_17|BIT_16)
/* Enable. */
#define GPEX_ENABLE (BIT_1|BIT_0)
@@ -906,6 +938,14 @@ struct device_reg_24xx {
uint16_t mailbox29;
uint16_t mailbox30;
uint16_t mailbox31;
+
+ uint32_t iobase_window;
+ uint32_t unused_4[8]; /* Gap. */
+ uint32_t iobase_q;
+ uint32_t unused_5[2]; /* Gap. */
+ uint32_t iobase_select;
+ uint32_t unused_6[2]; /* Gap. */
+ uint32_t iobase_sdata;
};
/* MID Support ***************************************************************/
@@ -962,6 +1002,25 @@ struct mid_db_24xx {
struct mid_db_entry_24xx entries[MAX_MID_VPS];
};
+ /*
+ * Virtual Fabric ID type definition.
+ */
+typedef struct vf_id {
+ uint16_t id : 12;
+ uint16_t priority : 4;
+} vf_id_t;
+
+/*
+ * Virtual Fabric HopCt type definition.
+ */
+typedef struct vf_hopct {
+ uint16_t reserved : 8;
+ uint16_t hopct : 8;
+} vf_hopct_t;
+
+/*
+ * Virtual Port Control IOCB
+ */
#define VP_CTRL_IOCB_TYPE 0x30 /* Vitual Port Control entry. */
struct vp_ctrl_entry_24xx {
uint8_t entry_type; /* Entry type. */
@@ -974,6 +1033,7 @@ struct vp_ctrl_entry_24xx {
uint16_t vp_idx_failed;
uint16_t comp_status; /* Completion status. */
+#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquireing ID. */
#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
@@ -982,24 +1042,34 @@ struct vp_ctrl_entry_24xx {
#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
+#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
uint16_t vp_count;
uint8_t vp_idx_map[16];
-
- uint8_t reserved_4[32];
+ uint16_t flags;
+ struct vf_id id;
+ uint16_t reserved_4;
+ struct vf_hopct hopct;
+ uint8_t reserved_5[8];
};
+/*
+ * Modify Virtual Port Configuration IOCB
+ */
#define VP_CONFIG_IOCB_TYPE 0x31 /* Vitual Port Config entry. */
struct vp_config_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t sys_define; /* System defined. */
+ uint8_t handle_count;
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t reserved_1;
+ uint16_t flags;
+#define CS_VF_BIND_VPORTS_TO_VF BIT_0
+#define CS_VF_SET_QOS_OF_VPORTS BIT_1
+#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
uint16_t comp_status; /* Completion status. */
#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
@@ -1009,27 +1079,29 @@ struct vp_config_entry_24xx {
#define CS_VCT_BUSY 0x05 /* Firmware not ready to accept cmd. */
uint8_t command;
-#define VCT_COMMAND_MOD_VPS 0x00 /* Enable VPs. */
-#define VCT_COMMAND_MOD_ENABLE_VPS 0x08 /* Disable VPs. */
+#define VCT_COMMAND_MOD_VPS 0x00 /* Modify VP configurations. */
+#define VCT_COMMAND_MOD_ENABLE_VPS 0x01 /* Modify configuration & enable VPs. */
uint8_t vp_count;
- uint8_t vp_idx1;
- uint8_t vp_idx2;
+ uint8_t vp_index1;
+ uint8_t vp_index2;
uint8_t options_idx1;
uint8_t hard_address_idx1;
- uint16_t reserved_2;
+ uint16_t reserved_vp1;
uint8_t port_name_idx1[WWN_SIZE];
uint8_t node_name_idx1[WWN_SIZE];
uint8_t options_idx2;
uint8_t hard_address_idx2;
- uint16_t reserved_3;
+ uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
-
- uint8_t reserved_4[8];
+ struct vf_id id;
+ uint16_t reserved_4;
+ struct vf_hopct hopct;
+ uint8_t reserved_5;
};
#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
@@ -1054,5 +1126,30 @@ struct vp_rpt_id_entry_24xx {
uint8_t reserved_4[32];
};
+#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
+struct vf_evfp_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t comp_status; /* Completion status. */
+ uint16_t timeout; /* timeout */
+ uint16_t adim_tagging_mode;
+
+ uint16_t vfport_id;
+ uint32_t exch_addr;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t control_flags;
+ uint32_t io_parameter_0;
+ uint32_t io_parameter_1;
+ uint32_t tx_address[2]; /* Data segment 0 address. */
+ uint32_t tx_len; /* Data segment 0 length. */
+ uint32_t rx_address[2]; /* Data segment 1 address. */
+ uint32_t rx_len; /* Data segment 1 length. */
+};
+
/* END MID Support ***********************************************************/
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 74544ae4b0e2..aa1e41152283 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -17,6 +17,7 @@ extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
extern int qla2100_pci_config(struct scsi_qla_host *);
extern int qla2300_pci_config(struct scsi_qla_host *);
extern int qla24xx_pci_config(scsi_qla_host_t *);
+extern int qla25xx_pci_config(scsi_qla_host_t *);
extern void qla2x00_reset_chip(struct scsi_qla_host *);
extern void qla24xx_reset_chip(struct scsi_qla_host *);
extern int qla2x00_chip_diag(struct scsi_qla_host *);
@@ -62,6 +63,38 @@ extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
+extern int num_hosts;
+
+/*
+ * Global Functions in qla_mid.c source file.
+ */
+extern struct scsi_host_template qla2x00_driver_template;
+extern struct scsi_host_template qla24xx_driver_template;
+extern struct scsi_transport_template *qla2xxx_transport_vport_template;
+extern uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
+extern void qla2x00_timer(scsi_qla_host_t *);
+extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
+extern void qla2x00_stop_timer(scsi_qla_host_t *);
+extern uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *);
+extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
+extern int qla24xx_disable_vp (scsi_qla_host_t *);
+extern int qla24xx_enable_vp (scsi_qla_host_t *);
+extern void qla2x00_mem_free(scsi_qla_host_t *);
+extern int qla24xx_control_vp(scsi_qla_host_t *, int );
+extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
+extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
+extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
+extern int qla24xx_configure_vhba (scsi_qla_host_t *);
+extern int qla24xx_get_vp_entry(scsi_qla_host_t *, uint16_t, int);
+extern int qla24xx_get_vp_database(scsi_qla_host_t *, uint16_t);
+extern int qla2x00_do_dpc_vp(scsi_qla_host_t *);
+extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
+ struct vp_rpt_id_entry_24xx *);
+extern scsi_qla_host_t * qla24xx_find_vhost_by_name(scsi_qla_host_t *,
+ uint8_t *);
+extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
+extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
+extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
@@ -77,6 +110,10 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
+extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
+extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
+extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
+extern int qla24xx_vport_delete(struct fc_vport *);
/*
* Global Function Prototypes in qla_iocb.c source file.
@@ -128,7 +165,7 @@ qla2x00_abort_target(fc_port_t *);
extern int
qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
- uint8_t *, uint16_t *);
+ uint8_t *, uint16_t *, uint16_t *);
extern int
qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
@@ -245,6 +282,10 @@ extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
+extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
+extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
+ uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -271,6 +312,7 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
extern void qla2100_fw_dump(scsi_qla_host_t *, int);
extern void qla2300_fw_dump(scsi_qla_host_t *, int);
extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
+extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_print_scsi_cmd(struct scsi_cmnd *);
@@ -303,6 +345,7 @@ struct class_device_attribute;
extern struct class_device_attribute *qla2x00_host_attrs[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
+extern struct fc_function_template qla2xxx_transport_vport_functions;
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ec5b2dd90d6a..b06cbb8580d3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -88,6 +88,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = ha->vp_idx;
return (ct_pkt);
}
@@ -126,7 +127,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
ha->host_no, routine, ms_pkt->entry_status));
} else {
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
((struct ct_entry_24xx *)ms_pkt)->comp_status);
else
@@ -179,7 +180,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
/* Issue GA_NXT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GA_NXT_REQ_SIZE, GA_NXT_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE,
+ GA_NXT_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD,
@@ -265,7 +267,8 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
/* Issue GID_PT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GID_PT_REQ_SIZE, GID_PT_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
+ GID_PT_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
@@ -337,7 +340,7 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
GPN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -398,7 +401,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
GNN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -472,7 +475,8 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
/* Issue RFT_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFT_ID_REQ_SIZE, RFT_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
+ RFT_ID_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFT_ID_CMD,
@@ -527,7 +531,8 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
/* Issue RFF_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
+ RFF_ID_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD,
@@ -581,7 +586,8 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
/* Issue RNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RNN_ID_REQ_SIZE, RNN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
+ RNN_ID_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD,
@@ -644,7 +650,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
/* Issue RSNN_NN */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -1101,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
if (ha->flags.management_server_logged_in)
return ret;
- ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
+ ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
@@ -1186,6 +1192,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = ha->vp_idx;
return ct_pkt;
}
@@ -1196,7 +1203,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
} else {
@@ -1251,7 +1258,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
/* Issue RHBA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1371,7 +1378,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
+ ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
alen = strlen(eiter->a.fw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
@@ -1437,7 +1444,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
/* Issue RPA */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
/* Prepare CT request */
@@ -1495,7 +1502,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
/* Issue RPA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1525,12 +1532,20 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(4);
+ if (IS_QLA25XX(ha))
+ eiter->a.sup_speed = __constant_cpu_to_be32(
+ FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
+ else if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ eiter->a.sup_speed = __constant_cpu_to_be32(
+ FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
+ FDMI_PORT_SPEED_4GB);
else if (IS_QLA23XX(ha))
- eiter->a.sup_speed = __constant_cpu_to_be32(2);
+ eiter->a.sup_speed = __constant_cpu_to_be32(
+ FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
else
- eiter->a.sup_speed = __constant_cpu_to_be32(1);
+ eiter->a.sup_speed = __constant_cpu_to_be32(
+ FDMI_PORT_SPEED_1GB);
size += 4 + 4;
DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
@@ -1541,14 +1556,25 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
switch (ha->link_data_rate) {
- case 0:
- eiter->a.cur_speed = __constant_cpu_to_be32(1);
+ case PORT_SPEED_1GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
+ break;
+ case PORT_SPEED_2GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
+ break;
+ case PORT_SPEED_4GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
break;
- case 1:
- eiter->a.cur_speed = __constant_cpu_to_be32(2);
+ case PORT_SPEED_8GB:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
break;
- case 3:
- eiter->a.cur_speed = __constant_cpu_to_be32(4);
+ default:
+ eiter->a.cur_speed =
+ __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
break;
}
size += 4 + 4;
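
Both FDMI speed attributes use the same bit encoding from qla_def.h: SUPPORTED_SPEED reports the mask a chip family can negotiate, CURRENT_SPEED the single bit matching the firmware's link_data_rate. The patch keeps the current-speed mapping as the switch above; the same mapping as a standalone helper, for illustration only:

static uint32_t
qla2x00_link_rate_to_fdmi_speed(uint16_t link_data_rate)
{
	switch (link_data_rate) {
	case PORT_SPEED_1GB:
		return FDMI_PORT_SPEED_1GB;
	case PORT_SPEED_2GB:
		return FDMI_PORT_SPEED_2GB;
	case PORT_SPEED_4GB:
		return FDMI_PORT_SPEED_4GB;
	case PORT_SPEED_8GB:
		return FDMI_PORT_SPEED_8GB;
	default:
		return FDMI_PORT_SPEED_UNKNOWN;
	}
}

	eiter->a.cur_speed =
	    cpu_to_be32(qla2x00_link_rate_to_fdmi_speed(ha->link_data_rate));
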
@@ -1560,7 +1586,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = __constant_cpu_to_be16(4 + 4);
- max_frame_size = IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
+ max_frame_size = IS_FWI2_CAPABLE(ha) ?
(uint32_t) icb24->frame_payload_size:
(uint32_t) ha->init_cb->frame_payload_size;
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
@@ -1676,7 +1702,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
@@ -1684,7 +1710,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
memset(list[i].fabric_port_name, 0, WWN_SIZE);
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
GFPN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -1746,6 +1772,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
+ ct_pkt->vp_index = ha->vp_idx;
return ct_pkt;
}
@@ -1783,7 +1810,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
if (!ha->flags.gpsc_supported)
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 2a45aec4ff29..5ec798c2bf13 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -79,20 +79,20 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
- rval = ha->isp_ops.pci_config(ha);
+ rval = ha->isp_ops->pci_config(ha);
if (rval) {
DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
ha->host_no));
return (rval);
}
- ha->isp_ops.reset_chip(ha);
+ ha->isp_ops->reset_chip(ha);
- ha->isp_ops.get_flash_version(ha, ha->request_ring);
+ ha->isp_ops->get_flash_version(ha, ha->request_ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
- ha->isp_ops.nvram_config(ha);
+ ha->isp_ops->nvram_config(ha);
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
@@ -108,7 +108,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
- rval = ha->isp_ops.chip_diag(ha);
+ rval = ha->isp_ops->chip_diag(ha);
if (rval)
return (rval);
rval = qla2x00_setup_chip(ha);
@@ -129,14 +129,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
int
qla2100_pci_config(scsi_qla_host_t *ha)
{
- int ret;
uint16_t w;
uint32_t d;
unsigned long flags;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
- ret = pci_set_mwi(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -164,7 +163,6 @@ qla2100_pci_config(scsi_qla_host_t *ha)
int
qla2300_pci_config(scsi_qla_host_t *ha)
{
- int ret;
uint16_t w;
uint32_t d;
unsigned long flags = 0;
@@ -172,7 +170,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
- ret = pci_set_mwi(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -250,15 +248,13 @@ qla2300_pci_config(scsi_qla_host_t *ha)
int
qla24xx_pci_config(scsi_qla_host_t *ha)
{
- int ret;
uint16_t w;
uint32_t d;
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- int pcix_cmd_reg, pcie_dctl_reg;
pci_set_master(ha->pdev);
- ret = pci_set_mwi(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
@@ -268,35 +264,19 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
- pcix_cmd_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX);
- if (pcix_cmd_reg) {
- uint16_t pcix_cmd;
-
- pcix_cmd_reg += PCI_X_CMD;
- pci_read_config_word(ha->pdev, pcix_cmd_reg, &pcix_cmd);
- pcix_cmd &= ~PCI_X_CMD_MAX_READ;
- pcix_cmd |= 0x0008;
- pci_write_config_word(ha->pdev, pcix_cmd_reg, pcix_cmd);
- }
+ if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
+ pcix_set_mmrbc(ha->pdev, 2048);
/* PCIe -- adjust Maximum Read Request Size (2048). */
- pcie_dctl_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
- if (pcie_dctl_reg) {
- uint16_t pcie_dctl;
-
- pcie_dctl_reg += PCI_EXP_DEVCTL;
- pci_read_config_word(ha->pdev, pcie_dctl_reg, &pcie_dctl);
- pcie_dctl &= ~PCI_EXP_DEVCTL_READRQ;
- pcie_dctl |= 0x4000;
- pci_write_config_word(ha->pdev, pcie_dctl_reg, pcie_dctl);
- }
+ if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ pcie_set_readrq(ha->pdev, 2048);
/* Reset expansion ROM address decode enable */
pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
d &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
- pci_read_config_word(ha->pdev, PCI_REVISION_ID, &ha->chip_revision);
+ ha->chip_revision = ha->pdev->revision;
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -307,6 +287,40 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
}
/**
+ * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qla25xx_pci_config(scsi_qla_host_t *ha)
+{
+ uint16_t w;
+ uint32_t d;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ /* PCIe -- adjust Maximum Read Request Size (2048). */
+ if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ pcie_set_readrq(ha->pdev, 2048);
+
+ /* Reset expansion ROM address decode enable */
+ pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+ d &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+
+ ha->chip_revision = ha->pdev->revision;
+
+ return QLA_SUCCESS;
+}
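
The hunks above replace the driver's open-coded PCI-X and PCIe config-space writes with the generic helpers, and pci_set_mwi() with its best-effort variant. A minimal sketch of the same tuning on a hypothetical device (foo_tune_pci() and its pci_dev are placeholders, not taken from this patch):

	#include <linux/pci.h>

	static void foo_tune_pci(struct pci_dev *pdev)
	{
		pci_set_master(pdev);
		pci_try_set_mwi(pdev);		/* best effort; failure is non-fatal */

		/* PCI-X: raise Maximum Memory Read Byte Count to 2048. */
		if (pci_find_capability(pdev, PCI_CAP_ID_PCIX))
			pcix_set_mmrbc(pdev, 2048);

		/* PCIe: raise Maximum Read Request Size to 2048. */
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
			pcie_set_readrq(pdev, 2048);
	}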
+
+/**
* qla2x00_isp_firmware() - Choose firmware image.
* @ha: HA context
*
@@ -351,7 +365,7 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
uint32_t cnt;
uint16_t cmd;
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -551,7 +565,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
void
qla24xx_reset_chip(scsi_qla_host_t *ha)
{
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
qla24xx_reset_risc(ha);
@@ -736,8 +750,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
- } else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
- fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ fixed_size = IS_QLA25XX(ha) ?
+ offsetof(struct qla25xx_fw_dump, ext_mem) :
+ offsetof(struct qla24xx_fw_dump, ext_mem);
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
@@ -879,7 +895,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
uint32_t srisc_address = 0;
/* Load firmware sequences */
- rval = ha->isp_ops.load_risc(ha, &srisc_address);
+ rval = ha->isp_ops->load_risc(ha, &srisc_address);
if (rval == QLA_SUCCESS) {
DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
"code.\n", ha->host_no));
@@ -899,6 +915,10 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
&ha->fw_subminor_version,
&ha->fw_attributes, &ha->fw_memory_size);
qla2x00_resize_request_q(ha);
+ ha->flags.npiv_supported = 0;
+ if (IS_QLA24XX(ha) &&
+ (ha->fw_attributes & BIT_2))
+ ha->flags.npiv_supported = 1;
if (ql2xallocfwdump)
qla2x00_alloc_fw_dump(ha);
@@ -1101,6 +1121,8 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
int rval;
unsigned long flags = 0;
int cnt;
+ struct mid_init_cb_24xx *mid_init_cb =
+ (struct mid_init_cb_24xx *) ha->init_cb;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1124,14 +1146,18 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
/* Initialize response queue entries */
qla2x00_init_response_q_entries(ha);
- ha->isp_ops.config_rings(ha);
+ ha->isp_ops->config_rings(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Update any ISP specific firmware options before initialization. */
- ha->isp_ops.update_fw_options(ha);
+ ha->isp_ops->update_fw_options(ha);
DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
+
+ mid_init_cb->count = MAX_NUM_VPORT_FABRIC;
+ ha->max_npiv_vports = MAX_NUM_VPORT_FABRIC;
+
rval = qla2x00_init_firmware(ha, ha->init_cb_size);
if (rval) {
DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
@@ -1263,6 +1289,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
int rval;
uint16_t loop_id;
uint16_t topo;
+ uint16_t sw_cap;
uint8_t al_pa;
uint8_t area;
uint8_t domain;
@@ -1270,7 +1297,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
/* Get host addresses. */
rval = qla2x00_get_adapter_id(ha,
- &loop_id, &al_pa, &area, &domain, &topo);
+ &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
if (rval != QLA_SUCCESS) {
if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
@@ -1295,6 +1322,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
/* initialize */
ha->min_external_loopid = SNS_FIRST_LOOP_ID;
ha->operating_mode = LOOP;
+ ha->switch_cap = 0;
switch (topo) {
case 0:
@@ -1307,6 +1335,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
case 1:
DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
ha->host_no));
+ ha->switch_cap = sw_cap;
ha->current_topology = ISP_CFG_FL;
strcpy(connect_type, "(FL_Port)");
break;
@@ -1322,6 +1351,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
case 3:
DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
ha->host_no));
+ ha->switch_cap = sw_cap;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_F;
strcpy(connect_type, "(F_Port)");
@@ -1445,7 +1475,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
- ha->isp_ops.read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
+ ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
chksum += *ptr++;
@@ -1743,7 +1773,6 @@ qla2x00_rport_del(void *data)
spin_unlock_irqrestore(&fcport->rport_lock, flags);
if (rport)
fc_remote_port_delete(rport);
-
}
/**
@@ -1765,6 +1794,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
/* Setup fcport template structure. */
memset(fcport, 0, sizeof (fc_port_t));
fcport->ha = ha;
+ fcport->vp_idx = ha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1911,6 +1941,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
char *id_iter;
uint16_t loop_id;
uint8_t domain, area, al_pa;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
found_devs = 0;
new_fcport = NULL;
@@ -1942,7 +1973,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/*
* Mark local devices that were present with FCF_DEVICE_LOST for now.
*/
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (atomic_read(&fcport->state) == FCS_ONLINE &&
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
@@ -1988,6 +2022,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->vp_idx = ha->vp_idx;
rval2 = qla2x00_get_port_database(ha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
@@ -2003,7 +2038,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/* Check for matching device in port list. */
found = 0;
fcport = NULL;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (memcmp(new_fcport->port_name, fcport->port_name,
WWN_SIZE))
continue;
@@ -2023,7 +2061,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
if (!found) {
/* New device, add to fcports list. */
new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
- list_add_tail(&new_fcport->list, &ha->fcports);
+ if (ha->parent) {
+ new_fcport->ha = ha;
+ new_fcport->vp_idx = ha->vp_idx;
+ list_add_tail(&new_fcport->vp_fcport,
+ &ha->vp_fcports);
+ }
+ list_add_tail(&new_fcport->list, &pha->fcports);
/* Allocate a new replacement fcport. */
fcport = new_fcport;
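
The local-loop scan above now walks the physical adapter's fcport list via to_qla_parent() and filters on vp_idx, so each virtual port only sees its own devices; entries created for a vport are additionally linked on that vport's vp_fcports list. A condensed sketch of the filtering pattern (foo_count_vp_fcports() is a hypothetical helper, assuming the driver's own headers):

	static int foo_count_vp_fcports(scsi_qla_host_t *ha)
	{
		scsi_qla_host_t *pha = to_qla_parent(ha);
		fc_port_t *fcport;
		int n = 0;

		list_for_each_entry(fcport, &pha->fcports, list) {
			if (fcport->vp_idx != ha->vp_idx)
				continue;	/* owned by another vport on this HBA */
			n++;
		}
		return n;
	}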
@@ -2091,7 +2135,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
int rval;
uint16_t port_speed, mb[6];
- if (!IS_QLA24XX(ha))
+ if (!IS_IIDMA_CAPABLE(ha))
return;
switch (be16_to_cpu(fcport->fp_speed)) {
@@ -2199,11 +2243,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
{
+ scsi_qla_host_t *pha = to_qla_parent(ha);
+
fcport->ha = ha;
fcport->login_retry = 0;
- fcport->port_login_retry_count = ha->port_down_retry_count *
+ fcport->port_login_retry_count = pha->port_down_retry_count *
PORT_RETRY_TIME;
- atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
+ atomic_set(&fcport->port_down_timer, pha->port_down_retry_count *
PORT_RETRY_TIME);
fcport->flags &= ~FCF_LOGIN_NEEDED;
@@ -2234,9 +2280,10 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
LIST_HEAD(new_fcports);
+ scsi_qla_host_t *pha = to_qla_parent(ha);
/* If FL port exists, then SNS is present */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
loop_id = NPH_F_PORT;
else
loop_id = SNS_FL_PORT;
@@ -2263,11 +2310,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
qla2x00_fdmi_register(ha);
/* Ensure we are logged into the SNS. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
loop_id = NPH_SNS;
else
loop_id = SIMPLE_NAME_SERVER;
- ha->isp_ops.fabric_login(ha, loop_id, 0xff, 0xff,
+ ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff,
0xfc, mb, BIT_1 | BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2307,7 +2354,10 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
* Logout all previous fabric devices marked lost, except
* tape devices.
*/
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
break;
@@ -2321,7 +2371,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
(fcport->flags & FCF_TAPE_PRESENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops.fabric_logout(ha,
+ ha->isp_ops->fabric_logout(ha,
fcport->loop_id,
fcport->d_id.b.domain,
fcport->d_id.b.area,
@@ -2332,13 +2382,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
}
/* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
+ next_loopid = pha->min_external_loopid;
/*
* Scan through our port list and login entries that need to be
* logged in.
*/
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if (atomic_read(&ha->loop_down_timer) ||
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
break;
@@ -2380,11 +2433,18 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
break;
}
- /* Remove device from the new list and add it to DB */
- list_move_tail(&fcport->list, &ha->fcports);
-
/* Login and update database */
qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
+
+ if (ha->parent) {
+ fcport->ha = ha;
+ fcport->vp_idx = ha->vp_idx;
+ list_add_tail(&fcport->vp_fcport,
+ &ha->vp_fcports);
+ list_move_tail(&fcport->list,
+ &ha->parent->fcports);
+ } else
+ list_move_tail(&fcport->list, &ha->fcports);
}
} while (0);
@@ -2428,6 +2488,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
int swl_idx;
int first_dev, last_dev;
port_id_t wrap, nxt_d_id;
+ int vp_index;
+ int empty_vp_index;
+ int found_vp;
+ scsi_qla_host_t *vha;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_SUCCESS;
@@ -2461,13 +2526,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
-
+ new_fcport->vp_idx = ha->vp_idx;
/* Set start port ID scan at adapter ID. */
first_dev = 1;
last_dev = 0;
/* Starting free loop ID. */
- loop_id = ha->min_external_loopid;
+ loop_id = pha->min_external_loopid;
for (; loop_id <= ha->last_loop_id; loop_id++) {
if (qla2x00_is_reserved_id(ha, loop_id))
continue;
@@ -2521,10 +2586,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
break;
}
- /* Bypass if host adapter. */
- if (new_fcport->d_id.b24 == ha->d_id.b24)
+ /* Bypass if same physical adapter. */
+ if (new_fcport->d_id.b24 == pha->d_id.b24)
continue;
+ /* Bypass virtual ports of the same host. */
+ if (pha->num_vhosts) {
+ vp_index = find_next_bit(
+ (unsigned long *)pha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, 1);
+
+ for (; vp_index <= MAX_MULTI_ID_FABRIC;
+ vp_index = find_next_bit(
+ (unsigned long *)pha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, vp_index + 1)) {
+ empty_vp_index = 1;
+ found_vp = 0;
+ list_for_each_entry(vha, &pha->vp_list,
+ vp_list) {
+ if (vp_index == vha->vp_idx) {
+ empty_vp_index = 0;
+ found_vp = 1;
+ break;
+ }
+ }
+
+ if (empty_vp_index)
+ continue;
+
+ if (found_vp &&
+ new_fcport->d_id.b24 == vha->d_id.b24)
+ break;
+ }
+ if (vp_index <= MAX_MULTI_ID_FABRIC)
+ continue;
+ }
+
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
(ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
@@ -2537,7 +2634,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
/* Locate matching device in database. */
found = 0;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (new_fcport->vp_idx != fcport->vp_idx)
+ continue;
if (memcmp(new_fcport->port_name, fcport->port_name,
WWN_SIZE))
continue;
@@ -2581,7 +2680,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
(fcport->flags & FCF_TAPE_PRESENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
@@ -2605,6 +2704,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
new_fcport->d_id.b24 = nxt_d_id.b24;
+ new_fcport->vp_idx = ha->vp_idx;
}
kfree(swl);
@@ -2637,6 +2737,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
int found;
fc_port_t *fcport;
uint16_t first_loop_id;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_SUCCESS;
@@ -2663,7 +2764,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
/* Check for loop ID being already in use. */
found = 0;
fcport = NULL;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
if (fcport->loop_id == dev->loop_id && fcport != dev) {
/* ID possibly in use */
found++;
@@ -2710,6 +2811,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
uint8_t rscn_out_iter;
uint8_t format;
port_id_t d_id;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_RSCNS_HANDLED;
@@ -2776,7 +2878,10 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
rval = QLA_SUCCESS;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != ha->vp_idx)
+ continue;
+
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
(fcport->d_id.b24 & mask) != d_id.b24 ||
fcport->port_type == FCT_BROADCAST)
@@ -2830,7 +2935,7 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
opts |= BIT_1;
rval = qla2x00_get_port_database(ha, fcport, opts);
if (rval != QLA_SUCCESS) {
- ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
qla2x00_mark_device_lost(ha, fcport, 1, 0);
@@ -2875,7 +2980,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
fcport->d_id.b.area, fcport->d_id.b.al_pa));
/* Login fcport on switch. */
- ha->isp_ops.fabric_login(ha, fcport->loop_id,
+ ha->isp_ops->fabric_login(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb, BIT_0);
if (mb[0] == MBS_PORT_ID_USED) {
@@ -2943,7 +3048,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
* dead.
*/
*next_loopid = fcport->loop_id;
- ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
qla2x00_mark_device_lost(ha, fcport, 1, 0);
@@ -2961,7 +3066,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
*next_loopid = fcport->loop_id;
- ha->isp_ops.fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
@@ -3117,7 +3222,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
- ha->isp_ops.reset_chip(ha);
+ ha->isp_ops->reset_chip(ha);
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
@@ -3143,9 +3248,9 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ha->isp_ops.get_flash_version(ha, ha->request_ring);
+ ha->isp_ops->get_flash_version(ha, ha->request_ring);
- ha->isp_ops.nvram_config(ha);
+ ha->isp_ops->nvram_config(ha);
if (!qla2x00_restart_isp(ha)) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
@@ -3160,7 +3265,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->flags.online = 1;
- ha->isp_ops.enable_intrs(ha);
+ ha->isp_ops->enable_intrs(ha);
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
@@ -3185,7 +3290,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
* The next call disables the board
* completely.
*/
- ha->isp_ops.reset_adapter(ha);
+ ha->isp_ops->reset_adapter(ha);
ha->flags.online = 0;
clear_bit(ISP_ABORT_RETRY,
&ha->dpc_flags);
@@ -3242,7 +3347,7 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(ha)) {
ha->flags.online = 0;
- if (!(status = ha->isp_ops.chip_diag(ha))) {
+ if (!(status = ha->isp_ops->chip_diag(ha))) {
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
status = qla2x00_setup_chip(ha);
goto done;
@@ -3334,7 +3439,7 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
ha->flags.online = 0;
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -3351,7 +3456,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ha->flags.online = 0;
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
@@ -3409,7 +3514,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
/* Get NVRAM data and calculate checksum. */
dptr = (uint32_t *)nv;
- ha->isp_ops.read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
+ ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
@@ -3923,7 +4028,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
{
int ret, retries;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_FWI2_CAPABLE(ha))
return;
if (!ha->fw_major_version)
return;
@@ -3940,3 +4045,40 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
ret = qla2x00_stop_firmware(ha);
}
}
+
+int
+qla24xx_configure_vhba(scsi_qla_host_t *ha)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t mb[MAILBOX_REGISTER_COUNT];
+
+ if (!ha->parent)
+ return -EINVAL;
+
+ rval = qla2x00_fw_ready(ha);
+ if (rval == QLA_SUCCESS) {
+ clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+ qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+ }
+
+ ha->flags.management_server_logged_in = 0;
+
+ /* Login to SNS first */
+ qla24xx_login_fabric(ha, NPH_SNS, 0xff, 0xff, 0xfc,
+ mb, BIT_1);
+ if (mb[0] != MBS_COMMAND_COMPLETE) {
+ DEBUG15(qla_printk(KERN_INFO, ha,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
+ "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
+ mb[0], mb[1], mb[2], mb[6], mb[7]));
+ return (QLA_FUNCTION_FAILED);
+ }
+
+ atomic_set(&ha->loop_down_timer, 0);
+ atomic_set(&ha->loop_state, LOOP_UP);
+ set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+ rval = qla2x00_loop_resync(ha);
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d3023338628f..8e3b04464cff 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -104,7 +104,7 @@ static __inline__ void qla2x00_poll(scsi_qla_host_t *);
static inline void
qla2x00_poll(scsi_qla_host_t *ha)
{
- ha->isp_ops.intr_handler(0, ha);
+ ha->isp_ops->intr_handler(0, ha);
}
static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
@@ -163,7 +163,7 @@ static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t);
static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
{
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
return (loop_id > NPH_LAST_HANDLE);
return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c610a32a..3a5e78cb6b3f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -155,6 +155,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
uint32_t *cur_dsd;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = sp->cmd;
@@ -163,7 +165,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
__constant_cpu_to_le32(COMMAND_TYPE);
/* No data transfer */
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
@@ -177,35 +179,23 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
- if (cmd->use_sg != 0) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- cont_entry_t *cont_pkt;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- /*
- * Seven DSDs are available in the Continuation
- * Type 0 IOCB.
- */
- cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
- cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
- avail_dsds = 7;
- }
-
- *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_seg++;
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ cont_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Seven DSDs are available in the Continuation
+ * Type 0 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+ cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+ avail_dsds = 7;
}
- } else {
- *cur_dsd++ = cpu_to_le32(sp->dma_handle);
- *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+ *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
}
}
@@ -224,6 +214,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
uint32_t *cur_dsd;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = sp->cmd;
@@ -232,7 +224,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
__constant_cpu_to_le32(COMMAND_A64_TYPE);
/* No data transfer */
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
@@ -246,39 +238,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
- if (cmd->use_sg != 0) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- dma_addr_t sle_dma;
- cont_a64_entry_t *cont_pkt;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- /*
- * Five DSDs are available in the Continuation
- * Type 1 IOCB.
- */
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
- }
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_seg++;
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
}
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
}
}
@@ -291,7 +270,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
int
qla2x00_start_scsi(srb_t *sp)
{
- int ret;
+ int ret, nseg;
unsigned long flags;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
@@ -299,7 +278,6 @@ qla2x00_start_scsi(srb_t *sp)
uint32_t index;
uint32_t handle;
cmd_entry_t *cmd_pkt;
- struct scatterlist *sg;
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
@@ -337,26 +315,18 @@ qla2x00_start_scsi(srb_t *sp)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
- if (cmd->use_sg) {
- sg = (struct scatterlist *) cmd->request_buffer;
- tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- if (tot_dsds == 0)
- goto queuing_error;
- } else if (cmd->request_bufflen) {
- dma_addr_t req_dma;
-
- req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
- cmd->request_bufflen, cmd->sc_data_direction);
- if (dma_mapping_error(req_dma))
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
goto queuing_error;
+ } else
+ nseg = 0;
- sp->dma_handle = req_dma;
- tot_dsds = 1;
- }
+ tot_dsds = nseg;
/* Calculate the number of request entries needed. */
- req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
+ req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (ha->req_q_cnt < (req_cnt + 2)) {
cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
if (ha->req_ring_index < cnt)
@@ -391,10 +361,10 @@ qla2x00_start_scsi(srb_t *sp)
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
- ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
+ ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -423,14 +393,9 @@ qla2x00_start_scsi(srb_t *sp)
return (QLA_SUCCESS);
queuing_error:
- if (cmd->use_sg && tot_dsds) {
- sg = (struct scatterlist *) cmd->request_buffer;
- pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (tot_dsds) {
- pci_unmap_single(ha->pdev, sp->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return (QLA_FUNCTION_FAILED);
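
The queuing path above now maps the whole scatterlist with dma_map_sg() and, on any error, undoes it with a single scsi_dma_unmap() call instead of mirroring the old sg/single-buffer split. Stripped to its core, the lifecycle looks roughly like this (foo_start_io(), dsd and max_dsds are placeholder names, not the driver's):

	#include <linux/pci.h>
	#include <scsi/scsi_cmnd.h>

	static int foo_start_io(struct pci_dev *pdev, struct scsi_cmnd *cmd,
	    __le32 *dsd, int max_dsds)
	{
		struct scatterlist *sg;
		int i, nseg = 0;

		if (scsi_sg_count(cmd)) {
			nseg = dma_map_sg(&pdev->dev, scsi_sglist(cmd),
			    scsi_sg_count(cmd), cmd->sc_data_direction);
			if (unlikely(!nseg))
				return -ENOMEM;
		}

		if (nseg > max_dsds) {
			scsi_dma_unmap(cmd);	/* one call undoes dma_map_sg() */
			return -EBUSY;
		}

		/* Emit one (address, length) pair per mapped segment. */
		scsi_for_each_sg(cmd, sg, nseg, i) {
			*dsd++ = cpu_to_le32(sg_dma_address(sg));
			*dsd++ = cpu_to_le32(sg_dma_len(sg));
		}
		return 0;
	}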
@@ -453,9 +418,10 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
mrk24 = NULL;
- mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
+ mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
if (mrk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
__func__, ha->host_no));
@@ -466,12 +432,13 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+ mrk24->vp_index = ha->vp_idx;
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
@@ -479,7 +446,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
}
wmb();
- qla2x00_isp_cmd(ha);
+ qla2x00_isp_cmd(pha);
return (QLA_SUCCESS);
}
@@ -520,7 +487,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
for (timer = HZ; timer; timer--) {
if ((req_cnt + 2) >= ha->req_q_cnt) {
/* Calculate number of free request entries. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
cnt = (uint16_t)RD_REG_DWORD(
&reg->isp24.req_q_out);
else
@@ -594,7 +561,7 @@ qla2x00_isp_cmd(scsi_qla_host_t *ha)
ha->request_ring_ptr++;
/* Set chip new ring index. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
} else {
@@ -642,6 +609,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
uint32_t *cur_dsd;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = sp->cmd;
@@ -650,7 +619,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
__constant_cpu_to_le32(COMMAND_TYPE_7);
/* No data transfer */
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return;
}
@@ -670,39 +639,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
/* Load data segments */
- if (cmd->use_sg != 0) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- dma_addr_t sle_dma;
- cont_a64_entry_t *cont_pkt;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- /*
- * Five DSDs are available in the Continuation
- * Type 1 IOCB.
- */
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
- cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
- avail_dsds = 5;
- }
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_seg++;
+
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
}
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
- *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
}
}
@@ -716,7 +673,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
int
qla24xx_start_scsi(srb_t *sp)
{
- int ret;
+ int ret, nseg;
unsigned long flags;
scsi_qla_host_t *ha;
struct scsi_cmnd *cmd;
@@ -724,7 +681,6 @@ qla24xx_start_scsi(srb_t *sp)
uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
- struct scatterlist *sg;
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
@@ -762,23 +718,15 @@ qla24xx_start_scsi(srb_t *sp)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
- if (cmd->use_sg) {
- sg = (struct scatterlist *) cmd->request_buffer;
- tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- if (tot_dsds == 0)
- goto queuing_error;
- } else if (cmd->request_bufflen) {
- dma_addr_t req_dma;
-
- req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
- cmd->request_bufflen, cmd->sc_data_direction);
- if (dma_mapping_error(req_dma))
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
goto queuing_error;
+ } else
+ nseg = 0;
- sp->dma_handle = req_dma;
- tot_dsds = 1;
- }
+ tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(tot_dsds);
if (ha->req_q_cnt < (req_cnt + 2)) {
@@ -813,6 +761,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -821,7 +770,7 @@ qla24xx_start_scsi(srb_t *sp)
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Build IOCB segments */
qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
@@ -853,14 +802,9 @@ qla24xx_start_scsi(srb_t *sp)
return QLA_SUCCESS;
queuing_error:
- if (cmd->use_sg && tot_dsds) {
- sg = (struct scatterlist *) cmd->request_buffer;
- pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (tot_dsds) {
- pci_unmap_single(ha->pdev, sp->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ca463469063d..b8f226ae2633 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,7 +9,6 @@
#include <scsi/scsi_tcq.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
@@ -144,7 +143,7 @@ qla2300_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
- ha->isp_ops.fw_dump(ha, 1);
+ ha->isp_ops->fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
@@ -244,11 +243,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
* @ha: SCSI driver HA context
* @mb: Mailbox registers (0 - 3)
*/
-static void
+void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN 2
- static char *link_speeds[5] = { "1", "2", "?", "4", "10" };
+ static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
char *link_speed;
uint16_t handle_cnt;
uint16_t cnt;
@@ -335,9 +334,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
mb[1], mb[2], mb[3]);
- ha->isp_ops.fw_dump(ha, 1);
+ ha->isp_ops->fw_dump(ha, 1);
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
qla_printk(KERN_ERR, ha,
"Unrecoverable Hardware Error: adapter "
@@ -386,6 +385,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
ha->flags.management_server_logged_in = 0;
@@ -422,6 +426,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
ha->flags.management_server_logged_in = 0;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
if (ql2xfdmienable)
@@ -440,6 +449,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
ha->operating_mode = LOOP;
@@ -465,6 +479,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
}
@@ -491,6 +510,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
qla2x00_mark_all_devices_lost(ha, 1);
}
+ if (ha->parent) {
+ atomic_set(&ha->vp_state, VP_FAILED);
+ fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ }
+
set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
break;
@@ -530,6 +554,10 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
+ /* Check if the Vport has issued a SCR */
+ if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
+ break;
+
DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
ha->host_no));
DEBUG(printk(KERN_INFO
@@ -573,7 +601,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"scsi(%ld): [R|Z]IO update completion.\n",
ha->host_no));
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
qla24xx_process_response_queue(ha);
else
qla2x00_process_response_queue(ha);
@@ -589,6 +617,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
ha->host_no, mb[1], mb[2]));
break;
}
+
+ if (!ha->parent && ha->num_vhosts)
+ qla2x00_alert_all_vps(ha, mb);
}
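
Each loop-state event above gains the same three-line vport block: if the host is a virtual port, its state is marked failed both in the driver and in the FC transport. The patch open-codes that block per case; factored into a hypothetical helper it reads:

	static void foo_vp_mark_failed(scsi_qla_host_t *ha)
	{
		if (!ha->parent)	/* physical port: no vport state to update */
			return;

		atomic_set(&ha->vp_state, VP_FAILED);
		fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
	}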
static void
@@ -792,7 +823,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
} else {
@@ -841,7 +872,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
fcport = sp->fcport;
sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
sense_len = le32_to_cpu(sts24->sense_len);
rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
resid_len = le32_to_cpu(sts24->rsp_residual_count);
@@ -860,7 +891,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
/* Check for any FCP transport errors. */
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
/* Sense data lies beyond any FCP RESPONSE data. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
sense_data += rsp_info_len;
if (rsp_info_len > 3 && rsp_info[3]) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
@@ -889,19 +920,19 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
}
if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
resid = resid_len;
- cp->resid = resid;
+ scsi_set_resid(cp, resid);
CMD_RESID_LEN(cp) = resid;
if (!lscsi_status &&
- ((unsigned)(cp->request_bufflen - resid) <
+ ((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- cp->request_bufflen);
+ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+ "detected (%x of %x bytes)...returning "
+ "error status.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -959,11 +990,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
case CS_DATA_UNDERRUN:
resid = resid_len;
/* Use F/W calculated residual length. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
resid = fw_resid_len;
if (scsi_status & SS_RESIDUAL_UNDER) {
- cp->resid = resid;
+ scsi_set_resid(cp, resid);
CMD_RESID_LEN(cp) = resid;
} else {
DEBUG2(printk(KERN_INFO
@@ -1031,6 +1062,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
cp->device->id, cp->device->lun, cp,
cp->serial_number));
+ /*
+ * In case of an underrun condition, set both the lscsi
+ * status and the completion status to appropriate
+ * values.
+ */
+ if (resid &&
+ ((unsigned)(cp->request_bufflen - resid) <
+ cp->underflow)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+ "detected (%x of %x bytes)...returning "
+ "error status.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ cp->request_bufflen));
+
+ cp->result = DID_ERROR << 16 | lscsi_status;
+ }
+
if (sense_len)
DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
CMD_ACTUAL_SNSLEN(cp)));
@@ -1042,26 +1092,26 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
*/
if (!(scsi_status & SS_RESIDUAL_UNDER)) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
- "frame(s) detected (%x of %x bytes)..."
- "retrying command.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- cp->request_bufflen));
+ "frame(s) detected (%x of %x bytes)..."
+ "retrying command.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ scsi_bufflen(cp)));
cp->result = DID_BUS_BUSY << 16;
break;
}
/* Handle mid-layer underflow */
- if ((unsigned)(cp->request_bufflen - resid) <
+ if ((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow) {
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Mid-layer underflow "
- "detected (%x of %x bytes)...returning "
- "error status.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- cp->request_bufflen);
+ "scsi(%ld:%d:%d:%d): Mid-layer underflow "
+ "detected (%x of %x bytes)...returning "
+ "error status.\n", ha->host_no,
+ cp->device->channel, cp->device->id,
+ cp->device->lun, resid,
+ scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -1084,7 +1134,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
DEBUG2(printk(KERN_INFO
"PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
"status!\n",
- cp->serial_number, cp->request_bufflen, resid_len));
+ cp->serial_number, scsi_bufflen(cp), resid_len));
cp->result = DID_ERROR << 16;
break;
@@ -1135,7 +1185,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
case CS_TIMEOUT:
cp->result = DID_BUS_BUSY << 16;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
DEBUG2(printk(KERN_INFO
"scsi(%ld:%d:%d:%d): TIMEOUT status detected "
"0x%x-0x%x\n", ha->host_no, cp->device->channel,
@@ -1204,7 +1254,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
}
/* Move sense data. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
host_to_fcp_swap(pkt->data, sizeof(pkt->data));
memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
@@ -1393,6 +1443,10 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
case MS_IOCB_TYPE:
qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
break;
+ case VP_RPT_ID_IOCB_TYPE:
+ qla24xx_report_id_acquisition(ha,
+ (struct vp_rpt_id_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@@ -1448,7 +1502,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
- ha->isp_ops.fw_dump(ha, 1);
+ ha->isp_ops->fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -1582,7 +1636,7 @@ qla24xx_msix_default(int irq, void *dev_id)
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
- ha->isp_ops.fw_dump(ha, 1);
+ ha->isp_ops->fw_dump(ha, 1);
set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -1633,7 +1687,7 @@ struct qla_init_msix_entry {
uint16_t entry;
uint16_t index;
const char *name;
- irqreturn_t (*handler)(int, void *);
+ irq_handler_t handler;
};
static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
@@ -1704,11 +1758,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
int ret;
/* If possible, enable MSI-X. */
- if (!IS_QLA2432(ha))
+ if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
goto skip_msix;
- if (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
- !QLA_MSIX_FW_MODE_1(ha->fw_attributes)) {
+ if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
+ !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
ha->chip_revision, ha->fw_attributes));
@@ -1727,7 +1781,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:
- if (!IS_QLA24XX(ha))
+ if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
@@ -1737,7 +1791,7 @@ skip_msix:
}
skip_msi:
- ret = request_irq(ha->pdev->irq, ha->isp_ops.intr_handler,
+ ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
if (!ret) {
ha->flags.inta_enabled = 1;
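
The interrupt setup above prefers MSI-X on ISP2432/ISP2532, then MSI, and finally the legacy INTx line through isp_ops->intr_handler. A minimal sketch of the last two stages of that fallback (foo_setup_irq() and the "foo" name are placeholders):

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static int foo_setup_irq(struct pci_dev *pdev, irq_handler_t handler, void *data)
	{
		/* Prefer MSI; if it cannot be enabled, pdev->irq stays the INTx line. */
		if (pci_enable_msi(pdev))
			dev_info(&pdev->dev, "MSI unavailable, falling back to INTx\n");

		return request_irq(pdev->irq, handler,
		    IRQF_DISABLED | IRQF_SHARED, "foo", data);
	}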
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 71e32a248528..d3746ec80a85 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -42,25 +42,29 @@ qla2x00_mbx_sem_timeout(unsigned long data)
* Kernel context.
*/
static int
-qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
+qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
{
int rval;
unsigned long flags = 0;
- device_reg_t __iomem *reg = ha->iobase;
+ device_reg_t __iomem *reg;
struct timer_list tmp_intr_timer;
uint8_t abort_active;
- uint8_t io_lock_on = ha->flags.init_done;
+ uint8_t io_lock_on;
uint16_t command;
uint16_t *iptr;
uint16_t __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
+ scsi_qla_host_t *ha = to_qla_parent(pvha);
+
+ reg = ha->iobase;
+ io_lock_on = ha->flags.init_done;
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no));
/*
* Wait for active mailbox commands to finish by waiting at most tov
@@ -86,7 +90,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -150,7 +154,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -171,7 +175,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
ha->host_no, command));
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -224,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
uint16_t mb0;
uint32_t ictrl;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
} else {
@@ -318,7 +322,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- if (MSW(risc_addr) || IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
mcp->mb[8] = MSW(risc_addr);
mcp->out_mb = MBX_8|MBX_0;
@@ -332,7 +336,7 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
mcp->mb[6] = MSW(MSD(req_dma));
mcp->mb[7] = LSW(MSD(req_dma));
mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[4] = MSW(risc_code_size);
mcp->mb[5] = LSW(risc_code_size);
mcp->out_mb |= MBX_5|MBX_4;
@@ -383,7 +387,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
@@ -406,7 +410,7 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
ha->host_no, rval, mcp->mb[0]));
} else {
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
__func__, ha->host_no, mcp->mb[1]));
} else {
@@ -547,7 +551,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
mcp->mb[3] = fwopts[3];
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->in_mb |= MBX_1;
} else {
mcp->mb[10] = fwopts[10];
@@ -660,7 +664,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->out_mb |= MBX_2|MBX_1;
@@ -677,8 +681,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
- ha->host_no, rval, (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
- (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])));
+ ha->host_no, rval, IS_FWI2_CAPABLE(ha) ?
+ (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
@@ -735,7 +739,7 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void* buffer, dma_addr_t phys_addr,
/* Mask reserved bits. */
sts_entry->entry_status &=
- IS_QLA24XX(ha) || IS_QLA54XX(ha) ? RF_MASK_24XX :RF_MASK;
+ IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX : RF_MASK;
}
return rval;
@@ -889,7 +893,7 @@ qla2x00_abort_target(fc_port_t *fcport)
*/
int
qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
- uint8_t *area, uint8_t *domain, uint16_t *top)
+ uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
int rval;
mbx_cmd_t mc;
@@ -899,8 +903,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
ha->host_no));
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
+ mcp->mb[9] = ha->vp_idx;
mcp->out_mb = MBX_0;
- mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
@@ -913,6 +918,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
*area = MSB(mcp->mb[2]);
*domain = LSB(mcp->mb[3]);
*top = mcp->mb[6];
+ *sw_cap = mcp->mb[7];
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -1009,7 +1015,11 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
ha->host_no));
- mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+ if (ha->flags.npiv_supported)
+ mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
+ else
+ mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
mcp->mb[2] = MSW(ha->init_cb_dma);
mcp->mb[3] = LSW(ha->init_cb_dma);
mcp->mb[4] = 0;
@@ -1075,15 +1085,16 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mcp->mb[0] = MBC_GET_PORT_DATABASE;
- if (opt != 0 && !IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (opt != 0 && !IS_FWI2_CAPABLE(ha))
mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
mcp->mb[2] = MSW(pd_dma);
mcp->mb[3] = LSW(pd_dma);
mcp->mb[6] = MSW(MSD(pd_dma));
mcp->mb[7] = LSW(MSD(pd_dma));
- mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->mb[9] = ha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = fcport->loop_id;
mcp->mb[10] = opt;
mcp->out_mb |= MBX_10|MBX_1;
@@ -1096,15 +1107,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
mcp->mb[1] = fcport->loop_id << 8 | opt;
mcp->out_mb |= MBX_1;
}
- mcp->buf_size = (IS_QLA24XX(ha) || IS_QLA54XX(ha) ?
- PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE);
+ mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
+ PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
mcp->flags = MBX_DMA_IN;
mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS)
goto gpd_error_out;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
@@ -1259,7 +1270,8 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
ha->host_no));
mcp->mb[0] = MBC_GET_PORT_NAME;
- mcp->out_mb = MBX_1|MBX_0;
+ mcp->mb[9] = ha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_1|MBX_0;
if (HAS_EXTENDED_IDS(ha)) {
mcp->mb[1] = loop_id;
mcp->mb[10] = opt;
@@ -1321,7 +1333,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_6;
mcp->mb[2] = 0;
@@ -1447,6 +1459,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
+ lg->vp_index = cpu_to_le16(ha->vp_idx);
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1624,7 +1637,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+ if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(ha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb_ret, opt);
@@ -1701,6 +1714,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
+ lg->vp_index = cpu_to_le16(ha->vp_idx);
rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -1807,7 +1821,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
ha->host_no));
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- mcp->mb[1] = IS_QLA24XX(ha) || IS_QLA54XX(ha) ? BIT_3: 0;
+ mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0;
mcp->mb[2] = 0;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1857,13 +1871,14 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
mcp->mb[0] = MBC_GET_ID_LIST;
mcp->out_mb = MBX_0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[2] = MSW(id_list_dma);
mcp->mb[3] = LSW(id_list_dma);
mcp->mb[6] = MSW(MSD(id_list_dma));
mcp->mb[7] = LSW(MSD(id_list_dma));
mcp->mb[8] = 0;
- mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
+ mcp->mb[9] = ha->vp_idx;
+ mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
} else {
mcp->mb[1] = MSW(id_list_dma);
mcp->mb[2] = LSW(id_list_dma);
@@ -2048,7 +2063,7 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
mcp->mb[7] = LSW(MSD(stat_buf_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
mcp->mb[1] = loop_id;
mcp->mb[4] = 0;
mcp->mb[10] = 0;
@@ -2212,6 +2227,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
+ abt->vp_index = fcport->vp_idx;
rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
@@ -2249,7 +2265,7 @@ qla24xx_abort_target(fc_port_t *fcport)
int rval;
struct tsk_mgmt_cmd *tsk;
dma_addr_t tsk_dma;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *ha, *pha;
if (fcport == NULL)
return 0;
@@ -2257,7 +2273,8 @@ qla24xx_abort_target(fc_port_t *fcport)
DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
ha = fcport->ha;
- tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+ pha = to_qla_parent(ha);
+ tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
"IOCB.\n", __func__, ha->host_no));
@@ -2273,6 +2290,8 @@ qla24xx_abort_target(fc_port_t *fcport)
tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
+ tsk->p.tsk.vp_index = fcport->vp_idx;
+
rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB "
@@ -2303,7 +2322,7 @@ qla24xx_abort_target(fc_port_t *fcport)
}
atarget_done:
- dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
+ dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
return rval;
}
@@ -2315,7 +2334,7 @@ qla2x00_system_error(scsi_qla_host_t *ha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2425,7 +2444,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *ha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2455,7 +2474,7 @@ qla2x00_trace_control(scsi_qla_host_t *ha, uint16_t ctrl, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2495,7 +2514,7 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha))
+ if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2533,7 +2552,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA24XX(ha))
+ if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2576,7 +2595,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA24XX(ha))
+ if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
@@ -2610,3 +2629,354 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
return rval;
}
+
+/*
+ * qla24xx_get_vp_database
+ * Get the VP's database for all configured ports.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * size = size of initialization control block.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_get_vp_database(scsi_qla_host_t *ha, uint16_t size)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ DEBUG11(printk("scsi(%ld):%s - entered.\n",
+ ha->host_no, __func__));
+
+ mcp->mb[0] = MBC_MID_GET_VP_DATABASE;
+ mcp->mb[2] = MSW(ha->init_cb_dma);
+ mcp->mb[3] = LSW(ha->init_cb_dma);
+ mcp->mb[4] = 0;
+ mcp->mb[5] = 0;
+ mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
+ mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ DEBUG2_3_11(printk("%s(%ld): failed=%x "
+ "mb0=%x.\n",
+ __func__, ha->host_no, rval, mcp->mb[0]));
+ } else {
+ /*EMPTY*/
+ DEBUG11(printk("%s(%ld): done.\n",
+ __func__, ha->host_no));
+ }
+
+ return rval;
+}
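/*
 * Illustration (values assumed, not from this patch): the mailbox setup
 * above splits the 64-bit init_cb_dma bus address into four 16-bit
 * registers via the driver's LSW/MSW/MSD macros.  With, for example,
 * init_cb_dma = 0x0000001234568000:
 *
 *	mb[3] = LSW(dma)      = 0x8000   bits  0..15
 *	mb[2] = MSW(dma)      = 0x3456   bits 16..31
 *	mb[7] = LSW(MSD(dma)) = 0x0012   bits 32..47
 *	mb[6] = MSW(MSD(dma)) = 0x0000   bits 48..63
 */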
+
+int
+qla24xx_get_vp_entry(scsi_qla_host_t *ha, uint16_t size, int vp_id)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+ mcp->mb[0] = MBC_MID_GET_VP_ENTRY;
+ mcp->mb[2] = MSW(ha->init_cb_dma);
+ mcp->mb[3] = LSW(ha->init_cb_dma);
+ mcp->mb[4] = 0;
+ mcp->mb[5] = 0;
+ mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
+ mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
+ mcp->mb[9] = vp_id;
+ mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = 30;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ /*EMPTY*/
+ DEBUG2_3_11(printk("qla24xx_get_vp_entry(%ld): failed=%x "
+ "mb0=%x.\n",
+ ha->host_no, rval, mcp->mb[0]));
+ } else {
+ /*EMPTY*/
+ DEBUG11(printk("qla24xx_get_vp_entry(%ld): done.\n",
+ ha->host_no));
+ }
+
+ return rval;
+}
+
+void
+qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
+ struct vp_rpt_id_entry_24xx *rptid_entry)
+{
+ uint8_t vp_idx;
+ scsi_qla_host_t *vha;
+
+ if (rptid_entry->entry_status != 0)
+ return;
+ if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
+ return;
+
+ if (rptid_entry->format == 0) {
+ DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
+ " number of VPs acquired %d\n", __func__, ha->host_no,
+ MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
+ DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]));
+ } else if (rptid_entry->format == 1) {
+ vp_idx = LSB(rptid_entry->vp_idx);
+ DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
+ "- status %d - "
+ "with port id %02x%02x%02x\n",__func__,ha->host_no,
+ vp_idx, MSB(rptid_entry->vp_idx),
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]));
+ if (vp_idx == 0)
+ return;
+
+ if (MSB(rptid_entry->vp_idx) == 1)
+ return;
+
+ list_for_each_entry(vha, &ha->vp_list, vp_list)
+ if (vp_idx == vha->vp_idx)
+ break;
+
+ if (!vha)
+ return;
+
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ /*
+ * Cannot configure here as we are still sitting on the
+ * response queue. Handle it in dpc context.
+ */
+ set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
+ set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+
+ wake_up_process(ha->dpc_thread);
+ }
+}
+
+/*
+ * qla24xx_modify_vp_config
+ * Change VP configuration for vha
+ *
+ * Input:
+ * vha = adapter block pointer.
+ *
+ * Returns:
+ * qla2xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_modify_vp_config(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct vp_config_entry_24xx *vpmod;
+ dma_addr_t vpmod_dma;
+ scsi_qla_host_t *pha;
+
+ /* This can be called by the parent */
+ pha = to_qla_parent(vha);
+
+ vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+ if (!vpmod) {
+ DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
+ "IOCB.\n", __func__, pha->host_no));
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
+ vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
+ vpmod->entry_count = 1;
+ vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
+ vpmod->vp_count = 1;
+ vpmod->vp_index1 = vha->vp_idx;
+ vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+ memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
+ memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
+
+ rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
+ "(%x).\n", __func__, pha->host_no, rval));
+ } else if (vpmod->comp_status != 0) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- error status (%x).\n", __func__, pha->host_no,
+ vpmod->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- completion status (%x).\n", __func__, pha->host_no,
+ le16_to_cpu(vpmod->comp_status)));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ /* EMPTY */
+ DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no));
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
+ }
+ dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma);
+
+ return rval;
+}
+
+/*
+ * qla24xx_control_vp
+ * Enable or disable a virtual port on the given host
+ *
+ * Input:
+ * vha = virtual adapter block pointer.
+ * cmd = VP control command (enable/disable).
+ *
+ * Returns:
+ * qla2xxx local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval;
+ int map, pos;
+ struct vp_ctrl_entry_24xx *vce;
+ dma_addr_t vce_dma;
+ scsi_qla_host_t *ha = vha->parent;
+ int vp_index = vha->vp_idx;
+
+ DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
+ ha->host_no, vp_index));
+
+ if (vp_index == 0 || vp_index >= MAX_MULTI_ID_LOOP)
+ return QLA_PARAMETER_ERROR;
+
+ vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
+ if (!vce) {
+ DEBUG2_3(printk("%s(%ld): "
+ "failed to allocate VP Control IOCB.\n", __func__,
+ ha->host_no));
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+ memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(cmd);
+ vce->vp_count = __constant_cpu_to_le16(1);
+
+ /* index map in firmware starts with 1; decrement index
+ * this is ok as we never use index 0
+ */
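	/*
	 * Illustration with assumed values: vp_index 1 -> map 0, pos 0
	 * (bit 0 of vp_idx_map[0]); vp_index 9 -> map 1, pos 0;
	 * vp_index 16 -> map 1, pos 7.
	 */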
+ map = (vp_index - 1) / 8;
+ pos = (vp_index - 1) & 7;
+ down(&ha->vport_sem);
+ vce->vp_idx_map[map] |= 1 << pos;
+ up(&ha->vport_sem);
+
+ rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
+ "(%x).\n", __func__, ha->host_no, rval));
+ printk("%s(%ld): failed to issue VP control IOCB"
+ "(%x).\n", __func__, ha->host_no, rval);
+ } else if (vce->entry_status != 0) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- error status (%x).\n", __func__, ha->host_no,
+ vce->entry_status));
+ printk("%s(%ld): failed to complete IOCB "
+ "-- error status (%x).\n", __func__, ha->host_no,
+ vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- completion status (%x).\n", __func__, ha->host_no,
+ le16_to_cpu(vce->comp_status)));
+ printk("%s(%ld): failed to complete IOCB "
+ "-- completion status (%x).\n", __func__, ha->host_no,
+ le16_to_cpu(vce->comp_status));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ }
+
+ dma_pool_free(ha->s_dma_pool, vce, vce_dma);
+
+ return rval;
+}
+
+/*
+ * qla2x00_send_change_request
+ * Enable or disable receipt of RSCN requests from the fabric controller
+ *
+ * Input:
+ * ha = adapter block pointer
+ * format = registration format:
+ * 0 - Reserved
+ * 1 - Fabric detected registration
+ * 2 - N_port detected registration
+ * 3 - Full registration
+ * FF - clear registration
+ * vp_idx = Virtual port index
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel Context
+ */
+
+int
+qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
+ uint16_t vp_idx)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ /*
+ * This command is implicitly executed by firmware during login for the
+ * physical hosts
+ */
+ if (vp_idx == 0)
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
+ mcp->mb[1] = format;
+ mcp->mb[9] = vp_idx;
+ mcp->out_mb = MBX_9|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0|MBX_1;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
+ rval = BIT_1;
+ }
+ } else
+ rval = BIT_1;
+
+ return rval;
+}
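/*
 * Minimal usage sketch (hypothetical helper, not part of this driver): ask
 * the fabric for full RSCN registration on behalf of a virtual port.  This
 * mirrors the real call in qla24xx_configure_vp() in qla_mid.c, which passes
 * format 3 (full registration).
 */
static inline int example_vp_register_rscn(scsi_qla_host_t *vha)
{
	/* vp_idx 0 is the physical port; the routine rejects it on purpose. */
	return qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
}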
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
new file mode 100644
index 000000000000..54dc415d8b53
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -0,0 +1,497 @@
+/*
+ * QLOGIC LINUX SOFTWARE
+ *
+ * QLogic ISP2x00 device driver for Linux 2.6.x
+ * Copyright (C) 2003-2005 QLogic Corporation
+ * (www.qlogic.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#include "qla_def.h"
+
+#include <linux/version.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+#include <linux/list.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <linux/delay.h>
+
+void qla2x00_vp_stop_timer(scsi_qla_host_t *);
+
+void
+qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
+{
+ if (vha->parent && vha->timer_active) {
+ del_timer_sync(&vha->timer);
+ vha->timer_active = 0;
+ }
+}
+
+uint32_t
+qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
+{
+ uint32_t vp_id;
+ scsi_qla_host_t *ha = vha->parent;
+
+ /* Find an empty slot and assign a vp_id */
+ down(&ha->vport_sem);
+ vp_id = find_first_zero_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC);
+ if (vp_id > MAX_MULTI_ID_FABRIC) {
+ DEBUG15(printk("vp_id %d is bigger than MAX_MULTI_ID_FABRIC\n",
+ vp_id));
+ up(&ha->vport_sem);
+ return vp_id;
+ }
+
+ set_bit(vp_id, (unsigned long *)ha->vp_idx_map);
+ ha->num_vhosts++;
+ vha->vp_idx = vp_id;
+ list_add_tail(&vha->vp_list, &ha->vp_list);
+ up(&ha->vport_sem);
+ return vp_id;
+}
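/*
 * Summary of the allocation scheme visible in this patch: bit 0 of
 * ha->vp_idx_map is set for the physical port at probe time, so
 * find_first_zero_bit() never hands out index 0; if no bit below
 * MAX_MULTI_ID_FABRIC is free, the returned value falls outside the map
 * and the caller must treat it as an allocation failure.
 */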
+
+void
+qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
+{
+ uint16_t vp_id;
+ scsi_qla_host_t *ha = vha->parent;
+
+ down(&ha->vport_sem);
+ vp_id = vha->vp_idx;
+ ha->num_vhosts--;
+ clear_bit(vp_id, (unsigned long *)ha->vp_idx_map);
+ list_del(&vha->vp_list);
+ up(&ha->vport_sem);
+}
+
+scsi_qla_host_t *
+qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
+{
+ scsi_qla_host_t *vha;
+
+ /* Locate matching device in database. */
+ list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ if (!memcmp(port_name, vha->port_name, WWN_SIZE))
+ return vha;
+ }
+ return NULL;
+}
+
+/*
+ * qla2x00_mark_vp_devices_dead
+ * Updates fcport state when device goes offline.
+ *
+ * Input:
+ * vha = virtual adapter block pointer.
+ *
+ * Return:
+ * None.
+ *
+ * Context:
+ */
+void
+qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
+{
+ fc_port_t *fcport;
+ scsi_qla_host_t *pha = to_qla_parent(vha);
+
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (fcport->vp_idx != vha->vp_idx)
+ continue;
+
+ DEBUG15(printk("scsi(%ld): Marking port dead, "
+ "loop_id=0x%04x :%x\n",
+ vha->host_no, fcport->loop_id, fcport->vp_idx));
+
+ atomic_set(&fcport->state, FCS_DEVICE_DEAD);
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ }
+}
+
+int
+qla24xx_disable_vp(scsi_qla_host_t *vha)
+{
+ int ret;
+
+ ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+ /* Delete all vp's fcports from parent's list */
+ qla2x00_mark_vp_devices_dead(vha);
+ atomic_set(&vha->vp_state, VP_FAILED);
+ vha->flags.management_server_logged_in = 0;
+ if (ret == QLA_SUCCESS) {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
+ } else {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ return -1;
+ }
+ return 0;
+}
+
+int
+qla24xx_enable_vp(scsi_qla_host_t *vha)
+{
+ int ret;
+ scsi_qla_host_t *ha = vha->parent;
+
+ /* Check if physical ha port is Up */
+ if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
+ atomic_read(&ha->loop_state) == LOOP_DEAD ) {
+ vha->vp_err_state = VP_ERR_PORTDWN;
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
+ goto enable_failed;
+ }
+
+ /* Initialize the new vport unless it is a persistent port */
+ down(&ha->vport_sem);
+ ret = qla24xx_modify_vp_config(vha);
+ up(&ha->vport_sem);
+
+ if (ret != QLA_SUCCESS) {
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
+ goto enable_failed;
+ }
+
+ DEBUG15(qla_printk(KERN_INFO, ha,
+ "Virtual port with id: %d - Enabled\n", vha->vp_idx));
+ return 0;
+
+enable_failed:
+ DEBUG15(qla_printk(KERN_INFO, ha,
+ "Virtual port with id: %d - Disabled\n", vha->vp_idx));
+ return 1;
+}
+
+/**
+ * qla24xx_modify_vhba() - Modifies the virtual fabric port's configuration
+ * @ha: HA context
+ * @vp: pointer to buffer of virtual port parameters.
+ * @vp_id: filled in with the virtual port id, or MAX_NUM_VPORT_LOOP on failure.
+ *
+ * Returns a VP_RET_CODE_* status value.
+ */
+uint32_t
+qla24xx_modify_vhba(scsi_qla_host_t *ha, vport_params_t *vp, uint32_t *vp_id)
+{
+ scsi_qla_host_t *vha;
+
+ vha = qla24xx_find_vhost_by_name(ha, vp->port_name);
+ if (!vha) {
+ *vp_id = MAX_NUM_VPORT_LOOP;
+ return VP_RET_CODE_WWPN;
+ }
+
+ if (qla24xx_enable_vp(vha)) {
+ scsi_host_put(vha->host);
+ qla2x00_mem_free(vha);
+ *vp_id = MAX_NUM_VPORT_LOOP;
+ return VP_RET_CODE_RESOURCES;
+ }
+
+ *vp_id = vha->vp_idx;
+ return VP_RET_CODE_OK;
+}
+
+void
+qla24xx_configure_vp(scsi_qla_host_t *vha)
+{
+ struct fc_vport *fc_vport;
+ int ret;
+
+ fc_vport = vha->fc_vport;
+
+ DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
+ vha->host_no, __func__));
+ ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
+ if (ret != QLA_SUCCESS) {
+ DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving"
+ " of RSCN requests: 0x%x\n", ret));
+ return;
+ } else {
+ /* Corresponds to SCR enabled */
+ clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
+ }
+
+ vha->flags.online = 1;
+ if (qla24xx_configure_vhba(vha))
+ return;
+
+ atomic_set(&vha->vp_state, VP_ACTIVE);
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+void
+qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
+{
+ int i, vp_idx_matched;
+ scsi_qla_host_t *vha;
+
+ if (ha->parent)
+ return;
+
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, 1);
+ for (;i <= MAX_MULTI_ID_FABRIC;
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, i + 1)) {
+ vp_idx_matched = 0;
+
+ list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ if (i == vha->vp_idx) {
+ vp_idx_matched = 1;
+ break;
+ }
+ }
+
+ if (vp_idx_matched) {
+ switch (mb[0]) {
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_UP:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_POINT_TO_POINT:
+ case MBA_CHG_IN_CONNECTION:
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ DEBUG15(printk("scsi(%ld)%s: Async_event for"
+ " VP[%d], mb = 0x%x, vha=%p\n",
+ vha->host_no, __func__,i, *mb, vha));
+ qla2x00_async_event(vha, mb);
+ break;
+ }
+ }
+ }
+}
+
+void
+qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
+{
+ /*
+ * Physical port will do most of the abort and recovery work. We can
+ * just treat it as a loop down
+ */
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ }
+
+ DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
+ vha->host_no, vha->vp_idx));
+ qla24xx_enable_vp(vha);
+}
+
+int
+qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
+{
+ if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
+ /* VP acquired. complete port configuration */
+ qla24xx_configure_vp(vha);
+ return 0;
+ }
+
+ if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ qla2x00_vp_abort_isp(vha);
+
+ if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
+ (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
+ clear_bit(RESET_ACTIVE, &vha->dpc_flags);
+ }
+
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
+ qla2x00_loop_resync(vha);
+ clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
+ }
+ }
+
+ return 0;
+}
+
+void
+qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
+{
+ int ret;
+ int i, vp_idx_matched;
+ scsi_qla_host_t *vha;
+
+ if (ha->parent)
+ return;
+ if (list_empty(&ha->vp_list))
+ return;
+
+ clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, 1);
+ for (;i <= MAX_MULTI_ID_FABRIC;
+ i = find_next_bit((unsigned long *)ha->vp_idx_map,
+ MAX_MULTI_ID_FABRIC + 1, i + 1)) {
+ vp_idx_matched = 0;
+
+ list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ if (i == vha->vp_idx) {
+ vp_idx_matched = 1;
+ break;
+ }
+ }
+
+ if (vp_idx_matched)
+ ret = qla2x00_do_dpc_vp(vha);
+ }
+}
+
+int
+qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha;
+ uint8_t port_name[WWN_SIZE];
+
+ if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
+ return VPCERR_UNSUPPORTED;
+
+ /* Check that the F/W and H/W support NPIV */
+ if (!ha->flags.npiv_supported)
+ return VPCERR_UNSUPPORTED;
+
+ /* Check whether an NPIV-capable switch is present */
+ if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
+ return VPCERR_NO_FABRIC_SUPP;
+
+ /* Check that the WWPN is unique */
+ u64_to_wwn(fc_vport->port_name, port_name);
+ vha = qla24xx_find_vhost_by_name(ha, port_name);
+ if (vha)
+ return VPCERR_BAD_WWN;
+
+ /* Check against the maximum number of supported NPIV ports */
+ if (ha->num_vhosts > ha->max_npiv_vports) {
+ DEBUG15(printk("scsi(%ld): num_vhosts %d is bigger than "
+ "max_npiv_vports %d.\n", ha->host_no,
+ (uint16_t) ha->num_vhosts, (int) ha->max_npiv_vports));
+ return VPCERR_UNSUPPORTED;
+ }
+ return 0;
+}
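/*
 * NPIV bring-up sequence as wired together by this patch (sketch only; the
 * fc_transport entry points that drive it are outside this diff):
 *
 *	qla24xx_vport_create_req_sanity_check(fc_vport)   reject bad requests
 *	vha = qla24xx_create_vhost(fc_vport)              clone parent, allocate vp_idx
 *	qla24xx_enable_vp(vha)                            issue Modify VP Config IOCB
 *	... firmware posts a Report-ID Acquisition entry ...
 *	qla24xx_report_id_acquisition()                   sets VP_IDX_ACQUIRED
 *	qla2x00_do_dpc_vp() -> qla24xx_configure_vp()     SCR and fabric configuration
 */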
+
+scsi_qla_host_t *
+qla24xx_create_vhost(struct fc_vport *fc_vport)
+{
+ scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
+ scsi_qla_host_t *vha;
+ struct Scsi_Host *host;
+
+ host = scsi_host_alloc(&qla24xx_driver_template,
+ sizeof(scsi_qla_host_t));
+ if (!host) {
+ printk(KERN_WARNING
+ "qla2xxx: scsi_host_alloc() failed for vport\n");
+ return(NULL);
+ }
+
+ vha = (scsi_qla_host_t *)host->hostdata;
+
+ /* clone the parent hba */
+ memcpy(vha, ha, sizeof (scsi_qla_host_t));
+
+ fc_vport->dd_data = vha;
+
+ vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
+ if (!vha->node_name)
+ goto create_vhost_failed_1;
+
+ vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
+ if (!vha->port_name)
+ goto create_vhost_failed_2;
+
+ /* New host info */
+ u64_to_wwn(fc_vport->node_name, vha->node_name);
+ u64_to_wwn(fc_vport->port_name, vha->port_name);
+
+ vha->host = host;
+ vha->host_no = host->host_no;
+ vha->parent = ha;
+ vha->fc_vport = fc_vport;
+ vha->device_flags = 0;
+ vha->instance = num_hosts;
+ vha->vp_idx = qla24xx_allocate_vp_id(vha);
+ if (vha->vp_idx > ha->max_npiv_vports) {
+ DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
+ vha->host_no));
+ goto create_vhost_failed_3;
+ }
+ vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+
+ init_MUTEX(&vha->mbx_cmd_sem);
+ init_MUTEX_LOCKED(&vha->mbx_intr_sem);
+
+ INIT_LIST_HEAD(&vha->list);
+ INIT_LIST_HEAD(&vha->fcports);
+ INIT_LIST_HEAD(&vha->vp_fcports);
+
+ vha->dpc_flags = 0L;
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+
+ /*
+ * To fix the issue of processing a parent's RSCN for the vport before
+ * its SCR is complete.
+ */
+ set_bit(VP_SCR_NEEDED, &vha->vp_flags);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+
+ qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+
+ host->can_queue = vha->request_q_length + 128;
+ host->this_id = 255;
+ host->cmd_per_lun = 3;
+ host->max_cmd_len = MAX_CMDSZ;
+ host->max_channel = MAX_BUSES - 1;
+ host->max_lun = MAX_LUNS;
+ host->unique_id = vha->instance;
+ host->max_id = MAX_TARGETS_2200;
+ host->transportt = qla2xxx_transport_vport_template;
+
+ DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
+ vha->host_no, vha));
+
+ vha->flags.init_done = 1;
+ num_hosts++;
+
+ down(&ha->vport_sem);
+ set_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
+ ha->cur_vport_count++;
+ up(&ha->vport_sem);
+
+ return vha;
+
+create_vhost_failed_3:
+ kfree(vha->port_name);
+
+create_vhost_failed_2:
+ kfree(vha->node_name);
+
+create_vhost_failed_1:
+ return NULL;
+}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b98136adaaae..c488996cb958 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -29,8 +29,7 @@ static struct kmem_cache *srb_cachep;
/*
* Ioctl related information.
*/
-static int num_hosts;
-
+int num_hosts;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -112,7 +111,7 @@ static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_change_queue_depth(struct scsi_device *, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
-static struct scsi_host_template qla2x00_driver_template = {
+struct scsi_host_template qla2x00_driver_template = {
.module = THIS_MODULE,
.name = QLA2XXX_DRIVER_NAME,
.queuecommand = qla2x00_queuecommand,
@@ -143,7 +142,7 @@ static struct scsi_host_template qla2x00_driver_template = {
.shost_attrs = qla2x00_host_attrs,
};
-static struct scsi_host_template qla24xx_driver_template = {
+struct scsi_host_template qla24xx_driver_template = {
.module = THIS_MODULE,
.name = QLA2XXX_DRIVER_NAME,
.queuecommand = qla24xx_queuecommand,
@@ -171,21 +170,21 @@ static struct scsi_host_template qla24xx_driver_template = {
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
+struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
/* TODO Convert to inlines
*
* Timer routines
*/
-#define WATCH_INTERVAL 1 /* number of seconds */
-static void qla2x00_timer(scsi_qla_host_t *);
+void qla2x00_timer(scsi_qla_host_t *);
-static __inline__ void qla2x00_start_timer(scsi_qla_host_t *,
+__inline__ void qla2x00_start_timer(scsi_qla_host_t *,
void *, unsigned long);
static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
-static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
+__inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
-static inline void
+__inline__ void
qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
{
init_timer(&ha->timer);
@@ -202,7 +201,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
mod_timer(&ha->timer, jiffies + interval * HZ);
}
-static __inline__ void
+__inline__ void
qla2x00_stop_timer(scsi_qla_host_t *ha)
{
del_timer_sync(&ha->timer);
@@ -213,8 +212,8 @@ static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
-static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
-static void qla2x00_mem_free(scsi_qla_host_t *ha);
+uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
+void qla2x00_mem_free(scsi_qla_host_t *ha);
static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
@@ -266,6 +265,8 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
strcpy(str, "PCIe (");
if (lspeed == 1)
strcat(str, "2.5Gb/s ");
+ else if (lspeed == 2)
+ strcat(str, "5.0Gb/s ");
else
strcat(str, "<unknown> ");
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
@@ -344,6 +345,12 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
strcat(str, "[IP] ");
if (ha->fw_attributes & BIT_2)
strcat(str, "[Multi-ID] ");
+ if (ha->fw_attributes & BIT_3)
+ strcat(str, "[SB-2] ");
+ if (ha->fw_attributes & BIT_4)
+ strcat(str, "[T10 CRC] ");
+ if (ha->fw_attributes & BIT_5)
+ strcat(str, "[VI] ");
if (ha->fw_attributes & BIT_13)
strcat(str, "[Experimental]");
return str;
@@ -438,6 +445,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
srb_t *sp;
int rval;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
rval = fc_remote_port_chkready(rport);
if (rval) {
@@ -453,7 +461,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ atomic_read(&pha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
@@ -462,7 +470,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
spin_unlock_irq(ha->host->host_lock);
- sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(pha, fcport, cmd, done);
if (!sp)
goto qc24_host_busy_lock;
@@ -475,8 +483,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(ha, sp);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_sp_free_dma(pha, sp);
+ mempool_free(sp, pha->srb_mempool);
qc24_host_busy_lock:
spin_lock_irq(ha->host->host_lock);
@@ -548,16 +556,17 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
{
int return_status;
unsigned long wait_online;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while (((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) ||
- test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &ha->dpc_flags) ||
- ha->dpc_active) && time_before(jiffies, wait_online)) {
+ while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) ||
+ pha->dpc_active) && time_before(jiffies, wait_online)) {
msleep(1000);
}
- if (ha->flags.online)
+ if (pha->flags.online)
return_status = QLA_SUCCESS;
else
return_status = QLA_FUNCTION_FAILED;
@@ -588,14 +597,15 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
{
int return_status = QLA_SUCCESS;
unsigned long loop_timeout ;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
/* wait for 5 min at the max for loop to be ready */
loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while ((!atomic_read(&ha->loop_down_timer) &&
- atomic_read(&ha->loop_state) == LOOP_DOWN) ||
- atomic_read(&ha->loop_state) != LOOP_READY) {
- if (atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ while ((!atomic_read(&pha->loop_down_timer) &&
+ atomic_read(&pha->loop_state) == LOOP_DOWN) ||
+ atomic_read(&pha->loop_state) != LOOP_READY) {
+ if (atomic_read(&pha->loop_state) == LOOP_DEAD) {
return_status = QLA_FUNCTION_FAILED;
break;
}
@@ -650,6 +660,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned long serial;
unsigned long flags;
int wait = 0;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
qla2x00_block_error_handler(cmd);
@@ -663,9 +674,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
serial = cmd->serial_number;
/* Check active list for command command. */
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
- sp = ha->outstanding_cmds[i];
+ sp = pha->outstanding_cmds[i];
if (sp == NULL)
continue;
@@ -677,8 +688,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
__func__, ha->host_no, sp, serial));
DEBUG3(qla2x00_print_scsi_cmd(cmd));
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops.abort_command(ha, sp)) {
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(ha, sp)) {
DEBUG2(printk("%s(%ld): abort_command "
"mbx failed.\n", __func__, ha->host_no));
} else {
@@ -686,11 +697,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"mbx success.\n", __func__, ha->host_no));
wait = 1;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&pha->hardware_lock, flags);
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
/* Wait for the command to be returned. */
if (wait) {
@@ -731,6 +742,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
srb_t *sp;
struct scsi_cmnd *cmd;
unsigned long flags;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
status = 0;
@@ -739,19 +751,20 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
* array
*/
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- spin_lock_irqsave(&ha->hardware_lock, flags);
- sp = ha->outstanding_cmds[cnt];
+ spin_lock_irqsave(&pha->hardware_lock, flags);
+ sp = pha->outstanding_cmds[cnt];
if (sp) {
cmd = sp->cmd;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (cmd->device->id == t) {
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ if (cmd->device->id == t &&
+ ha->vp_idx == sp->ha->vp_idx) {
if (!qla2x00_eh_wait_on_command(ha, cmd)) {
status = 1;
break;
}
}
} else {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&pha->hardware_lock, flags);
}
}
return (status);
@@ -782,14 +795,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- int ret;
+ int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
qla2x00_block_error_handler(cmd);
- ret = FAILED;
-
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
@@ -810,7 +821,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
#if defined(LOGOUT_AFTER_DEVICE_RESET)
if (ret == SUCCESS) {
if (fcport->flags & FC_FABRIC_DEVICE) {
- ha->isp_ops.fabric_logout(ha, fcport->loop_id);
+ ha->isp_ops->fabric_logout(ha, fcport->loop_id);
qla2x00_mark_device_lost(ha, fcport, 0, 0);
}
}
@@ -912,15 +923,14 @@ static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
+ scsi_qla_host_t *pha = to_qla_parent(ha);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- int ret;
+ int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
qla2x00_block_error_handler(cmd);
- ret = FAILED;
-
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
@@ -944,7 +954,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
/* Flush outstanding commands. */
- if (!qla2x00_eh_wait_for_pending_commands(ha))
+ if (!qla2x00_eh_wait_for_pending_commands(pha))
ret = FAILED;
eh_bus_reset_done:
@@ -974,14 +984,13 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- int ret;
+ int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
qla2x00_block_error_handler(cmd);
- ret = FAILED;
-
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
@@ -1004,21 +1013,24 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
* while dpc is stuck for the mailbox to complete.
*/
qla2x00_wait_for_loop_ready(ha);
- set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- if (qla2x00_abort_isp(ha)) {
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
+ if (qla2x00_abort_isp(pha)) {
+ clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
/* failed. schedule dpc to try */
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
goto eh_host_reset_lock;
}
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
/* Waiting for our command in done_queue to be returned to OS.*/
- if (qla2x00_eh_wait_for_pending_commands(ha))
+ if (qla2x00_eh_wait_for_pending_commands(pha))
ret = SUCCESS;
+ if (ha->parent)
+ qla2x00_vp_abort_isp(ha);
+
eh_host_reset_lock:
qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
(ret == FAILED) ? "failed" : "succeeded");
@@ -1101,7 +1113,7 @@ static int
qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
{
/* Abort Target command will clear Reservation */
- return ha->isp_ops.abort_target(reset_fcport);
+ return ha->isp_ops->abort_target(reset_fcport);
}
static int
@@ -1180,8 +1192,8 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
!pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
- ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
- ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
+ ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+ ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
return;
}
}
@@ -1190,6 +1202,193 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
}
+static void
+qla2x00_enable_intrs(scsi_qla_host_t *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 1;
+ /* enable risc and host interrupts */
+ WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+ RD_REG_WORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+}
+
+static void
+qla2x00_disable_intrs(scsi_qla_host_t *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 0;
+ /* disable risc and host interrupts */
+ WRT_REG_WORD(&reg->ictrl, 0);
+ RD_REG_WORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla24xx_enable_intrs(scsi_qla_host_t *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 1;
+ WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
+ RD_REG_DWORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qla24xx_disable_intrs(scsi_qla_host_t *ha)
+{
+ unsigned long flags = 0;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 0;
+ WRT_REG_DWORD(&reg->ictrl, 0);
+ RD_REG_DWORD(&reg->ictrl);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static struct isp_operations qla2100_isp_ops = {
+ .pci_config = qla2100_pci_config,
+ .reset_chip = qla2x00_reset_chip,
+ .chip_diag = qla2x00_chip_diag,
+ .config_rings = qla2x00_config_rings,
+ .reset_adapter = qla2x00_reset_adapter,
+ .nvram_config = qla2x00_nvram_config,
+ .update_fw_options = qla2x00_update_fw_options,
+ .load_risc = qla2x00_load_risc,
+ .pci_info_str = qla2x00_pci_info_str,
+ .fw_version_str = qla2x00_fw_version_str,
+ .intr_handler = qla2100_intr_handler,
+ .enable_intrs = qla2x00_enable_intrs,
+ .disable_intrs = qla2x00_disable_intrs,
+ .abort_command = qla2x00_abort_command,
+ .abort_target = qla2x00_abort_target,
+ .fabric_login = qla2x00_login_fabric,
+ .fabric_logout = qla2x00_fabric_logout,
+ .calc_req_entries = qla2x00_calc_iocbs_32,
+ .build_iocbs = qla2x00_build_scsi_iocbs_32,
+ .prep_ms_iocb = qla2x00_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
+ .read_nvram = qla2x00_read_nvram_data,
+ .write_nvram = qla2x00_write_nvram_data,
+ .fw_dump = qla2100_fw_dump,
+ .beacon_on = NULL,
+ .beacon_off = NULL,
+ .beacon_blink = NULL,
+ .read_optrom = qla2x00_read_optrom_data,
+ .write_optrom = qla2x00_write_optrom_data,
+ .get_flash_version = qla2x00_get_flash_version,
+};
+
+static struct isp_operations qla2300_isp_ops = {
+ .pci_config = qla2300_pci_config,
+ .reset_chip = qla2x00_reset_chip,
+ .chip_diag = qla2x00_chip_diag,
+ .config_rings = qla2x00_config_rings,
+ .reset_adapter = qla2x00_reset_adapter,
+ .nvram_config = qla2x00_nvram_config,
+ .update_fw_options = qla2x00_update_fw_options,
+ .load_risc = qla2x00_load_risc,
+ .pci_info_str = qla2x00_pci_info_str,
+ .fw_version_str = qla2x00_fw_version_str,
+ .intr_handler = qla2300_intr_handler,
+ .enable_intrs = qla2x00_enable_intrs,
+ .disable_intrs = qla2x00_disable_intrs,
+ .abort_command = qla2x00_abort_command,
+ .abort_target = qla2x00_abort_target,
+ .fabric_login = qla2x00_login_fabric,
+ .fabric_logout = qla2x00_fabric_logout,
+ .calc_req_entries = qla2x00_calc_iocbs_32,
+ .build_iocbs = qla2x00_build_scsi_iocbs_32,
+ .prep_ms_iocb = qla2x00_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb,
+ .read_nvram = qla2x00_read_nvram_data,
+ .write_nvram = qla2x00_write_nvram_data,
+ .fw_dump = qla2300_fw_dump,
+ .beacon_on = qla2x00_beacon_on,
+ .beacon_off = qla2x00_beacon_off,
+ .beacon_blink = qla2x00_beacon_blink,
+ .read_optrom = qla2x00_read_optrom_data,
+ .write_optrom = qla2x00_write_optrom_data,
+ .get_flash_version = qla2x00_get_flash_version,
+};
+
+static struct isp_operations qla24xx_isp_ops = {
+ .pci_config = qla24xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla24xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla24xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .abort_target = qla24xx_abort_target,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla24xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+};
+
+static struct isp_operations qla25xx_isp_ops = {
+ .pci_config = qla25xx_pci_config,
+ .reset_chip = qla24xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla24xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla24xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla24xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla24xx_intr_handler,
+ .enable_intrs = qla24xx_enable_intrs,
+ .disable_intrs = qla24xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .abort_target = qla24xx_abort_target,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla25xx_read_nvram_data,
+ .write_nvram = qla25xx_write_nvram_data,
+ .fw_dump = qla25xx_fw_dump,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = qla24xx_beacon_blink,
+ .read_optrom = qla24xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+};
+
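/*
 * With the static operations tables above, each ISP family shares one
 * isp_operations instance instead of every scsi_qla_host_t carrying its own
 * copies of the function pointers; probe simply aims ha->isp_ops at the
 * right table and all callers go through the indirection (fragment taken
 * from the changes below):
 *
 *	ha->isp_ops = &qla24xx_isp_ops;
 *	...
 *	ha->isp_ops->enable_intrs(ha);
 */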
static inline void
qla2x00_set_isp_flags(scsi_qla_host_t *ha)
{
@@ -1234,19 +1433,32 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
case PCI_DEVICE_ID_QLOGIC_ISP2422:
ha->device_type |= DT_ISP2422;
ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2432:
ha->device_type |= DT_ISP2432;
ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5422:
ha->device_type |= DT_ISP5422;
+ ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5432:
ha->device_type |= DT_ISP5432;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_ISP2532:
+ ha->device_type |= DT_ISP2532;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
}
@@ -1319,61 +1531,6 @@ iospace_error_exit:
}
static void
-qla2x00_enable_intrs(scsi_qla_host_t *ha)
-{
- unsigned long flags = 0;
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->interrupts_on = 1;
- /* enable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
- RD_REG_WORD(&reg->ictrl);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-}
-
-static void
-qla2x00_disable_intrs(scsi_qla_host_t *ha)
-{
- unsigned long flags = 0;
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->interrupts_on = 0;
- /* disable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, 0);
- RD_REG_WORD(&reg->ictrl);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-static void
-qla24xx_enable_intrs(scsi_qla_host_t *ha)
-{
- unsigned long flags = 0;
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->interrupts_on = 1;
- WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
- RD_REG_DWORD(&reg->ictrl);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-static void
-qla24xx_disable_intrs(scsi_qla_host_t *ha)
-{
- unsigned long flags = 0;
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->interrupts_on = 0;
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
@@ -1418,7 +1575,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432)
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532)
sht = &qla24xx_driver_template;
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
if (host == NULL) {
@@ -1435,6 +1593,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->host = host;
ha->host_no = host->host_no;
sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
+ ha->parent = NULL;
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
@@ -1452,7 +1611,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
- ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
+ ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
@@ -1461,33 +1620,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
- ha->isp_ops.pci_config = qla2100_pci_config;
- ha->isp_ops.reset_chip = qla2x00_reset_chip;
- ha->isp_ops.chip_diag = qla2x00_chip_diag;
- ha->isp_ops.config_rings = qla2x00_config_rings;
- ha->isp_ops.reset_adapter = qla2x00_reset_adapter;
- ha->isp_ops.nvram_config = qla2x00_nvram_config;
- ha->isp_ops.update_fw_options = qla2x00_update_fw_options;
- ha->isp_ops.load_risc = qla2x00_load_risc;
- ha->isp_ops.pci_info_str = qla2x00_pci_info_str;
- ha->isp_ops.fw_version_str = qla2x00_fw_version_str;
- ha->isp_ops.intr_handler = qla2100_intr_handler;
- ha->isp_ops.enable_intrs = qla2x00_enable_intrs;
- ha->isp_ops.disable_intrs = qla2x00_disable_intrs;
- ha->isp_ops.abort_command = qla2x00_abort_command;
- ha->isp_ops.abort_target = qla2x00_abort_target;
- ha->isp_ops.fabric_login = qla2x00_login_fabric;
- ha->isp_ops.fabric_logout = qla2x00_fabric_logout;
- ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
- ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
- ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
- ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
- ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
- ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
- ha->isp_ops.fw_dump = qla2100_fw_dump;
- ha->isp_ops.read_optrom = qla2x00_read_optrom_data;
- ha->isp_ops.write_optrom = qla2x00_write_optrom_data;
- ha->isp_ops.get_flash_version = qla2x00_get_flash_version;
if (IS_QLA2100(ha)) {
host->max_id = MAX_TARGETS_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
@@ -1496,6 +1628,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
host->sg_tablesize = 32;
ha->gid_list_info_size = 4;
+ ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1503,59 +1636,39 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
ha->gid_list_info_size = 4;
+ ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA23XX(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
ha->request_q_length = REQUEST_ENTRY_CNT_2200;
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
- ha->isp_ops.pci_config = qla2300_pci_config;
- ha->isp_ops.intr_handler = qla2300_intr_handler;
- ha->isp_ops.fw_dump = qla2300_fw_dump;
- ha->isp_ops.beacon_on = qla2x00_beacon_on;
- ha->isp_ops.beacon_off = qla2x00_beacon_off;
- ha->isp_ops.beacon_blink = qla2x00_beacon_blink;
ha->gid_list_info_size = 6;
if (IS_QLA2322(ha) || IS_QLA6322(ha))
ha->optrom_size = OPTROM_SIZE_2322;
+ ha->isp_ops = &qla2300_isp_ops;
} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
- ha->init_cb_size = sizeof(struct init_cb_24xx);
- ha->mgmt_svr_loop_id = 10;
- ha->isp_ops.pci_config = qla24xx_pci_config;
- ha->isp_ops.reset_chip = qla24xx_reset_chip;
- ha->isp_ops.chip_diag = qla24xx_chip_diag;
- ha->isp_ops.config_rings = qla24xx_config_rings;
- ha->isp_ops.reset_adapter = qla24xx_reset_adapter;
- ha->isp_ops.nvram_config = qla24xx_nvram_config;
- ha->isp_ops.update_fw_options = qla24xx_update_fw_options;
- ha->isp_ops.load_risc = qla24xx_load_risc;
- ha->isp_ops.pci_info_str = qla24xx_pci_info_str;
- ha->isp_ops.fw_version_str = qla24xx_fw_version_str;
- ha->isp_ops.intr_handler = qla24xx_intr_handler;
- ha->isp_ops.enable_intrs = qla24xx_enable_intrs;
- ha->isp_ops.disable_intrs = qla24xx_disable_intrs;
- ha->isp_ops.abort_command = qla24xx_abort_command;
- ha->isp_ops.abort_target = qla24xx_abort_target;
- ha->isp_ops.fabric_login = qla24xx_login_fabric;
- ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
- ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
- ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
- ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
- ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
- ha->isp_ops.fw_dump = qla24xx_fw_dump;
- ha->isp_ops.read_optrom = qla24xx_read_optrom_data;
- ha->isp_ops.write_optrom = qla24xx_write_optrom_data;
- ha->isp_ops.beacon_on = qla24xx_beacon_on;
- ha->isp_ops.beacon_off = qla24xx_beacon_off;
- ha->isp_ops.beacon_blink = qla24xx_beacon_blink;
- ha->isp_ops.get_flash_version = qla24xx_get_flash_version;
+ ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+ ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_24XX;
+ ha->isp_ops = &qla24xx_isp_ops;
+ } else if (IS_QLA25XX(ha)) {
+ host->max_id = MAX_TARGETS_2200;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
+ ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
+ ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
+ ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_25XX;
+ ha->isp_ops = &qla25xx_isp_ops;
}
host->can_queue = ha->request_q_length + 128;
@@ -1563,10 +1676,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->instance = num_hosts;
init_MUTEX(&ha->mbx_cmd_sem);
+ init_MUTEX(&ha->vport_sem);
init_MUTEX_LOCKED(&ha->mbx_intr_sem);
INIT_LIST_HEAD(&ha->list);
INIT_LIST_HEAD(&ha->fcports);
+ INIT_LIST_HEAD(&ha->vp_list);
+
+ set_bit(0, (unsigned long *) ha->vp_idx_map);
qla2x00_config_dma_addressing(ha);
if (qla2x00_mem_alloc(ha)) {
@@ -1619,11 +1736,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
ha->host_no, ha));
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
reg = ha->iobase;
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
} else {
@@ -1645,7 +1762,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ha->isp_ops.enable_intrs(ha);
+ ha->isp_ops->enable_intrs(ha);
pci_set_drvdata(pdev, ha);
@@ -1670,9 +1787,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
" ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
qla2x00_version_str, ha->model_number,
ha->model_desc ? ha->model_desc: "", pdev->device,
- ha->isp_ops.pci_info_str(ha, pci_info), pci_name(pdev),
+ ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev),
ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
- ha->isp_ops.fw_version_str(ha, fw_str));
+ ha->isp_ops->fw_version_str(ha, fw_str));
return 0;
@@ -1738,7 +1855,7 @@ qla2x00_free_device(scsi_qla_host_t *ha)
/* turn-off interrupts on the card */
if (ha->interrupts_on)
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
qla2x00_mem_free(ha);
@@ -1789,7 +1906,8 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
int do_login, int defer)
{
- if (atomic_read(&fcport->state) == FCS_ONLINE)
+ if (atomic_read(&fcport->state) == FCS_ONLINE &&
+ ha->vp_idx == fcport->vp_idx)
qla2x00_schedule_rport_del(ha, fcport, defer);
/*
@@ -1840,19 +1958,23 @@ void
qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
{
fc_port_t *fcport;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
- list_for_each_entry(fcport, &ha->fcports, list) {
- if (fcport->port_type != FCT_TARGET)
+ list_for_each_entry(fcport, &pha->fcports, list) {
+ if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx)
continue;
-
/*
* No point in marking the device as lost, if the device is
* already DEAD.
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_schedule_rport_del(ha, fcport, defer);
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ if (defer)
+ qla2x00_schedule_rport_del(ha, fcport, defer);
+ else if (ha->vp_idx == fcport->vp_idx)
+ qla2x00_schedule_rport_del(ha, fcport, defer);
+ }
atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
@@ -1868,7 +1990,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
* 0 = success.
* 1 = failure.
*/
-static uint8_t
+uint8_t
qla2x00_mem_alloc(scsi_qla_host_t *ha)
{
char name[16];
@@ -1920,33 +2042,33 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
continue;
}
- snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
- ha->host_no);
- ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
- DMA_POOL_SIZE, 8, 0);
- if (ha->s_dma_pool == NULL) {
+ /* get consistent memory allocated for init control block */
+ ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
+ ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
+ if (ha->init_cb == NULL) {
qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - s_dma_pool\n");
+ "Memory Allocation failed - init_cb\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
+ memset(ha->init_cb, 0, ha->init_cb_size);
- /* get consistent memory allocated for init control block */
- ha->init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->init_cb_dma);
- if (ha->init_cb == NULL) {
+ snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
+ ha->host_no);
+ ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DMA_POOL_SIZE, 8, 0);
+ if (ha->s_dma_pool == NULL) {
qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - init_cb\n");
+ "Memory Allocation failed - s_dma_pool\n");
qla2x00_mem_free(ha);
msleep(100);
continue;
}
- memset(ha->init_cb, 0, ha->init_cb_size);
if (qla2x00_allocate_sp_pool(ha)) {
qla_printk(KERN_WARNING, ha,
@@ -2011,7 +2133,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
}
memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
- if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+ if (IS_FWI2_CAPABLE(ha)) {
/*
* Get consistent memory allocated for SFP
* block.
@@ -2052,7 +2174,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
* Input:
* ha = adapter block pointer.
*/
-static void
+void
qla2x00_mem_free(scsi_qla_host_t *ha)
{
struct list_head *fcpl, *fcptemp;
@@ -2088,12 +2210,13 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
if (ha->ms_iocb)
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
- if (ha->init_cb)
- dma_pool_free(ha->s_dma_pool, ha->init_cb, ha->init_cb_dma);
-
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
+ if (ha->init_cb)
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+ ha->init_cb, ha->init_cb_dma);
+
if (ha->gid_list)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
ha->gid_list_dma);
@@ -2199,6 +2322,7 @@ qla2x00_free_sp_pool( scsi_qla_host_t *ha)
static int
qla2x00_do_dpc(void *data)
{
+ int rval;
scsi_qla_host_t *ha;
fc_port_t *fcport;
uint8_t status;
@@ -2289,7 +2413,7 @@ qla2x00_do_dpc(void *data)
if (fcport->flags & FCF_FABRIC_DEVICE) {
if (fcport->flags &
FCF_TAPE_PRESENT)
- ha->isp_ops.fabric_logout(
+ ha->isp_ops->fabric_logout(
ha, fcport->loop_id,
fcport->d_id.b.domain,
fcport->d_id.b.area,
@@ -2347,7 +2471,7 @@ qla2x00_do_dpc(void *data)
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
&ha->dpc_flags))) {
- qla2x00_loop_resync(ha);
+ rval = qla2x00_loop_resync(ha);
clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
}
@@ -2369,10 +2493,12 @@ qla2x00_do_dpc(void *data)
}
if (!ha->interrupts_on)
- ha->isp_ops.enable_intrs(ha);
+ ha->isp_ops->enable_intrs(ha);
if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
- ha->isp_ops.beacon_blink(ha);
+ ha->isp_ops->beacon_blink(ha);
+
+ qla2x00_do_dpc_all_vps(ha);
ha->dpc_active = 0;
} /* End of while(1) */
@@ -2426,13 +2552,7 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
if (sp->flags & SRB_DMA_VALID) {
- if (cmd->use_sg) {
- dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else if (cmd->request_bufflen) {
- dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
sp->flags &= ~SRB_DMA_VALID;
}
CMD_SP(cmd) = NULL;
@@ -2458,7 +2578,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
*
* Context: Interrupt
***************************************************************************/
-static void
+void
qla2x00_timer(scsi_qla_host_t *ha)
{
unsigned long cpu_flags = 0;
@@ -2467,6 +2587,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
int index;
srb_t *sp;
int t;
+ scsi_qla_host_t *pha = to_qla_parent(ha);
/*
* Ports - Port down timer.
@@ -2512,23 +2633,29 @@ qla2x00_timer(scsi_qla_host_t *ha)
atomic_set(&ha->loop_state, LOOP_DEAD);
/* Schedule an ISP abort to return any tape commands. */
- spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS;
- index++) {
- fc_port_t *sfcp;
-
- sp = ha->outstanding_cmds[index];
- if (!sp)
- continue;
- sfcp = sp->fcport;
- if (!(sfcp->flags & FCF_TAPE_PRESENT))
- continue;
+ /* NPIV - scan physical port only */
+ if (!ha->parent) {
+ spin_lock_irqsave(&ha->hardware_lock,
+ cpu_flags);
+ for (index = 1;
+ index < MAX_OUTSTANDING_COMMANDS;
+ index++) {
+ fc_port_t *sfcp;
+
+ sp = ha->outstanding_cmds[index];
+ if (!sp)
+ continue;
+ sfcp = sp->fcport;
+ if (!(sfcp->flags & FCF_TAPE_PRESENT))
+ continue;
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- break;
+ set_bit(ISP_ABORT_NEEDED,
+ &ha->dpc_flags);
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ cpu_flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags);
-
set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
start_dpc++;
}
@@ -2572,8 +2699,9 @@ qla2x00_timer(scsi_qla_host_t *ha)
test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
+ test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
- qla2xxx_wake_dpc(ha);
+ qla2xxx_wake_dpc(pha);
qla2x00_restart_timer(ha, WATCH_INTERVAL);
}
@@ -2597,18 +2725,20 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
/* Firmware interface routines. */
-#define FW_BLOBS 5
+#define FW_BLOBS 6
#define FW_ISP21XX 0
#define FW_ISP22XX 1
#define FW_ISP2300 2
#define FW_ISP2322 3
#define FW_ISP24XX 4
+#define FW_ISP25XX 5
#define FW_FILE_ISP21XX "ql2100_fw.bin"
#define FW_FILE_ISP22XX "ql2200_fw.bin"
#define FW_FILE_ISP2300 "ql2300_fw.bin"
#define FW_FILE_ISP2322 "ql2322_fw.bin"
#define FW_FILE_ISP24XX "ql2400_fw.bin"
+#define FW_FILE_ISP25XX "ql2500_fw.bin"
static DECLARE_MUTEX(qla_fw_lock);
@@ -2618,6 +2748,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
{ .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
{ .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
{ .name = FW_FILE_ISP24XX, },
+ { .name = FW_FILE_ISP25XX, },
};
struct fw_blob *
@@ -2636,6 +2767,8 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
blob = &qla_fw_blobs[FW_ISP2322];
} else if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
blob = &qla_fw_blobs[FW_ISP24XX];
+ } else if (IS_QLA25XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP25XX];
}
down(&qla_fw_lock);
@@ -2679,6 +2812,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
@@ -2703,7 +2837,7 @@ qla2x00_module_init(void)
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
printk(KERN_ERR
"qla2xxx: Unable to allocate SRB cache...Failing load!\n");
@@ -2717,14 +2851,24 @@ qla2x00_module_init(void)
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
- if (!qla2xxx_transport_template)
+ if (!qla2xxx_transport_template) {
+ kmem_cache_destroy(srb_cachep);
+ return -ENODEV;
+ }
+ qla2xxx_transport_vport_template =
+ fc_attach_transport(&qla2xxx_transport_vport_functions);
+ if (!qla2xxx_transport_vport_template) {
+ kmem_cache_destroy(srb_cachep);
+ fc_release_transport(qla2xxx_transport_template);
return -ENODEV;
+ }
printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
+ fc_release_transport(qla2xxx_transport_vport_template);
}
return ret;
}
@@ -2739,6 +2883,7 @@ qla2x00_module_exit(void)
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
+ fc_release_transport(qla2xxx_transport_vport_template);
}
module_init(qla2x00_module_init);
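
Throughout the qla_os.c hunks above, every call site changes from ha->isp_ops.fn() to ha->isp_ops->fn(), which is consistent with turning the per-host copy of the operations table into a pointer to one shared, chip-specific table. The sketch below only illustrates that pattern; struct example_isp_ops, example_fwi2_ops and example_host are made-up names, and the driver's own table (struct isp_operations) is defined elsewhere and not shown in this diff.

	#include <stdio.h>

	/* Trimmed stand-in for an operations table; the real driver
	 * table has many more hooks (fabric_logout, beacon_blink, ...). */
	struct example_isp_ops {
		void (*enable_intrs)(void *ha);
		void (*disable_intrs)(void *ha);
	};

	static void example_enable(void *ha)  { printf("enable intrs for %p\n", ha); }
	static void example_disable(void *ha) { printf("disable intrs for %p\n", ha); }

	/* One shared, read-only table per ISP family. */
	static const struct example_isp_ops example_fwi2_ops = {
		.enable_intrs  = example_enable,
		.disable_intrs = example_disable,
	};

	struct example_host {
		const struct example_isp_ops *isp_ops;	/* was an embedded struct */
	};

	int main(void)
	{
		struct example_host ha = { .isp_ops = &example_fwi2_ops };

		ha.isp_ops->disable_intrs(&ha);	/* matches the ha->isp_ops->... calls above */
		ha.isp_ops->enable_intrs(&ha);
		return 0;
	}

Keeping the table behind a pointer means each probed adapter binds to the correct family table once, instead of copying function pointers into every scsi_qla_host_t.
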
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 206bda093da2..a925a3f179f9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -766,6 +766,29 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
return ret;
}
+uint8_t *
+qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ uint32_t i;
+ uint32_t *dwptr;
+
+ /* Dword reads to flash. */
+ dwptr = (uint32_t *)buf;
+ for (i = 0; i < bytes >> 2; i++, naddr++)
+ dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+ flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr)));
+
+ return buf;
+}
+
+int
+qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+ uint32_t bytes)
+{
+ return qla24xx_write_flash_data(ha, (uint32_t *)buf,
+ FA_VPD_NVRAM_ADDR | naddr, bytes >> 2);
+}
static inline void
qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
@@ -919,7 +942,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
else
ha->beacon_color_state = QLA_LED_GRN_ON;
- ha->isp_ops.beacon_blink(ha); /* This turns green LED off */
+ ha->isp_ops->beacon_blink(ha); /* This turns green LED off */
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
@@ -1031,7 +1054,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
ha->beacon_blink_led = 0;
ha->beacon_color_state = QLA_LED_ALL_ON;
- ha->isp_ops.beacon_blink(ha); /* Will flip to all off. */
+ ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1419,7 +1442,7 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
/* Suspend HBA. */
scsi_block_requests(ha->host);
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Pause RISC. */
@@ -1705,7 +1728,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
{
/* Suspend HBA. */
scsi_block_requests(ha->host);
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
@@ -1713,7 +1736,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
- ha->isp_ops.enable_intrs(ha);
+ ha->isp_ops->enable_intrs(ha);
scsi_unblock_requests(ha->host);
return buf;
@@ -1727,7 +1750,7 @@ qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
/* Suspend HBA. */
scsi_block_requests(ha->host);
- ha->isp_ops.disable_intrs(ha);
+ ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with write. */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c375a4efbc71..dd1f8ceb79c4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.01.07-k7"
+#define QLA2XXX_VERSION "8.02.00-k2"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 1
-#define QLA_DRIVER_PATCH_VER 7
+#define QLA_DRIVER_MINOR_VER 2
+#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 6437d024b0dd..fcc184cd066d 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -6,176 +6,9 @@
*/
#include "ql4_def.h"
-#include <scsi/scsi_dbg.h>
-
-#if 0
-
-static void qla4xxx_print_srb_info(struct srb * srb)
-{
- printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
- printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
- __func__, srb->cmd, (unsigned long) srb->dma_handle);
- printk("%s: fw_ddb_index = %d, lun = %d\n",
- __func__, srb->fw_ddb_index, srb->cmd->device->lun);
- printk("%s: iocb_tov = %d\n",
- __func__, srb->iocb_tov);
- printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
- __func__, srb->cc_stat, srb->r_start, srb->u_start);
-}
-
-void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
-{
- printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
- printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
- cmd->device->channel, cmd->device->id, cmd->device->lun,
- cmd->cmd_len);
- scsi_print_command(cmd);
- printk(" seg_cnt = %d\n", cmd->use_sg);
- printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
- cmd->request_buffer, cmd->request_bufflen);
- if (cmd->use_sg) {
- struct scatterlist *sg;
- sg = (struct scatterlist *)cmd->request_buffer;
- printk(" SG buffer: \n");
- qla4xxx_dump_buffer((caddr_t) sg,
- (cmd->use_sg * sizeof(*sg)));
- }
- printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
- cmd->transfersize);
- printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
- printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
- cmd->sc_data_direction);
- printk(" Current time (jiffies) = 0x%lx, "
- "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
- qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
-}
-
-void __dump_registers(struct scsi_qla_host *ha)
-{
- uint8_t i;
- for (i = 0; i < MBOX_REG_COUNT; i++) {
- printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
- readw(&ha->reg->mailbox[i]));
- }
- printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, flash_address),
- readw(&ha->reg->flash_address));
- printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, flash_data),
- readw(&ha->reg->flash_data));
- printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, ctrl_status),
- readw(&ha->reg->ctrl_status));
- if (is_qla4010(ha)) {
- printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
- readw(&ha->reg->u1.isp4010.nvram));
- }
-
- else if (is_qla4022(ha) | is_qla4032(ha)) {
- printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u1.isp4022.intr_mask),
- readw(&ha->reg->u1.isp4022.intr_mask));
- printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
- readw(&ha->reg->u1.isp4022.nvram));
- printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u1.isp4022.semaphore),
- readw(&ha->reg->u1.isp4022.semaphore));
- }
- printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, req_q_in),
- readw(&ha->reg->req_q_in));
- printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, rsp_q_out),
- readw(&ha->reg->rsp_q_out));
- if (is_qla4010(ha)) {
- printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.ext_hw_conf),
- readw(&ha->reg->u2.isp4010.ext_hw_conf));
- printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.port_ctrl),
- readw(&ha->reg->u2.isp4010.port_ctrl));
- printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.port_status),
- readw(&ha->reg->u2.isp4010.port_status));
- printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.req_q_out),
- readw(&ha->reg->u2.isp4010.req_q_out));
- printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
- readw(&ha->reg->u2.isp4010.gp_out));
- printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
- readw(&ha->reg->u2.isp4010.gp_in));
- printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4010.port_err_status),
- readw(&ha->reg->u2.isp4010.port_err_status));
- }
-
- else if (is_qla4022(ha) | is_qla4032(ha)) {
- printk(KERN_INFO "Page 0 Registers:\n");
- printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.ext_hw_conf),
- readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
- printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.port_ctrl),
- readw(&ha->reg->u2.isp4022.p0.port_ctrl));
- printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.port_status),
- readw(&ha->reg->u2.isp4022.p0.port_status));
- printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.gp_out),
- readw(&ha->reg->u2.isp4022.p0.gp_out));
- printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
- readw(&ha->reg->u2.isp4022.p0.gp_in));
- printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p0.port_err_status),
- readw(&ha->reg->u2.isp4022.p0.port_err_status));
- printk(KERN_INFO "Page 1 Registers:\n");
- writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
- &ha->reg->ctrl_status);
- printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
- (uint8_t) offsetof(struct isp_reg,
- u2.isp4022.p1.req_q_out),
- readw(&ha->reg->u2.isp4022.p1.req_q_out));
- writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
- &ha->reg->ctrl_status);
- }
-}
-
-void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
-{
- unsigned long flags = 0;
- int i = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (i = 1; i < MBOX_REG_COUNT; i++)
- printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
- readw(&ha->reg->mailbox[i]));
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-void qla4xxx_dump_registers(struct scsi_qla_host *ha)
-{
- unsigned long flags = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- __dump_registers(ha);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
void qla4xxx_dump_buffer(void *b, uint32_t size)
{
@@ -198,4 +31,3 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk(KERN_DEBUG "\n");
}
-#endif /* 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 6f4cf2dd2f4a..accaf690eaf0 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -122,8 +122,7 @@
#define ISCSI_IPADDR_SIZE 4 /* IP address size */
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alais name size */
-#define ISCSI_NAME_SIZE 255 /* ISCSI Name size -
- * usually a string */
+#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -187,9 +186,21 @@ struct srb {
u_long u_start; /* Time when we handed the cmd to F/W */
};
- /*
- * Device Database (DDB) structure
- */
+/*
+ * Asynchronous Event Queue structure
+ */
+struct aen {
+ uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
+};
+
+struct ql4_aen_log {
+ int count;
+ struct aen entry[MAX_AEN_ENTRIES];
+};
+
+/*
+ * Device Database (DDB) structure
+ */
struct ddb_entry {
struct list_head list; /* ddb list */
struct scsi_qla_host *ha;
@@ -254,13 +265,6 @@ struct ddb_entry {
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
-/*
- * Asynchronous Event Queue structure
- */
-struct aen {
- uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
-};
-
#include "ql4_fw.h"
#include "ql4_nvram.h"
@@ -270,31 +274,31 @@ struct aen {
*/
struct scsi_qla_host {
/* Linux adapter configuration data */
- struct Scsi_Host *host; /* pointer to host data */
- uint32_t tot_ddbs;
unsigned long flags;
-#define AF_ONLINE 0 /* 0x00000001 */
-#define AF_INIT_DONE 1 /* 0x00000002 */
-#define AF_MBOX_COMMAND 2 /* 0x00000004 */
-#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
-#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */
-#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
-#define AF_LINK_UP 8 /* 0x00000100 */
-#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
-#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */
-#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
+#define AF_ONLINE 0 /* 0x00000001 */
+#define AF_INIT_DONE 1 /* 0x00000002 */
+#define AF_MBOX_COMMAND 2 /* 0x00000004 */
+#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
+#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
+#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
+#define AF_LINK_UP 8 /* 0x00000100 */
+#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
+#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
unsigned long dpc_flags;
-#define DPC_RESET_HA 1 /* 0x00000002 */
-#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
-#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
-#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
-#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
-#define DPC_ISNS_RESTART 7 /* 0x00000080 */
-#define DPC_AEN 9 /* 0x00000200 */
-#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+#define DPC_RESET_HA 1 /* 0x00000002 */
+#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
+#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
+#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
+#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
+#define DPC_ISNS_RESTART 7 /* 0x00000080 */
+#define DPC_AEN 9 /* 0x00000200 */
+#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
+
+ struct Scsi_Host *host; /* pointer to host data */
+ uint32_t tot_ddbs;
uint16_t iocb_cnt;
uint16_t iocb_hiwat;
@@ -344,6 +348,7 @@ struct scsi_qla_host {
uint32_t firmware_version[2];
uint32_t patch_number;
uint32_t build_number;
+ uint32_t board_id;
/* --- From Init_FW --- */
/* init_cb_t *init_cb; */
@@ -363,7 +368,6 @@ struct scsi_qla_host {
/* --- From GetFwState --- */
uint32_t firmware_state;
- uint32_t board_id;
uint32_t addl_fw_state;
/* Linux kernel thread */
@@ -414,6 +418,8 @@ struct scsi_qla_host {
uint16_t aen_out;
struct aen aen_q[MAX_AEN_ENTRIES];
+ struct ql4_aen_log aen_log;/* tracks all aens */
+
/* This mutex protects several threads to do mailbox commands
* concurrently.
*/
@@ -585,10 +591,4 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
#define FLUSH_DDB_CHANGED_AENS 1
#define RELOGIN_DDB_CHANGED_AENS 2
-#include "ql4_version.h"
-#include "ql4_glbl.h"
-#include "ql4_dbg.h"
-#include "ql4_inline.h"
-
-
#endif /*_QLA4XXX_H */
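
The ql4_def.h hunks above introduce struct aen and struct ql4_aen_log, a fixed-size log of asynchronous-event mailbox registers that the interrupt path (ql4_isr.c, further below) fills until the array is full. A minimal stand-alone C sketch of that bounded-append behaviour follows; all EXAMPLE_* names are placeholders, not driver symbols.

	#include <stdint.h>
	#include <stdio.h>

	#define EXAMPLE_MAX_AEN_ENTRIES	64	/* stand-in for MAX_AEN_ENTRIES */
	#define EXAMPLE_MBOX_AEN_REGS	8	/* stand-in for MBOX_AEN_REG_COUNT */

	struct example_aen {
		uint32_t mbox_sts[EXAMPLE_MBOX_AEN_REGS];
	};

	struct example_aen_log {
		int count;
		struct example_aen entry[EXAMPLE_MAX_AEN_ENTRIES];
	};

	/* Record one event; once the array is full, further events are
	 * dropped rather than wrapping, mirroring the ql4_isr.c hunk below. */
	static void example_log_aen(struct example_aen_log *log,
				    const uint32_t regs[EXAMPLE_MBOX_AEN_REGS])
	{
		int i;

		if (log->count >= EXAMPLE_MAX_AEN_ENTRIES)
			return;
		for (i = 0; i < EXAMPLE_MBOX_AEN_REGS; i++)
			log->entry[log->count].mbox_sts[i] = regs[i];
		log->count++;
	}

	int main(void)
	{
		static struct example_aen_log log;
		uint32_t regs[EXAMPLE_MBOX_AEN_REGS] = { 0x8029, 0, 3, 5 };

		example_log_aen(&log, regs);
		printf("logged %d aen(s), first status 0x%x\n",
		       log.count, log.entry[0].mbox_sts[0]);
		return 0;
	}
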
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4eea8c571916..9bb3d1d2a925 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -20,143 +20,23 @@
*************************************************************************/
struct port_ctrl_stat_regs {
- __le32 ext_hw_conf; /* 80 x50 R/W */
- __le32 intChipConfiguration; /* 84 x54 */
- __le32 port_ctrl; /* 88 x58 */
- __le32 port_status; /* 92 x5c */
- __le32 HostPrimMACHi; /* 96 x60 */
- __le32 HostPrimMACLow; /* 100 x64 */
- __le32 HostSecMACHi; /* 104 x68 */
- __le32 HostSecMACLow; /* 108 x6c */
- __le32 EPPrimMACHi; /* 112 x70 */
- __le32 EPPrimMACLow; /* 116 x74 */
- __le32 EPSecMACHi; /* 120 x78 */
- __le32 EPSecMACLow; /* 124 x7c */
- __le32 HostPrimIPHi; /* 128 x80 */
- __le32 HostPrimIPMidHi; /* 132 x84 */
- __le32 HostPrimIPMidLow; /* 136 x88 */
- __le32 HostPrimIPLow; /* 140 x8c */
- __le32 HostSecIPHi; /* 144 x90 */
- __le32 HostSecIPMidHi; /* 148 x94 */
- __le32 HostSecIPMidLow; /* 152 x98 */
- __le32 HostSecIPLow; /* 156 x9c */
- __le32 EPPrimIPHi; /* 160 xa0 */
- __le32 EPPrimIPMidHi; /* 164 xa4 */
- __le32 EPPrimIPMidLow; /* 168 xa8 */
- __le32 EPPrimIPLow; /* 172 xac */
- __le32 EPSecIPHi; /* 176 xb0 */
- __le32 EPSecIPMidHi; /* 180 xb4 */
- __le32 EPSecIPMidLow; /* 184 xb8 */
- __le32 EPSecIPLow; /* 188 xbc */
- __le32 IPReassemblyTimeout; /* 192 xc0 */
- __le32 EthMaxFramePayload; /* 196 xc4 */
- __le32 TCPMaxWindowSize; /* 200 xc8 */
- __le32 TCPCurrentTimestampHi; /* 204 xcc */
- __le32 TCPCurrentTimestampLow; /* 208 xd0 */
- __le32 LocalRAMAddress; /* 212 xd4 */
- __le32 LocalRAMData; /* 216 xd8 */
- __le32 PCSReserved1; /* 220 xdc */
- __le32 gp_out; /* 224 xe0 */
- __le32 gp_in; /* 228 xe4 */
- __le32 ProbeMuxAddr; /* 232 xe8 */
- __le32 ProbeMuxData; /* 236 xec */
- __le32 ERMQueueBaseAddr0; /* 240 xf0 */
- __le32 ERMQueueBaseAddr1; /* 244 xf4 */
- __le32 MACConfiguration; /* 248 xf8 */
- __le32 port_err_status; /* 252 xfc COR */
+ __le32 ext_hw_conf; /* 0x50 R/W */
+ __le32 rsrvd0; /* 0x54 */
+ __le32 port_ctrl; /* 0x58 */
+ __le32 port_status; /* 0x5c */
+ __le32 rsrvd1[32]; /* 0x60-0xdf */
+ __le32 gp_out; /* 0xe0 */
+ __le32 gp_in; /* 0xe4 */
+ __le32 rsrvd2[5]; /* 0xe8-0xfb */
+ __le32 port_err_status; /* 0xfc */
};
struct host_mem_cfg_regs {
- __le32 NetRequestQueueOut; /* 80 x50 */
- __le32 NetRequestQueueOutAddrHi; /* 84 x54 */
- __le32 NetRequestQueueOutAddrLow; /* 88 x58 */
- __le32 NetRequestQueueBaseAddrHi; /* 92 x5c */
- __le32 NetRequestQueueBaseAddrLow; /* 96 x60 */
- __le32 NetRequestQueueLength; /* 100 x64 */
- __le32 NetResponseQueueIn; /* 104 x68 */
- __le32 NetResponseQueueInAddrHi; /* 108 x6c */
- __le32 NetResponseQueueInAddrLow; /* 112 x70 */
- __le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
- __le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
- __le32 NetResponseQueueLength; /* 124 x7c */
- __le32 req_q_out; /* 128 x80 */
- __le32 RequestQueueOutAddrHi; /* 132 x84 */
- __le32 RequestQueueOutAddrLow; /* 136 x88 */
- __le32 RequestQueueBaseAddrHi; /* 140 x8c */
- __le32 RequestQueueBaseAddrLow; /* 144 x90 */
- __le32 RequestQueueLength; /* 148 x94 */
- __le32 ResponseQueueIn; /* 152 x98 */
- __le32 ResponseQueueInAddrHi; /* 156 x9c */
- __le32 ResponseQueueInAddrLow; /* 160 xa0 */
- __le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
- __le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
- __le32 ResponseQueueLength; /* 172 xac */
- __le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
- __le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
- __le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
- __le32 NetRxLargeBufferQueueLength; /* 188 xbc */
- __le32 NetRxLargeBufferLength; /* 192 xc0 */
- __le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
- __le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
- __le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
- __le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
- __le32 NetRxSmallBufferLength; /* 212 xd4 */
- __le32 HMCReserved0[10]; /* 216 xd8 */
+ __le32 rsrvd0[12]; /* 0x50-0x79 */
+ __le32 req_q_out; /* 0x80 */
+ __le32 rsrvd1[31]; /* 0x84-0xFF */
};
-struct local_ram_cfg_regs {
- __le32 BufletSize; /* 80 x50 */
- __le32 BufletMaxCount; /* 84 x54 */
- __le32 BufletCurrCount; /* 88 x58 */
- __le32 BufletPauseThresholdCount; /* 92 x5c */
- __le32 BufletTCPWinThresholdHi; /* 96 x60 */
- __le32 BufletTCPWinThresholdLow; /* 100 x64 */
- __le32 IPHashTableBaseAddr; /* 104 x68 */
- __le32 IPHashTableSize; /* 108 x6c */
- __le32 TCPHashTableBaseAddr; /* 112 x70 */
- __le32 TCPHashTableSize; /* 116 x74 */
- __le32 NCBAreaBaseAddr; /* 120 x78 */
- __le32 NCBMaxCount; /* 124 x7c */
- __le32 NCBCurrCount; /* 128 x80 */
- __le32 DRBAreaBaseAddr; /* 132 x84 */
- __le32 DRBMaxCount; /* 136 x88 */
- __le32 DRBCurrCount; /* 140 x8c */
- __le32 LRCReserved[28]; /* 144 x90 */
-};
-
-struct prot_stat_regs {
- __le32 MACTxFrameCount; /* 80 x50 R */
- __le32 MACTxByteCount; /* 84 x54 R */
- __le32 MACRxFrameCount; /* 88 x58 R */
- __le32 MACRxByteCount; /* 92 x5c R */
- __le32 MACCRCErrCount; /* 96 x60 R */
- __le32 MACEncErrCount; /* 100 x64 R */
- __le32 MACRxLengthErrCount; /* 104 x68 R */
- __le32 IPTxPacketCount; /* 108 x6c R */
- __le32 IPTxByteCount; /* 112 x70 R */
- __le32 IPTxFragmentCount; /* 116 x74 R */
- __le32 IPRxPacketCount; /* 120 x78 R */
- __le32 IPRxByteCount; /* 124 x7c R */
- __le32 IPRxFragmentCount; /* 128 x80 R */
- __le32 IPDatagramReassemblyCount; /* 132 x84 R */
- __le32 IPV6RxPacketCount; /* 136 x88 R */
- __le32 IPErrPacketCount; /* 140 x8c R */
- __le32 IPReassemblyErrCount; /* 144 x90 R */
- __le32 TCPTxSegmentCount; /* 148 x94 R */
- __le32 TCPTxByteCount; /* 152 x98 R */
- __le32 TCPRxSegmentCount; /* 156 x9c R */
- __le32 TCPRxByteCount; /* 160 xa0 R */
- __le32 TCPTimerExpCount; /* 164 xa4 R */
- __le32 TCPRxAckCount; /* 168 xa8 R */
- __le32 TCPTxAckCount; /* 172 xac R */
- __le32 TCPRxErrOOOCount; /* 176 xb0 R */
- __le32 PSReserved0; /* 180 xb4 */
- __le32 TCPRxWindowProbeUpdateCount; /* 184 xb8 R */
- __le32 ECCErrCorrectionCount; /* 188 xbc R */
- __le32 PSReserved1[16]; /* 192 xc0 */
-};
-
-
/* remote register set (access via PCI memory read/write) */
struct isp_reg {
#define MBOX_REG_COUNT 8
@@ -207,11 +87,7 @@ struct isp_reg {
union {
struct port_ctrl_stat_regs p0;
struct host_mem_cfg_regs p1;
- struct local_ram_cfg_regs p2;
- struct prot_stat_regs p3;
- __le32 r_union[44];
};
-
} __attribute__ ((packed)) isp4022;
} u2;
}; /* 256 x100 */
@@ -296,6 +172,7 @@ static inline uint32_t clr_rmask(uint32_t val)
/* ISP Semaphore definitions */
/* ISP General Purpose Output definitions */
+#define GPOR_TOPCAT_RESET 0x00000004
/* shadow registers (DMA'd from HA to system memory. read only) */
struct shadow_regs {
@@ -337,6 +214,7 @@ union external_hw_config_reg {
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
+#define MBOX_CMD_PING 0x000B
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
#define MBOX_CMD_GET_FW_STATUS 0x001F
@@ -364,6 +242,17 @@ union external_hw_config_reg {
#define MBOX_CMD_GET_FW_STATE 0x0069
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
+#define MBOX_CMD_SET_ACB 0x0088
+#define MBOX_CMD_GET_ACB 0x0089
+#define MBOX_CMD_DISABLE_ACB 0x008A
+#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE 0x008B
+#define MBOX_CMD_GET_IPV6_DEST_CACHE 0x008C
+#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST 0x008D
+#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST 0x008E
+#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE 0x0090
+#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
+#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
+#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
@@ -409,6 +298,16 @@ union external_hw_config_reg {
#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
+#define MBOX_ASTS_DUPLICATE_IP 0x8025
+#define MBOX_ASTS_ARP_COMPLETE 0x8026
+#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
+#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
+#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
+#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
+#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
+#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
+#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
+
#define ISNS_EVENT_DATA_RECEIVED 0x0000
#define ISNS_EVENT_CONNECTION_OPENED 0x0001
#define ISNS_EVENT_CONNECTION_FAILED 0x0002
@@ -418,137 +317,166 @@ union external_hw_config_reg {
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
-struct init_fw_ctrl_blk {
- uint8_t Version; /* 00 */
- uint8_t Control; /* 01 */
+struct addr_ctrl_blk {
+ uint8_t version; /* 00 */
+ uint8_t control; /* 01 */
- uint16_t FwOptions; /* 02-03 */
+ uint16_t fw_options; /* 02-03 */
#define FWOPT_HEARTBEAT_ENABLE 0x1000
#define FWOPT_SESSION_MODE 0x0040
#define FWOPT_INITIATOR_MODE 0x0020
#define FWOPT_TARGET_MODE 0x0010
- uint16_t ExecThrottle; /* 04-05 */
- uint8_t RetryCount; /* 06 */
- uint8_t RetryDelay; /* 07 */
- uint16_t MaxEthFrPayloadSize; /* 08-09 */
- uint16_t AddFwOptions; /* 0A-0B */
-
- uint8_t HeartbeatInterval; /* 0C */
- uint8_t InstanceNumber; /* 0D */
- uint16_t RES2; /* 0E-0F */
- uint16_t ReqQConsumerIndex; /* 10-11 */
- uint16_t ComplQProducerIndex; /* 12-13 */
- uint16_t ReqQLen; /* 14-15 */
- uint16_t ComplQLen; /* 16-17 */
- uint32_t ReqQAddrLo; /* 18-1B */
- uint32_t ReqQAddrHi; /* 1C-1F */
- uint32_t ComplQAddrLo; /* 20-23 */
- uint32_t ComplQAddrHi; /* 24-27 */
- uint32_t ShadowRegBufAddrLo; /* 28-2B */
- uint32_t ShadowRegBufAddrHi; /* 2C-2F */
-
- uint16_t iSCSIOptions; /* 30-31 */
-
- uint16_t TCPOptions; /* 32-33 */
-
- uint16_t IPOptions; /* 34-35 */
-
- uint16_t MaxPDUSize; /* 36-37 */
- uint16_t RcvMarkerInt; /* 38-39 */
- uint16_t SndMarkerInt; /* 3A-3B */
- uint16_t InitMarkerlessInt; /* 3C-3D */
- uint16_t FirstBurstSize; /* 3E-3F */
- uint16_t DefaultTime2Wait; /* 40-41 */
- uint16_t DefaultTime2Retain; /* 42-43 */
- uint16_t MaxOutStndngR2T; /* 44-45 */
- uint16_t KeepAliveTimeout; /* 46-47 */
- uint16_t PortNumber; /* 48-49 */
- uint16_t MaxBurstSize; /* 4A-4B */
- uint32_t RES4; /* 4C-4F */
- uint8_t IPAddr[4]; /* 50-53 */
- uint8_t RES5[12]; /* 54-5F */
- uint8_t SubnetMask[4]; /* 60-63 */
- uint8_t RES6[12]; /* 64-6F */
- uint8_t GatewayIPAddr[4]; /* 70-73 */
- uint8_t RES7[12]; /* 74-7F */
- uint8_t PriDNSIPAddr[4]; /* 80-83 */
- uint8_t SecDNSIPAddr[4]; /* 84-87 */
- uint8_t RES8[8]; /* 88-8F */
- uint8_t Alias[32]; /* 90-AF */
- uint8_t TargAddr[8]; /* B0-B7 *//* /FIXME: Remove?? */
- uint8_t CHAPNameSecretsTable[8]; /* B8-BF */
- uint8_t EthernetMACAddr[6]; /* C0-C5 */
- uint16_t TargetPortalGroup; /* C6-C7 */
- uint8_t SendScale; /* C8 */
- uint8_t RecvScale; /* C9 */
- uint8_t TypeOfService; /* CA */
- uint8_t Time2Live; /* CB */
- uint16_t VLANPriority; /* CC-CD */
- uint16_t Reserved8; /* CE-CF */
- uint8_t SecIPAddr[4]; /* D0-D3 */
- uint8_t Reserved9[12]; /* D4-DF */
- uint8_t iSNSIPAddr[4]; /* E0-E3 */
- uint16_t iSNSServerPortNumber; /* E4-E5 */
- uint8_t Reserved10[10]; /* E6-EF */
- uint8_t SLPDAIPAddr[4]; /* F0-F3 */
- uint8_t Reserved11[12]; /* F4-FF */
- uint8_t iSCSINameString[256]; /* 100-1FF */
+ uint16_t exec_throttle; /* 04-05 */
+ uint8_t zio_count; /* 06 */
+ uint8_t res0; /* 07 */
+ uint16_t eth_mtu_size; /* 08-09 */
+ uint16_t add_fw_options; /* 0A-0B */
+
+ uint8_t hb_interval; /* 0C */
+ uint8_t inst_num; /* 0D */
+ uint16_t res1; /* 0E-0F */
+ uint16_t rqq_consumer_idx; /* 10-11 */
+ uint16_t compq_producer_idx; /* 12-13 */
+ uint16_t rqq_len; /* 14-15 */
+ uint16_t compq_len; /* 16-17 */
+ uint32_t rqq_addr_lo; /* 18-1B */
+ uint32_t rqq_addr_hi; /* 1C-1F */
+ uint32_t compq_addr_lo; /* 20-23 */
+ uint32_t compq_addr_hi; /* 24-27 */
+ uint32_t shdwreg_addr_lo; /* 28-2B */
+ uint32_t shdwreg_addr_hi; /* 2C-2F */
+
+ uint16_t iscsi_opts; /* 30-31 */
+ uint16_t ipv4_tcp_opts; /* 32-33 */
+ uint16_t ipv4_ip_opts; /* 34-35 */
+
+ uint16_t iscsi_max_pdu_size; /* 36-37 */
+ uint8_t ipv4_tos; /* 38 */
+ uint8_t ipv4_ttl; /* 39 */
+ uint8_t acb_version; /* 3A */
+ uint8_t res2; /* 3B */
+ uint16_t def_timeout; /* 3C-3D */
+ uint16_t iscsi_fburst_len; /* 3E-3F */
+ uint16_t iscsi_def_time2wait; /* 40-41 */
+ uint16_t iscsi_def_time2retain; /* 42-43 */
+ uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
+ uint16_t conn_ka_timeout; /* 46-47 */
+ uint16_t ipv4_port; /* 48-49 */
+ uint16_t iscsi_max_burst_len; /* 4A-4B */
+ uint32_t res5; /* 4C-4F */
+ uint8_t ipv4_addr[4]; /* 50-53 */
+ uint16_t ipv4_vlan_tag; /* 54-55 */
+ uint8_t ipv4_addr_state; /* 56 */
+ uint8_t ipv4_cacheid; /* 57 */
+ uint8_t res6[8]; /* 58-5F */
+ uint8_t ipv4_subnet[4]; /* 60-63 */
+ uint8_t res7[12]; /* 64-6F */
+ uint8_t ipv4_gw_addr[4]; /* 70-73 */
+ uint8_t res8[0xc]; /* 74-7F */
+ uint8_t pri_dns_srvr_ip[4];/* 80-83 */
+ uint8_t sec_dns_srvr_ip[4];/* 84-87 */
+ uint16_t min_eph_port; /* 88-89 */
+ uint16_t max_eph_port; /* 8A-8B */
+ uint8_t res9[4]; /* 8C-8F */
+ uint8_t iscsi_alias[32];/* 90-AF */
+ uint8_t res9_1[0x16]; /* B0-C5 */
+ uint16_t tgt_portal_grp;/* C6-C7 */
+ uint8_t abort_timer; /* C8 */
+ uint8_t ipv4_tcp_wsf; /* C9 */
+ uint8_t res10[6]; /* CA-CF */
+ uint8_t ipv4_sec_ip_addr[4]; /* D0-D3 */
+ uint8_t ipv4_dhcp_vid_len; /* D4 */
+ uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
+ uint8_t res11[20]; /* E0-F3 */
+ uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
+ uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
+ uint8_t iscsi_name[224]; /* 100-1DF */
+ uint8_t res12[32]; /* 1E0-1FF */
+ uint32_t cookie; /* 200-203 */
+ uint16_t ipv6_port; /* 204-205 */
+ uint16_t ipv6_opts; /* 206-207 */
+ uint16_t ipv6_addtl_opts; /* 208-209 */
+ uint16_t ipv6_tcp_opts; /* 20A-20B */
+ uint8_t ipv6_tcp_wsf; /* 20C */
+ uint16_t ipv6_flow_lbl; /* 20D-20F */
+ uint8_t ipv6_gw_addr[16]; /* 210-21F */
+ uint16_t ipv6_vlan_tag; /* 220-221 */
+ uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
+ uint8_t ipv6_addr0_state; /* 223 */
+ uint8_t ipv6_addr1_state; /* 224 */
+ uint8_t ipv6_gw_state; /* 225 */
+ uint8_t ipv6_traffic_class; /* 226 */
+ uint8_t ipv6_hop_limit; /* 227 */
+ uint8_t ipv6_if_id[8]; /* 228-22F */
+ uint8_t ipv6_addr0[16]; /* 230-23F */
+ uint8_t ipv6_addr1[16]; /* 240-24F */
+ uint32_t ipv6_nd_reach_time; /* 250-253 */
+ uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
+ uint32_t ipv6_nd_stale_timeout; /* 258-25B */
+ uint8_t ipv6_dup_addr_detect_count; /* 25C */
+ uint8_t ipv6_cache_id; /* 25D */
+ uint8_t res13[18]; /* 25E-26F */
+ uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
+ uint8_t res14[140]; /* 274-2FF */
+};
+
+struct init_fw_ctrl_blk {
+ struct addr_ctrl_blk pri;
+ struct addr_ctrl_blk sec;
};
/*************************************************************************/
struct dev_db_entry {
- uint8_t options; /* 00 */
+ uint16_t options; /* 00-01 */
#define DDB_OPT_DISC_SESSION 0x10
#define DDB_OPT_TARGET 0x02 /* device is a target */
- uint8_t control; /* 01 */
-
- uint16_t exeThrottle; /* 02-03 */
- uint16_t exeCount; /* 04-05 */
- uint8_t retryCount; /* 06 */
- uint8_t retryDelay; /* 07 */
- uint16_t iSCSIOptions; /* 08-09 */
-
- uint16_t TCPOptions; /* 0A-0B */
-
- uint16_t IPOptions; /* 0C-0D */
-
- uint16_t maxPDUSize; /* 0E-0F */
- uint16_t rcvMarkerInt; /* 10-11 */
- uint16_t sndMarkerInt; /* 12-13 */
- uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
- uint16_t firstBurstSize; /* 16-17 */
- uint16_t minTime2Wait; /* 18-19 : RA :default_time2wait */
- uint16_t maxTime2Retain; /* 1A-1B */
- uint16_t maxOutstndngR2T; /* 1C-1D */
- uint16_t keepAliveTimeout; /* 1E-1F */
- uint8_t ISID[6]; /* 20-25 big-endian, must be converted
+ uint16_t exec_throttle; /* 02-03 */
+ uint16_t exec_count; /* 04-05 */
+ uint16_t res0; /* 06-07 */
+ uint16_t iscsi_options; /* 08-09 */
+ uint16_t tcp_options; /* 0A-0B */
+ uint16_t ip_options; /* 0C-0D */
+ uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
+ uint32_t res1; /* 10-13 */
+ uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
+ uint16_t iscsi_first_burst_len; /* 16-17 */
+ uint16_t iscsi_def_time2wait; /* 18-19 */
+ uint16_t iscsi_def_time2retain; /* 1A-1B */
+ uint16_t iscsi_max_outsnd_r2t; /* 1C-1D */
+ uint16_t ka_timeout; /* 1E-1F */
+ uint8_t isid[6]; /* 20-25 big-endian, must be converted
* to little-endian */
- uint16_t TSID; /* 26-27 */
- uint16_t portNumber; /* 28-29 */
- uint16_t maxBurstSize; /* 2A-2B */
- uint16_t taskMngmntTimeout; /* 2C-2D */
- uint16_t reserved1; /* 2E-2F */
- uint8_t ipAddr[0x10]; /* 30-3F */
- uint8_t iSCSIAlias[0x20]; /* 40-5F */
- uint8_t targetAddr[0x20]; /* 60-7F */
- uint8_t userID[0x20]; /* 80-9F */
- uint8_t password[0x20]; /* A0-BF */
- uint8_t iscsiName[0x100]; /* C0-1BF : xxzzy Make this a
+ uint16_t tsid; /* 26-27 */
+ uint16_t port; /* 28-29 */
+ uint16_t iscsi_max_burst_len; /* 2A-2B */
+ uint16_t def_timeout; /* 2C-2D */
+ uint16_t res2; /* 2E-2F */
+ uint8_t ip_addr[0x10]; /* 30-3F */
+ uint8_t iscsi_alias[0x20]; /* 40-5F */
+ uint8_t tgt_addr[0x20]; /* 60-7F */
+ uint16_t mss; /* 80-81 */
+ uint16_t res3; /* 82-83 */
+ uint16_t lcl_port; /* 84-85 */
+ uint8_t ipv4_tos; /* 86 */
+ uint16_t ipv6_flow_lbl; /* 87-89 */
+ uint8_t res4[0x36]; /* 8A-BF */
+ uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
* pointer to a string so we
* don't have to reserve soooo
* much RAM */
- uint16_t ddbLink; /* 1C0-1C1 */
- uint16_t CHAPTableIndex; /* 1C2-1C3 */
- uint16_t TargetPortalGroup; /* 1C4-1C5 */
- uint16_t reserved2[2]; /* 1C6-1C7 */
- uint32_t statSN; /* 1C8-1CB */
- uint32_t expStatSN; /* 1CC-1CF */
- uint16_t reserved3[0x2C]; /* 1D0-1FB */
- uint16_t ddbValidCookie; /* 1FC-1FD */
- uint16_t ddbValidSize; /* 1FE-1FF */
+ uint8_t ipv6_addr[0x10];/* 1A0-1AF */
+ uint8_t res5[0x10]; /* 1B0-1BF */
+ uint16_t ddb_link; /* 1C0-1C1 */
+ uint16_t chap_tbl_idx; /* 1C2-1C3 */
+ uint16_t tgt_portal_grp; /* 1C4-1C5 */
+ uint8_t tcp_xmt_wsf; /* 1C6 */
+ uint8_t tcp_rcv_wsf; /* 1C7 */
+ uint32_t stat_sn; /* 1C8-1CB */
+ uint32_t exp_stat_sn; /* 1CC-1CF */
+ uint8_t res6[0x30]; /* 1D0-1FF */
};
/*************************************************************************/
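
The rewritten ql4_fw.h structures above document their byte offsets in comments (struct addr_ctrl_blk out to 0x2FF, struct dev_db_entry out to 0x1FF). When a host structure must match a firmware layout exactly, compile-time offset checks are one way to guard against accidental padding; the C11 sketch below shows the idea on a trimmed stand-in struct and is not part of the patch.

	#include <stddef.h>
	#include <stdint.h>

	/* Trimmed stand-in for the first eight bytes of struct addr_ctrl_blk;
	 * the real structure continues out to offset 0x2FF. */
	struct example_acb {
		uint8_t  version;	/* 00 */
		uint8_t  control;	/* 01 */
		uint16_t fw_options;	/* 02-03 */
		uint16_t exec_throttle;	/* 04-05 */
		uint8_t  zio_count;	/* 06 */
		uint8_t  res0;		/* 07 */
	};

	_Static_assert(offsetof(struct example_acb, fw_options) == 0x02,
		       "fw_options must start at byte 2");
	_Static_assert(offsetof(struct example_acb, zio_count) == 0x06,
		       "zio_count must start at byte 6");
	_Static_assert(sizeof(struct example_acb) == 0x08,
		       "example block must be exactly 8 bytes");

	int main(void) { return 0; }
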
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b00cb04e7c0..a3608e028bf6 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,9 @@
#ifndef __QLA4x_GBL_H
#define __QLA4x_GBL_H
+struct iscsi_cls_conn;
+
+void qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
@@ -58,11 +61,13 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host * ha);
-struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
+struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
+ uint32_t index);
void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
uint32_t fw_ddb_index, uint32_t state);
+void qla4xxx_dump_buffer(void *b, uint32_t size);
extern int ql4xextended_error_logging;
extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 6365df268612..1e29f51d596b 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
uint32_t fw_ddb_index);
@@ -300,12 +303,12 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
if (!qla4xxx_fw_ready(ha))
return status;
- set_bit(AF_ONLINE, &ha->flags);
return qla4xxx_get_firmware_status(ha);
}
static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
- uint32_t fw_ddb_index)
+ uint32_t fw_ddb_index,
+ uint32_t *new_tgt)
{
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
@@ -313,6 +316,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
int found = 0;
uint32_t device_state;
+ *new_tgt = 0;
/* Make sure the dma buffer is valid */
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
sizeof(*fw_ddb_entry),
@@ -337,7 +341,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
__func__, fw_ddb_index));
list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
- if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName,
+ if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
ISCSI_NAME_SIZE) == 0) {
found++;
break;
@@ -348,6 +352,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
"new ddb\n", ha->host_no, __func__,
fw_ddb_index));
+ *new_tgt = 1;
ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
}
@@ -409,26 +414,26 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
}
status = QLA_SUCCESS;
- ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID);
+ ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
ddb_entry->task_mgmt_timeout =
- le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
+ le16_to_cpu(fw_ddb_entry->def_timeout);
ddb_entry->CmdSn = 0;
- ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle);
+ ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
ddb_entry->default_relogin_timeout =
- le16_to_cpu(fw_ddb_entry->taskMngmntTimeout);
- ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait);
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
/* Update index in case it changed */
ddb_entry->fw_ddb_index = fw_ddb_index;
ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
- ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber);
- ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup);
- memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0],
+ ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
+ ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
min(sizeof(ddb_entry->iscsi_name),
- sizeof(fw_ddb_entry->iscsiName)));
- memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0],
- min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr)));
+ sizeof(fw_ddb_entry->iscsi_name)));
+ memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
+ min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
ha->host_no, __func__, fw_ddb_index,
@@ -495,6 +500,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
uint32_t ddb_state;
uint32_t conn_err, err_code;
struct ddb_entry *ddb_entry;
+ uint32_t new_tgt;
dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
@@ -526,8 +532,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
"completed "
"or access denied failure\n",
ha->host_no, __func__));
- } else
+ } else {
qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
+ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
+ NULL, 0, NULL, &next_fw_ddb_index,
+ &ddb_state, &conn_err, NULL, NULL)
+ == QLA_ERROR) {
+ DEBUG2(printk("scsi%ld: %s:"
+ "get_ddb_entry %d failed\n",
+ ha->host_no,
+ __func__, fw_ddb_index));
+ return QLA_ERROR;
+ }
+ }
}
if (ddb_state != DDB_DS_SESSION_ACTIVE)
@@ -540,7 +557,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
ha->host_no, __func__, fw_ddb_index));
/* Add DDB to internal our ddb list. */
- ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
+ ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
if (ddb_entry == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
"for device at fw_ddb_index %d\n",
@@ -865,21 +882,20 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
static void qla4x00_pci_config(struct scsi_qla_host *ha)
{
- uint16_t w, mwi;
+ uint16_t w;
+ int status;
dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
pci_set_master(ha->pdev);
- mwi = 0;
- if (pci_set_mwi(ha->pdev))
- mwi = PCI_COMMAND_INVALIDATE;
+ status = pci_set_mwi(ha->pdev);
/*
* We want to respect framework's setting of PCI configuration space
* command register and also want to make sure that all bits of
* interest to us are properly set in command register.
*/
pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
- w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
w &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
}
@@ -911,6 +927,9 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
writel(set_rmask(NVR_WRITE_ENABLE),
&ha->reg->u1.isp4022.nvram);
+ writel(2, &ha->reg->mailbox[6]);
+ readl(&ha->reg->mailbox[6]);
+
writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -958,25 +977,25 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
return status;
}
-int ql4xxx_lock_drvr_wait(struct scsi_qla_host *ha)
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
{
-#define QL4_LOCK_DRVR_WAIT 30
+#define QL4_LOCK_DRVR_WAIT 60
#define QL4_LOCK_DRVR_SLEEP 1
int drvr_wait = QL4_LOCK_DRVR_WAIT;
while (drvr_wait) {
- if (ql4xxx_lock_drvr(ha) == 0) {
+ if (ql4xxx_lock_drvr(a) == 0) {
ssleep(QL4_LOCK_DRVR_SLEEP);
if (drvr_wait) {
DEBUG2(printk("scsi%ld: %s: Waiting for "
- "Global Init Semaphore(%d)...n",
- ha->host_no,
+ "Global Init Semaphore(%d)...\n",
+ a->host_no,
__func__, drvr_wait));
}
drvr_wait -= QL4_LOCK_DRVR_SLEEP;
} else {
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
- "acquired.n", ha->host_no, __func__));
+ "acquired\n", a->host_no, __func__));
return QLA_SUCCESS;
}
}
@@ -1125,17 +1144,17 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
/* Initialize the Host adapter request/response queues and firmware */
if (qla4xxx_start_firmware(ha) == QLA_ERROR)
- return status;
+ goto exit_init_hba;
if (qla4xxx_validate_mac_address(ha) == QLA_ERROR)
- return status;
+ goto exit_init_hba;
if (qla4xxx_init_local_data(ha) == QLA_ERROR)
- return status;
+ goto exit_init_hba;
status = qla4xxx_init_firmware(ha);
if (status == QLA_ERROR)
- return status;
+ goto exit_init_hba;
/*
* FW is waiting to get an IP address from DHCP server: Skip building
@@ -1143,12 +1162,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
* followed by 0x8014 aen" to trigger the tgt discovery process.
*/
if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
- return status;
+ goto exit_init_online;
/* Skip device discovery if ip and subnet is zero */
if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
- return status;
+ goto exit_init_online;
if (renew_ddb_list == PRESERVE_DDB_LIST) {
/*
@@ -1177,9 +1196,10 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
ha->host_no));
}
- exit_init_hba:
+exit_init_online:
+ set_bit(AF_ONLINE, &ha->flags);
+exit_init_hba:
return status;
-
}
/**
@@ -1193,9 +1213,10 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
uint32_t fw_ddb_index)
{
struct ddb_entry * ddb_entry;
+ uint32_t new_tgt;
/* First allocate a device structure */
- ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index);
+ ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
if (ddb_entry == NULL) {
DEBUG2(printk(KERN_WARNING
"scsi%ld: Unable to allocate memory to add "
@@ -1203,6 +1224,18 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
return;
}
+ if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
+ /* Target has been bound to a new fw_ddb_index */
+ qla4xxx_free_ddb(ha, ddb_entry);
+ ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
+ if (ddb_entry == NULL) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi%ld: Unable to allocate memory"
+ " to add fw_ddb_index %d\n",
+ ha->host_no, fw_ddb_index));
+ return;
+ }
+ }
if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
QLA_ERROR) {
ha->fw_ddb_index_map[fw_ddb_index] =
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index a216a1781afb..5006ecb3ef5e 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -6,6 +6,10 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
#include <scsi/scsi_tcq.h>
@@ -141,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
uint16_t avail_dsds;
struct data_seg_a64 *cur_dsd;
struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i;
cmd = srb->cmd;
ha = srb->ha;
- if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
/* No data being transferred */
cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
return;
@@ -154,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
avail_dsds = COMMAND_SEG;
cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
- /* Load data segments */
- if (cmd->use_sg) {
- struct scatterlist *cur_seg;
- struct scatterlist *end_seg;
-
- cur_seg = (struct scatterlist *)cmd->request_buffer;
- end_seg = cur_seg + tot_dsds;
- while (cur_seg < end_seg) {
- dma_addr_t sle_dma;
-
- /* Allocate additional continuation packets? */
- if (avail_dsds == 0) {
- struct continuation_t1_entry *cont_entry;
-
- cont_entry = qla4xxx_alloc_cont_entry(ha);
- cur_dsd =
- (struct data_seg_a64 *)
- &cont_entry->dataseg[0];
- avail_dsds = CONTINUE_SEG;
- }
-
- sle_dma = sg_dma_address(cur_seg);
- cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
- cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
- cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
- avail_dsds--;
-
- cur_dsd++;
- cur_seg++;
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ struct continuation_t1_entry *cont_entry;
+
+ cont_entry = qla4xxx_alloc_cont_entry(ha);
+ cur_dsd =
+ (struct data_seg_a64 *)
+ &cont_entry->dataseg[0];
+ avail_dsds = CONTINUE_SEG;
}
- } else {
- cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
- cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
- cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
+
+ sle_dma = sg_dma_address(sg);
+ cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+ cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+ cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ cur_dsd++;
}
}
@@ -204,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
struct scsi_cmnd *cmd = srb->cmd;
struct ddb_entry *ddb_entry;
struct command_t3_entry *cmd_entry;
- struct scatterlist *sg = NULL;
+ int nseg;
uint16_t tot_dsds;
uint16_t req_cnt;
@@ -233,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
index = (uint32_t)cmd->request->tag;
/* Calculate the number of request entries needed. */
- if (cmd->use_sg) {
- sg = (struct scatterlist *)cmd->request_buffer;
- tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- if (tot_dsds == 0)
- goto queuing_error;
- } else if (cmd->request_bufflen) {
- dma_addr_t req_dma;
-
- req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- if (dma_mapping_error(req_dma))
- goto queuing_error;
-
- srb->dma_handle = req_dma;
- tot_dsds = 1;
- }
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ goto queuing_error;
+ tot_dsds = nseg;
+
req_cnt = qla4xxx_calc_request_entries(tot_dsds);
if (ha->req_q_count < (req_cnt + 2)) {
@@ -279,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
- cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
+ cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
cmd_entry->hdr.entryCount = req_cnt;
@@ -289,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
* transferred, as the data direction bit is sometimed filled
* in when there is no data to be transferred */
cmd_entry->control_flags = CF_NO_DATA;
- if (cmd->request_bufflen) {
+ if (scsi_bufflen(cmd)) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
cmd_entry->control_flags = CF_WRITE;
else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
cmd_entry->control_flags = CF_READ;
- ha->bytes_xfered += cmd->request_bufflen;
+ ha->bytes_xfered += scsi_bufflen(cmd);
if (ha->bytes_xfered & ~0xFFFFF){
ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
ha->bytes_xfered &= 0xFFFFF;
@@ -359,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
return QLA_SUCCESS;
queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
- if (cmd->use_sg && tot_dsds) {
- sg = (struct scatterlist *) cmd->request_buffer;
- pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
- cmd->sc_data_direction);
- } else if (tot_dsds)
- pci_unmap_single(ha->pdev, srb->dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_ERROR;
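
The ql4_iocb.c hunks above, like the earlier qla2xxx ones, drop the open-coded cmd->use_sg / cmd->request_buffer handling in favour of the generic SCSI DMA accessors scsi_dma_map(), scsi_for_each_sg(), scsi_bufflen() and scsi_dma_unmap(). A minimal kernel-style sketch of that pattern follows; example_map_and_walk() is a hypothetical helper, not a function from the driver, and a real driver would keep the mapping until command completion.

	#include <linux/errno.h>
	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>

	/* Hypothetical helper: map the command's scatterlist and walk the
	 * mapped segments the way the reworked IOCB builders do. */
	static int example_map_and_walk(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int nseg, i;

		nseg = scsi_dma_map(cmd);	/* 0 for no-data commands, < 0 on error */
		if (nseg < 0)
			return -EIO;

		scsi_for_each_sg(cmd, sg, nseg, i) {
			dma_addr_t addr = sg_dma_address(sg);
			unsigned int len = sg_dma_len(sg);

			/* program addr/len into one hardware data segment here */
			(void)addr;
			(void)len;
		}

		scsi_dma_unmap(cmd);	/* in the driver this happens at completion */
		return 0;
	}
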
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 35b9e36a0e8d..4a154beb0d39 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
/**
* qla2x00_process_completed_request() - Process a Fast Post response.
@@ -90,9 +93,29 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
}
- if (sts_entry->iscsiFlags &
- (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
- cmd->resid = residual;
+ if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+ cmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
+ scsi_set_resid(cmd, residual);
+ if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
+ cmd->underflow)) {
+
+ cmd->result = DID_ERROR << 16;
+
+ DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
+ "Mid-layer Data underrun0, "
+ "xferlen = 0x%x, "
+ "residual = 0x%x\n", ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd), residual));
+ break;
+ }
+ }
cmd->result = DID_OK << 16 | scsi_status;
@@ -161,7 +184,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
case SCS_DATA_UNDERRUN:
case SCS_DATA_OVERRUN:
- if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
+ if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
+ (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
"residual = 0x%x\n", ha->host_no,
cmd->device->channel, cmd->device->id,
@@ -171,21 +195,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
}
- if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
- /*
- * Firmware detected a SCSI transport underrun
- * condition
- */
- cmd->resid = residual;
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
- "detected, xferlen = 0x%x, residual = "
- "0x%x\n",
- ha->host_no, cmd->device->channel,
- cmd->device->id,
- cmd->device->lun, __func__,
- cmd->request_bufflen,
- residual));
- }
+ scsi_set_resid(cmd, residual);
/*
	 * If there is scsi_status, it takes precedence over
@@ -227,7 +237,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if ((sts_entry->iscsiFlags &
ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
cmd->result = DID_BUS_BUSY << 16;
- } else if ((cmd->request_bufflen - residual) <
+ } else if ((scsi_bufflen(cmd) - residual) <
cmd->underflow) {
/*
* Handle mid-layer underflow???
@@ -242,13 +252,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
* will return DID_ERROR.
*/
DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
- "Mid-layer Data underrun, "
- "xferlen = 0x%x, "
- "residual = 0x%x\n", ha->host_no,
- cmd->device->channel,
- cmd->device->id,
- cmd->device->lun, __func__,
- cmd->request_bufflen, residual));
+ "Mid-layer Data underrun1, "
+ "xferlen = 0x%x, "
+ "residual = 0x%x\n", ha->host_no,
+ cmd->device->channel,
+ cmd->device->id,
+ cmd->device->lun, __func__,
+ scsi_bufflen(cmd), residual));
cmd->result = DID_ERROR << 16;
} else {
@@ -417,6 +427,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
uint32_t mbox_status)
{
int i;
+ uint32_t mbox_stat2, mbox_stat3;
if ((mbox_status == MBOX_STS_BUSY) ||
(mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -437,6 +448,12 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
/* Immediately process the AENs that don't require much work.
* Only queue the database_changed AENs */
+ if (ha->aen_log.count < MAX_AEN_ENTRIES) {
+ for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
+ ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
+ readl(&ha->reg->mailbox[i]);
+ ha->aen_log.count++;
+ }
switch (mbox_status) {
case MBOX_ASTS_SYSTEM_ERROR:
/* Log Mailbox registers */
@@ -493,6 +510,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
mbox_status));
break;
+ case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
+ mbox_stat2 = readl(&ha->reg->mailbox[2]);
+ mbox_stat3 = readl(&ha->reg->mailbox[3]);
+
+ if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
+ set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
+ else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ break;
+
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
case MBOX_ASTS_DNS:
/* No action */
@@ -518,11 +545,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
/* Queue AEN information and process it in the DPC
* routine */
if (ha->aen_q_count > 0) {
- /* advance pointer */
- if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
- ha->aen_in = 0;
- else
- ha->aen_in++;
/* decrement available counter */
ha->aen_q_count--;
@@ -542,6 +564,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
ha->aen_q[ha->aen_in].mbox_sts[2],
ha->aen_q[ha->aen_in].mbox_sts[3],
			      ha->aen_q[ha->aen_in].mbox_sts[4]));
+ /* advance pointer */
+ ha->aen_in++;
+ if (ha->aen_in == MAX_AEN_ENTRIES)
+ ha->aen_in = 0;
/* The DPC routine will process the aen */
set_bit(DPC_AEN, &ha->dpc_flags);
@@ -724,25 +750,24 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
spin_lock_irqsave(&ha->hardware_lock, flags);
while (ha->aen_out != ha->aen_in) {
- /* Advance pointers for next entry */
- if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
- ha->aen_out = 0;
- else
- ha->aen_out++;
-
- ha->aen_q_count++;
aen = &ha->aen_q[ha->aen_out];
-
/* copy aen information to local structure */
for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
mbox_sts[i] = aen->mbox_sts[i];
+ ha->aen_q_count++;
+ ha->aen_out++;
+
+ if (ha->aen_out == MAX_AEN_ENTRIES)
+ ha->aen_out = 0;
+
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
- "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
- mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[1], mbox_sts[4]));
+ DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
+ " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
+ (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
+ mbox_sts[0], mbox_sts[1], mbox_sts[2],
+ mbox_sts[3], mbox_sts[4]));
switch (mbox_sts[0]) {
case MBOX_ASTS_DATABASE_CHANGED:
@@ -792,6 +817,5 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
spin_lock_irqsave(&ha->hardware_lock, flags);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
}
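
The qla4xxx_isr_decode_mailbox()/qla4xxx_process_aen() hunks move the aen_in/aen_out updates so each queue slot is filled or copied before the index advances, and replace the pre-advance wrap test with a post-increment that wraps at MAX_AEN_ENTRIES. A small sketch of that ring discipline, using a stand-in RING_ENTRIES size; the driver itself additionally holds ha->hardware_lock around these updates:

#define RING_ENTRIES 64			/* stands in for MAX_AEN_ENTRIES */

struct example_ring {
	unsigned int in, out;
	int slot[RING_ENTRIES];
};

static void example_produce(struct example_ring *r, int v)
{
	r->slot[r->in] = v;		/* fill the current slot first */
	r->in++;			/* then advance the producer index */
	if (r->in == RING_ENTRIES)	/* and wrap at the end of the array */
		r->in = 0;
}

static int example_consume(struct example_ring *r)
{
	int v = r->slot[r->out];	/* read the slot before advancing */

	r->out++;
	if (r->out == RING_ENTRIES)
		r->out = 0;
	return v;
}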
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index f116ff917237..35cd73c72a68 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
/**
@@ -169,84 +172,6 @@ mbox_exit:
return status;
}
-
-#if 0
-
-/**
- * qla4xxx_issue_iocb - issue mailbox iocb command
- * @ha: adapter state pointer.
- * @buffer: buffer pointer.
- * @phys_addr: physical address of buffer.
- * @size: size of buffer.
- *
- * Issues iocbs via mailbox commands.
- * TARGET_QUEUE_LOCK must be released.
- * ADAPTER_STATE_LOCK must be released.
- **/
-int
-qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
- dma_addr_t phys_addr, size_t size)
-{
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
- int status;
-
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
- mbox_cmd[1] = 0;
- mbox_cmd[2] = LSDW(phys_addr);
- mbox_cmd[3] = MSDW(phys_addr);
- status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
- return status;
-}
-
-int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
- uint16_t fw_ddb_index,
- uint16_t connection_id,
- uint16_t option)
-{
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
-
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
- mbox_cmd[1] = fw_ddb_index;
- mbox_cmd[2] = connection_id;
- mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
- if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS) {
- DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
- "option %04x failed sts %04X %04X",
- ha->host_no, __func__,
- option, mbox_sts[0], mbox_sts[1]));
- if (mbox_sts[0] == 0x4005)
- DEBUG2(printk("%s reason %04X\n", __func__,
- mbox_sts[1]));
- }
- return QLA_SUCCESS;
-}
-
-int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
- uint16_t fw_ddb_index)
-{
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
-
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
- mbox_cmd[1] = fw_ddb_index;
- if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
- QLA_SUCCESS)
- return QLA_ERROR;
-
- return QLA_SUCCESS;
-}
-
-#endif /* 0 */
-
/**
* qla4xxx_initialize_fw_cb - initializes firmware control block.
* @ha: Pointer to host adapter structure.
@@ -272,10 +197,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
dma_free_coherent(&ha->pdev->dev,
sizeof(struct init_fw_ctrl_blk),
@@ -287,51 +215,56 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
qla4xxx_init_rings(ha);
/* Fill in the request and response queue information. */
- init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out);
- init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in);
- init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
- init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
- init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma));
- init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma));
- init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma));
- init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma));
- init_fw_cb->ShadowRegBufAddrLo =
+ init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
+ init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
+ init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
+ init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
+ init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
+ init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
+ init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
+ init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
+ init_fw_cb->pri.shdwreg_addr_lo =
cpu_to_le32(LSDW(ha->shadow_regs_dma));
- init_fw_cb->ShadowRegBufAddrHi =
+ init_fw_cb->pri.shdwreg_addr_hi =
cpu_to_le32(MSDW(ha->shadow_regs_dma));
/* Set up required options. */
- init_fw_cb->FwOptions |=
+ init_fw_cb->pri.fw_options |=
__constant_cpu_to_le16(FWOPT_SESSION_MODE |
FWOPT_INITIATOR_MODE);
- init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
+ init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
/* Save some info in adapter structure. */
- ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions);
- ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions);
- ha->heartbeat_interval = init_fw_cb->HeartbeatInterval;
- memcpy(ha->ip_address, init_fw_cb->IPAddr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
- memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
- memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
- memcpy(ha->name_string, init_fw_cb->iSCSINameString,
+ ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
+ ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
+ ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
+ memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
+ memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
min(sizeof(ha->name_string),
- sizeof(init_fw_cb->iSCSINameString)));
- memcpy(ha->alias, init_fw_cb->Alias,
- min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));
+ sizeof(init_fw_cb->pri.iscsi_name)));
+ /*memcpy(ha->alias, init_fw_cb->Alias,
+ min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
	/* Save Command Line Parameter info */
- ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout);
+ ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
ha->discovery_wait = ql4xdiscoverywait;
/* Send Initialize Firmware Control Block. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
mbox_cmd[1] = 0;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) ==
+ mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
QLA_SUCCESS)
status = QLA_SUCCESS;
else {
@@ -368,12 +301,14 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
mbox_cmd[2] = LSDW(init_fw_cb_dma);
mbox_cmd[3] = MSDW(init_fw_cb_dma);
+ mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
ha->host_no, __func__));
@@ -384,12 +319,12 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Save IP Address. */
- memcpy(ha->ip_address, init_fw_cb->IPAddr,
- min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
- memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
- min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
- memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
- min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
+ memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
+ min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
+ memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
+ min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
+ memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
+ min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
init_fw_cb, init_fw_cb_dma);
@@ -409,8 +344,10 @@ int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
/* Get firmware version */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
- if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
"status %04X\n", ha->host_no, __func__,
@@ -438,8 +375,10 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
/* Get firmware version */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
- if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
"status %04X\n", ha->host_no, __func__,
@@ -491,11 +430,14 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
}
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
mbox_cmd[1] = (uint32_t) fw_ddb_index;
mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
- if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) ==
+ mbox_cmd[4] = sizeof(struct dev_db_entry);
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
QLA_ERROR) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
" with status 0x%04X\n", ha->host_no, __func__,
@@ -512,11 +454,11 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
"State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
- mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0],
- fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2],
- fw_ddb_entry->ipAddr[3],
- le16_to_cpu(fw_ddb_entry->portNumber),
- fw_ddb_entry->iscsiName);
+ mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
+ fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
+ fw_ddb_entry->ip_addr[3],
+ le16_to_cpu(fw_ddb_entry->port),
+ fw_ddb_entry->iscsi_name);
}
if (num_valid_ddb_entries)
*num_valid_ddb_entries = mbox_sts[2];
@@ -571,35 +513,10 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
mbox_cmd[1] = (uint32_t) fw_ddb_index;
mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
- return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
-}
+ mbox_cmd[4] = sizeof(struct dev_db_entry);
-#if 0
-int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
- uint16_t fw_ddb_index)
-{
- int status = QLA_ERROR;
- uint32_t mbox_cmd[MBOX_REG_COUNT];
- uint32_t mbox_sts[MBOX_REG_COUNT];
-
- /* Do not wait for completion. The firmware will send us an
- * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
- */
- memset(&mbox_cmd, 0, sizeof(mbox_cmd));
- memset(&mbox_sts, 0, sizeof(mbox_sts));
- mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
- mbox_cmd[1] = (uint32_t) fw_ddb_index;
- mbox_cmd[2] = 0;
- mbox_cmd[3] = 0;
- mbox_cmd[4] = 0;
- status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
- DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
- __func__, fw_ddb_index, status, mbox_sts[0],
- mbox_sts[1]);)
-
- return status;
+ return qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
}
-#endif /* 0 */
/**
* qla4xxx_get_crash_record - retrieves crash record.
@@ -614,12 +531,14 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
struct crash_record *crash_record = NULL;
dma_addr_t crash_record_dma = 0;
uint32_t crash_record_size = 0;
+
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_cmd));
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
- if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
ha->host_no, __func__));
@@ -639,11 +558,15 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
goto exit_get_crash_record;
/* Get Crash Record. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_cmd));
+
mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
mbox_cmd[2] = LSDW(crash_record_dma);
mbox_cmd[3] = MSDW(crash_record_dma);
mbox_cmd[4] = crash_record_size;
- if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS)
goto exit_get_crash_record;
@@ -655,7 +578,6 @@ exit_get_crash_record:
crash_record, crash_record_dma);
}
-#if 0
/**
* qla4xxx_get_conn_event_log - retrieves connection event log
* @ha: Pointer to host adapter structure.
@@ -678,7 +600,8 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
/* Get size of crash record. */
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
- if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS)
goto exit_get_event_log;
@@ -693,10 +616,14 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
goto exit_get_event_log;
/* Get Crash Record. */
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_cmd));
+
mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
mbox_cmd[2] = LSDW(event_log_dma);
mbox_cmd[3] = MSDW(event_log_dma);
- if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
"log!\n", ha->host_no, __func__));
@@ -745,7 +672,6 @@ exit_get_event_log:
dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
event_log_dma);
}
-#endif /* 0 */
/**
* qla4xxx_reset_lun - issues LUN Reset
@@ -773,11 +699,13 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_LUN_RESET;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
mbox_cmd[2] = lun << 8;
mbox_cmd[5] = 0x01; /* Immediate Command Enable */
- qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]);
+
+ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
status = QLA_ERROR;
@@ -794,12 +722,14 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_READ_FLASH;
mbox_cmd[1] = LSDW(dma_addr);
mbox_cmd[2] = MSDW(dma_addr);
mbox_cmd[3] = offset;
mbox_cmd[4] = len;
- if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
"status %04X %04X, offset %08x, len %08x\n", ha->host_no,
@@ -825,8 +755,10 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
/* Get firmware version. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+
mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
- if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
+
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
"status %04X\n", ha->host_no, __func__, mbox_sts[0]));
@@ -855,7 +787,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
mbox_cmd[2] = LSDW(dma_addr);
mbox_cmd[3] = MSDW(dma_addr);
- if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
ha->host_no, __func__, mbox_sts[0]));
@@ -875,7 +807,7 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
- if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) !=
+ if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
QLA_SUCCESS) {
if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
*ddb_index = mbox_sts[2];
@@ -918,23 +850,23 @@ int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
if (ret_val != QLA_SUCCESS)
goto qla4xxx_send_tgts_exit;
- memset((void *)fw_ddb_entry->iSCSIAlias, 0,
- sizeof(fw_ddb_entry->iSCSIAlias));
+ memset(fw_ddb_entry->iscsi_alias, 0,
+ sizeof(fw_ddb_entry->iscsi_alias));
- memset((void *)fw_ddb_entry->iscsiName, 0,
- sizeof(fw_ddb_entry->iscsiName));
+ memset(fw_ddb_entry->iscsi_name, 0,
+ sizeof(fw_ddb_entry->iscsi_name));
- memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr));
- memset((void *)fw_ddb_entry->targetAddr, 0,
- sizeof(fw_ddb_entry->targetAddr));
+ memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
+ memset(fw_ddb_entry->tgt_addr, 0,
+ sizeof(fw_ddb_entry->tgt_addr));
fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
- fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port));
+ fw_ddb_entry->port = cpu_to_le16(ntohs(port));
- fw_ddb_entry->ipAddr[0] = *ip;
- fw_ddb_entry->ipAddr[1] = *(ip + 1);
- fw_ddb_entry->ipAddr[2] = *(ip + 2);
- fw_ddb_entry->ipAddr[3] = *(ip + 3);
+ fw_ddb_entry->ip_addr[0] = *ip;
+ fw_ddb_entry->ip_addr[1] = *(ip + 1);
+ fw_ddb_entry->ip_addr[2] = *(ip + 2);
+ fw_ddb_entry->ip_addr[3] = *(ip + 3);
ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
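
Across ql4_mbx.c the mailbox calls are normalized: both register arrays are zeroed before each command, the buffer length is now passed in mbox_cmd[4], and MBOX_REG_COUNT is always handed in as the outbound register count. A sketch of that calling pattern, assuming the ql4_def.h definitions; EXAMPLE_MBOX_CMD is a hypothetical opcode, not one defined by the driver:

static int example_issue_buffer_cmd(struct scsi_qla_host *ha,
				    dma_addr_t buf_dma, uint32_t buf_len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = EXAMPLE_MBOX_CMD;		/* hypothetical opcode */
	mbox_cmd[2] = LSDW(buf_dma);		/* low 32 bits of the DMA address */
	mbox_cmd[3] = MSDW(buf_dma);		/* high 32 bits of the DMA address */
	mbox_cmd[4] = buf_len;			/* buffer size, now passed explicitly */

	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
				    &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS)
		return QLA_ERROR;		/* mbox_sts[0] holds the firmware status */

	return QLA_SUCCESS;
}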
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 58afd135aa1d..7fe0482ecf03 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -6,6 +6,9 @@
*/
#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
{
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index da21f5fbbf87..b1d565c12c5b 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -10,6 +10,10 @@
#include <scsi/scsicam.h>
#include "ql4_def.h"
+#include "ql4_version.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
/*
* Driver version
@@ -50,12 +54,15 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
/*
* iSCSI template entry points
*/
-static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
- uint32_t enable, struct sockaddr *dst_addr);
+static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
+ enum iscsi_tgt_dscvr type, uint32_t enable,
+ struct sockaddr *dst_addr);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf);
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
@@ -95,16 +102,20 @@ static struct scsi_host_template qla4xxx_driver_template = {
static struct iscsi_transport qla4xxx_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .param_mask = ISCSI_CONN_PORT |
- ISCSI_CONN_ADDRESS |
- ISCSI_TARGET_NAME |
- ISCSI_TPGT,
+ .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD |
+ CAP_DATA_PATH_OFFLOAD,
+ .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
+ ISCSI_TARGET_NAME | ISCSI_TPGT,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
.sessiondata_size = sizeof(struct ddb_entry),
.host_template = &qla4xxx_driver_template,
.tgt_dscvr = qla4xxx_tgt_dscvr,
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_sess_get_param,
+ .get_host_param = qla4xxx_host_get_param,
.start_conn = qla4xxx_conn_start,
.stop_conn = qla4xxx_conn_stop,
.session_recovery_timedout = qla4xxx_recovery_timedout,
@@ -161,6 +172,43 @@ static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
}
+static ssize_t format_addr(char *buf, const unsigned char *addr, int len)
+{
+ int i;
+ char *cp = buf;
+
+ for (i = 0; i < len; i++)
+ cp += sprintf(cp, "%02x%c", addr[i],
+ i == (len - 1) ? '\n' : ':');
+ return cp - buf;
+}
+
+
+static int qla4xxx_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = format_addr(buf, ha->my_mac, MAC_ADDR_LEN);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0],
+ ha->ip_address[1], ha->ip_address[2],
+ ha->ip_address[3]);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+ len = sprintf(buf, "%s\n", ha->name_string);
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return len;
+}
+
static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf)
{
@@ -208,21 +256,15 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
return len;
}
-static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
- uint32_t enable, struct sockaddr *dst_addr)
+static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
+ enum iscsi_tgt_dscvr type, uint32_t enable,
+ struct sockaddr *dst_addr)
{
struct scsi_qla_host *ha;
- struct Scsi_Host *shost;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
int ret = 0;
- shost = scsi_host_lookup(host_no);
- if (IS_ERR(shost)) {
- printk(KERN_ERR "Could not find host no %u\n", host_no);
- return -ENODEV;
- }
-
ha = (struct scsi_qla_host *) shost->hostdata;
switch (type) {
@@ -246,8 +288,6 @@ static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
default:
ret = -ENOSYS;
}
-
- scsi_host_put(shost);
return ret;
}
@@ -369,14 +409,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
struct scsi_cmnd *cmd = srb->cmd;
if (srb->flags & SRB_DMA_VALID) {
- if (cmd->use_sg) {
- pci_unmap_sg(ha->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- } else if (cmd->request_bufflen) {
- pci_unmap_single(ha->pdev, srb->dma_handle,
- cmd->request_bufflen,
- cmd->sc_data_direction);
- }
+ scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
cmd->SCp.ptr = NULL;
@@ -711,7 +744,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
return stat;
}
-static void qla4xxx_hw_reset(struct scsi_qla_host *ha)
+void qla4xxx_hw_reset(struct scsi_qla_host *ha)
{
uint32_t ctrl_status;
unsigned long flags = 0;
@@ -1081,13 +1114,13 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
if (ha->timer_active)
qla4xxx_stop_timer(ha);
- /* free extra memory */
- qla4xxx_mem_free(ha);
-
/* Detach interrupts */
if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
free_irq(ha->pdev->irq, ha);
+ /* free extra memory */
+ qla4xxx_mem_free(ha);
+
pci_disable_device(ha->pdev);
}
@@ -1332,6 +1365,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
ha = pci_get_drvdata(pdev);
+ qla4xxx_disable_intrs(ha);
+
+ while (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
+ ssleep(1);
+
/* remove devs from iscsi_sessions to scsi_devices */
qla4xxx_free_ddb_list(ha);
@@ -1639,7 +1677,7 @@ static int __init qla4xxx_module_init(void)
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
printk(KERN_ERR
"%s: Unable to allocate SRB cache..."
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index e5183a697d1f..ab984cb89cea 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,5 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.00.07-k1"
+#define QLA4XXX_DRIVER_VERSION "5.01.00-k8"
+
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 2e7db18f5aef..2bfbf26c00ed 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -265,8 +265,6 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
unsigned int reqlen; /* total length of transfer */
- struct scatterlist *sglist; /* scatter-gather list pointer */
- unsigned int sgcount; /* sg counter */
char *buf;
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
int qbase = priv->qbase;
@@ -301,9 +299,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
- reqlen = cmd->request_bufflen;
+ reqlen = scsi_bufflen(cmd);
/* note that it won't work if transfers > 16M are requested */
if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
+ struct scatterlist *sg;
rtrc(2)
outb(reqlen, qbase); /* low-mid xfer cnt */
outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
@@ -311,23 +310,16 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
outb(0x90, qbase + 3); /* command do xfer */
/* PIO pseudo DMA to buffer or sglist */
REG1;
- if (!cmd->use_sg)
- ql_pdma(priv, phase, cmd->request_buffer,
- cmd->request_bufflen);
- else {
- sgcount = cmd->use_sg;
- sglist = cmd->request_buffer;
- while (sgcount--) {
- if (priv->qabort) {
- REG0;
- return ((priv->qabort == 1 ?
- DID_ABORT : DID_RESET) << 16);
- }
- buf = page_address(sglist->page) + sglist->offset;
- if (ql_pdma(priv, phase, buf, sglist->length))
- break;
- sglist++;
+
+ scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
+ if (priv->qabort) {
+ REG0;
+ return ((priv->qabort == 1 ?
+ DID_ABORT : DID_RESET) << 16);
}
+ buf = page_address(sg->page) + sg->offset;
+ if (ql_pdma(priv, phase, buf, sg->length))
+ break;
}
REG0;
rtrc(2)
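
The qlogicfas408 hunk swaps the hand-rolled use_sg/sglist walk for scsi_for_each_sg(), which covers both the single-buffer and scatter-gather cases. A sketch of the idiom, assuming a hypothetical per-segment PIO helper; page_address(sg->page) + sg->offset matches the scatterlist layout of this kernel generation:

#include <linux/mm.h>
#include <scsi/scsi_cmnd.h>

static void example_pio_xfer(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
		char *buf = page_address(sg->page) + sg->offset;

		/* example_pio_write(buf, sg->length);  -- hypothetical transfer */
		(void)buf;
	}
}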
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4c1e31334765..a5de1a829a76 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -288,7 +288,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
if (!pool->users) {
pool->slab = kmem_cache_create(pool->name,
sizeof(struct scsi_cmnd), 0,
- pool->slab_flags, NULL, NULL);
+ pool->slab_flags, NULL);
if (!pool->slab)
goto fail;
}
@@ -368,7 +368,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
if (level > 3) {
printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
" done = 0x%p, queuecommand 0x%p\n",
- cmd->request_buffer, cmd->request_bufflen,
+ scsi_sglist(cmd), scsi_bufflen(cmd),
cmd->done,
cmd->device->host->hostt->queuecommand);
@@ -1016,52 +1016,6 @@ struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
}
EXPORT_SYMBOL(scsi_device_lookup);
-/**
- * scsi_device_cancel - cancel outstanding IO to this device
- * @sdev: Pointer to struct scsi_device
- * @recovery: Boolean instructing function to recover device or not.
- *
- **/
-int scsi_device_cancel(struct scsi_device *sdev, int recovery)
-{
- struct scsi_cmnd *scmd;
- LIST_HEAD(active_list);
- struct list_head *lh, *lh_sf;
- unsigned long flags;
-
- scsi_device_set_state(sdev, SDEV_CANCEL);
-
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_for_each_entry(scmd, &sdev->cmd_list, list) {
- if (scmd->request) {
- /*
- * If we are unable to remove the timer, it means
- * that the command has already timed out or
- * finished.
- */
- if (!scsi_delete_timer(scmd))
- continue;
- list_add_tail(&scmd->eh_entry, &active_list);
- }
- }
- spin_unlock_irqrestore(&sdev->list_lock, flags);
-
- if (!list_empty(&active_list)) {
- list_for_each_safe(lh, lh_sf, &active_list) {
- scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
- list_del_init(lh);
- if (recovery &&
- !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
- scmd->result = (DID_ABORT << 16);
- scsi_finish_command(scmd);
- }
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(scsi_device_cancel);
-
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 06229f225ee9..4947dfe625a6 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2405,7 +2405,7 @@ MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
-MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)");
+MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
@@ -2875,7 +2875,7 @@ static int __init scsi_debug_init(void)
init_all_queued();
- sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
+ sdebug_driver_template.proc_name = sdebug_proc_name;
host_to_add = scsi_debug_add_host;
scsi_debug_add_host = 0;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 18dd5cc4d7c6..19c44f0781fd 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -128,6 +128,7 @@ static struct {
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
+ {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
{"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
{"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
{"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e8350c562d24..8a525abda30f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -18,12 +18,13 @@
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -640,16 +641,8 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
memcpy(scmd->cmnd, cmnd, cmnd_size);
if (copy_sense) {
- gfp_t gfp_mask = GFP_ATOMIC;
-
- if (shost->hostt->unchecked_isa_dma)
- gfp_mask |= __GFP_DMA;
-
- sgl.page = alloc_page(gfp_mask);
- if (!sgl.page)
- return FAILED;
- sgl.offset = 0;
- sgl.length = 252;
+ sg_init_one(&sgl, scmd->sense_buffer,
+ sizeof(scmd->sense_buffer));
scmd->sc_data_direction = DMA_FROM_DEVICE;
scmd->request_bufflen = sgl.length;
@@ -720,18 +713,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
/*
- * Last chance to have valid sense data.
- */
- if (copy_sense) {
- if (!SCSI_SENSE_VALID(scmd)) {
- memcpy(scmd->sense_buffer, page_address(sgl.page),
- sizeof(scmd->sense_buffer));
- }
- __free_page(sgl.page);
- }
-
-
- /*
* Restore original data
*/
scmd->request_buffer = old_buffer;
@@ -1536,8 +1517,6 @@ int scsi_error_handler(void *data)
{
struct Scsi_Host *shost = data;
- current->flags |= PF_NOFREEZE;
-
/*
* We use TASK_INTERRUPTIBLE so that the thread is not
* counted against the load average as a running process.
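
The scsi_send_eh_cmnd() hunk drops the separately allocated bounce page and instead points a one-entry scatterlist straight at the command's sense buffer with sg_init_one(). A minimal illustration of that call, on an arbitrary local buffer rather than the sense buffer:

#include <linux/scatterlist.h>

static void example_map_one_buffer(void *buf, unsigned int len)
{
	struct scatterlist sgl;

	sg_init_one(&sgl, buf, len);	/* sgl now describes exactly buf..buf+len */

	/* hand &sgl (a scatterlist of length 1) to code that expects one */
}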
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1f5a07bf2a75..da63c544919b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1661,7 +1661,7 @@ int __init scsi_init_queue(void)
scsi_io_context_cache = kmem_cache_create("scsi_io_context",
sizeof(struct scsi_io_context),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!scsi_io_context_cache) {
printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
return -ENOMEM;
@@ -1672,7 +1672,7 @@ int __init scsi_init_queue(void)
int size = sgp->size * sizeof(struct scatterlist);
sgp->slab = kmem_cache_create(sgp->name, size, 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ SLAB_HWCACHE_ALIGN, NULL);
if (!sgp->slab) {
printk(KERN_ERR "SCSI: can't init sg slab %s\n",
sgp->name);
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
new file mode 100644
index 000000000000..ac6855cd2657
--- /dev/null
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -0,0 +1,50 @@
+/*
+ * SCSI library functions depending on DMA
+ */
+
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+/**
+ * scsi_dma_map - perform DMA mapping against command's sg lists
+ * @cmd: scsi command
+ *
+ * Returns the number of sg lists actually used, zero if the sg lists
+ * is NULL, or -ENOMEM if the mapping failed.
+ */
+int scsi_dma_map(struct scsi_cmnd *cmd)
+{
+ int nseg = 0;
+
+ if (scsi_sg_count(cmd)) {
+ struct device *dev = cmd->device->host->shost_gendev.parent;
+
+ nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
+ cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ return -ENOMEM;
+ }
+ return nseg;
+}
+EXPORT_SYMBOL(scsi_dma_map);
+
+/**
+ * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
+ * @cmd: scsi command
+ */
+void scsi_dma_unmap(struct scsi_cmnd *cmd)
+{
+ if (scsi_sg_count(cmd)) {
+ struct device *dev = cmd->device->host->shost_gendev.parent;
+
+ dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
+ cmd->sc_data_direction);
+ }
+}
+EXPORT_SYMBOL(scsi_dma_unmap);
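
The two helpers above wrap dma_map_sg()/dma_unmap_sg() against the host's parent device, so low-level drivers no longer touch cmd->use_sg or cmd->request_buffer directly. A usage sketch from the driver side, assuming a hypothetical per-segment programming step:

#include <scsi/scsi_cmnd.h>

static int example_map_and_program(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		return nseg;		/* -ENOMEM: dma_map_sg() failed */

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* program sg_dma_address(sg) / sg_dma_len(sg) into the hardware */
	}

	/* ... on completion or error the driver calls scsi_dma_unmap(cmd) ... */
	return 0;
}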
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 662577fbe7a8..a86e62f4b3ba 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -703,16 +703,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
- * @sdevscan: holds information to be stored in the new scsi_device
- * @sdevnew: store the address of the newly allocated scsi_device
+ * @sdev: holds information to be stored in the new scsi_device
* @inq_result: holds the result of a previous INQUIRY to the LUN
* @bflags: black/white list flag
+ * @async: 1 if this device is being scanned asynchronously
*
* Description:
- * Allocate and initialize a scsi_device matching sdevscan. Optionally
- * set fields based on values in *@bflags. If @sdevnew is not
- * NULL, store the address of the new scsi_device in *@sdevnew (needed
- * when scanning a particular LUN).
+ * Initialize the scsi_device @sdev. Optionally set fields based
+ * on values in *@bflags.
*
* Return:
* SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
@@ -752,25 +750,15 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->rev = (char *) (sdev->inquiry + 32);
if (*bflags & BLIST_ISROM) {
- /*
- * It would be better to modify sdev->type, and set
- * sdev->removable; this can now be done since
- * print_inquiry has gone away.
- */
- inq_result[0] = TYPE_ROM;
- inq_result[1] |= 0x80; /* removable */
- } else if (*bflags & BLIST_NO_ULD_ATTACH)
- sdev->no_uld_attach = 1;
+ sdev->type = TYPE_ROM;
+ sdev->removable = 1;
+ } else {
+ sdev->type = (inq_result[0] & 0x1f);
+ sdev->removable = (inq_result[1] & 0x80) >> 7;
+ }
- switch (sdev->type = (inq_result[0] & 0x1f)) {
+ switch (sdev->type) {
case TYPE_RBC:
- /* RBC devices can return SCSI-3 compliance and yet
- * still not support REPORT LUNS, so make them act as
- * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
- * specifically set */
- if ((*bflags & BLIST_REPORTLUN2) == 0)
- *bflags |= BLIST_NOREPORTLUN;
- /* fall through */
case TYPE_TAPE:
case TYPE_DISK:
case TYPE_PRINTER:
@@ -784,13 +772,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->writeable = 1;
break;
case TYPE_ROM:
- /* MMC devices can return SCSI-3 compliance and yet
- * still not support REPORT LUNS, so make them act as
- * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
- * specifically set */
- if ((*bflags & BLIST_REPORTLUN2) == 0)
- *bflags |= BLIST_NOREPORTLUN;
- /* fall through */
case TYPE_WORM:
sdev->writeable = 0;
break;
@@ -798,6 +779,15 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
}
+ if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
+ /* RBC and MMC devices can return SCSI-3 compliance and yet
+ * still not support REPORT LUNS, so make them act as
+ * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
+ * specifically set */
+ if ((*bflags & BLIST_REPORTLUN2) == 0)
+ *bflags |= BLIST_NOREPORTLUN;
+ }
+
/*
* For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
* spec says: The device server is capable of supporting the
@@ -815,12 +805,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
- sdev->removable = (0x80 & inq_result[1]) >> 7;
sdev->lockable = sdev->removable;
sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
- if (sdev->scsi_level >= SCSI_3 || (sdev->inquiry_len > 56 &&
- inq_result[56] & 0x04))
+ if (sdev->scsi_level >= SCSI_3 ||
+ (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
sdev->ppr = 1;
if (inq_result[7] & 0x60)
sdev->wdtr = 1;
@@ -833,13 +822,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->inq_periph_qual, inq_result[2] & 0x07,
(inq_result[3] & 0x0f) == 1 ? " CCS" : "");
- /*
- * End sysfs code.
- */
-
if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
!(*bflags & BLIST_NOTQ))
sdev->tagged_supported = 1;
+
/*
* Some devices (Texel CD ROM drives) have handshaking problems
* when used with the Seagate controllers. borken is initialized
@@ -848,6 +834,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if ((*bflags & BLIST_BORKEN) == 0)
sdev->borken = 0;
+ if (*bflags & BLIST_NO_ULD_ATTACH)
+ sdev->no_uld_attach = 1;
+
/*
* Apparently some really broken devices (contrary to the SCSI
* standards) need to be selected without asserting ATN
@@ -872,7 +861,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_SINGLELUN)
sdev->single_lun = 1;
-
sdev->use_10_for_rw = 1;
if (*bflags & BLIST_MS_SKIP_PAGE_08)
@@ -1213,7 +1201,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns
* the integer: 0x0b030a04
**/
-static int scsilun_to_int(struct scsi_lun *scsilun)
+int scsilun_to_int(struct scsi_lun *scsilun)
{
int i;
unsigned int lun;
@@ -1224,6 +1212,7 @@ static int scsilun_to_int(struct scsi_lun *scsilun)
scsilun->scsi_lun[i + 1]) << (i * 8));
return lun;
}
+EXPORT_SYMBOL(scsilun_to_int);
/**
* int_to_scsilun: reverts an int into a scsi_lun
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 6cfaaa2d0c81..63a30f566f3a 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -9,6 +9,7 @@
#include <linux/sysctl.h>
#include "scsi_logging.h"
+#include "scsi_priv.h"
static ctl_table scsi_table[] = {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67a38a1409ba..34cdce6738a6 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -16,6 +16,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_driver.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -293,30 +294,18 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
{
struct device_driver *drv = dev->driver;
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
int err;
err = scsi_device_quiesce(sdev);
if (err)
return err;
- /* call HLD suspend first */
if (drv && drv->suspend) {
err = drv->suspend(dev, state);
if (err)
return err;
}
- /* then, call host suspend */
- if (sht->suspend) {
- err = sht->suspend(sdev, state);
- if (err) {
- if (drv && drv->resume)
- drv->resume(dev);
- return err;
- }
- }
-
return 0;
}
@@ -324,21 +313,14 @@ static int scsi_bus_resume(struct device * dev)
{
struct device_driver *drv = dev->driver;
struct scsi_device *sdev = to_scsi_device(dev);
- struct scsi_host_template *sht = sdev->host->hostt;
- int err = 0, err2 = 0;
+ int err = 0;
- /* call host resume first */
- if (sht->resume)
- err = sht->resume(sdev);
-
- /* then, call HLD resume */
if (drv && drv->resume)
- err2 = drv->resume(dev);
+ err = drv->resume(dev);
scsi_device_resume(sdev);
- /* favor LLD failure */
- return err ? err : err2;;
+ return err;
}
struct bus_type scsi_bus_type = {
@@ -733,6 +715,7 @@ static int attr_add(struct device *dev, struct device_attribute *attr)
int scsi_sysfs_add_sdev(struct scsi_device *sdev)
{
int error, i;
+ struct request_queue *rq = sdev->request_queue;
if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
return error;
@@ -752,6 +735,17 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
/* take a reference for the sdev_classdev; this is
* released by the sdev_class .release */
get_device(&sdev->sdev_gendev);
+
+ error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
+
+ if (error)
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to register bsg queue, errno=%d\n", error);
+
+ /* we're treating error on bsg register as non-fatal, so pretend
+ * nothing went wrong */
+ error = 0;
+
if (sdev->host->hostt->sdev_attrs) {
for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
error = attr_add(&sdev->sdev_gendev,
@@ -798,6 +792,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
return;
+ bsg_unregister_queue(sdev->request_queue);
class_device_unregister(&sdev->sdev_classdev);
transport_remove_device(dev);
device_del(dev);
@@ -822,7 +817,7 @@ void scsi_remove_device(struct scsi_device *sdev)
}
EXPORT_SYMBOL(scsi_remove_device);
-void __scsi_remove_target(struct scsi_target *starget)
+static void __scsi_remove_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags;
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 2570f48a69c7..371b69c110bc 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -585,7 +585,7 @@ static int __init scsi_tgt_init(void)
scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
sizeof(struct scsi_tgt_cmd),
- 0, 0, NULL, NULL);
+ 0, 0, NULL);
if (!scsi_tgt_cmd_cache)
return -ENOMEM;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b4d1ece46f78..47057254850d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1,4 +1,4 @@
-/*
+/*
* FiberChannel transport specific attributes exported to sysfs.
*
* Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
@@ -19,9 +19,10 @@
*
* ========
*
- * Copyright (C) 2004-2005 James Smart, Emulex Corporation
+ * Copyright (C) 2004-2007 James Smart, Emulex Corporation
* Rewrite for host, target, device, and remote port attributes,
* statistics, and service functions...
+ * Add vports, etc
*
*/
#include <linux/module.h>
@@ -37,6 +38,34 @@
#include "scsi_priv.h"
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
+static void fc_vport_sched_delete(struct work_struct *work);
+
+/*
+ * This is a temporary carrier for creating a vport. It will eventually
+ * be replaced by a real message definition for sgio or netlink.
+ *
+ * fc_vport_identifiers: This set of data contains all elements
+ * to uniquely identify and instantiate a FC virtual port.
+ *
+ * Notes:
+ * symbolic_name: The driver is to append the symbolic_name string data
+ * to the symbolic_node_name data that it generates by default.
+ * The resulting combination should then be registered with the switch.
+ * It is expected that things like Xen may stuff a VM title into
+ * this field.
+ */
+struct fc_vport_identifiers {
+ u64 node_name;
+ u64 port_name;
+ u32 roles;
+ bool disable;
+ enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
+ char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
+};
+
+static int fc_vport_create(struct Scsi_Host *shost, int channel,
+ struct device *pdev, struct fc_vport_identifiers *ids,
+ struct fc_vport **vport);
/*
* Redefine so that we can have same named attributes in the
@@ -90,10 +119,14 @@ static struct {
{ FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
{ FC_PORTTYPE_LPORT, "LPort (private loop)" },
	{ FC_PORTTYPE_PTP,	"Point-To-Point (direct nport connection)" },
+ { FC_PORTTYPE_NPIV, "NPIV VPORT" },
};
fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
#define FC_PORTTYPE_MAX_NAMELEN 50
+/* Reuse fc_port_type enum function for vport_type */
+#define get_fc_vport_type_name get_fc_port_type_name
+
/* Convert fc_host_event_code values to ascii string name */
static const struct {
@@ -139,6 +172,29 @@ fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
#define FC_PORTSTATE_MAX_NAMELEN 20
+/* Convert fc_vport_state values to ascii string name */
+static struct {
+ enum fc_vport_state value;
+ char *name;
+} fc_vport_state_names[] = {
+ { FC_VPORT_UNKNOWN, "Unknown" },
+ { FC_VPORT_ACTIVE, "Active" },
+ { FC_VPORT_DISABLED, "Disabled" },
+ { FC_VPORT_LINKDOWN, "Linkdown" },
+ { FC_VPORT_INITIALIZING, "Initializing" },
+ { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
+ { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
+ { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
+ { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
+ { FC_VPORT_FAILED, "VPort Failed" },
+};
+fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
+#define FC_VPORTSTATE_MAX_NAMELEN 24
+
+/* Reuse fc_vport_state enum function for vport_last_state */
+#define get_fc_vport_last_state_name get_fc_vport_state_name
+
+
/* Convert fc_tgtid_binding_type values to ascii string name */
static const struct {
enum fc_tgtid_binding_type value;
@@ -219,16 +275,16 @@ show_fc_fc4s (char *buf, u8 *fc4_list)
}
-/* Convert FC_RPORT_ROLE bit values to ascii string name */
+/* Convert FC_PORT_ROLE bit values to ascii string name */
static const struct {
u32 value;
char *name;
-} fc_remote_port_role_names[] = {
- { FC_RPORT_ROLE_FCP_TARGET, "FCP Target" },
- { FC_RPORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
- { FC_RPORT_ROLE_IP_PORT, "IP Port" },
+} fc_port_role_names[] = {
+ { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
+ { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
+ { FC_PORT_ROLE_IP_PORT, "IP Port" },
};
-fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
+fc_bitfield_name_search(port_roles, fc_port_role_names)
/*
* Define roles that are specific to port_id. Values are relative to ROLE_MASK.
@@ -252,7 +308,8 @@ static void fc_scsi_scan_rport(struct work_struct *work);
*/
#define FC_STARGET_NUM_ATTRS 3
#define FC_RPORT_NUM_ATTRS 10
-#define FC_HOST_NUM_ATTRS 17
+#define FC_VPORT_NUM_ATTRS 9
+#define FC_HOST_NUM_ATTRS 21
struct fc_internal {
struct scsi_transport_template t;
@@ -278,6 +335,10 @@ struct fc_internal {
struct transport_container rport_attr_cont;
struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
+
+ struct transport_container vport_attr_cont;
+ struct class_device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
+ struct class_device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
};
#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
@@ -318,7 +379,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
struct Scsi_Host *shost = dev_to_shost(dev);
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
- /*
+ /*
* Set default values easily detected by the midlayer as
* failure cases. The scsi lldd is responsible for initializing
* all transport attributes to valid values per host.
@@ -331,6 +392,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
sizeof(fc_host->supported_fc4s));
fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
fc_host->maxframe_size = -1;
+ fc_host->max_npiv_vports = 0;
memset(fc_host->serial_number, 0,
sizeof(fc_host->serial_number));
@@ -348,8 +410,11 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
INIT_LIST_HEAD(&fc_host->rports);
INIT_LIST_HEAD(&fc_host->rport_bindings);
+ INIT_LIST_HEAD(&fc_host->vports);
fc_host->next_rport_number = 0;
fc_host->next_target_id = 0;
+ fc_host->next_vport_number = 0;
+ fc_host->npiv_vports_inuse = 0;
snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d",
shost->host_no);
@@ -388,6 +453,16 @@ static DECLARE_TRANSPORT_CLASS(fc_rport_class,
NULL);
/*
+ * Setup and Remove actions for virtual ports are handled
+ * in the service functions below.
+ */
+static DECLARE_TRANSPORT_CLASS(fc_vport_class,
+ "fc_vports",
+ NULL,
+ NULL,
+ NULL);
+
+/*
* Module Parameters
*/
@@ -585,6 +660,9 @@ static __init int fc_transport_init(void)
error = transport_class_register(&fc_host_class);
if (error)
return error;
+ error = transport_class_register(&fc_vport_class);
+ if (error)
+ return error;
error = transport_class_register(&fc_rport_class);
if (error)
return error;
@@ -596,6 +674,7 @@ static void __exit fc_transport_exit(void)
transport_class_unregister(&fc_transport_class);
transport_class_unregister(&fc_rport_class);
transport_class_unregister(&fc_host_class);
+ transport_class_unregister(&fc_vport_class);
}
/*
@@ -800,9 +879,9 @@ show_fc_rport_roles (struct class_device *cdev, char *buf)
return snprintf(buf, 30, "Unknown Fabric Entity\n");
}
} else {
- if (rport->roles == FC_RPORT_ROLE_UNKNOWN)
+ if (rport->roles == FC_PORT_ROLE_UNKNOWN)
return snprintf(buf, 20, "unknown\n");
- return get_fc_remote_port_roles_names(rport->roles, buf);
+ return get_fc_port_roles_names(rport->roles, buf);
}
}
static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
@@ -857,7 +936,7 @@ static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
/*
* Note: in the target show function we recognize when the remote
- * port is in the hierarchy and do not allow the driver to get
+ * port is in the heirarchy and do not allow the driver to get
* involved in sysfs functions. The driver only gets involved if
* it's the "old" style that doesn't use rports.
*/
@@ -912,6 +991,257 @@ fc_starget_rd_attr(port_id, "0x%06x\n", 20);
/*
+ * FC Virtual Port Attribute Management
+ */
+
+#define fc_vport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_vport_##field (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ if ((i->f->get_vport_##field) && \
+ !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
+ i->f->get_vport_##field(vport); \
+ return snprintf(buf, sz, format_string, cast vport->field); \
+}
+
+#define fc_vport_store_function(field) \
+static ssize_t \
+store_fc_vport_##field(struct class_device *cdev, const char *buf, \
+ size_t count) \
+{ \
+ int val; \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ char *cp; \
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ i->f->set_vport_##field(vport, val); \
+ return count; \
+}
+
+#define fc_vport_store_str_function(field, slen) \
+static ssize_t \
+store_fc_vport_##field(struct class_device *cdev, const char *buf, \
+ size_t count) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ struct Scsi_Host *shost = vport_to_shost(vport); \
+ struct fc_internal *i = to_fc_internal(shost->transportt); \
+ unsigned int cnt=count; \
+ \
+ /* count may include a LF at end of string */ \
+ if (buf[cnt-1] == '\n') \
+ cnt--; \
+ if (cnt > ((slen) - 1)) \
+ return -EINVAL; \
+ memcpy(vport->field, buf, cnt); \
+ i->f->set_vport_##field(vport); \
+ return count; \
+}
+
+#define fc_vport_rd_attr(field, format_string, sz) \
+ fc_vport_show_function(field, format_string, sz, ) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_vport_show_function(field, format_string, sz, (cast)) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_vport_rw_attr(field, format_string, sz) \
+ fc_vport_show_function(field, format_string, sz, ) \
+ fc_vport_store_function(field) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
+ show_fc_vport_##field, \
+ store_fc_vport_##field)
+
+#define fc_private_vport_show_function(field, format_string, sz, cast) \
+static ssize_t \
+show_fc_vport_##field (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ return snprintf(buf, sz, format_string, cast vport->field); \
+}
+
+#define fc_private_vport_store_u32_function(field) \
+static ssize_t \
+store_fc_vport_##field(struct class_device *cdev, const char *buf, \
+ size_t count) \
+{ \
+ u32 val; \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ char *cp; \
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
+ return -EBUSY; \
+ val = simple_strtoul(buf, &cp, 0); \
+ if (*cp && (*cp != '\n')) \
+ return -EINVAL; \
+ vport->field = val; \
+ return count; \
+}
+
+
+#define fc_private_vport_rd_attr(field, format_string, sz) \
+ fc_private_vport_show_function(field, format_string, sz, ) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
+ fc_private_vport_show_function(field, format_string, sz, (cast)) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
+ show_fc_vport_##field, NULL)
+
+#define fc_private_vport_rw_u32_attr(field, format_string, sz) \
+ fc_private_vport_show_function(field, format_string, sz, ) \
+ fc_private_vport_store_u32_function(field) \
+static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
+ show_fc_vport_##field, \
+ store_fc_vport_##field)
+
+
+#define fc_private_vport_rd_enum_attr(title, maxlen) \
+static ssize_t \
+show_fc_vport_##title (struct class_device *cdev, char *buf) \
+{ \
+ struct fc_vport *vport = transport_class_to_vport(cdev); \
+ const char *name; \
+ name = get_fc_##title##_name(vport->title); \
+ if (!name) \
+ return -EINVAL; \
+ return snprintf(buf, maxlen, "%s\n", name); \
+} \
+static FC_CLASS_DEVICE_ATTR(vport, title, S_IRUGO, \
+ show_fc_vport_##title, NULL)
+
+
+#define SETUP_VPORT_ATTRIBUTE_RD(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ if (i->f->get_##field) \
+ count++
+ /* NOTE: Above MACRO differs: checks function not show bit */
+
+#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++
+
+#define SETUP_VPORT_ATTRIBUTE_WR(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ if (i->f->field) \
+ count++
+ /* NOTE: Above MACRO differs: checks function */
+
+#define SETUP_VPORT_ATTRIBUTE_RW(field) \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ if (!i->f->set_vport_##field) { \
+ i->private_vport_attrs[count].attr.mode = S_IRUGO; \
+ i->private_vport_attrs[count].store = NULL; \
+ } \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++
+ /* NOTE: Above MACRO differs: does not check show bit */
+
+#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
+{ \
+ i->private_vport_attrs[count] = class_device_attr_vport_##field; \
+ i->vport_attrs[count] = &i->private_vport_attrs[count]; \
+ count++; \
+}
+
+
+/* The FC Transport Virtual Port Attributes: */
+
+/* Fixed Virtual Port Attributes */
+
+/* Dynamic Virtual Port Attributes */
+
+/* Private Virtual Port Attributes */
+
+fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
+fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
+fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
+fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
+
+static ssize_t
+show_fc_vport_roles (struct class_device *cdev, char *buf)
+{
+ struct fc_vport *vport = transport_class_to_vport(cdev);
+
+ if (vport->roles == FC_PORT_ROLE_UNKNOWN)
+ return snprintf(buf, 20, "unknown\n");
+ return get_fc_port_roles_names(vport->roles, buf);
+}
+static FC_CLASS_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
+
+fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
+
+fc_private_vport_show_function(symbolic_name, "%s\n",
+ FC_VPORT_SYMBOLIC_NAMELEN + 1, )
+fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
+static FC_CLASS_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
+ show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
+
+static ssize_t
+store_fc_vport_delete(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct fc_vport *vport = transport_class_to_vport(cdev);
+ struct Scsi_Host *shost = vport_to_shost(vport);
+
+ fc_queue_work(shost, &vport->vport_delete_work);
+ return count;
+}
+static FC_CLASS_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
+ NULL, store_fc_vport_delete);
+
+
+/*
+ * Enable/Disable vport
+ * Write "1" to disable, write "0" to enable
+ */
+static ssize_t
+store_fc_vport_disable(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct fc_vport *vport = transport_class_to_vport(cdev);
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ int stat;
+
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+ return -EBUSY;
+
+ if (*buf == '0') {
+ if (vport->vport_state != FC_VPORT_DISABLED)
+ return -EALREADY;
+ } else if (*buf == '1') {
+ if (vport->vport_state == FC_VPORT_DISABLED)
+ return -EALREADY;
+ } else
+ return -EINVAL;
+
+ stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
+ return stat ? stat : count;
+}
+static FC_CLASS_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
+ NULL, store_fc_vport_disable);
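
Illustrative note (hypothetical values, not part of the patch): once a vport has been instantiated, the two attributes above appear on its class device in the new "fc_vports" class, whose device name follows the "vport-%d:%d-%d" pattern set later in this patch. On a hypothetical host 5, channel 0, first vport, an administrator could write "1" to roughly /sys/class/fc_vports/vport-5:0-0/vport_disable to take the virtual port down, "0" to bring it back, or write any value to the vport_delete attribute to queue the vport for removal.
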
+
+
+/*
* Host Attribute Management
*/
@@ -1003,6 +1333,13 @@ static FC_CLASS_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
if (i->f->show_host_##field) \
count++
+#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
+ i->private_host_attrs[count] = class_device_attr_host_##field; \
+ i->private_host_attrs[count].attr.mode = S_IRUGO; \
+ i->private_host_attrs[count].store = NULL; \
+ i->host_attrs[count] = &i->private_host_attrs[count]; \
+ count++
+
#define SETUP_HOST_ATTRIBUTE_RW(field) \
i->private_host_attrs[count] = class_device_attr_host_##field; \
if (!i->f->set_host_##field) { \
@@ -1090,6 +1427,7 @@ fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
unsigned long long);
fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
+fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
@@ -1210,6 +1548,9 @@ store_fc_private_host_issue_lip(struct class_device *cdev,
static FC_CLASS_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
store_fc_private_host_issue_lip);
+fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
+
+
/*
* Host Statistics Management
*/
@@ -1285,7 +1626,6 @@ fc_reset_statistics(struct class_device *cdev, const char *buf,
static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
fc_reset_statistics);
-
static struct attribute *fc_statistics_attrs[] = {
&class_device_attr_host_seconds_since_last_reset.attr,
&class_device_attr_host_tx_frames.attr,
@@ -1316,6 +1656,142 @@ static struct attribute_group fc_statistics_group = {
.attrs = fc_statistics_attrs,
};
+
+/* Host Vport Attributes */
+
+static int
+fc_parse_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i=0, j=0; i < 16; i++) {
+ if ((*ns >= 'a') && (*ns <= 'f'))
+ j = ((j << 4) | ((*ns++ -'a') + 10));
+ else if ((*ns >= 'A') && (*ns <= 'F'))
+ j = ((j << 4) | ((*ns++ -'A') + 10));
+ else if ((*ns >= '0') && (*ns <= '9'))
+ j = ((j << 4) | (*ns++ -'0'));
+ else
+ return -EINVAL;
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+
+ return 0;
+}
+
+
+/*
+ * "Short-cut" sysfs variable to create a new vport on a FC Host.
+ * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
+ * will default to a NPIV-based FCP_Initiator; The WWNs are specified
+ * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
+ */
+static ssize_t
+store_fc_host_vport_create(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct fc_vport_identifiers vid;
+ struct fc_vport *vport;
+ unsigned int cnt=count;
+ int stat;
+
+ memset(&vid, 0, sizeof(vid));
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (buf[16] != ':'))
+ return -EINVAL;
+
+ stat = fc_parse_wwn(&buf[0], &vid.port_name);
+ if (stat)
+ return stat;
+
+ stat = fc_parse_wwn(&buf[17], &vid.node_name);
+ if (stat)
+ return stat;
+
+ vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vid.vport_type = FC_PORTTYPE_NPIV;
+	/* vid.symbolic_name is already zeroed (empty string) */
+ vid.disable = false; /* always enabled */
+
+ /* we only allow support on Channel 0 !!! */
+ stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
+ return stat ? stat : count;
+}
+static FC_CLASS_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
+ store_fc_host_vport_create);
+
+
+/*
+ * "Short-cut" sysfs variable to delete a vport on a FC Host.
+ * Vport is identified by a string containing "<WWPN>:<WWNN>".
+ * The WWNs are specified as hex characters, and may *not* contain
+ * any prefixes (e.g. 0x, x, etc)
+ */
+static ssize_t
+store_fc_host_vport_delete(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_vport *vport;
+ u64 wwpn, wwnn;
+ unsigned long flags;
+ unsigned int cnt=count;
+ int stat, match;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (buf[16] != ':'))
+ return -EINVAL;
+
+ stat = fc_parse_wwn(&buf[0], &wwpn);
+ if (stat)
+ return stat;
+
+ stat = fc_parse_wwn(&buf[17], &wwnn);
+ if (stat)
+ return stat;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ match = 0;
+ /* we only allow support on Channel 0 !!! */
+ list_for_each_entry(vport, &fc_host->vports, peers) {
+ if ((vport->channel == 0) &&
+ (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
+ match = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!match)
+ return -ENODEV;
+
+ stat = fc_vport_terminate(vport);
+ return stat ? stat : count;
+}
+static FC_CLASS_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
+ store_fc_host_vport_delete);
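
Illustrative usage (hypothetical WWNs, not from the patch): with the vport_create and vport_delete attributes above, an NPIV-capable HBA's fc_host gains two writable files under /sys/class/fc_host/hostN/. Writing a string such as 2001001b32101234:2000001b32101234 (16 hex digits of WWPN, a colon, 16 hex digits of WWNN, no "0x" prefix) to host5's vport_create would instantiate a new FCP-initiator vport on channel 0, and writing the same string to vport_delete would tear it down again.
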
+
+
static int fc_host_match(struct attribute_container *cont,
struct device *dev)
{
@@ -1387,6 +1863,40 @@ static int fc_rport_match(struct attribute_container *cont,
}
+static void fc_vport_dev_release(struct device *dev)
+{
+ struct fc_vport *vport = dev_to_vport(dev);
+ put_device(dev->parent); /* release kobj parent */
+ kfree(vport);
+}
+
+int scsi_is_fc_vport(const struct device *dev)
+{
+ return dev->release == fc_vport_dev_release;
+}
+EXPORT_SYMBOL(scsi_is_fc_vport);
+
+static int fc_vport_match(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct fc_vport *vport;
+ struct Scsi_Host *shost;
+ struct fc_internal *i;
+
+ if (!scsi_is_fc_vport(dev))
+ return 0;
+ vport = dev_to_vport(dev);
+
+ shost = vport_to_shost(vport);
+ if (!shost->transportt || shost->transportt->host_attrs.ac.class
+ != &fc_host_class.class)
+ return 0;
+
+ i = to_fc_internal(shost->transportt);
+ return &i->vport_attr_cont.ac == cont;
+}
+
+
/**
* fc_timed_out - FC Transport I/O timeout intercept handler
*
@@ -1433,6 +1943,9 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
if (rport->scsi_target_id == -1)
continue;
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ continue;
+
if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
(id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
scsi_scan_target(&rport->dev, rport->channel,
@@ -1472,6 +1985,11 @@ fc_attach_transport(struct fc_function_template *ft)
i->rport_attr_cont.ac.match = fc_rport_match;
transport_container_register(&i->rport_attr_cont);
+ i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
+ i->vport_attr_cont.ac.class = &fc_vport_class.class;
+ i->vport_attr_cont.ac.match = fc_vport_match;
+ transport_container_register(&i->vport_attr_cont);
+
i->f = ft;
/* Transport uses the shost workq for scsi scanning */
@@ -1480,7 +1998,7 @@ fc_attach_transport(struct fc_function_template *ft)
i->t.eh_timed_out = fc_timed_out;
i->t.user_scan = fc_user_scan;
-
+
/*
* Setup SCSI Target Attributes.
*/
@@ -1505,6 +2023,10 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
+ if (ft->vport_create) {
+ SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
+ SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
+ }
SETUP_HOST_ATTRIBUTE_RD(serial_number);
SETUP_HOST_ATTRIBUTE_RD(port_id);
@@ -1520,6 +2042,10 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
if (ft->issue_fc_host_lip)
SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
+ if (ft->vport_create)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
+ if (ft->vport_delete)
+ SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
BUG_ON(count > FC_HOST_NUM_ATTRS);
@@ -1545,6 +2071,24 @@ fc_attach_transport(struct fc_function_template *ft)
i->rport_attrs[count] = NULL;
+ /*
+ * Setup Virtual Port Attributes.
+ */
+ count=0;
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
+ SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
+ SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
+ SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
+ SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
+
+ BUG_ON(count > FC_VPORT_NUM_ATTRS);
+
+ i->vport_attrs[count] = NULL;
+
return &i->t;
}
EXPORT_SYMBOL(fc_attach_transport);
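
For orientation only, a sketch of how a low-level driver might opt in to the vport machinery wired up above. The foo_* names and struct foo_vport_priv are invented for illustration; the template field names and call signatures follow the fci->f->vport_create()/vport_disable()/vport_delete() and dd_fcvport_size uses visible in this file (the header side of the patch is not shown here).

struct foo_vport_priv {
	/* per-vport driver state; sized via dd_fcvport_size below */
	u32 vpi;
};

static int foo_vport_create(struct fc_vport *vport, bool disable)
{
	/* ask the adapter to log in a virtual N_Port using
	 * vport->port_name / vport->node_name; vport->dd_data points
	 * at the dd_fcvport_size bytes reserved by fc_vport_create() */
	return 0;
}

static int foo_vport_disable(struct fc_vport *vport, bool disable)
{
	/* take the virtual link down (disable) or bring it back up */
	return 0;
}

static int foo_vport_delete(struct fc_vport *vport)
{
	/* log out the virtual N_Port and free adapter resources */
	return 0;
}

static struct fc_function_template foo_fc_template = {
	/* ... existing host/rport getters and setters ... */
	.vport_create	 = foo_vport_create,
	.vport_disable	 = foo_vport_disable,
	.vport_delete	 = foo_vport_delete,
	.dd_fcvport_size = sizeof(struct foo_vport_priv),
};
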
@@ -1556,6 +2100,7 @@ void fc_release_transport(struct scsi_transport_template *t)
transport_container_unregister(&i->t.target_attrs);
transport_container_unregister(&i->t.host_attrs);
transport_container_unregister(&i->rport_attr_cont);
+ transport_container_unregister(&i->vport_attr_cont);
kfree(i);
}
@@ -1667,9 +2212,17 @@ fc_flush_devloss(struct Scsi_Host *shost)
void
fc_remove_host(struct Scsi_Host *shost)
{
- struct fc_rport *rport, *next_rport;
+ struct fc_vport *vport = NULL, *next_vport = NULL;
+ struct fc_rport *rport = NULL, *next_rport = NULL;
struct workqueue_struct *work_q;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ /* Remove any vports */
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
+ fc_queue_work(shost, &vport->vport_delete_work);
/* Remove any remote ports */
list_for_each_entry_safe(rport, next_rport,
@@ -1686,6 +2239,8 @@ fc_remove_host(struct Scsi_Host *shost)
fc_queue_work(shost, &rport->rport_delete_work);
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
/* flush all scan work items */
scsi_flush_work(shost);
@@ -1744,7 +2299,7 @@ fc_rport_final_delete(struct work_struct *work)
unsigned long flags;
/*
- * if a scan is pending, flush the SCSI Host work_q so that
+ * if a scan is pending, flush the SCSI Host work_q so that
* that we can reclaim the rport scan work element.
*/
if (rport->flags & FC_RPORT_SCAN_PENDING)
@@ -1803,7 +2358,7 @@ fc_rport_final_delete(struct work_struct *work)
* Notes:
* This routine assumes no locks are held on entry.
**/
-struct fc_rport *
+static struct fc_rport *
fc_rport_create(struct Scsi_Host *shost, int channel,
struct fc_rport_identifiers *ids)
{
@@ -1844,7 +2399,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
spin_lock_irqsave(shost->host_lock, flags);
rport->number = fc_host->next_rport_number++;
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
rport->scsi_target_id = fc_host->next_target_id++;
else
rport->scsi_target_id = -1;
@@ -1869,7 +2424,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
transport_add_device(dev);
transport_configure_device(dev);
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
@@ -2003,7 +2558,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
/* was a target, not in roles */
if ((rport->scsi_target_id != -1) &&
- (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET)))
+ (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
return rport;
/*
@@ -2086,7 +2641,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
memset(rport->dd_data, 0,
fci->f->dd_fcrport_size);
- if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
+ if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
/* initiate a scan of the target */
rport->flags |= FC_RPORT_SCAN_PENDING;
scsi_queue_work(shost, &rport->scan_work);
@@ -2243,11 +2798,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
int create = 0;
spin_lock_irqsave(shost->host_lock, flags);
- if (roles & FC_RPORT_ROLE_FCP_TARGET) {
+ if (roles & FC_PORT_ROLE_FCP_TARGET) {
if (rport->scsi_target_id == -1) {
rport->scsi_target_id = fc_host->next_target_id++;
create = 1;
- } else if (!(rport->roles & FC_RPORT_ROLE_FCP_TARGET))
+ } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
create = 1;
}
@@ -2294,7 +2849,7 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
* fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
* which we blocked, and has now failed to return
* in the allotted time.
- *
+ *
* @work: rport target that failed to reappear in the allotted time.
**/
static void
@@ -2317,7 +2872,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
*/
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
(rport->scsi_target_id != -1) &&
- !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
dev_printk(KERN_ERR, &rport->dev,
"blocked FC remote port time out: no longer"
" a FCP target, removing starget\n");
@@ -2367,7 +2922,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
*/
rport->maxframe_size = -1;
rport->supported_classes = FC_COS_UNSPECIFIED;
- rport->roles = FC_RPORT_ROLE_UNKNOWN;
+ rport->roles = FC_PORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
/* remove the identifiers that aren't used in the consisting binding */
@@ -2436,7 +2991,7 @@ fc_scsi_scan_rport(struct work_struct *work)
unsigned long flags;
if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
- (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ (rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
scsi_scan_target(&rport->dev, rport->channel,
rport->scsi_target_id, SCAN_WILD_CARD, 1);
}
@@ -2447,7 +3002,227 @@ fc_scsi_scan_rport(struct work_struct *work)
}
-MODULE_AUTHOR("Martin Hicks");
+/**
+ * fc_vport_create - allocates and creates a FC virtual port.
+ * @shost: scsi host the virtual port is connected to.
+ * @channel: Channel on the shost that the virtual port is connected to.
+ * @pdev: parent device for vport
+ * @ids: The world wide names, FC4 port roles, etc for
+ * the virtual port.
+ * @ret_vport: The pointer to the created vport.
+ *
+ * Allocates and creates the vport structure, calls the parent host
+ * to instantiate the vport, then completes with class and sysfs creation.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ **/
+static int
+fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
+ struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *fci = to_fc_internal(shost->transportt);
+ struct fc_vport *vport;
+ struct device *dev;
+ unsigned long flags;
+ size_t size;
+ int error;
+
+ *ret_vport = NULL;
+
+ if ( ! fci->f->vport_create)
+ return -ENOENT;
+
+ size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
+ vport = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!vport)) {
+ printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ vport->vport_state = FC_VPORT_UNKNOWN;
+ vport->vport_last_state = FC_VPORT_UNKNOWN;
+ vport->node_name = ids->node_name;
+ vport->port_name = ids->port_name;
+ vport->roles = ids->roles;
+ vport->vport_type = ids->vport_type;
+ if (fci->f->dd_fcvport_size)
+ vport->dd_data = &vport[1];
+ vport->shost = shost;
+ vport->channel = channel;
+ vport->flags = FC_VPORT_CREATING;
+ INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+
+ if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ kfree(vport);
+ return -ENOSPC;
+ }
+ fc_host->npiv_vports_inuse++;
+ vport->number = fc_host->next_vport_number++;
+ list_add_tail(&vport->peers, &fc_host->vports);
+ get_device(&shost->shost_gendev); /* for fc_host->vport list */
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev = &vport->dev;
+ device_initialize(dev); /* takes self reference */
+ dev->parent = get_device(pdev); /* takes parent reference */
+ dev->release = fc_vport_dev_release;
+ sprintf(dev->bus_id, "vport-%d:%d-%d",
+ shost->host_no, channel, vport->number);
+ transport_setup_device(dev);
+
+ error = device_add(dev);
+ if (error) {
+ printk(KERN_ERR "FC Virtual Port device_add failed\n");
+ goto delete_vport;
+ }
+ transport_add_device(dev);
+ transport_configure_device(dev);
+
+ error = fci->f->vport_create(vport, ids->disable);
+ if (error) {
+ printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
+ goto delete_vport_all;
+ }
+
+ /*
+ * if the parent isn't the physical adapter's Scsi_Host, ensure
+ * the Scsi_Host at least contains a symlink to the vport.
+ */
+ if (pdev != &shost->shost_gendev) {
+ error = sysfs_create_link(&shost->shost_gendev.kobj,
+ &dev->kobj, dev->bus_id);
+ if (error)
+ printk(KERN_ERR
+ "%s: Cannot create vport symlinks for "
+ "%s, err=%d\n",
+ __FUNCTION__, dev->bus_id, error);
+ }
+ spin_lock_irqsave(shost->host_lock, flags);
+ vport->flags &= ~FC_VPORT_CREATING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ dev_printk(KERN_NOTICE, pdev,
+ "%s created via shost%d channel %d\n", dev->bus_id,
+ shost->host_no, channel);
+
+ *ret_vport = vport;
+
+ return 0;
+
+delete_vport_all:
+ transport_remove_device(dev);
+ device_del(dev);
+delete_vport:
+ transport_destroy_device(dev);
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del(&vport->peers);
+ put_device(&shost->shost_gendev); /* for fc_host->vport list */
+ fc_host->npiv_vports_inuse--;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ put_device(dev->parent);
+ kfree(vport);
+
+ return error;
+}
+
+
+/**
+ * fc_vport_terminate - Admin App or LLDD requests termination of a vport
+ * @vport: fc_vport to be terminated
+ *
+ * Calls the LLDD vport_delete() function, then deallocates and removes
+ * the vport from the shost and object tree.
+ *
+ * Notes:
+ * This routine assumes no locks are held on entry.
+ **/
+int
+fc_vport_terminate(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+ struct device *dev = &vport->dev;
+ unsigned long flags;
+ int stat;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vport->flags & FC_VPORT_CREATING) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EBUSY;
+ }
+ if (vport->flags & (FC_VPORT_DEL)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EALREADY;
+ }
+ vport->flags |= FC_VPORT_DELETING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (i->f->vport_delete)
+ stat = i->f->vport_delete(vport);
+ else
+ stat = -ENOENT;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ vport->flags &= ~FC_VPORT_DELETING;
+ if (!stat) {
+ vport->flags |= FC_VPORT_DELETED;
+ list_del(&vport->peers);
+ fc_host->npiv_vports_inuse--;
+ put_device(&shost->shost_gendev); /* for fc_host->vport list */
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (stat)
+ return stat;
+
+ if (dev->parent != &shost->shost_gendev)
+ sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
+ transport_remove_device(dev);
+ device_del(dev);
+ transport_destroy_device(dev);
+
+ /*
+ * Removing our self-reference should mean our
+ * release function gets called, which will drop the remaining
+ * parent reference and free the data structure.
+ */
+ put_device(dev); /* for self-reference */
+
+ return 0; /* SUCCESS */
+}
+EXPORT_SYMBOL(fc_vport_terminate);
+
+/**
+ * fc_vport_sched_delete - workq-based delete request for a vport
+ *
+ * @work: vport to be deleted.
+ **/
+static void
+fc_vport_sched_delete(struct work_struct *work)
+{
+ struct fc_vport *vport =
+ container_of(work, struct fc_vport, vport_delete_work);
+ int stat;
+
+ stat = fc_vport_terminate(vport);
+ if (stat)
+ dev_printk(KERN_ERR, vport->dev.parent,
+ "%s: %s could not be deleted created via "
+ "shost%d channel %d - error %d\n", __FUNCTION__,
+ vport->dev.bus_id, vport->shost->host_no,
+ vport->channel, stat);
+}
+
+
+/* Original Author: Martin Hicks */
+MODULE_AUTHOR("James Smart");
MODULE_DESCRIPTION("FC Transport Attributes");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index caf1836bbeca..34c1860a259d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,9 +30,9 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
-#define ISCSI_SESSION_ATTRS 11
+#define ISCSI_SESSION_ATTRS 15
#define ISCSI_CONN_ATTRS 11
-#define ISCSI_HOST_ATTRS 0
+#define ISCSI_HOST_ATTRS 4
#define ISCSI_TRANSPORT_VERSION "2.0-724"
struct iscsi_internal {
@@ -609,12 +609,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
int t = done ? NLMSG_DONE : type;
skb = alloc_skb(len, GFP_ATOMIC);
- /*
- * FIXME:
- * user is supposed to react on iferror == -ENOMEM;
- * see iscsi_if_rx().
- */
- BUG_ON(!skb);
+ if (!skb) {
+ printk(KERN_ERR "Could not allocate skb to send reply.\n");
+ return -ENOMEM;
+ }
nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
nlh->nlmsg_flags = flags;
@@ -816,6 +814,8 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
uint32_t hostno;
session = transport->create_session(transport, &priv->t,
+ ev->u.c_session.cmds_max,
+ ev->u.c_session.queue_depth,
ev->u.c_session.initial_cmdsn,
&hostno);
if (!session)
@@ -947,15 +947,50 @@ static int
iscsi_tgt_dscvr(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
+ struct Scsi_Host *shost;
struct sockaddr *dst_addr;
+ int err;
if (!transport->tgt_dscvr)
return -EINVAL;
+ shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+ if (IS_ERR(shost)) {
+ printk(KERN_ERR "target discovery could not find host no %u\n",
+ ev->u.tgt_dscvr.host_no);
+ return -ENODEV;
+ }
+
+
dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
- return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
- ev->u.tgt_dscvr.host_no,
- ev->u.tgt_dscvr.enable, dst_addr);
+ err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+ ev->u.tgt_dscvr.enable, dst_addr);
+ scsi_host_put(shost);
+ return err;
+}
+
+static int
+iscsi_set_host_param(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ char *data = (char*)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int err;
+
+ if (!transport->set_host_param)
+ return -ENOSYS;
+
+ shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+ if (IS_ERR(shost)) {
+ printk(KERN_ERR "set_host_param could not find host no %u\n",
+ ev->u.set_host_param.host_no);
+ return -ENODEV;
+ }
+
+ err = transport->set_host_param(shost, ev->u.set_host_param.param,
+ data, ev->u.set_host_param.len);
+ scsi_host_put(shost);
+ return err;
}
static int
@@ -1049,8 +1084,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case ISCSI_UEVENT_TGT_DSCVR:
err = iscsi_tgt_dscvr(transport, ev);
break;
+ case ISCSI_UEVENT_SET_HOST_PARAM:
+ err = iscsi_set_host_param(transport, ev);
+ break;
default:
- err = -EINVAL;
+ err = -ENOSYS;
break;
}
@@ -1160,30 +1198,37 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
/*
* iSCSI session attrs
*/
-#define iscsi_session_attr_show(param) \
+#define iscsi_session_attr_show(param, perm) \
static ssize_t \
show_session_param_##param(struct class_device *cdev, char *buf) \
{ \
struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+ return -EACCES; \
return t->get_session_param(session, param, buf); \
}
-#define iscsi_session_attr(field, param) \
- iscsi_session_attr_show(param) \
+#define iscsi_session_attr(field, param, perm) \
+ iscsi_session_attr_show(param, perm) \
static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
NULL);
-iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
-iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
-iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
-iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
-iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
-iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
-iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
-iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
-iscsi_session_attr(erl, ISCSI_PARAM_ERL);
-iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
+iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -1199,6 +1244,28 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
NULL)
iscsi_priv_session_attr(recovery_tmo, "%d");
+/*
+ * iSCSI host attrs
+ */
+#define iscsi_host_attr_show(param) \
+static ssize_t \
+show_host_param_##param(struct class_device *cdev, char *buf) \
+{ \
+ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+}
+
+#define iscsi_host_attr(field, param) \
+ iscsi_host_attr_show(param) \
+static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+ NULL);
+
+iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
#define SETUP_PRIV_SESSION_RD_ATTR(field) \
do { \
priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
@@ -1222,6 +1289,14 @@ do { \
} \
} while (0)
+#define SETUP_HOST_RD_ATTR(field, param_flag) \
+do { \
+ if (tt->host_param_mask & param_flag) { \
+ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+} while (0)
+
static int iscsi_session_match(struct attribute_container *cont,
struct device *dev)
{
@@ -1323,9 +1398,16 @@ iscsi_register_transport(struct iscsi_transport *tt)
priv->t.host_attrs.ac.class = &iscsi_host_class.class;
priv->t.host_attrs.ac.match = iscsi_host_match;
priv->t.host_size = sizeof(struct iscsi_host);
- priv->host_attrs[0] = NULL;
transport_container_register(&priv->t.host_attrs);
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+ SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
+ SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
+ SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
+ BUG_ON(count > ISCSI_HOST_ATTRS);
+ priv->host_attrs[count] = NULL;
+ count = 0;
+
/* connection parameters */
priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
priv->conn_cont.ac.class = &iscsi_connection_class.class;
@@ -1364,6 +1446,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
+ SETUP_SESSION_RD_ATTR(username, ISCSI_USERNAME);
+ SETUP_SESSION_RD_ATTR(username_in, ISCSI_USERNAME_IN);
+ SETUP_SESSION_RD_ATTR(password, ISCSI_PASSWORD);
+ SETUP_SESSION_RD_ATTR(password_in, ISCSI_PASSWORD_IN);
SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
BUG_ON(count > ISCSI_SESSION_ATTRS);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index b2ef71a86292..3120f4b3a11a 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -29,6 +29,8 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/bsg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -40,6 +42,7 @@
struct sas_host_attrs {
struct list_head rphy_list;
struct mutex lock;
+ struct request_queue *q;
u32 next_target_id;
u32 next_expander_id;
int next_port_id;
@@ -152,6 +155,106 @@ static struct {
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
+static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct request *req;
+ int ret;
+ int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
+
+ while (!blk_queue_plugged(q)) {
+ req = elv_next_request(q);
+ if (!req)
+ break;
+
+ blkdev_dequeue_request(req);
+
+ spin_unlock_irq(q->queue_lock);
+
+ handler = to_sas_internal(shost->transportt)->f->smp_handler;
+ ret = handler(shost, rphy, req);
+
+ spin_lock_irq(q->queue_lock);
+
+ req->end_io(req, ret);
+ }
+}
+
+static void sas_host_smp_request(struct request_queue *q)
+{
+ sas_smp_request(q, (struct Scsi_Host *)q->queuedata, NULL);
+}
+
+static void sas_non_host_smp_request(struct request_queue *q)
+{
+ struct sas_rphy *rphy = q->queuedata;
+ sas_smp_request(q, rphy_to_shost(rphy), rphy);
+}
+
+static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
+{
+ struct request_queue *q;
+ int error;
+ struct device *dev;
+ char namebuf[BUS_ID_SIZE];
+ const char *name;
+
+ if (!to_sas_internal(shost->transportt)->f->smp_handler) {
+ printk("%s can't handle SMP requests\n", shost->hostt->name);
+ return 0;
+ }
+
+ if (rphy) {
+ q = blk_init_queue(sas_non_host_smp_request, NULL);
+ dev = &rphy->dev;
+ name = dev->bus_id;
+ } else {
+ q = blk_init_queue(sas_host_smp_request, NULL);
+ dev = &shost->shost_gendev;
+ snprintf(namebuf, sizeof(namebuf),
+ "sas_host%d", shost->host_no);
+ name = namebuf;
+ }
+ if (!q)
+ return -ENOMEM;
+
+ error = bsg_register_queue(q, dev, name);
+ if (error) {
+ blk_cleanup_queue(q);
+ return -ENOMEM;
+ }
+
+ if (rphy)
+ rphy->q = q;
+ else
+ to_sas_host_attrs(shost)->q = q;
+
+ if (rphy)
+ q->queuedata = rphy;
+ else
+ q->queuedata = shost;
+
+ set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
+
+ return 0;
+}
+
+static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
+{
+ struct request_queue *q;
+
+ if (rphy)
+ q = rphy->q;
+ else
+ q = to_sas_host_attrs(shost)->q;
+
+ if (!q)
+ return;
+
+ bsg_unregister_queue(q);
+ blk_cleanup_queue(q);
+}
+
/*
* SAS host attributes
*/
@@ -167,11 +270,26 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
sas_host->next_target_id = 0;
sas_host->next_expander_id = 0;
sas_host->next_port_id = 0;
+
+ if (sas_bsg_initialize(shost, NULL))
+ dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
+ shost->host_no);
+
+ return 0;
+}
+
+static int sas_host_remove(struct transport_container *tc, struct device *dev,
+ struct class_device *cdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(dev);
+
+ sas_bsg_remove(shost, NULL);
+
return 0;
}
static DECLARE_TRANSPORT_CLASS(sas_host_class,
- "sas_host", sas_host_setup, NULL, NULL);
+ "sas_host", sas_host_setup, sas_host_remove, NULL);
static int sas_host_match(struct attribute_container *cont,
struct device *dev)
@@ -1287,6 +1405,9 @@ int sas_rphy_add(struct sas_rphy *rphy)
return error;
transport_add_device(&rphy->dev);
transport_configure_device(&rphy->dev);
+ if (sas_bsg_initialize(shost, rphy))
+ printk("fail to a bsg device %s\n", rphy->dev.bus_id);
+
mutex_lock(&sas_host->lock);
list_add_tail(&rphy->list, &sas_host->rphy_list);
@@ -1329,6 +1450,8 @@ void sas_rphy_free(struct sas_rphy *rphy)
list_del(&rphy->list);
mutex_unlock(&sas_host->lock);
+ sas_bsg_remove(shost, rphy);
+
transport_destroy_device(dev);
put_device(dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d8c9cb24f91..424d557284a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -684,7 +684,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdp, cmd, p);
default:
- error = scsi_cmd_ioctl(filp, disk, cmd, p);
+ error = scsi_cmd_ioctl(filp, disk->queue, disk, cmd, p);
if (error != -ENOTTY)
return error;
}
@@ -1515,7 +1515,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (!scsi_device_online(sdp))
goto out;
- buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA);
+ buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
if (!buffer) {
sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
"allocation failure.\n");
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index ff62e9708e1c..ce80fa9ad815 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -420,7 +420,7 @@ static inline void borken_wait (void)
#define ULOOP( i ) for (clock = i*8;;)
#define TIMEOUT (!(clock--))
-int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
+static int __init seagate_st0x_detect (struct scsi_host_template * tpnt)
{
struct Scsi_Host *instance;
int i, j;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0c691a60a756..85d38940a6c9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1842,7 +1842,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int blk_size = buff_size;
struct page *p = NULL;
- if ((blk_size < 0) || (!sfp))
+ if (blk_size < 0)
return -EFAULT;
if (0 == blk_size)
++blk_size; /* don't know why */
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 018c65f73ac4..d63d229e2323 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -100,7 +100,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata =
- kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
printk(KERN_NOTICE "sim710: %s\n", dev->bus_id);
printk(KERN_NOTICE "sim710: irq = %d, clock = %d, base = 0x%lx, scsi_id = %d\n",
@@ -110,7 +110,6 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
printk(KERN_ERR "sim710: Failed to allocate host data\n");
goto out;
}
- memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
if(request_region(base_addr, 64, "sim710") == NULL) {
printk(KERN_ERR "sim710: Failed to reserve IO region 0x%lx\n",
@@ -139,6 +138,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
goto out_put_host;
}
+ dev_set_drvdata(dev, host);
scsi_scan_host(host);
return 0;
@@ -156,7 +156,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
static __devexit int
sim710_device_remove(struct device *dev)
{
- struct Scsi_Host *host = dev_to_shost(dev);
+ struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index a7dfb65fb842..0a6b45b1b003 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -84,7 +84,7 @@ static int __init snirm710_probe(struct platform_device *dev)
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_32BIT_MASK);
- hostdata->base = ioremap_nocache(CPHYSADDR(base), 0x100);
+ hostdata->base = ioremap_nocache(base, 0x100);
hostdata->differential = 0;
hostdata->clock = SNIRM710_CLOCK;
@@ -141,13 +141,7 @@ static struct platform_driver snirm710_driver = {
static int __init snirm710_init(void)
{
- int err;
-
- if ((err = platform_driver_register(&snirm710_driver))) {
- printk(KERN_ERR "Driver registration failed\n");
- return err;
- }
- return 0;
+ return platform_driver_register(&snirm710_driver);
}
static void __exit snirm710_exit(void)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f9a52af7f5b4..e7b6a7fde1cb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -175,7 +175,7 @@ static void scsi_cd_put(struct scsi_cd *cd)
* an inode for that to work, and we do not always have one.
*/
-int sr_media_change(struct cdrom_device_info *cdi, int slot)
+static int sr_media_change(struct cdrom_device_info *cdi, int slot)
{
struct scsi_cd *cd = cdi->handle;
int retval;
@@ -885,7 +885,11 @@ static int __init init_sr(void)
rc = register_blkdev(SCSI_CDROM_MAJOR, "sr");
if (rc)
return rc;
- return scsi_register_driver(&sr_template.gendrv);
+ rc = scsi_register_driver(&sr_template.gendrv);
+ if (rc)
+ unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
+
+ return rc;
}
static void __exit exit_sr(void)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 55bfeccf68a2..a4f7b8465773 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3549,7 +3549,8 @@ static int st_ioctl(struct inode *inode, struct file *file,
!capable(CAP_SYS_RAWIO))
i = -EPERM;
else
- i = scsi_cmd_ioctl(file, STp->disk, cmd_in, p);
+ i = scsi_cmd_ioctl(file, STp->disk->queue,
+ STp->disk, cmd_in, p);
if (i != -ENOTTY)
return i;
break;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9ac83abc4028..72f6d8015358 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -395,53 +395,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
static int stex_map_sg(struct st_hba *hba,
struct req_msg *req, struct st_ccb *ccb)
{
- struct pci_dev *pdev = hba->pdev;
struct scsi_cmnd *cmd;
- dma_addr_t dma_handle;
- struct scatterlist *src;
+ struct scatterlist *sg;
struct st_sgtable *dst;
- int i;
+ int i, nseg;
cmd = ccb->cmd;
dst = (struct st_sgtable *)req->variable;
dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
- dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);
-
- if (cmd->use_sg) {
- int n_elem;
+ dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
- src = (struct scatterlist *) cmd->request_buffer;
- n_elem = pci_map_sg(pdev, src,
- cmd->use_sg, cmd->sc_data_direction);
- if (n_elem <= 0)
- return -EIO;
+ nseg = scsi_dma_map(cmd);
+ if (nseg < 0)
+ return -EIO;
+ if (nseg) {
+ ccb->sg_count = nseg;
+ dst->sg_count = cpu_to_le16((u16)nseg);
- ccb->sg_count = n_elem;
- dst->sg_count = cpu_to_le16((u16)n_elem);
-
- for (i = 0; i < n_elem; i++, src++) {
- dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
+ scsi_for_each_sg(cmd, sg, nseg, i) {
+ dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
dst->table[i].addr =
- cpu_to_le32(sg_dma_address(src) & 0xffffffff);
+ cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
dst->table[i].addr_hi =
- cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
+ cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
}
dst->table[--i].ctrl |= SG_CF_EOT;
- return 0;
}
- dma_handle = pci_map_single(pdev, cmd->request_buffer,
- cmd->request_bufflen, cmd->sc_data_direction);
- cmd->SCp.dma_handle = dma_handle;
-
- ccb->sg_count = 1;
- dst->sg_count = cpu_to_le16(1);
- dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
- dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
- dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
- dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
-
return 0;
}
@@ -451,24 +432,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
size_t lcount;
size_t len;
void *s, *d, *base = NULL;
- if (*count > cmd->request_bufflen)
- *count = cmd->request_bufflen;
+ size_t offset;
+
+ if (*count > scsi_bufflen(cmd))
+ *count = scsi_bufflen(cmd);
lcount = *count;
while (lcount) {
len = lcount;
s = (void *)src;
- if (cmd->use_sg) {
- size_t offset = *count - lcount;
- s += offset;
- base = scsi_kmap_atomic_sg(cmd->request_buffer,
- sg_count, &offset, &len);
- if (base == NULL) {
- *count -= lcount;
- return;
- }
- d = base + offset;
- } else
- d = cmd->request_buffer;
+
+ offset = *count - lcount;
+ s += offset;
+ base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
+ sg_count, &offset, &len);
+ if (!base) {
+ *count -= lcount;
+ return;
+ }
+ d = base + offset;
if (direction == ST_TO_CMD)
memcpy(d, s, len);
@@ -476,30 +457,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
memcpy(s, d, len);
lcount -= len;
- if (cmd->use_sg)
- scsi_kunmap_atomic_sg(base);
+ scsi_kunmap_atomic_sg(base);
}
}
static int stex_direct_copy(struct scsi_cmnd *cmd,
const void *src, size_t count)
{
- struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
size_t cp_len = count;
int n_elem = 0;
- if (cmd->use_sg) {
- n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- if (n_elem <= 0)
- return 0;
- }
+ n_elem = scsi_dma_map(cmd);
+ if (n_elem < 0)
+ return 0;
stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
- if (cmd->use_sg)
- pci_unmap_sg(hba->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
+ scsi_dma_unmap(cmd);
+
return cp_len == count;
}
@@ -678,18 +653,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
return 0;
}
-static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
-{
- if (cmd->sc_data_direction != DMA_NONE) {
- if (cmd->use_sg)
- pci_unmap_sg(hba->pdev, cmd->request_buffer,
- cmd->use_sg, cmd->sc_data_direction);
- else
- pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
- cmd->request_bufflen, cmd->sc_data_direction);
- }
-}
-
static void stex_scsi_done(struct st_ccb *ccb)
{
struct scsi_cmnd *cmd = ccb->cmd;
@@ -756,8 +719,8 @@ static void stex_ys_commands(struct st_hba *hba,
if (ccb->cmd->cmnd[0] == MGT_CMD &&
resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
- ccb->cmd->request_bufflen =
- le32_to_cpu(*(__le32 *)&resp->variable[0]);
+ scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
+ le32_to_cpu(*(__le32 *)&resp->variable[0]));
return;
}
@@ -855,7 +818,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
stex_controller_info(hba, ccb);
- stex_unmap_sg(hba, ccb->cmd);
+ scsi_dma_unmap(ccb->cmd);
stex_scsi_done(ccb);
hba->out_req_cnt--;
} else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
@@ -1028,7 +991,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
}
fail_out:
- stex_unmap_sg(hba, cmd);
+ scsi_dma_unmap(cmd);
hba->wait_ccb->req = NULL; /* nullify the req's future return */
hba->wait_ccb = NULL;
result = FAILED;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index bbeb2451d32f..2c87db98cdfb 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -493,7 +493,7 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
goto fail;
host->max_id = (hme ? 16 : 8);
- esp = host_to_esp(host);
+ esp = shost_priv(host);
esp->host = host;
esp->dev = esp_dev;
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 2ca950582bc3..92bfaeafe30d 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -332,8 +332,7 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
int i;
unsigned long flags = 0;
unsigned char status_reg, pio_int_reg, int_reg;
- struct scatterlist *sglist;
- unsigned int sgcount;
+ struct scatterlist *sg;
unsigned int tot_trans = 0;
/* We search the base address of the host adapter which caused the interrupt */
@@ -429,19 +428,15 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
{
current_command->SCp.phase = data_out;
outb(FLUSH_FIFO, base + COMMAND_REG);
- sym53c416_set_transfer_counter(base, current_command->request_bufflen);
+ sym53c416_set_transfer_counter(base,
+ scsi_bufflen(current_command));
outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
- if(!current_command->use_sg)
- tot_trans = sym53c416_write(base, current_command->request_buffer, current_command->request_bufflen);
- else
- {
- sgcount = current_command->use_sg;
- sglist = current_command->request_buffer;
- while(sgcount--)
- {
- tot_trans += sym53c416_write(base, SG_ADDRESS(sglist), sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(current_command,
+ sg, scsi_sg_count(current_command), i) {
+ tot_trans += sym53c416_write(base,
+ SG_ADDRESS(sg),
+ sg->length);
}
if(tot_trans < current_command->underflow)
printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
@@ -455,19 +450,16 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
{
current_command->SCp.phase = data_in;
outb(FLUSH_FIFO, base + COMMAND_REG);
- sym53c416_set_transfer_counter(base, current_command->request_bufflen);
+ sym53c416_set_transfer_counter(base,
+ scsi_bufflen(current_command));
+
outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
- if(!current_command->use_sg)
- tot_trans = sym53c416_read(base, current_command->request_buffer, current_command->request_bufflen);
- else
- {
- sgcount = current_command->use_sg;
- sglist = current_command->request_buffer;
- while(sgcount--)
- {
- tot_trans += sym53c416_read(base, SG_ADDRESS(sglist), sglist->length);
- sglist++;
- }
+
+ scsi_for_each_sg(current_command,
+ sg, scsi_sg_count(current_command), i) {
+ tot_trans += sym53c416_read(base,
+ SG_ADDRESS(sg),
+ sg->length);
}
if(tot_trans < current_command->underflow)
printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 4d78c7e87cca..15a51459c81f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -146,41 +146,17 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
- int dma_dir = cmd->sc_data_direction;
+ if (SYM_UCMD_PTR(cmd)->data_mapped)
+ scsi_dma_unmap(cmd);
- switch(SYM_UCMD_PTR(cmd)->data_mapped) {
- case 2:
- pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
- break;
- case 1:
- pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
- cmd->request_bufflen, dma_dir);
- break;
- }
SYM_UCMD_PTR(cmd)->data_mapped = 0;
}
-static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- dma_addr_t mapping;
- int dma_dir = cmd->sc_data_direction;
-
- mapping = pci_map_single(pdev, cmd->request_buffer,
- cmd->request_bufflen, dma_dir);
- if (mapping) {
- SYM_UCMD_PTR(cmd)->data_mapped = 1;
- SYM_UCMD_PTR(cmd)->data_mapping = mapping;
- }
-
- return mapping;
-}
-
static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
int use_sg;
- int dma_dir = cmd->sc_data_direction;
- use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
+ use_sg = scsi_dma_map(cmd);
if (use_sg > 0) {
SYM_UCMD_PTR(cmd)->data_mapped = 2;
SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
@@ -191,8 +167,6 @@ static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
#define unmap_scsi_data(np, cmd) \
__unmap_scsi_data(np->s.device, cmd)
-#define map_scsi_single_data(np, cmd) \
- __map_scsi_single_data(np->s.device, cmd)
#define map_scsi_sg_data(np, cmd) \
__map_scsi_sg_data(np->s.device, cmd)
/*
@@ -322,55 +296,20 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
*/
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
- cmd->resid = resid;
+ scsi_set_resid(cmd, resid);
cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
}
-
-/*
- * Build the scatter/gather array for an I/O.
- */
-
-static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
-{
- struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
- int segment;
- unsigned int len = cmd->request_bufflen;
-
- if (len) {
- dma_addr_t baddr = map_scsi_single_data(np, cmd);
- if (baddr) {
- if (len & 1) {
- struct sym_tcb *tp = &np->target[cp->target];
- if (tp->head.wval & EWS) {
- len++;
- cp->odd_byte_adjustment++;
- }
- }
- cp->data_len = len;
- sym_build_sge(np, data, baddr, len);
- segment = 1;
- } else {
- segment = -2;
- }
- } else {
- segment = 0;
- }
-
- return segment;
-}
-
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
{
int segment;
- int use_sg = (int) cmd->use_sg;
+ int use_sg;
cp->data_len = 0;
- if (!use_sg)
- segment = sym_scatter_no_sglist(np, cp, cmd);
- else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
- struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
+ use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > 0) {
+ struct scatterlist *sg;
struct sym_tcb *tp = &np->target[cp->target];
struct sym_tblmove *data;
@@ -381,9 +320,9 @@ static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd
data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
- for (segment = 0; segment < use_sg; segment++) {
- dma_addr_t baddr = sg_dma_address(&scatter[segment]);
- unsigned int len = sg_dma_len(&scatter[segment]);
+ scsi_for_each_sg(cmd, sg, use_sg, segment) {
+ dma_addr_t baddr = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
if ((len & 1) && (tp->head.wval & EWS)) {
len++;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index e022d3c71b59..0f097ba4f712 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -255,7 +255,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
*/
static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
{
- cmd->resid = resid;
+ scsi_set_resid(cmd, resid);
cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
}
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index e7b85e832eb5..5db1520f8ba9 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -457,28 +457,21 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
error = 1;
DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
/* Map SG list */
- } else if (pcmd->use_sg) {
- pSRB->pSegmentList = (struct scatterlist *) pcmd->request_buffer;
- pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, pcmd->use_sg,
- pcmd->sc_data_direction);
+ } else if (scsi_sg_count(pcmd)) {
+ int nseg;
+
+ nseg = scsi_dma_map(pcmd);
+
+ pSRB->pSegmentList = scsi_sglist(pcmd);
+ pSRB->SGcount = nseg;
+
/* TODO: error handling */
- if (!pSRB->SGcount)
+ if (nseg < 0)
error = 1;
DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
- __FUNCTION__, pcmd->request_buffer, pSRB->SGcount, pcmd->use_sg));
+ __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
/* Map single segment */
- } else if (pcmd->request_buffer && pcmd->request_bufflen) {
- pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->request_buffer, pcmd->request_bufflen);
- pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
- pcmd->sc_data_direction);
- cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
-
- /* TODO: error handling */
- if (pSRB->SGcount != 1)
- error = 1;
- DEBUG1(printk("%s(): Mapped request buffer %p at %x\n", __FUNCTION__, pcmd->request_buffer, cmdp->saved_dma_handle));
- /* No mapping !? */
- } else
+ } else
pSRB->SGcount = 0;
return error;
@@ -494,12 +487,10 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
if (pSRB->SRBFlag) {
pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
- } else if (pcmd->use_sg) {
- pci_unmap_sg(pdev, pcmd->request_buffer, pcmd->use_sg, pcmd->sc_data_direction);
- DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n", __FUNCTION__, pcmd->request_buffer, pcmd->use_sg));
- } else if (pcmd->request_buffer && pcmd->request_bufflen) {
- pci_unmap_sg(pdev, &pSRB->Segmentx, 1, pcmd->sc_data_direction);
- DEBUG1(printk("%s(): Unmapped request buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
+ } else {
+ scsi_dma_unmap(pcmd);
+ DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
+ __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
}
}
@@ -1153,9 +1144,9 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
struct scatterlist *psgl;
pSRB->TotalXferredLen = 0;
pSRB->SGIndex = 0;
- if (pcmd->use_sg) {
+ if (scsi_sg_count(pcmd)) {
size_t saved;
- pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer;
+ pSRB->pSegmentList = scsi_sglist(pcmd);
psgl = pSRB->pSegmentList;
//dc390_pci_sync(pSRB);
@@ -1179,12 +1170,6 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
- } else if(pcmd->request_buffer) {
- //dc390_pci_sync(pSRB);
-
- sg_dma_len(&pSRB->Segmentx) = pcmd->request_bufflen - pSRB->Saved_Ptr;
- pSRB->SGcount = 1;
- pSRB->pSegmentList = (struct scatterlist *) &pSRB->Segmentx;
} else {
pSRB->SGcount = 0;
printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
@@ -1579,7 +1564,8 @@ dc390_Disconnect( struct dc390_acb* pACB )
if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
!(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
{ /* Selection time out */
- pSRB->TargetStatus = SCSI_STAT_SEL_TIMEOUT;
+ pSRB->AdaptStatus = H_SEL_TIMEOUT;
+ pSRB->TargetStatus = 0;
goto disc1;
}
else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED))
@@ -1612,7 +1598,7 @@ dc390_Reselect( struct dc390_acb* pACB )
if( !( pACB->scan_devices ) )
{
struct scsi_cmnd *pcmd = pSRB->pcmd;
- pcmd->resid = pcmd->request_bufflen;
+ scsi_set_resid(pcmd, scsi_bufflen(pcmd));
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
dc390_Going_remove(pDCB, pSRB);
dc390_Free_insert(pACB, pSRB);
@@ -1695,7 +1681,6 @@ dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_
pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));
pSRB->SRBFlag |= AUTO_REQSENSE;
- pSRB->SavedSGCount = pcmd->use_sg;
pSRB->SavedTotXLen = pSRB->TotalXferredLen;
pSRB->AdaptStatus = 0;
pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */
@@ -1728,22 +1713,21 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
{ /* Last command was a Request Sense */
pSRB->SRBFlag &= ~AUTO_REQSENSE;
pSRB->AdaptStatus = 0;
- pSRB->TargetStatus = CHECK_CONDITION << 1;
+ pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION;
//pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status);
- if (status == (CHECK_CONDITION << 1))
+ if (status == SAM_STAT_CHECK_CONDITION)
pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0);
else /* Retry */
{
if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */)
{
/* Don't retry on TEST_UNIT_READY */
- pcmd->result = MK_RES_LNX(DRIVER_SENSE,DID_OK,0,CHECK_CONDITION);
+ pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION);
REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\
(u32) pcmd->result, (u32) pSRB->TotalXferredLen));
} else {
SET_RES_DRV(pcmd->result, DRIVER_SENSE);
- pcmd->use_sg = pSRB->SavedSGCount;
//pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
pSRB->TotalXferredLen = 0;
@@ -1754,7 +1738,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
}
if( status )
{
- if( status_byte(status) == CHECK_CONDITION )
+ if (status == SAM_STAT_CHECK_CONDITION)
{
if (dc390_RequestSense(pACB, pDCB, pSRB)) {
SET_RES_DID(pcmd->result, DID_ERROR);
@@ -1762,22 +1746,14 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
}
return;
}
- else if( status_byte(status) == QUEUE_FULL )
+ else if (status == SAM_STAT_TASK_SET_FULL)
{
scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
- pcmd->use_sg = pSRB->SavedSGCount;
DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
}
- else if(status == SCSI_STAT_SEL_TIMEOUT)
- {
- pSRB->AdaptStatus = H_SEL_TIMEOUT;
- pSRB->TargetStatus = 0;
- pcmd->result = MK_RES(0,DID_NO_CONNECT,0,0);
- /* Devices are removed below ... */
- }
- else if (status_byte(status) == BUSY &&
+ else if (status == SAM_STAT_BUSY &&
(pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) &&
pACB->scan_devices)
{
@@ -1795,12 +1771,17 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
else
{ /* Target status == 0 */
status = pSRB->AdaptStatus;
- if(status & H_OVER_UNDER_RUN)
+ if (status == H_OVER_UNDER_RUN)
{
pSRB->TargetStatus = 0;
SET_RES_DID(pcmd->result,DID_OK);
SET_RES_MSG(pcmd->result,pSRB->EndMessage);
}
+ else if (status == H_SEL_TIMEOUT)
+ {
+ pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0);
+ /* Devices are removed below ... */
+ }
else if( pSRB->SRBStatus & PARITY_ERROR)
{
//pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0);
@@ -1816,7 +1797,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
}
cmd_done:
- pcmd->resid = pcmd->request_bufflen - pSRB->TotalXferredLen;
+ scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);
dc390_Going_remove (pDCB, pSRB);
/* Add to free list */
@@ -2101,10 +2082,9 @@ static int dc390_slave_alloc(struct scsi_device *scsi_device)
uint id = scsi_device->id;
uint lun = scsi_device->lun;
- pDCB = kmalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
+ pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL);
if (!pDCB)
return -ENOMEM;
- memset(pDCB, 0, sizeof(struct dc390_dcb));
if (!pACB->DCBCnt++) {
pACB->pLinkDCB = pDCB;
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index c3d8c80cfb38..77adc54dbd16 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -57,7 +57,6 @@ u8 SGcount;
u8 MsgCnt;
u8 EndMessage;
-u8 SavedSGCount;
u8 MsgInBuf[6];
u8 MsgOutBuf[6];
@@ -258,13 +257,6 @@ struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
#define H_BAD_CCB_OR_SG 0x1A
#define H_ABORT 0x0FF
-/*; SCSI Status byte codes*/
-/* The values defined in include/scsi/scsi.h, to be shifted << 1 */
-
-#define SCSI_STAT_UNEXP_BUS_F 0xFD /*; Unexpect Bus Free */
-#define SCSI_STAT_BUS_RST_DETECT 0xFE /*; Scsi Bus Reset detected */
-#define SCSI_STAT_SEL_TIMEOUT 0xFF /*; Selection Time out */
-
/* cmd->result */
#define RES_TARGET 0x000000FF /* Target State */
#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
@@ -273,7 +265,7 @@ struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
#define RES_DRV 0xFF000000 /* DRIVER_ codes */
#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
-#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
+#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0)
#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0)
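The MK_RES_LNX() change above drops the "<< 1" because the driver now passes SAM_STAT_* codes, which already carry the SCSI status byte in its on-the-wire form; the old CHECK_CONDITION-style constants were the right-shifted variants (see the removed comment) and needed the shift when composed into cmd->result. A minimal illustrative sketch, not part of the patch:

#include <scsi/scsi.h>	/* SAM_STAT_CHECK_CONDITION, DID_OK, DRIVER_SENSE */

static inline int example_result_word(void)
{
	/* driver byte | host byte | message byte | SCSI status byte */
	return (DRIVER_SENSE << 24) | (DID_OK << 16) | (0 << 8) |
		SAM_STAT_CHECK_CONDITION;	/* 0x02, no extra shift needed */
}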
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 3de08a15de40..9e8232a1f169 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,7 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
static void map_dma(unsigned int i, unsigned int j) {
unsigned int data_len = 0;
unsigned int k, count, pci_dir;
- struct scatterlist *sgpnt;
+ struct scatterlist *sg;
struct mscp *cpp;
struct scsi_cmnd *SCpnt;
@@ -1124,33 +1124,28 @@ static void map_dma(unsigned int i, unsigned int j) {
cpp->sense_len = sizeof SCpnt->sense_buffer;
- if (!SCpnt->use_sg) {
-
- /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */
- if (!SCpnt->request_bufflen) pci_dir = PCI_DMA_BIDIRECTIONAL;
-
- if (SCpnt->request_buffer)
- cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev,
- SCpnt->request_buffer, SCpnt->request_bufflen, pci_dir));
-
- cpp->data_len = H2DEV(SCpnt->request_bufflen);
- return;
- }
-
- sgpnt = (struct scatterlist *) SCpnt->request_buffer;
- count = pci_map_sg(HD(j)->pdev, sgpnt, SCpnt->use_sg, pci_dir);
-
- for (k = 0; k < count; k++) {
- cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k]));
- cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k]));
- data_len += sgpnt[k].length;
- }
-
- cpp->sg = TRUE;
- cpp->use_sg = SCpnt->use_sg;
- cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
- SCpnt->use_sg * sizeof(struct sg_list), pci_dir));
- cpp->data_len = H2DEV(data_len);
+ if (scsi_bufflen(SCpnt)) {
+ count = scsi_dma_map(SCpnt);
+ BUG_ON(count < 0);
+
+ scsi_for_each_sg(SCpnt, sg, count, k) {
+ cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
+ cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
+ data_len += sg->length;
+ }
+
+ cpp->sg = TRUE;
+ cpp->use_sg = scsi_sg_count(SCpnt);
+ cpp->data_address =
+ H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
+ cpp->use_sg * sizeof(struct sg_list),
+ pci_dir));
+ cpp->data_len = H2DEV(data_len);
+
+ } else {
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
+ }
}
static void unmap_dma(unsigned int i, unsigned int j) {
@@ -1165,8 +1160,7 @@ static void unmap_dma(unsigned int i, unsigned int j) {
pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_unmap_sg(HD(j)->pdev, SCpnt->request_buffer, SCpnt->use_sg, pci_dir);
+ scsi_dma_unmap(SCpnt);
if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
@@ -1187,9 +1181,9 @@ static void sync_dma(unsigned int i, unsigned int j) {
pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- if (SCpnt->use_sg)
- pci_dma_sync_sg_for_cpu(HD(j)->pdev, SCpnt->request_buffer,
- SCpnt->use_sg, pci_dir);
+ if (scsi_sg_count(SCpnt))
+ pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt), pci_dir);
if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 56906aba5ee3..c08235d5afc9 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -675,16 +675,15 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
{
- struct scatterlist *sl;
+ struct scatterlist *sg;
long transfer_length = 0;
int i, max;
- sl = (struct scatterlist *) SCpnt->request_buffer;
- max = SCpnt->use_sg;
- for (i = 0; i < max; i++) {
- mscp->sglist[i].address = isa_page_to_bus(sl[i].page) + sl[i].offset;
- mscp->sglist[i].num_bytes = sl[i].length;
- transfer_length += sl[i].length;
+ max = scsi_sg_count(SCpnt);
+ scsi_for_each_sg(SCpnt, sg, max, i) {
+ mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset;
+ mscp->sglist[i].num_bytes = sg->length;
+ transfer_length += sg->length;
}
mscp->number_of_sg_list = max;
mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
@@ -730,15 +729,15 @@ static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
my_mscp->target_id = SCpnt->device->id;
my_mscp->ch_no = 0;
my_mscp->lun = SCpnt->device->lun;
- if (SCpnt->use_sg) {
+ if (scsi_sg_count(SCpnt)) {
/* Set scatter/gather flag in SCSI command packet */
my_mscp->sg = TRUE;
build_sg_list(my_mscp, SCpnt);
} else {
/* Unset scatter/gather flag in SCSI command packet */
my_mscp->sg = FALSE;
- my_mscp->transfer_data = isa_virt_to_bus(SCpnt->request_buffer);
- my_mscp->transfer_data_length = SCpnt->request_bufflen;
+ my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
+ my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
}
my_mscp->command_link = 0; /*???*/
my_mscp->scsi_command_link_id = 0; /*???*/
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fa4e08e508ad..b92ff047af38 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -89,6 +89,8 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <asm/irq.h>
+
#include "wd33c93.h"
#define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns
@@ -1762,7 +1764,7 @@ static char setup_buffer[SETUP_BUFFER_SIZE];
static char setup_used[MAX_SETUP_ARGS];
static int done_setup = 0;
-int
+static int
wd33c93_setup(char *str)
{
int i;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 30be76514c43..d6fd4259c56b 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1091,6 +1091,7 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
unchar *cdb = (unchar *) SCpnt->cmnd;
unchar idlun;
short cdblen;
+ int nseg;
Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
cdblen = SCpnt->cmd_len;
@@ -1106,28 +1107,29 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
SCpnt->host_scribble = (unchar *) scb;
scb->host = host;
- if (SCpnt->use_sg) {
- struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer;
+ nseg = scsi_sg_count(SCpnt);
+ if (nseg) {
+ struct scatterlist *sg;
unsigned i;
if (SCpnt->device->host->sg_tablesize == SG_NONE) {
panic("wd7000_queuecommand: scatter/gather not supported.\n");
}
- dprintk("Using scatter/gather with %d elements.\n", SCpnt->use_sg);
+ dprintk("Using scatter/gather with %d elements.\n", nseg);
sgb = scb->sgb;
scb->op = 1;
any2scsi(scb->dataptr, (int) sgb);
- any2scsi(scb->maxlen, SCpnt->use_sg * sizeof(Sgb));
+ any2scsi(scb->maxlen, nseg * sizeof(Sgb));
- for (i = 0; i < SCpnt->use_sg; i++) {
- any2scsi(sgb[i].ptr, isa_page_to_bus(sg[i].page) + sg[i].offset);
- any2scsi(sgb[i].len, sg[i].length);
+ scsi_for_each_sg(SCpnt, sg, nseg, i) {
+ any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset);
+ any2scsi(sgb[i].len, sg->length);
}
} else {
scb->op = 0;
- any2scsi(scb->dataptr, isa_virt_to_bus(SCpnt->request_buffer));
- any2scsi(scb->maxlen, SCpnt->request_bufflen);
+ any2scsi(scb->dataptr, isa_virt_to_bus(scsi_sglist(SCpnt)));
+ any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
}
/* FIXME: drop lock and yield here ? */
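The scatter/gather conversions above (sym53c8xx_2, tmscsim, u14-34f, ultrastor, wd7000) all adopt the same SCSI mid-layer accessor pattern instead of touching cmd->use_sg and cmd->request_buffer directly. A minimal sketch of that pattern, with hw_fill_segment() standing in (hypothetically) for whatever a real driver programs into its hardware SG table:

#include <scsi/scsi_cmnd.h>	/* scsi_dma_map(), scsi_for_each_sg(), ... */

static void hw_fill_segment(struct scsi_cmnd *cmd, int idx,
			    dma_addr_t addr, unsigned int len)
{
	/* hypothetical: a real driver would fill its hardware SG entry here */
}

static int example_build_sglist(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int nseg, i;

	nseg = scsi_dma_map(cmd);	/* 0: no data, < 0: mapping failed */
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i)
		hw_fill_segment(cmd, i, sg_dma_address(sg), sg_dma_len(sg));

	return nseg;
}

static void example_complete(struct scsi_cmnd *cmd, unsigned int xferred)
{
	scsi_dma_unmap(cmd);
	scsi_set_resid(cmd, scsi_bufflen(cmd) - xferred);
}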
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
new file mode 100644
index 000000000000..c822debc2668
--- /dev/null
+++ b/drivers/scsi/zorro7xx.c
@@ -0,0 +1,181 @@
+/*
+ * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
+ * Amiga MacroSystemUS WarpEngine SCSI controller.
+ * Amiga Technologies/DKB A4091 SCSI controller.
+ *
+ * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * plus modifications of the 53c7xx.c driver to support the Amiga.
+ *
+ * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/zorro.h>
+#include <asm/amigaints.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
+
+#include "53c700.h"
+
+MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
+MODULE_DESCRIPTION("Amiga Zorro NCR53C710 driver");
+MODULE_LICENSE("GPL");
+
+
+static struct scsi_host_template zorro7xx_scsi_driver_template = {
+ .proc_name = "zorro7xx",
+ .this_id = 7,
+ .module = THIS_MODULE,
+};
+
+static struct zorro_driver_data {
+ const char *name;
+ unsigned long offset;
+ int absolute; /* offset is absolute address */
+} zorro7xx_driver_data[] __devinitdata = {
+ { .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 },
+ { .name = "WarpEngine 40xx", .offset = 0x40000 },
+ { .name = "A4091", .offset = 0x800000 },
+ { .name = "GForce 040/060", .offset = 0x40000 },
+ { 0 }
+};
+
+static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
+ {
+ .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[0],
+ },
+ {
+ .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[1],
+ },
+ {
+ .id = ZORRO_PROD_CBM_A4091_1,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[2],
+ },
+ {
+ .id = ZORRO_PROD_CBM_A4091_2,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[2],
+ },
+ {
+ .id = ZORRO_PROD_GVP_GFORCE_040_060,
+ .driver_data = (unsigned long)&zorro7xx_driver_data[3],
+ },
+ { 0 }
+};
+
+static int __devinit zorro7xx_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct Scsi_Host * host = NULL;
+ struct NCR_700_Host_Parameters *hostdata;
+ struct zorro_driver_data *zdd;
+ unsigned long board, ioaddr;
+
+ board = zorro_resource_start(z);
+ zdd = (struct zorro_driver_data *)ent->driver_data;
+
+ if (zdd->absolute) {
+ ioaddr = zdd->offset;
+ } else {
+ ioaddr = board + zdd->offset;
+ }
+
+ if (!zorro_request_device(z, zdd->name)) {
+ printk(KERN_ERR "zorro7xx: cannot reserve region 0x%lx, abort\n",
+ board);
+ return -EBUSY;
+ }
+
+ hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
+ if (hostdata == NULL) {
+ printk(KERN_ERR "zorro7xx: Failed to allocate host data\n");
+ goto out_release;
+ }
+
+ memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
+
+ /* Fill in the required pieces of hostdata */
+ if (ioaddr > 0x01000000)
+ hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
+ else
+ hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
+
+ hostdata->clock = 50;
+ hostdata->chip710 = 1;
+
+ /* Settings for at least WarpEngine 40xx */
+ hostdata->ctest7_extra = CTEST7_TT1;
+
+ zorro7xx_scsi_driver_template.name = zdd->name;
+
+ /* and register the chip */
+ host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata,
+ &z->dev);
+ if (!host) {
+ printk(KERN_ERR "zorro7xx: No host detected; "
+ "board configuration problem?\n");
+ goto out_free;
+ }
+
+ host->this_id = 7;
+ host->base = ioaddr;
+ host->irq = IRQ_AMIGA_PORTS;
+
+ if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi",
+ host)) {
+ printk(KERN_ERR "zorro7xx: request_irq failed\n");
+ goto out_put_host;
+ }
+
+ zorro_set_drvdata(z, host);
+ scsi_scan_host(host);
+
+ return 0;
+
+ out_put_host:
+ scsi_host_put(host);
+ out_free:
+ if (ioaddr > 0x01000000)
+ iounmap(hostdata->base);
+ kfree(hostdata);
+ out_release:
+ zorro_release_device(z);
+
+ return -ENODEV;
+}
+
+static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
+{
+ struct Scsi_Host *host = zorro_get_drvdata(z);
+ struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
+
+ scsi_remove_host(host);
+
+ NCR_700_release(host);
+ kfree(hostdata);
+ free_irq(host->irq, host);
+ zorro_release_device(z);
+}
+
+static struct zorro_driver zorro7xx_driver = {
+ .name = "zorro7xx-scsi",
+ .id_table = zorro7xx_zorro_tbl,
+ .probe = zorro7xx_init_one,
+ .remove = __devexit_p(zorro7xx_remove_one),
+};
+
+static int __init zorro7xx_scsi_init(void)
+{
+ return zorro_register_driver(&zorro7xx_driver);
+}
+
+static void __exit zorro7xx_scsi_exit(void)
+{
+ zorro_unregister_driver(&zorro7xx_driver);
+}
+
+module_init(zorro7xx_scsi_init);
+module_exit(zorro7xx_scsi_exit);
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index 68817a7d8c0d..2aa6bfe8fdb3 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -934,8 +934,6 @@ static void change_speed(ser_info_t *info)
/*
* Set up parity check flag
*/
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
info->read_status_mask = (BD_SC_EMPTY | BD_SC_OV);
if (I_INPCK(info->tty))
info->read_status_mask |= BD_SC_FR | BD_SC_PR;
@@ -1527,11 +1525,6 @@ static void rs_360_set_termios(struct tty_struct *tty, struct ktermios *old_term
{
ser_info_t *info = (ser_info_t *)tty->driver_data;
- if ( (tty->termios->c_cflag == old_termios->c_cflag)
- && ( RELEVANT_IFLAG(tty->termios->c_iflag)
- == RELEVANT_IFLAG(old_termios->c_iflag)))
- return;
-
change_speed(info);
#ifdef modem_control
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c84dab083a85..0b3ec38ae614 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2514,12 +2514,18 @@ static int __init serial8250_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
+static int __init serial8250_console_early_setup(void)
+{
+ return serial8250_find_port_for_earlycon();
+}
+
static struct uart_driver serial8250_reg;
static struct console serial8250_console = {
.name = "ttyS",
.write = serial8250_console_write,
.device = uart_console_device,
.setup = serial8250_console_setup,
+ .early_setup = serial8250_console_early_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial8250_reg,
@@ -2533,7 +2539,7 @@ static int __init serial8250_console_init(void)
}
console_initcall(serial8250_console_init);
-static int __init find_port(struct uart_port *p)
+int serial8250_find_port(struct uart_port *p)
{
int line;
struct uart_port *port;
@@ -2546,26 +2552,6 @@ static int __init find_port(struct uart_port *p)
return -ENODEV;
}
-int __init serial8250_start_console(struct uart_port *port, char *options)
-{
- int line;
-
- line = find_port(port);
- if (line < 0)
- return -ENODEV;
-
- add_preferred_console("ttyS", line, options);
- printk("Adding console on ttyS%d at %s 0x%lx (options '%s')\n",
- line, port->iotype == UPIO_MEM ? "MMIO" : "I/O port",
- port->iotype == UPIO_MEM ? (unsigned long) port->mapbase :
- (unsigned long) port->iobase, options);
- if (!(serial8250_console.flags & CON_ENABLED)) {
- serial8250_console.flags &= ~CON_PRINTBUFFER;
- register_console(&serial8250_console);
- }
- return line;
-}
-
#define SERIAL8250_CONSOLE &serial8250_console
#else
#define SERIAL8250_CONSOLE NULL
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index 7e511199b4c5..947c20507e1f 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -17,13 +17,11 @@
* we locate the device directly by its MMIO or I/O port address.
*
* The user can specify the device directly, e.g.,
- * console=uart,io,0x3f8,9600n8
- * console=uart,mmio,0xff5e0000,115200n8
- * or platform code can call early_uart_console_init() to set
- * the early UART device.
- *
- * After the normal serial driver starts, we try to locate the
- * matching ttyS device and start a console there.
+ * earlycon=uart8250,io,0x3f8,9600n8
+ * earlycon=uart8250,mmio,0xff5e0000,115200n8
+ * or
+ * console=uart8250,io,0x3f8,9600n8
+ * console=uart8250,mmio,0xff5e0000,115200n8
*/
#include <linux/tty.h>
@@ -32,17 +30,21 @@
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial.h>
+#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/serial.h>
+#ifdef CONFIG_FIX_EARLYCON_MEM
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#endif
-struct early_uart_device {
+struct early_serial8250_device {
struct uart_port port;
char options[16]; /* e.g., 115200n8 */
unsigned int baud;
};
-static struct early_uart_device early_device __initdata;
-static int early_uart_registered __initdata;
+static struct early_serial8250_device early_device;
static unsigned int __init serial_in(struct uart_port *port, int offset)
{
@@ -80,7 +82,7 @@ static void __init putc(struct uart_port *port, int c)
serial_out(port, UART_TX, c);
}
-static void __init early_uart_write(struct console *console, const char *s, unsigned int count)
+static void __init early_serial8250_write(struct console *console, const char *s, unsigned int count)
{
struct uart_port *port = &early_device.port;
unsigned int ier;
@@ -111,7 +113,7 @@ static unsigned int __init probe_baud(struct uart_port *port)
return (port->uartclk / 16) / quot;
}
-static void __init init_port(struct early_uart_device *device)
+static void __init init_port(struct early_serial8250_device *device)
{
struct uart_port *port = &device->port;
unsigned int divisor;
@@ -130,10 +132,9 @@ static void __init init_port(struct early_uart_device *device)
serial_out(port, UART_LCR, c & ~UART_LCR_DLAB);
}
-static int __init parse_options(struct early_uart_device *device, char *options)
+static int __init parse_options(struct early_serial8250_device *device, char *options)
{
struct uart_port *port = &device->port;
- int mapsize = 64;
int mmio, length;
if (!options)
@@ -143,12 +144,18 @@ static int __init parse_options(struct early_uart_device *device, char *options)
if (!strncmp(options, "mmio,", 5)) {
port->iotype = UPIO_MEM;
port->mapbase = simple_strtoul(options + 5, &options, 0);
- port->membase = ioremap(port->mapbase, mapsize);
+#ifdef CONFIG_FIX_EARLYCON_MEM
+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, port->mapbase & PAGE_MASK);
+ port->membase = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ port->membase += port->mapbase & ~PAGE_MASK;
+#else
+ port->membase = ioremap(port->mapbase, 64);
if (!port->membase) {
printk(KERN_ERR "%s: Couldn't ioremap 0x%lx\n",
__FUNCTION__, port->mapbase);
return -ENOMEM;
}
+#endif
mmio = 1;
} else if (!strncmp(options, "io,", 3)) {
port->iotype = UPIO_PORT;
@@ -175,9 +182,16 @@ static int __init parse_options(struct early_uart_device *device, char *options)
return 0;
}
-static int __init early_uart_setup(struct console *console, char *options)
+static struct console early_serial8250_console __initdata = {
+ .name = "uart",
+ .write = early_serial8250_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+static int __init early_serial8250_setup(char *options)
{
- struct early_uart_device *device = &early_device;
+ struct early_serial8250_device *device = &early_device;
int err;
if (device->port.membase || device->port.iobase)
@@ -190,61 +204,48 @@ static int __init early_uart_setup(struct console *console, char *options)
return 0;
}
-static struct console early_uart_console __initdata = {
- .name = "uart",
- .write = early_uart_write,
- .setup = early_uart_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-static int __init early_uart_console_init(void)
-{
- if (!early_uart_registered) {
- register_console(&early_uart_console);
- early_uart_registered = 1;
- }
- return 0;
-}
-console_initcall(early_uart_console_init);
-
-int __init early_serial_console_init(char *cmdline)
+int __init setup_early_serial8250_console(char *cmdline)
{
char *options;
int err;
- options = strstr(cmdline, "console=uart,");
- if (!options)
- return -ENODEV;
+ options = strstr(cmdline, "uart8250,");
+ if (!options) {
+ options = strstr(cmdline, "uart,");
+ if (!options)
+ return 0;
+ }
options = strchr(cmdline, ',') + 1;
- if ((err = early_uart_setup(NULL, options)) < 0)
+ if ((err = early_serial8250_setup(options)) < 0)
return err;
- return early_uart_console_init();
+
+ register_console(&early_serial8250_console);
+
+ return 0;
}
-static int __init early_uart_console_switch(void)
+int __init serial8250_find_port_for_earlycon(void)
{
- struct early_uart_device *device = &early_device;
+ struct early_serial8250_device *device = &early_device;
struct uart_port *port = &device->port;
- int mmio, line;
+ int line;
+ int ret;
- if (!(early_uart_console.flags & CON_ENABLED))
- return 0;
+ if (!device->port.membase && !device->port.iobase)
+ return -ENODEV;
- /* Try to start the normal driver on a matching line. */
- mmio = (port->iotype == UPIO_MEM);
- line = serial8250_start_console(port, device->options);
+ line = serial8250_find_port(port);
if (line < 0)
- printk("No ttyS device at %s 0x%lx for console\n",
- mmio ? "MMIO" : "I/O port",
- mmio ? port->mapbase :
- (unsigned long) port->iobase);
+ return -ENODEV;
- unregister_console(&early_uart_console);
- if (mmio)
- iounmap(port->membase);
+ ret = update_console_cmdline("uart", 8250,
+ "ttyS", line, device->options);
+ if (ret < 0)
+ ret = update_console_cmdline("uart", 0,
+ "ttyS", line, device->options);
- return 0;
+ return ret;
}
-late_initcall(early_uart_console_switch);
+
+early_param("earlycon", setup_early_serial8250_console);
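The flow introduced above generalises: an early_param handler registers a CON_BOOT console as soon as the command line is parsed, and once the regular driver has registered its ports, the console's .early_setup hook locates the matching ttyS line and hands over via update_console_cmdline(). A compressed sketch of that pattern; all my_* names are hypothetical:

#include <linux/console.h>
#include <linux/init.h>

static void my_early_write(struct console *con, const char *s, unsigned int n)
{
	/* poll the UART and push the characters out */
}

static struct console my_early_console __initdata = {
	.name	= "myuart",
	.write	= my_early_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,	/* dropped when a real console registers */
	.index	= -1,
};

static int __init my_setup_earlycon(char *options)
{
	/* parse "earlycon=myuart,..." options and program the port, then: */
	register_console(&my_early_console);
	return 0;
}
early_param("earlycon", my_setup_earlycon);

/* later, from the real driver's console .early_setup hook */
static int my_handoff(int line, char *options)
{
	return update_console_cmdline("myuart", 0, "ttyS", line, options);
}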
diff --git a/drivers/serial/8250_hp300.c b/drivers/serial/8250_hp300.c
index 53e81a44c1a3..2cf0953fe0ec 100644
--- a/drivers/serial/8250_hp300.c
+++ b/drivers/serial/8250_hp300.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
#include <linux/delay.h>
#include <linux/dio.h>
#include <linux/console.h>
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 315ea9916456..18f629706448 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -62,8 +62,22 @@ config SERIAL_8250_CONSOLE
kernel will automatically use the first serial line, /dev/ttyS0, as
system console.
+ You can also select the console device and speed on the kernel command
+ line, using options such as
+ "console=uart8250,io,0x3f8,9600n8" or
+ "console=uart8250,mmio,0xff5e0000,115200n8";
+ the kernel then switches to the normal serial console once the
+ corresponding port is ready. Alternatively,
+ "earlycon=uart8250,io,0x3f8,9600n8" or
+ "earlycon=uart8250,mmio,0xff5e0000,115200n8" sets up the early console only.
+
If unsure, say N.
+config FIX_EARLYCON_MEM
+ bool
+ depends on X86
+ default y
+
config SERIAL_8250_GSC
tristate
depends on SERIAL_8250 && GSC
@@ -324,6 +338,34 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_SB1250_DUART
+ tristate "BCM1xxx on-chip DUART serial support"
+ depends on SIBYTE_SB1xxx_SOC=y
+ select SERIAL_CORE
+ default y
+ ---help---
+ Support for the asynchronous serial interface (DUART) included in
+ the BCM1250 and derived System-On-a-Chip (SOC) devices. Note that
+ the letter D in DUART stands for "dual", which is how the device
+ is implemented. Depending on the SOC configuration there may be
+ one or more DUARTs available of which all are handled.
+
+ If unsure, say Y. To compile this driver as a module, choose M here:
+ the module will be called sb1250-duart.
+
+config SERIAL_SB1250_DUART_CONSOLE
+ bool "Support for console on a BCM1xxx DUART serial port"
+ depends on SERIAL_SB1250_DUART=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ If unsure, say Y.
+
config SERIAL_ATMEL
bool "AT91 / AT32 on-chip serial port support"
depends on (ARM && ARCH_AT91) || AVR32
@@ -444,6 +486,36 @@ config SERIAL_DZ_CONSOLE
If unsure, say Y.
+config SERIAL_ZS
+ tristate "DECstation Z85C30 serial support"
+ depends on MACH_DECSTATION
+ select SERIAL_CORE
+ default y
+ ---help---
+ Support for the Zilog Z85C30 serial communications controller used
+ for serial ports in newer DECstation systems. These include the
+ DECsystem 5900 and all models of the DECstation and DECsystem 5000
+ systems except for the model 200.
+
+ If unsure, say Y. To compile this driver as a module, choose M here:
+ the module will be called zs.
+
+config SERIAL_ZS_CONSOLE
+ bool "Support for console on a DECstation Z85C30 serial port"
+ depends on SERIAL_ZS=y
+ select SERIAL_CORE_CONSOLE
+ default y
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode).
+
+ Note that the firmware uses ttyS1 as the serial console on the
+ Maxine and ttyS3 on the others using this driver.
+
+ If unsure, say Y.
+
config SERIAL_21285
tristate "DC21285 serial port support"
depends on ARM && FOOTBRIDGE
@@ -556,7 +628,7 @@ choice
config SERIAL_BFIN_DMA
bool "DMA mode"
- depends on DMA_UNCACHED_1M
+ depends on DMA_UNCACHED_1M && !KGDB_UART
help
This driver works under DMA mode. If this option is selected, the
blackfin simple dma driver is also enabled.
@@ -599,7 +671,7 @@ config UART0_RTS_PIN
config SERIAL_BFIN_UART1
bool "Enable UART1"
- depends on SERIAL_BFIN && (BF534 || BF536 || BF537)
+ depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x)
help
Enable UART1
@@ -612,18 +684,58 @@ config BFIN_UART1_CTSRTS
config UART1_CTS_PIN
int "UART1 CTS pin"
- depends on BFIN_UART1_CTSRTS
+ depends on BFIN_UART1_CTSRTS && (BF53x || BF561)
default -1
help
Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
config UART1_RTS_PIN
int "UART1 RTS pin"
- depends on BFIN_UART1_CTSRTS
+ depends on BFIN_UART1_CTSRTS && (BF53x || BF561)
default -1
help
Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
+config SERIAL_BFIN_UART2
+ bool "Enable UART2"
+ depends on SERIAL_BFIN && (BF54x)
+ help
+ Enable UART2
+
+config BFIN_UART2_CTSRTS
+ bool "Enable UART2 hardware flow control"
+ depends on SERIAL_BFIN_UART2
+ help
+ Enable hardware flow control in the driver. GPIOs are used to emulate
+ the CTS/RTS signals.
+
+config UART2_CTS_PIN
+ int "UART2 CTS pin"
+ depends on BFIN_UART2_CTSRTS
+ default -1
+ help
+ Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
+
+config UART2_RTS_PIN
+ int "UART2 RTS pin"
+ depends on BFIN_UART2_CTSRTS
+ default -1
+ help
+ Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
+
+config SERIAL_BFIN_UART3
+ bool "Enable UART3"
+ depends on SERIAL_BFIN && (BF54x)
+ help
+ Enable UART3
+
+config BFIN_UART3_CTSRTS
+ bool "Enable UART3 hardware flow control"
+ depends on SERIAL_BFIN_UART3
+ help
+ Enable hardware flow control in the driver. GPIOs are used to emulate
+ the CTS/RTS signals.
+
config SERIAL_IMX
bool "IMX serial port support"
depends on ARM && ARCH_IMX
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 08ad0d978183..af6377d480d7 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_V850E_UART) += v850e_uart.o
obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
+obj-$(CONFIG_SERIAL_ZS) += zs.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
@@ -51,6 +52,7 @@ obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
obj-$(CONFIG_SERIAL_ICOM) += icom.o
obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
+obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 954073c6ce3a..72229df9dc11 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -716,7 +716,7 @@ static int pl011_probe(struct amba_device *dev, void *id)
goto out;
}
- uap = kmalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
+ uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
if (uap == NULL) {
ret = -ENOMEM;
goto out;
@@ -728,7 +728,6 @@ static int pl011_probe(struct amba_device *dev, void *id)
goto free;
}
- memset(uap, 0, sizeof(struct uart_amba_port));
uap->clk = clk_get(&dev->dev, "UARTCLK");
if (IS_ERR(uap->clk)) {
ret = PTR_ERR(uap->clk);
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 3320bcd92c0a..4d6b3c56d20e 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -114,6 +114,7 @@ struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
unsigned short suspended; /* is port suspended? */
+ int break_active; /* break being received */
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
@@ -252,6 +253,7 @@ static void atmel_break_ctl(struct uart_port *port, int break_state)
*/
static void atmel_rx_chars(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
struct tty_struct *tty = port->info->tty;
unsigned int status, ch, flg;
@@ -267,13 +269,29 @@ static void atmel_rx_chars(struct uart_port *port)
* note that the error handling code is
* out of the main execution path
*/
- if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
+ if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
+ | ATMEL_US_OVRE | ATMEL_US_RXBRK)
+ || atmel_port->break_active)) {
UART_PUT_CR(port, ATMEL_US_RSTSTA); /* clear error */
- if (status & ATMEL_US_RXBRK) {
+ if (status & ATMEL_US_RXBRK
+ && !atmel_port->break_active) {
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); /* ignore side-effect */
port->icount.brk++;
+ atmel_port->break_active = 1;
+ UART_PUT_IER(port, ATMEL_US_RXBRK);
if (uart_handle_break(port))
goto ignore_char;
+ } else {
+ /*
+ * This is either the end-of-break
+ * condition or we've received at
+ * least one character without RXBRK
+ * being set. In both cases, the next
+ * RXBRK will indicate start-of-break.
+ */
+ UART_PUT_IDR(port, ATMEL_US_RXBRK);
+ status &= ~ATMEL_US_RXBRK;
+ atmel_port->break_active = 0;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
@@ -352,6 +370,16 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
/* Interrupt receive */
if (pending & ATMEL_US_RXRDY)
atmel_rx_chars(port);
+ else if (pending & ATMEL_US_RXBRK) {
+ /*
+ * End of break detected. If it came along
+ * with a character, atmel_rx_chars will
+ * handle it.
+ */
+ UART_PUT_CR(port, ATMEL_US_RSTSTA);
+ UART_PUT_IDR(port, ATMEL_US_RXBRK);
+ atmel_port->break_active = 0;
+ }
// TODO: All reads to CSR will clear these interrupts!
if (pending & ATMEL_US_RIIC) port->icount.rng++;
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 22569bd5d821..66c92bc36f3d 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -41,6 +41,11 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
+#ifdef CONFIG_KGDB_UART
+#include <linux/kgdb.h>
+#include <asm/irq_regs.h>
+#endif
+
#include <asm/gpio.h>
#include <asm/mach/bfin_serial_5xx.h>
@@ -81,15 +86,29 @@ static void bfin_serial_stop_tx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+#ifdef CONFIG_BF54x
+ while (!(UART_GET_LSR(uart) & TEMT))
+ continue;
+#endif
+
#ifdef CONFIG_SERIAL_BFIN_DMA
disable_dma(uart->tx_dma_channel);
#else
+#ifdef CONFIG_BF54x
+ /* Waiting for Transmission Finished */
+ while (!(UART_GET_LSR(uart) & TFI))
+ continue;
+ /* Clear TFI bit */
+ UART_PUT_LSR(uart, TFI);
+ UART_CLEAR_IER(uart, ETBEI);
+#else
unsigned short ier;
ier = UART_GET_IER(uart);
ier &= ~ETBEI;
UART_PUT_IER(uart, ier);
#endif
+#endif
}
/*
@@ -102,12 +121,16 @@ static void bfin_serial_start_tx(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_DMA
bfin_serial_dma_tx_chars(uart);
#else
+#ifdef CONFIG_BF54x
+ UART_SET_IER(uart, ETBEI);
+#else
unsigned short ier;
ier = UART_GET_IER(uart);
ier |= ETBEI;
UART_PUT_IER(uart, ier);
bfin_serial_tx_chars(uart);
#endif
+#endif
}
/*
@@ -116,11 +139,18 @@ static void bfin_serial_start_tx(struct uart_port *port)
static void bfin_serial_stop_rx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+#ifdef CONFIG_BF54x
+ UART_CLEAR_IER(uart, ERBFI);
+#else
unsigned short ier;
ier = UART_GET_IER(uart);
+#ifdef CONFIG_KGDB_UART
+ if (uart->port.line != CONFIG_KGDB_UART_PORT)
+#endif
ier &= ~ERBFI;
UART_PUT_IER(uart, ier);
+#endif
}
/*
@@ -130,6 +160,49 @@ static void bfin_serial_enable_ms(struct uart_port *port)
{
}
+#ifdef CONFIG_KGDB_UART
+static int kgdb_entry_state;
+
+void kgdb_put_debug_char(int chr)
+{
+ struct bfin_serial_port *uart;
+
+ if (CONFIG_KGDB_UART_PORT<0 || CONFIG_KGDB_UART_PORT>=NR_PORTS)
+ uart = &bfin_serial_ports[0];
+ else
+ uart = &bfin_serial_ports[CONFIG_KGDB_UART_PORT];
+
+ while (!(UART_GET_LSR(uart) & THRE)) {
+ __builtin_bfin_ssync();
+ }
+ UART_PUT_LCR(uart, UART_GET_LCR(uart)&(~DLAB));
+ __builtin_bfin_ssync();
+ UART_PUT_CHAR(uart, (unsigned char)chr);
+ __builtin_bfin_ssync();
+}
+
+int kgdb_get_debug_char(void)
+{
+ struct bfin_serial_port *uart;
+ unsigned char chr;
+
+ if (CONFIG_KGDB_UART_PORT<0 || CONFIG_KGDB_UART_PORT>=NR_PORTS)
+ uart = &bfin_serial_ports[0];
+ else
+ uart = &bfin_serial_ports[CONFIG_KGDB_UART_PORT];
+
+ while(!(UART_GET_LSR(uart) & DR)) {
+ __builtin_bfin_ssync();
+ }
+ UART_PUT_LCR(uart, UART_GET_LCR(uart)&(~DLAB));
+ __builtin_bfin_ssync();
+ chr = UART_GET_CHAR(uart);
+ __builtin_bfin_ssync();
+
+ return chr;
+}
+#endif
+
#ifdef CONFIG_SERIAL_BFIN_PIO
static void local_put_char(struct bfin_serial_port *uart, char ch)
{
@@ -152,6 +225,9 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
struct tty_struct *tty = uart->port.info->tty;
unsigned int status, ch, flg;
+#ifdef CONFIG_KGDB_UART
+ struct pt_regs *regs = get_irq_regs();
+#endif
#ifdef BF533_FAMILY
static int in_break = 0;
#endif
@@ -160,6 +236,27 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
ch = UART_GET_CHAR(uart);
uart->port.icount.rx++;
+#ifdef CONFIG_KGDB_UART
+ if (uart->port.line == CONFIG_KGDB_UART_PORT) {
+ if (uart->port.cons->index == CONFIG_KGDB_UART_PORT && ch == 0x1) { /* Ctrl + A */
+ kgdb_breakkey_pressed(regs);
+ return;
+ } else if (kgdb_entry_state == 0 && ch == '$') {/* connection from KGDB */
+ kgdb_entry_state = 1;
+ } else if (kgdb_entry_state == 1 && ch == 'q') {
+ kgdb_entry_state = 0;
+ kgdb_breakkey_pressed(regs);
+ return;
+ } else if (ch == 0x3) {/* Ctrl + C */
+ kgdb_entry_state = 0;
+ kgdb_breakkey_pressed(regs);
+ return;
+ } else {
+ kgdb_entry_state = 0;
+ }
+ }
+#endif
+
#ifdef BF533_FAMILY
/* The BF533 family of processors have a nice misbehavior where
* they continuously generate characters for a "single" break.
@@ -250,10 +347,21 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
+#ifdef CONFIG_BF54x
+ unsigned short status;
+ spin_lock(&uart->port.lock);
+ status = UART_GET_LSR(uart);
+ while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) {
+ bfin_serial_rx_chars(uart);
+ status = UART_GET_LSR(uart);
+ }
+ spin_unlock(&uart->port.lock);
+#else
spin_lock(&uart->port.lock);
while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY)
bfin_serial_rx_chars(uart);
spin_unlock(&uart->port.lock);
+#endif
return IRQ_HANDLED;
}
@@ -261,10 +369,21 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
+#ifdef CONFIG_BF54x
+ unsigned short status;
+ spin_lock(&uart->port.lock);
+ status = UART_GET_LSR(uart);
+ while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) {
+ bfin_serial_tx_chars(uart);
+ status = UART_GET_LSR(uart);
+ }
+ spin_unlock(&uart->port.lock);
+#else
spin_lock(&uart->port.lock);
while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY)
bfin_serial_tx_chars(uart);
spin_unlock(&uart->port.lock);
+#endif
return IRQ_HANDLED;
}
@@ -275,7 +394,6 @@ static void bfin_serial_do_work(struct work_struct *work)
bfin_serial_mctrl_check(uart);
}
-
#endif
#ifdef CONFIG_SERIAL_BFIN_DMA
@@ -324,9 +442,13 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
set_dma_x_modify(uart->tx_dma_channel, 1);
enable_dma(uart->tx_dma_channel);
+#ifdef CONFIG_BF54x
+ UART_SET_IER(uart, ETBEI);
+#else
ier = UART_GET_IER(uart);
ier |= ETBEI;
UART_PUT_IER(uart, ier);
+#endif
spin_unlock_irqrestore(&uart->port.lock, flags);
}
@@ -406,9 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
clear_dma_irqstat(uart->tx_dma_channel);
disable_dma(uart->tx_dma_channel);
+#ifdef CONFIG_BF54x
+ UART_CLEAR_IER(uart, ETBEI);
+#else
ier = UART_GET_IER(uart);
ier &= ~ETBEI;
UART_PUT_IER(uart, ier);
+#endif
xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1);
uart->port.icount.tx+=uart->tx_count;
@@ -571,7 +697,11 @@ static int bfin_serial_startup(struct uart_port *port)
uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
add_timer(&(uart->rx_dma_timer));
#else
+# ifdef CONFIG_KGDB_UART
+ if (uart->port.line != CONFIG_KGDB_UART_PORT && request_irq
+# else
if (request_irq
+# endif
(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
"BFIN_UART_RX", uart)) {
printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
@@ -586,7 +716,11 @@ static int bfin_serial_startup(struct uart_port *port)
return -EBUSY;
}
#endif
+#ifdef CONFIG_BF54x
+ UART_SET_IER(uart, ERBFI);
+#else
UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI);
+#endif
return 0;
}
@@ -601,6 +735,9 @@ static void bfin_serial_shutdown(struct uart_port *port)
free_dma(uart->rx_dma_channel);
del_timer(&(uart->rx_dma_timer));
#else
+#ifdef CONFIG_KGDB_UART
+ if (uart->port.line != CONFIG_KGDB_UART_PORT)
+#endif
free_irq(uart->port.irq, uart);
free_irq(uart->port.irq+1, uart);
#endif
@@ -674,29 +811,41 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
/* Disable UART */
ier = UART_GET_IER(uart);
+#ifdef CONFIG_BF54x
+ UART_CLEAR_IER(uart, 0xF);
+#else
UART_PUT_IER(uart, 0);
+#endif
+#ifndef CONFIG_BF54x
/* Set DLAB in LCR to Access DLL and DLH */
val = UART_GET_LCR(uart);
val |= DLAB;
UART_PUT_LCR(uart, val);
SSYNC();
+#endif
UART_PUT_DLL(uart, quot & 0xFF);
SSYNC();
UART_PUT_DLH(uart, (quot >> 8) & 0xFF);
SSYNC();
+#ifndef CONFIG_BF54x
/* Clear DLAB in LCR to Access THR RBR IER */
val = UART_GET_LCR(uart);
val &= ~DLAB;
UART_PUT_LCR(uart, val);
SSYNC();
+#endif
UART_PUT_LCR(uart, lcr);
/* Enable UART */
+#ifdef CONFIG_BF54x
+ UART_SET_IER(uart, ier);
+#else
UART_PUT_IER(uart, ier);
+#endif
val = UART_GET_GCTL(uart);
val |= UCEN;
@@ -808,15 +957,15 @@ static void __init bfin_serial_init_ports(void)
bfin_serial_resource[i].uart_rts_pin;
#endif
bfin_serial_hw_init(&bfin_serial_ports[i]);
-
}
+
}
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
static void bfin_serial_console_putchar(struct uart_port *port, int ch)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
- while (!(UART_GET_LSR(uart)))
+ while (!(UART_GET_LSR(uart) & THRE))
barrier();
UART_PUT_CHAR(uart, ch);
SSYNC();
@@ -868,18 +1017,22 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
case 2: *bits = 7; break;
case 3: *bits = 8; break;
}
+#ifndef CONFIG_BF54x
/* Set DLAB in LCR to Access DLL and DLH */
val = UART_GET_LCR(uart);
val |= DLAB;
UART_PUT_LCR(uart, val);
+#endif
dll = UART_GET_DLL(uart);
dlh = UART_GET_DLH(uart);
+#ifndef CONFIG_BF54x
/* Clear DLAB in LCR to Access THR RBR IER */
val = UART_GET_LCR(uart);
val &= ~DLAB;
UART_PUT_LCR(uart, val);
+#endif
*baud = get_sclk() / (16*(dll | dlh << 8));
}
@@ -931,6 +1084,10 @@ static int __init bfin_serial_rs_console_init(void)
{
bfin_serial_init_ports();
register_console(&bfin_serial_console);
+#ifdef CONFIG_KGDB_UART
+ kgdb_entry_state = 0;
+ init_kgdb_uart();
+#endif
return 0;
}
console_initcall(bfin_serial_rs_console_init);
@@ -1023,6 +1180,10 @@ static struct platform_driver bfin_serial_driver = {
static int __init bfin_serial_init(void)
{
int ret;
+#ifdef CONFIG_KGDB_UART
+ struct bfin_serial_port *uart = &bfin_serial_ports[CONFIG_KGDB_UART_PORT];
+ struct termios t;
+#endif
pr_info("Serial: Blackfin serial driver\n");
@@ -1036,6 +1197,21 @@ static int __init bfin_serial_init(void)
uart_unregister_driver(&bfin_serial_reg);
}
}
+#ifdef CONFIG_KGDB_UART
+ if (uart->port.cons->index != CONFIG_KGDB_UART_PORT) {
+ request_irq(uart->port.irq, bfin_serial_int,
+ IRQF_DISABLED, "BFIN_UART_RX", uart);
+ pr_info("Request irq for kgdb uart port\n");
+ UART_PUT_IER(uart, UART_GET_IER(uart) | ERBFI);
+ __builtin_bfin_ssync();
+ t.c_cflag = CS8|B57600;
+ t.c_iflag = 0;
+ t.c_oflag = 0;
+ t.c_lflag = ICANON;
+ t.c_line = CONFIG_KGDB_UART_PORT;
+ bfin_serial_set_termios(&uart->port, &t, &t);
+ }
+#endif
return ret;
}
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index b63ff8dd7304..cefde58dbad2 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -678,7 +678,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
}
bdp->cbd_datlen = count;
bdp->cbd_sc |= BD_SC_READY;
- __asm__("eieio");
+ eieio();
/* Get next BD. */
if (bdp->cbd_sc & BD_SC_WRAP)
bdp = pinfo->tx_bd_base;
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index e42faa4e4282..dc1967176fe2 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1114,8 +1114,8 @@ static int __init imx_serial_init(void)
static void __exit imx_serial_exit(void)
{
- uart_unregister_driver(&imx_reg);
platform_driver_unregister(&serial_imx_driver);
+ uart_unregister_driver(&imx_reg);
}
module_init(imx_serial_init);
diff --git a/drivers/serial/ip22zilog.c b/drivers/serial/ip22zilog.c
index c3abfb39f316..f3257f708ef9 100644
--- a/drivers/serial/ip22zilog.c
+++ b/drivers/serial/ip22zilog.c
@@ -862,6 +862,7 @@ ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
up->cflag = termios->c_cflag;
ip22zilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port));
+ uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&up->port.lock, flags);
}
@@ -1017,6 +1018,8 @@ ip22serial_console_termios(struct console *con, char *options)
}
con->cflag = cflag | CS8; /* 8N1 */
+
+ uart_update_timeout(&ip22zilog_port_table[con->index].port, cflag, baud);
}
static int __init ip22zilog_console_setup(struct console *con, char *options)
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 81792e6eeb2d..6767ee381cd1 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -88,7 +88,7 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&brd->bd_intr_lock);
/* store which revision we have */
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+ brd->rev = pdev->revision;
brd->irq = pdev->irq;
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 7ffdaeaf0545..a64d85821996 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -17,6 +17,11 @@
#include <asm/of_platform.h>
#include <asm/prom.h>
+struct of_serial_info {
+ int type;
+ int line;
+};
+
/*
* Fill a struct uart_port for a given device node
*/
@@ -62,6 +67,7 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev,
static int __devinit of_platform_serial_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
+ struct of_serial_info *info;
struct uart_port port;
int port_type;
int ret;
@@ -69,30 +75,35 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
if (of_find_property(ofdev->node, "used-by-rtas", NULL))
return -EBUSY;
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
port_type = (unsigned long)id->data;
ret = of_platform_serial_setup(ofdev, port_type, &port);
if (ret)
goto out;
switch (port_type) {
- case PORT_UNKNOWN:
- dev_info(&ofdev->dev, "Unknown serial port found, "
- "attempting to use 8250 driver\n");
- /* fallthrough */
case PORT_8250 ... PORT_MAX_8250:
ret = serial8250_register_port(&port);
break;
default:
/* need to add code for these */
+ case PORT_UNKNOWN:
+ dev_info(&ofdev->dev, "Unknown serial port found, ignored\n");
ret = -ENODEV;
break;
}
if (ret < 0)
goto out;
- ofdev->dev.driver_data = (void *)(unsigned long)ret;
+ info->type = port_type;
+ info->line = ret;
+ ofdev->dev.driver_data = info;
return 0;
out:
+ kfree(info);
irq_dispose_mapping(port.irq);
return ret;
}
@@ -102,8 +113,16 @@ out:
*/
static int of_platform_serial_remove(struct of_device *ofdev)
{
- int line = (unsigned long)ofdev->dev.driver_data;
- serial8250_unregister_port(line);
+ struct of_serial_info *info = ofdev->dev.driver_data;
+ switch (info->type) {
+ case PORT_8250 ... PORT_MAX_8250:
+ serial8250_unregister_port(info->line);
+ break;
+ default:
+ /* need to add code for these */
+ break;
+ }
+ kfree(info);
return 0;
}
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 10bc0209cd66..3f26c4b2f322 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -78,7 +78,7 @@
#include <asm/hardware.h>
-#include <asm/arch/regs-serial.h>
+#include <asm/plat-s3c/regs-serial.h>
#include <asm/arch/regs-gpio.h>
/* structures */
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
new file mode 100644
index 000000000000..1d9d7285172a
--- /dev/null
+++ b/drivers/serial/sb1250-duart.c
@@ -0,0 +1,972 @@
+/*
+ * drivers/serial/sb1250-duart.c
+ *
+ * Support for the asynchronous serial interface (DUART) included
+ * in the BCM1250 and derived System-On-a-Chip (SOC) devices.
+ *
+ * Copyright (c) 2007 Maciej W. Rozycki
+ *
+ * Derived from drivers/char/sb1250_duart.c for which the following
+ * copyright applies:
+ *
+ * Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * References:
+ *
+ * "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
+ */
+
+#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/war.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_uart.h>
+#include <asm/sibyte/swarm.h>
+
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+
+#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0)
+#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line))
+
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+
+#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0)
+#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0)
+#define SBD_INT(line) (K_INT_UART_0 + (line))
+
+#else
+#error invalid SB1250 UART configuration
+
+#endif
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
+MODULE_LICENSE("GPL");
+
+
+#define DUART_MAX_CHIP 2
+#define DUART_MAX_SIDE 2
+
+/*
+ * Per-port state.
+ */
+struct sbd_port {
+ struct sbd_duart *duart;
+ struct uart_port port;
+ unsigned char __iomem *memctrl;
+ int tx_stopped;
+ int initialised;
+};
+
+/*
+ * Per-DUART state for the shared register space.
+ */
+struct sbd_duart {
+ struct sbd_port sport[2];
+ unsigned long mapctrl;
+ atomic_t map_guard;
+};
+
+#define to_sport(uport) container_of(uport, struct sbd_port, port)
+
+static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
+
+#define __unused __attribute__((__unused__))
+
+
+/*
+ * Reading and writing SB1250 DUART registers.
+ *
+ * There are three register spaces: two per-channel ones and
+ * a shared one. We have to define accessors appropriately.
+ * All registers are 64 bits wide, and all but the Baud Rate Clock
+ * registers define only the 8 least significant bits. There is
+ * also an erratum workaround to take into account. Raw accessors
+ * use the full register width, while the cooked ones truncate it
+ * intentionally so that the rest of the driver does not have to care.
+ */
+static u64 __read_sbdchn(struct sbd_port *sport, int reg)
+{
+ void __iomem *csr = sport->port.membase + reg;
+
+ return __raw_readq(csr);
+}
+
+static u64 __read_sbdshr(struct sbd_port *sport, int reg)
+{
+ void __iomem *csr = sport->memctrl + reg;
+
+ return __raw_readq(csr);
+}
+
+static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
+{
+ void __iomem *csr = sport->port.membase + reg;
+
+ __raw_writeq(value, csr);
+}
+
+static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
+{
+ void __iomem *csr = sport->memctrl + reg;
+
+ __raw_writeq(value, csr);
+}
+
+/*
+ * In bug 1956, we get glitches that can mess up uart registers. This
+ * "read-mode-reg after any register access" is an accepted workaround.
+ */
+static void __war_sbd1956(struct sbd_port *sport)
+{
+ __read_sbdchn(sport, R_DUART_MODE_REG_1);
+ __read_sbdchn(sport, R_DUART_MODE_REG_2);
+}
+
+static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
+{
+ unsigned char retval;
+
+ retval = __read_sbdchn(sport, reg);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+ return retval;
+}
+
+static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
+{
+ unsigned char retval;
+
+ retval = __read_sbdshr(sport, reg);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+ return retval;
+}
+
+static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
+{
+ __write_sbdchn(sport, reg, value);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+}
+
+static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
+{
+ __write_sbdshr(sport, reg, value);
+ if (SIBYTE_1956_WAR)
+ __war_sbd1956(sport);
+}
+
+
+static int sbd_receive_ready(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
+}
+
+static int sbd_receive_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (sbd_receive_ready(sport) && loops--)
+ read_sbdchn(sport, R_DUART_RX_HOLD);
+ return loops;
+}
+
+static int __unused sbd_transmit_ready(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
+}
+
+static int __unused sbd_transmit_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (!sbd_transmit_ready(sport) && loops--)
+ udelay(2);
+ return loops;
+}
+
+static int sbd_transmit_empty(struct sbd_port *sport)
+{
+ return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
+}
+
+static int sbd_line_drain(struct sbd_port *sport)
+{
+ int loops = 10000;
+
+ while (!sbd_transmit_empty(sport) && loops--)
+ udelay(2);
+ return loops;
+}
+
+
+static unsigned int sbd_tx_empty(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int sbd_get_mctrl(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mctrl, status;
+
+ status = read_sbdshr(sport, R_DUART_IN_PORT);
+ status >>= (uport->line) % 2;
+ mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
+ (!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
+ (!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
+ (!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
+ return mctrl;
+}
+
+static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int clr = 0, set = 0, mode2;
+
+ if (mctrl & TIOCM_DTR)
+ set |= M_DUART_SET_OPR2;
+ else
+ clr |= M_DUART_CLR_OPR2;
+ if (mctrl & TIOCM_RTS)
+ set |= M_DUART_SET_OPR0;
+ else
+ clr |= M_DUART_CLR_OPR0;
+ clr <<= (uport->line) % 2;
+ set <<= (uport->line) % 2;
+
+ mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
+ mode2 &= ~M_DUART_CHAN_MODE;
+ if (mctrl & TIOCM_LOOP)
+ mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
+ else
+ mode2 |= V_DUART_CHAN_MODE_NORMAL;
+
+ write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
+ write_sbdshr(sport, R_DUART_SET_OPR, set);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
+}
+
+static void sbd_stop_tx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ sport->tx_stopped = 1;
+};
+
+static void sbd_start_tx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mask;
+
+ /* Enable tx interrupts. */
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ mask |= M_DUART_IMR_TX;
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+
+ /* Go!, go!, go!... */
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+ sport->tx_stopped = 0;
+};
+
+static void sbd_stop_rx(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+};
+
+static void sbd_enable_ms(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_AUXCTL_X,
+ M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
+}
+
+static void sbd_break_ctl(struct uart_port *uport, int break_state)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ if (break_state == -1)
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
+ else
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
+}
+
+
+static void sbd_receive_chars(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ struct uart_icount *icount;
+ unsigned int status, ch, flag;
+ int count;
+
+ for (count = 16; count; count--) {
+ status = read_sbdchn(sport, R_DUART_STATUS);
+ if (!(status & M_DUART_RX_RDY))
+ break;
+
+ ch = read_sbdchn(sport, R_DUART_RX_HOLD);
+
+ flag = TTY_NORMAL;
+
+ icount = &uport->icount;
+ icount->rx++;
+
+ if (unlikely(status &
+ (M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
+ if (status & M_DUART_RCVD_BRK) {
+ icount->brk++;
+ if (uart_handle_break(uport))
+ continue;
+ } else if (status & M_DUART_FRM_ERR)
+ icount->frame++;
+ else if (status & M_DUART_PARITY_ERR)
+ icount->parity++;
+ if (status & M_DUART_OVRUN_ERR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & M_DUART_RCVD_BRK)
+ flag = TTY_BREAK;
+ else if (status & M_DUART_FRM_ERR)
+ flag = TTY_FRAME;
+ else if (status & M_DUART_PARITY_ERR)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(uport, ch))
+ continue;
+
+ uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
+ }
+
+ tty_flip_buffer_push(uport->info->tty);
+}
+
+static void sbd_transmit_chars(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ struct circ_buf *xmit = &sport->port.info->xmit;
+ unsigned int mask;
+ int stop_tx;
+
+ /* XON/XOFF chars. */
+ if (sport->port.x_char) {
+ write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
+ sport->port.icount.tx++;
+ sport->port.x_char = 0;
+ return;
+ }
+
+ /* Nothing to send, or output stopped in software or by hardware? */
+ stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
+
+ /* Send char. */
+ if (!stop_tx) {
+ write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+ }
+
+ /* Are we done? */
+ if (stop_tx || uart_circ_empty(xmit)) {
+ /* Disable tx interrupts. */
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ mask &= ~M_DUART_IMR_TX;
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+ }
+}
+
+static void sbd_status_handle(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+ unsigned int delta;
+
+ delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+ delta >>= (uport->line) % 2;
+
+ if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
+ uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
+
+ if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
+ uport->icount.dsr++;
+
+ if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
+ S_DUART_IN_PIN_CHNG))
+ wake_up_interruptible(&uport->info->delta_msr_wait);
+}
+
+static irqreturn_t sbd_interrupt(int irq, void *dev_id)
+{
+ struct sbd_port *sport = dev_id;
+ struct uart_port *uport = &sport->port;
+ irqreturn_t status = IRQ_NONE;
+ unsigned int intstat;
+ int count;
+
+ for (count = 16; count; count--) {
+ intstat = read_sbdshr(sport,
+ R_DUART_ISRREG((uport->line) % 2));
+ intstat &= read_sbdshr(sport,
+ R_DUART_IMRREG((uport->line) % 2));
+ intstat &= M_DUART_ISR_ALL;
+ if (!intstat)
+ break;
+
+ if (intstat & M_DUART_ISR_RX)
+ sbd_receive_chars(sport);
+ if (intstat & M_DUART_ISR_IN)
+ sbd_status_handle(sport);
+ if (intstat & M_DUART_ISR_TX)
+ sbd_transmit_chars(sport);
+
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+
+static int sbd_startup(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mode1;
+ int ret;
+
+ ret = request_irq(sport->port.irq, sbd_interrupt,
+ IRQF_SHARED, "sb1250-duart", sport);
+ if (ret)
+ return ret;
+
+ /* Clear the receive FIFO. */
+ sbd_receive_drain(sport);
+
+ /* Clear the interrupt registers. */
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
+ read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
+
+ /* Set rx/tx interrupt to FIFO available. */
+ mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
+ mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
+ write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
+
+ /* Disable tx, enable rx. */
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
+ sport->tx_stopped = 1;
+
+ /* Enable interrupts. */
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ M_DUART_IMR_IN | M_DUART_IMR_RX);
+
+ return 0;
+}
+
+static void sbd_shutdown(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+ sport->tx_stopped = 1;
+ free_irq(sport->port.irq, sport);
+}
+
+
+static void sbd_init_port(struct sbd_port *sport)
+{
+ struct uart_port *uport = &sport->port;
+
+ if (sport->initialised)
+ return;
+
+ /* There is no DUART reset feature, so just set some sane defaults. */
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
+ write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
+ write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
+ write_sbdchn(sport, R_DUART_FULL_CTL,
+ V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
+ write_sbdchn(sport, R_DUART_OPCR_X, 0);
+ write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
+
+ sport->initialised = 1;
+}
+
+static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ struct sbd_port *sport = to_sport(uport);
+ unsigned int mode1 = 0, mode2 = 0, aux = 0;
+ unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
+ unsigned int oldmode1, oldmode2, oldaux;
+ unsigned int baud, brg;
+ unsigned int command;
+
+ mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
+ M_DUART_BITS_PER_CHAR);
+ mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
+ auxmask |= ~M_DUART_CTS_CHNG_ENA;
+
+ /* Byte size. */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ case CS6:
+ /* Unsupported, leave unchanged. */
+ mode1mask |= M_DUART_PARITY_MODE;
+ break;
+ case CS7:
+ mode1 |= V_DUART_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mode1 |= V_DUART_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* Parity and stop bits. */
+ if (termios->c_cflag & CSTOPB)
+ mode2 |= M_DUART_STOP_BIT_LEN_2;
+ else
+ mode2 |= M_DUART_STOP_BIT_LEN_1;
+ if (termios->c_cflag & PARENB)
+ mode1 |= V_DUART_PARITY_MODE_ADD;
+ else
+ mode1 |= V_DUART_PARITY_MODE_NONE;
+ if (termios->c_cflag & PARODD)
+ mode1 |= M_DUART_PARITY_TYPE_ODD;
+ else
+ mode1 |= M_DUART_PARITY_TYPE_EVEN;
+
+ baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
+ brg = V_DUART_BAUD_RATE(baud);
+ /* The actual lower bound is 1221bps, so compensate. */
+ if (brg > M_DUART_CLK_COUNTER)
+ brg = M_DUART_CLK_COUNTER;
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ uport->read_status_mask = M_DUART_OVRUN_ERR;
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ uport->read_status_mask |= M_DUART_RCVD_BRK;
+
+ uport->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= M_DUART_FRM_ERR |
+ M_DUART_PARITY_ERR;
+ if (termios->c_iflag & IGNBRK) {
+ uport->ignore_status_mask |= M_DUART_RCVD_BRK;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
+ }
+
+ if (termios->c_cflag & CREAD)
+ command = M_DUART_RX_EN;
+ else
+ command = M_DUART_RX_DIS;
+
+ if (termios->c_cflag & CRTSCTS)
+ aux |= M_DUART_CTS_CHNG_ENA;
+ else
+ aux &= ~M_DUART_CTS_CHNG_ENA;
+
+ spin_lock(&uport->lock);
+
+ if (sport->tx_stopped)
+ command |= M_DUART_TX_DIS;
+ else
+ command |= M_DUART_TX_EN;
+
+ oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
+ oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
+ oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
+
+ if (!sport->tx_stopped)
+ sbd_line_drain(sport);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
+
+ write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
+ write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
+ write_sbdchn(sport, R_DUART_CLK_SEL, brg);
+ write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
+
+ write_sbdchn(sport, R_DUART_CMD, command);
+
+ spin_unlock(&uport->lock);
+}
+
+
+static const char *sbd_type(struct uart_port *uport)
+{
+ return "SB1250 DUART";
+}
+
+static void sbd_release_port(struct uart_port *uport)
+{
+ struct sbd_port *sport = to_sport(uport);
+ struct sbd_duart *duart = sport->duart;
+ int map_guard;
+
+ iounmap(sport->memctrl);
+ sport->memctrl = NULL;
+ iounmap(uport->membase);
+ uport->membase = NULL;
+
+ map_guard = atomic_add_return(-1, &duart->map_guard);
+ if (!map_guard)
+ release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
+ release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+}
+
+static int sbd_map_port(struct uart_port *uport)
+{
+ static const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
+ struct sbd_port *sport = to_sport(uport);
+ struct sbd_duart *duart = sport->duart;
+
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ DUART_CHANREG_SPACING);
+ if (!uport->membase) {
+ printk(err);
+ return -ENOMEM;
+ }
+
+ if (!sport->memctrl)
+ sport->memctrl = ioremap_nocache(duart->mapctrl,
+ DUART_CHANREG_SPACING);
+ if (!sport->memctrl) {
+ printk(err);
+ iounmap(uport->membase);
+ uport->membase = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int sbd_request_port(struct uart_port *uport)
+{
+ static const char *err = KERN_ERR
+ "sbd: Unable to reserve MMIO resource\n";
+ struct sbd_duart *duart = to_sport(uport)->duart;
+ int map_guard;
+ int ret = 0;
+
+ if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
+ "sb1250-duart")) {
+ printk(err);
+ return -EBUSY;
+ }
+ map_guard = atomic_add_return(1, &duart->map_guard);
+ if (map_guard == 1) {
+ if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
+ "sb1250-duart")) {
+ atomic_add(-1, &duart->map_guard);
+ printk(err);
+ ret = -EBUSY;
+ }
+ }
+ if (!ret) {
+ ret = sbd_map_port(uport);
+ if (ret) {
+ map_guard = atomic_add_return(-1, &duart->map_guard);
+ if (!map_guard)
+ release_mem_region(duart->mapctrl,
+ DUART_CHANREG_SPACING);
+ }
+ }
+ if (ret) {
+ release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
+ return ret;
+ }
+ return 0;
+}
+
+static void sbd_config_port(struct uart_port *uport, int flags)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (sbd_request_port(uport))
+ return;
+
+ uport->type = PORT_SB1250_DUART;
+
+ sbd_init_port(sport);
+ }
+}
+
+static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
+ ret = -EINVAL;
+ if (ser->irq != uport->irq)
+ ret = -EINVAL;
+ if (ser->baud_base != uport->uartclk / 16)
+ ret = -EINVAL;
+ return ret;
+}
+
+
+static struct uart_ops sbd_ops = {
+ .tx_empty = sbd_tx_empty,
+ .set_mctrl = sbd_set_mctrl,
+ .get_mctrl = sbd_get_mctrl,
+ .stop_tx = sbd_stop_tx,
+ .start_tx = sbd_start_tx,
+ .stop_rx = sbd_stop_rx,
+ .enable_ms = sbd_enable_ms,
+ .break_ctl = sbd_break_ctl,
+ .startup = sbd_startup,
+ .shutdown = sbd_shutdown,
+ .set_termios = sbd_set_termios,
+ .type = sbd_type,
+ .release_port = sbd_release_port,
+ .request_port = sbd_request_port,
+ .config_port = sbd_config_port,
+ .verify_port = sbd_verify_port,
+};
+
+/* Initialize SB1250 DUART port structures. */
+static void __init sbd_probe_duarts(void)
+{
+ static int probed;
+ int chip, side;
+ int max_lines, line;
+
+ if (probed)
+ return;
+
+ /* Set the number of available units based on the SOC type. */
+ switch (soc_type) {
+ case K_SYS_SOC_TYPE_BCM1x55:
+ case K_SYS_SOC_TYPE_BCM1x80:
+ max_lines = 4;
+ break;
+ default:
+ /* Assume at least two serial ports at the normal address. */
+ max_lines = 2;
+ break;
+ }
+
+ probed = 1;
+
+ for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
+ chip++) {
+ sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);
+
+ for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
+ side++, line++) {
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+
+ sport->duart = &sbd_duarts[chip];
+
+ uport->irq = SBD_INT(line);
+ uport->uartclk = 100000000 / 20 * 16;
+ uport->fifosize = 16;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &sbd_ops;
+ uport->line = line;
+ uport->mapbase = SBD_CHANREGS(line);
+ }
+ }
+}
+
+
+#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
+/*
+ * Serial console stuff. Very basic, polling driver for doing serial
+ * console output. The console_sem is held by the caller, so we
+ * shouldn't be interrupted for more console activity.
+ */
+static void sbd_console_putchar(struct uart_port *uport, int ch)
+{
+ struct sbd_port *sport = to_sport(uport);
+
+ sbd_transmit_drain(sport);
+ write_sbdchn(sport, R_DUART_TX_HOLD, ch);
+}
+
+static void sbd_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int chip = co->index / DUART_MAX_SIDE;
+ int side = co->index % DUART_MAX_SIDE;
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+ unsigned long flags;
+ unsigned int mask;
+
+ /* Disable transmit interrupts and enable the transmitter. */
+ spin_lock_irqsave(&uport->lock, flags);
+ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
+ mask & ~M_DUART_IMR_TX);
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
+ spin_unlock_irqrestore(&uport->lock, flags);
+
+ uart_console_write(&sport->port, s, count, sbd_console_putchar);
+
+ /* Restore transmit interrupts and the transmitter enable. */
+ spin_lock_irqsave(&uport->lock, flags);
+ sbd_line_drain(sport);
+ if (sport->tx_stopped)
+ write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
+ write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
+ spin_unlock_irqrestore(&uport->lock, flags);
+}
+
+static int __init sbd_console_setup(struct console *co, char *options)
+{
+ int chip = co->index / DUART_MAX_SIDE;
+ int side = co->index % DUART_MAX_SIDE;
+ struct sbd_port *sport = &sbd_duarts[chip].sport[side];
+ struct uart_port *uport = &sport->port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ if (!sport->duart)
+ return -ENXIO;
+
+ ret = sbd_map_port(uport);
+ if (ret)
+ return ret;
+
+ sbd_init_port(sport);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver sbd_reg;
+static struct console sbd_console = {
+ .name = "duart",
+ .write = sbd_console_write,
+ .device = uart_console_device,
+ .setup = sbd_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sbd_reg
+};
+
+static int __init sbd_serial_console_init(void)
+{
+ sbd_probe_duarts();
+ register_console(&sbd_console);
+
+ return 0;
+}
+
+console_initcall(sbd_serial_console_init);
+
+#define SERIAL_SB1250_DUART_CONSOLE &sbd_console
+#else
+#define SERIAL_SB1250_DUART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
+
+
+static struct uart_driver sbd_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "duart",
+ .major = TTY_MAJOR,
+ .minor = SB1250_DUART_MINOR_BASE,
+ .nr = DUART_MAX_CHIP * DUART_MAX_SIDE,
+ .cons = SERIAL_SB1250_DUART_CONSOLE,
+};
+
+/* Set up the driver and register it. */
+static int __init sbd_init(void)
+{
+ int i, ret;
+
+ sbd_probe_duarts();
+
+ ret = uart_register_driver(&sbd_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
+ struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+ struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+ struct uart_port *uport = &sport->port;
+
+ if (sport->duart)
+ uart_add_one_port(&sbd_reg, uport);
+ }
+
+ return 0;
+}
+
+/* Unload the driver. Unregister stuff, get ready to go away. */
+static void __exit sbd_exit(void)
+{
+ int i;
+
+ for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
+ struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
+ struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
+ struct uart_port *uport = &sport->port;
+
+ if (sport->duart)
+ uart_remove_one_port(&sbd_reg, uport);
+ }
+
+ uart_unregister_driver(&sbd_reg);
+}
+
+module_init(sbd_init);
+module_exit(sbd_exit);
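Both channels of a chip share a single control-register window; sbd_request_port() and sbd_release_port() above arbitrate it with the map_guard atomic so that only the first mapper reserves the region and the last one releases it. A condensed pair of hypothetical helpers showing the same idiom (the patch open-codes this in the request/release paths):

static int sbd_ctrl_region_get(struct sbd_duart *duart)
{
	/* First user of the shared region reserves it. */
	if (atomic_add_return(1, &duart->map_guard) == 1) {
		if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
					"sb1250-duart")) {
			atomic_add(-1, &duart->map_guard);
			return -EBUSY;
		}
	}
	return 0;
}

static void sbd_ctrl_region_put(struct sbd_duart *duart)
{
	/* Last user of the shared region gives it back. */
	if (!atomic_add_return(-1, &duart->map_guard))
		release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
}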
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 326020f86f75..9c57486c2e7f 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1910,6 +1910,12 @@ uart_set_options(struct uart_port *port, struct console *co,
if (flow == 'r')
termios.c_cflag |= CRTSCTS;
+ /*
+ * Some UARTs on the other end do not cope with flow control being
+ * disabled, so assert DTR on the host UART to keep them happy.
+ */
+ port->mctrl |= TIOCM_DTR;
+
port->ops->set_termios(port, &termios, NULL);
co->cflag = termios.c_cflag;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 1f89496d530e..672cd1042539 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -367,7 +367,9 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
} else {
#ifdef CONFIG_CPU_SUBTYPE_SH7343
/* Nothing */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780) || defined(CONFIG_CPU_SUBTYPE_SH7785)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SHX3)
ctrl_outw(0x0080, SCSPTR0); /* Set RTS = 1 */
#else
ctrl_outw(0x0080, SCSPTR2); /* Set RTS = 1 */
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index fb04fb5f9843..247fb66bf0f4 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -53,7 +53,12 @@
# define SCIF_ORER 0x0001 /* overrun error bit */
# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
# define SCIF_ONLY
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
# define SCSPTR1 0xffe0001c /* 8 bit SCI */
# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -73,7 +78,7 @@
# define SCPDR 0xA4050136 /* 16 bit SCIF */
# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
# define SCIF_ONLY
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
# define SCI_NPORTS 2
# define SCIF_ORER 0x0001 /* overrun error bit */
@@ -168,6 +173,14 @@
# define SCIF_ORER 0x0001 /* overrun error bit */
# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
# define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
+# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
+# define SCSPTR1 0xffc40020 /* 16 bit SCIF */
+# define SCSPTR2 0xffc50020 /* 16 bit SCIF */
+# define SCSPTR3 0xffc60020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001 /* Overrun error bit */
+# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
#else
# error CPU subtype not defined
#endif
@@ -177,10 +190,15 @@
#define SCI_CTRL_FLAGS_RIE 0x40 /* all */
#define SCI_CTRL_FLAGS_TE 0x20 /* all */
#define SCI_CTRL_FLAGS_RE 0x10 /* all */
-#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785)
+#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7780) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7785) || \
+ defined(CONFIG_CPU_SUBTYPE_SHX3)
#define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */
#else
#define SCI_CTRL_FLAGS_REIE 0
@@ -514,8 +532,12 @@ static inline void set_sh771x_scif_pfc(struct uart_port *port)
}
}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7091) || \
defined(CONFIG_CPU_SUBTYPE_SH4_202)
static inline int sci_rxd_in(struct uart_port *port)
{
@@ -653,6 +675,18 @@ static inline int sci_rxd_in(struct uart_port *port)
return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
+#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+ if (port->mapbase == 0xffc30000)
+ return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+ if (port->mapbase == 0xffc40000)
+ return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+ if (port->mapbase == 0xffc50000)
+ return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+ if (port->mapbase == 0xffc60000)
+ return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+ return 1;
+}
#endif
/*
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index a27e9e92cb5e..41fc61264443 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -759,7 +759,7 @@ static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
*/
static void sn_sal_console_write(struct console *, const char *, unsigned);
-static int __init sn_sal_console_setup(struct console *, char *);
+static int sn_sal_console_setup(struct console *, char *);
static struct uart_driver sal_console_uart;
extern struct tty_driver *uart_console_device(struct console *, int *);
@@ -1006,7 +1006,7 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
* here so providing it is easier.
*
*/
-static int __init sn_sal_console_setup(struct console *co, char *options)
+static int sn_sal_console_setup(struct console *co, char *options)
{
return 0;
}
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index b45ba5392dd3..70a09a3d5af0 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -16,9 +16,10 @@
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/serial_core.h>
#include <linux/init.h>
-#include <asm/oplib.h>
+#include <asm/prom.h>
#include "suncore.h"
@@ -26,92 +27,60 @@ int sunserial_current_minor = 64;
EXPORT_SYMBOL(sunserial_current_minor);
-void
-sunserial_console_termios(struct console *con)
+int sunserial_console_match(struct console *con, struct device_node *dp,
+ struct uart_driver *drv, int line)
{
- char mode[16], buf[16], *s;
- char mode_prop[] = "ttyX-mode";
- char cd_prop[] = "ttyX-ignore-cd";
- char dtr_prop[] = "ttyX-rts-dtr-off";
- char *ssp_console_modes_prop = "ssp-console-modes";
- int baud, bits, stop, cflag;
- char parity;
- int carrier = 0;
- int rtsdtr = 1;
- int topnd, nd;
-
- if (!serial_console)
- return;
-
- switch (serial_console) {
- case PROMDEV_OTTYA:
- mode_prop[3] = 'a';
- cd_prop[3] = 'a';
- dtr_prop[3] = 'a';
- break;
-
- case PROMDEV_OTTYB:
- mode_prop[3] = 'b';
- cd_prop[3] = 'b';
- dtr_prop[3] = 'b';
- break;
-
- case PROMDEV_ORSC:
-
- nd = prom_pathtoinode("rsc");
- if (!nd) {
- strcpy(mode, "115200,8,n,1,-");
- goto no_options;
- }
+ int off;
- if (!prom_node_has_property(nd, ssp_console_modes_prop)) {
- strcpy(mode, "115200,8,n,1,-");
- goto no_options;
- }
+ if (!con || of_console_device != dp)
+ return 0;
- memset(mode, 0, sizeof(mode));
- prom_getstring(nd, ssp_console_modes_prop, mode, sizeof(mode));
- goto no_options;
+ off = 0;
+ if (of_console_options &&
+ *of_console_options == 'b')
+ off = 1;
- default:
- strcpy(mode, "9600,8,n,1,-");
- goto no_options;
- }
+ if ((line & 1) != off)
+ return 0;
- topnd = prom_getchild(prom_root_node);
- nd = prom_searchsiblings(topnd, "options");
- if (!nd) {
- strcpy(mode, "9600,8,n,1,-");
- goto no_options;
- }
-
- if (!prom_node_has_property(nd, mode_prop)) {
- strcpy(mode, "9600,8,n,1,-");
- goto no_options;
- }
+ con->index = line;
+ drv->cons = con;
+ add_preferred_console(con->name, line, NULL);
- memset(mode, 0, sizeof(mode));
- prom_getstring(nd, mode_prop, mode, sizeof(mode));
-
- if (prom_node_has_property(nd, cd_prop)) {
- memset(buf, 0, sizeof(buf));
- prom_getstring(nd, cd_prop, buf, sizeof(buf));
- if (!strcmp(buf, "false"))
- carrier = 1;
-
- /* XXX: this is unused below. */
- }
+ return 1;
+}
+EXPORT_SYMBOL(sunserial_console_match);
- if (prom_node_has_property(nd, dtr_prop)) {
- memset(buf, 0, sizeof(buf));
- prom_getstring(nd, dtr_prop, buf, sizeof(buf));
- if (!strcmp(buf, "false"))
- rtsdtr = 0;
+void
+sunserial_console_termios(struct console *con)
+{
+ struct device_node *dp;
+ const char *od, *mode, *s;
+ char mode_prop[] = "ttyX-mode";
+ int baud, bits, stop, cflag;
+ char parity;
- /* XXX: this is unused below. */
+ dp = of_find_node_by_path("/options");
+ od = of_get_property(dp, "output-device", NULL);
+ if (!strcmp(od, "rsc")) {
+ mode = of_get_property(of_console_device,
+ "ssp-console-modes", NULL);
+ if (!mode)
+ mode = "115200,8,n,1,-";
+ } else {
+ char c;
+
+ c = 'a';
+ if (of_console_options)
+ c = *of_console_options;
+
+ mode_prop[3] = c;
+
+ mode = of_get_property(dp, mode_prop, NULL);
+ if (!mode)
+ mode = "9600,8,n,1,-";
}
-no_options:
cflag = CREAD | HUPCL | CLOCAL;
s = mode;
diff --git a/drivers/serial/suncore.h b/drivers/serial/suncore.h
index 513916a8ce37..829d7d65d6db 100644
--- a/drivers/serial/suncore.h
+++ b/drivers/serial/suncore.h
@@ -24,6 +24,8 @@ extern int suncore_mouse_baud_detection(unsigned char, int);
extern int sunserial_current_minor;
+extern int sunserial_console_match(struct console *, struct device_node *,
+ struct uart_driver *, int);
extern void sunserial_console_termios(struct console *);
#endif /* !(_SERIAL_SUN_H) */
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index 96557e6dba60..8ff900b09811 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -258,17 +258,7 @@ static void sunhv_stop_tx(struct uart_port *port)
/* port->lock held by caller. */
static void sunhv_start_tx(struct uart_port *port)
{
- struct circ_buf *xmit = &port->info->xmit;
-
- while (!uart_circ_empty(xmit)) {
- long status = sun4v_con_putchar(xmit->buf[xmit->tail]);
-
- if (status != HV_EOK)
- break;
-
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- }
+ transmit_chars(port);
}
/* port->lock is not held. */
@@ -440,8 +430,16 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
{
struct uart_port *port = sunhv_port;
unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else
+ spin_lock(&port->lock);
- spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
unsigned long page_bytes;
@@ -469,7 +467,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
ra += written;
}
}
- spin_unlock_irqrestore(&port->lock, flags);
+
+ if (locked)
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -488,7 +489,15 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
{
struct uart_port *port = sunhv_port;
unsigned long flags;
- int i;
+ int i, locked = 1;
+
+ local_irq_save(flags);
+ if (port->sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&port->lock);
+ } else
+ spin_lock(&port->lock);
- spin_lock_irqsave(&port->lock, flags);
for (i = 0; i < n; i++) {
@@ -496,7 +505,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
sunhv_console_putchar(port, '\r');
sunhv_console_putchar(port, *s++);
}
- spin_unlock_irqrestore(&port->lock, flags);
+
+ if (locked)
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
}
static struct console sunhv_console = {
@@ -508,16 +520,6 @@ static struct console sunhv_console = {
.data = &sunhv_reg,
};
-static inline struct console *SUNHV_CONSOLE(void)
-{
- if (con_is_present())
- return NULL;
-
- sunhv_console.index = 0;
-
- return &sunhv_console;
-}
-
static int __devinit hv_probe(struct of_device *op, const struct of_device_id *match)
{
struct uart_port *port;
@@ -570,7 +572,8 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
sunhv_reg.tty_driver->name_base = sunhv_reg.minor - 64;
sunserial_current_minor += 1;
- sunhv_reg.cons = SUNHV_CONSOLE();
+ sunserial_console_match(&sunhv_console, op->node,
+ &sunhv_reg, port->line);
err = uart_add_one_port(&sunhv_reg, port);
if (err)
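The console-write locking added to sunhv above is repeated almost verbatim in the sunsab, sunsu and sunzilog writers below. Distilled into a hypothetical helper (the function name is made up), the pattern is:

/*
 * Sketch of the oops-safe console locking used by these drivers: never
 * take the port lock from the sysrq path, which already holds it, and
 * only trylock while an oops is in progress so a CPU that died holding
 * the lock cannot silence the console.
 */
static void example_console_write(struct uart_port *port, const char *s,
				  unsigned int n)
{
	unsigned long flags;
	int locked = 1;

	local_irq_save(flags);
	if (port->sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);
	else
		spin_lock(&port->lock);

	/* ... write the n characters of s to the hardware ... */

	if (locked)
		spin_unlock(&port->lock);
	local_irq_restore(flags);
}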
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index deb9ab4b5a0b..bca57bb94939 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -860,22 +860,31 @@ static int num_channels;
static void sunsab_console_putchar(struct uart_port *port, int c)
{
struct uart_sunsab_port *up = (struct uart_sunsab_port *)port;
- unsigned long flags;
-
- spin_lock_irqsave(&up->port.lock, flags);
sunsab_tec_wait(up);
writeb(c, &up->regs->w.tic);
-
- spin_unlock_irqrestore(&up->port.lock, flags);
}
static void sunsab_console_write(struct console *con, const char *s, unsigned n)
{
struct uart_sunsab_port *up = &sunsab_ports[con->index];
+ unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
static int sunsab_console_setup(struct console *con, char *options)
@@ -959,22 +968,6 @@ static struct console sunsab_console = {
static inline struct console *SUNSAB_CONSOLE(void)
{
- int i;
-
- if (con_is_present())
- return NULL;
-
- for (i = 0; i < num_channels; i++) {
- int this_minor = sunsab_reg.minor + i;
-
- if ((this_minor - 64) == (serial_console - 1))
- break;
- }
- if (i == num_channels)
- return NULL;
-
- sunsab_console.index = i;
-
return &sunsab_console;
}
#else
@@ -1071,7 +1064,12 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
return err;
}
+ sunserial_console_match(SUNSAB_CONSOLE(), op->node,
+ &sunsab_reg, up[0].port.line);
uart_add_one_port(&sunsab_reg, &up[0].port);
+
+ sunserial_console_match(SUNSAB_CONSOLE(), op->node,
+ &sunsab_reg, up[1].port.line);
uart_add_one_port(&sunsab_reg, &up[1].port);
dev_set_drvdata(&op->dev, &up[0]);
@@ -1155,7 +1153,6 @@ static int __init sunsab_init(void)
}
sunsab_reg.tty_driver->name_base = sunsab_reg.minor - 64;
- sunsab_reg.cons = SUNSAB_CONSOLE();
sunserial_current_minor += num_channels;
}
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 2a63cdba3208..79b13685bdfa 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1288,7 +1288,17 @@ static void sunsu_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_sunsu_port *up = &sunsu_ports[co->index];
+ unsigned long flags;
unsigned int ier;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
/*
* First save the UER then disable the interrupts
@@ -1304,6 +1314,10 @@ static void sunsu_console_write(struct console *co, const char *s,
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
/*
@@ -1357,28 +1371,12 @@ static struct console sunsu_console = {
* Register console.
*/
-static inline struct console *SUNSU_CONSOLE(int num_uart)
+static inline struct console *SUNSU_CONSOLE(void)
{
- int i;
-
- if (con_is_present())
- return NULL;
-
- for (i = 0; i < num_uart; i++) {
- int this_minor = sunsu_reg.minor + i;
-
- if ((this_minor - 64) == (serial_console - 1))
- break;
- }
- if (i == num_uart)
- return NULL;
-
- sunsu_console.index = i;
-
return &sunsu_console;
}
#else
-#define SUNSU_CONSOLE(num_uart) (NULL)
+#define SUNSU_CONSOLE() (NULL)
#define sunsu_serial_console_init() do { } while (0)
#endif
@@ -1468,6 +1466,8 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
up->port.ops = &sunsu_pops;
+ sunserial_console_match(SUNSU_CONSOLE(), dp,
+ &sunsu_reg, up->port.line);
err = uart_add_one_port(&sunsu_reg, &up->port);
if (err)
goto out_unmap;
@@ -1558,7 +1558,6 @@ static int __init sunsu_init(void)
return err;
sunsu_reg.tty_driver->name_base = sunsu_reg.minor - 64;
sunserial_current_minor += num_uart;
- sunsu_reg.cons = SUNSU_CONSOLE(num_uart);
}
err = of_register_driver(&su_driver, &of_bus_type);
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 15b6e1cb040b..1d262c0c613f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -9,7 +9,7 @@
* C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their
* work there.
*
- * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2002, 2006, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
@@ -1151,11 +1151,22 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
{
struct uart_sunzilog_port *up = &sunzilog_port_table[con->index];
unsigned long flags;
+ int locked = 1;
+
+ local_irq_save(flags);
+ if (up->port.sysrq) {
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
- spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
- spin_unlock_irqrestore(&up->port.lock, flags);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+ local_irq_restore(flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
@@ -1215,23 +1226,6 @@ static struct console sunzilog_console_ops = {
static inline struct console *SUNZILOG_CONSOLE(void)
{
- int i;
-
- if (con_is_present())
- return NULL;
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- int this_minor = sunzilog_reg.minor + i;
-
- if ((this_minor - 64) == (serial_console - 1))
- break;
- }
- if (i == NUM_CHANNELS)
- return NULL;
-
- sunzilog_console_ops.index = i;
- sunzilog_port_table[i].flags |= SUNZILOG_FLAG_IS_CONS;
-
return &sunzilog_console_ops;
}
@@ -1417,12 +1411,18 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
sunzilog_init_hw(&up[1]);
if (!keyboard_mouse) {
+ if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
+ &sunzilog_reg, up[0].port.line))
+ up->flags |= SUNZILOG_FLAG_IS_CONS;
err = uart_add_one_port(&sunzilog_reg, &up[0].port);
if (err) {
of_iounmap(&op->resource[0],
rp, sizeof(struct zilog_layout));
return err;
}
+ if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
+ &sunzilog_reg, up[1].port.line))
+ up->flags |= SUNZILOG_FLAG_IS_CONS;
err = uart_add_one_port(&sunzilog_reg, &up[1].port);
if (err) {
uart_remove_one_port(&sunzilog_reg, &up[0].port);
@@ -1520,7 +1520,6 @@ static int __init sunzilog_init(void)
goto out_free_tables;
sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64;
- sunzilog_reg.cons = SUNZILOG_CONSOLE();
sunserial_current_minor += uart_count;
}
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index cf0e663b42ed..85309acb75f6 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2004-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* Based on drivers/serial/8250.c, by Russell King.
*
@@ -25,12 +25,12 @@
#endif
#include <linux/console.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/ioport.h>
+#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
@@ -38,11 +38,9 @@
#include <linux/tty_flip.h>
#include <asm/io.h>
-#include <asm/vr41xx/irq.h>
#include <asm/vr41xx/siu.h>
#include <asm/vr41xx/vr41xx.h>
-#define SIU_PORTS_MAX 2
#define SIU_BAUD_BASE 1152000
#define SIU_MAJOR 204
#define SIU_MINOR_BASE 82
@@ -60,32 +58,13 @@
#define IRUSESEL 0x02
#define SIRSEL 0x01
-struct siu_port {
- unsigned int type;
- unsigned int irq;
- unsigned long start;
-};
-
-static const struct siu_port siu_type1_ports[] = {
- { .type = PORT_VR41XX_SIU,
- .irq = SIU_IRQ,
- .start = 0x0c000000UL, },
-};
-
-#define SIU_TYPE1_NR_PORTS (sizeof(siu_type1_ports) / sizeof(struct siu_port))
-
-static const struct siu_port siu_type2_ports[] = {
- { .type = PORT_VR41XX_SIU,
- .irq = SIU_IRQ,
- .start = 0x0f000800UL, },
- { .type = PORT_VR41XX_DSIU,
- .irq = DSIU_IRQ,
- .start = 0x0f000820UL, },
+static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
+ [0 ... SIU_PORTS_MAX-1] = {
+ .lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
+ .irq = -1,
+ },
};
-#define SIU_TYPE2_NR_PORTS (sizeof(siu_type2_ports) / sizeof(struct siu_port))
-
-static struct uart_port siu_uart_ports[SIU_PORTS_MAX];
static uint8_t lsr_break_flag[SIU_PORTS_MAX];
#define siu_read(port, offset) readb((port)->membase + (offset))
@@ -110,7 +89,6 @@ void vr41xx_select_siu_interface(siu_interface_t interface)
spin_unlock_irqrestore(&port->lock, flags);
}
-
EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
void vr41xx_use_irda(irda_use_t use)
@@ -132,7 +110,6 @@ void vr41xx_use_irda(irda_use_t use)
spin_unlock_irqrestore(&port->lock, flags);
}
-
EXPORT_SYMBOL_GPL(vr41xx_use_irda);
void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
@@ -166,7 +143,6 @@ void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
spin_unlock_irqrestore(&port->lock, flags);
}
-
EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
static inline void siu_clear_fifo(struct uart_port *port)
@@ -177,21 +153,6 @@ static inline void siu_clear_fifo(struct uart_port *port)
siu_write(port, UART_FCR, 0);
}
-static inline int siu_probe_ports(void)
-{
- switch (current_cpu_data.cputype) {
- case CPU_VR4111:
- case CPU_VR4121:
- return SIU_TYPE1_NR_PORTS;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- return SIU_TYPE2_NR_PORTS;
- }
-
- return 0;
-}
-
static inline unsigned long siu_port_size(struct uart_port *port)
{
switch (port->type) {
@@ -206,21 +167,10 @@ static inline unsigned long siu_port_size(struct uart_port *port)
static inline unsigned int siu_check_type(struct uart_port *port)
{
- switch (current_cpu_data.cputype) {
- case CPU_VR4111:
- case CPU_VR4121:
- if (port->line == 0)
- return PORT_VR41XX_SIU;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- if (port->line == 0)
- return PORT_VR41XX_SIU;
- else if (port->line == 1)
- return PORT_VR41XX_DSIU;
- break;
- }
+ if (port->line == 0)
+ return PORT_VR41XX_SIU;
+ if (port->line == 1 && port->irq != -1)
+ return PORT_VR41XX_DSIU;
return PORT_UNKNOWN;
}
@@ -751,44 +701,34 @@ static struct uart_ops siu_uart_ops = {
.verify_port = siu_verify_port,
};
-static int siu_init_ports(void)
+static int siu_init_ports(struct platform_device *pdev)
{
- const struct siu_port *siu;
struct uart_port *port;
- int i, num;
+ struct resource *res;
+ int *type = pdev->dev.platform_data;
+ int i;
- switch (current_cpu_data.cputype) {
- case CPU_VR4111:
- case CPU_VR4121:
- siu = siu_type1_ports;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
- case CPU_VR4133:
- siu = siu_type2_ports;
- break;
- default:
+ if (!type)
return 0;
- }
port = siu_uart_ports;
- num = siu_probe_ports();
- for (i = 0; i < num; i++) {
- spin_lock_init(&port->lock);
- port->irq = siu->irq;
+ for (i = 0; i < SIU_PORTS_MAX; i++) {
+ port->type = type[i];
+ if (port->type == PORT_UNKNOWN)
+ continue;
+ port->irq = platform_get_irq(pdev, i);
port->uartclk = SIU_BAUD_BASE * 16;
port->fifosize = 16;
port->regshift = 0;
port->iotype = UPIO_MEM;
port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
- port->type = siu->type;
port->line = i;
- port->mapbase = siu->start;
- siu++;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ port->mapbase = res->start;
port++;
}
- return num;
+ return i;
}
#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
@@ -883,13 +823,9 @@ static struct console siu_console = {
static int __devinit siu_console_init(void)
{
struct uart_port *port;
- int num, i;
-
- num = siu_init_ports();
- if (num <= 0)
- return -ENODEV;
+ int i;
- for (i = 0; i < num; i++) {
+ for (i = 0; i < SIU_PORTS_MAX; i++) {
port = &siu_uart_ports[i];
port->ops = &siu_uart_ops;
}
@@ -920,7 +856,7 @@ static int __devinit siu_probe(struct platform_device *dev)
struct uart_port *port;
int num, i, retval;
- num = siu_init_ports();
+ num = siu_init_ports(dev);
if (num <= 0)
return -ENODEV;
@@ -998,8 +934,6 @@ static int siu_resume(struct platform_device *dev)
return 0;
}
-static struct platform_device *siu_platform_device;
-
static struct platform_driver siu_device_driver = {
.probe = siu_probe,
.remove = __devexit_p(siu_remove),
@@ -1013,29 +947,12 @@ static struct platform_driver siu_device_driver = {
static int __init vr41xx_siu_init(void)
{
- int retval;
-
- siu_platform_device = platform_device_alloc("SIU", -1);
- if (!siu_platform_device)
- return -ENOMEM;
-
- retval = platform_device_add(siu_platform_device);
- if (retval < 0) {
- platform_device_put(siu_platform_device);
- return retval;
- }
-
- retval = platform_driver_register(&siu_device_driver);
- if (retval < 0)
- platform_device_unregister(siu_platform_device);
-
- return retval;
+ return platform_driver_register(&siu_device_driver);
}
static void __exit vr41xx_siu_exit(void)
{
platform_driver_unregister(&siu_device_driver);
- platform_device_unregister(siu_platform_device);
}
module_init(vr41xx_siu_init);
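With the port tables removed from the driver, the board code is now expected to register an "SIU" platform device carrying the per-port types, IRQs and register windows. A hypothetical board-side registration matching what the reworked siu_init_ports() expects (platform_data is an array of SIU_PORTS_MAX port types, plus one MEM and one IRQ resource per port; the addresses and IRQs are the VR4122-class values removed from the driver above, the region sizes are illustrative):

static int siu_port_types[2] = { PORT_VR41XX_SIU, PORT_VR41XX_DSIU };

static struct resource siu_resources[] = {
	{ .start = 0x0f000800UL, .end = 0x0f00081fUL, .flags = IORESOURCE_MEM },
	{ .start = SIU_IRQ,                           .flags = IORESOURCE_IRQ },
	{ .start = 0x0f000820UL, .end = 0x0f00083fUL, .flags = IORESOURCE_MEM },
	{ .start = DSIU_IRQ,                          .flags = IORESOURCE_IRQ },
};

static struct platform_device siu_device = {
	.name           = "SIU",
	.id             = -1,
	.dev            = { .platform_data = siu_port_types, },
	.num_resources  = ARRAY_SIZE(siu_resources),
	.resource       = siu_resources,
};

Registering this with platform_device_register() at board setup time is then enough for siu_probe() to find both ports.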
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
new file mode 100644
index 000000000000..65f1294fd27b
--- /dev/null
+++ b/drivers/serial/zs.c
@@ -0,0 +1,1287 @@
+/*
+ * zs.c: Serial port driver for IOASIC DECstations.
+ *
+ * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras.
+ * Derived from drivers/macintosh/macserial.c by Harald Koerfgen.
+ *
+ * DECstation changes
+ * Copyright (C) 1998-2000 Harald Koerfgen
+ * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ *
+ * For the rest of the code the original Copyright applies:
+ * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ *
+ * Note: for IOASIC systems the wiring is as follows:
+ *
+ * mouse/keyboard:
+ * DIN-7 MJ-4 signal SCC
+ * 2 1 TxD <- A.TxD
+ * 3 4 RxD -> A.RxD
+ *
+ * EIA-232/EIA-423:
+ * DB-25 MMJ-6 signal SCC
+ * 2 2 TxD <- B.TxD
+ * 3 5 RxD -> B.RxD
+ * 4 RTS <- ~A.RTS
+ * 5 CTS -> ~B.CTS
+ * 6 6 DSR -> ~A.SYNC
+ * 8 CD -> ~B.DCD
+ * 12 DSRS(DCE) -> ~A.CTS (*)
+ * 15 TxC -> B.TxC
+ * 17 RxC -> B.RxC
+ * 20 1 DTR <- ~A.DTR
+ * 22 RI -> ~A.DCD
+ * 23 DSRS(DTE) <- ~B.RTS
+ *
+ * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE)
+ * is shared with DSRS(DTE) at pin 23.
+ *
+ * As you can immediately notice the wiring of the RTS, DTR and DSR signals
+ * is a bit odd. This makes the handling of port B unnecessarily
+ * complicated and prevents the use of some automatic modes of operation.
+ */
+
+#if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/bug.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irqflags.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/system.h>
+
+#include "zs.h"
+
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
+MODULE_DESCRIPTION("DECstation Z85C30 serial driver");
+MODULE_LICENSE("GPL");
+
+
+static char zs_name[] __initdata = "DECstation Z85C30 serial driver version ";
+static char zs_version[] __initdata = "0.10";
+
+/*
+ * It would be nice to dynamically allocate everything that
+ * depends on ZS_NUM_SCCS, so we could support any number of
+ * Z85C30s, but for now...
+ */
+#define ZS_NUM_SCCS 2 /* Max # of ZS chips supported. */
+#define ZS_NUM_CHAN 2 /* 2 channels per chip. */
+#define ZS_CHAN_A 0 /* Index of the channel A. */
+#define ZS_CHAN_B 1 /* Index of the channel B. */
+#define ZS_CHAN_IO_SIZE 8 /* IOMEM space size. */
+#define ZS_CHAN_IO_STRIDE 4 /* Register alignment. */
+#define ZS_CHAN_IO_OFFSET 1 /* The SCC resides on the high byte
+ of the 16-bit IOBUS. */
+#define ZS_CLOCK 7372800 /* Z85C30 PCLK input clock rate. */
+
+#define to_zport(uport) container_of(uport, struct zs_port, port)
+
+struct zs_parms {
+ resource_size_t scc[ZS_NUM_SCCS];
+ int irq[ZS_NUM_SCCS];
+};
+
+static struct zs_scc zs_sccs[ZS_NUM_SCCS];
+
+static u8 zs_init_regs[ZS_NUM_REGS] __initdata = {
+ 0, /* write 0 */
+ PAR_SPEC, /* write 1 */
+ 0, /* write 2 */
+ 0, /* write 3 */
+ X16CLK | SB1, /* write 4 */
+ 0, /* write 5 */
+ 0, 0, 0, /* write 6, 7, 8 */
+ MIE | DLC | NV, /* write 9 */
+ NRZ, /* write 10 */
+ TCBR | RCBR, /* write 11 */
+ 0, 0, /* BRG time constant, write 12 + 13 */
+ BRSRC | BRENABL, /* write 14 */
+ 0, /* write 15 */
+};
+
+/*
+ * Debugging.
+ */
+#undef ZS_DEBUG_REGS
+
+
+/*
+ * Reading and writing Z85C30 registers.
+ */
+static void recovery_delay(void)
+{
+ udelay(2);
+}
+
+static u8 read_zsreg(struct zs_port *zport, int reg)
+{
+ void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
+ u8 retval;
+
+ if (reg != 0) {
+ writeb(reg & 0xf, control);
+ fast_iob();
+ recovery_delay();
+ }
+ retval = readb(control);
+ recovery_delay();
+ return retval;
+}
+
+static void write_zsreg(struct zs_port *zport, int reg, u8 value)
+{
+ void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
+
+ if (reg != 0) {
+ writeb(reg & 0xf, control);
+ fast_iob();
+ recovery_delay();
+ }
+ writeb(value, control);
+ fast_iob();
+ recovery_delay();
+ return;
+}
+
+static u8 read_zsdata(struct zs_port *zport)
+{
+ void __iomem *data = zport->port.membase +
+ ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
+ u8 retval;
+
+ retval = readb(data);
+ recovery_delay();
+ return retval;
+}
+
+static void write_zsdata(struct zs_port *zport, u8 value)
+{
+ void __iomem *data = zport->port.membase +
+ ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
+
+ writeb(value, data);
+ fast_iob();
+ recovery_delay();
+ return;
+}
+
+#ifdef ZS_DEBUG_REGS
+void zs_dump(void)
+{
+ struct zs_port *zport;
+ int i, j;
+
+ for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
+ zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN];
+
+ if (!zport->scc)
+ continue;
+
+ for (j = 0; j < 16; j++)
+ printk("W%-2d = 0x%02x\t", j, zport->regs[j]);
+ printk("\n");
+ for (j = 0; j < 16; j++)
+ printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j));
+ printk("\n\n");
+ }
+}
+#endif
+
+
+static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq)
+{
+ if (irq)
+ spin_lock_irq(lock);
+ else
+ spin_lock(lock);
+}
+
+static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq)
+{
+ if (irq)
+ spin_unlock_irq(lock);
+ else
+ spin_unlock(lock);
+}
+
+static int zs_receive_drain(struct zs_port *zport)
+{
+ int loops = 10000;
+
+ while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
+ read_zsdata(zport);
+ return loops;
+}
+
+static int zs_transmit_drain(struct zs_port *zport, int irq)
+{
+ struct zs_scc *scc = zport->scc;
+ int loops = 10000;
+
+ while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
+ zs_spin_unlock_cond_irq(&scc->zlock, irq);
+ udelay(2);
+ zs_spin_lock_cond_irq(&scc->zlock, irq);
+ }
+ return loops;
+}
+
+static int zs_line_drain(struct zs_port *zport, int irq)
+{
+ struct zs_scc *scc = zport->scc;
+ int loops = 10000;
+
+ while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
+ zs_spin_unlock_cond_irq(&scc->zlock, irq);
+ udelay(2);
+ zs_spin_lock_cond_irq(&scc->zlock, irq);
+ }
+ return loops;
+}
+
+
+static void load_zsregs(struct zs_port *zport, u8 *regs, int irq)
+{
+ /* Let the current transmission finish. */
+ zs_line_drain(zport, irq);
+ /* Load 'em up. */
+ write_zsreg(zport, R3, regs[3] & ~RxENABLE);
+ write_zsreg(zport, R5, regs[5] & ~TxENAB);
+ write_zsreg(zport, R4, regs[4]);
+ write_zsreg(zport, R9, regs[9]);
+ write_zsreg(zport, R1, regs[1]);
+ write_zsreg(zport, R2, regs[2]);
+ write_zsreg(zport, R10, regs[10]);
+ write_zsreg(zport, R14, regs[14] & ~BRENABL);
+ write_zsreg(zport, R11, regs[11]);
+ write_zsreg(zport, R12, regs[12]);
+ write_zsreg(zport, R13, regs[13]);
+ write_zsreg(zport, R14, regs[14]);
+ write_zsreg(zport, R15, regs[15]);
+ if (regs[3] & RxENABLE)
+ write_zsreg(zport, R3, regs[3]);
+ if (regs[5] & TxENAB)
+ write_zsreg(zport, R5, regs[5]);
+ return;
+}
+
+
+/*
+ * Status handling routines.
+ */
+
+/*
+ * zs_tx_empty() -- get the transmitter empty status
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not when the transmit
+ * holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static unsigned int zs_tx_empty(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ status = read_zsreg(zport, R1);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ return status & ALL_SNT ? TIOCSER_TEMT : 0;
+}
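Since zs_tx_empty() backs the serial core's TIOCSERGETLSR ioctl, the user-space RS485 scheme the comment above alludes to reduces to polling for TIOCSER_TEMT before releasing the bus. A hypothetical user-space sketch, not part of this patch (header locations vary between libcs):

#include <sys/ioctl.h>		/* TIOCSERGETLSR, TIOCSER_TEMT on glibc */

/* Spin until the transmit shift register has drained. */
static int wait_until_sent(int fd)
{
	unsigned int lsr = 0;

	do {
		if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
			return -1;
	} while (!(lsr & TIOCSER_TEMT));

	return 0;
}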
+
+static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a,
+ struct zs_port *zport_b)
+{
+ u8 status_a, status_b;
+ unsigned int mctrl;
+
+ status_a = read_zsreg(zport_a, R0);
+ status_b = read_zsreg(zport_b, R0);
+
+ mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) |
+ ((status_b & DCD) ? TIOCM_CAR : 0) |
+ ((status_a & DCD) ? TIOCM_RNG : 0) |
+ ((status_a & SYNC_HUNT) ? TIOCM_DSR : 0);
+
+ return mctrl;
+}
+
+static unsigned int zs_raw_get_mctrl(struct zs_port *zport)
+{
+ struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
+
+ return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0;
+}
+
+static unsigned int zs_raw_xor_mctrl(struct zs_port *zport)
+{
+ struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
+ unsigned int mmask, mctrl, delta;
+ u8 mask_a, mask_b;
+
+ if (zport == zport_a)
+ return 0;
+
+ mask_a = zport_a->regs[15];
+ mask_b = zport->regs[15];
+
+ mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) |
+ ((mask_b & DCDIE) ? TIOCM_CAR : 0) |
+ ((mask_a & DCDIE) ? TIOCM_RNG : 0) |
+ ((mask_a & SYNCIE) ? TIOCM_DSR : 0);
+
+ mctrl = zport->mctrl;
+ if (mmask) {
+ mctrl &= ~mmask;
+ mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask;
+ }
+
+ delta = mctrl ^ zport->mctrl;
+ if (delta)
+ zport->mctrl = mctrl;
+
+ return delta;
+}
+
+static unsigned int zs_get_mctrl(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned int mctrl;
+
+ spin_lock(&scc->zlock);
+ mctrl = zs_raw_get_mctrl(zport);
+ spin_unlock(&scc->zlock);
+
+ return mctrl;
+}
+
+static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+ u8 oldloop, newloop;
+
+ spin_lock(&scc->zlock);
+ if (zport != zport_a) {
+ if (mctrl & TIOCM_DTR)
+ zport_a->regs[5] |= DTR;
+ else
+ zport_a->regs[5] &= ~DTR;
+ if (mctrl & TIOCM_RTS)
+ zport_a->regs[5] |= RTS;
+ else
+ zport_a->regs[5] &= ~RTS;
+ write_zsreg(zport_a, R5, zport_a->regs[5]);
+ }
+
+ /* Rarely modified, so don't poke at hardware unless necessary. */
+ oldloop = zport->regs[14];
+ newloop = oldloop;
+ if (mctrl & TIOCM_LOOP)
+ newloop |= LOOPBAK;
+ else
+ newloop &= ~LOOPBAK;
+ if (newloop != oldloop) {
+ zport->regs[14] = newloop;
+ write_zsreg(zport, R14, zport->regs[14]);
+ }
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_raw_stop_tx(struct zs_port *zport)
+{
+ write_zsreg(zport, R0, RES_Tx_P);
+ zport->tx_stopped = 1;
+}
+
+static void zs_stop_tx(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+
+ spin_lock(&scc->zlock);
+ zs_raw_stop_tx(zport);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_raw_transmit_chars(struct zs_port *);
+
+static void zs_start_tx(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+
+ spin_lock(&scc->zlock);
+ if (zport->tx_stopped) {
+ zs_transmit_drain(zport, 0);
+ zport->tx_stopped = 0;
+ zs_raw_transmit_chars(zport);
+ }
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_stop_rx(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+
+ spin_lock(&scc->zlock);
+ zport->regs[15] &= ~BRKIE;
+ zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB);
+ zport->regs[1] |= RxINT_DISAB;
+
+ if (zport != zport_a) {
+ /* A-side DCD tracks RI and SYNC tracks DSR. */
+ zport_a->regs[15] &= ~(DCDIE | SYNCIE);
+ write_zsreg(zport_a, R15, zport_a->regs[15]);
+ if (!(zport_a->regs[15] & BRKIE)) {
+ zport_a->regs[1] &= ~EXT_INT_ENAB;
+ write_zsreg(zport_a, R1, zport_a->regs[1]);
+ }
+
+ /* This-side DCD tracks DCD and CTS tracks CTS. */
+ zport->regs[15] &= ~(DCDIE | CTSIE);
+ zport->regs[1] &= ~EXT_INT_ENAB;
+ } else {
+ /* DCD tracks RI and SYNC tracks DSR for the B side. */
+ if (!(zport->regs[15] & (DCDIE | SYNCIE)))
+ zport->regs[1] &= ~EXT_INT_ENAB;
+ }
+
+ write_zsreg(zport, R15, zport->regs[15]);
+ write_zsreg(zport, R1, zport->regs[1]);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_enable_ms(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+
+ if (zport == zport_a)
+ return;
+
+ spin_lock(&scc->zlock);
+
+ /* Clear Ext interrupts if not being handled already. */
+ if (!(zport_a->regs[1] & EXT_INT_ENAB))
+ write_zsreg(zport_a, R0, RES_EXT_INT);
+
+ /* A-side DCD tracks RI and SYNC tracks DSR. */
+ zport_a->regs[1] |= EXT_INT_ENAB;
+ zport_a->regs[15] |= DCDIE | SYNCIE;
+
+ /* This-side DCD tracks DCD and CTS tracks CTS. */
+ zport->regs[15] |= DCDIE | CTSIE;
+
+ zs_raw_xor_mctrl(zport);
+
+ write_zsreg(zport_a, R1, zport_a->regs[1]);
+ write_zsreg(zport_a, R15, zport_a->regs[15]);
+ write_zsreg(zport, R15, zport->regs[15]);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_break_ctl(struct uart_port *uport, int break_state)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ if (break_state == -1)
+ zport->regs[5] |= SND_BRK;
+ else
+ zport->regs[5] &= ~SND_BRK;
+ write_zsreg(zport, R5, zport->regs[5]);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+
+/*
+ * Interrupt handling routines.
+ */
+#define Rx_BRK 0x0100 /* BREAK event software flag. */
+#define Rx_SYS 0x0200 /* SysRq event software flag. */
+
+static void zs_receive_chars(struct zs_port *zport)
+{
+ struct uart_port *uport = &zport->port;
+ struct zs_scc *scc = zport->scc;
+ struct uart_icount *icount;
+ unsigned int avail, status, ch, flag;
+ int count;
+
+ for (count = 16; count; count--) {
+ spin_lock(&scc->zlock);
+ avail = read_zsreg(zport, R0) & Rx_CH_AV;
+ spin_unlock(&scc->zlock);
+ if (!avail)
+ break;
+
+ spin_lock(&scc->zlock);
+ status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR);
+ ch = read_zsdata(zport);
+ spin_unlock(&scc->zlock);
+
+ flag = TTY_NORMAL;
+
+ icount = &uport->icount;
+ icount->rx++;
+
+ /* Handle the null char received when BREAK is removed. */
+ if (!ch)
+ status |= zport->tty_break;
+ if (unlikely(status &
+ (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) {
+ zport->tty_break = 0;
+
+ /* Reset the error indication. */
+ if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) {
+ spin_lock(&scc->zlock);
+ write_zsreg(zport, R0, ERR_RES);
+ spin_unlock(&scc->zlock);
+ }
+
+ if (status & (Rx_SYS | Rx_BRK)) {
+ icount->brk++;
+ /* SysRq discards the null char. */
+ if (status & Rx_SYS)
+ continue;
+ } else if (status & FRM_ERR)
+ icount->frame++;
+ else if (status & PAR_ERR)
+ icount->parity++;
+ if (status & Rx_OVR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & Rx_BRK)
+ flag = TTY_BREAK;
+ else if (status & FRM_ERR)
+ flag = TTY_FRAME;
+ else if (status & PAR_ERR)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(uport, ch))
+ continue;
+
+ uart_insert_char(uport, status, Rx_OVR, ch, flag);
+ }
+
+ tty_flip_buffer_push(uport->info->tty);
+}
+
+static void zs_raw_transmit_chars(struct zs_port *zport)
+{
+ struct circ_buf *xmit = &zport->port.info->xmit;
+
+ /* XON/XOFF chars. */
+ if (zport->port.x_char) {
+ write_zsdata(zport, zport->port.x_char);
+ zport->port.icount.tx++;
+ zport->port.x_char = 0;
+ return;
+ }
+
+ /* If nothing to do or stopped or hardware stopped. */
+ if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) {
+ zs_raw_stop_tx(zport);
+ return;
+ }
+
+ /* Send char. */
+ write_zsdata(zport, xmit->buf[xmit->tail]);
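+ /* UART_XMIT_SIZE is a power of two, so the mask wraps the tail index. */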
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ zport->port.icount.tx++;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&zport->port);
+
+ /* Are we done? */
+ if (uart_circ_empty(xmit))
+ zs_raw_stop_tx(zport);
+}
+
+static void zs_transmit_chars(struct zs_port *zport)
+{
+ struct zs_scc *scc = zport->scc;
+
+ spin_lock(&scc->zlock);
+ zs_raw_transmit_chars(zport);
+ spin_unlock(&scc->zlock);
+}
+
+static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a)
+{
+ struct uart_port *uport = &zport->port;
+ struct zs_scc *scc = zport->scc;
+ unsigned int delta;
+ u8 status, brk;
+
+ spin_lock(&scc->zlock);
+
+ /* Get status from Read Register 0. */
+ status = read_zsreg(zport, R0);
+
+ if (zport->regs[15] & BRKIE) {
+ brk = status & BRK_ABRT;
+ if (brk && !zport->brk) {
+ spin_unlock(&scc->zlock);
+ if (uart_handle_break(uport))
+ zport->tty_break = Rx_SYS;
+ else
+ zport->tty_break = Rx_BRK;
+ spin_lock(&scc->zlock);
+ }
+ zport->brk = brk;
+ }
+
+ if (zport != zport_a) {
+ delta = zs_raw_xor_mctrl(zport);
+ spin_unlock(&scc->zlock);
+
+ if (delta & TIOCM_CTS)
+ uart_handle_cts_change(uport,
+ zport->mctrl & TIOCM_CTS);
+ if (delta & TIOCM_CAR)
+ uart_handle_dcd_change(uport,
+ zport->mctrl & TIOCM_CAR);
+ if (delta & TIOCM_RNG)
+ uport->icount.rng++;
+ if (delta & TIOCM_DSR)
+ uport->icount.dsr++;
+
+ if (delta)
+ wake_up_interruptible(&uport->info->delta_msr_wait);
+
+ spin_lock(&scc->zlock);
+ }
+
+ /* Clear the status condition... */
+ write_zsreg(zport, R0, RES_EXT_INT);
+
+ spin_unlock(&scc->zlock);
+}
+
+/*
+ * This is the Z85C30 driver's generic interrupt routine.
+ */
+static irqreturn_t zs_interrupt(int irq, void *dev_id)
+{
+ struct zs_scc *scc = dev_id;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+ struct zs_port *zport_b = &scc->zport[ZS_CHAN_B];
+ irqreturn_t status = IRQ_NONE;
+ u8 zs_intreg;
+ int count;
+
+ /*
+ * NOTE: The read register 3, which holds the irq status,
+ * does so for both channels on each chip.  However, the
+ * status value itself must be read from the A channel
+ * and is only valid when read from that channel.
+ * Yes... broken hardware...
+ */
+ for (count = 16; count; count--) {
+ spin_lock(&scc->zlock);
+ zs_intreg = read_zsreg(zport_a, R3);
+ spin_unlock(&scc->zlock);
+ if (!zs_intreg)
+ break;
+
+ /*
+ * We do not like losing characters, so we prioritise
+ * interrupt sources a little bit differently than
+ * the SCC would, were it allowed to.
+ */
+ if (zs_intreg & CHBRxIP)
+ zs_receive_chars(zport_b);
+ if (zs_intreg & CHARxIP)
+ zs_receive_chars(zport_a);
+ if (zs_intreg & CHBEXT)
+ zs_status_handle(zport_b, zport_a);
+ if (zs_intreg & CHAEXT)
+ zs_status_handle(zport_a, zport_a);
+ if (zs_intreg & CHBTxIP)
+ zs_transmit_chars(zport_b);
+ if (zs_intreg & CHATxIP)
+ zs_transmit_chars(zport_a);
+
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+
+/*
+ * Finally, routines used to initialize the serial port.
+ */
+static int zs_startup(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ int irq_guard;
+ int ret;
+
+ irq_guard = atomic_add_return(1, &scc->irq_guard);
+ if (irq_guard == 1) {
+ ret = request_irq(zport->port.irq, zs_interrupt,
+ IRQF_SHARED, "scc", scc);
+ if (ret) {
+ atomic_add(-1, &scc->irq_guard);
+ printk(KERN_ERR "zs: can't get irq %d\n",
+ zport->port.irq);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&scc->zlock, flags);
+
+ /* Clear the receive FIFO. */
+ zs_receive_drain(zport);
+
+ /* Clear the interrupt registers. */
+ write_zsreg(zport, R0, ERR_RES);
+ write_zsreg(zport, R0, RES_Tx_P);
+ /* But Ext only if not being handled already. */
+ if (!(zport->regs[1] & EXT_INT_ENAB))
+ write_zsreg(zport, R0, RES_EXT_INT);
+
+ /* Finally, enable sequencing and interrupts. */
+ zport->regs[1] &= ~RxINT_MASK;
+ zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB;
+ zport->regs[3] |= RxENABLE;
+ zport->regs[5] |= TxENAB;
+ zport->regs[15] |= BRKIE;
+ write_zsreg(zport, R1, zport->regs[1]);
+ write_zsreg(zport, R3, zport->regs[3]);
+ write_zsreg(zport, R5, zport->regs[5]);
+ write_zsreg(zport, R15, zport->regs[15]);
+
+ /* Record the current state of RR0. */
+ zport->mctrl = zs_raw_get_mctrl(zport);
+ zport->brk = read_zsreg(zport, R0) & BRK_ABRT;
+
+ zport->tx_stopped = 1;
+
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ return 0;
+}
+
+static void zs_shutdown(struct uart_port *uport)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ int irq_guard;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+
+ zport->regs[5] &= ~TxENAB;
+ zport->regs[3] &= ~RxENABLE;
+ write_zsreg(zport, R5, zport->regs[5]);
+ write_zsreg(zport, R3, zport->regs[3]);
+
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ irq_guard = atomic_add_return(-1, &scc->irq_guard);
+ if (!irq_guard)
+ free_irq(zport->port.irq, scc);
+}
+
+
+static void zs_reset(struct zs_port *zport)
+{
+ struct zs_scc *scc = zport->scc;
+ int irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+ if (!scc->initialised) {
+ /* Reset the pointer first, just in case... */
+ read_zsreg(zport, R0);
+ /* And let the current transmission finish. */
+ zs_line_drain(zport, irq);
+ write_zsreg(zport, R9, FHWRES);
+ udelay(10);
+ write_zsreg(zport, R9, 0);
+ scc->initialised = 1;
+ }
+ load_zsregs(zport, zport->regs, irq);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
+ struct ktermios *old_termios)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
+ int irq;
+ unsigned int baud, brg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+
+ /* Byte size. */
+ zport->regs[3] &= ~RxNBITS_MASK;
+ zport->regs[5] &= ~TxNBITS_MASK;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ zport->regs[3] |= Rx5;
+ zport->regs[5] |= Tx5;
+ break;
+ case CS6:
+ zport->regs[3] |= Rx6;
+ zport->regs[5] |= Tx6;
+ break;
+ case CS7:
+ zport->regs[3] |= Rx7;
+ zport->regs[5] |= Tx7;
+ break;
+ case CS8:
+ default:
+ zport->regs[3] |= Rx8;
+ zport->regs[5] |= Tx8;
+ break;
+ }
+
+ /* Parity and stop bits. */
+ zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN);
+ if (termios->c_cflag & CSTOPB)
+ zport->regs[4] |= SB2;
+ else
+ zport->regs[4] |= SB1;
+ if (termios->c_cflag & PARENB)
+ zport->regs[4] |= PAR_ENA;
+ if (!(termios->c_cflag & PARODD))
+ zport->regs[4] |= PAR_EVEN;
+ switch (zport->clk_mode) {
+ case 64:
+ zport->regs[4] |= X64CLK;
+ break;
+ case 32:
+ zport->regs[4] |= X32CLK;
+ break;
+ case 16:
+ zport->regs[4] |= X16CLK;
+ break;
+ case 1:
+ zport->regs[4] |= X1CLK;
+ break;
+ default:
+ BUG();
+ }
+
+ baud = uart_get_baud_rate(uport, termios, old_termios, 0,
+ uport->uartclk / zport->clk_mode / 4);
+
+ brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode);
+ zport->regs[12] = brg & 0xff;
+ zport->regs[13] = (brg >> 8) & 0xff;
+
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ uport->read_status_mask = Rx_OVR;
+ if (termios->c_iflag & INPCK)
+ uport->read_status_mask |= FRM_ERR | PAR_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ uport->read_status_mask |= Rx_BRK;
+
+ uport->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= FRM_ERR | PAR_ERR;
+ if (termios->c_iflag & IGNBRK) {
+ uport->ignore_status_mask |= Rx_BRK;
+ if (termios->c_iflag & IGNPAR)
+ uport->ignore_status_mask |= Rx_OVR;
+ }
+
+ if (termios->c_cflag & CREAD)
+ zport->regs[3] |= RxENABLE;
+ else
+ zport->regs[3] &= ~RxENABLE;
+
+ if (zport != zport_a) {
+ if (!(termios->c_cflag & CLOCAL)) {
+ zport->regs[15] |= DCDIE;
+ } else
+ zport->regs[15] &= ~DCDIE;
+ if (termios->c_cflag & CRTSCTS) {
+ zport->regs[15] |= CTSIE;
+ } else
+ zport->regs[15] &= ~CTSIE;
+ zs_raw_xor_mctrl(zport);
+ }
+
+ /* Load up the new values. */
+ load_zsregs(zport, zport->regs, irq);
+
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+
+static const char *zs_type(struct uart_port *uport)
+{
+ return "Z85C30 SCC";
+}
+
+static void zs_release_port(struct uart_port *uport)
+{
+ iounmap(uport->membase);
+ uport->membase = NULL;
+ release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
+}
+
+static int zs_map_port(struct uart_port *uport)
+{
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ ZS_CHAN_IO_SIZE);
+ if (!uport->membase) {
+ printk(KERN_ERR "zs: Cannot map MMIO\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int zs_request_port(struct uart_port *uport)
+{
+ int ret;
+
+ if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) {
+ printk(KERN_ERR "zs: Unable to reserve MMIO resource\n");
+ return -EBUSY;
+ }
+ ret = zs_map_port(uport);
+ if (ret) {
+ release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE);
+ return ret;
+ }
+ return 0;
+}
+
+static void zs_config_port(struct uart_port *uport, int flags)
+{
+ struct zs_port *zport = to_zport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (zs_request_port(uport))
+ return;
+
+ uport->type = PORT_ZS;
+
+ zs_reset(zport);
+ }
+}
+
+static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser)
+{
+ struct zs_port *zport = to_zport(uport);
+ int ret = 0;
+
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS)
+ ret = -EINVAL;
+ if (ser->irq != uport->irq)
+ ret = -EINVAL;
+ if (ser->baud_base != uport->uartclk / zport->clk_mode / 4)
+ ret = -EINVAL;
+ return ret;
+}
+
+
+static struct uart_ops zs_ops = {
+ .tx_empty = zs_tx_empty,
+ .set_mctrl = zs_set_mctrl,
+ .get_mctrl = zs_get_mctrl,
+ .stop_tx = zs_stop_tx,
+ .start_tx = zs_start_tx,
+ .stop_rx = zs_stop_rx,
+ .enable_ms = zs_enable_ms,
+ .break_ctl = zs_break_ctl,
+ .startup = zs_startup,
+ .shutdown = zs_shutdown,
+ .set_termios = zs_set_termios,
+ .type = zs_type,
+ .release_port = zs_release_port,
+ .request_port = zs_request_port,
+ .config_port = zs_config_port,
+ .verify_port = zs_verify_port,
+};
+
+/*
+ * Initialize Z85C30 port structures.
+ */
+static int __init zs_probe_sccs(void)
+{
+ static int probed;
+ struct zs_parms zs_parms;
+ int chip, side, irq;
+ int n_chips = 0;
+ int i;
+
+ if (probed)
+ return 0;
+
+ irq = dec_interrupt[DEC_IRQ_SCC0];
+ if (irq >= 0) {
+ zs_parms.scc[n_chips] = IOASIC_SCC0;
+ zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0];
+ n_chips++;
+ }
+ irq = dec_interrupt[DEC_IRQ_SCC1];
+ if (irq >= 0) {
+ zs_parms.scc[n_chips] = IOASIC_SCC1;
+ zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1];
+ n_chips++;
+ }
+ if (!n_chips)
+ return -ENXIO;
+
+ probed = 1;
+
+ for (chip = 0; chip < n_chips; chip++) {
+ spin_lock_init(&zs_sccs[chip].zlock);
+ for (side = 0; side < ZS_NUM_CHAN; side++) {
+ struct zs_port *zport = &zs_sccs[chip].zport[side];
+ struct uart_port *uport = &zport->port;
+
+ zport->scc = &zs_sccs[chip];
+ zport->clk_mode = 16;
+
+ uport->irq = zs_parms.irq[chip];
+ uport->uartclk = ZS_CLOCK;
+ uport->fifosize = 1;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &zs_ops;
+ uport->line = chip * ZS_NUM_CHAN + side;
+ uport->mapbase = dec_kn_slot_base +
+ zs_parms.scc[chip] +
+ (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE;
+
+ for (i = 0; i < ZS_NUM_REGS; i++)
+ zport->regs[i] = zs_init_regs[i];
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_SERIAL_ZS_CONSOLE
+static void zs_console_putchar(struct uart_port *uport, int ch)
+{
+ struct zs_port *zport = to_zport(uport);
+ struct zs_scc *scc = zport->scc;
+ int irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+ if (zs_transmit_drain(zport, irq))
+ write_zsdata(zport, ch);
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+static void zs_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
+ struct zs_port *zport = &zs_sccs[chip].zport[side];
+ struct zs_scc *scc = zport->scc;
+ unsigned long flags;
+ u8 txint, txenb;
+ int irq;
+
+ /* Disable transmit interrupts and enable the transmitter. */
+ spin_lock_irqsave(&scc->zlock, flags);
+ txint = zport->regs[1];
+ txenb = zport->regs[5];
+ if (txint & TxINT_ENAB) {
+ zport->regs[1] = txint & ~TxINT_ENAB;
+ write_zsreg(zport, R1, zport->regs[1]);
+ }
+ if (!(txenb & TxENAB)) {
+ zport->regs[5] = txenb | TxENAB;
+ write_zsreg(zport, R5, zport->regs[5]);
+ }
+ spin_unlock_irqrestore(&scc->zlock, flags);
+
+ uart_console_write(&zport->port, s, count, zs_console_putchar);
+
+ /* Restore transmit interrupts and the transmitter enable. */
+ spin_lock_irqsave(&scc->zlock, flags);
+ irq = !irqs_disabled_flags(flags);
+ zs_line_drain(zport, irq);
+ if (!(txenb & TxENAB)) {
+ zport->regs[5] &= ~TxENAB;
+ write_zsreg(zport, R5, zport->regs[5]);
+ }
+ if (txint & TxINT_ENAB) {
+ zport->regs[1] |= TxINT_ENAB;
+ write_zsreg(zport, R1, zport->regs[1]);
+ }
+ spin_unlock_irqrestore(&scc->zlock, flags);
+}
+
+/*
+ * Setup serial console baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first uart_open()
+ * - initialise the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init zs_console_setup(struct console *co, char *options)
+{
+ int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
+ struct zs_port *zport = &zs_sccs[chip].zport[side];
+ struct uart_port *uport = &zport->port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret;
+
+ ret = zs_map_port(uport);
+ if (ret)
+ return ret;
+
+ zs_reset(zport);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ return uart_set_options(uport, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver zs_reg;
+static struct console zs_console = {
+ .name = "ttyS",
+ .write = zs_console_write,
+ .device = uart_console_device,
+ .setup = zs_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &zs_reg,
+};
+
+/*
+ * Register console.
+ */
+static int __init zs_serial_console_init(void)
+{
+ int ret;
+
+ ret = zs_probe_sccs();
+ if (ret)
+ return ret;
+ register_console(&zs_console);
+
+ return 0;
+}
+
+console_initcall(zs_serial_console_init);
+
+#define SERIAL_ZS_CONSOLE &zs_console
+#else
+#define SERIAL_ZS_CONSOLE NULL
+#endif /* CONFIG_SERIAL_ZS_CONSOLE */
+
+static struct uart_driver zs_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "serial",
+ .dev_name = "ttyS",
+ .major = TTY_MAJOR,
+ .minor = 64,
+ .nr = ZS_NUM_SCCS * ZS_NUM_CHAN,
+ .cons = SERIAL_ZS_CONSOLE,
+};
+
+/* zs_init inits the driver. */
+static int __init zs_init(void)
+{
+ int i, ret;
+
+ pr_info("%s%s\n", zs_name, zs_version);
+
+ /* Find out how many Z85C30 SCCs we have. */
+ ret = zs_probe_sccs();
+ if (ret)
+ return ret;
+
+ ret = uart_register_driver(&zs_reg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
+ struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
+ struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
+ struct uart_port *uport = &zport->port;
+
+ if (zport->scc)
+ uart_add_one_port(&zs_reg, uport);
+ }
+
+ return 0;
+}
+
+static void __exit zs_exit(void)
+{
+ int i;
+
+ for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) {
+ struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN];
+ struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN];
+ struct uart_port *uport = &zport->port;
+
+ if (zport->scc)
+ uart_remove_one_port(&zs_reg, uport);
+ }
+
+ uart_unregister_driver(&zs_reg);
+}
+
+module_init(zs_init);
+module_exit(zs_exit);
diff --git a/drivers/serial/zs.h b/drivers/serial/zs.h
new file mode 100644
index 000000000000..aa921b57d827
--- /dev/null
+++ b/drivers/serial/zs.h
@@ -0,0 +1,284 @@
+/*
+ * zs.h: Definitions for the DECstation Z85C30 serial driver.
+ *
+ * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras.
+ * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen.
+ *
+ * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 2004, 2005, 2007 Maciej W. Rozycki
+ */
+#ifndef _SERIAL_ZS_H
+#define _SERIAL_ZS_H
+
+#ifdef __KERNEL__
+
+#define ZS_NUM_REGS 16
+
+/*
+ * This is our internal structure for each serial port's state.
+ */
+struct zs_port {
+ struct zs_scc *scc; /* Containing SCC. */
+ struct uart_port port; /* Underlying UART. */
+
+ int clk_mode; /* May be 1, 16, 32, or 64. */
+
+ unsigned int tty_break; /* Set on BREAK condition. */
+ int tx_stopped; /* Output is suspended. */
+
+ unsigned int mctrl; /* State of modem lines. */
+ u8 brk; /* BREAK state from RR0. */
+
+ u8 regs[ZS_NUM_REGS]; /* Channel write registers. */
+};
+
+/*
+ * Per-SCC state for locking and the interrupt handler.
+ */
+struct zs_scc {
+ struct zs_port zport[2];
+ spinlock_t zlock;
+ atomic_t irq_guard;
+ int initialised;
+};
+
+#endif /* __KERNEL__ */
+
+/*
+ * Macros to convert between BRG time constants and bits per second.
+ */
+#define ZS_BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
+#define ZS_BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
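+
+/*
+ * Worked example (assuming the usual 7.3728 MHz PCLK and the x16 clock
+ * mode the driver selects by default, i.e. freq = 7372800 / 16 = 460800):
+ * ZS_BPS_TO_BRG(9600, 460800) = ((460800 + 9600) / 19200) - 2 = 22, and
+ * ZS_BRG_TO_BPS(22, 460800) = 460800 / 2 / 24 = 9600.
+ */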
+
+/*
+ * The Zilog register set.
+ */
+
+/* Write Register 0 (Command) */
+#define R0 0 /* Register selects */
+#define R1 1
+#define R2 2
+#define R3 3
+#define R4 4
+#define R5 5
+#define R6 6
+#define R7 7
+#define R8 8
+#define R9 9
+#define R10 10
+#define R11 11
+#define R12 12
+#define R13 13
+#define R14 14
+#define R15 15
+
+#define NULLCODE 0 /* Null Code */
+#define POINT_HIGH 0x8 /* Select upper half of registers */
+#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
+#define SEND_ABORT 0x18 /* HDLC Abort */
+#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
+#define RES_Tx_P 0x28 /* Reset TxINT Pending */
+#define ERR_RES 0x30 /* Error Reset */
+#define RES_H_IUS 0x38 /* Reset highest IUS */
+
+#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
+#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
+#define RES_EOM_L 0xC0 /* Reset EOM latch */
+
+/* Write Register 1 (Tx/Rx/Ext Int Enable and WAIT/DMA Commands) */
+#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
+#define TxINT_ENAB 0x2 /* Tx Int Enable */
+#define PAR_SPEC 0x4 /* Parity is special condition */
+
+#define RxINT_DISAB 0 /* Rx Int Disable */
+#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
+#define RxINT_ALL 0x10 /* Int on all Rx Characters or error */
+#define RxINT_ERR 0x18 /* Int on error only */
+#define RxINT_MASK 0x18
+
+#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
+#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
+#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
+
+/* Write Register 2 (Interrupt Vector) */
+
+/* Write Register 3 (Receive Parameters and Control) */
+#define RxENABLE 0x1 /* Rx Enable */
+#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
+#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
+#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
+#define ENT_HM 0x10 /* Enter Hunt Mode */
+#define AUTO_ENAB 0x20 /* Auto Enables */
+#define Rx5 0x0 /* Rx 5 Bits/Character */
+#define Rx7 0x40 /* Rx 7 Bits/Character */
+#define Rx6 0x80 /* Rx 6 Bits/Character */
+#define Rx8 0xc0 /* Rx 8 Bits/Character */
+#define RxNBITS_MASK 0xc0
+
+/* Write Register 4 (Transmit/Receive Miscellaneous Parameters and Modes) */
+#define PAR_ENA 0x1 /* Parity Enable */
+#define PAR_EVEN 0x2 /* Parity Even/Odd* */
+
+#define SYNC_ENAB 0 /* Sync Modes Enable */
+#define SB1 0x4 /* 1 stop bit/char */
+#define SB15 0x8 /* 1.5 stop bits/char */
+#define SB2 0xc /* 2 stop bits/char */
+#define SB_MASK 0xc
+
+#define MONSYNC 0 /* 8 Bit Sync character */
+#define BISYNC 0x10 /* 16 bit sync character */
+#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
+#define EXTSYNC 0x30 /* External Sync Mode */
+
+#define X1CLK 0x0 /* x1 clock mode */
+#define X16CLK 0x40 /* x16 clock mode */
+#define X32CLK 0x80 /* x32 clock mode */
+#define X64CLK 0xc0 /* x64 clock mode */
+#define XCLK_MASK 0xc0
+
+/* Write Register 5 (Transmit Parameters and Controls) */
+#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
+#define RTS 0x2 /* RTS */
+#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
+#define TxENAB 0x8 /* Tx Enable */
+#define SND_BRK 0x10 /* Send Break */
+#define Tx5 0x0 /* Tx 5 bits (or less)/character */
+#define Tx7 0x20 /* Tx 7 bits/character */
+#define Tx6 0x40 /* Tx 6 bits/character */
+#define Tx8 0x60 /* Tx 8 bits/character */
+#define TxNBITS_MASK 0x60
+#define DTR 0x80 /* DTR */
+
+/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
+
+/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
+
+/* Write Register 8 (Transmit Buffer) */
+
+/* Write Register 9 (Master Interrupt Control) */
+#define VIS 1 /* Vector Includes Status */
+#define NV 2 /* No Vector */
+#define DLC 4 /* Disable Lower Chain */
+#define MIE 8 /* Master Interrupt Enable */
+#define STATHI 0x10 /* Status high */
+#define SOFTACK 0x20 /* Software Interrupt Acknowledge */
+#define NORESET 0 /* No reset on write to R9 */
+#define CHRB 0x40 /* Reset channel B */
+#define CHRA 0x80 /* Reset channel A */
+#define FHWRES 0xc0 /* Force hardware reset */
+
+/* Write Register 10 (Miscellaneous Transmitter/Receiver Control Bits) */
+#define BIT6 1 /* 6 bit/8bit sync */
+#define LOOPMODE 2 /* SDLC Loop mode */
+#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
+#define MARKIDLE 8 /* Mark/flag on idle */
+#define GAOP 0x10 /* Go active on poll */
+#define NRZ 0 /* NRZ mode */
+#define NRZI 0x20 /* NRZI mode */
+#define FM1 0x40 /* FM1 (transition = 1) */
+#define FM0 0x60 /* FM0 (transition = 0) */
+#define CRCPS 0x80 /* CRC Preset I/O */
+
+/* Write Register 11 (Clock Mode Control) */
+#define TRxCXT 0 /* TRxC = Xtal output */
+#define TRxCTC 1 /* TRxC = Transmit clock */
+#define TRxCBR 2 /* TRxC = BR Generator Output */
+#define TRxCDP 3 /* TRxC = DPLL output */
+#define TRxCOI 4 /* TRxC O/I */
+#define TCRTxCP 0 /* Transmit clock = RTxC pin */
+#define TCTRxCP 8 /* Transmit clock = TRxC pin */
+#define TCBR 0x10 /* Transmit clock = BR Generator output */
+#define TCDPLL 0x18 /* Transmit clock = DPLL output */
+#define RCRTxCP 0 /* Receive clock = RTxC pin */
+#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
+#define RCBR 0x40 /* Receive clock = BR Generator output */
+#define RCDPLL 0x60 /* Receive clock = DPLL output */
+#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
+
+/* Write Register 12 (Lower Byte of Baud Rate Generator Time Constant) */
+
+/* Write Register 13 (Upper Byte of Baud Rate Generator Time Constant) */
+
+/* Write Register 14 (Miscellaneous Control Bits) */
+#define BRENABL 1 /* Baud rate generator enable */
+#define BRSRC 2 /* Baud rate generator source */
+#define DTRREQ 4 /* DTR/Request function */
+#define AUTOECHO 8 /* Auto Echo */
+#define LOOPBAK 0x10 /* Local loopback */
+#define SEARCH 0x20 /* Enter search mode */
+#define RMC 0x40 /* Reset missing clock */
+#define DISDPLL 0x60 /* Disable DPLL */
+#define SSBR 0x80 /* Set DPLL source = BR generator */
+#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
+#define SFMM 0xc0 /* Set FM mode */
+#define SNRZI 0xe0 /* Set NRZI mode */
+
+/* Write Register 15 (External/Status Interrupt Control) */
+#define WR7P_EN 1 /* WR7 Prime SDLC Feature Enable */
+#define ZCIE 2 /* Zero count IE */
+#define DCDIE 8 /* DCD IE */
+#define SYNCIE 0x10 /* Sync/hunt IE */
+#define CTSIE 0x20 /* CTS IE */
+#define TxUIE 0x40 /* Tx Underrun/EOM IE */
+#define BRKIE 0x80 /* Break/Abort IE */
+
+
+/* Read Register 0 (Transmit/Receive Buffer Status and External Status) */
+#define Rx_CH_AV 0x1 /* Rx Character Available */
+#define ZCOUNT 0x2 /* Zero count */
+#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
+#define DCD 0x8 /* DCD */
+#define SYNC_HUNT 0x10 /* Sync/hunt */
+#define CTS 0x20 /* CTS */
+#define TxEOM 0x40 /* Tx underrun */
+#define BRK_ABRT 0x80 /* Break/Abort */
+
+/* Read Register 1 (Special Receive Condition Status) */
+#define ALL_SNT 0x1 /* All sent */
+/* Residue Data for 8 Rx bits/char programmed */
+#define RES3 0x8 /* 0/3 */
+#define RES4 0x4 /* 0/4 */
+#define RES5 0xc /* 0/5 */
+#define RES6 0x2 /* 0/6 */
+#define RES7 0xa /* 0/7 */
+#define RES8 0x6 /* 0/8 */
+#define RES18 0xe /* 1/8 */
+#define RES28 0x0 /* 2/8 */
+/* Special Rx Condition Interrupts */
+#define PAR_ERR 0x10 /* Parity Error */
+#define Rx_OVR 0x20 /* Rx Overrun Error */
+#define FRM_ERR 0x40 /* CRC/Framing Error */
+#define END_FR 0x80 /* End of Frame (SDLC) */
+
+/* Read Register 2 (Interrupt Vector (WR2) -- channel A). */
+
+/* Read Register 2 (Modified Interrupt Vector -- channel B). */
+
+/* Read Register 3 (Interrupt Pending Bits -- channel A only). */
+#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
+#define CHBTxIP 0x2 /* Channel B Tx IP */
+#define CHBRxIP 0x4 /* Channel B Rx IP */
+#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
+#define CHATxIP 0x10 /* Channel A Tx IP */
+#define CHARxIP 0x20 /* Channel A Rx IP */
+
+/* Read Register 6 (SDLC FIFO Status and Byte Count LSB) */
+
+/* Read Register 7 (SDLC FIFO Status and Byte Count MSB) */
+
+/* Read Register 8 (Receive Data) */
+
+/* Read Register 10 (Miscellaneous Status Bits) */
+#define ONLOOP 2 /* On loop */
+#define LOOPSEND 0x10 /* Loop sending */
+#define CLK2MIS 0x40 /* Two clocks missing */
+#define CLK1MIS 0x80 /* One clock missing */
+
+/* Read Register 12 (Lower Byte of Baud Rate Generator Constant (WR12)) */
+
+/* Read Register 13 (Upper Byte of Baud Rate Generator Constant (WR13) */
+
+/* Read Register 15 (External/Status Interrupt Control (WR15)) */
+
+#endif /* _SERIAL_ZS_H */
diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c
index 94b229031198..7d873b3b0513 100644
--- a/drivers/sh/superhyway/superhyway.c
+++ b/drivers/sh/superhyway/superhyway.c
@@ -56,11 +56,10 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev,
struct superhyway_device *dev = sdev;
if (!dev) {
- dev = kmalloc(sizeof(struct superhyway_device), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct superhyway_device), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- memset(dev, 0, sizeof(struct superhyway_device));
}
dev->bus = bus;
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 2dd6eed50aa0..29fcd6d0301d 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -629,7 +629,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
#endif
/* Set up per-IOC3 data */
- idd = kmalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL);
+ idd = kzalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL);
if (!idd) {
printk(KERN_WARNING
"%s: Failed to allocate IOC3 data for pci_dev %s.\n",
@@ -637,7 +637,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
ret = -ENODEV;
goto out_idd;
}
- memset(idd, 0, sizeof(struct ioc3_driver_data));
spin_lock_init(&idd->ir_lock);
spin_lock_init(&idd->gpio_lock);
idd->pdev = pdev;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5e3f748f2693..b91571122daa 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -107,6 +107,15 @@ config SPI_IMX
This enables using the Freescale iMX SPI controller in master
mode.
+config SPI_LM70_LLP
+ tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
+ depends on SPI_MASTER && PARPORT && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ This driver supports the NS LM70 LLP Evaluation Board,
+ which interfaces to an LM70 temperature sensor using
+ a parallel port.
+
config SPI_MPC52xx_PSC
tristate "Freescale MPC52xx PSC SPI controller"
depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL
@@ -133,6 +142,12 @@ config SPI_OMAP_UWIRE
help
This hooks up to the MicroWire controller on OMAP1 chips.
+config SPI_OMAP24XX
+ tristate "McSPI driver for OMAP24xx"
+ depends on SPI_MASTER && ARCH_OMAP24XX
+ help
+ SPI master controller for OMAP24xx Multichannel SPI
+ (McSPI) modules.
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
@@ -145,17 +160,36 @@ config SPI_PXA2XX
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+ select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs
config SPI_S3C24XX_GPIO
tristate "Samsung S3C24XX series SPI by GPIO"
- depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL
+ depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
+ select SPI_BITBANG
help
SPI driver for Samsung S3C24XX series ARM SoCs using
GPIO lines to provide the SPI bus. This can be used where
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+
+config SPI_TXX9
+ tristate "Toshiba TXx9 SPI controller"
+ depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
+ help
+ SPI driver for Toshiba TXx9 MIPS SoCs
+
+config SPI_XILINX
+ tristate "Xilinx SPI controller"
+ depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ This exposes the SPI controller IP from the Xilinx EDK.
+
+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
+ Product Specification document (DS464) for hardware details.
+
#
# Add new SPI master controllers in alphabetical order above this line
#
@@ -187,6 +221,15 @@ config SPI_SPIDEV
Note that this application programming interface is EXPERIMENTAL
and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
+config SPI_TLE62X0
+ tristate "Infineon TLE62X0 (for power switching)"
+ depends on SPI_MASTER && SYSFS
+ help
+ SPI driver for Infineon TLE62X0 series line driver chips,
+ such as the TLE6220, TLE6230 and TLE6240. This provides a
+ sysfs interface, with each line presented as a kind of GPIO
+ exposing both switch control and diagnostic feedback.
+
#
# Add new SPI protocol masters in alphabetical order above this line
#
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5788d867de84..41fbac45c323 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,17 +17,22 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
+obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
+obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
+obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
+obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
obj-$(CONFIG_SPI_AT25) += at25.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
+obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
# ... add above this line ...
# SPI slave controller drivers (upstream link)
diff --git a/drivers/spi/at25.c b/drivers/spi/at25.c
index 8efa07e8b8c2..e007833cca59 100644
--- a/drivers/spi/at25.c
+++ b/drivers/spi/at25.c
@@ -111,7 +111,8 @@ at25_ee_read(
}
static ssize_t
-at25_bin_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev;
struct at25_data *at25;
@@ -236,7 +237,8 @@ at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count)
}
static ssize_t
-at25_bin_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+at25_bin_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev;
struct at25_data *at25;
@@ -314,7 +316,6 @@ static int at25_probe(struct spi_device *spi)
*/
at25->bin.attr.name = "eeprom";
at25->bin.attr.mode = S_IRUSR;
- at25->bin.attr.owner = THIS_MODULE;
at25->bin.read = at25_bin_read;
at25->bin.size = at25->chip.byte_len;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 8b2601de3630..ad144054da30 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -46,6 +46,7 @@ struct atmel_spi {
struct clk *clk;
struct platform_device *pdev;
unsigned new_1:1;
+ struct spi_device *stay;
u8 stopping;
struct list_head queue;
@@ -62,29 +63,62 @@ struct atmel_spi {
/*
* Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
* they assume that spi slave device state will not change on deselect, so
- * that automagic deselection is OK. Not so! Workaround uses nCSx pins
- * as GPIOs; or newer controllers have CSAAT and friends.
+ * that automagic deselection is OK. ("NPCSx rises if no data is to be
+ * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
+ * controllers have CSAAT and friends.
*
- * Since the CSAAT functionality is a bit weird on newer controllers
- * as well, we use GPIO to control nCSx pins on all controllers.
+ * Since the CSAAT functionality is a bit weird on newer controllers as
+ * well, we use GPIO to control nCSx pins on all controllers, updating
+ * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
+ * support active-high chipselects despite the controller's belief that
+ * only active-low devices/systems exist.
+ *
+ * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
+ * right when driven with GPIO. ("Mode Fault does not allow more than one
+ * Master on Chip Select 0.") No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
*/
-static inline void cs_activate(struct spi_device *spi)
+static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
unsigned gpio = (unsigned) spi->controller_data;
unsigned active = spi->mode & SPI_CS_HIGH;
+ u32 mr;
+
+ mr = spi_readl(as, MR);
+ mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
- dev_dbg(&spi->dev, "activate %u%s\n", gpio, active ? " (high)" : "");
- gpio_set_value(gpio, active);
+ dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
+ gpio, active ? " (high)" : "",
+ mr);
+
+ if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
+ gpio_set_value(gpio, active);
+ spi_writel(as, MR, mr);
}
-static inline void cs_deactivate(struct spi_device *spi)
+static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
unsigned gpio = (unsigned) spi->controller_data;
unsigned active = spi->mode & SPI_CS_HIGH;
+ u32 mr;
- dev_dbg(&spi->dev, "DEactivate %u%s\n", gpio, active ? " (low)" : "");
- gpio_set_value(gpio, !active);
+ /* only deactivate *this* device; sometimes transfers to
+ * another device may be active when this routine is called.
+ */
+ mr = spi_readl(as, MR);
+ if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
+ mr = SPI_BFINS(PCS, 0xf, mr);
+ spi_writel(as, MR, mr);
+ }
+
+ dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
+ gpio, active ? " (low)" : "",
+ mr);
+
+ if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
+ gpio_set_value(gpio, !active);
}
/*
@@ -140,6 +174,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
/* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
* mechanism might help avoid the IRQ latency between transfers
+ * (and improve the nCS0 errata handling on at91rm9200 chips)
*
* We're also waiting for ENDRX before we start the next
* transfer because we need to handle some difficult timing
@@ -169,33 +204,62 @@ static void atmel_spi_next_message(struct spi_master *master)
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct spi_message *msg;
- u32 mr;
+ struct spi_device *spi;
BUG_ON(as->current_transfer);
msg = list_entry(as->queue.next, struct spi_message, queue);
+ spi = msg->spi;
- /* Select the chip */
- mr = spi_readl(as, MR);
- mr = SPI_BFINS(PCS, ~(1 << msg->spi->chip_select), mr);
- spi_writel(as, MR, mr);
- cs_activate(msg->spi);
+ dev_dbg(master->cdev.dev, "start message %p for %s\n",
+ msg, spi->dev.bus_id);
+
+ /* select chip if it's not still active */
+ if (as->stay) {
+ if (as->stay != spi) {
+ cs_deactivate(as, as->stay);
+ cs_activate(as, spi);
+ }
+ as->stay = NULL;
+ } else
+ cs_activate(as, spi);
atmel_spi_next_xfer(master, msg);
}
-static void
+/*
+ * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
+ * - The buffer is either valid for CPU access or NULL
+ * - If the buffer is valid, so is its DMA address
+ *
+ * This driver manages the DMA address unless message->is_dma_mapped.
+ */
+static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
+ struct device *dev = &as->pdev->dev;
+
xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
- if (xfer->tx_buf)
- xfer->tx_dma = dma_map_single(&as->pdev->dev,
+ if (xfer->tx_buf) {
+ xfer->tx_dma = dma_map_single(dev,
(void *) xfer->tx_buf, xfer->len,
DMA_TO_DEVICE);
- if (xfer->rx_buf)
- xfer->rx_dma = dma_map_single(&as->pdev->dev,
+ if (dma_mapping_error(xfer->tx_dma))
+ return -ENOMEM;
+ }
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(dev,
xfer->rx_buf, xfer->len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(xfer->rx_dma)) {
+ if (xfer->tx_buf)
+ dma_unmap_single(dev,
+ xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ }
+ return 0;
}
static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
@@ -211,9 +275,13 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
- struct spi_message *msg, int status)
+ struct spi_message *msg, int status, int stay)
{
- cs_deactivate(msg->spi);
+ if (!stay || status < 0)
+ cs_deactivate(as, msg->spi);
+ else
+ as->stay = msg->spi;
+
list_del(&msg->queue);
msg->status = status;
@@ -303,7 +371,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
- atmel_spi_msg_done(master, as, msg, -EIO);
+ atmel_spi_msg_done(master, as, msg, -EIO, 0);
} else if (pending & SPI_BIT(ENDRX)) {
ret = IRQ_HANDLED;
@@ -321,12 +389,13 @@ atmel_spi_interrupt(int irq, void *dev_id)
if (msg->transfers.prev == &xfer->transfer_list) {
/* report completed message */
- atmel_spi_msg_done(master, as, msg, 0);
+ atmel_spi_msg_done(master, as, msg, 0,
+ xfer->cs_change);
} else {
if (xfer->cs_change) {
- cs_deactivate(msg->spi);
+ cs_deactivate(as, msg->spi);
udelay(1);
- cs_activate(msg->spi);
+ cs_activate(as, msg->spi);
}
/*
@@ -350,6 +419,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
return ret;
}
+/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
static int atmel_spi_setup(struct spi_device *spi)
@@ -388,6 +458,14 @@ static int atmel_spi_setup(struct spi_device *spi)
return -EINVAL;
}
+ /* see notes above re chipselect */
+ if (cpu_is_at91rm9200()
+ && spi->chip_select == 0
+ && (spi->mode & SPI_CS_HIGH)) {
+ dev_dbg(&spi->dev, "setup: can't be active-high\n");
+ return -EINVAL;
+ }
+
/* speed zero convention is used by some upper layers */
bus_hz = clk_get_rate(as->clk);
if (spi->max_speed_hz) {
@@ -397,8 +475,9 @@ static int atmel_spi_setup(struct spi_device *spi)
scbr = ((bus_hz + spi->max_speed_hz - 1)
/ spi->max_speed_hz);
if (scbr >= (1 << SPI_SCBR_SIZE)) {
- dev_dbg(&spi->dev, "setup: %d Hz too slow, scbr %u\n",
- spi->max_speed_hz, scbr);
+ dev_dbg(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ spi->max_speed_hz, scbr, bus_hz/255);
return -EINVAL;
}
} else
@@ -423,6 +502,14 @@ static int atmel_spi_setup(struct spi_device *spi)
return ret;
spi->controller_state = (void *)npcs_pin;
gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&as->lock, flags);
+ if (as->stay == spi)
+ as->stay = NULL;
+ cs_deactivate(as, spi);
+ spin_unlock_irqrestore(&as->lock, flags);
}
dev_dbg(&spi->dev,
@@ -464,14 +551,22 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
dev_dbg(&spi->dev, "no protocol options yet\n");
return -ENOPROTOOPT;
}
- }
- /* scrub dcache "early" */
- if (!msg->is_dma_mapped) {
- list_for_each_entry(xfer, &msg->transfers, transfer_list)
- atmel_spi_dma_map_xfer(as, xfer);
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting. This is a DMA-only driver.
+ *
+ * NOTE that if dma_unmap_single() ever starts to do work on
+ * platforms supported by this driver, we would need to clean
+ * up mappings for previously-mapped transfers.
+ */
+ if (!msg->is_dma_mapped) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
}
+#ifdef VERBOSE
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
dev_dbg(controller,
" xfer %p: len %u tx %p/%08x rx %p/%08x\n",
@@ -479,6 +574,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
xfer->tx_buf, xfer->tx_dma,
xfer->rx_buf, xfer->rx_dma);
}
+#endif
msg->status = -EINPROGRESS;
msg->actual_length = 0;
@@ -494,8 +590,21 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
static void atmel_spi_cleanup(struct spi_device *spi)
{
- if (spi->controller_state)
- gpio_free((unsigned int)spi->controller_data);
+ struct atmel_spi *as = spi_master_get_devdata(spi->master);
+ unsigned gpio = (unsigned) spi->controller_data;
+ unsigned long flags;
+
+ if (!spi->controller_state)
+ return;
+
+ spin_lock_irqsave(&as->lock, flags);
+ if (as->stay == spi) {
+ as->stay = NULL;
+ cs_deactivate(as, spi);
+ }
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ gpio_free(gpio);
}
/*-------------------------------------------------------------------------*/
@@ -536,6 +645,10 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
as = spi_master_get_devdata(master);
+ /*
+ * Scratch buffer is used for throwaway rx and tx data.
+ * It's coherent to minimize dcache pollution.
+ */
as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
&as->buffer_dma, GFP_KERNEL);
if (!as->buffer)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index ae2b1af0dba4..c47a650183a1 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -280,6 +280,9 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
+
static int au1550_spi_setup(struct spi_device *spi)
{
struct au1550_spi *hw = spi_master_get_devdata(spi->master);
@@ -292,6 +295,12 @@ static int au1550_spi_setup(struct spi_device *spi)
return -EINVAL;
}
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (spi->max_speed_hz == 0)
spi->max_speed_hz = hw->freq_max;
if (spi->max_speed_hz > hw->freq_max
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 11f36bef3057..d2a4b2bdb07b 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -270,6 +270,9 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
spin_unlock_irq(&mps->lock);
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
+
static int mpc52xx_psc_spi_setup(struct spi_device *spi)
{
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -279,6 +282,12 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
if (spi->bits_per_word%8)
return -EINVAL;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
new file mode 100644
index 000000000000..6b357cdb9ea3
--- /dev/null
+++ b/drivers/spi/omap2_mcspi.c
@@ -0,0 +1,1081 @@
+/*
+ * OMAP2 McSPI controller driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/clock.h>
+
+
+#define OMAP2_MCSPI_MAX_FREQ 48000000
+
+#define OMAP2_MCSPI_REVISION 0x00
+#define OMAP2_MCSPI_SYSCONFIG 0x10
+#define OMAP2_MCSPI_SYSSTATUS 0x14
+#define OMAP2_MCSPI_IRQSTATUS 0x18
+#define OMAP2_MCSPI_IRQENABLE 0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE 0x20
+#define OMAP2_MCSPI_SYST 0x24
+#define OMAP2_MCSPI_MODULCTRL 0x28
+
+/* per-channel banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHCONF0 0x2c
+#define OMAP2_MCSPI_CHSTAT0 0x30
+#define OMAP2_MCSPI_CHCTRL0 0x34
+#define OMAP2_MCSPI_TX0 0x38
+#define OMAP2_MCSPI_RX0 0x3c
+
+/* per-register bitmasks: */
+
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
+
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE (1 << 0)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE (1 << 0)
+#define OMAP2_MCSPI_MODULCTRL_MS (1 << 2)
+#define OMAP2_MCSPI_MODULCTRL_STEST (1 << 3)
+
+#define OMAP2_MCSPI_CHCONF_PHA (1 << 0)
+#define OMAP2_MCSPI_CHCONF_POL (1 << 1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL (1 << 6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY (0x01 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY (0x02 << 12)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW (1 << 14)
+#define OMAP2_MCSPI_CHCONF_DMAR (1 << 15)
+#define OMAP2_MCSPI_CHCONF_DPE0 (1 << 16)
+#define OMAP2_MCSPI_CHCONF_DPE1 (1 << 17)
+#define OMAP2_MCSPI_CHCONF_IS (1 << 18)
+#define OMAP2_MCSPI_CHCONF_TURBO (1 << 19)
+#define OMAP2_MCSPI_CHCONF_FORCE (1 << 20)
+
+#define OMAP2_MCSPI_CHSTAT_RXS (1 << 0)
+#define OMAP2_MCSPI_CHSTAT_TXS (1 << 1)
+#define OMAP2_MCSPI_CHSTAT_EOT (1 << 2)
+
+#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
+
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct omap2_mcspi_dma {
+ int dma_tx_channel;
+ int dma_rx_channel;
+
+ int dma_tx_sync_dev;
+ int dma_rx_sync_dev;
+
+ struct completion dma_tx_completion;
+ struct completion dma_rx_completion;
+};
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 8
+
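+/*
+ * With the value above, transfers shorter than 8 bytes are expected to
+ * take the PIO path (omap2_mcspi_txrx_pio) rather than paying the DMA
+ * setup and teardown cost.
+ */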
+
+struct omap2_mcspi {
+ struct work_struct work;
+ /* lock protects queue and registers */
+ spinlock_t lock;
+ struct list_head msg_queue;
+ struct spi_master *master;
+ struct clk *ick;
+ struct clk *fck;
+ /* Virtual base address of the controller */
+ void __iomem *base;
+ /* SPI1 has 4 channels, while SPI2 has 2 */
+ struct omap2_mcspi_dma *dma_channels;
+};
+
+struct omap2_mcspi_cs {
+ void __iomem *base;
+ int word_len;
+};
+
+static struct workqueue_struct *omap2_mcspi_wq;
+
+#define MOD_REG_BIT(val, mask, set) do { \
+ if (set) \
+ val |= mask; \
+ else \
+ val &= ~mask; \
+} while (0)
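+
+/*
+ * The do { ... } while (0) wrapper makes MOD_REG_BIT() expand to a single
+ * statement, so it stays safe under an unbraced if/else, e.g.:
+ *
+ *	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_EPOL, 1);
+ */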
+
+static inline void mcspi_write_reg(struct spi_master *master,
+ int idx, u32 val)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ __raw_writel(val, mcspi->base + idx);
+}
+
+static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ return __raw_readl(mcspi->base + idx);
+}
+
+static inline void mcspi_write_cs_reg(const struct spi_device *spi,
+ int idx, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ __raw_writel(val, cs->base + idx);
+}
+
+static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return __raw_readl(cs->base + idx);
+}
+
+static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
+ int is_read, int enable)
+{
+ u32 l, rw;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+
+ if (is_read) /* 1 is read, 0 write */
+ rw = OMAP2_MCSPI_CHCONF_DMAR;
+ else
+ rw = OMAP2_MCSPI_CHCONF_DMAW;
+
+ MOD_REG_BIT(l, rw, enable);
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+}
+
+static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+{
+ u32 l;
+
+ l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+}
+
+static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
+{
+ u32 l;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+}
+
+static void omap2_mcspi_set_master_mode(struct spi_master *master)
+{
+ u32 l;
+
+ /* setup when switching from (reset default) slave mode
+ * to single-channel master mode
+ */
+ l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
+ MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
+ MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
+ MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
+}
+
+static unsigned
+omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count, c;
+ unsigned long base, tx_reg, rx_reg;
+ int word_len, data_type, element_count;
+ u8 *rx;
+ const u8 *tx;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ base = (unsigned long) io_v2p(cs->base);
+ tx_reg = base + OMAP2_MCSPI_TX0;
+ rx_reg = base + OMAP2_MCSPI_RX0;
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ if (word_len <= 8) {
+ data_type = OMAP_DMA_DATA_TYPE_S8;
+ element_count = count;
+ } else if (word_len <= 16) {
+ data_type = OMAP_DMA_DATA_TYPE_S16;
+ element_count = count >> 1;
+ } else /* word_len <= 32 */ {
+ data_type = OMAP_DMA_DATA_TYPE_S32;
+ element_count = count >> 2;
+ }
+
+ if (tx != NULL) {
+ omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
+ data_type, element_count, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ mcspi_dma->dma_tx_sync_dev, 0);
+
+ omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ tx_reg, 0, 0);
+
+ omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ xfer->tx_dma, 0, 0);
+ }
+
+ if (rx != NULL) {
+ omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
+ data_type, element_count, 1,
+ OMAP_DMA_SYNC_ELEMENT,
+ mcspi_dma->dma_rx_sync_dev, 1);
+
+ omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
+ OMAP_DMA_AMODE_CONSTANT,
+ rx_reg, 0, 0);
+
+ omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
+ OMAP_DMA_AMODE_POST_INC,
+ xfer->rx_dma, 0, 0);
+ }
+
+ if (tx != NULL) {
+ omap_start_dma(mcspi_dma->dma_tx_channel);
+ omap2_mcspi_set_dma_req(spi, 0, 1);
+ }
+
+ if (rx != NULL) {
+ omap_start_dma(mcspi_dma->dma_rx_channel);
+ omap2_mcspi_set_dma_req(spi, 1, 1);
+ }
+
+ if (tx != NULL) {
+ wait_for_completion(&mcspi_dma->dma_tx_completion);
+ dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
+ }
+
+ if (rx != NULL) {
+ wait_for_completion(&mcspi_dma->dma_rx_completion);
+ dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
+ }
+ return count;
+}
+
+static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(__raw_readl(reg) & bit)) {
+ if (time_after(jiffies, timeout))
+ return -1;
+ cpu_relax();
+ }
+ return 0;
+}
+
+static unsigned
+omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ u32 l;
+ void __iomem *base = cs->base;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+ void __iomem *chstat_reg;
+ int word_len;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+
+ /* We store the pre-calculated register addresses on stack to speed
+ * up the transfer loop. */
+ tx_reg = base + OMAP2_MCSPI_TX0;
+ rx_reg = base + OMAP2_MCSPI_RX0;
+ chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ do {
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "write-%d %02x\n",
+ word_len, *tx);
+#endif
+ __raw_writel(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+ /* prevent last RX_ONLY read from triggering
+ * more word i/o: switch to rx+tx
+ */
+ if (c == 0 && tx == NULL)
+ mcspi_write_cs_reg(spi,
+ OMAP2_MCSPI_CHCONF0, l);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+#endif
+ }
+ c -= 1;
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "write-%d %04x\n",
+ word_len, *tx);
+#endif
+ __raw_writel(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+ /* prevent last RX_ONLY read from triggering
+ * more word i/o: switch to rx+tx
+ */
+ if (c == 0 && tx == NULL)
+ mcspi_write_cs_reg(spi,
+ OMAP2_MCSPI_CHCONF0, l);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+#endif
+ }
+ c -= 2;
+ } while (c);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "write-%d %04x\n",
+ word_len, *tx);
+#endif
+ __raw_writel(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+ /* prevent last RX_ONLY read from triggering
+ * more word i/o: switch to rx+tx
+ */
+ if (c == 0 && tx == NULL)
+ mcspi_write_cs_reg(spi,
+ OMAP2_MCSPI_CHCONF0, l);
+ *rx++ = __raw_readl(rx_reg);
+#ifdef VERBOSE
+ dev_dbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+#endif
+ }
+ c -= 4;
+ } while (c);
+ }
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (xfer->rx_buf == NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ } else if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0)
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+out:
+ return count - c;
+}
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ u32 l = 0, div = 0;
+ u8 word_len = spi->bits_per_word;
+
+ mcspi = spi_master_get_devdata(spi->master);
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+
+ cs->word_len = word_len;
+
+ if (spi->max_speed_hz) {
+ while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
+ > spi->max_speed_hz)
+ div++;
+ } else
+ div = 15;
+
+ l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+
+ /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+
+ /* wordlength */
+ l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+ l |= (word_len - 1) << 7;
+
+ /* set chipselect polarity; manage with FORCE */
+ if (!(spi->mode & SPI_CS_HIGH))
+ l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+ /* set clock divisor */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+ l |= div << 2;
+
+ /* set SPI mode 0..3 */
+ if (spi->mode & SPI_CPOL)
+ l |= OMAP2_MCSPI_CHCONF_POL;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_POL;
+ if (spi->mode & SPI_CPHA)
+ l |= OMAP2_MCSPI_CHCONF_PHA;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
+
+ dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
+ OMAP2_MCSPI_MAX_FREQ / (1 << div),
+ (spi->mode & SPI_CPHA) ? "trailing" : "leading",
+ (spi->mode & SPI_CPOL) ? "inverted" : "normal");
+
+ return 0;
+}
+
+static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+ complete(&mcspi_dma->dma_rx_completion);
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
+
+ complete(&mcspi_dma->dma_tx_completion);
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
+static int omap2_mcspi_request_dma(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+
+ if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
+ omap2_mcspi_dma_rx_callback, spi,
+ &mcspi_dma->dma_rx_channel)) {
+ dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
+ return -EAGAIN;
+ }
+
+ if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
+ omap2_mcspi_dma_tx_callback, spi,
+ &mcspi_dma->dma_tx_channel)) {
+ omap_free_dma(mcspi_dma->dma_rx_channel);
+ mcspi_dma->dma_rx_channel = -1;
+ dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+ return -EAGAIN;
+ }
+
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap2_mcspi_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ if (spi->bits_per_word == 0)
+ spi->bits_per_word = 8;
+ else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+ dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = mcspi->base + spi->chip_select * 0x14;
+ spi->controller_state = cs;
+ }
+
+ if (mcspi_dma->dma_rx_channel == -1
+ || mcspi_dma->dma_tx_channel == -1) {
+ ret = omap2_mcspi_request_dma(spi);
+ if (ret < 0)
+ return ret;
+ }
+
+ clk_enable(mcspi->ick);
+ clk_enable(mcspi->fck);
+ ret = omap2_mcspi_setup_transfer(spi, NULL);
+ clk_disable(mcspi->fck);
+ clk_disable(mcspi->ick);
+
+ return ret;
+}
+
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ kfree(spi->controller_state);
+
+ if (mcspi_dma->dma_rx_channel != -1) {
+ omap_free_dma(mcspi_dma->dma_rx_channel);
+ mcspi_dma->dma_rx_channel = -1;
+ }
+ if (mcspi_dma->dma_tx_channel != -1) {
+ omap_free_dma(mcspi_dma->dma_tx_channel);
+ mcspi_dma->dma_tx_channel = -1;
+ }
+}
+
+static void omap2_mcspi_work(struct work_struct *work)
+{
+ struct omap2_mcspi *mcspi;
+
+ mcspi = container_of(work, struct omap2_mcspi, work);
+ spin_lock_irq(&mcspi->lock);
+
+ clk_enable(mcspi->ick);
+ clk_enable(mcspi->fck);
+
+ /* We only enable one channel at a time -- the one whose message is
+ * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+ * channel" master mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+ while (!list_empty(&mcspi->msg_queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ struct omap2_mcspi_device_config *conf;
+ struct omap2_mcspi_cs *cs;
+ int par_override = 0;
+ int status = 0;
+ u32 chconf;
+
+ m = container_of(mcspi->msg_queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&m->queue);
+ spin_unlock_irq(&mcspi->lock);
+
+ spi = m->spi;
+ conf = spi->controller_data;
+ cs = spi->controller_state;
+
+ omap2_mcspi_set_enable(spi, 1);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
+
+ if (!cs_active) {
+ omap2_mcspi_force_cs(spi, 1);
+ cs_active = 1;
+ }
+
+ chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf);
+
+ if (t->len) {
+ unsigned count;
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ __raw_writel(0, cs->base
+ + OMAP2_MCSPI_TX0);
+
+ if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ /* ignore the "leave it on after last xfer" hint */
+ if (t->cs_change) {
+ omap2_mcspi_force_cs(spi, 0);
+ cs_active = 0;
+ }
+ }
+
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap2_mcspi_setup_transfer(spi, NULL);
+ }
+
+ if (cs_active)
+ omap2_mcspi_force_cs(spi, 0);
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ m->status = status;
+ m->complete(m->context);
+
+ spin_lock_irq(&mcspi->lock);
+ }
+
+ clk_disable(mcspi->fck);
+ clk_disable(mcspi->ick);
+
+ spin_unlock_irq(&mcspi->lock);
+}
+
+static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct omap2_mcspi *mcspi;
+ unsigned long flags;
+ struct spi_transfer *t;
+
+ m->actual_length = 0;
+ m->status = 0;
+
+ /* reject invalid messages and transfers */
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned len = t->len;
+
+ if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
+ || (len && !(rx_buf || tx_buf))
+ || (t->bits_per_word &&
+ ( t->bits_per_word < 4
+ || t->bits_per_word > 32))) {
+ dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ t->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ t->bits_per_word);
+ return -EINVAL;
+ }
+ if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
+ dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
+ t->speed_hz,
+ OMAP2_MCSPI_MAX_FREQ/(1<<16));
+ return -EINVAL;
+ }
+
+ if (m->is_dma_mapped || len < DMA_MIN_BYTES)
+ continue;
+
+ /* Do DMA mapping "early" for better error reporting and
+ * dcache use. Note that if dma_unmap_single() ever starts
+ * to do real work on ARM, we'd need to clean up mappings
+ * for previous transfers on *ALL* exits of this loop...
+ */
+ if (tx_buf != NULL) {
+ t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(t->tx_dma)) {
+ dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ 'T', len);
+ return -EINVAL;
+ }
+ }
+ if (rx_buf != NULL) {
+ t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(t->rx_dma)) {
+ dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
+ 'R', len);
+ if (tx_buf != NULL)
+ dma_unmap_single(NULL, t->tx_dma,
+ len, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+ }
+ }
+
+ mcspi = spi_master_get_devdata(spi->master);
+
+ spin_lock_irqsave(&mcspi->lock, flags);
+ list_add_tail(&m->queue, &mcspi->msg_queue);
+ queue_work(omap2_mcspi_wq, &mcspi->work);
+ spin_unlock_irqrestore(&mcspi->lock, flags);
+
+ return 0;
+}
+
+static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
+{
+ struct spi_master *master = mcspi->master;
+ u32 tmp;
+
+ clk_enable(mcspi->ick);
+ clk_enable(mcspi->fck);
+
+ mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
+ OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
+ do {
+ tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
+ } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
+
+ mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
+ /* (3 << 8) | (2 << 3) | */
+ OMAP2_MCSPI_SYSCONFIG_AUTOIDLE);
+
+ omap2_mcspi_set_master_mode(master);
+
+ clk_disable(mcspi->fck);
+ clk_disable(mcspi->ick);
+ return 0;
+}
+
+static u8 __initdata spi1_rxdma_id [] = {
+ OMAP24XX_DMA_SPI1_RX0,
+ OMAP24XX_DMA_SPI1_RX1,
+ OMAP24XX_DMA_SPI1_RX2,
+ OMAP24XX_DMA_SPI1_RX3,
+};
+
+static u8 __initdata spi1_txdma_id [] = {
+ OMAP24XX_DMA_SPI1_TX0,
+ OMAP24XX_DMA_SPI1_TX1,
+ OMAP24XX_DMA_SPI1_TX2,
+ OMAP24XX_DMA_SPI1_TX3,
+};
+
+static u8 __initdata spi2_rxdma_id[] = {
+ OMAP24XX_DMA_SPI2_RX0,
+ OMAP24XX_DMA_SPI2_RX1,
+};
+
+static u8 __initdata spi2_txdma_id[] = {
+ OMAP24XX_DMA_SPI2_TX0,
+ OMAP24XX_DMA_SPI2_TX1,
+};
+
+static int __init omap2_mcspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap2_mcspi *mcspi;
+ struct resource *r;
+ int status = 0, i;
+ const u8 *rxdma_id, *txdma_id;
+ unsigned num_chipselect;
+
+ switch (pdev->id) {
+ case 1:
+ rxdma_id = spi1_rxdma_id;
+ txdma_id = spi1_txdma_id;
+ num_chipselect = 4;
+ break;
+ case 2:
+ rxdma_id = spi2_rxdma_id;
+ txdma_id = spi2_txdma_id;
+ num_chipselect = 2;
+ break;
+ /* REVISIT omap2430 has a third McSPI ... */
+ default:
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = omap2_mcspi_setup;
+ master->transfer = omap2_mcspi_transfer;
+ master->cleanup = omap2_mcspi_cleanup;
+ master->num_chipselect = num_chipselect;
+
+ dev_set_drvdata(&pdev->dev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ status = -ENODEV;
+ goto err1;
+ }
+ if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ pdev->dev.bus_id)) {
+ status = -EBUSY;
+ goto err1;
+ }
+
+ mcspi->base = (void __iomem *) io_p2v(r->start);
+
+ INIT_WORK(&mcspi->work, omap2_mcspi_work);
+
+ spin_lock_init(&mcspi->lock);
+ INIT_LIST_HEAD(&mcspi->msg_queue);
+
+ mcspi->ick = clk_get(&pdev->dev, "mcspi_ick");
+ if (IS_ERR(mcspi->ick)) {
+ dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
+ status = PTR_ERR(mcspi->ick);
+ goto err1a;
+ }
+ mcspi->fck = clk_get(&pdev->dev, "mcspi_fck");
+ if (IS_ERR(mcspi->fck)) {
+ dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
+ status = PTR_ERR(mcspi->fck);
+ goto err2;
+ }
+
+ mcspi->dma_channels = kcalloc(master->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
+ goto err3;
+ }
+
+ for (i = 0; i < num_chipselect; i++) {
+ mcspi->dma_channels[i].dma_rx_channel = -1;
+ mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
+ mcspi->dma_channels[i].dma_tx_channel = -1;
+ mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
+ }
+
+ if (omap2_mcspi_reset(mcspi) < 0)
+ goto err4;
+
+ status = spi_register_master(master);
+ if (status < 0)
+ goto err4;
+
+ return status;
+
+err4:
+ kfree(mcspi->dma_channels);
+err3:
+ clk_put(mcspi->fck);
+err2:
+ clk_put(mcspi->ick);
+err1a:
+ release_mem_region(r->start, (r->end - r->start) + 1);
+err1:
+ spi_master_put(master);
+ return status;
+}
+
+static int __exit omap2_mcspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *dma_channels;
+ struct resource *r;
+
+ master = dev_get_drvdata(&pdev->dev);
+ mcspi = spi_master_get_devdata(master);
+ dma_channels = mcspi->dma_channels;
+
+ clk_put(mcspi->fck);
+ clk_put(mcspi->ick);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, (r->end - r->start) + 1);
+
+ spi_unregister_master(master);
+ kfree(dma_channels);
+
+ return 0;
+}
+
+static struct platform_driver omap2_mcspi_driver = {
+ .driver = {
+ .name = "omap2_mcspi",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(omap2_mcspi_remove),
+};
+
+
+static int __init omap2_mcspi_init(void)
+{
+ omap2_mcspi_wq = create_singlethread_workqueue(
+ omap2_mcspi_driver.driver.name);
+ if (omap2_mcspi_wq == NULL)
+ return -ENOMEM;
+ return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
+}
+subsys_initcall(omap2_mcspi_init);
+
+static void __exit omap2_mcspi_exit(void)
+{
+ platform_driver_unregister(&omap2_mcspi_driver);
+
+ destroy_workqueue(omap2_mcspi_wq);
+}
+module_exit(omap2_mcspi_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 95183e1df525..d275c615a73e 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -445,10 +445,19 @@ done:
return status;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
static int uwire_setup(struct spi_device *spi)
{
struct uwire_state *ust = spi->controller_state;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (ust == NULL) {
ust = kzalloc(sizeof(*ust), GFP_KERNEL);
if (ust == NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9f2c887ffa04..e51311b2da0b 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1067,6 +1067,9 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
static int setup(struct spi_device *spi)
{
struct pxa2xx_spi_chip *chip_info = NULL;
@@ -1093,6 +1096,12 @@ static int setup(struct spi_device *spi)
return -EINVAL;
}
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4831edbae2d5..b05de30b5d9b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/mutex.h>
#include <linux/spi/spi.h>
@@ -185,7 +186,7 @@ struct boardinfo {
};
static LIST_HEAD(board_list);
-static DECLARE_MUTEX(board_lock);
+static DEFINE_MUTEX(board_lock);
/**
@@ -292,9 +293,9 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
bi->n_board_info = n;
memcpy(bi->board_info, info, n * sizeof *info);
- down(&board_lock);
+ mutex_lock(&board_lock);
list_add_tail(&bi->list, &board_list);
- up(&board_lock);
+ mutex_unlock(&board_lock);
return 0;
}
@@ -302,13 +303,12 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
* creates board info from kernel command lines
*/
-static void __init_or_module
-scan_boardinfo(struct spi_master *master)
+static void scan_boardinfo(struct spi_master *master)
{
struct boardinfo *bi;
struct device *dev = master->cdev.dev;
- down(&board_lock);
+ mutex_lock(&board_lock);
list_for_each_entry(bi, &board_list, list) {
struct spi_board_info *chip = bi->board_info;
unsigned n;
@@ -330,7 +330,7 @@ scan_boardinfo(struct spi_master *master)
(void) spi_new_device(master, chip);
}
}
- up(&board_lock);
+ mutex_unlock(&board_lock);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 88425e1af4d3..0c85c984ccb4 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -187,12 +187,10 @@ int spi_bitbang_setup(struct spi_device *spi)
bitbang = spi_master_get_devdata(spi->master);
- /* REVISIT: some systems will want to support devices using lsb-first
- * bit encodings on the wire. In pure software that would be trivial,
- * just bitbang_txrx_le_cphaX() routines shifting the other way, and
- * some hardware controllers also have this support.
+ /* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
+ * add those to master->flags, and provide the other support.
*/
- if ((spi->mode & SPI_LSB_FIRST) != 0)
+ if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
return -EINVAL;
if (!cs) {
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 656be4a5094a..aee9ad6f633c 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1163,6 +1163,9 @@ msg_rejected:
return -EINVAL;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
/* On first setup, bad values must free the chip_data memory since they will
cause spi_new_device to fail. Bad values set up later by the protocol driver
are simply not applied, and the calling driver is notified. */
@@ -1174,6 +1177,12 @@ static int setup(struct spi_device *spi)
u32 tmp;
int status = 0;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
/* Get controller data */
chip_info = spi->controller_data;
@@ -1245,21 +1254,6 @@ static int setup(struct spi_device *spi)
/* SPI mode */
tmp = spi->mode;
- if (tmp & SPI_LSB_FIRST) {
- status = -EINVAL;
- if (first_setup) {
- dev_err(&spi->dev,
- "setup - "
- "HW doesn't support LSB first transfer\n");
- goto err_first_setup;
- } else {
- dev_err(&spi->dev,
- "setup - "
- "HW doesn't support LSB first transfer, "
- "default to MSB first\n");
- spi->mode &= ~SPI_LSB_FIRST;
- }
- }
if (tmp & SPI_CS_HIGH) {
u32_EDIT(chip->control,
SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
new file mode 100644
index 000000000000..4ea68ac16115
--- /dev/null
+++ b/drivers/spi/spi_lm70llp.c
@@ -0,0 +1,361 @@
+/*
+ * spi_lm70llp.c - driver for lm70llp eval board for the LM70 sensor
+ *
+ * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+
+/*
+ * The LM70 communicates with a host processor using a 3-wire variant of
+ * the SPI/Microwire bus interface. This driver specifically supports an
+ * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
+ * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
+ * master controller driver. The hwmon/lm70 driver is a "SPI protocol
+ * driver", layered on top of this one and usable without the lm70llp.
+ *
+ * The LM70 is a temperature sensor chip from National Semiconductor; its
+ * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ *
+ * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is
+ * (heavily) based on spi-butterfly by David Brownell.
+ *
+ * The LM70 LLP connects to the PC parallel port in the following manner:
+ *
+ *    Parallel                 LM70 LLP
+ *    Port        Direction    JP2 Header
+ *    ----------  ---------    ------------
+ *    D0       2      -         -
+ *    D1       3     -->        V+      5
+ *    D2       4     -->        V+      5
+ *    D3       5     -->        V+      5
+ *    D4       6     -->        V+      5
+ *    D5       7     -->        nCS     8
+ *    D6       8     -->        SCLK    3
+ *    D7       9     -->        SI/O    5
+ *    GND     25      -         GND     7
+ *    Select  13     <--        SI/O    1
+ *
+ * Note that parport pin 13 actually gets inverted by the transistor
+ * arrangement which lets either the parport or the LM70 drive the
+ * SI/SO signal.
+ */
+
+#define DRVNAME "spi-lm70llp"
+
+#define lm70_INIT 0xBE
+#define SIO 0x10
+#define nCS 0x20
+#define SCLK 0x40
+
+/*-------------------------------------------------------------------------*/
+
+struct spi_lm70llp {
+ struct spi_bitbang bitbang;
+ struct parport *port;
+ struct pardevice *pd;
+ struct spi_device *spidev_lm70;
+ struct spi_board_info info;
+ struct class_device *cdev;
+};
+
+/* REVISIT : ugly global ; provides "exclusive open" facility */
+static struct spi_lm70llp *lm70llp;
+
+
+/*-------------------------------------------------------------------*/
+
+static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+/*---------------------- LM70 LLP eval board-specific inlines follow */
+
+/* NOTE: we don't actually need to reread the output values, since they'll
+ * still be what we wrote before. Plus, going through parport builds in
+ * a ~1ms/operation delay; these SPI transfers could easily be faster.
+ */
+
+static inline void deassertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data | nCS);
+}
+
+static inline void assertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data & ~nCS);
+}
+
+static inline void clkHigh(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data | SCLK);
+}
+
+static inline void clkLow(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data & ~SCLK);
+}
+
+/*------------------------- SPI-LM70-specific inlines ----------------------*/
+
+static inline void spidelay(unsigned d)
+{
+ udelay(d);
+}
+
+static inline void setsck(struct spi_device *s, int is_on)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+
+ if (is_on)
+ clkHigh(pp);
+ else
+ clkLow(pp);
+}
+
+static inline void setmosi(struct spi_device *s, int is_on)
+{
+ /* FIXME update D7 ... this way we can put the chip
+ * into shutdown mode and read the manufacturer ID,
+ * but we can't put it back into operational mode.
+ */
+}
+
+/*
+ * getmiso:
+ * Why do we return 0 when the SIO line is high, and vice versa?
+ * The LM70 eval board from NS (which this driver drives) is wired so that
+ * when the LM70's SIO goes high, a transistor pulls parport pin 13 low,
+ * and vice versa; hence the inverted sense returned here.
+ */
+static inline int getmiso(struct spi_device *s)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+
+ return (parport_read_status(pp->port) & SIO) ? 0 : 1;
+}
+/*--------------------------------------------------------------------*/
+
+#define EXPAND_BITBANG_TXRX 1
+#include <linux/spi/spi_bitbang.h>
+
+static void lm70_chipselect(struct spi_device *spi, int value)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(spi);
+
+ if (value)
+ assertCS(pp);
+ else
+ deassertCS(pp);
+}
+
+/*
+ * Our actual bitbanger routine.
+ */
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
+{
+ static u32 sio;
+ static int first_time = 1;
+
+ /* First call: perform the SPI bitbang and return the LSB of
+ * the result of the SPI call.
+ */
+ if (first_time) {
+ sio = bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+ first_time = 0;
+ return sio & 0x00ff;
+ }
+ /* Second call: return the MSB of the result latched above */
+ first_time = 1;
+ return sio >> 8;
+}
+
+static void spi_lm70llp_attach(struct parport *p)
+{
+ struct pardevice *pd;
+ struct spi_lm70llp *pp;
+ struct spi_master *master;
+ int status;
+
+ if (lm70llp) {
+ printk(KERN_WARNING
+ "%s: spi_lm70llp instance already loaded. Aborting.\n",
+ DRVNAME);
+ return;
+ }
+
+ /* TODO: this just _assumes_ a lm70 is there ... no probe;
+ * the lm70 driver could verify it, reading the manf ID.
+ */
+
+ master = spi_alloc_master(p->physport->dev, sizeof *pp);
+ if (!master) {
+ status = -ENOMEM;
+ goto out_fail;
+ }
+ pp = spi_master_get_devdata(master);
+
+ master->bus_num = -1; /* dynamic alloc of a bus number */
+ master->num_chipselect = 1;
+
+ /*
+ * SPI and bitbang hookup.
+ */
+ pp->bitbang.master = spi_master_get(master);
+ pp->bitbang.chipselect = lm70_chipselect;
+ pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
+ pp->bitbang.flags = SPI_3WIRE;
+
+ /*
+ * Parport hookup
+ */
+ pp->port = p;
+ pd = parport_register_device(p, DRVNAME,
+ NULL, NULL, NULL,
+ PARPORT_FLAG_EXCL, pp);
+ if (!pd) {
+ status = -ENOMEM;
+ goto out_free_master;
+ }
+ pp->pd = pd;
+
+ status = parport_claim(pd);
+ if (status < 0)
+ goto out_parport_unreg;
+
+ /*
+ * Start SPI ...
+ */
+ status = spi_bitbang_start(&pp->bitbang);
+ if (status < 0) {
+ printk(KERN_WARNING
+ "%s: spi_bitbang_start failed with status %d\n",
+ DRVNAME, status);
+ goto out_off_and_release;
+ }
+
+ /*
+ * The modalias name MUST match the device_driver name
+ * for the bus glue code to match and subsequently bind them.
+ * We are binding to the generic drivers/hwmon/lm70.c device
+ * driver.
+ */
+ strcpy(pp->info.modalias, "lm70");
+ pp->info.max_speed_hz = 6 * 1000 * 1000;
+ pp->info.chip_select = 0;
+ pp->info.mode = SPI_3WIRE | SPI_MODE_0;
+
+ /* power up the chip, and let the LM70 control SI/SO */
+ parport_write_data(pp->port, lm70_INIT);
+
+ /* Enable access to our primary data structure via
+ * the board info's (void *)controller_data.
+ */
+ pp->info.controller_data = pp;
+ pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
+ if (pp->spidev_lm70)
+ dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
+ pp->spidev_lm70->dev.bus_id);
+ else {
+ printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
+ status = -ENODEV;
+ goto out_bitbang_stop;
+ }
+ pp->spidev_lm70->bits_per_word = 16;
+
+ lm70llp = pp;
+
+ return;
+
+out_bitbang_stop:
+ spi_bitbang_stop(&pp->bitbang);
+out_off_and_release:
+ /* power down */
+ parport_write_data(pp->port, 0);
+ mdelay(10);
+ parport_release(pp->pd);
+out_parport_unreg:
+ parport_unregister_device(pd);
+out_free_master:
+ (void) spi_master_put(master);
+out_fail:
+ pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status);
+}
+
+static void spi_lm70llp_detach(struct parport *p)
+{
+ struct spi_lm70llp *pp;
+
+ if (!lm70llp || lm70llp->port != p)
+ return;
+
+ pp = lm70llp;
+ spi_bitbang_stop(&pp->bitbang);
+
+ /* power down */
+ parport_write_data(pp->port, 0);
+ msleep(10);
+
+ parport_release(pp->pd);
+ parport_unregister_device(pp->pd);
+
+ (void) spi_master_put(pp->bitbang.master);
+
+ lm70llp = NULL;
+}
+
+
+static struct parport_driver spi_lm70llp_drv = {
+ .name = DRVNAME,
+ .attach = spi_lm70llp_attach,
+ .detach = spi_lm70llp_detach,
+};
+
+static int __init init_spi_lm70llp(void)
+{
+ return parport_register_driver(&spi_lm70llp_drv);
+}
+module_init(init_spi_lm70llp);
+
+static void __exit cleanup_spi_lm70llp(void)
+{
+ parport_unregister_driver(&spi_lm70llp_drv);
+}
+module_exit(cleanup_spi_lm70llp);
+
+MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
+MODULE_DESCRIPTION(
+ "Parport adapter for the National Semiconductor LM70 LLP eval board");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index e9798bf7b8c6..3295cfcc9f20 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -47,6 +47,7 @@ struct mpc83xx_spi_reg {
#define SPMODE_ENABLE (1 << 24)
#define SPMODE_LEN(x) ((x) << 20)
#define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
/*
* Default for SPI Mode:
@@ -85,6 +86,11 @@ struct mpc83xx_spi {
unsigned nsecs; /* (clock cycle time)/2 */
u32 sysclk;
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+
+ bool qe_mode;
+
void (*activate_cs) (u8 cs, u8 polarity);
void (*deactivate_cs) (u8 cs, u8 polarity);
};
@@ -103,7 +109,7 @@ static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg)
void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \
{ \
type * rx = mpc83xx_spi->rx; \
- *rx++ = (type)data; \
+ *rx++ = (type)(data >> mpc83xx_spi->rx_shift); \
mpc83xx_spi->rx = rx; \
}
@@ -114,7 +120,7 @@ u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \
const type * tx = mpc83xx_spi->tx; \
if (!tx) \
return 0; \
- data = *tx++; \
+ data = *tx++ << mpc83xx_spi->tx_shift; \
mpc83xx_spi->tx = tx; \
return data; \
}
@@ -158,6 +164,12 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) {
u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64);
+ if (pm > 0x0f) {
+ printk(KERN_WARNING "MPC83xx SPI: SPICLK can't be less then a SYSCLK/1024!\n"
+ "Requested SPICLK is %d Hz. Will use %d Hz instead.\n",
+ spi->max_speed_hz, mpc83xx_spi->sysclk / 1024);
+ pm = 0x0f;
+ }
regval |= SPMODE_PM(pm) | SPMODE_DIV16;
} else {
u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4);
@@ -197,12 +209,22 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
|| ((bits_per_word > 16) && (bits_per_word != 32)))
return -EINVAL;
+ mpc83xx_spi->rx_shift = 0;
+ mpc83xx_spi->tx_shift = 0;
if (bits_per_word <= 8) {
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+ if (mpc83xx_spi->qe_mode) {
+ mpc83xx_spi->rx_shift = 16;
+ mpc83xx_spi->tx_shift = 24;
+ }
} else if (bits_per_word <= 16) {
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
+ if (mpc83xx_spi->qe_mode) {
+ mpc83xx_spi->rx_shift = 16;
+ mpc83xx_spi->tx_shift = 16;
+ }
} else if (bits_per_word <= 32) {
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
@@ -232,12 +254,21 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
static int mpc83xx_spi_setup(struct spi_device *spi)
{
struct spi_bitbang *bitbang;
struct mpc83xx_spi *mpc83xx_spi;
int retval;
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
if (!spi->max_speed_hz)
return -EINVAL;
@@ -371,7 +402,6 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
ret = -ENODEV;
goto free_master;
}
-
mpc83xx_spi = spi_master_get_devdata(master);
mpc83xx_spi->bitbang.master = spi_master_get(master);
mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
@@ -380,9 +410,17 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
mpc83xx_spi->sysclk = pdata->sysclk;
mpc83xx_spi->activate_cs = pdata->activate_cs;
mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
+ mpc83xx_spi->qe_mode = pdata->qe_mode;
mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
+ mpc83xx_spi->rx_shift = 0;
+ mpc83xx_spi->tx_shift = 0;
+ if (mpc83xx_spi->qe_mode) {
+ mpc83xx_spi->rx_shift = 16;
+ mpc83xx_spi->tx_shift = 24;
+ }
+
mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
init_completion(&mpc83xx_spi->done);
@@ -417,6 +455,9 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ if (pdata->qe_mode)
+ regval |= SPMODE_OP;
+
mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
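
To make the new QE-mode shifts above concrete, a hedged worked example for 8-bit words (tx_shift = 24, rx_shift = 16); the helper names are invented for illustration:

/* In QE mode the data registers are not right-justified: an 8-bit TX value
 * is placed in the top byte, and the received byte comes back in bits 23..16.
 */
static u32 example_qe_tx_u8(u8 byte)
{
	return (u32)byte << 24;		/* 0xA5 -> 0xA5000000 */
}

static u8 example_qe_rx_u8(u32 reg)
{
	return (u8)(reg >> 16);		/* 0x00A5xxxx -> 0xA5 */
}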
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index d5a710f6e445..7071ff8da63e 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,6 +146,9 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
return 0;
}
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
static int s3c24xx_spi_setup(struct spi_device *spi)
{
int ret;
@@ -153,8 +156,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
- if ((spi->mode & SPI_LSB_FIRST) != 0)
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
return -EINVAL;
+ }
ret = s3c24xx_spi_setupxfer(spi, NULL);
if (ret < 0) {
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
new file mode 100644
index 000000000000..08e981c40646
--- /dev/null
+++ b/drivers/spi/spi_txx9.c
@@ -0,0 +1,474 @@
+/*
+ * spi_txx9.c - TXx9 SPI controller driver.
+ *
+ * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ *
+ * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <asm/gpio.h>
+
+
+#define SPI_FIFO_SIZE 4
+
+#define TXx9_SPMCR 0x00
+#define TXx9_SPCR0 0x04
+#define TXx9_SPCR1 0x08
+#define TXx9_SPFS 0x0c
+#define TXx9_SPSR 0x14
+#define TXx9_SPDR 0x18
+
+/* SPMCR : SPI Master Control */
+#define TXx9_SPMCR_OPMODE 0xc0
+#define TXx9_SPMCR_CONFIG 0x40
+#define TXx9_SPMCR_ACTIVE 0x80
+#define TXx9_SPMCR_SPSTP 0x02
+#define TXx9_SPMCR_BCLR 0x01
+
+/* SPCR0 : SPI Control 0 */
+#define TXx9_SPCR0_TXIFL_MASK 0xc000
+#define TXx9_SPCR0_RXIFL_MASK 0x3000
+#define TXx9_SPCR0_SIDIE 0x0800
+#define TXx9_SPCR0_SOEIE 0x0400
+#define TXx9_SPCR0_RBSIE 0x0200
+#define TXx9_SPCR0_TBSIE 0x0100
+#define TXx9_SPCR0_IFSPSE 0x0010
+#define TXx9_SPCR0_SBOS 0x0004
+#define TXx9_SPCR0_SPHA 0x0002
+#define TXx9_SPCR0_SPOL 0x0001
+
+/* SPSR : SPI Status */
+#define TXx9_SPSR_TBSI 0x8000
+#define TXx9_SPSR_RBSI 0x4000
+#define TXx9_SPSR_TBS_MASK 0x3800
+#define TXx9_SPSR_RBS_MASK 0x0700
+#define TXx9_SPSR_SPOE 0x0080
+#define TXx9_SPSR_IFSD 0x0008
+#define TXx9_SPSR_SIDLE 0x0004
+#define TXx9_SPSR_STRDY 0x0002
+#define TXx9_SPSR_SRRDY 0x0001
+
+
+struct txx9spi {
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ spinlock_t lock; /* protect 'queue' */
+ struct list_head queue;
+ wait_queue_head_t waitq;
+ void __iomem *membase;
+ int irq;
+ int baseclk;
+ struct clk *clk;
+ u32 max_speed_hz, min_speed_hz;
+ int last_chipselect;
+ int last_chipselect_val;
+};
+
+static u32 txx9spi_rd(struct txx9spi *c, int reg)
+{
+ return __raw_readl(c->membase + reg);
+}
+static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
+{
+ __raw_writel(val, c->membase + reg);
+}
+
+static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
+ int on, unsigned int cs_delay)
+{
+ int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
+ if (on) {
+ /* deselect the chip with cs_change hint in last transfer */
+ if (c->last_chipselect >= 0)
+ gpio_set_value(c->last_chipselect,
+ !c->last_chipselect_val);
+ c->last_chipselect = spi->chip_select;
+ c->last_chipselect_val = val;
+ } else {
+ c->last_chipselect = -1;
+ ndelay(cs_delay); /* CS Hold Time */
+ }
+ gpio_set_value(spi->chip_select, val);
+ ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
+
+static int txx9spi_setup(struct spi_device *spi)
+{
+ struct txx9spi *c = spi_master_get_devdata(spi->master);
+ u8 bits_per_word;
+
+ if (spi->mode & ~MODEBITS)
+ return -EINVAL;
+
+ if (!spi->max_speed_hz
+ || spi->max_speed_hz > c->max_speed_hz
+ || spi->max_speed_hz < c->min_speed_hz)
+ return -EINVAL;
+
+ bits_per_word = spi->bits_per_word ? : 8;
+ if (bits_per_word != 8 && bits_per_word != 16)
+ return -EINVAL;
+
+ if (gpio_direction_output(spi->chip_select,
+ !(spi->mode & SPI_CS_HIGH))) {
+ dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
+ return -EINVAL;
+ }
+
+ /* deselect chip */
+ spin_lock(&c->lock);
+ txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
+ spin_unlock(&c->lock);
+
+ return 0;
+}
+
+static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
+{
+ struct txx9spi *c = dev_id;
+
+ /* disable rx intr */
+ txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
+ TXx9_SPCR0);
+ wake_up(&c->waitq);
+ return IRQ_HANDLED;
+}
+
+static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
+{
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ unsigned int cs_delay;
+ unsigned int cs_change = 1;
+ int status = 0;
+ u32 mcr;
+ u32 prev_speed_hz = 0;
+ u8 prev_bits_per_word = 0;
+
+ /* CS setup/hold/recovery time in nsec */
+ cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
+
+ mcr = txx9spi_rd(c, TXx9_SPMCR);
+ if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
+ dev_err(&spi->dev, "Bad mode.\n");
+ status = -EIO;
+ goto exit;
+ }
+ mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+ txx9spi_wr(c, TXx9_SPCR0_SBOS
+ | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
+ | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
+ | 0x08,
+ TXx9_SPCR0);
+
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ const void *txbuf = t->tx_buf;
+ void *rxbuf = t->rx_buf;
+ u32 data;
+ unsigned int len = t->len;
+ unsigned int wsize;
+ u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+ u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+
+ bits_per_word = bits_per_word ? : 8;
+ wsize = bits_per_word >> 3; /* in bytes */
+
+ if (prev_speed_hz != speed_hz
+ || prev_bits_per_word != bits_per_word) {
+ u32 n = (c->baseclk + speed_hz - 1) / speed_hz;
+ if (n < 1)
+ n = 1;
+ else if (n > 0xff)
+ n = 0xff;
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
+ TXx9_SPMCR);
+ txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
+ /* enter active mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
+
+ prev_speed_hz = speed_hz;
+ prev_bits_per_word = bits_per_word;
+ }
+
+ if (cs_change)
+ txx9spi_cs_func(spi, c, 1, cs_delay);
+ cs_change = t->cs_change;
+ while (len) {
+ unsigned int count = SPI_FIFO_SIZE;
+ int i;
+ u32 cr0;
+
+ if (len < count * wsize)
+ count = len / wsize;
+ /* now tx must be idle... */
+ while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
+ cpu_relax();
+ cr0 = txx9spi_rd(c, TXx9_SPCR0);
+ cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
+ cr0 |= (count - 1) << 12;
+ /* enable rx intr */
+ cr0 |= TXx9_SPCR0_RBSIE;
+ txx9spi_wr(c, cr0, TXx9_SPCR0);
+ /* send */
+ for (i = 0; i < count; i++) {
+ if (txbuf) {
+ data = (wsize == 1)
+ ? *(const u8 *)txbuf
+ : *(const u16 *)txbuf;
+ txx9spi_wr(c, data, TXx9_SPDR);
+ txbuf += wsize;
+ } else
+ txx9spi_wr(c, 0, TXx9_SPDR);
+ }
+ /* wait all rx data */
+ wait_event(c->waitq,
+ txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
+ /* receive */
+ for (i = 0; i < count; i++) {
+ data = txx9spi_rd(c, TXx9_SPDR);
+ if (rxbuf) {
+ if (wsize == 1)
+ *(u8 *)rxbuf = data;
+ else
+ *(u16 *)rxbuf = data;
+ rxbuf += wsize;
+ }
+ }
+ len -= count * wsize;
+ }
+ m->actual_length += t->len;
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (!cs_change)
+ continue;
+ if (t->transfer_list.next == &m->transfers)
+ break;
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
+ txx9spi_cs_func(spi, c, 0, cs_delay);
+ }
+
+exit:
+ m->status = status;
+ m->complete(m->context);
+
+ /* normally deactivate chipselect ... unless no error and
+ * cs_change has hinted that the next message will probably
+ * be for this chip too.
+ */
+ if (!(status == 0 && cs_change))
+ txx9spi_cs_func(spi, c, 0, cs_delay);
+
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+}
+
+static void txx9spi_work(struct work_struct *work)
+{
+ struct txx9spi *c = container_of(work, struct txx9spi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->lock, flags);
+ while (!list_empty(&c->queue)) {
+ struct spi_message *m;
+
+ m = container_of(c->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ txx9spi_work_one(c, m);
+
+ spin_lock_irqsave(&c->lock, flags);
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+}
+
+static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_master *master = spi->master;
+ struct txx9spi *c = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ unsigned long flags;
+
+ m->actual_length = 0;
+
+ /* check each transfer's parameters */
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+ u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
+
+ bits_per_word = bits_per_word ? : 8;
+ if (!t->tx_buf && !t->rx_buf && t->len)
+ return -EINVAL;
+ if (bits_per_word != 8 && bits_per_word != 16)
+ return -EINVAL;
+ if (t->len & ((bits_per_word >> 3) - 1))
+ return -EINVAL;
+ if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&c->lock, flags);
+ list_add_tail(&m->queue, &c->queue);
+ queue_work(c->workqueue, &c->work);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ return 0;
+}
+
+static int __init txx9spi_probe(struct platform_device *dev)
+{
+ struct spi_master *master;
+ struct txx9spi *c;
+ struct resource *res;
+ int ret = -ENODEV;
+ u32 mcr;
+
+ master = spi_alloc_master(&dev->dev, sizeof(*c));
+ if (!master)
+ return ret;
+ c = spi_master_get_devdata(master);
+ c->irq = -1;
+ platform_set_drvdata(dev, master);
+
+ INIT_WORK(&c->work, txx9spi_work);
+ spin_lock_init(&c->lock);
+ INIT_LIST_HEAD(&c->queue);
+ init_waitqueue_head(&c->waitq);
+
+ c->clk = clk_get(&dev->dev, "spi-baseclk");
+ if (IS_ERR(c->clk)) {
+ ret = PTR_ERR(c->clk);
+ c->clk = NULL;
+ goto exit;
+ }
+ ret = clk_enable(c->clk);
+ if (ret) {
+ clk_put(c->clk);
+ c->clk = NULL;
+ goto exit;
+ }
+ c->baseclk = clk_get_rate(c->clk);
+ c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff;
+ c->max_speed_hz = c->baseclk;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto exit;
+ c->membase = ioremap(res->start, res->end - res->start + 1);
+ if (!c->membase)
+ goto exit;
+
+ /* enter config mode */
+ mcr = txx9spi_rd(c, TXx9_SPMCR);
+ mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+
+ c->irq = platform_get_irq(dev, 0);
+ if (c->irq < 0)
+ goto exit;
+ ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c);
+ if (ret) {
+ c->irq = -1;
+ goto exit;
+ }
+
+ c->workqueue = create_singlethread_workqueue(master->cdev.dev->bus_id);
+ if (!c->workqueue)
+ goto exit;
+ c->last_chipselect = -1;
+
+ dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
+ (unsigned long long)res->start, c->irq,
+ (c->baseclk + 500000) / 1000000);
+
+ master->bus_num = dev->id;
+ master->setup = txx9spi_setup;
+ master->transfer = txx9spi_transfer;
+ master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto exit;
+ return 0;
+exit:
+ if (c->workqueue)
+ destroy_workqueue(c->workqueue);
+ if (c->irq >= 0)
+ free_irq(c->irq, c);
+ if (c->membase)
+ iounmap(c->membase);
+ if (c->clk) {
+ clk_disable(c->clk);
+ clk_put(c->clk);
+ }
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(master);
+ return ret;
+}
+
+static int __exit txx9spi_remove(struct platform_device *dev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
+ struct txx9spi *c = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+ platform_set_drvdata(dev, NULL);
+ destroy_workqueue(c->workqueue);
+ free_irq(c->irq, c);
+ iounmap(c->membase);
+ clk_disable(c->clk);
+ clk_put(c->clk);
+ spi_master_put(master);
+ return 0;
+}
+
+static struct platform_driver txx9spi_driver = {
+ .remove = __exit_p(txx9spi_remove),
+ .driver = {
+ .name = "txx9spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init txx9spi_init(void)
+{
+ return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
+}
+subsys_initcall(txx9spi_init);
+
+static void __exit txx9spi_exit(void)
+{
+ platform_driver_unregister(&txx9spi_driver);
+}
+module_exit(txx9spi_exit);
+
+MODULE_DESCRIPTION("TXx9 SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d04242aee40d..38b60ad0eda0 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -181,7 +181,8 @@ static int spidev_message(struct spidev_data *spidev,
}
if (u_tmp->tx_buf) {
k_tmp->tx_buf = buf;
- if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf,
+ if (copy_from_user(buf, (const u8 __user *)
+ (ptrdiff_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
}
@@ -213,7 +214,8 @@ static int spidev_message(struct spidev_data *spidev,
buf = spidev->buffer;
for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
if (u_tmp->rx_buf) {
- if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf,
+ if (__copy_to_user((u8 __user *)
+ (ptrdiff_t) u_tmp->rx_buf, buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
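
The casts being added here exist because struct spi_ioc_transfer carries its buffer addresses as 64-bit integers rather than pointers. A hedged userspace-side sketch of the matching call (file descriptor and data are illustrative only):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int example_full_duplex(int fd)
{
	uint8_t tx[4] = { 0x90, 0x00, 0x00, 0x00 };
	uint8_t rx[4] = { 0 };
	struct spi_ioc_transfer xfer;

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* read back via the cast above */
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);

	/* one full-duplex transfer handled by spidev_message() */
	return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
}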
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
new file mode 100644
index 000000000000..6da58ca48b33
--- /dev/null
+++ b/drivers/spi/tle62x0.c
@@ -0,0 +1,328 @@
+/*
+ * tle62x0.c -- support Infineon TLE62x0 driver chips
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ * Ben Dooks, <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/tle62x0.h>
+
+
+#define CMD_READ 0x00
+#define CMD_SET 0xff
+
+#define DIAG_NORMAL 0x03
+#define DIAG_OVERLOAD 0x02
+#define DIAG_OPEN 0x01
+#define DIAG_SHORTGND 0x00
+
+struct tle62x0_state {
+ struct spi_device *us;
+ struct mutex lock;
+ unsigned int nr_gpio;
+ unsigned int gpio_state;
+
+ unsigned char tx_buff[4];
+ unsigned char rx_buff[4];
+};
+
+static int to_gpio_num(struct device_attribute *attr);
+
+static inline int tle62x0_write(struct tle62x0_state *st)
+{
+ unsigned char *buff = st->tx_buff;
+ unsigned int gpio_state = st->gpio_state;
+
+ buff[0] = CMD_SET;
+
+ if (st->nr_gpio == 16) {
+ buff[1] = gpio_state >> 8;
+ buff[2] = gpio_state;
+ } else {
+ buff[1] = gpio_state;
+ }
+
+ dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
+ buff[0], buff[1], buff[2]);
+
+ return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
+}
+
+static inline int tle62x0_read(struct tle62x0_state *st)
+{
+ unsigned char *txbuff = st->tx_buff;
+ struct spi_transfer xfer = {
+ .tx_buf = txbuff,
+ .rx_buf = st->rx_buff,
+ .len = (st->nr_gpio * 2) / 8,
+ };
+ struct spi_message msg;
+
+ txbuff[0] = CMD_READ;
+ txbuff[1] = 0x00;
+ txbuff[2] = 0x00;
+ txbuff[3] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(st->us, &msg);
+}
+
+static const char *decode_fault(unsigned int fault_code)
+{
+ fault_code &= 3;
+
+ switch (fault_code) {
+ case DIAG_NORMAL:
+ return "N";
+ case DIAG_OVERLOAD:
+ return "V";
+ case DIAG_OPEN:
+ return "O";
+ case DIAG_SHORTGND:
+ return "G";
+ }
+
+ return "?";
+}
+
+static ssize_t tle62x0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ char *bp = buf;
+ unsigned char *buff = st->rx_buff;
+ unsigned long fault = 0;
+ int ptr;
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = tle62x0_read(st);
+
+ dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
+
+ for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
+ fault <<= 8;
+ fault |= ((unsigned long)buff[ptr]);
+
+ dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
+ }
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++) {
+ bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
+ }
+
+ *bp++ = '\n';
+
+ mutex_unlock(&st->lock);
+ return bp - buf;
+}
+
+static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
+
+static ssize_t tle62x0_gpio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ int value;
+
+ mutex_lock(&st->lock);
+ value = (st->gpio_state >> gpio_num) & 1;
+ mutex_unlock(&st->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d", value);
+}
+
+static ssize_t tle62x0_gpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ unsigned long val;
+ char *endp;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (buf == endp)
+ return -EINVAL;
+
+ dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
+
+ mutex_lock(&st->lock);
+
+ if (val)
+ st->gpio_state |= 1 << gpio_num;
+ else
+ st->gpio_state &= ~(1 << gpio_num);
+
+ tle62x0_write(st);
+ mutex_unlock(&st->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+
+static struct device_attribute *gpio_attrs[] = {
+ [0] = &dev_attr_gpio1,
+ [1] = &dev_attr_gpio2,
+ [2] = &dev_attr_gpio3,
+ [3] = &dev_attr_gpio4,
+ [4] = &dev_attr_gpio5,
+ [5] = &dev_attr_gpio6,
+ [6] = &dev_attr_gpio7,
+ [7] = &dev_attr_gpio8,
+ [8] = &dev_attr_gpio9,
+ [9] = &dev_attr_gpio10,
+ [10] = &dev_attr_gpio11,
+ [11] = &dev_attr_gpio12,
+ [12] = &dev_attr_gpio13,
+ [13] = &dev_attr_gpio14,
+ [14] = &dev_attr_gpio15,
+ [15] = &dev_attr_gpio16
+};
+
+static int to_gpio_num(struct device_attribute *attr)
+{
+ int ptr;
+
+ for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
+ if (gpio_attrs[ptr] == attr)
+ return ptr;
+ }
+
+ return -1;
+}
+
+static int __devinit tle62x0_probe(struct spi_device *spi)
+{
+ struct tle62x0_state *st;
+ struct tle62x0_pdata *pdata;
+ int ptr;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&spi->dev, "no device data specified\n");
+ return -EINVAL;
+ }
+
+ st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
+ if (st == NULL) {
+ dev_err(&spi->dev, "no memory for device state\n");
+ return -ENOMEM;
+ }
+
+ st->us = spi;
+ st->nr_gpio = pdata->gpio_count;
+ st->gpio_state = pdata->init_state;
+
+ mutex_init(&st->lock);
+
+ ret = device_create_file(&spi->dev, &dev_attr_status_show);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create status attribute\n");
+ goto err_status;
+ }
+
+ for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
+ ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create gpio attribute\n");
+ goto err_gpios;
+ }
+ }
+
+ /* tle62x0_write(st); */
+ spi_set_drvdata(spi, st);
+ return 0;
+
+ err_gpios:
+ /* indices 0..ptr-1 were created; gpio_attrs[ptr] itself failed */
+ for (; ptr > 0; ptr--)
+ device_remove_file(&spi->dev, gpio_attrs[ptr - 1]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+
+ err_status:
+ kfree(st);
+ return ret;
+}
+
+static int __devexit tle62x0_remove(struct spi_device *spi)
+{
+ struct tle62x0_state *st = spi_get_drvdata(spi);
+ int ptr;
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++)
+ device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+ kfree(st);
+ return 0;
+}
+
+static struct spi_driver tle62x0_driver = {
+ .driver = {
+ .name = "tle62x0",
+ .owner = THIS_MODULE,
+ },
+ .probe = tle62x0_probe,
+ .remove = __devexit_p(tle62x0_remove),
+};
+
+static __init int tle62x0_init(void)
+{
+ return spi_register_driver(&tle62x0_driver);
+}
+
+static __exit void tle62x0_exit(void)
+{
+ spi_unregister_driver(&tle62x0_driver);
+}
+
+module_init(tle62x0_init);
+module_exit(tle62x0_exit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("TLE62x0 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
new file mode 100644
index 000000000000..f0bf9a68e96b
--- /dev/null
+++ b/drivers/spi/xilinx_spi.c
@@ -0,0 +1,434 @@
+/*
+ * xilinx_spi.c
+ *
+ * Xilinx SPI controller driver (master mode only)
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/io.h>
+
+#include <syslib/virtex_devices.h>
+
+#define XILINX_SPI_NAME "xspi"
+
+/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
+ * Product Specification", DS464
+ */
+#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
+
+#define XSPI_CR_ENABLE 0x02
+#define XSPI_CR_MASTER_MODE 0x04
+#define XSPI_CR_CPOL 0x08
+#define XSPI_CR_CPHA 0x10
+#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL)
+#define XSPI_CR_TXFIFO_RESET 0x20
+#define XSPI_CR_RXFIFO_RESET 0x40
+#define XSPI_CR_MANUAL_SSELECT 0x80
+#define XSPI_CR_TRANS_INHIBIT 0x100
+
+#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
+
+#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
+#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
+#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
+#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
+#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
+
+#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
+#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
+
+#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
+
+/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
+ * IPIF registers are 32 bit
+ */
+#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
+#define XIPIF_V123B_GINTR_ENABLE 0x80000000
+
+#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
+#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
+
+#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
+#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
+ * disabled */
+#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
+#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
+#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
+#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
+
+#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
+#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
+
+struct xilinx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *regs; /* virt. address of the control registers */
+
+ int irq; /* signed: platform_get_irq() returns a negative errno on failure */
+
+ u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
+
+ u8 *rx_ptr; /* pointer in the Rx buffer */
+ const u8 *tx_ptr; /* pointer in the Tx buffer */
+ int remaining_bytes; /* the number of bytes left to transfer */
+};
+
+static void xspi_init_hw(void __iomem *regs_base)
+{
+ /* Reset the SPI device */
+ out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
+ XIPIF_V123B_RESET_MASK);
+ /* Disable all the interrupts just in case */
+ out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
+ /* Enable the global IPIF interrupt */
+ out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
+ XIPIF_V123B_GINTR_ENABLE);
+ /* Deselect the slave on the SPI bus */
+ out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
+ /* Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it */
+ out_be16(regs_base + XSPI_CR_OFFSET,
+ XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
+ | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
+}
+
+static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ /* Deselect the slave on the SPI bus */
+ out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
+ } else if (is_on == BITBANG_CS_ACTIVE) {
+ /* Set the SPI clock phase and polarity */
+ u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
+ & ~XSPI_CR_MODE_MASK;
+ if (spi->mode & SPI_CPHA)
+ cr |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cr |= XSPI_CR_CPOL;
+ out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+
+ /* We do not check spi->max_speed_hz here as the SPI clock
+ * frequency is not software programmable (the IP block design
+ * parameter)
+ */
+
+ /* Activate the chip select */
+ out_be32(xspi->regs + XSPI_SSR_OFFSET,
+ ~(0x0001 << spi->chip_select));
+ }
+}
+
+/* spi_bitbang requires a custom setup_transfer() to be defined if there is a
+ * custom txrx_bufs(). We have nothing to set up here as the SPI IP block
+ * supports just 8 bits per word, and the SPI clock can't be changed in
+ * software, so we only check for 8 bits per word. Chip select delay
+ * calculations could be added here as soon as bitbang_work() can be made
+ * aware of the delay value.
+ */
+static int xilinx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u8 bits_per_word;
+ u32 hz;
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
+ hz = (t) ? t->speed_hz : spi->max_speed_hz;
+ if (bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __FUNCTION__, bits_per_word);
+ return -EINVAL;
+ }
+
+ if (hz && xspi->speed_hz > hz) {
+ dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
+ __FUNCTION__, hz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+static int xilinx_spi_setup(struct spi_device *spi)
+{
+ struct spi_bitbang *bitbang;
+ struct xilinx_spi *xspi;
+ int retval;
+
+ xspi = spi_master_get_devdata(spi->master);
+ bitbang = &xspi->bitbang;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if (spi->mode & ~MODEBITS) {
+ dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
+ __FUNCTION__, spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ retval = xilinx_spi_setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
+ __FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
+
+ return 0;
+}
+
+static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
+{
+ u8 sr;
+
+ /* Fill the Tx FIFO with as many bytes as possible */
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
+ if (xspi->tx_ptr) {
+ out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
+ } else {
+ out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
+ }
+ xspi->remaining_bytes--;
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ }
+}
+
+static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ipif_ier;
+ u16 cr;
+
+ /* We get here with transmitter inhibited */
+
+ xspi->tx_ptr = t->tx_buf;
+ xspi->rx_ptr = t->rx_buf;
+ xspi->remaining_bytes = t->len;
+ INIT_COMPLETION(xspi->done);
+
+ xilinx_spi_fill_tx_fifo(xspi);
+
+ /* Enable the transmit empty interrupt, which we use to determine
+ * progress on the transmission.
+ */
+ ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
+ out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
+ ipif_ier | XSPI_INTR_TX_EMPTY);
+
+ /* Start the transfer by not inhibiting the transmitter any longer */
+ cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
+ out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+
+ wait_for_completion(&xspi->done);
+
+ /* Disable the transmit empty interrupt */
+ out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
+
+ return t->len - xspi->remaining_bytes;
+}
+
+
+/* This driver supports single master mode only. Hence Tx FIFO Empty
+ * is the only interrupt we care about.
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Fault interrupts are not expected to occur.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+ struct xilinx_spi *xspi = dev_id;
+ u32 ipif_isr;
+
+ /* Get the IPIF interrupts, and clear them immediately */
+ ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
+
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
+ u16 cr;
+ u8 sr;
+
+ /* A transmit has just completed. Process received data and
+ * check for more data to transmit. Always inhibit the
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+ cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
+ out_be16(xspi->regs + XSPI_CR_OFFSET,
+ cr | XSPI_CR_TRANS_INHIBIT);
+
+ /* Read out all the data from the Rx FIFO */
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ u8 data;
+
+ data = in_8(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *xspi->rx_ptr++ = data;
+ }
+ sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ }
+
+ /* See if there is more data to send */
+ if (xspi->remaining_bytes > 0) {
+ xilinx_spi_fill_tx_fifo(xspi);
+ /* Start the transfer by not inhibiting the
+ * transmitter any longer
+ */
+ out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ } else {
+ /* No more data to send.
+ * Indicate the transfer is completed.
+ */
+ complete(&xspi->done);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init xilinx_spi_probe(struct platform_device *dev)
+{
+ int ret = 0;
+ struct spi_master *master;
+ struct xilinx_spi *xspi;
+ struct xspi_platform_data *pdata;
+ struct resource *r;
+
+ /* Get resources (memory, IRQ) associated with the device */
+ master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
+
+ if (master == NULL) {
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(dev, master);
+ pdata = dev->dev.platform_data;
+
+ if (pdata == NULL) {
+ ret = -ENODEV;
+ goto put_master;
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto put_master;
+ }
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = spi_master_get(master);
+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+ xspi->bitbang.master->setup = xilinx_spi_setup;
+ init_completion(&xspi->done);
+
+ if (!request_mem_region(r->start,
+ r->end - r->start + 1, XILINX_SPI_NAME)) {
+ ret = -ENXIO;
+ goto put_master;
+ }
+
+ xspi->regs = ioremap(r->start, r->end - r->start + 1);
+ if (xspi->regs == NULL) {
+ ret = -ENOMEM;
+ goto put_master;
+ }
+
+ xspi->irq = platform_get_irq(dev, 0);
+ if (xspi->irq < 0) {
+ ret = -ENXIO;
+ goto unmap_io;
+ }
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+ xspi->speed_hz = pdata->speed_hz;
+
+ /* SPI controller initializations */
+ xspi_init_hw(xspi->regs);
+
+ /* Register for SPI Interrupt */
+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+ if (ret != 0)
+ goto unmap_io;
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret != 0) {
+ dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
+ goto free_irq;
+ }
+
+ dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
+ r->start, (u32)xspi->regs, xspi->irq);
+
+ return ret;
+
+free_irq:
+ free_irq(xspi->irq, xspi);
+unmap_io:
+ iounmap(xspi->regs);
+put_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit xilinx_spi_remove(struct platform_device *dev)
+{
+ struct xilinx_spi *xspi;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(dev);
+ xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ free_irq(xspi->irq, xspi);
+ iounmap(xspi->regs);
+ platform_set_drvdata(dev, NULL);
+ spi_master_put(xspi->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver xilinx_spi_driver = {
+ .probe = xilinx_spi_probe,
+ .remove = __devexit_p(xilinx_spi_remove),
+ .driver = {
+ .name = XILINX_SPI_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init xilinx_spi_init(void)
+{
+ return platform_driver_register(&xilinx_spi_driver);
+}
+module_init(xilinx_spi_init);
+
+static void __exit xilinx_spi_exit(void)
+{
+ platform_driver_unregister(&xilinx_spi_driver);
+}
+module_exit(xilinx_spi_exit);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tc/Makefile b/drivers/tc/Makefile
index 967342692211..c899246bd362 100644
--- a/drivers/tc/Makefile
+++ b/drivers/tc/Makefile
@@ -5,7 +5,6 @@
# Object file lists.
obj-$(CONFIG_TC) += tc.o tc-driver.o
-obj-$(CONFIG_ZS) += zs.o
obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o
$(obj)/lk201-map.o: $(obj)/lk201-map.c
diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c
deleted file mode 100644
index 4fff61b32dcb..000000000000
--- a/drivers/tc/zs.c
+++ /dev/null
@@ -1,2203 +0,0 @@
-/*
- * decserial.c: Serial port driver for IOASIC DECstations.
- *
- * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras.
- * Derived from drivers/macintosh/macserial.c by Harald Koerfgen.
- *
- * DECstation changes
- * Copyright (C) 1998-2000 Harald Koerfgen
- * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Maciej W. Rozycki
- *
- * For the rest of the code the original Copyright applies:
- * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- *
- *
- * Note: for IOASIC systems the wiring is as follows:
- *
- * mouse/keyboard:
- * DIN-7 MJ-4 signal SCC
- * 2 1 TxD <- A.TxD
- * 3 4 RxD -> A.RxD
- *
- * EIA-232/EIA-423:
- * DB-25 MMJ-6 signal SCC
- * 2 2 TxD <- B.TxD
- * 3 5 RxD -> B.RxD
- * 4 RTS <- ~A.RTS
- * 5 CTS -> ~B.CTS
- * 6 6 DSR -> ~A.SYNC
- * 8 CD -> ~B.DCD
- * 12 DSRS(DCE) -> ~A.CTS (*)
- * 15 TxC -> B.TxC
- * 17 RxC -> B.RxC
- * 20 1 DTR <- ~A.DTR
- * 22 RI -> ~A.DCD
- * 23 DSRS(DTE) <- ~B.RTS
- *
- * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE)
- * is shared with DSRS(DTE) at pin 23.
- */
-
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-#include <linux/console.h>
-#endif
-
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/bootinfo.h>
-
-#include <asm/dec/interrupts.h>
-#include <asm/dec/ioasic_addrs.h>
-#include <asm/dec/machtype.h>
-#include <asm/dec/serial.h>
-#include <asm/dec/system.h>
-
-#ifdef CONFIG_KGDB
-#include <asm/kgdb.h>
-#endif
-#ifdef CONFIG_MAGIC_SYSRQ
-#include <linux/sysrq.h>
-#endif
-
-#include "zs.h"
-
-/*
- * It would be nice to dynamically allocate everything that
- * depends on NUM_SERIAL, so we could support any number of
- * Z8530s, but for now...
- */
-#define NUM_SERIAL 2 /* Max number of ZS chips supported */
-#define NUM_CHANNELS (NUM_SERIAL * 2) /* 2 channels per chip */
-#define CHANNEL_A_NR (zs_parms->channel_a_offset > zs_parms->channel_b_offset)
- /* Number of channel A in the chip */
-#define ZS_CHAN_IO_SIZE 8
-#define ZS_CLOCK 7372800 /* Z8530 RTxC input clock rate */
-
-#define RECOVERY_DELAY udelay(2)
-
-struct zs_parms {
- unsigned long scc0;
- unsigned long scc1;
- int channel_a_offset;
- int channel_b_offset;
- int irq0;
- int irq1;
- int clock;
-};
-
-static struct zs_parms *zs_parms;
-
-#ifdef CONFIG_MACH_DECSTATION
-static struct zs_parms ds_parms = {
- scc0 : IOASIC_SCC0,
- scc1 : IOASIC_SCC1,
- channel_a_offset : 1,
- channel_b_offset : 9,
- irq0 : -1,
- irq1 : -1,
- clock : ZS_CLOCK
-};
-#endif
-
-#ifdef CONFIG_MACH_DECSTATION
-#define DS_BUS_PRESENT (IOASIC)
-#else
-#define DS_BUS_PRESENT 0
-#endif
-
-#define BUS_PRESENT (DS_BUS_PRESENT)
-
-DEFINE_SPINLOCK(zs_lock);
-
-struct dec_zschannel zs_channels[NUM_CHANNELS];
-struct dec_serial zs_soft[NUM_CHANNELS];
-int zs_channels_found;
-struct dec_serial *zs_chain; /* list of all channels */
-
-struct tty_struct zs_ttys[NUM_CHANNELS];
-
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-static struct console sercons;
-#endif
-#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
- !defined(MODULE)
-static unsigned long break_pressed; /* break, really ... */
-#endif
-
-static unsigned char zs_init_regs[16] = {
- 0, /* write 0 */
- 0, /* write 1 */
- 0, /* write 2 */
- 0, /* write 3 */
- (X16CLK), /* write 4 */
- 0, /* write 5 */
- 0, 0, 0, /* write 6, 7, 8 */
- (MIE | DLC | NV), /* write 9 */
- (NRZ), /* write 10 */
- (TCBR | RCBR), /* write 11 */
- 0, 0, /* BRG time constant, write 12 + 13 */
- (BRSRC | BRENABL), /* write 14 */
- 0 /* write 15 */
-};
-
-static struct tty_driver *serial_driver;
-
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
-/* number of characters left in xmit buffer before we ask for more */
-#define WAKEUP_CHARS 256
-
-/*
- * Debugging.
- */
-#undef SERIAL_DEBUG_OPEN
-#undef SERIAL_DEBUG_FLOW
-#undef SERIAL_DEBUG_THROTTLE
-#undef SERIAL_PARANOIA_CHECK
-
-#undef ZS_DEBUG_REGS
-
-#ifdef SERIAL_DEBUG_THROTTLE
-#define _tty_name(tty,buf) tty_name(tty,buf)
-#endif
-
-#define RS_STROBE_TIME 10
-#define RS_ISR_PASS_LIMIT 256
-
-static void probe_sccs(void);
-static void change_speed(struct dec_serial *info);
-static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
-
-static inline int serial_paranoia_check(struct dec_serial *info,
- char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- static const char *badmagic =
- "Warning: bad magic number for serial struct %s in %s\n";
- static const char *badinfo =
- "Warning: null mac_serial for %s in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (info->magic != SERIAL_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-/*
- * This is used to figure out the divisor speeds and the timeouts
- */
-static int baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 0 };
-
-/*
- * Reading and writing Z8530 registers.
- */
-static inline unsigned char read_zsreg(struct dec_zschannel *channel,
- unsigned char reg)
-{
- unsigned char retval;
-
- if (reg != 0) {
- *channel->control = reg & 0xf;
- fast_iob(); RECOVERY_DELAY;
- }
- retval = *channel->control;
- RECOVERY_DELAY;
- return retval;
-}
-
-static inline void write_zsreg(struct dec_zschannel *channel,
- unsigned char reg, unsigned char value)
-{
- if (reg != 0) {
- *channel->control = reg & 0xf;
- fast_iob(); RECOVERY_DELAY;
- }
- *channel->control = value;
- fast_iob(); RECOVERY_DELAY;
- return;
-}
-
-static inline unsigned char read_zsdata(struct dec_zschannel *channel)
-{
- unsigned char retval;
-
- retval = *channel->data;
- RECOVERY_DELAY;
- return retval;
-}
-
-static inline void write_zsdata(struct dec_zschannel *channel,
- unsigned char value)
-{
- *channel->data = value;
- fast_iob(); RECOVERY_DELAY;
- return;
-}
-
-static inline void load_zsregs(struct dec_zschannel *channel,
- unsigned char *regs)
-{
-/* ZS_CLEARERR(channel);
- ZS_CLEARFIFO(channel); */
- /* Load 'em up */
- write_zsreg(channel, R3, regs[R3] & ~RxENABLE);
- write_zsreg(channel, R5, regs[R5] & ~TxENAB);
- write_zsreg(channel, R4, regs[R4]);
- write_zsreg(channel, R9, regs[R9]);
- write_zsreg(channel, R1, regs[R1]);
- write_zsreg(channel, R2, regs[R2]);
- write_zsreg(channel, R10, regs[R10]);
- write_zsreg(channel, R11, regs[R11]);
- write_zsreg(channel, R12, regs[R12]);
- write_zsreg(channel, R13, regs[R13]);
- write_zsreg(channel, R14, regs[R14]);
- write_zsreg(channel, R15, regs[R15]);
- write_zsreg(channel, R3, regs[R3]);
- write_zsreg(channel, R5, regs[R5]);
- return;
-}
-
-/* Sets or clears DTR/RTS on the requested line */
-static inline void zs_rtsdtr(struct dec_serial *info, int which, int set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&zs_lock, flags);
- if (info->zs_channel != info->zs_chan_a) {
- if (set) {
- info->zs_chan_a->curregs[5] |= (which & (RTS | DTR));
- } else {
- info->zs_chan_a->curregs[5] &= ~(which & (RTS | DTR));
- }
- write_zsreg(info->zs_chan_a, 5, info->zs_chan_a->curregs[5]);
- }
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/* Utility routines for the Zilog */
-static inline int get_zsbaud(struct dec_serial *ss)
-{
- struct dec_zschannel *channel = ss->zs_channel;
- int brg;
-
- /* The baud rate is split up between two 8-bit registers in
- * what is termed 'BRG time constant' format in my docs for
- * the chip, it is a function of the clk rate the chip is
- * receiving which happens to be constant.
- */
- brg = (read_zsreg(channel, 13) << 8);
- brg |= read_zsreg(channel, 12);
- return BRG_TO_BPS(brg, (zs_parms->clock/(ss->clk_divisor)));
-}
-
-/* On receive, this clears errors and the receiver interrupts */
-static inline void rs_recv_clear(struct dec_zschannel *zsc)
-{
- write_zsreg(zsc, 0, ERR_RES);
- write_zsreg(zsc, 0, RES_H_IUS); /* XXX this is unnecessary */
-}
-
-/*
- * ----------------------------------------------------------------------
- *
- * Here starts the interrupt handling routines. All of the following
- * subroutines are declared as inline and are folded into
- * rs_interrupt(). They were separated out for readability's sake.
- *
- * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
- * -----------------------------------------------------------------------
- */
-
-/*
- * This routine is used by the interrupt handler to schedule
- * processing in the software interrupt portion of the driver.
- */
-static void rs_sched_event(struct dec_serial *info, int event)
-{
- info->event |= 1 << event;
- tasklet_schedule(&info->tlet);
-}
-
-static void receive_chars(struct dec_serial *info)
-{
- struct tty_struct *tty = info->tty;
- unsigned char ch, stat, flag;
-
- while ((read_zsreg(info->zs_channel, R0) & Rx_CH_AV) != 0) {
-
- stat = read_zsreg(info->zs_channel, R1);
- ch = read_zsdata(info->zs_channel);
-
- if (!tty && (!info->hook || !info->hook->rx_char))
- continue;
-
- flag = TTY_NORMAL;
- if (info->tty_break) {
- info->tty_break = 0;
- flag = TTY_BREAK;
- if (info->flags & ZILOG_SAK)
- do_SAK(tty);
- /* Ignore the null char got when BREAK is removed. */
- if (ch == 0)
- continue;
- } else {
- if (stat & Rx_OVR) {
- flag = TTY_OVERRUN;
- } else if (stat & FRM_ERR) {
- flag = TTY_FRAME;
- } else if (stat & PAR_ERR) {
- flag = TTY_PARITY;
- }
- if (flag != TTY_NORMAL)
- /* reset the error indication */
- write_zsreg(info->zs_channel, R0, ERR_RES);
- }
-
-#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
- !defined(MODULE)
- if (break_pressed && info->line == sercons.index) {
- /* Ignore the null char got when BREAK is removed. */
- if (ch == 0)
- continue;
- if (time_before(jiffies, break_pressed + HZ * 5)) {
- handle_sysrq(ch, NULL);
- break_pressed = 0;
- continue;
- }
- break_pressed = 0;
- }
-#endif
-
- if (info->hook && info->hook->rx_char) {
- (*info->hook->rx_char)(ch, flag);
- return;
- }
-
- tty_insert_flip_char(tty, ch, flag);
- }
- if (tty)
- tty_flip_buffer_push(tty);
-}
-
-static void transmit_chars(struct dec_serial *info)
-{
- if ((read_zsreg(info->zs_channel, R0) & Tx_BUF_EMP) == 0)
- return;
- info->tx_active = 0;
-
- if (info->x_char) {
- /* Send next char */
- write_zsdata(info->zs_channel, info->x_char);
- info->x_char = 0;
- info->tx_active = 1;
- return;
- }
-
- if ((info->xmit_cnt <= 0) || (info->tty && info->tty->stopped)
- || info->tx_stopped) {
- write_zsreg(info->zs_channel, R0, RES_Tx_P);
- return;
- }
- /* Send char */
- write_zsdata(info->zs_channel, info->xmit_buf[info->xmit_tail++]);
- info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt--;
- info->tx_active = 1;
-
- if (info->xmit_cnt < WAKEUP_CHARS)
- rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
-}
-
-static void status_handle(struct dec_serial *info)
-{
- unsigned char stat;
-
- /* Get status from Read Register 0 */
- stat = read_zsreg(info->zs_channel, R0);
-
- if ((stat & BRK_ABRT) && !(info->read_reg_zero & BRK_ABRT)) {
-#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
- !defined(MODULE)
- if (info->line == sercons.index) {
- if (!break_pressed)
- break_pressed = jiffies;
- } else
-#endif
- info->tty_break = 1;
- }
-
- if (info->zs_channel != info->zs_chan_a) {
-
- /* Check for DCD transitions */
- if (info->tty && !C_CLOCAL(info->tty) &&
- ((stat ^ info->read_reg_zero) & DCD) != 0 ) {
- if (stat & DCD) {
- wake_up_interruptible(&info->open_wait);
- } else {
- tty_hangup(info->tty);
- }
- }
-
- /* Check for CTS transitions */
- if (info->tty && C_CRTSCTS(info->tty)) {
- if ((stat & CTS) != 0) {
- if (info->tx_stopped) {
- info->tx_stopped = 0;
- if (!info->tx_active)
- transmit_chars(info);
- }
- } else {
- info->tx_stopped = 1;
- }
- }
-
- }
-
- /* Clear status condition... */
- write_zsreg(info->zs_channel, R0, RES_EXT_INT);
- info->read_reg_zero = stat;
-}
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-static irqreturn_t rs_interrupt(int irq, void *dev_id)
-{
- struct dec_serial *info = (struct dec_serial *) dev_id;
- irqreturn_t status = IRQ_NONE;
- unsigned char zs_intreg;
- int shift;
-
- /* NOTE: The read register 3, which holds the irq status,
- * does so for both channels on each chip. Although
- * the status value itself must be read from the A
- * channel and is only valid when read from channel A.
- * Yes... broken hardware...
- */
-#define CHAN_IRQMASK (CHBRxIP | CHBTxIP | CHBEXT)
-
- if (info->zs_chan_a == info->zs_channel)
- shift = 3; /* Channel A */
- else
- shift = 0; /* Channel B */
-
- for (;;) {
- zs_intreg = read_zsreg(info->zs_chan_a, R3) >> shift;
- if ((zs_intreg & CHAN_IRQMASK) == 0)
- break;
-
- status = IRQ_HANDLED;
-
- if (zs_intreg & CHBRxIP) {
- receive_chars(info);
- }
- if (zs_intreg & CHBTxIP) {
- transmit_chars(info);
- }
- if (zs_intreg & CHBEXT) {
- status_handle(info);
- }
- }
-
- /* Why do we need this ? */
- write_zsreg(info->zs_channel, 0, RES_H_IUS);
-
- return status;
-}
-
-#ifdef ZS_DEBUG_REGS
-void zs_dump (void) {
- int i, j;
- for (i = 0; i < zs_channels_found; i++) {
- struct dec_zschannel *ch = &zs_channels[i];
- if ((long)ch->control == UNI_IO_BASE+UNI_SCC1A_CTRL) {
- for (j = 0; j < 15; j++) {
- printk("W%d = 0x%x\t",
- j, (int)ch->curregs[j]);
- }
- for (j = 0; j < 15; j++) {
- printk("R%d = 0x%x\t",
- j, (int)read_zsreg(ch,j));
- }
- printk("\n\n");
- }
- }
-}
-#endif
-
-/*
- * -------------------------------------------------------------------
- * Here ends the serial interrupt routines.
- * -------------------------------------------------------------------
- */
-
-/*
- * ------------------------------------------------------------
- * rs_stop() and rs_start()
- *
- * This routines are called before setting or resetting tty->stopped.
- * ------------------------------------------------------------
- */
-static void rs_stop(struct tty_struct *tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_stop"))
- return;
-
-#if 1
- spin_lock_irqsave(&zs_lock, flags);
- if (info->zs_channel->curregs[5] & TxENAB) {
- info->zs_channel->curregs[5] &= ~TxENAB;
- write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
- }
- spin_unlock_irqrestore(&zs_lock, flags);
-#endif
-}
-
-static void rs_start(struct tty_struct *tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_start"))
- return;
-
- spin_lock_irqsave(&zs_lock, flags);
-#if 1
- if (info->xmit_cnt && info->xmit_buf && !(info->zs_channel->curregs[5] & TxENAB)) {
- info->zs_channel->curregs[5] |= TxENAB;
- write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
- }
-#else
- if (info->xmit_cnt && info->xmit_buf && !info->tx_active) {
- transmit_chars(info);
- }
-#endif
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/*
- * This routine is used to handle the "bottom half" processing for the
- * serial driver, known also the "software interrupt" processing.
- * This processing is done at the kernel interrupt level, after the
- * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
- * is where time-consuming activities which can not be done in the
- * interrupt driver proper are done; the interrupt driver schedules
- * them using rs_sched_event(), and they get done here.
- */
-
-static void do_softint(unsigned long private_)
-{
- struct dec_serial *info = (struct dec_serial *) private_;
- struct tty_struct *tty;
-
- tty = info->tty;
- if (!tty)
- return;
-
- if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event))
- tty_wakeup(tty);
-}
-
-static int zs_startup(struct dec_serial * info)
-{
- unsigned long flags;
-
- if (info->flags & ZILOG_INITIALIZED)
- return 0;
-
- if (!info->xmit_buf) {
- info->xmit_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL);
- if (!info->xmit_buf)
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&zs_lock, flags);
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("starting up ttyS%d (irq %d)...", info->line, info->irq);
-#endif
-
- /*
- * Clear the receive FIFO.
- */
- ZS_CLEARFIFO(info->zs_channel);
- info->xmit_fifo_size = 1;
-
- /*
- * Clear the interrupt registers.
- */
- write_zsreg(info->zs_channel, R0, ERR_RES);
- write_zsreg(info->zs_channel, R0, RES_H_IUS);
-
- /*
- * Set the speed of the serial port
- */
- change_speed(info);
-
- /*
- * Turn on RTS and DTR.
- */
- zs_rtsdtr(info, RTS | DTR, 1);
-
- /*
- * Finally, enable sequencing and interrupts
- */
- info->zs_channel->curregs[R1] &= ~RxINT_MASK;
- info->zs_channel->curregs[R1] |= (RxINT_ALL | TxINT_ENAB |
- EXT_INT_ENAB);
- info->zs_channel->curregs[R3] |= RxENABLE;
- info->zs_channel->curregs[R5] |= TxENAB;
- info->zs_channel->curregs[R15] |= (DCDIE | CTSIE | TxUIE | BRKIE);
- write_zsreg(info->zs_channel, R1, info->zs_channel->curregs[R1]);
- write_zsreg(info->zs_channel, R3, info->zs_channel->curregs[R3]);
- write_zsreg(info->zs_channel, R5, info->zs_channel->curregs[R5]);
- write_zsreg(info->zs_channel, R15, info->zs_channel->curregs[R15]);
-
- /*
- * And clear the interrupt registers again for luck.
- */
- write_zsreg(info->zs_channel, R0, ERR_RES);
- write_zsreg(info->zs_channel, R0, RES_H_IUS);
-
- /* Save the current value of RR0 */
- info->read_reg_zero = read_zsreg(info->zs_channel, R0);
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- info->flags |= ZILOG_INITIALIZED;
- spin_unlock_irqrestore(&zs_lock, flags);
- return 0;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void shutdown(struct dec_serial * info)
-{
- unsigned long flags;
-
- if (!(info->flags & ZILOG_INITIALIZED))
- return;
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("Shutting down serial port %d (irq %d)....", info->line,
- info->irq);
-#endif
-
- spin_lock_irqsave(&zs_lock, flags);
-
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = 0;
- }
-
- info->zs_channel->curregs[1] = 0;
- write_zsreg(info->zs_channel, 1, info->zs_channel->curregs[1]); /* no interrupts */
-
- info->zs_channel->curregs[3] &= ~RxENABLE;
- write_zsreg(info->zs_channel, 3, info->zs_channel->curregs[3]);
-
- info->zs_channel->curregs[5] &= ~TxENAB;
- write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
- if (!info->tty || C_HUPCL(info->tty)) {
- zs_rtsdtr(info, RTS | DTR, 0);
- }
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- info->flags &= ~ZILOG_INITIALIZED;
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void change_speed(struct dec_serial *info)
-{
- unsigned cflag;
- int i;
- int brg, bits;
- unsigned long flags;
-
- if (!info->hook) {
- if (!info->tty || !info->tty->termios)
- return;
- cflag = info->tty->termios->c_cflag;
- if (!info->port)
- return;
- } else {
- cflag = info->hook->cflags;
- }
-
- i = cflag & CBAUD;
- if (i & CBAUDEX) {
- i &= ~CBAUDEX;
- if (i < 1 || i > 2) {
- if (!info->hook)
- info->tty->termios->c_cflag &= ~CBAUDEX;
- else
- info->hook->cflags &= ~CBAUDEX;
- } else
- i += 15;
- }
-
- spin_lock_irqsave(&zs_lock, flags);
- info->zs_baud = baud_table[i];
- if (info->zs_baud) {
- brg = BPS_TO_BRG(info->zs_baud, zs_parms->clock/info->clk_divisor);
- info->zs_channel->curregs[12] = (brg & 255);
- info->zs_channel->curregs[13] = ((brg >> 8) & 255);
- zs_rtsdtr(info, DTR, 1);
- } else {
- zs_rtsdtr(info, RTS | DTR, 0);
- return;
- }
-
- /* byte size and parity */
- info->zs_channel->curregs[3] &= ~RxNBITS_MASK;
- info->zs_channel->curregs[5] &= ~TxNBITS_MASK;
- switch (cflag & CSIZE) {
- case CS5:
- bits = 7;
- info->zs_channel->curregs[3] |= Rx5;
- info->zs_channel->curregs[5] |= Tx5;
- break;
- case CS6:
- bits = 8;
- info->zs_channel->curregs[3] |= Rx6;
- info->zs_channel->curregs[5] |= Tx6;
- break;
- case CS7:
- bits = 9;
- info->zs_channel->curregs[3] |= Rx7;
- info->zs_channel->curregs[5] |= Tx7;
- break;
- case CS8:
- default: /* defaults to 8 bits */
- bits = 10;
- info->zs_channel->curregs[3] |= Rx8;
- info->zs_channel->curregs[5] |= Tx8;
- break;
- }
-
- info->timeout = ((info->xmit_fifo_size*HZ*bits) / info->zs_baud);
- info->timeout += HZ/50; /* Add .02 seconds of slop */
-
- info->zs_channel->curregs[4] &= ~(SB_MASK | PAR_ENA | PAR_EVEN);
- if (cflag & CSTOPB) {
- info->zs_channel->curregs[4] |= SB2;
- } else {
- info->zs_channel->curregs[4] |= SB1;
- }
- if (cflag & PARENB) {
- info->zs_channel->curregs[4] |= PAR_ENA;
- }
- if (!(cflag & PARODD)) {
- info->zs_channel->curregs[4] |= PAR_EVEN;
- }
-
- if (!(cflag & CLOCAL)) {
- if (!(info->zs_channel->curregs[15] & DCDIE))
- info->read_reg_zero = read_zsreg(info->zs_channel, 0);
- info->zs_channel->curregs[15] |= DCDIE;
- } else
- info->zs_channel->curregs[15] &= ~DCDIE;
- if (cflag & CRTSCTS) {
- info->zs_channel->curregs[15] |= CTSIE;
- if ((read_zsreg(info->zs_channel, 0) & CTS) == 0)
- info->tx_stopped = 1;
- } else {
- info->zs_channel->curregs[15] &= ~CTSIE;
- info->tx_stopped = 0;
- }
-
- /* Load up the new values */
- load_zsregs(info->zs_channel, info->zs_channel->curregs);
-
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static void rs_flush_chars(struct tty_struct *tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
- return;
-
- if (info->xmit_cnt <= 0 || tty->stopped || info->tx_stopped ||
- !info->xmit_buf)
- return;
-
- /* Enable transmitter */
- spin_lock_irqsave(&zs_lock, flags);
- transmit_chars(info);
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static int rs_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
-{
- int c, total = 0;
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_write"))
- return 0;
-
- if (!tty || !info->xmit_buf)
- return 0;
-
- while (1) {
- spin_lock_irqsave(&zs_lock, flags);
- c = min(count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
- if (c <= 0)
- break;
-
- memcpy(info->xmit_buf + info->xmit_head, buf, c);
- info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt += c;
- spin_unlock_irqrestore(&zs_lock, flags);
- buf += c;
- count -= c;
- total += c;
- }
-
- if (info->xmit_cnt && !tty->stopped && !info->tx_stopped
- && !info->tx_active)
- transmit_chars(info);
- spin_unlock_irqrestore(&zs_lock, flags);
- return total;
-}
-
-static int rs_write_room(struct tty_struct *tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- int ret;
-
- if (serial_paranoia_check(info, tty->name, "rs_write_room"))
- return 0;
- ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
- return 0;
- return info->xmit_cnt;
-}
-
-static void rs_flush_buffer(struct tty_struct *tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
- return;
- spin_lock_irq(&zs_lock);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- spin_unlock_irq(&zs_lock);
- tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void rs_throttle(struct tty_struct * tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
-#ifdef SERIAL_DEBUG_THROTTLE
- char buf[64];
-
- printk("throttle %s: %d....\n", _tty_name(tty, buf),
- tty->ldisc.chars_in_buffer(tty));
-#endif
-
- if (serial_paranoia_check(info, tty->name, "rs_throttle"))
- return;
-
- if (I_IXOFF(tty)) {
- spin_lock_irqsave(&zs_lock, flags);
- info->x_char = STOP_CHAR(tty);
- if (!info->tx_active)
- transmit_chars(info);
- spin_unlock_irqrestore(&zs_lock, flags);
- }
-
- if (C_CRTSCTS(tty)) {
- zs_rtsdtr(info, RTS, 0);
- }
-}
-
-static void rs_unthrottle(struct tty_struct * tty)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
-#ifdef SERIAL_DEBUG_THROTTLE
- char buf[64];
-
- printk("unthrottle %s: %d....\n", _tty_name(tty, buf),
- tty->ldisc.chars_in_buffer(tty));
-#endif
-
- if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
- return;
-
- if (I_IXOFF(tty)) {
- spin_lock_irqsave(&zs_lock, flags);
- if (info->x_char)
- info->x_char = 0;
- else {
- info->x_char = START_CHAR(tty);
- if (!info->tx_active)
- transmit_chars(info);
- }
- spin_unlock_irqrestore(&zs_lock, flags);
- }
-
- if (C_CRTSCTS(tty)) {
- zs_rtsdtr(info, RTS, 1);
- }
-}
-
-/*
- * ------------------------------------------------------------
- * rs_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-static int get_serial_info(struct dec_serial * info,
- struct serial_struct * retinfo)
-{
- struct serial_struct tmp;
-
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->line;
- tmp.port = info->port;
- tmp.irq = info->irq;
- tmp.flags = info->flags;
- tmp.baud_base = info->baud_base;
- tmp.close_delay = info->close_delay;
- tmp.closing_wait = info->closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- return copy_to_user(retinfo,&tmp,sizeof(*retinfo)) ? -EFAULT : 0;
-}
-
-static int set_serial_info(struct dec_serial * info,
- struct serial_struct * new_info)
-{
- struct serial_struct new_serial;
- struct dec_serial old_info;
- int retval = 0;
-
- if (!new_info)
- return -EFAULT;
- copy_from_user(&new_serial,new_info,sizeof(new_serial));
- old_info = *info;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.type != info->type) ||
- (new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ~ZILOG_USR_MASK) !=
- (info->flags & ~ZILOG_USR_MASK)))
- return -EPERM;
- info->flags = ((info->flags & ~ZILOG_USR_MASK) |
- (new_serial.flags & ZILOG_USR_MASK));
- info->custom_divisor = new_serial.custom_divisor;
- goto check_and_exit;
- }
-
- if (info->count > 1)
- return -EBUSY;
-
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
-
- info->baud_base = new_serial.baud_base;
- info->flags = ((info->flags & ~ZILOG_FLAGS) |
- (new_serial.flags & ZILOG_FLAGS));
- info->type = new_serial.type;
- info->close_delay = new_serial.close_delay;
- info->closing_wait = new_serial.closing_wait;
-
-check_and_exit:
- retval = zs_startup(info);
- return retval;
-}
-
-/*
- * get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not be done when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int get_lsr_info(struct dec_serial * info, unsigned int *value)
-{
- unsigned char status;
-
- spin_lock(&zs_lock);
- status = read_zsreg(info->zs_channel, 0);
- spin_unlock_irq(&zs_lock);
- put_user(status,value);
- return 0;
-}
-
-static int rs_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct dec_serial * info = (struct dec_serial *)tty->driver_data;
- unsigned char control, status_a, status_b;
- unsigned int result;
-
- if (info->hook)
- return -ENODEV;
-
- if (serial_paranoia_check(info, tty->name, __FUNCTION__))
- return -ENODEV;
-
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- if (info->zs_channel == info->zs_chan_a)
- result = 0;
- else {
- spin_lock(&zs_lock);
- control = info->zs_chan_a->curregs[5];
- status_a = read_zsreg(info->zs_chan_a, 0);
- status_b = read_zsreg(info->zs_channel, 0);
- spin_unlock_irq(&zs_lock);
- result = ((control & RTS) ? TIOCM_RTS: 0)
- | ((control & DTR) ? TIOCM_DTR: 0)
- | ((status_b & DCD) ? TIOCM_CAR: 0)
- | ((status_a & DCD) ? TIOCM_RNG: 0)
- | ((status_a & SYNC_HUNT) ? TIOCM_DSR: 0)
- | ((status_b & CTS) ? TIOCM_CTS: 0);
- }
- return result;
-}
-
-static int rs_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-
- if (info->hook)
- return -ENODEV;
-
- if (serial_paranoia_check(info, tty->name, __FUNCTION__))
- return -ENODEV;
-
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- if (info->zs_channel == info->zs_chan_a)
- return 0;
-
- spin_lock(&zs_lock);
- if (set & TIOCM_RTS)
- info->zs_chan_a->curregs[5] |= RTS;
- if (set & TIOCM_DTR)
- info->zs_chan_a->curregs[5] |= DTR;
- if (clear & TIOCM_RTS)
- info->zs_chan_a->curregs[5] &= ~RTS;
- if (clear & TIOCM_DTR)
- info->zs_chan_a->curregs[5] &= ~DTR;
- write_zsreg(info->zs_chan_a, 5, info->zs_chan_a->curregs[5]);
- spin_unlock_irq(&zs_lock);
- return 0;
-}
-
-/*
- * rs_break - turn transmit break condition on/off
- */
-static void rs_break(struct tty_struct *tty, int break_state)
-{
- struct dec_serial *info = (struct dec_serial *) tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_break"))
- return;
- if (!info->port)
- return;
-
- spin_lock_irqsave(&zs_lock, flags);
- if (break_state == -1)
- info->zs_channel->curregs[5] |= SND_BRK;
- else
- info->zs_channel->curregs[5] &= ~SND_BRK;
- write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]);
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static int rs_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-
- if (info->hook)
- return -ENODEV;
-
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
-
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
- (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- }
-
- switch (cmd) {
- case TIOCGSERIAL:
- if (!access_ok(VERIFY_WRITE, (void *)arg,
- sizeof(struct serial_struct)))
- return -EFAULT;
- return get_serial_info(info, (struct serial_struct *)arg);
-
- case TIOCSSERIAL:
- return set_serial_info(info, (struct serial_struct *)arg);
-
- case TIOCSERGETLSR: /* Get line status register */
- if (!access_ok(VERIFY_WRITE, (void *)arg,
- sizeof(unsigned int)))
- return -EFAULT;
- return get_lsr_info(info, (unsigned int *)arg);
-
- case TIOCSERGSTRUCT:
- if (!access_ok(VERIFY_WRITE, (void *)arg,
- sizeof(struct dec_serial)))
- return -EFAULT;
- copy_from_user((struct dec_serial *)arg, info,
- sizeof(struct dec_serial));
- return 0;
-
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct dec_serial *info = (struct dec_serial *)tty->driver_data;
- int was_stopped;
-
- if (tty->termios->c_cflag == old_termios->c_cflag)
- return;
- was_stopped = info->tx_stopped;
-
- change_speed(info);
-
- if (was_stopped && !info->tx_stopped)
- rs_start(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_close()
- *
- * This routine is called when the serial port gets closed.
- * Wait for the last remaining data to be sent.
- * ------------------------------------------------------------
- */
-static void rs_close(struct tty_struct *tty, struct file * filp)
-{
- struct dec_serial * info = (struct dec_serial *)tty->driver_data;
- unsigned long flags;
-
- if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
- return;
-
- spin_lock_irqsave(&zs_lock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&zs_lock, flags);
- return;
- }
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("rs_close ttyS%d, count = %d\n", info->line, info->count);
-#endif
- if ((tty->count == 1) && (info->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk("rs_close: bad serial port count; tty->count is 1, "
- "info->count is %d\n", info->count);
- info->count = 1;
- }
- if (--info->count < 0) {
- printk("rs_close: bad serial port count for ttyS%d: %d\n",
- info->line, info->count);
- info->count = 0;
- }
- if (info->count) {
- spin_unlock_irqrestore(&zs_lock, flags);
- return;
- }
- info->flags |= ZILOG_CLOSING;
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- if (info->closing_wait != ZILOG_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, info->closing_wait);
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receiver and receive interrupts.
- */
- info->zs_channel->curregs[3] &= ~RxENABLE;
- write_zsreg(info->zs_channel, 3, info->zs_channel->curregs[3]);
- info->zs_channel->curregs[1] = 0; /* disable any rx ints */
- write_zsreg(info->zs_channel, 1, info->zs_channel->curregs[1]);
- ZS_CLEARFIFO(info->zs_channel);
- if (info->flags & ZILOG_INITIALIZED) {
- /*
- * Before we drop DTR, make sure the SCC transmitter
- * has completely drained.
- */
- rs_wait_until_sent(tty, info->timeout);
- }
-
- shutdown(info);
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
- tty_ldisc_flush(tty);
- tty->closing = 0;
- info->event = 0;
- info->tty = 0;
- if (info->blocked_open) {
- if (info->close_delay) {
- msleep_interruptible(jiffies_to_msecs(info->close_delay));
- }
- wake_up_interruptible(&info->open_wait);
- }
- info->flags &= ~(ZILOG_NORMAL_ACTIVE|ZILOG_CLOSING);
- wake_up_interruptible(&info->close_wait);
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-/*
- * rs_wait_until_sent() --- wait until the transmitter is empty
- */
-static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct dec_serial *info = (struct dec_serial *) tty->driver_data;
- unsigned long orig_jiffies;
- int char_time;
-
- if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
- return;
-
- orig_jiffies = jiffies;
- /*
- * Set the check interval to be 1/5 of the estimated time to
- * send a single character, and make it at least 1. The check
- * interval should also be less than the timeout.
- */
- char_time = (info->timeout - HZ/50) / info->xmit_fifo_size;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
- if (timeout)
- char_time = min(char_time, timeout);
- while ((read_zsreg(info->zs_channel, 1) & Tx_BUF_EMP) == 0) {
- msleep_interruptible(jiffies_to_msecs(char_time));
- if (signal_pending(current))
- break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
- break;
- }
- current->state = TASK_RUNNING;
-}
-
-/*
- * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-static void rs_hangup(struct tty_struct *tty)
-{
- struct dec_serial * info = (struct dec_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_hangup"))
- return;
-
- rs_flush_buffer(tty);
- shutdown(info);
- info->event = 0;
- info->count = 0;
- info->flags &= ~ZILOG_NORMAL_ACTIVE;
- info->tty = 0;
- wake_up_interruptible(&info->open_wait);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_open() and friends
- * ------------------------------------------------------------
- */
-static int block_til_ready(struct tty_struct *tty, struct file * filp,
- struct dec_serial *info)
-{
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
-
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (info->flags & ZILOG_CLOSING) {
- interruptible_sleep_on(&info->close_wait);
-#ifdef SERIAL_DO_RESTART
- return ((info->flags & ZILOG_HUP_NOTIFY) ?
- -EAGAIN : -ERESTARTSYS);
-#else
- return -EAGAIN;
-#endif
- }
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- info->flags |= ZILOG_NORMAL_ACTIVE;
- return 0;
- }
-
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->count is dropped by one, so that
- * rs_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&info->open_wait, &wait);
-#ifdef SERIAL_DEBUG_OPEN
- printk("block_til_ready before block: ttyS%d, count = %d\n",
- info->line, info->count);
-#endif
- spin_lock(&zs_lock);
- if (!tty_hung_up_p(filp))
- info->count--;
- spin_unlock_irq(&zs_lock);
- info->blocked_open++;
- while (1) {
- spin_lock(&zs_lock);
- if (tty->termios->c_cflag & CBAUD)
- zs_rtsdtr(info, RTS | DTR, 1);
- spin_unlock_irq(&zs_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(info->flags & ZILOG_INITIALIZED)) {
-#ifdef SERIAL_DO_RESTART
- if (info->flags & ZILOG_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
-#else
- retval = -EAGAIN;
-#endif
- break;
- }
- if (!(info->flags & ZILOG_CLOSING) &&
- (do_clocal || (read_zsreg(info->zs_channel, 0) & DCD)))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk("block_til_ready blocking: ttyS%d, count = %d\n",
- info->line, info->count);
-#endif
- schedule();
- }
- current->state = TASK_RUNNING;
- remove_wait_queue(&info->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- info->count++;
- info->blocked_open--;
-#ifdef SERIAL_DEBUG_OPEN
- printk("block_til_ready after blocking: ttyS%d, count = %d\n",
- info->line, info->count);
-#endif
- if (retval)
- return retval;
- info->flags |= ZILOG_NORMAL_ACTIVE;
- return 0;
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking in its ZILOG structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int rs_open(struct tty_struct *tty, struct file * filp)
-{
- struct dec_serial *info;
- int retval, line;
-
- line = tty->index;
- if ((line < 0) || (line >= zs_channels_found))
- return -ENODEV;
- info = zs_soft + line;
-
- if (info->hook)
- return -ENODEV;
-
- if (serial_paranoia_check(info, tty->name, "rs_open"))
- return -ENODEV;
-#ifdef SERIAL_DEBUG_OPEN
- printk("rs_open %s, count = %d\n", tty->name, info->count);
-#endif
-
- info->count++;
- tty->driver_data = info;
- info->tty = tty;
-
- /*
- * If the port is the middle of closing, bail out now
- */
- if (tty_hung_up_p(filp) ||
- (info->flags & ZILOG_CLOSING)) {
- if (info->flags & ZILOG_CLOSING)
- interruptible_sleep_on(&info->close_wait);
-#ifdef SERIAL_DO_RESTART
- return ((info->flags & ZILOG_HUP_NOTIFY) ?
- -EAGAIN : -ERESTARTSYS);
-#else
- return -EAGAIN;
-#endif
- }
-
- /*
- * Start up serial port
- */
- retval = zs_startup(info);
- if (retval)
- return retval;
-
- retval = block_til_ready(tty, filp, info);
- if (retval) {
-#ifdef SERIAL_DEBUG_OPEN
- printk("rs_open returning after block_til_ready with %d\n",
- retval);
-#endif
- return retval;
- }
-
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
- if (sercons.cflag && sercons.index == line) {
- tty->termios->c_cflag = sercons.cflag;
- sercons.cflag = 0;
- change_speed(info);
- }
-#endif
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("rs_open %s successful...", tty->name);
-#endif
-/* tty->low_latency = 1; */
- return 0;
-}
-
-/* Finally, routines used to initialize the serial driver. */
-
-static void __init show_serial_version(void)
-{
- printk("DECstation Z8530 serial driver version 0.09\n");
-}
-
-/* Initialize Z8530s zs_channels
- */
-
-static void probe_sccs(void)
-{
- struct dec_serial **pp;
- int i, n, n_chips = 0, n_channels, chip, channel;
- unsigned long flags;
-
- /*
- * did we get here by accident?
- */
- if(!BUS_PRESENT) {
- printk("Not on JUNKIO machine, skipping probe_sccs\n");
- return;
- }
-
- switch(mips_machtype) {
-#ifdef CONFIG_MACH_DECSTATION
- case MACH_DS5000_2X0:
- case MACH_DS5900:
- n_chips = 2;
- zs_parms = &ds_parms;
- zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
- zs_parms->irq1 = dec_interrupt[DEC_IRQ_SCC1];
- break;
- case MACH_DS5000_1XX:
- n_chips = 2;
- zs_parms = &ds_parms;
- zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
- zs_parms->irq1 = dec_interrupt[DEC_IRQ_SCC1];
- break;
- case MACH_DS5000_XX:
- n_chips = 1;
- zs_parms = &ds_parms;
- zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
- break;
-#endif
- default:
- panic("zs: unsupported bus");
- }
- if (!zs_parms)
- panic("zs: uninitialized parms");
-
- pp = &zs_chain;
-
- n_channels = 0;
-
- for (chip = 0; chip < n_chips; chip++) {
- for (channel = 0; channel <= 1; channel++) {
- /*
- * The sccs reside on the high byte of the 16 bit IOBUS
- */
- zs_channels[n_channels].control =
- (volatile void *)CKSEG1ADDR(dec_kn_slot_base +
- (0 == chip ? zs_parms->scc0 : zs_parms->scc1) +
- (0 == channel ? zs_parms->channel_a_offset :
- zs_parms->channel_b_offset));
- zs_channels[n_channels].data =
- zs_channels[n_channels].control + 4;
-
-#ifndef CONFIG_SERIAL_DEC_CONSOLE
- /*
-			 * We're called early and memory management isn't up, yet.
- * Thus request_region would fail.
- */
- if (!request_region((unsigned long)
- zs_channels[n_channels].control,
- ZS_CHAN_IO_SIZE, "SCC"))
- panic("SCC I/O region is not free");
-#endif
- zs_soft[n_channels].zs_channel = &zs_channels[n_channels];
- /* HACK alert! */
- if (!(chip & 1))
- zs_soft[n_channels].irq = zs_parms->irq0;
- else
- zs_soft[n_channels].irq = zs_parms->irq1;
-
- /*
- * Identification of channel A. Location of channel A
- * inside chip depends on mapping of internal address
- * the chip decodes channels by.
- * CHANNEL_A_NR returns either 0 (in case of
- * DECstations) or 1 (in case of Baget).
- */
- if (CHANNEL_A_NR == channel)
- zs_soft[n_channels].zs_chan_a =
- &zs_channels[n_channels+1-2*CHANNEL_A_NR];
- else
- zs_soft[n_channels].zs_chan_a =
- &zs_channels[n_channels];
-
- *pp = &zs_soft[n_channels];
- pp = &zs_soft[n_channels].zs_next;
- n_channels++;
- }
- }
-
- *pp = 0;
- zs_channels_found = n_channels;
-
- for (n = 0; n < zs_channels_found; n++) {
- for (i = 0; i < 16; i++) {
- zs_soft[n].zs_channel->curregs[i] = zs_init_regs[i];
- }
- }
-
- spin_lock_irqsave(&zs_lock, flags);
- for (n = 0; n < zs_channels_found; n++) {
- if (n % 2 == 0) {
- write_zsreg(zs_soft[n].zs_chan_a, R9, FHWRES);
- udelay(10);
- write_zsreg(zs_soft[n].zs_chan_a, R9, 0);
- }
- load_zsregs(zs_soft[n].zs_channel,
- zs_soft[n].zs_channel->curregs);
- }
- spin_unlock_irqrestore(&zs_lock, flags);
-}
-
-static const struct tty_operations serial_ops = {
- .open = rs_open,
- .close = rs_close,
- .write = rs_write,
- .flush_chars = rs_flush_chars,
- .write_room = rs_write_room,
- .chars_in_buffer = rs_chars_in_buffer,
- .flush_buffer = rs_flush_buffer,
- .ioctl = rs_ioctl,
- .throttle = rs_throttle,
- .unthrottle = rs_unthrottle,
- .set_termios = rs_set_termios,
- .stop = rs_stop,
- .start = rs_start,
- .hangup = rs_hangup,
- .break_ctl = rs_break,
- .wait_until_sent = rs_wait_until_sent,
- .tiocmget = rs_tiocmget,
- .tiocmset = rs_tiocmset,
-};
-
-/* zs_init inits the driver */
-int __init zs_init(void)
-{
- int channel, i;
- struct dec_serial *info;
-
- if(!BUS_PRESENT)
- return -ENODEV;
-
- /* Find out how many Z8530 SCCs we have */
- if (zs_chain == 0)
- probe_sccs();
- serial_driver = alloc_tty_driver(zs_channels_found);
- if (!serial_driver)
- return -ENOMEM;
-
- show_serial_version();
-
- /* Initialize the tty_driver structure */
- /* Not all of this is exactly right for us. */
-
- serial_driver->owner = THIS_MODULE;
- serial_driver->name = "ttyS";
- serial_driver->major = TTY_MAJOR;
- serial_driver->minor_start = 64;
- serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- serial_driver->subtype = SERIAL_TYPE_NORMAL;
- serial_driver->init_termios = tty_std_termios;
- serial_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(serial_driver, &serial_ops);
-
- if (tty_register_driver(serial_driver))
- panic("Couldn't register serial driver");
-
- for (info = zs_chain, i = 0; info; info = info->zs_next, i++) {
-
- /* Needed before interrupts are enabled. */
- info->tty = 0;
- info->x_char = 0;
-
- if (info->hook && info->hook->init_info) {
- (*info->hook->init_info)(info);
- continue;
- }
-
- info->magic = SERIAL_MAGIC;
- info->port = (int) info->zs_channel->control;
- info->line = i;
- info->custom_divisor = 16;
- info->close_delay = 50;
- info->closing_wait = 3000;
- info->event = 0;
- info->count = 0;
- info->blocked_open = 0;
- tasklet_init(&info->tlet, do_softint, (unsigned long)info);
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->close_wait);
- printk("ttyS%02d at 0x%08x (irq = %d) is a Z85C30 SCC\n",
- info->line, info->port, info->irq);
- tty_register_device(serial_driver, info->line, NULL);
-
- }
-
- for (channel = 0; channel < zs_channels_found; ++channel) {
- zs_soft[channel].clk_divisor = 16;
- zs_soft[channel].zs_baud = get_zsbaud(&zs_soft[channel]);
-
- if (request_irq(zs_soft[channel].irq, rs_interrupt, IRQF_SHARED,
- "scc", &zs_soft[channel]))
- printk(KERN_ERR "decserial: can't get irq %d\n",
- zs_soft[channel].irq);
-
- if (zs_soft[channel].hook) {
- zs_startup(&zs_soft[channel]);
- if (zs_soft[channel].hook->init_channel)
- (*zs_soft[channel].hook->init_channel)
- (&zs_soft[channel]);
- }
- }
-
- return 0;
-}
-
-/*
- * polling I/O routines
- */
-static int zs_poll_tx_char(void *handle, unsigned char ch)
-{
- struct dec_serial *info = handle;
- struct dec_zschannel *chan = info->zs_channel;
- int ret;
-
- if(chan) {
- int loops = 10000;
-
- while (loops && !(read_zsreg(chan, 0) & Tx_BUF_EMP))
- loops--;
-
- if (loops) {
- write_zsdata(chan, ch);
- ret = 0;
- } else
- ret = -EAGAIN;
-
- return ret;
- } else
- return -ENODEV;
-}
-
-static int zs_poll_rx_char(void *handle)
-{
- struct dec_serial *info = handle;
- struct dec_zschannel *chan = info->zs_channel;
- int ret;
-
- if(chan) {
- int loops = 10000;
-
- while (loops && !(read_zsreg(chan, 0) & Rx_CH_AV))
- loops--;
-
- if (loops)
- ret = read_zsdata(chan);
- else
- ret = -EAGAIN;
-
- return ret;
- } else
- return -ENODEV;
-}
-
-int register_zs_hook(unsigned int channel, struct dec_serial_hook *hook)
-{
- struct dec_serial *info = &zs_soft[channel];
-
- if (info->hook) {
-		printk("%s: line %d already has a hook registered\n",
- __FUNCTION__, channel);
-
- return 0;
- } else {
- hook->poll_rx_char = zs_poll_rx_char;
- hook->poll_tx_char = zs_poll_tx_char;
- info->hook = hook;
-
- return 1;
- }
-}
-
-int unregister_zs_hook(unsigned int channel)
-{
- struct dec_serial *info = &zs_soft[channel];
-
- if (info->hook) {
- info->hook = NULL;
- return 1;
- } else {
- printk("%s: trying to unregister hook on line %d,"
- " but none is registered\n", __FUNCTION__, channel);
- return 0;
- }
-}
-
-/*
- * ------------------------------------------------------------
- * Serial console driver
- * ------------------------------------------------------------
- */
-#ifdef CONFIG_SERIAL_DEC_CONSOLE
-
-
-/*
- * Print a string to the serial port trying not to disturb
- * any possible real use of the port...
- */
-static void serial_console_write(struct console *co, const char *s,
- unsigned count)
-{
- struct dec_serial *info;
- int i;
-
- info = zs_soft + co->index;
-
- for (i = 0; i < count; i++, s++) {
- if(*s == '\n')
- zs_poll_tx_char(info, '\r');
- zs_poll_tx_char(info, *s);
- }
-}
-
-static struct tty_driver *serial_console_device(struct console *c, int *index)
-{
- *index = c->index;
- return serial_driver;
-}
-
-/*
- * Setup initial baud/bits/parity. We do two things here:
- * - construct a cflag setting for the first rs_open()
- * - initialize the serial port
- * Return non-zero if we didn't find a serial port.
- */
-static int serial_console_setup(struct console *co, char *options)
-{
- struct dec_serial *info;
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int cflag = CREAD | HUPCL | CLOCAL;
- int clk_divisor = 16;
- int brg;
- char *s;
- unsigned long flags;
-
- if(!BUS_PRESENT)
- return -ENODEV;
-
- info = zs_soft + co->index;
-
- if (zs_chain == 0)
- probe_sccs();
-
- info->is_cons = 1;
-
- if (options) {
- baud = simple_strtoul(options, NULL, 10);
- s = options;
- while(*s >= '0' && *s <= '9')
- s++;
- if (*s)
- parity = *s++;
- if (*s)
- bits = *s - '0';
- }
-
- /*
- * Now construct a cflag setting.
- */
- switch(baud) {
- case 1200:
- cflag |= B1200;
- break;
- case 2400:
- cflag |= B2400;
- break;
- case 4800:
- cflag |= B4800;
- break;
- case 19200:
- cflag |= B19200;
- break;
- case 38400:
- cflag |= B38400;
- break;
- case 57600:
- cflag |= B57600;
- break;
- case 115200:
- cflag |= B115200;
- break;
- case 9600:
- default:
- cflag |= B9600;
- /*
- * Set this to a sane value to prevent a divide error.
- */
- baud = 9600;
- break;
- }
- switch(bits) {
- case 7:
- cflag |= CS7;
- break;
- default:
- case 8:
- cflag |= CS8;
- break;
- }
- switch(parity) {
- case 'o': case 'O':
- cflag |= PARODD;
- break;
- case 'e': case 'E':
- cflag |= PARENB;
- break;
- }
- co->cflag = cflag;
-
- spin_lock_irqsave(&zs_lock, flags);
-
- /*
- * Set up the baud rate generator.
- */
- brg = BPS_TO_BRG(baud, zs_parms->clock / clk_divisor);
- info->zs_channel->curregs[R12] = (brg & 255);
- info->zs_channel->curregs[R13] = ((brg >> 8) & 255);
-
- /*
- * Set byte size and parity.
- */
- if (bits == 7) {
- info->zs_channel->curregs[R3] |= Rx7;
- info->zs_channel->curregs[R5] |= Tx7;
- } else {
- info->zs_channel->curregs[R3] |= Rx8;
- info->zs_channel->curregs[R5] |= Tx8;
- }
- if (cflag & PARENB) {
- info->zs_channel->curregs[R4] |= PAR_ENA;
- }
- if (!(cflag & PARODD)) {
- info->zs_channel->curregs[R4] |= PAR_EVEN;
- }
- info->zs_channel->curregs[R4] |= SB1;
-
- /*
- * Turn on RTS and DTR.
- */
- zs_rtsdtr(info, RTS | DTR, 1);
-
- /*
- * Finally, enable sequencing.
- */
- info->zs_channel->curregs[R3] |= RxENABLE;
- info->zs_channel->curregs[R5] |= TxENAB;
-
- /*
- * Clear the interrupt registers.
- */
- write_zsreg(info->zs_channel, R0, ERR_RES);
- write_zsreg(info->zs_channel, R0, RES_H_IUS);
-
- /*
- * Load up the new values.
- */
- load_zsregs(info->zs_channel, info->zs_channel->curregs);
-
- /* Save the current value of RR0 */
- info->read_reg_zero = read_zsreg(info->zs_channel, R0);
-
- zs_soft[co->index].clk_divisor = clk_divisor;
- zs_soft[co->index].zs_baud = get_zsbaud(&zs_soft[co->index]);
-
- spin_unlock_irqrestore(&zs_lock, flags);
-
- return 0;
-}
-
-static struct console sercons = {
- .name = "ttyS",
- .write = serial_console_write,
- .device = serial_console_device,
- .setup = serial_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-/*
- * Register console.
- */
-void __init zs_serial_console_init(void)
-{
- register_console(&sercons);
-}
-#endif /* ifdef CONFIG_SERIAL_DEC_CONSOLE */
-
-#ifdef CONFIG_KGDB
-struct dec_zschannel *zs_kgdbchan;
-static unsigned char scc_inittab[] = {
- 9, 0x80, /* reset A side (CHRA) */
- 13, 0, /* set baud rate divisor */
- 12, 1,
- 14, 1, /* baud rate gen enable, src=rtxc (BRENABL) */
- 11, 0x50, /* clocks = br gen (RCBR | TCBR) */
- 5, 0x6a, /* tx 8 bits, assert RTS (Tx8 | TxENAB | RTS) */
- 4, 0x44, /* x16 clock, 1 stop (SB1 | X16CLK)*/
- 3, 0xc1, /* rx enable, 8 bits (RxENABLE | Rx8)*/
-};
-
-/* These are for receiving and sending characters under the kgdb
- * source level kernel debugger.
- */
-void putDebugChar(char kgdb_char)
-{
- struct dec_zschannel *chan = zs_kgdbchan;
- while ((read_zsreg(chan, 0) & Tx_BUF_EMP) == 0)
- RECOVERY_DELAY;
- write_zsdata(chan, kgdb_char);
-}
-char getDebugChar(void)
-{
- struct dec_zschannel *chan = zs_kgdbchan;
- while((read_zsreg(chan, 0) & Rx_CH_AV) == 0)
- eieio(); /*barrier();*/
- return read_zsdata(chan);
-}
-void kgdb_interruptible(int yes)
-{
- struct dec_zschannel *chan = zs_kgdbchan;
- int one, nine;
- nine = read_zsreg(chan, 9);
- if (yes == 1) {
- one = EXT_INT_ENAB|RxINT_ALL;
- nine |= MIE;
- printk("turning serial ints on\n");
- } else {
- one = RxINT_DISAB;
- nine &= ~MIE;
- printk("turning serial ints off\n");
- }
- write_zsreg(chan, 1, one);
- write_zsreg(chan, 9, nine);
-}
-
-static int kgdbhook_init_channel(void *handle)
-{
- return 0;
-}
-
-static void kgdbhook_init_info(void *handle)
-{
-}
-
-static void kgdbhook_rx_char(void *handle, unsigned char ch, unsigned char fl)
-{
- struct dec_serial *info = handle;
-
- if (fl != TTY_NORMAL)
- return;
- if (ch == 0x03 || ch == '$')
- breakpoint();
-}
-
-/* This sets up the serial port we're using, and turns on
- * interrupts for that channel, so kgdb is usable once we're done.
- */
-static inline void kgdb_chaninit(struct dec_zschannel *ms, int intson, int bps)
-{
- int brg;
- int i, x;
- volatile char *sccc = ms->control;
- brg = BPS_TO_BRG(bps, zs_parms->clock/16);
- printk("setting bps on kgdb line to %d [brg=%x]\n", bps, brg);
- for (i = 20000; i != 0; --i) {
- x = *sccc; eieio();
- }
- for (i = 0; i < sizeof(scc_inittab); ++i) {
- write_zsreg(ms, scc_inittab[i], scc_inittab[i+1]);
- i++;
- }
-}
-/* This is called at boot time to prime the kgdb serial debugging
- * serial line. The 'tty_num' argument is 0 for /dev/ttya and 1
- * for /dev/ttyb which is determined in setup_arch() from the
- * boot command line flags.
- */
-struct dec_serial_hook zs_kgdbhook = {
- .init_channel = kgdbhook_init_channel,
- .init_info = kgdbhook_init_info,
- .rx_char = kgdbhook_rx_char,
- .cflags = B38400 | CS8 | CLOCAL,
-};
-
-void __init zs_kgdb_hook(int tty_num)
-{
- /* Find out how many Z8530 SCCs we have */
- if (zs_chain == 0)
- probe_sccs();
- zs_soft[tty_num].zs_channel = &zs_channels[tty_num];
- zs_kgdbchan = zs_soft[tty_num].zs_channel;
- zs_soft[tty_num].change_needed = 0;
- zs_soft[tty_num].clk_divisor = 16;
- zs_soft[tty_num].zs_baud = 38400;
- zs_soft[tty_num].hook = &zs_kgdbhook; /* This runs kgdb */
- /* Turn on transmitter/receiver at 8-bits/char */
- kgdb_chaninit(zs_soft[tty_num].zs_channel, 1, 38400);
- printk("KGDB: on channel %d initialized\n", tty_num);
- set_debug_traps(); /* init stub */
-}
-#endif /* ifdef CONFIG_KGDB */
diff --git a/drivers/tc/zs.h b/drivers/tc/zs.h
deleted file mode 100644
index 13512200ceba..000000000000
--- a/drivers/tc/zs.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * drivers/tc/zs.h: Definitions for the DECstation Z85C30 serial driver.
- *
- * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras.
- * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen.
- *
- * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au)
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 2004, 2005 Maciej W. Rozycki
- */
-#ifndef _DECSERIAL_H
-#define _DECSERIAL_H
-
-#include <asm/dec/serial.h>
-
-#define NUM_ZSREGS 16
-
-struct serial_struct {
- int type;
- int line;
- int port;
- int irq;
- int flags;
- int xmit_fifo_size;
- int custom_divisor;
- int baud_base;
- unsigned short close_delay;
- char reserved_char[2];
- int hub6;
- unsigned short closing_wait; /* time to wait before closing */
- unsigned short closing_wait2; /* no longer used... */
- int reserved[4];
-};
-
-/*
- * For the close wait times, 0 means wait forever for serial port to
- * flush its output. 65535 means don't wait at all.
- */
-#define ZILOG_CLOSING_WAIT_INF 0
-#define ZILOG_CLOSING_WAIT_NONE 65535
-
-/*
- * Definitions for ZILOG_struct (and serial_struct) flags field
- */
-#define ZILOG_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes
- on the callout port */
-#define ZILOG_FOURPORT	0x0002	/* Set OUT1, OUT2 per AST Fourport settings */
-#define ZILOG_SAK 0x0004 /* Secure Attention Key (Orange book) */
-#define ZILOG_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */
-
-#define ZILOG_SPD_MASK 0x0030
-#define ZILOG_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */
-
-#define ZILOG_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */
-#define ZILOG_SPD_CUST 0x0030 /* Use user-specified divisor */
-
-#define ZILOG_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */
-#define ZILOG_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */
-#define ZILOG_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */
-#define ZILOG_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */
-#define ZILOG_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */
-
-#define ZILOG_FLAGS 0x0FFF /* Possible legal ZILOG flags */
-#define ZILOG_USR_MASK 0x0430 /* Legal flags that non-privileged
- * users can set or reset */
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
-#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
-#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-
-/* Software state per channel */
-
-#ifdef __KERNEL__
-/*
- * This is our internal structure for each serial port's state.
- *
- * Many fields are paralleled by the structure used by the serial_struct
- * structure.
- *
- * For definitions of the flags field, see tty.h
- */
-
-struct dec_zschannel {
- volatile unsigned char *control;
- volatile unsigned char *data;
-
- /* Current write register values */
- unsigned char curregs[NUM_ZSREGS];
-};
-
-struct dec_serial {
- struct dec_serial *zs_next; /* For IRQ servicing chain. */
- struct dec_zschannel *zs_channel; /* Channel registers. */
- struct dec_zschannel *zs_chan_a; /* A side registers. */
- unsigned char read_reg_zero;
-
- struct dec_serial_hook *hook; /* Hook on this channel. */
- int tty_break; /* Set on BREAK condition. */
- int is_cons; /* Is this our console. */
- int tx_active; /* Char is being xmitted. */
- int tx_stopped; /* Output is suspended. */
-
- /*
- * We need to know the current clock divisor
- * to read the bps rate the chip has currently loaded.
- */
- int clk_divisor; /* May be 1, 16, 32, or 64. */
- int zs_baud;
-
- char change_needed;
-
- int magic;
- int baud_base;
- int port;
- int irq;
- int flags; /* Defined in tty.h. */
- int type; /* UART type. */
- struct tty_struct *tty;
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int xmit_fifo_size;
- int custom_divisor;
- int x_char; /* XON/XOFF character. */
- int close_delay;
- unsigned short closing_wait;
- unsigned short closing_wait2;
- unsigned long event;
- unsigned long last_active;
- int line;
- int count; /* # of fds on device. */
- int blocked_open; /* # of blocked opens. */
- unsigned char *xmit_buf;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- struct tasklet_struct tlet;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
-};
-
-
-#define SERIAL_MAGIC 0x5301
-
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE 4096
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define RS_EVENT_WRITE_WAKEUP 0
-
-#endif /* __KERNEL__ */
-
-/* Conversion routines to/from brg time constants from/to bits
- * per second.
- */
-#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
-#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
-
-/* The Zilog register set */
-
-#define FLAG 0x7e
-
-/* Write Register 0 */
-#define R0 0 /* Register selects */
-#define R1 1
-#define R2 2
-#define R3 3
-#define R4 4
-#define R5 5
-#define R6 6
-#define R7 7
-#define R8 8
-#define R9 9
-#define R10 10
-#define R11 11
-#define R12 12
-#define R13 13
-#define R14 14
-#define R15 15
-
-#define NULLCODE 0 /* Null Code */
-#define POINT_HIGH 0x8 /* Select upper half of registers */
-#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
-#define SEND_ABORT 0x18 /* HDLC Abort */
-#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
-#define RES_Tx_P 0x28 /* Reset TxINT Pending */
-#define ERR_RES 0x30 /* Error Reset */
-#define RES_H_IUS 0x38 /* Reset highest IUS */
-
-#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
-#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
-#define RES_EOM_L 0xC0 /* Reset EOM latch */
-
-/* Write Register 1 */
-
-#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
-#define TxINT_ENAB 0x2 /* Tx Int Enable */
-#define PAR_SPEC 0x4 /* Parity is special condition */
-
-#define RxINT_DISAB 0 /* Rx Int Disable */
-#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
-#define RxINT_ALL 0x10 /* Int on all Rx Characters or error */
-#define RxINT_ERR 0x18 /* Int on error only */
-#define RxINT_MASK 0x18
-
-#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
-#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
-#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
-
-/* Write Register #2 (Interrupt Vector) */
-
-/* Write Register 3 */
-
-#define RxENABLE 0x1 /* Rx Enable */
-#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
-#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
-#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
-#define ENT_HM 0x10 /* Enter Hunt Mode */
-#define AUTO_ENAB 0x20 /* Auto Enables */
-#define Rx5 0x0 /* Rx 5 Bits/Character */
-#define Rx7 0x40 /* Rx 7 Bits/Character */
-#define Rx6 0x80 /* Rx 6 Bits/Character */
-#define Rx8 0xc0 /* Rx 8 Bits/Character */
-#define RxNBITS_MASK 0xc0
-
-/* Write Register 4 */
-
-#define PAR_ENA 0x1 /* Parity Enable */
-#define PAR_EVEN 0x2 /* Parity Even/Odd* */
-
-#define SYNC_ENAB 0 /* Sync Modes Enable */
-#define SB1 0x4 /* 1 stop bit/char */
-#define SB15 0x8 /* 1.5 stop bits/char */
-#define SB2 0xc /* 2 stop bits/char */
-#define SB_MASK 0xc
-
-#define MONSYNC 0 /* 8 Bit Sync character */
-#define BISYNC 0x10 /* 16 bit sync character */
-#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
-#define EXTSYNC 0x30 /* External Sync Mode */
-
-#define X1CLK 0x0 /* x1 clock mode */
-#define X16CLK 0x40 /* x16 clock mode */
-#define X32CLK 0x80 /* x32 clock mode */
-#define X64CLK 0xC0 /* x64 clock mode */
-#define XCLK_MASK 0xC0
-
-/* Write Register 5 */
-
-#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
-#define RTS 0x2 /* RTS */
-#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
-#define TxENAB 0x8 /* Tx Enable */
-#define SND_BRK 0x10 /* Send Break */
-#define Tx5 0x0 /* Tx 5 bits (or less)/character */
-#define Tx7 0x20 /* Tx 7 bits/character */
-#define Tx6 0x40 /* Tx 6 bits/character */
-#define Tx8 0x60 /* Tx 8 bits/character */
-#define TxNBITS_MASK 0x60
-#define DTR 0x80 /* DTR */
-
-/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
-
-/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
-
-/* Write Register 8 (transmit buffer) */
-
-/* Write Register 9 (Master interrupt control) */
-#define VIS 1 /* Vector Includes Status */
-#define NV 2 /* No Vector */
-#define DLC 4 /* Disable Lower Chain */
-#define MIE 8 /* Master Interrupt Enable */
-#define STATHI 0x10 /* Status high */
-#define SOFTACK 0x20 /* Software Interrupt Acknowledge */
-#define NORESET 0 /* No reset on write to R9 */
-#define CHRB 0x40 /* Reset channel B */
-#define CHRA 0x80 /* Reset channel A */
-#define FHWRES 0xc0 /* Force hardware reset */
-
-/* Write Register 10 (misc control bits) */
-#define BIT6 1 /* 6 bit/8bit sync */
-#define LOOPMODE 2 /* SDLC Loop mode */
-#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
-#define MARKIDLE 8 /* Mark/flag on idle */
-#define GAOP 0x10 /* Go active on poll */
-#define NRZ 0 /* NRZ mode */
-#define NRZI 0x20 /* NRZI mode */
-#define FM1 0x40 /* FM1 (transition = 1) */
-#define FM0 0x60 /* FM0 (transition = 0) */
-#define CRCPS 0x80 /* CRC Preset I/O */
-
-/* Write Register 11 (Clock Mode control) */
-#define TRxCXT 0 /* TRxC = Xtal output */
-#define TRxCTC 1 /* TRxC = Transmit clock */
-#define TRxCBR 2 /* TRxC = BR Generator Output */
-#define TRxCDP 3 /* TRxC = DPLL output */
-#define TRxCOI 4 /* TRxC O/I */
-#define TCRTxCP 0 /* Transmit clock = RTxC pin */
-#define TCTRxCP 8 /* Transmit clock = TRxC pin */
-#define TCBR 0x10 /* Transmit clock = BR Generator output */
-#define TCDPLL 0x18 /* Transmit clock = DPLL output */
-#define RCRTxCP 0 /* Receive clock = RTxC pin */
-#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
-#define RCBR 0x40 /* Receive clock = BR Generator output */
-#define RCDPLL 0x60 /* Receive clock = DPLL output */
-#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
-
-/* Write Register 12 (lower byte of baud rate generator time constant) */
-
-/* Write Register 13 (upper byte of baud rate generator time constant) */
-
-/* Write Register 14 (Misc control bits) */
-#define BRENABL 1 /* Baud rate generator enable */
-#define BRSRC 2 /* Baud rate generator source */
-#define DTRREQ 4 /* DTR/Request function */
-#define AUTOECHO 8 /* Auto Echo */
-#define LOOPBAK 0x10 /* Local loopback */
-#define SEARCH 0x20 /* Enter search mode */
-#define RMC 0x40 /* Reset missing clock */
-#define DISDPLL 0x60 /* Disable DPLL */
-#define SSBR 0x80 /* Set DPLL source = BR generator */
-#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
-#define SFMM 0xc0 /* Set FM mode */
-#define SNRZI 0xe0 /* Set NRZI mode */
-
-/* Write Register 15 (external/status interrupt control) */
-#define ZCIE 2 /* Zero count IE */
-#define DCDIE 8 /* DCD IE */
-#define SYNCIE 0x10 /* Sync/hunt IE */
-#define CTSIE 0x20 /* CTS IE */
-#define TxUIE 0x40 /* Tx Underrun/EOM IE */
-#define BRKIE 0x80 /* Break/Abort IE */
-
-
-/* Read Register 0 */
-#define Rx_CH_AV 0x1 /* Rx Character Available */
-#define ZCOUNT 0x2 /* Zero count */
-#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
-#define DCD 0x8 /* DCD */
-#define SYNC_HUNT 0x10 /* Sync/hunt */
-#define CTS 0x20 /* CTS */
-#define TxEOM 0x40 /* Tx underrun */
-#define BRK_ABRT 0x80 /* Break/Abort */
-
-/* Read Register 1 */
-#define ALL_SNT 0x1 /* All sent */
-/* Residue Data for 8 Rx bits/char programmed */
-#define RES3 0x8 /* 0/3 */
-#define RES4 0x4 /* 0/4 */
-#define RES5 0xc /* 0/5 */
-#define RES6 0x2 /* 0/6 */
-#define RES7 0xa /* 0/7 */
-#define RES8 0x6 /* 0/8 */
-#define RES18 0xe /* 1/8 */
-#define RES28 0x0 /* 2/8 */
-/* Special Rx Condition Interrupts */
-#define PAR_ERR 0x10 /* Parity error */
-#define Rx_OVR 0x20 /* Rx Overrun Error */
-#define FRM_ERR 0x40 /* CRC/Framing Error */
-#define END_FR 0x80 /* End of Frame (SDLC) */
-
-/* Read Register 2 (channel b only) - Interrupt vector */
-
-/* Read Register 3 (interrupt pending register) ch a only */
-#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
-#define CHBTxIP 0x2 /* Channel B Tx IP */
-#define CHBRxIP 0x4 /* Channel B Rx IP */
-#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
-#define CHATxIP 0x10 /* Channel A Tx IP */
-#define CHARxIP 0x20 /* Channel A Rx IP */
-
-/* Read Register 8 (receive data register) */
-
-/* Read Register 10 (misc status bits) */
-#define ONLOOP 2 /* On loop */
-#define LOOPSEND 0x10 /* Loop sending */
-#define CLK2MIS 0x40 /* Two clocks missing */
-#define CLK1MIS 0x80 /* One clock missing */
-
-/* Read Register 12 (lower byte of baud rate generator constant) */
-
-/* Read Register 13 (upper byte of baud rate generator constant) */
-
-/* Read Register 15 (value of WR 15) */
-
-/* Misc macros */
-#define ZS_CLEARERR(channel) (write_zsreg(channel, 0, ERR_RES))
-#define ZS_CLEARFIFO(channel) do { volatile unsigned char garbage; \
- garbage = read_zsdata(channel); \
- garbage = read_zsdata(channel); \
- garbage = read_zsdata(channel); \
- } while(0)
-
-#endif /* !(_DECSERIAL_H) */
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index dd1d6a53f3c0..5f98f673f1b6 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -2,11 +2,9 @@
# Telephony device configuration
#
-menu "Telephony Support"
+menuconfig PHONE
+ tristate "Telephony support"
depends on HAS_IOMEM
-
-config PHONE
- tristate "Linux telephony support"
---help---
Say Y here if you have a telephony card, which for example allows
you to use a regular phone for voice-over-IP applications.
@@ -17,9 +15,11 @@ config PHONE
To compile this driver as a module, choose M here: the
module will be called phonedev.
+if PHONE
+
config PHONE_IXJ
tristate "QuickNet Internet LineJack/PhoneJack support"
- depends on PHONE
+	depends on ISA || PCI
---help---
Say M if you have a telephony card manufactured by Quicknet
Technologies, Inc. These include the Internet PhoneJACK and
@@ -44,5 +44,4 @@ config PHONE_IXJ_PCMCIA
cards manufactured by Quicknet Technologies, Inc. This changes the
card initialization code to work with the card manager daemon.
-endmenu
-
+endif # PHONE
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index c7b0a357b04a..49cd9793404f 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -3453,7 +3453,6 @@ static void ixj_write_frame(IXJ *j)
{
int cnt, frame_count, dly;
IXJ_WORD dat;
- BYTES blankword;
frame_count = 0;
if(j->flags.cidplay) {
@@ -3501,6 +3500,8 @@ static void ixj_write_frame(IXJ *j)
}
if (frame_count >= 1) {
if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
+ BYTES blankword;
+
switch (j->play_mode) {
case PLAYBACK_MODE_ULAW:
case PLAYBACK_MODE_ALAW:
@@ -3508,6 +3509,7 @@ static void ixj_write_frame(IXJ *j)
break;
case PLAYBACK_MODE_8LINEAR:
case PLAYBACK_MODE_16LINEAR:
+ default:
blankword.low = blankword.high = 0x00;
break;
case PLAYBACK_MODE_8LINEAR_WSS:
@@ -3531,6 +3533,8 @@ static void ixj_write_frame(IXJ *j)
j->flags.play_first_frame = 0;
} else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
for (cnt = 0; cnt < 24; cnt++) {
+ BYTES blankword;
+
if(cnt == 12) {
blankword.low = 0x02;
blankword.high = 0x00;
@@ -4868,6 +4872,7 @@ static char daa_CR_read(IXJ *j, int cr)
bytes.high = 0xB0 + cr;
break;
case SOP_PU_PULSEDIALING:
+ default:
bytes.high = 0xF0 + cr;
break;
}
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index 3e658dc7c2d8..ff9a29b76336 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -45,11 +45,10 @@ static int ixj_probe(struct pcmcia_device *p_dev)
p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
p_dev->io.IOAddrLines = 3;
p_dev->conf.IntType = INT_MEMORY_AND_IO;
- p_dev->priv = kmalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
+ p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL);
if (!p_dev->priv) {
return -ENOMEM;
}
- memset(p_dev->priv, 0, sizeof(struct ixj_info_t));
return ixj_config(p_dev);
}
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
new file mode 100644
index 000000000000..b778ed71f636
--- /dev/null
+++ b/drivers/uio/Kconfig
@@ -0,0 +1,29 @@
+menu "Userspace I/O"
+ depends on !S390
+
+config UIO
+ tristate "Userspace I/O drivers"
+ default n
+ help
+ Enable this to allow the userspace driver core code to be
+ built. This code allows userspace programs easy access to
+ kernel interrupts and memory locations, allowing some drivers
+ to be written in userspace. Note that a small kernel driver
+ is also required for interrupt handling to work properly.
+
+ If you don't know what to do here, say N.
+
+config UIO_CIF
+ tristate "generic Hilscher CIF Card driver"
+ depends on UIO && PCI
+ default n
+ help
+ Driver for Hilscher CIF DeviceNet and Profibus cards. This
+ driver requires a userspace component that handles all of the
+ heavy lifting and can be found at:
+ http://www.osadl.org/projects/downloads/UIO/user/cif-*
+
+ To compile this driver as a module, choose M here: the module
+ will be called uio_cif.
+
+endmenu
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
new file mode 100644
index 000000000000..7fecfb459da5
--- /dev/null
+++ b/drivers/uio/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_UIO) += uio.o
+obj-$(CONFIG_UIO_CIF) += uio_cif.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
new file mode 100644
index 000000000000..865f32b63b5c
--- /dev/null
+++ b/drivers/uio/uio.c
@@ -0,0 +1,701 @@
+/*
+ * drivers/uio/uio.c
+ *
+ * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
+ * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Userspace IO
+ *
+ * Base Functions
+ *
+ * Licensed under the GPLv2 only.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/uio_driver.h>
+
+#define UIO_MAX_DEVICES 255
+
+struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+ atomic_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+ int vma_count;
+ struct uio_info *info;
+ struct kset map_attr_kset;
+};
+
+static int uio_major;
+static DEFINE_IDR(uio_idr);
+static struct file_operations uio_fops;
+
+/* UIO class infrastructure */
+static struct uio_class {
+ struct kref kref;
+ struct class *class;
+} *uio_class;
+
+/*
+ * attributes
+ */
+
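+/*
+ * Each memory mapping of a UIO device is described to userspace through
+ * sysfs: a "maps" kset below the device holds one mapX directory per
+ * uio_mem, with read-only "addr" and "size" attributes served by
+ * map_attr_show() below.
+ */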
+static struct attribute attr_addr = {
+ .name = "addr",
+ .mode = S_IRUGO,
+};
+
+static struct attribute attr_size = {
+ .name = "size",
+ .mode = S_IRUGO,
+};
+
+static struct attribute* map_attrs[] = {
+ &attr_addr, &attr_size, NULL
+};
+
+static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct uio_mem *mem = container_of(kobj, struct uio_mem, kobj);
+
+ if (strncmp(attr->name,"addr",4) == 0)
+ return sprintf(buf, "0x%lx\n", mem->addr);
+
+ if (strncmp(attr->name,"size",4) == 0)
+ return sprintf(buf, "0x%lx\n", mem->size);
+
+ return -ENODEV;
+}
+
+static void map_attr_release(struct kobject *kobj)
+{
+ /* TODO ??? */
+}
+
+static struct sysfs_ops map_attr_ops = {
+ .show = map_attr_show,
+};
+
+static struct kobj_type map_attr_type = {
+ .release = map_attr_release,
+ .sysfs_ops = &map_attr_ops,
+ .default_attrs = map_attrs,
+};
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uio_device *idev = dev_get_drvdata(dev);
+ if (idev)
+ return sprintf(buf, "%s\n", idev->info->name);
+ else
+ return -ENODEV;
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static ssize_t show_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uio_device *idev = dev_get_drvdata(dev);
+ if (idev)
+ return sprintf(buf, "%s\n", idev->info->version);
+ else
+ return -ENODEV;
+}
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+
+static ssize_t show_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uio_device *idev = dev_get_drvdata(dev);
+ if (idev)
+ return sprintf(buf, "%u\n",
+ (unsigned int)atomic_read(&idev->event));
+ else
+ return -ENODEV;
+}
+static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);
+
+static struct attribute *uio_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_version.attr,
+ &dev_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group uio_attr_grp = {
+ .attrs = uio_attrs,
+};
+
+/*
+ * device functions
+ */
+static int uio_dev_add_attributes(struct uio_device *idev)
+{
+ int ret;
+ int mi;
+ int map_found = 0;
+ struct uio_mem *mem;
+
+ ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
+ if (ret)
+ goto err_group;
+
+ for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
+ mem = &idev->info->mem[mi];
+ if (mem->size == 0)
+ break;
+ if (!map_found) {
+ map_found = 1;
+ kobject_set_name(&idev->map_attr_kset.kobj,"maps");
+ idev->map_attr_kset.ktype = &map_attr_type;
+ idev->map_attr_kset.kobj.parent = &idev->dev->kobj;
+ ret = kset_register(&idev->map_attr_kset);
+ if (ret)
+ goto err_remove_group;
+ }
+ kobject_init(&mem->kobj);
+ kobject_set_name(&mem->kobj,"map%d",mi);
+ mem->kobj.parent = &idev->map_attr_kset.kobj;
+ mem->kobj.kset = &idev->map_attr_kset;
+ ret = kobject_add(&mem->kobj);
+ if (ret)
+ goto err_remove_maps;
+ }
+
+ return 0;
+
+err_remove_maps:
+ for (mi--; mi>=0; mi--) {
+ mem = &idev->info->mem[mi];
+ kobject_unregister(&mem->kobj);
+ }
+ kset_unregister(&idev->map_attr_kset); /* Needed ? */
+err_remove_group:
+ sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
+err_group:
+ dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
+ return ret;
+}
+
+static void uio_dev_del_attributes(struct uio_device *idev)
+{
+ int mi;
+ struct uio_mem *mem;
+ for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
+ mem = &idev->info->mem[mi];
+ if (mem->size == 0)
+ break;
+ kobject_unregister(&mem->kobj);
+ }
+ kset_unregister(&idev->map_attr_kset);
+ sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
+}
+
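+/*
+ * Minor numbers come from an IDR; the uio_device pointer is stored there
+ * so uio_open() can look the device up again from the inode's minor number.
+ */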
+static int uio_get_minor(struct uio_device *idev)
+{
+ static DEFINE_MUTEX(minor_lock);
+ int retval = -ENOMEM;
+ int id;
+
+ mutex_lock(&minor_lock);
+ if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
+ goto exit;
+
+ retval = idr_get_new(&uio_idr, idev, &id);
+ if (retval < 0) {
+ if (retval == -EAGAIN)
+ retval = -ENOMEM;
+ goto exit;
+ }
+ idev->minor = id & MAX_ID_MASK;
+exit:
+ mutex_unlock(&minor_lock);
+ return retval;
+}
+
+static void uio_free_minor(struct uio_device *idev)
+{
+ idr_remove(&uio_idr, idev->minor);
+}
+
+/**
+ * uio_event_notify - trigger an interrupt event
+ * @info: UIO device capabilities
+ */
+void uio_event_notify(struct uio_info *info)
+{
+ struct uio_device *idev = info->uio_dev;
+
+ atomic_inc(&idev->event);
+ wake_up_interruptible(&idev->wait);
+ kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
+}
+EXPORT_SYMBOL_GPL(uio_event_notify);
+
+/**
+ * uio_interrupt - hardware interrupt handler
+ * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
+ * @dev_id: Pointer to the devices uio_device structure
+ */
+static irqreturn_t uio_interrupt(int irq, void *dev_id)
+{
+ struct uio_device *idev = (struct uio_device *)dev_id;
+ irqreturn_t ret = idev->info->handler(irq, idev->info);
+
+ if (ret == IRQ_HANDLED)
+ uio_event_notify(idev->info);
+
+ return ret;
+}
+
+struct uio_listener {
+ struct uio_device *dev;
+ s32 event_count;
+};
+
+static int uio_open(struct inode *inode, struct file *filep)
+{
+ struct uio_device *idev;
+ struct uio_listener *listener;
+ int ret = 0;
+
+ idev = idr_find(&uio_idr, iminor(inode));
+ if (!idev)
+ return -ENODEV;
+
+ listener = kmalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return -ENOMEM;
+
+ listener->dev = idev;
+ listener->event_count = atomic_read(&idev->event);
+ filep->private_data = listener;
+
+ if (idev->info->open) {
+ if (!try_module_get(idev->owner))
+ return -ENODEV;
+ ret = idev->info->open(idev->info, inode);
+ module_put(idev->owner);
+ }
+
+ if (ret)
+ kfree(listener);
+
+ return ret;
+}
+
+static int uio_fasync(int fd, struct file *filep, int on)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+
+ return fasync_helper(fd, filep, on, &idev->async_queue);
+}
+
+static int uio_release(struct inode *inode, struct file *filep)
+{
+ int ret = 0;
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+
+ if (idev->info->release) {
+ if (!try_module_get(idev->owner))
+ return -ENODEV;
+ ret = idev->info->release(idev->info, inode);
+ module_put(idev->owner);
+ }
+ if (filep->f_flags & FASYNC)
+ ret = uio_fasync(-1, filep, 0);
+ kfree(listener);
+ return ret;
+}
+
+static unsigned int uio_poll(struct file *filep, poll_table *wait)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+
+ if (idev->info->irq == UIO_IRQ_NONE)
+ return -EIO;
+
+ poll_wait(filep, &idev->wait, wait);
+ if (listener->event_count != atomic_read(&idev->event))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
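+/*
+ * read() returns the interrupt event counter as a single s32.  It blocks
+ * until the counter differs from the value this listener last saw, unless
+ * the file was opened with O_NONBLOCK.
+ */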
+static ssize_t uio_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+ DECLARE_WAITQUEUE(wait, current);
+ ssize_t retval;
+ s32 event_count;
+
+ if (idev->info->irq == UIO_IRQ_NONE)
+ return -EIO;
+
+ if (count != sizeof(s32))
+ return -EINVAL;
+
+ add_wait_queue(&idev->wait, &wait);
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ event_count = atomic_read(&idev->event);
+ if (event_count != listener->event_count) {
+ if (copy_to_user(buf, &event_count, count))
+ retval = -EFAULT;
+ else {
+ listener->event_count = event_count;
+ retval = count;
+ }
+ break;
+ }
+
+ if (filep->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ } while (1);
+
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&idev->wait, &wait);
+
+ return retval;
+}
+
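+/*
+ * mmap() selects a mapping by page offset: an offset of N pages refers to
+ * mem[N] of the uio_info, not to a byte offset inside one mapping.
+ */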
+static int uio_find_mem_index(struct vm_area_struct *vma)
+{
+ int mi;
+ struct uio_device *idev = vma->vm_private_data;
+
+ for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
+ if (idev->info->mem[mi].size == 0)
+ return -1;
+ if (vma->vm_pgoff == mi)
+ return mi;
+ }
+ return -1;
+}
+
+static void uio_vma_open(struct vm_area_struct *vma)
+{
+ struct uio_device *idev = vma->vm_private_data;
+ idev->vma_count++;
+}
+
+static void uio_vma_close(struct vm_area_struct *vma)
+{
+ struct uio_device *idev = vma->vm_private_data;
+ idev->vma_count--;
+}
+
+static struct page *uio_vma_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct uio_device *idev = vma->vm_private_data;
+ struct page* page = NOPAGE_SIGBUS;
+
+ int mi = uio_find_mem_index(vma);
+ if (mi < 0)
+ return page;
+
+ if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
+ page = virt_to_page(idev->info->mem[mi].addr);
+ else
+ page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
+ get_page(page);
+ if (type)
+ *type = VM_FAULT_MINOR;
+ return page;
+}
+
+static struct vm_operations_struct uio_vm_ops = {
+ .open = uio_vma_open,
+ .close = uio_vma_close,
+ .nopage = uio_vma_nopage,
+};
+
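+/*
+ * UIO_MEM_PHYS regions are remapped in one go with remap_pfn_range();
+ * UIO_MEM_LOGICAL and UIO_MEM_VIRTUAL regions are faulted in page by page
+ * through the nopage handler above.
+ */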
+static int uio_mmap_physical(struct vm_area_struct *vma)
+{
+ struct uio_device *idev = vma->vm_private_data;
+ int mi = uio_find_mem_index(vma);
+ if (mi < 0)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ idev->info->mem[mi].addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static int uio_mmap_logical(struct vm_area_struct *vma)
+{
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &uio_vm_ops;
+ uio_vma_open(vma);
+ return 0;
+}
+
+static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+ int mi;
+ unsigned long requested_pages, actual_pages;
+ int ret = 0;
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+
+ vma->vm_private_data = idev;
+
+ mi = uio_find_mem_index(vma);
+ if (mi < 0)
+ return -EINVAL;
+
+ requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ actual_pages = (idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ if (requested_pages > actual_pages)
+ return -EINVAL;
+
+ if (idev->info->mmap) {
+ if (!try_module_get(idev->owner))
+ return -ENODEV;
+ ret = idev->info->mmap(idev->info, vma);
+ module_put(idev->owner);
+ return ret;
+ }
+
+ switch (idev->info->mem[mi].memtype) {
+ case UIO_MEM_PHYS:
+ return uio_mmap_physical(vma);
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ return uio_mmap_logical(vma);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct file_operations uio_fops = {
+ .owner = THIS_MODULE,
+ .open = uio_open,
+ .release = uio_release,
+ .read = uio_read,
+ .mmap = uio_mmap,
+ .poll = uio_poll,
+ .fasync = uio_fasync,
+};
+
+static int uio_major_init(void)
+{
+ uio_major = register_chrdev(0, "uio", &uio_fops);
+ if (uio_major < 0)
+ return uio_major;
+ return 0;
+}
+
+static void uio_major_cleanup(void)
+{
+ unregister_chrdev(uio_major, "uio");
+}
+
+static int init_uio_class(void)
+{
+ int ret = 0;
+
+ if (uio_class != NULL) {
+ kref_get(&uio_class->kref);
+ goto exit;
+ }
+
+ /* This is the first time in here, set everything up properly */
+ ret = uio_major_init();
+ if (ret)
+ goto exit;
+
+ uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
+ if (!uio_class) {
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ kref_init(&uio_class->kref);
+ uio_class->class = class_create(THIS_MODULE, "uio");
+ if (IS_ERR(uio_class->class)) {
+		ret = PTR_ERR(uio_class->class);
+ printk(KERN_ERR "class_create failed for uio\n");
+ goto err_class_create;
+ }
+ return 0;
+
+err_class_create:
+ kfree(uio_class);
+ uio_class = NULL;
+err_kzalloc:
+ uio_major_cleanup();
+exit:
+ return ret;
+}
+
+static void release_uio_class(struct kref *kref)
+{
+ /* Ok, we cheat as we know we only have one uio_class */
+ class_destroy(uio_class->class);
+ kfree(uio_class);
+ uio_major_cleanup();
+ uio_class = NULL;
+}
+
+static void uio_class_destroy(void)
+{
+ if (uio_class)
+ kref_put(&uio_class->kref, release_uio_class);
+}
+
+/**
+ * uio_register_device - register a new userspace IO device
+ * @owner: module that creates the new device
+ * @parent: parent device
+ * @info: UIO device capabilities
+ *
+ * returns zero on success or a negative error code.
+ */
+int __uio_register_device(struct module *owner,
+ struct device *parent,
+ struct uio_info *info)
+{
+ struct uio_device *idev;
+ int ret = 0;
+
+ if (!parent || !info || !info->name || !info->version)
+ return -EINVAL;
+
+ info->uio_dev = NULL;
+
+ ret = init_uio_class();
+ if (ret)
+ return ret;
+
+ idev = kzalloc(sizeof(*idev), GFP_KERNEL);
+ if (!idev) {
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ idev->owner = owner;
+ idev->info = info;
+ init_waitqueue_head(&idev->wait);
+ atomic_set(&idev->event, 0);
+
+ ret = uio_get_minor(idev);
+ if (ret)
+ goto err_get_minor;
+
+ idev->dev = device_create(uio_class->class, parent,
+ MKDEV(uio_major, idev->minor),
+ "uio%d", idev->minor);
+ if (IS_ERR(idev->dev)) {
+ printk(KERN_ERR "UIO: device register failed\n");
+ ret = PTR_ERR(idev->dev);
+ goto err_device_create;
+ }
+ dev_set_drvdata(idev->dev, idev);
+
+ ret = uio_dev_add_attributes(idev);
+ if (ret)
+ goto err_uio_dev_add_attributes;
+
+ info->uio_dev = idev;
+
+ if (idev->info->irq >= 0) {
+ ret = request_irq(idev->info->irq, uio_interrupt,
+ idev->info->irq_flags, idev->info->name, idev);
+ if (ret)
+ goto err_request_irq;
+ }
+
+ return 0;
+
+err_request_irq:
+ uio_dev_del_attributes(idev);
+err_uio_dev_add_attributes:
+ device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
+err_device_create:
+ uio_free_minor(idev);
+err_get_minor:
+ kfree(idev);
+err_kzalloc:
+ uio_class_destroy();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__uio_register_device);
+
+/**
+ * uio_unregister_device - unregister a userspace IO device
+ * @info: UIO device capabilities
+ *
+ */
+void uio_unregister_device(struct uio_info *info)
+{
+ struct uio_device *idev;
+
+ if (!info || !info->uio_dev)
+ return;
+
+ idev = info->uio_dev;
+
+ uio_free_minor(idev);
+
+ if (info->irq >= 0)
+ free_irq(info->irq, idev);
+
+ uio_dev_del_attributes(idev);
+
+ dev_set_drvdata(idev->dev, NULL);
+ device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
+ kfree(idev);
+ uio_class_destroy();
+
+ return;
+}
+EXPORT_SYMBOL_GPL(uio_unregister_device);
+
+static int __init uio_init(void)
+{
+ return 0;
+}
+
+static void __exit uio_exit(void)
+{
+}
+
+module_init(uio_init)
+module_exit(uio_exit)
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
new file mode 100644
index 000000000000..838bae460831
--- /dev/null
+++ b/drivers/uio/uio_cif.c
@@ -0,0 +1,156 @@
+/*
+ * UIO Hilscher CIF card driver
+ *
+ * (C) 2007 Hans J. Koch <hjk@linutronix.de>
+ * Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de>
+ *
+ * Licensed under GPL version 2 only.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+
+#include <asm/io.h>
+
+#ifndef PCI_DEVICE_ID_PLX_9030
+#define PCI_DEVICE_ID_PLX_9030 0x9030
+#endif
+
+#define PLX9030_INTCSR 0x4C
+#define INTSCR_INT1_ENABLE 0x01
+#define INTSCR_INT1_STATUS 0x04
+#define INT1_ENABLED_AND_ACTIVE (INTSCR_INT1_ENABLE | INTSCR_INT1_STATUS)
+
+#define PCI_SUBVENDOR_ID_PEP 0x1518
+#define CIF_SUBDEVICE_PROFIBUS 0x430
+#define CIF_SUBDEVICE_DEVICENET 0x432
+
+
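+/*
+ * The CIF card interrupts through INT1 of the PLX 9030 bridge.  The handler
+ * claims the interrupt only if INT1 is both enabled and active in INTCSR,
+ * and acknowledges it by clearing the enable bit.
+ */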
+static irqreturn_t hilscher_handler(int irq, struct uio_info *dev_info)
+{
+ void __iomem *plx_intscr = dev_info->mem[0].internal_addr
+ + PLX9030_INTCSR;
+
+ if ((ioread8(plx_intscr) & INT1_ENABLED_AND_ACTIVE)
+ != INT1_ENABLED_AND_ACTIVE)
+ return IRQ_NONE;
+
+ /* Disable interrupt */
+ iowrite8(ioread8(plx_intscr) & ~INTSCR_INT1_ENABLE, plx_intscr);
+ return IRQ_HANDLED;
+}
+
+static int __devinit hilscher_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ struct uio_info *info;
+
+ info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (pci_enable_device(dev))
+ goto out_free;
+
+ if (pci_request_regions(dev, "hilscher"))
+ goto out_disable;
+
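+	/*
+	 * BAR 0 holds the PLX 9030 local configuration registers (ioremapped
+	 * below for the interrupt handler); BAR 2 holds the card memory.
+	 * Both are exported to userspace as UIO_MEM_PHYS mappings.
+	 */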
+ info->mem[0].addr = pci_resource_start(dev, 0);
+ if (!info->mem[0].addr)
+ goto out_release;
+ info->mem[0].internal_addr = ioremap(pci_resource_start(dev, 0),
+ pci_resource_len(dev, 0));
+ if (!info->mem[0].internal_addr)
+ goto out_release;
+
+ info->mem[0].size = pci_resource_len(dev, 0);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+ info->mem[1].addr = pci_resource_start(dev, 2);
+ info->mem[1].size = pci_resource_len(dev, 2);
+ info->mem[1].memtype = UIO_MEM_PHYS;
+ switch (id->subdevice) {
+ case CIF_SUBDEVICE_PROFIBUS:
+ info->name = "CIF_Profibus";
+ break;
+ case CIF_SUBDEVICE_DEVICENET:
+ info->name = "CIF_Devicenet";
+ break;
+ default:
+ info->name = "CIF_???";
+ }
+ info->version = "0.0.1";
+ info->irq = dev->irq;
+ info->irq_flags = IRQF_DISABLED | IRQF_SHARED;
+ info->handler = hilscher_handler;
+
+ if (uio_register_device(&dev->dev, info))
+ goto out_unmap;
+
+ pci_set_drvdata(dev, info);
+
+ return 0;
+out_unmap:
+ iounmap(info->mem[0].internal_addr);
+out_release:
+ pci_release_regions(dev);
+out_disable:
+ pci_disable_device(dev);
+out_free:
+ kfree (info);
+ return -ENODEV;
+}
+
+static void hilscher_pci_remove(struct pci_dev *dev)
+{
+ struct uio_info *info = pci_get_drvdata(dev);
+
+ uio_unregister_device(info);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+ iounmap(info->mem[0].internal_addr);
+
+ kfree (info);
+}
+
+static struct pci_device_id hilscher_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9030,
+ .subvendor = PCI_SUBVENDOR_ID_PEP,
+ .subdevice = CIF_SUBDEVICE_PROFIBUS,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_9030,
+ .subvendor = PCI_SUBVENDOR_ID_PEP,
+ .subdevice = CIF_SUBDEVICE_DEVICENET,
+ },
+ { 0, }
+};
+
+static struct pci_driver hilscher_pci_driver = {
+ .name = "hilscher",
+ .id_table = hilscher_pci_ids,
+ .probe = hilscher_pci_probe,
+ .remove = hilscher_pci_remove,
+};
+
+static int __init hilscher_init_module(void)
+{
+ return pci_register_driver(&hilscher_pci_driver);
+}
+
+static void __exit hilscher_exit_module(void)
+{
+ pci_unregister_driver(&hilscher_pci_driver);
+}
+
+module_init(hilscher_init_module);
+module_exit(hilscher_exit_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hans J. Koch, Benedikt Spranger");
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 15499b7e33f4..7dd73546bf43 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -2,8 +2,12 @@
# USB device configuration
#
-menu "USB support"
+menuconfig USB_SUPPORT
+ bool "USB support"
depends on HAS_IOMEM
+ default y
+
+if USB_SUPPORT
# Host-side USB depends on having a host controller
# NOTE: dummy_hcd is always an option, but it's ignored here ...
@@ -12,6 +16,7 @@ config USB_ARCH_HAS_HCD
boolean
default y if USB_ARCH_HAS_OHCI
default y if USB_ARCH_HAS_EHCI
+ default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
default PCI
@@ -130,5 +135,4 @@ source "drivers/usb/atm/Kconfig"
source "drivers/usb/gadget/Kconfig"
-endmenu
-
+endif # USB_SUPPORT
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 72464b586990..befff5f9d58c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_USB_OHCI_HCD) += host/
obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_SL811_HCD) += host/
obj-$(CONFIG_USB_U132_HCD) += host/
-obj-$(CONFIG_USB_OHCI_AT91) += host/
+obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_ACM) += class/
obj-$(CONFIG_USB_PRINTER) += class/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 8bcf7fe1dd80..02c52f8d5dbf 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -171,7 +171,7 @@ struct cxacru_data {
struct delayed_work poll_work;
u32 card_info[CXINF_MAX];
struct mutex poll_state_serialize;
- int poll_state;
+ enum cxacru_poll_state poll_state;
 	/* control handles */
struct mutex cm_serialize;
@@ -226,58 +226,48 @@ static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf)
static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf)
{
- if (unlikely(value < 0)) {
- return snprintf(buf, PAGE_SIZE, "%d.%02u\n",
- value / 100, -value % 100);
- } else {
- return snprintf(buf, PAGE_SIZE, "%d.%02u\n",
- value / 100, value % 100);
- }
+ return snprintf(buf, PAGE_SIZE, "%d.%02u\n",
+ value / 100, abs(value) % 100);
}
static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
{
- switch (value) {
- case 0: return snprintf(buf, PAGE_SIZE, "no\n");
- case 1: return snprintf(buf, PAGE_SIZE, "yes\n");
- default: return 0;
- }
+ static char *str[] = { "no", "yes" };
+ if (unlikely(value >= ARRAY_SIZE(str)))
+ return snprintf(buf, PAGE_SIZE, "%u\n", value);
+ return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
{
- switch (value) {
- case 1: return snprintf(buf, PAGE_SIZE, "not connected\n");
- case 2: return snprintf(buf, PAGE_SIZE, "connected\n");
- case 3: return snprintf(buf, PAGE_SIZE, "lost\n");
- default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
- }
+ static char *str[] = { NULL, "not connected", "connected", "lost" };
+ if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
+ return snprintf(buf, PAGE_SIZE, "%u\n", value);
+ return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
{
- switch (value) {
- case 0: return snprintf(buf, PAGE_SIZE, "down\n");
- case 1: return snprintf(buf, PAGE_SIZE, "attempting to activate\n");
- case 2: return snprintf(buf, PAGE_SIZE, "training\n");
- case 3: return snprintf(buf, PAGE_SIZE, "channel analysis\n");
- case 4: return snprintf(buf, PAGE_SIZE, "exchange\n");
- case 5: return snprintf(buf, PAGE_SIZE, "up\n");
- case 6: return snprintf(buf, PAGE_SIZE, "waiting\n");
- case 7: return snprintf(buf, PAGE_SIZE, "initialising\n");
- default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
- }
+ static char *str[] = { "down", "attempting to activate",
+ "training", "channel analysis", "exchange", "up",
+ "waiting", "initialising"
+ };
+ if (unlikely(value >= ARRAY_SIZE(str)))
+ return snprintf(buf, PAGE_SIZE, "%u\n", value);
+ return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
{
- switch (value) {
- case 0: return 0;
- case 1: return snprintf(buf, PAGE_SIZE, "ANSI T1.413\n");
- case 2: return snprintf(buf, PAGE_SIZE, "ITU-T G.992.1 (G.DMT)\n");
- case 3: return snprintf(buf, PAGE_SIZE, "ITU-T G.992.2 (G.LITE)\n");
- default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
- }
+ static char *str[] = {
+ NULL,
+ "ANSI T1.413",
+ "ITU-T G.992.1 (G.DMT)",
+ "ITU-T G.992.2 (G.LITE)"
+ };
+ if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
+ return snprintf(buf, PAGE_SIZE, "%u\n", value);
+ return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
/*
@@ -308,11 +298,10 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
struct cxacru_data *instance = usbatm_instance->driver_data;
u32 value = instance->card_info[CXINF_LINE_STARTABLE];
- switch (value) {
- case 0: return snprintf(buf, PAGE_SIZE, "running\n");
- case 1: return snprintf(buf, PAGE_SIZE, "stopped\n");
- default: return snprintf(buf, PAGE_SIZE, "unknown (%u)\n", value);
- }
+ static char *str[] = { "running", "stopped" };
+ if (unlikely(value >= ARRAY_SIZE(str)))
+ return snprintf(buf, PAGE_SIZE, "%u\n", value);
+ return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
@@ -467,7 +456,7 @@ static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
int* actual_length)
{
struct timer_list timer;
- int status;
+ int status = urb->status;
init_timer(&timer);
timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
@@ -475,7 +464,6 @@ static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
timer.function = cxacru_timeout_kill;
add_timer(&timer);
wait_for_completion(done);
- status = urb->status;
del_timer_sync(&timer);
if (actual_length)
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 638b8009b3bc..eb0615abff68 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -612,7 +612,8 @@ static void speedtch_handle_int(struct urb *int_urb)
struct speedtch_instance_data *instance = int_urb->context;
struct usbatm_data *usbatm = instance->usbatm;
unsigned int count = int_urb->actual_length;
- int ret = int_urb->status;
+ int status = int_urb->status;
+ int ret;
/* The magic interrupt for "up state" */
static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
@@ -621,8 +622,8 @@ static void speedtch_handle_int(struct urb *int_urb)
atm_dbg(usbatm, "%s entered\n", __func__);
- if (ret < 0) {
- atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, ret);
+ if (status < 0) {
+ atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status);
goto fail;
}
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4973e147bc79..a1a1c9d467e0 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1168,6 +1168,7 @@ static int uea_kthread(void *data)
struct uea_softc *sc = data;
int ret = -EAGAIN;
+ set_freezable();
uea_enters(INS_TO_USBDEV(sc));
while (!kthread_should_stop()) {
if (ret < 0 || sc->reset)
@@ -1307,11 +1308,13 @@ static void uea_intr(struct urb *urb)
{
struct uea_softc *sc = urb->context;
struct intr_pkt *intr = urb->transfer_buffer;
+ int status = urb->status;
+
uea_enters(INS_TO_USBDEV(sc));
- if (unlikely(urb->status < 0)) {
+ if (unlikely(status < 0)) {
uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n",
- urb->status);
+ status);
return;
}
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 11e9b15ca45a..e717f5b1caee 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -257,9 +257,10 @@ static void usbatm_complete(struct urb *urb)
{
struct usbatm_channel *channel = urb->context;
unsigned long flags;
+ int status = urb->status;
vdbg("%s: urb 0x%p, status %d, actual_length %d",
- __func__, urb, urb->status, urb->actual_length);
+ __func__, urb, status, urb->actual_length);
/* usually in_interrupt(), but not always */
spin_lock_irqsave(&channel->lock, flags);
@@ -269,16 +270,16 @@ static void usbatm_complete(struct urb *urb)
spin_unlock_irqrestore(&channel->lock, flags);
- if (unlikely(urb->status) &&
+ if (unlikely(status) &&
(!(channel->usbatm->flags & UDSL_IGNORE_EILSEQ) ||
- urb->status != -EILSEQ ))
+ status != -EILSEQ ))
{
- if (urb->status == -ESHUTDOWN)
+ if (status == -ESHUTDOWN)
return;
if (printk_ratelimit())
atm_warn(channel->usbatm, "%s: urb 0x%p failed (%d)!\n",
- __func__, urb, urb->status);
+ __func__, urb, status);
/* throttle processing in case of an error */
mod_timer(&channel->delay, jiffies + msecs_to_jiffies(THROTTLE_MSECS));
} else
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0081c1d12687..fe940e0536e0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -257,9 +257,10 @@ static void acm_ctrl_irq(struct urb *urb)
struct usb_cdc_notification *dr = urb->transfer_buffer;
unsigned char *data;
int newctrl;
- int status;
+ int retval;
+ int status = urb->status;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -267,10 +268,10 @@ static void acm_ctrl_irq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d", __FUNCTION__, status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero urb status received: %d", __FUNCTION__, status);
goto exit;
}
@@ -311,10 +312,10 @@ static void acm_ctrl_irq(struct urb *urb)
break;
}
exit:
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb (urb, GFP_ATOMIC);
+ if (retval)
err ("%s - usb_submit_urb failed with result %d",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
/* data interface returns incoming bytes, or we got unthrottled */
@@ -324,7 +325,8 @@ static void acm_read_bulk(struct urb *urb)
struct acm_ru *rcv = urb->context;
struct acm *acm = rcv->instance;
int status = urb->status;
- dbg("Entering acm_read_bulk with status %d", urb->status);
+
+ dbg("Entering acm_read_bulk with status %d", status);
if (!ACM_READY(acm))
return;
@@ -1157,6 +1159,9 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x0482, 0x0203), /* KYOCERA AH-K3001V */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 6778f9af7943..5192cd9356de 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1,5 +1,5 @@
/*
- * usblp.c Version 0.13
+ * usblp.c
*
* Copyright (c) 1999 Michael Gee <michael@linuxspecific.com>
* Copyright (c) 1999 Pavel Machek <pavel@suse.cz>
@@ -61,11 +61,11 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.13"
#define DRIVER_AUTHOR "Michael Gee, Pavel Machek, Vojtech Pavlik, Randy Dunlap, Pete Zaitcev, David Paschal"
#define DRIVER_DESC "USB Printer Device Class driver"
#define USBLP_BUF_SIZE 8192
+#define USBLP_BUF_SIZE_IN 1024
#define USBLP_DEVICE_ID_SIZE 1024
/* ioctls: */
@@ -127,14 +127,22 @@ MFG:HEWLETT-PACKARD;MDL:DESKJET 970C;CMD:MLC,PCL,PML;CLASS:PRINTER;DESCRIPTION:H
*/
#define STATUS_BUF_SIZE 8
+/*
+ * Locks down the locking order:
+ * ->wmut locks wstatus.
+ * ->mut locks the whole usblp, except [rw]complete, and thus, by indirection,
+ * [rw]status. We only touch status when we know the side idle.
+ * ->lock locks what interrupt accesses.
+ */
struct usblp {
struct usb_device *dev; /* USB device */
- struct mutex mut; /* locks this struct, especially "dev" */
- char *writebuf; /* write transfer_buffer */
+ struct mutex wmut;
+ struct mutex mut;
+ spinlock_t lock; /* locks rcomplete, wcomplete */
char *readbuf; /* read transfer_buffer */
char *statusbuf; /* status transfer_buffer */
- struct urb *readurb, *writeurb; /* The urbs */
- wait_queue_head_t wait; /* Zzzzz ... */
+ struct usb_anchor urbs;
+ wait_queue_head_t rwait, wwait;
int readcount; /* Counter for reads */
int ifnum; /* Interface number */
struct usb_interface *intf; /* The interface */
@@ -147,8 +155,9 @@ struct usblp {
} protocol[USBLP_MAX_PROTOCOLS];
int current_protocol;
int minor; /* minor number of device */
- int wcomplete; /* writing is completed */
- int rcomplete; /* reading is completed */
+ int wcomplete, rcomplete;
+ int wstatus; /* bytes written or error */
+ int rstatus; /* bytes ready or error */
unsigned int quirks; /* quirks flags */
unsigned char used; /* True if open */
unsigned char present; /* True if not disconnected */
@@ -166,9 +175,6 @@ static void usblp_dump(struct usblp *usblp) {
dbg("dev=0x%p", usblp->dev);
dbg("present=%d", usblp->present);
dbg("readbuf=0x%p", usblp->readbuf);
- dbg("writebuf=0x%p", usblp->writebuf);
- dbg("readurb=0x%p", usblp->readurb);
- dbg("writeurb=0x%p", usblp->writeurb);
dbg("readcount=%d", usblp->readcount);
dbg("ifnum=%d", usblp->ifnum);
for (p = USBLP_FIRST_PROTOCOL; p <= USBLP_LAST_PROTOCOL; p++) {
@@ -178,8 +184,8 @@ static void usblp_dump(struct usblp *usblp) {
}
dbg("current_protocol=%d", usblp->current_protocol);
dbg("minor=%d", usblp->minor);
- dbg("wcomplete=%d", usblp->wcomplete);
- dbg("rcomplete=%d", usblp->rcomplete);
+ dbg("wstatus=%d", usblp->wstatus);
+ dbg("rstatus=%d", usblp->rstatus);
dbg("quirks=%d", usblp->quirks);
dbg("used=%d", usblp->used);
dbg("bidir=%d", usblp->bidir);
@@ -222,6 +228,11 @@ static const struct quirk_printer_struct quirk_printers[] = {
{ 0, 0 }
};
+static int usblp_wwait(struct usblp *usblp, int nonblock);
+static int usblp_wtest(struct usblp *usblp, int nonblock);
+static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock);
+static int usblp_rtest(struct usblp *usblp, int nonblock);
+static int usblp_submit_read(struct usblp *usblp);
static int usblp_select_alts(struct usblp *usblp);
static int usblp_set_protocol(struct usblp *usblp, int protocol);
static int usblp_cache_device_id_string(struct usblp *usblp);
@@ -278,34 +289,50 @@ static int proto_bias = -1;
static void usblp_bulk_read(struct urb *urb)
{
struct usblp *usblp = urb->context;
+ int status = urb->status;
- if (unlikely(!usblp || !usblp->dev || !usblp->used))
- return;
-
- if (unlikely(!usblp->present))
- goto unplug;
- if (unlikely(urb->status))
- warn("usblp%d: nonzero read/write bulk status received: %d",
- usblp->minor, urb->status);
+ if (usblp->present && usblp->used) {
+ if (status)
+ printk(KERN_WARNING "usblp%d: "
+ "nonzero read bulk status received: %d\n",
+ usblp->minor, status);
+ }
+ spin_lock(&usblp->lock);
+ if (status < 0)
+ usblp->rstatus = status;
+ else
+ usblp->rstatus = urb->actual_length;
usblp->rcomplete = 1;
-unplug:
- wake_up_interruptible(&usblp->wait);
+ wake_up(&usblp->rwait);
+ spin_unlock(&usblp->lock);
+
+ usb_free_urb(urb);
}
static void usblp_bulk_write(struct urb *urb)
{
struct usblp *usblp = urb->context;
+ int status = urb->status;
- if (unlikely(!usblp || !usblp->dev || !usblp->used))
- return;
- if (unlikely(!usblp->present))
- goto unplug;
- if (unlikely(urb->status))
- warn("usblp%d: nonzero read/write bulk status received: %d",
- usblp->minor, urb->status);
+ if (usblp->present && usblp->used) {
+ if (status)
+ printk(KERN_WARNING "usblp%d: "
+ "nonzero write bulk status received: %d\n",
+ usblp->minor, status);
+ }
+ spin_lock(&usblp->lock);
+ if (status < 0)
+ usblp->wstatus = status;
+ else
+ usblp->wstatus = urb->actual_length;
usblp->wcomplete = 1;
-unplug:
- wake_up_interruptible(&usblp->wait);
+ wake_up(&usblp->wwait);
+ spin_unlock(&usblp->lock);
+
+ /* XXX Use usb_setup_bulk_urb when available. Talk to Marcel. */
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL; /* Not refcounted, so to be safe... */
+ usb_free_urb(urb);
}
/*
@@ -322,7 +349,8 @@ static int usblp_check_status(struct usblp *usblp, int err)
error = usblp_read_status (usblp, usblp->statusbuf);
if (error < 0) {
if (printk_ratelimit())
- err("usblp%d: error %d reading printer status",
+ printk(KERN_ERR
+ "usblp%d: error %d reading printer status\n",
usblp->minor, error);
return 0;
}
@@ -336,8 +364,10 @@ static int usblp_check_status(struct usblp *usblp, int err)
if (~status & LP_PSELECD)
newerr = 2;
- if (newerr != err)
- info("usblp%d: %s", usblp->minor, usblp_messages[newerr]);
+ if (newerr != err) {
+ printk(KERN_INFO "usblp%d: %s\n",
+ usblp->minor, usblp_messages[newerr]);
+ }
return newerr;
}
@@ -345,12 +375,9 @@ static int usblp_check_status(struct usblp *usblp, int err)
static int handle_bidir (struct usblp *usblp)
{
if (usblp->bidir && usblp->used && !usblp->sleeping) {
- usblp->readcount = 0;
- usblp->readurb->dev = usblp->dev;
- if (usb_submit_urb(usblp->readurb, GFP_KERNEL) < 0)
+ if (usblp_submit_read(usblp) < 0)
return -EIO;
}
-
return 0;
}
@@ -403,11 +430,9 @@ static int usblp_open(struct inode *inode, struct file *file)
usblp->used = 1;
file->private_data = usblp;
- usblp->writeurb->transfer_buffer_length = 0;
usblp->wcomplete = 1; /* we begin writeable */
+ usblp->wstatus = 0;
usblp->rcomplete = 0;
- usblp->writeurb->status = 0;
- usblp->readurb->status = 0;
if (handle_bidir(usblp) < 0) {
usblp->used = 0;
@@ -421,20 +446,17 @@ out:
static void usblp_cleanup (struct usblp *usblp)
{
- info("usblp%d: removed", usblp->minor);
+ printk(KERN_INFO "usblp%d: removed\n", usblp->minor);
+ kfree(usblp->readbuf);
kfree (usblp->device_id_string);
kfree (usblp->statusbuf);
- usb_free_urb(usblp->writeurb);
- usb_free_urb(usblp->readurb);
kfree (usblp);
}
static void usblp_unlink_urbs(struct usblp *usblp)
{
- usb_kill_urb(usblp->writeurb);
- if (usblp->bidir)
- usb_kill_urb(usblp->readurb);
+ usb_kill_anchored_urbs(&usblp->urbs);
}
static int usblp_release(struct inode *inode, struct file *file)
@@ -455,10 +477,18 @@ static int usblp_release(struct inode *inode, struct file *file)
/* No kernel lock - fine */
static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait)
{
+ int ret;
+ unsigned long flags;
+
struct usblp *usblp = file->private_data;
- poll_wait(file, &usblp->wait, wait);
- return ((!usblp->bidir || !usblp->rcomplete) ? 0 : POLLIN | POLLRDNORM)
+ /* Should we check file->f_mode & FMODE_WRITE before poll_wait()? */
+ poll_wait(file, &usblp->rwait, wait);
+ poll_wait(file, &usblp->wwait, wait);
+ spin_lock_irqsave(&usblp->lock, flags);
+ ret = ((!usblp->bidir || !usblp->rcomplete) ? 0 : POLLIN | POLLRDNORM)
| (!usblp->wcomplete ? 0 : POLLOUT | POLLWRNORM);
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ return ret;
}
static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -632,10 +662,11 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case LPGETSTATUS:
- if (usblp_read_status(usblp, usblp->statusbuf)) {
+ if ((retval = usblp_read_status(usblp, usblp->statusbuf))) {
if (printk_ratelimit())
- err("usblp%d: failed reading printer status",
- usblp->minor);
+ printk(KERN_ERR "usblp%d: "
+ "failed reading printer status (%d)\n",
+ usblp->minor, retval);
retval = -EIO;
goto done;
}
@@ -656,168 +687,304 @@ done:
static ssize_t usblp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct usblp *usblp = file->private_data;
- int timeout, intr, rv, err = 0, transfer_length = 0;
- size_t writecount = 0;
+ char *writebuf;
+ struct urb *writeurb;
+ int rv;
+ int transfer_length;
+ ssize_t writecount = 0;
+
+ if (mutex_lock_interruptible(&usblp->wmut)) {
+ rv = -EINTR;
+ goto raise_biglock;
+ }
+ if ((rv = usblp_wwait(usblp, !!(file->f_flags & O_NONBLOCK))) < 0)
+ goto raise_wait;
while (writecount < count) {
- if (!usblp->wcomplete) {
- barrier();
- if (file->f_flags & O_NONBLOCK) {
- writecount += transfer_length;
- return writecount ? writecount : -EAGAIN;
- }
-
- timeout = USBLP_WRITE_TIMEOUT;
-
- rv = wait_event_interruptible_timeout(usblp->wait, usblp->wcomplete || !usblp->present , timeout);
- if (rv < 0)
- return writecount ? writecount : -EINTR;
- }
- intr = mutex_lock_interruptible (&usblp->mut);
- if (intr)
- return writecount ? writecount : -EINTR;
- if (!usblp->present) {
- mutex_unlock (&usblp->mut);
- return -ENODEV;
- }
-
- if (usblp->sleeping) {
- mutex_unlock (&usblp->mut);
- return writecount ? writecount : -ENODEV;
- }
-
- if (usblp->writeurb->status != 0) {
- if (usblp->quirks & USBLP_QUIRK_BIDIR) {
- if (!usblp->wcomplete)
- err("usblp%d: error %d writing to printer",
- usblp->minor, usblp->writeurb->status);
- err = usblp->writeurb->status;
- } else
- err = usblp_check_status(usblp, err);
- mutex_unlock (&usblp->mut);
-
- /* if the fault was due to disconnect, let khubd's
- * call to usblp_disconnect() grab usblp->mut ...
- */
- schedule ();
- continue;
- }
-
- /* We must increment writecount here, and not at the
- * end of the loop. Otherwise, the final loop iteration may
- * be skipped, leading to incomplete printer output.
+ /*
+ * Step 1: Submit next block.
*/
- writecount += transfer_length;
- if (writecount == count) {
- mutex_unlock(&usblp->mut);
- break;
- }
-
- transfer_length=(count - writecount);
- if (transfer_length > USBLP_BUF_SIZE)
+ if ((transfer_length = count - writecount) > USBLP_BUF_SIZE)
transfer_length = USBLP_BUF_SIZE;
- usblp->writeurb->transfer_buffer_length = transfer_length;
-
- if (copy_from_user(usblp->writeurb->transfer_buffer,
+ rv = -ENOMEM;
+ if ((writebuf = kmalloc(USBLP_BUF_SIZE, GFP_KERNEL)) == NULL)
+ goto raise_buf;
+ if ((writeurb = usb_alloc_urb(0, GFP_KERNEL)) == NULL)
+ goto raise_urb;
+ usb_fill_bulk_urb(writeurb, usblp->dev,
+ usb_sndbulkpipe(usblp->dev,
+ usblp->protocol[usblp->current_protocol].epwrite->bEndpointAddress),
+ writebuf, transfer_length, usblp_bulk_write, usblp);
+ usb_anchor_urb(writeurb, &usblp->urbs);
+
+ if (copy_from_user(writebuf,
buffer + writecount, transfer_length)) {
- mutex_unlock(&usblp->mut);
- return writecount ? writecount : -EFAULT;
+ rv = -EFAULT;
+ goto raise_badaddr;
}
- usblp->writeurb->dev = usblp->dev;
+ spin_lock_irq(&usblp->lock);
usblp->wcomplete = 0;
- err = usb_submit_urb(usblp->writeurb, GFP_KERNEL);
- if (err) {
+ spin_unlock_irq(&usblp->lock);
+ if ((rv = usb_submit_urb(writeurb, GFP_KERNEL)) < 0) {
+ usblp->wstatus = 0;
+ spin_lock_irq(&usblp->lock);
usblp->wcomplete = 1;
- if (err != -ENOMEM)
- count = -EIO;
- else
- count = writecount ? writecount : -ENOMEM;
- mutex_unlock (&usblp->mut);
- break;
+ wake_up(&usblp->wwait);
+ spin_unlock_irq(&usblp->lock);
+ if (rv != -ENOMEM)
+ rv = -EIO;
+ goto raise_submit;
}
- mutex_unlock (&usblp->mut);
+
+ /*
+ * Step 2: Wait for transfer to end, collect results.
+ */
+ rv = usblp_wwait(usblp, !!(file->f_flags&O_NONBLOCK));
+ if (rv < 0) {
+ if (rv == -EAGAIN) {
+ /* Presume that it's going to complete well. */
+ writecount += transfer_length;
+ }
+ /* Leave URB dangling, to be cleaned on close. */
+ goto collect_error;
+ }
+
+ if (usblp->wstatus < 0) {
+ usblp_check_status(usblp, 0);
+ rv = -EIO;
+ goto collect_error;
+ }
+ /*
+ * This is critical: it must be our URB, not other writer's.
+ * The wmut exists mainly to cover us here.
+ */
+ writecount += usblp->wstatus;
}
- return count;
+ mutex_unlock(&usblp->wmut);
+ return writecount;
+
+raise_submit:
+raise_badaddr:
+ usb_unanchor_urb(writeurb);
+ usb_free_urb(writeurb);
+raise_urb:
+ kfree(writebuf);
+raise_buf:
+raise_wait:
+collect_error: /* Out of raise sequence */
+ mutex_unlock(&usblp->wmut);
+raise_biglock:
+ return writecount ? writecount : rv;
}
-static ssize_t usblp_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+/*
+ * Notice that we fail to restart in a few cases: on EFAULT, on restart
+ * error, etc. This is the historical behaviour. In all such cases we return
+ * EIO, and applications loop in order to get the new read going.
+ */
+static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, loff_t *ppos)
{
struct usblp *usblp = file->private_data;
- int rv, intr;
+ ssize_t count;
+ ssize_t avail;
+ int rv;
if (!usblp->bidir)
return -EINVAL;
- intr = mutex_lock_interruptible (&usblp->mut);
- if (intr)
- return -EINTR;
- if (!usblp->present) {
- count = -ENODEV;
+ rv = usblp_rwait_and_lock(usblp, !!(file->f_flags & O_NONBLOCK));
+ if (rv < 0)
+ return rv;
+
+ if ((avail = usblp->rstatus) < 0) {
+ printk(KERN_ERR "usblp%d: error %d reading from printer\n",
+ usblp->minor, (int)avail);
+ usblp_submit_read(usblp);
+ count = -EIO;
goto done;
}
- if (!usblp->rcomplete) {
- barrier();
+ count = len < avail - usblp->readcount ? len : avail - usblp->readcount;
+ if (count != 0 &&
+ copy_to_user(buffer, usblp->readbuf + usblp->readcount, count)) {
+ count = -EFAULT;
+ goto done;
+ }
- if (file->f_flags & O_NONBLOCK) {
- count = -EAGAIN;
- goto done;
- }
- mutex_unlock(&usblp->mut);
- rv = wait_event_interruptible(usblp->wait, usblp->rcomplete || !usblp->present);
- mutex_lock(&usblp->mut);
- if (rv < 0) {
- count = -EINTR;
+ if ((usblp->readcount += count) == avail) {
+ if (usblp_submit_read(usblp) < 0) {
+ /* We don't want to leak USB return codes into errno. */
+ if (count == 0)
+ count = -EIO;
goto done;
}
}
- if (!usblp->present) {
- count = -ENODEV;
- goto done;
+done:
+ mutex_unlock (&usblp->mut);
+ return count;
+}
+
+/*
+ * Wait for the write path to come idle.
+ * This is called under the ->wmut, so the idle path stays idle.
+ *
+ * Our write path has a peculiar property: it does not buffer like a tty,
+ * but waits for the write to succeed. This allows our ->release to bug out
+ * without waiting for writes to drain. But it obviously does not work
+ * when O_NONBLOCK is set. So, applications setting O_NONBLOCK must use
+ * select(2) or poll(2) to wait for the buffer to drain before closing.
+ * Alternatively, set blocking mode with fcntl and issue a zero-size write.
+ *
+ * Old v0.13 code had a non-functional timeout for wait_event(). Someone forgot
+ * to check the return code for timeout expiration, so it had no effect.
+ * Apparently, it was intended to check for error conditions, such as out
+ * of paper. It is going to return when we settle things with CUPS. XXX
+ */
+static int usblp_wwait(struct usblp *usblp, int nonblock)
+{
+ DECLARE_WAITQUEUE(waita, current);
+ int rc;
+
+ add_wait_queue(&usblp->wwait, &waita);
+ for (;;) {
+ if (mutex_lock_interruptible(&usblp->mut)) {
+ rc = -EINTR;
+ break;
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ((rc = usblp_wtest(usblp, nonblock)) < 0) {
+ mutex_unlock(&usblp->mut);
+ break;
+ }
+ mutex_unlock(&usblp->mut);
+ if (rc == 0)
+ break;
+ schedule();
}
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&usblp->wwait, &waita);
+ return rc;
+}
- if (usblp->sleeping) {
- count = -ENODEV;
- goto done;
+static int usblp_wtest(struct usblp *usblp, int nonblock)
+{
+ unsigned long flags;
+
+ if (!usblp->present)
+ return -ENODEV;
+ if (signal_pending(current))
+ return -EINTR;
+ spin_lock_irqsave(&usblp->lock, flags);
+ if (usblp->wcomplete) {
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ return 0;
}
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ if (usblp->sleeping)
+ return -ENODEV;
+ if (nonblock)
+ return -EAGAIN;
+ return 1;
+}
- if (usblp->readurb->status) {
- err("usblp%d: error %d reading from printer",
- usblp->minor, usblp->readurb->status);
- usblp->readurb->dev = usblp->dev;
- usblp->readcount = 0;
- usblp->rcomplete = 0;
- if (usb_submit_urb(usblp->readurb, GFP_KERNEL) < 0)
- dbg("error submitting urb");
- count = -EIO;
- goto done;
+/*
+ * Wait for read bytes to become available. This probably should have been
+ * called usblp_r_lock_and_wait(), because we lock first. But it's a traditional
+ * name for functions which lock and return.
+ *
+ * We do not use wait_event_interruptible because it makes locking iffy.
+ */
+static int usblp_rwait_and_lock(struct usblp *usblp, int nonblock)
+{
+ DECLARE_WAITQUEUE(waita, current);
+ int rc;
+
+ add_wait_queue(&usblp->rwait, &waita);
+ for (;;) {
+ if (mutex_lock_interruptible(&usblp->mut)) {
+ rc = -EINTR;
+ break;
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ((rc = usblp_rtest(usblp, nonblock)) < 0) {
+ mutex_unlock(&usblp->mut);
+ break;
+ }
+ if (rc == 0) /* Keep it locked */
+ break;
+ mutex_unlock(&usblp->mut);
+ schedule();
}
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&usblp->rwait, &waita);
+ return rc;
+}
- count = count < usblp->readurb->actual_length - usblp->readcount ?
- count : usblp->readurb->actual_length - usblp->readcount;
+static int usblp_rtest(struct usblp *usblp, int nonblock)
+{
+ unsigned long flags;
- if (copy_to_user(buffer, usblp->readurb->transfer_buffer + usblp->readcount, count)) {
- count = -EFAULT;
- goto done;
+ if (!usblp->present)
+ return -ENODEV;
+ if (signal_pending(current))
+ return -EINTR;
+ spin_lock_irqsave(&usblp->lock, flags);
+ if (usblp->rcomplete) {
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ return 0;
}
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ if (usblp->sleeping)
+ return -ENODEV;
+ if (nonblock)
+ return -EAGAIN;
+ return 1;
+}
- if ((usblp->readcount += count) == usblp->readurb->actual_length) {
- usblp->readcount = 0;
- usblp->readurb->dev = usblp->dev;
- usblp->rcomplete = 0;
- if (usb_submit_urb(usblp->readurb, GFP_KERNEL)) {
- count = -EIO;
- goto done;
- }
+/*
+ * Please check ->bidir and other such things outside for now.
+ */
+static int usblp_submit_read(struct usblp *usblp)
+{
+ struct urb *urb;
+ unsigned long flags;
+ int rc;
+
+ rc = -ENOMEM;
+ if ((urb = usb_alloc_urb(0, GFP_KERNEL)) == NULL)
+ goto raise_urb;
+
+ usb_fill_bulk_urb(urb, usblp->dev,
+ usb_rcvbulkpipe(usblp->dev,
+ usblp->protocol[usblp->current_protocol].epread->bEndpointAddress),
+ usblp->readbuf, USBLP_BUF_SIZE_IN,
+ usblp_bulk_read, usblp);
+ usb_anchor_urb(urb, &usblp->urbs);
+
+ spin_lock_irqsave(&usblp->lock, flags);
+ usblp->readcount = 0; /* XXX Why here? */
+ usblp->rcomplete = 0;
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ if ((rc = usb_submit_urb(urb, GFP_KERNEL)) < 0) {
+ dbg("error submitting urb (%d)", rc);
+ spin_lock_irqsave(&usblp->lock, flags);
+ usblp->rstatus = rc;
+ usblp->rcomplete = 1;
+ spin_unlock_irqrestore(&usblp->lock, flags);
+ goto raise_submit;
}
-done:
- mutex_unlock (&usblp->mut);
- return count;
+ return 0;
+
+raise_submit:
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+raise_urb:
+ return rc;
}
/*
@@ -891,55 +1058,41 @@ static int usblp_probe(struct usb_interface *intf,
/* Malloc and start initializing usblp structure so we can use it
* directly. */
if (!(usblp = kzalloc(sizeof(struct usblp), GFP_KERNEL))) {
- err("out of memory for usblp");
+ retval = -ENOMEM;
goto abort;
}
usblp->dev = dev;
+ mutex_init(&usblp->wmut);
mutex_init (&usblp->mut);
- init_waitqueue_head(&usblp->wait);
+ spin_lock_init(&usblp->lock);
+ init_waitqueue_head(&usblp->rwait);
+ init_waitqueue_head(&usblp->wwait);
+ init_usb_anchor(&usblp->urbs);
usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
usblp->intf = intf;
- usblp->writeurb = usb_alloc_urb(0, GFP_KERNEL);
- if (!usblp->writeurb) {
- err("out of memory");
- goto abort;
- }
- usblp->readurb = usb_alloc_urb(0, GFP_KERNEL);
- if (!usblp->readurb) {
- err("out of memory");
- goto abort;
- }
-
/* Malloc device ID string buffer to the largest expected length,
* since we can re-query it on an ioctl and a dynamic string
* could change in length. */
if (!(usblp->device_id_string = kmalloc(USBLP_DEVICE_ID_SIZE, GFP_KERNEL))) {
- err("out of memory for device_id_string");
+ retval = -ENOMEM;
goto abort;
}
- usblp->writebuf = usblp->readbuf = NULL;
- usblp->writeurb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
- usblp->readurb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
- /* Malloc write & read buffers. We somewhat wastefully
+ /*
+ * Allocate read buffer. We somewhat wastefully
* malloc both regardless of bidirectionality, because the
- * alternate setting can be changed later via an ioctl. */
- if (!(usblp->writebuf = usb_buffer_alloc(dev, USBLP_BUF_SIZE,
- GFP_KERNEL, &usblp->writeurb->transfer_dma))) {
- err("out of memory for write buf");
- goto abort;
- }
- if (!(usblp->readbuf = usb_buffer_alloc(dev, USBLP_BUF_SIZE,
- GFP_KERNEL, &usblp->readurb->transfer_dma))) {
- err("out of memory for read buf");
+ * alternate setting can be changed later via an ioctl.
+ */
+ if (!(usblp->readbuf = kmalloc(USBLP_BUF_SIZE_IN, GFP_KERNEL))) {
+ retval = -ENOMEM;
goto abort;
}
/* Allocate buffer for printer status */
usblp->statusbuf = kmalloc(STATUS_BUF_SIZE, GFP_KERNEL);
if (!usblp->statusbuf) {
- err("out of memory for statusbuf");
+ retval = -ENOMEM;
goto abort;
}
@@ -954,12 +1107,15 @@ static int usblp_probe(struct usb_interface *intf,
dbg("incompatible printer-class device 0x%4.4X/0x%4.4X",
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
+ retval = -ENODEV;
goto abort;
}
/* Setup the selected alternate setting and endpoints. */
- if (usblp_set_protocol(usblp, protocol) < 0)
+ if (usblp_set_protocol(usblp, protocol) < 0) {
+ retval = -ENODEV; /* ->probe isn't ->ioctl */
goto abort;
+ }
/* Retrieve and store the device ID string. */
usblp_cache_device_id_string(usblp);
@@ -977,12 +1133,14 @@ static int usblp_probe(struct usb_interface *intf,
retval = usb_register_dev(intf, &usblp_class);
if (retval) {
- err("Not able to get a minor for this device.");
+ printk(KERN_ERR "usblp: Not able to get a minor"
+ " (base %u, slice default): %d\n",
+ USBLP_MINOR_BASE, retval);
goto abort_intfdata;
}
usblp->minor = intf->minor;
- info("usblp%d: USB %sdirectional printer dev %d "
- "if %d alt %d proto %d vid 0x%4.4X pid 0x%4.4X",
+ printk(KERN_INFO "usblp%d: USB %sdirectional printer dev %d "
+ "if %d alt %d proto %d vid 0x%4.4X pid 0x%4.4X\n",
usblp->minor, usblp->bidir ? "Bi" : "Uni", dev->devnum,
usblp->ifnum,
usblp->protocol[usblp->current_protocol].alt_setting,
@@ -997,19 +1155,12 @@ abort_intfdata:
device_remove_file(&intf->dev, &dev_attr_ieee1284_id);
abort:
if (usblp) {
- if (usblp->writebuf)
- usb_buffer_free (usblp->dev, USBLP_BUF_SIZE,
- usblp->writebuf, usblp->writeurb->transfer_dma);
- if (usblp->readbuf)
- usb_buffer_free (usblp->dev, USBLP_BUF_SIZE,
- usblp->readbuf, usblp->readurb->transfer_dma);
+ kfree(usblp->readbuf);
kfree(usblp->statusbuf);
kfree(usblp->device_id_string);
- usb_free_urb(usblp->writeurb);
- usb_free_urb(usblp->readurb);
kfree(usblp);
}
- return -EIO;
+ return retval;
}
/*
@@ -1078,8 +1229,9 @@ static int usblp_select_alts(struct usblp *usblp)
if (ifd->desc.bInterfaceProtocol == 1) {
epread = NULL;
} else if (usblp->quirks & USBLP_QUIRK_BIDIR) {
- info("Disabling reads from problem bidirectional "
- "printer on usblp%d", usblp->minor);
+ printk(KERN_INFO "usblp%d: Disabling reads from "
+ "problematic bidirectional printer\n",
+ usblp->minor);
epread = NULL;
}
@@ -1119,25 +1271,12 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
return -EINVAL;
r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
if (r < 0) {
- err("can't set desired altsetting %d on interface %d",
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
alts, usblp->ifnum);
return r;
}
- usb_fill_bulk_urb(usblp->writeurb, usblp->dev,
- usb_sndbulkpipe(usblp->dev,
- usblp->protocol[protocol].epwrite->bEndpointAddress),
- usblp->writebuf, 0,
- usblp_bulk_write, usblp);
-
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
- if (usblp->bidir)
- usb_fill_bulk_urb(usblp->readurb, usblp->dev,
- usb_rcvbulkpipe(usblp->dev,
- usblp->protocol[protocol].epread->bEndpointAddress),
- usblp->readbuf, USBLP_BUF_SIZE,
- usblp_bulk_read, usblp);
-
usblp->current_protocol = protocol;
dbg("usblp%d set protocol %d", usblp->minor, protocol);
return 0;
@@ -1190,13 +1329,11 @@ static void usblp_disconnect(struct usb_interface *intf)
mutex_lock (&usblp_mutex);
mutex_lock (&usblp->mut);
usblp->present = 0;
+ wake_up(&usblp->wwait);
+ wake_up(&usblp->rwait);
usb_set_intfdata (intf, NULL);
usblp_unlink_urbs(usblp);
- usb_buffer_free (usblp->dev, USBLP_BUF_SIZE,
- usblp->writebuf, usblp->writeurb->transfer_dma);
- usb_buffer_free (usblp->dev, USBLP_BUF_SIZE,
- usblp->readbuf, usblp->readurb->transfer_dma);
mutex_unlock (&usblp->mut);
if (!usblp->used)
@@ -1211,6 +1348,11 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
/* we take no more IO */
usblp->sleeping = 1;
usblp_unlink_urbs(usblp);
+#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
+ /* not strictly necessary, but just in case */
+ wake_up(&usblp->wwait);
+ wake_up(&usblp->rwait);
+#endif
return 0;
}
@@ -1251,12 +1393,7 @@ static struct usb_driver usblp_driver = {
static int __init usblp_init(void)
{
- int retval;
- retval = usb_register(&usblp_driver);
- if (!retval)
- info(DRIVER_VERSION ": " DRIVER_DESC);
-
- return retval;
+ return usb_register(&usblp_driver);
}
static void __exit usblp_exit(void)
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 346fc030c929..97b09f282705 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -86,6 +86,31 @@ config USB_SUSPEND
If you are unsure about this, say N here.
+config USB_PERSIST
+ bool "USB device persistence during system suspend (DANGEROUS)"
+ depends on USB && PM && EXPERIMENTAL
+ default n
+ help
+
+ If you say Y here and enable the "power/persist" attribute
+ for a USB device, the device's data structures will remain
+ persistent across system suspend, even if the USB bus loses
+ power. (This includes hibernation, also known as swsusp or
+ suspend-to-disk.) The devices will reappear as if by magic
+ when the system wakes up, with no need to unmount USB
+ filesystems, rmmod host-controller drivers, or do anything
+ else.
+
+ WARNING: This option can be dangerous!
+
+ If a USB device is replaced by another of the same type while
+ the system is asleep, there's a good chance the kernel won't
+ detect the change. Likewise if the media in a USB storage
+ device is replaced. When this happens it's almost certain to
+ cause data corruption and maybe even crash your system.
+
+ If you are unsure, say N here.
+
config USB_OTG
bool
depends on USB && EXPERIMENTAL
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index dd3482328ad2..cb69aa1e02e8 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -85,15 +85,21 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
- /* If the bInterval value is outside the legal range,
- * set it to a default value: 32 ms */
+ /* Fix up bInterval values outside the legal range. Use 32 ms if no
+ * proper value can be guessed. */
i = 0; /* i = min, j = max, n = default */
j = 255;
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_HIGH:
- n = 9; /* 32 ms = 2^(9-1) uframes */
+ /* Many device manufacturers are using full-speed
+ * bInterval values in high-speed interrupt endpoint
+ * descriptors. Try to fix those and fall back to a
+ * 32 ms default value otherwise. */
+ n = fls(d->bInterval*8);
+ if (n == 0)
+ n = 9; /* 32 ms = 2^(9-1) uframes */
j = 16;
break;
default: /* USB_SPEED_FULL or _LOW */
@@ -124,6 +130,21 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.bInterval = n;
}
+ /* Some buggy low-speed devices have Bulk endpoints, which is
+ * explicitly forbidden by the USB spec. In an attempt to make
+ * them usable, we will try treating them as Interrupt endpoints.
+ */
+ if (to_usb_device(ddev)->speed == USB_SPEED_LOW &&
+ usb_endpoint_xfer_bulk(d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d "
+ "endpoint 0x%X is Bulk; changing to Interrupt\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
+ endpoint->desc.bInterval = 1;
+ if (le16_to_cpu(endpoint->desc.wMaxPacketSize) > 8)
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
+ }
+
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the next endpoint or interface descriptor */
endpoint->extra = buffer;
@@ -274,6 +295,7 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
struct usb_descriptor_header *header;
int len, retval;
u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES];
+ unsigned iad_num = 0;
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
@@ -351,6 +373,20 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
++n;
}
+ } else if (header->bDescriptorType ==
+ USB_DT_INTERFACE_ASSOCIATION) {
+ if (iad_num == USB_MAXIADS) {
+ dev_warn(ddev, "found more Interface "
+ "Association Descriptors "
+ "than allocated for in "
+ "configuration %d\n", cfgno);
+ } else {
+ config->intf_assoc[iad_num] =
+ (struct usb_interface_assoc_descriptor
+ *)header;
+ iad_num++;
+ }
+
} else if (header->bDescriptorType == USB_DT_DEVICE ||
header->bDescriptorType == USB_DT_CONFIG)
dev_warn(ddev, "config %d contains an unexpected "
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 6753ca059ee4..87c794d60aa0 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -102,6 +102,10 @@ static const char *format_config =
/* C: #Ifs=dd Cfg#=dd Atr=xx MPwr=dddmA */
"C:%c #Ifs=%2d Cfg#=%2d Atr=%02x MxPwr=%3dmA\n";
+static const char *format_iad =
+/* A: FirstIf#=dd IfCount=dd Cls=xx(sssss) Sub=xx Prot=xx */
+ "A: FirstIf#=%2d IfCount=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x\n";
+
static const char *format_iface =
/* I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/
"I:%c If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n";
@@ -146,6 +150,7 @@ static const struct class_info clas_info[] =
{USB_CLASS_STILL_IMAGE, "still"},
{USB_CLASS_CSCID, "scard"},
{USB_CLASS_CONTENT_SEC, "c-sec"},
+ {USB_CLASS_VIDEO, "video"},
{-1, "unk."} /* leave as last */
};
@@ -286,6 +291,21 @@ static char *usb_dump_interface(
return start;
}
+static char *usb_dump_iad_descriptor(char *start, char *end,
+ const struct usb_interface_assoc_descriptor *iad)
+{
+ if (start > end)
+ return start;
+ start += sprintf(start, format_iad,
+ iad->bFirstInterface,
+ iad->bInterfaceCount,
+ iad->bFunctionClass,
+ class_decode(iad->bFunctionClass),
+ iad->bFunctionSubClass,
+ iad->bFunctionProtocol);
+ return start;
+}
+
/* TBD:
* 0. TBDs
* 1. marking active interface altsettings (code lists all, but should mark
@@ -322,6 +342,12 @@ static char *usb_dump_config (
if (!config) /* getting these some in 2.3.7; none in 2.3.6 */
return start + sprintf(start, "(null Cfg. desc.)\n");
start = usb_dump_config_descriptor(start, end, &config->desc, active);
+ for (i = 0; i < USB_MAXIADS; i++) {
+ if (config->intf_assoc[i] == NULL)
+ break;
+ start = usb_dump_iad_descriptor(start, end,
+ config->intf_assoc[i]);
+ }
for (i = 0; i < config->desc.bNumInterfaces; i++) {
intfc = config->intf_cache[i];
interface = config->interface[i];
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 2619986e5300..654857493a82 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -24,10 +24,12 @@
#include <linux/device.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/workqueue.h>
#include "hcd.h"
#include "usb.h"
+
#ifdef CONFIG_HOTPLUG
/*
@@ -802,18 +804,17 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
udev->state == USB_STATE_SUSPENDED)
goto done;
- /* For devices that don't have a driver, we do a standard suspend. */
- if (udev->dev.driver == NULL) {
+ /* For devices that don't have a driver, we do a generic suspend. */
+ if (udev->dev.driver)
+ udriver = to_usb_device_driver(udev->dev.driver);
+ else {
udev->do_remote_wakeup = 0;
- status = usb_port_suspend(udev);
- goto done;
+ udriver = &usb_generic_driver;
}
-
- udriver = to_usb_device_driver(udev->dev.driver);
status = udriver->suspend(udev, msg);
-done:
- // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
+ done:
+ dev_vdbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
if (status == 0)
udev->dev.power.power_state.event = msg.event;
return status;
@@ -825,8 +826,9 @@ static int usb_resume_device(struct usb_device *udev)
struct usb_device_driver *udriver;
int status = 0;
- if (udev->state == USB_STATE_NOTATTACHED ||
- udev->state != USB_STATE_SUSPENDED)
+ if (udev->state == USB_STATE_NOTATTACHED)
+ goto done;
+ if (udev->state != USB_STATE_SUSPENDED && !udev->reset_resume)
goto done;
/* Can't resume it if it doesn't have a driver. */
@@ -835,11 +837,14 @@ static int usb_resume_device(struct usb_device *udev)
goto done;
}
+ if (udev->quirks & USB_QUIRK_RESET_RESUME)
+ udev->reset_resume = 1;
+
udriver = to_usb_device_driver(udev->dev.driver);
status = udriver->resume(udev);
-done:
- // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
+ done:
+ dev_vdbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
if (status == 0) {
udev->autoresume_disabled = 0;
udev->dev.power.power_state.event = PM_EVENT_ON;
@@ -877,15 +882,13 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
mark_quiesced(intf);
}
-done:
- // dev_dbg(&intf->dev, "%s: status %d\n", __FUNCTION__, status);
- if (status == 0)
- intf->dev.power.power_state.event = msg.event;
+ done:
+ dev_vdbg(&intf->dev, "%s: status %d\n", __FUNCTION__, status);
return status;
}
/* Caller has locked intf's usb_device's pm_mutex */
-static int usb_resume_interface(struct usb_interface *intf)
+static int usb_resume_interface(struct usb_interface *intf, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
@@ -905,23 +908,37 @@ static int usb_resume_interface(struct usb_interface *intf)
}
driver = to_usb_driver(intf->dev.driver);
- if (driver->resume) {
- status = driver->resume(intf);
- if (status)
- dev_err(&intf->dev, "%s error %d\n",
- "resume", status);
- else
- mark_active(intf);
+ if (reset_resume) {
+ if (driver->reset_resume) {
+ status = driver->reset_resume(intf);
+ if (status)
+ dev_err(&intf->dev, "%s error %d\n",
+ "reset_resume", status);
+ } else {
+ // status = -EOPNOTSUPP;
+ dev_warn(&intf->dev, "no %s for driver %s?\n",
+ "reset_resume", driver->name);
+ }
} else {
- dev_warn(&intf->dev, "no resume for driver %s?\n",
- driver->name);
- mark_active(intf);
+ if (driver->resume) {
+ status = driver->resume(intf);
+ if (status)
+ dev_err(&intf->dev, "%s error %d\n",
+ "resume", status);
+ } else {
+ // status = -EOPNOTSUPP;
+ dev_warn(&intf->dev, "no %s for driver %s?\n",
+ "resume", driver->name);
+ }
}
done:
- // dev_dbg(&intf->dev, "%s: status %d\n", __FUNCTION__, status);
+ dev_vdbg(&intf->dev, "%s: status %d\n", __FUNCTION__, status);
if (status == 0)
- intf->dev.power.power_state.event = PM_EVENT_ON;
+ mark_active(intf);
+
+ /* FIXME: Unbind the driver and reprobe if the resume failed
+ * (not possible if auto_pm is set) */
return status;
}
@@ -958,6 +975,18 @@ static int autosuspend_check(struct usb_device *udev)
"for autosuspend\n");
return -EOPNOTSUPP;
}
+
+ /* Don't allow autosuspend if the device will need
+ * a reset-resume and any of its interface drivers
+ * doesn't include support.
+ */
+ if (udev->quirks & USB_QUIRK_RESET_RESUME) {
+ struct usb_driver *driver;
+
+ driver = to_usb_driver(intf->dev.driver);
+ if (!driver->reset_resume)
+ return -EOPNOTSUPP;
+ }
}
}
@@ -974,7 +1003,7 @@ static int autosuspend_check(struct usb_device *udev)
* or for the past.
*/
queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
- suspend_time - jiffies);
+ round_jiffies_relative(suspend_time - jiffies));
}
return -EAGAIN;
}
@@ -1054,14 +1083,21 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
break;
}
}
- if (status == 0)
+ if (status == 0) {
+
+ /* Non-root devices don't need to do anything for FREEZE
+ * or PRETHAW. */
+ if (udev->parent && (msg.event == PM_EVENT_FREEZE ||
+ msg.event == PM_EVENT_PRETHAW))
+ goto done;
status = usb_suspend_device(udev, msg);
+ }
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(intf);
+ usb_resume_interface(intf, 0);
}
/* Try another autosuspend when the interfaces aren't busy */
@@ -1076,7 +1112,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
}
done:
- // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
+ dev_vdbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
return status;
}
@@ -1131,7 +1167,8 @@ static int usb_resume_both(struct usb_device *udev)
status = usb_autoresume_device(parent);
if (status == 0) {
status = usb_resume_device(udev);
- if (status) {
+ if (status || udev->state ==
+ USB_STATE_NOTATTACHED) {
usb_autosuspend_device(parent);
/* It's possible usb_resume_device()
@@ -1152,28 +1189,25 @@ static int usb_resume_both(struct usb_device *udev)
/* We can't propagate beyond the USB subsystem,
* so if a root hub's controller is suspended
* then we're stuck. */
- if (udev->dev.parent->power.power_state.event !=
- PM_EVENT_ON)
- status = -EHOSTUNREACH;
- else
- status = usb_resume_device(udev);
+ status = usb_resume_device(udev);
}
} else {
- /* Needed only for setting udev->dev.power.power_state.event
- * and for possible debugging message. */
+ /* Needed for setting udev->dev.power.power_state.event,
+ * for possible debugging message, and for reset_resume. */
status = usb_resume_device(udev);
}
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(intf);
+ usb_resume_interface(intf, udev->reset_resume);
}
}
done:
- // dev_dbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
+ dev_vdbg(&udev->dev, "%s: status %d\n", __FUNCTION__, status);
+ udev->reset_resume = 0;
return status;
}
@@ -1240,8 +1274,8 @@ void usb_autosuspend_device(struct usb_device *udev)
int status;
status = usb_autopm_do_device(udev, -1);
- // dev_dbg(&udev->dev, "%s: cnt %d\n",
- // __FUNCTION__, udev->pm_usage_cnt);
+ dev_vdbg(&udev->dev, "%s: cnt %d\n",
+ __FUNCTION__, udev->pm_usage_cnt);
}
/**
@@ -1260,8 +1294,8 @@ void usb_autosuspend_device(struct usb_device *udev)
void usb_try_autosuspend_device(struct usb_device *udev)
{
usb_autopm_do_device(udev, 0);
- // dev_dbg(&udev->dev, "%s: cnt %d\n",
- // __FUNCTION__, udev->pm_usage_cnt);
+ dev_vdbg(&udev->dev, "%s: cnt %d\n",
+ __FUNCTION__, udev->pm_usage_cnt);
}
/**
@@ -1288,8 +1322,8 @@ int usb_autoresume_device(struct usb_device *udev)
int status;
status = usb_autopm_do_device(udev, 1);
- // dev_dbg(&udev->dev, "%s: status %d cnt %d\n",
- // __FUNCTION__, status, udev->pm_usage_cnt);
+ dev_vdbg(&udev->dev, "%s: status %d cnt %d\n",
+ __FUNCTION__, status, udev->pm_usage_cnt);
return status;
}
@@ -1361,8 +1395,8 @@ void usb_autopm_put_interface(struct usb_interface *intf)
int status;
status = usb_autopm_do_interface(intf, -1);
- // dev_dbg(&intf->dev, "%s: status %d cnt %d\n",
- // __FUNCTION__, status, intf->pm_usage_cnt);
+ dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+ __FUNCTION__, status, intf->pm_usage_cnt);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
@@ -1405,8 +1439,8 @@ int usb_autopm_get_interface(struct usb_interface *intf)
int status;
status = usb_autopm_do_interface(intf, 1);
- // dev_dbg(&intf->dev, "%s: status %d cnt %d\n",
- // __FUNCTION__, status, intf->pm_usage_cnt);
+ dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+ __FUNCTION__, status, intf->pm_usage_cnt);
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
@@ -1427,8 +1461,8 @@ int usb_autopm_set_interface(struct usb_interface *intf)
int status;
status = usb_autopm_do_interface(intf, 0);
- // dev_dbg(&intf->dev, "%s: status %d cnt %d\n",
- // __FUNCTION__, status, intf->pm_usage_cnt);
+ dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
+ __FUNCTION__, status, intf->pm_usage_cnt);
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
@@ -1508,8 +1542,15 @@ static int usb_resume(struct device *dev)
if (!is_usb_device(dev)) /* Ignore PM for interfaces */
return 0;
udev = to_usb_device(dev);
- if (udev->autoresume_disabled)
- return -EPERM;
+
+ /* If autoresume is disabled then we also want to prevent resume
+ * during system wakeup. However, a "persistent-device" reset-resume
+ * after power loss counts as a wakeup event. So allow a
+ * reset-resume to occur if remote wakeup is enabled. */
+ if (udev->autoresume_disabled) {
+ if (!(udev->reset_resume && udev->do_remote_wakeup))
+ return -EPERM;
+ }
return usb_external_resume_device(udev);
}
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 01c857ac27af..5d860bc9b421 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -16,15 +16,15 @@
*/
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/errno.h>
+#include <linux/rwsem.h>
#include <linux/usb.h>
#include "usb.h"
#define MAX_USB_MINORS 256
static const struct file_operations *usb_minors[MAX_USB_MINORS];
-static DEFINE_SPINLOCK(minor_lock);
+static DECLARE_RWSEM(minor_rwsem);
static int usb_open(struct inode * inode, struct file * file)
{
@@ -33,14 +33,11 @@ static int usb_open(struct inode * inode, struct file * file)
int err = -ENODEV;
const struct file_operations *old_fops, *new_fops = NULL;
- spin_lock (&minor_lock);
+ down_read(&minor_rwsem);
c = usb_minors[minor];
- if (!c || !(new_fops = fops_get(c))) {
- spin_unlock(&minor_lock);
- return err;
- }
- spin_unlock(&minor_lock);
+ if (!c || !(new_fops = fops_get(c)))
+ goto done;
old_fops = file->f_op;
file->f_op = new_fops;
@@ -52,6 +49,8 @@ static int usb_open(struct inode * inode, struct file * file)
file->f_op = fops_get(old_fops);
}
fops_put(old_fops);
+ done:
+ up_read(&minor_rwsem);
return err;
}
@@ -166,7 +165,7 @@ int usb_register_dev(struct usb_interface *intf,
if (class_driver->fops == NULL)
goto exit;
- spin_lock (&minor_lock);
+ down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
if (usb_minors[minor])
continue;
@@ -176,7 +175,7 @@ int usb_register_dev(struct usb_interface *intf,
retval = 0;
break;
}
- spin_unlock (&minor_lock);
+ up_write(&minor_rwsem);
if (retval)
goto exit;
@@ -197,9 +196,9 @@ int usb_register_dev(struct usb_interface *intf,
intf->usb_dev = device_create(usb_class->class, &intf->dev,
MKDEV(USB_MAJOR, minor), "%s", temp);
if (IS_ERR(intf->usb_dev)) {
- spin_lock (&minor_lock);
+ down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
- spin_unlock (&minor_lock);
+ up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
exit:
@@ -236,9 +235,9 @@ void usb_deregister_dev(struct usb_interface *intf,
dbg ("removing %d minor", intf->minor);
- spin_lock (&minor_lock);
+ down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
- spin_unlock (&minor_lock);
+ up_write(&minor_rwsem);
snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base);
device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
@@ -247,5 +246,3 @@ void usb_deregister_dev(struct usb_interface *intf,
destroy_usb_class();
}
EXPORT_SYMBOL(usb_deregister_dev);
-
-
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 9bbcb20e2d94..b2fc2b115256 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,7 @@
#include <linux/usb.h>
#include "usb.h"
+#include "hcd.h"
static inline const char *plural(int n)
{
@@ -193,16 +194,34 @@ static void generic_disconnect(struct usb_device *udev)
static int generic_suspend(struct usb_device *udev, pm_message_t msg)
{
- /* USB devices enter SUSPEND state through their hubs, but can be
- * marked for FREEZE as soon as their children are already idled.
- * But those semantics are useless, so we equate the two (sigh).
+ int rc;
+
+ /* Normal USB devices suspend through their upstream port.
+ * Root hubs don't have upstream ports to suspend,
+ * so we have to shut down their downstream HC-to-USB
+ * interfaces manually by doing a bus (or "global") suspend.
*/
- return usb_port_suspend(udev);
+ if (!udev->parent)
+ rc = hcd_bus_suspend(udev);
+ else
+ rc = usb_port_suspend(udev);
+ return rc;
}
static int generic_resume(struct usb_device *udev)
{
- return usb_port_resume(udev);
+ int rc;
+
+ /* Normal USB devices resume/reset through their upstream port.
+ * Root hubs don't have upstream ports to resume or reset,
+ * so we have to start up their downstream HC-to-USB
+ * interfaces manually by doing a bus (or "global") resume.
+ */
+ if (!udev->parent)
+ rc = hcd_bus_resume(udev);
+ else
+ rc = usb_port_resume(udev);
+ return rc;
}
#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index edf4300a3f7a..5cf6d5f9acbd 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -207,7 +207,8 @@ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
* We must ignore the FREEZE vs SUSPEND distinction here, because
* otherwise the swsusp will save (and restore) garbage state.
*/
- if (hcd->self.root_hub->dev.power.power_state.event == PM_EVENT_ON)
+ if (!(hcd->state == HC_STATE_SUSPENDED ||
+ hcd->state == HC_STATE_HALT))
return -EBUSY;
if (hcd->driver->suspend) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8969e42434b9..42ef1d5f6c8a 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -99,12 +99,17 @@ EXPORT_SYMBOL_GPL (usb_bus_list_lock);
/* used for controlling access to virtual root hubs */
static DEFINE_SPINLOCK(hcd_root_hub_lock);
-/* used when updating hcd data */
-static DEFINE_SPINLOCK(hcd_data_lock);
+/* used when updating an endpoint's URB list */
+static DEFINE_SPINLOCK(hcd_urb_list_lock);
/* wait queue for synchronous unlinks */
DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
+static inline int is_root_hub(struct usb_device *udev)
+{
+ return (udev->parent == NULL);
+}
+
/*-------------------------------------------------------------------------*/
/*
@@ -582,10 +587,12 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
}
/* The USB 2.0 spec says 256 ms. This is close enough and won't
- * exceed that limit if HZ is 100. */
+ * exceed that limit if HZ is 100. The math is more clunky than
+ * maybe expected; this is to make sure that all timers for USB devices
+ * fire at the same time, to give the CPU a break in between. */
if (hcd->uses_new_polling ? hcd->poll_rh :
(length == 0 && hcd->status_urb != NULL))
- mod_timer (&hcd->rh_timer, jiffies + msecs_to_jiffies(250));
+ mod_timer (&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
}
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
@@ -614,8 +621,8 @@ static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb)
urb->hcpriv = hcd; /* indicate it's queued */
if (!hcd->uses_new_polling)
- mod_timer (&hcd->rh_timer, jiffies +
- msecs_to_jiffies(250));
+ mod_timer (&hcd->rh_timer,
+ (jiffies/(HZ/4) + 1) * (HZ/4));
/* If a status change has already occurred, report it ASAP */
else if (hcd->poll_pending)
@@ -901,17 +908,31 @@ EXPORT_SYMBOL (usb_calc_bus_time);
/*-------------------------------------------------------------------------*/
-static void urb_unlink (struct urb *urb)
+static void urb_unlink(struct usb_hcd *hcd, struct urb *urb)
{
unsigned long flags;
/* clear all state linking urb to this dev (and hcd) */
-
- spin_lock_irqsave (&hcd_data_lock, flags);
+ spin_lock_irqsave(&hcd_urb_list_lock, flags);
list_del_init (&urb->urb_list);
- spin_unlock_irqrestore (&hcd_data_lock, flags);
-}
+ spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
+ if (hcd->self.uses_dma && !is_root_hub(urb->dev)) {
+ if (usb_pipecontrol (urb->pipe)
+ && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
+ dma_unmap_single (hcd->self.controller, urb->setup_dma,
+ sizeof (struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ if (urb->transfer_buffer_length != 0
+ && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
+ dma_unmap_single (hcd->self.controller,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ usb_pipein (urb->pipe)
+ ? DMA_FROM_DEVICE
+ : DMA_TO_DEVICE);
+ }
+}
/* may be called in any context with a valid urb->dev usecount
* caller surrenders "ownership" of urb
@@ -938,7 +959,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
// FIXME: verify that quiescing hc works right (RH cleans up)
- spin_lock_irqsave (&hcd_data_lock, flags);
+ spin_lock_irqsave(&hcd_urb_list_lock, flags);
ep = (usb_pipein(urb->pipe) ? urb->dev->ep_in : urb->dev->ep_out)
[usb_pipeendpoint(urb->pipe)];
if (unlikely (!ep))
@@ -948,24 +969,14 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
else switch (hcd->state) {
case HC_STATE_RUNNING:
case HC_STATE_RESUMING:
-doit:
list_add_tail (&urb->urb_list, &ep->urb_list);
status = 0;
break;
- case HC_STATE_SUSPENDED:
- /* HC upstream links (register access, wakeup signaling) can work
- * even when the downstream links (and DMA etc) are quiesced; let
- * usbcore talk to the root hub.
- */
- if (hcd->self.controller->power.power_state.event == PM_EVENT_ON
- && urb->dev->parent == NULL)
- goto doit;
- /* FALL THROUGH */
default:
status = -ESHUTDOWN;
break;
}
- spin_unlock_irqrestore (&hcd_data_lock, flags);
+ spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
if (status) {
INIT_LIST_HEAD (&urb->urb_list);
usbmon_urb_submit_error(&hcd->self, urb, status);
@@ -979,7 +990,7 @@ doit:
urb = usb_get_urb (urb);
atomic_inc (&urb->use_count);
- if (urb->dev == hcd->self.root_hub) {
+ if (is_root_hub(urb->dev)) {
/* NOTE: requirement on hub callers (usbfs and the hub
* driver, for now) that URBs' urb->transfer_buffer be
* valid and usb_buffer_{sync,unmap}() not be needed, since
@@ -1014,7 +1025,7 @@ doit:
status = hcd->driver->urb_enqueue (hcd, ep, urb, mem_flags);
done:
if (unlikely (status)) {
- urb_unlink (urb);
+ urb_unlink(hcd, urb);
atomic_dec (&urb->use_count);
if (urb->reject)
wake_up (&usb_kill_urb_queue);
@@ -1026,18 +1037,6 @@ done:
/*-------------------------------------------------------------------------*/
-/* called in any context */
-int usb_hcd_get_frame_number (struct usb_device *udev)
-{
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
-
- if (!HC_IS_RUNNING (hcd->state))
- return -ESHUTDOWN;
- return hcd->driver->get_frame_number (hcd);
-}
-
-/*-------------------------------------------------------------------------*/
-
/* this makes the hcd giveback() the urb more quickly, by kicking it
* off hardware queues (which may take a while) and returning it as
* soon as practical. we've already set up the urb's return status,
@@ -1048,7 +1047,7 @@ unlink1 (struct usb_hcd *hcd, struct urb *urb)
{
int value;
- if (urb->dev == hcd->self.root_hub)
+ if (is_root_hub(urb->dev))
value = usb_rh_urb_dequeue (hcd, urb);
else {
@@ -1096,11 +1095,11 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
* that it was submitted. But as a rule it can't know whether or
* not it's already been unlinked ... so we respect the reversed
* lock sequence needed for the usb_hcd_giveback_urb() code paths
- * (urb lock, then hcd_data_lock) in case some other CPU is now
+ * (urb lock, then hcd_urb_list_lock) in case some other CPU is now
* unlinking it.
*/
spin_lock_irqsave (&urb->lock, flags);
- spin_lock (&hcd_data_lock);
+ spin_lock(&hcd_urb_list_lock);
sys = &urb->dev->dev;
hcd = bus_to_hcd(urb->dev->bus);
@@ -1132,17 +1131,16 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
* finish unlinking the initial failed usb_set_address()
* or device descriptor fetch.
*/
- if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags)
- && hcd->self.root_hub != urb->dev) {
+ if (!test_bit(HCD_FLAG_SAW_IRQ, &hcd->flags) &&
+ !is_root_hub(urb->dev)) {
dev_warn (hcd->self.controller, "Unlink after no-IRQ? "
- "Controller is probably using the wrong IRQ."
- "\n");
+ "Controller is probably using the wrong IRQ.\n");
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
}
urb->status = status;
- spin_unlock (&hcd_data_lock);
+ spin_unlock(&hcd_urb_list_lock);
spin_unlock_irqrestore (&urb->lock, flags);
retval = unlink1 (hcd, urb);
@@ -1151,7 +1149,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
return retval;
done:
- spin_unlock (&hcd_data_lock);
+ spin_unlock(&hcd_urb_list_lock);
spin_unlock_irqrestore (&urb->lock, flags);
if (retval != -EIDRM && sys && sys->driver)
dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval);
@@ -1160,6 +1158,35 @@ done:
/*-------------------------------------------------------------------------*/
+/**
+ * usb_hcd_giveback_urb - return URB from HCD to device driver
+ * @hcd: host controller returning the URB
+ * @urb: urb being returned to the USB device driver.
+ * Context: in_interrupt()
+ *
+ * This hands the URB from HCD to its USB device driver, using its
+ * completion function. The HCD has freed all per-urb resources
+ * (and is done using urb->hcpriv). It also released all HCD locks;
+ * the device driver won't cause problems if it frees, modifies,
+ * or resubmits this URB.
+ */
+void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb)
+{
+ urb_unlink(hcd, urb);
+ usbmon_urb_complete (&hcd->self, urb);
+ usb_unanchor_urb(urb);
+
+ /* pass ownership to the completion handler */
+ urb->complete (urb);
+ atomic_dec (&urb->use_count);
+ if (unlikely (urb->reject))
+ wake_up (&usb_kill_urb_queue);
+ usb_put_urb (urb);
+}
+EXPORT_SYMBOL (usb_hcd_giveback_urb);
+
+/*-------------------------------------------------------------------------*/
+
/* disables the endpoint: cancels any pending urbs, then synchronizes with
* the hcd to make sure all endpoint state is gone from hardware, and then
* waits until the endpoint's queue is completely drained. use for
@@ -1179,7 +1206,7 @@ void usb_hcd_endpoint_disable (struct usb_device *udev,
/* ep is already gone from udev->ep_{in,out}[]; no more submits */
rescan:
- spin_lock (&hcd_data_lock);
+ spin_lock(&hcd_urb_list_lock);
list_for_each_entry (urb, &ep->urb_list, urb_list) {
int tmp;
@@ -1187,7 +1214,7 @@ rescan:
if (urb->status != -EINPROGRESS)
continue;
usb_get_urb (urb);
- spin_unlock (&hcd_data_lock);
+ spin_unlock(&hcd_urb_list_lock);
spin_lock (&urb->lock);
tmp = urb->status;
@@ -1216,7 +1243,7 @@ rescan:
/* list contents may have changed */
goto rescan;
}
- spin_unlock (&hcd_data_lock);
+ spin_unlock(&hcd_urb_list_lock);
local_irq_enable ();
/* synchronize with the hardware, so old configuration state
@@ -1233,7 +1260,7 @@ rescan:
* endpoint_disable methods.
*/
while (!list_empty (&ep->urb_list)) {
- spin_lock_irq (&hcd_data_lock);
+ spin_lock_irq(&hcd_urb_list_lock);
/* The list may have changed while we acquired the spinlock */
urb = NULL;
@@ -1242,7 +1269,7 @@ rescan:
urb_list);
usb_get_urb (urb);
}
- spin_unlock_irq (&hcd_data_lock);
+ spin_unlock_irq(&hcd_urb_list_lock);
if (urb) {
usb_kill_urb (urb);
@@ -1253,44 +1280,73 @@ rescan:
/*-------------------------------------------------------------------------*/
+/* called in any context */
+int usb_hcd_get_frame_number (struct usb_device *udev)
+{
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ if (!HC_IS_RUNNING (hcd->state))
+ return -ESHUTDOWN;
+ return hcd->driver->get_frame_number (hcd);
+}
+
+/*-------------------------------------------------------------------------*/
+
#ifdef CONFIG_PM
-int hcd_bus_suspend (struct usb_bus *bus)
+int hcd_bus_suspend(struct usb_device *rhdev)
{
- struct usb_hcd *hcd;
- int status;
+ struct usb_hcd *hcd = container_of(rhdev->bus, struct usb_hcd, self);
+ int status;
+ int old_state = hcd->state;
- hcd = container_of (bus, struct usb_hcd, self);
- if (!hcd->driver->bus_suspend)
- return -ENOENT;
- hcd->state = HC_STATE_QUIESCING;
- status = hcd->driver->bus_suspend (hcd);
- if (status == 0)
+ dev_dbg(&rhdev->dev, "bus %s%s\n",
+ rhdev->auto_pm ? "auto-" : "", "suspend");
+ if (!hcd->driver->bus_suspend) {
+ status = -ENOENT;
+ } else {
+ hcd->state = HC_STATE_QUIESCING;
+ status = hcd->driver->bus_suspend(hcd);
+ }
+ if (status == 0) {
+ usb_set_device_state(rhdev, USB_STATE_SUSPENDED);
hcd->state = HC_STATE_SUSPENDED;
- else
- dev_dbg(&bus->root_hub->dev, "%s fail, err %d\n",
+ } else {
+ hcd->state = old_state;
+ dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
"suspend", status);
+ }
return status;
}
-int hcd_bus_resume (struct usb_bus *bus)
+int hcd_bus_resume(struct usb_device *rhdev)
{
- struct usb_hcd *hcd;
- int status;
+ struct usb_hcd *hcd = container_of(rhdev->bus, struct usb_hcd, self);
+ int status;
+ int old_state = hcd->state;
- hcd = container_of (bus, struct usb_hcd, self);
+ dev_dbg(&rhdev->dev, "usb %s%s\n",
+ rhdev->auto_pm ? "auto-" : "", "resume");
if (!hcd->driver->bus_resume)
return -ENOENT;
if (hcd->state == HC_STATE_RUNNING)
return 0;
+
hcd->state = HC_STATE_RESUMING;
- status = hcd->driver->bus_resume (hcd);
- if (status == 0)
+ status = hcd->driver->bus_resume(hcd);
+ if (status == 0) {
+ /* TRSMRCY = 10 msec */
+ msleep(10);
+ usb_set_device_state(rhdev, rhdev->actconfig
+ ? USB_STATE_CONFIGURED
+ : USB_STATE_ADDRESS);
hcd->state = HC_STATE_RUNNING;
- else {
- dev_dbg(&bus->root_hub->dev, "%s fail, err %d\n",
+ } else {
+ hcd->state = old_state;
+ dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
"resume", status);
- usb_hc_died(hcd);
+ if (status != -ESHUTDOWN)
+ usb_hc_died(hcd);
}
return status;
}
@@ -1371,55 +1427,6 @@ EXPORT_SYMBOL (usb_bus_start_enum);
/*-------------------------------------------------------------------------*/
/**
- * usb_hcd_giveback_urb - return URB from HCD to device driver
- * @hcd: host controller returning the URB
- * @urb: urb being returned to the USB device driver.
- * Context: in_interrupt()
- *
- * This hands the URB from HCD to its USB device driver, using its
- * completion function. The HCD has freed all per-urb resources
- * (and is done using urb->hcpriv). It also released all HCD locks;
- * the device driver won't cause problems if it frees, modifies,
- * or resubmits this URB.
- */
-void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb)
-{
- int at_root_hub;
-
- at_root_hub = (urb->dev == hcd->self.root_hub);
- urb_unlink (urb);
-
- /* lower level hcd code should use *_dma exclusively if the
- * host controller does DMA */
- if (hcd->self.uses_dma && !at_root_hub) {
- if (usb_pipecontrol (urb->pipe)
- && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
- dma_unmap_single (hcd->self.controller, urb->setup_dma,
- sizeof (struct usb_ctrlrequest),
- DMA_TO_DEVICE);
- if (urb->transfer_buffer_length != 0
- && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
- dma_unmap_single (hcd->self.controller,
- urb->transfer_dma,
- urb->transfer_buffer_length,
- usb_pipein (urb->pipe)
- ? DMA_FROM_DEVICE
- : DMA_TO_DEVICE);
- }
-
- usbmon_urb_complete (&hcd->self, urb);
- /* pass ownership to the completion handler */
- urb->complete (urb);
- atomic_dec (&urb->use_count);
- if (unlikely (urb->reject))
- wake_up (&usb_kill_urb_queue);
- usb_put_urb (urb);
-}
-EXPORT_SYMBOL (usb_hcd_giveback_urb);
-
-/*-------------------------------------------------------------------------*/
-
-/**
* usb_hcd_irq - hook IRQs to HCD framework (bus glue)
* @irq: the IRQ being raised
* @__hcd: pointer to the HCD whose IRQ is being signaled
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index ef50fa494e47..b5ebb73c2332 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -364,23 +364,13 @@ extern int usb_find_interface_driver (struct usb_device *dev,
#ifdef CONFIG_PM
extern void usb_hcd_resume_root_hub (struct usb_hcd *hcd);
extern void usb_root_hub_lost_power (struct usb_device *rhdev);
-extern int hcd_bus_suspend (struct usb_bus *bus);
-extern int hcd_bus_resume (struct usb_bus *bus);
+extern int hcd_bus_suspend(struct usb_device *rhdev);
+extern int hcd_bus_resume(struct usb_device *rhdev);
#else
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
return;
}
-
-static inline int hcd_bus_suspend(struct usb_bus *bus)
-{
- return 0;
-}
-
-static inline int hcd_bus_resume (struct usb_bus *bus)
-{
- return 0;
-}
#endif /* CONFIG_PM */
/*
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 24f10a19dbdb..e341a1da517f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -31,9 +31,16 @@
#include "hcd.h"
#include "hub.h"
+#ifdef CONFIG_USB_PERSIST
+#define USB_PERSIST 1
+#else
+#define USB_PERSIST 0
+#endif
+
struct usb_hub {
struct device *intfdev; /* the "interface" device */
struct usb_device *hdev;
+ struct kref kref;
struct urb *urb; /* for interrupt polling pipe */
/* buffer for urb ... with extra space in case of babble */
@@ -66,6 +73,7 @@ struct usb_hub {
unsigned limited_power:1;
unsigned quiescing:1;
unsigned activating:1;
+ unsigned disconnected:1;
unsigned has_indicators:1;
u8 indicator[USB_MAXCHILDREN];
@@ -321,7 +329,7 @@ static void kick_khubd(struct usb_hub *hub)
to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
spin_lock_irqsave(&hub_event_lock, flags);
- if (list_empty(&hub->event_list)) {
+ if (!hub->disconnected && list_empty(&hub->event_list)) {
list_add_tail(&hub->event_list, &hub_event_list);
wake_up(&khubd_wait);
}
@@ -330,6 +338,7 @@ static void kick_khubd(struct usb_hub *hub)
void usb_kick_khubd(struct usb_device *hdev)
{
+ /* FIXME: What if hdev isn't bound to the hub driver? */
kick_khubd(hdev_to_hub(hdev));
}
@@ -400,9 +409,10 @@ static void hub_tt_kevent (struct work_struct *work)
struct usb_hub *hub =
container_of(work, struct usb_hub, tt.kevent);
unsigned long flags;
+ int limit = 100;
spin_lock_irqsave (&hub->tt.lock, flags);
- while (!list_empty (&hub->tt.clear_list)) {
+ while (--limit && !list_empty (&hub->tt.clear_list)) {
struct list_head *temp;
struct usb_tt_clear *clear;
struct usb_device *hdev = hub->hdev;
@@ -550,48 +560,68 @@ static int hub_hub_status(struct usb_hub *hub,
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_device *hdev = hub->hdev;
- int ret;
+ int ret = 0;
- if (hdev->children[port1-1] && set_state) {
+ if (hdev->children[port1-1] && set_state)
usb_set_device_state(hdev->children[port1-1],
USB_STATE_NOTATTACHED);
- }
- ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
+ if (!hub->error)
+ ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
if (ret)
dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
- port1, ret);
-
+ port1, ret);
return ret;
}
+/*
+ * Disable a port and mark a logical connect-change event, so that some
+ * time later khubd will disconnect() any existing usb_device on the port
+ * and will re-enumerate if there actually is a device attached.
+ */
+static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
+{
+ dev_dbg(hub->intfdev, "logical disconnect on port %d\n", port1);
+ hub_port_disable(hub, port1, 1);
+
+ /* FIXME let caller ask to power down the port:
+ * - some devices won't enumerate without a VBUS power cycle
+ * - SRP saves power that way
+ * - ... new call, TBD ...
+ * That's easy if this hub can switch power per-port, and
+ * khubd reactivates the port later (timer, SRP, etc).
+ * Powerdown must be optional, because of reset/DFU.
+ */
+
+ set_bit(port1, hub->change_bits);
+ kick_khubd(hub);
+}
/* caller has locked the hub device */
-static void hub_pre_reset(struct usb_interface *intf)
+static int hub_pre_reset(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = hub->hdev;
- int port1;
+ int i;
- for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
- if (hdev->children[port1 - 1]) {
- usb_disconnect(&hdev->children[port1 - 1]);
- if (hub->error == 0)
- hub_port_disable(hub, port1, 0);
- }
+ /* Disconnect all the children */
+ for (i = 0; i < hdev->maxchild; ++i) {
+ if (hdev->children[i])
+ usb_disconnect(&hdev->children[i]);
}
hub_quiesce(hub);
+ return 0;
}
/* caller has locked the hub device */
-static void hub_post_reset(struct usb_interface *intf)
+static int hub_post_reset(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
- hub_activate(hub);
hub_power_on(hub);
+ hub_activate(hub);
+ return 0;
}
-
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
@@ -845,43 +875,42 @@ fail:
return ret;
}
+static void hub_release(struct kref *kref)
+{
+ struct usb_hub *hub = container_of(kref, struct usb_hub, kref);
+
+ usb_put_intf(to_usb_interface(hub->intfdev));
+ kfree(hub);
+}
+
static unsigned highspeed_hubs;
static void hub_disconnect(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata (intf);
- struct usb_device *hdev;
+
+ /* Take the hub off the event list and don't let it be added again */
+ spin_lock_irq(&hub_event_lock);
+ list_del_init(&hub->event_list);
+ hub->disconnected = 1;
+ spin_unlock_irq(&hub_event_lock);
/* Disconnect all children and quiesce the hub */
hub->error = 0;
hub_pre_reset(intf);
usb_set_intfdata (intf, NULL);
- hdev = hub->hdev;
- if (hdev->speed == USB_SPEED_HIGH)
+ if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
usb_free_urb(hub->urb);
- hub->urb = NULL;
-
- spin_lock_irq(&hub_event_lock);
- list_del_init(&hub->event_list);
- spin_unlock_irq(&hub_event_lock);
-
kfree(hub->descriptor);
- hub->descriptor = NULL;
-
kfree(hub->status);
- hub->status = NULL;
-
- if (hub->buffer) {
- usb_buffer_free(hdev, sizeof(*hub->buffer), hub->buffer,
- hub->buffer_dma);
- hub->buffer = NULL;
- }
+ usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer,
+ hub->buffer_dma);
- kfree(hub);
+ kref_put(&hub->kref, hub_release);
}
static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -929,10 +958,12 @@ descriptor_error:
return -ENOMEM;
}
+ kref_init(&hub->kref);
INIT_LIST_HEAD(&hub->event_list);
hub->intfdev = &intf->dev;
hub->hdev = hdev;
INIT_DELAYED_WORK(&hub->leds, led_work);
+ usb_get_intf(intf);
usb_set_intfdata (intf, hub);
intf->needs_remote_wakeup = 1;
@@ -982,49 +1013,6 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
}
-/* grab device/port lock, returning index of that port (zero based).
- * protects the upstream link used by this device from concurrent
- * tree operations like suspend, resume, reset, and disconnect, which
- * apply to everything downstream of a given port.
- */
-static int locktree(struct usb_device *udev)
-{
- int t;
- struct usb_device *hdev;
-
- if (!udev)
- return -ENODEV;
-
- /* root hub is always the first lock in the series */
- hdev = udev->parent;
- if (!hdev) {
- usb_lock_device(udev);
- return 0;
- }
-
- /* on the path from root to us, lock everything from
- * top down, dropping parent locks when not needed
- */
- t = locktree(hdev);
- if (t < 0)
- return t;
-
- /* everything is fail-fast once disconnect
- * processing starts
- */
- if (udev->state == USB_STATE_NOTATTACHED) {
- usb_unlock_device(hdev);
- return -ENODEV;
- }
-
- /* when everyone grabs locks top->bottom,
- * non-overlapping work may be concurrent
- */
- usb_lock_device(udev);
- usb_unlock_device(hdev);
- return udev->portnum;
-}
-
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
int i;
@@ -1089,46 +1077,6 @@ void usb_set_device_state(struct usb_device *udev,
spin_unlock_irqrestore(&device_state_lock, flags);
}
-
-#ifdef CONFIG_PM
-
-/**
- * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power
- * @rhdev: struct usb_device for the root hub
- *
- * The USB host controller driver calls this function when its root hub
- * is resumed and Vbus power has been interrupted or the controller
- * has been reset. The routine marks all the children of the root hub
- * as NOTATTACHED and marks logical connect-change events on their ports.
- */
-void usb_root_hub_lost_power(struct usb_device *rhdev)
-{
- struct usb_hub *hub;
- int port1;
- unsigned long flags;
-
- dev_warn(&rhdev->dev, "root hub lost power or was reset\n");
-
- /* Make sure no potential wakeup events get lost,
- * by forcing the root hub to be resumed.
- */
- rhdev->dev.power.prev_state.event = PM_EVENT_ON;
-
- spin_lock_irqsave(&device_state_lock, flags);
- hub = hdev_to_hub(rhdev);
- for (port1 = 1; port1 <= rhdev->maxchild; ++port1) {
- if (rhdev->children[port1 - 1]) {
- recursively_mark_NOTATTACHED(
- rhdev->children[port1 - 1]);
- set_bit(port1, hub->change_bits);
- }
- }
- spin_unlock_irqrestore(&device_state_lock, flags);
-}
-EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
-
-#endif /* CONFIG_PM */
-
static void choose_address(struct usb_device *udev)
{
int devnum;
@@ -1269,7 +1217,6 @@ static inline void show_string(struct usb_device *udev, char *id, char *string)
#ifdef CONFIG_USB_OTG
#include "otg_whitelist.h"
-static int __usb_port_suspend(struct usb_device *, int port1);
#endif
/**
@@ -1375,11 +1322,11 @@ int usb_new_device(struct usb_device *udev)
* (Includes HNP test device.)
*/
if (udev->bus->b_hnp_enable || udev->bus->is_b_host) {
- err = __usb_port_suspend(udev, udev->bus->otg_port);
+ err = usb_port_suspend(udev);
if (err < 0)
dev_dbg(&udev->dev, "HNP fail, %d\n", err);
}
- err = -ENODEV;
+ err = -ENOTSUPP;
goto fail;
}
#endif
@@ -1388,6 +1335,10 @@ int usb_new_device(struct usb_device *udev)
udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
+ /* Increment the parent's count of unsuspended children */
+ if (udev->parent)
+ usb_autoresume_device(udev->parent);
+
/* Register the device. The device driver is responsible
* for adding the device files to sysfs and for configuring
* the device.
@@ -1395,13 +1346,11 @@ int usb_new_device(struct usb_device *udev)
err = device_add(&udev->dev);
if (err) {
dev_err(&udev->dev, "can't device_add, error %d\n", err);
+ if (udev->parent)
+ usb_autosuspend_device(udev->parent);
goto fail;
}
- /* Increment the parent's count of unsuspended children */
- if (udev->parent)
- usb_autoresume_device(udev->parent);
-
exit:
return err;
@@ -1476,9 +1425,9 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
- /* bomb out completely if something weird happened */
+ /* bomb out completely if the connection bounced */
if ((portchange & USB_PORT_STAT_C_CONNECTION))
- return -EINVAL;
+ return -ENOTCONN;
/* if we`ve finished resetting, then break out of the loop */
if (!(portstatus & USB_PORT_STAT_RESET) &&
@@ -1557,34 +1506,24 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
return status;
}
-/*
- * Disable a port and mark a logical connnect-change event, so that some
- * time later khubd will disconnect() any existing usb_device on the port
- * and will re-enumerate if there actually is a device attached.
- */
-static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
-{
- dev_dbg(hub->intfdev, "logical disconnect on port %d\n", port1);
- hub_port_disable(hub, port1, 1);
-
- /* FIXME let caller ask to power down the port:
- * - some devices won't enumerate without a VBUS power cycle
- * - SRP saves power that way
- * - ... new call, TBD ...
- * That's easy if this hub can switch power per-port, and
- * khubd reactivates the port later (timer, SRP, etc).
- * Powerdown must be optional, because of reset/DFU.
- */
-
- set_bit(port1, hub->change_bits);
- kick_khubd(hub);
-}
-
#ifdef CONFIG_PM
#ifdef CONFIG_USB_SUSPEND
/*
+ * usb_port_suspend - suspend a usb device's upstream port
+ * @udev: device that's no longer in active use, not a root hub
+ * Context: must be able to sleep; device not locked; pm locks held
+ *
+ * Suspends a USB device that isn't in active use, conserving power.
+ * Devices may wake out of a suspend, if anything important happens,
+ * using the remote wakeup mechanism. They may also be taken out of
+ * suspend by the host, using usb_port_resume(). It's also routine
+ * to disconnect devices while they are suspended.
+ *
+ * This only affects the USB hardware for a device; its interfaces
+ * (and, for hubs, child devices) must already have been suspended.
+ *
* Selective port suspend reduces power; most suspended devices draw
* less than 500 uA. It's also used in OTG, along with remote wakeup.
* All devices below the suspended port are also suspended.
@@ -1593,11 +1532,35 @@ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
* also support "remote wakeup", where the device can activate the USB
* tree above them to deliver data, such as a keypress or packet. In
* some cases, this wakes the USB host.
+ *
+ * Suspending OTG devices may trigger HNP, if that's been enabled
+ * between a pair of dual-role devices. That will change roles, such
+ * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral.
+ *
+ * Devices on USB hub ports have only one "suspend" state, corresponding
+ * to ACPI D2, "may cause the device to lose some context".
+ * State transitions include:
+ *
+ * - suspend, resume ... when the VBUS power link stays live
+ * - suspend, disconnect ... VBUS lost
+ *
+ * Once VBUS drop breaks the circuit, the port it's using has to go through
+ * normal re-enumeration procedures, starting with enabling VBUS power.
+ * Other than re-initializing the hub (plug/unplug, except for root hubs),
+ * Linux (2.6) currently has NO mechanisms to initiate that: no khubd
+ * timer, no SRP, no requests through sysfs.
+ *
+ * If CONFIG_USB_SUSPEND isn't enabled, devices only really suspend when
+ * the root hub for their bus goes into global suspend ... so we don't
+ * (falsely) update the device power state to say it suspended.
+ *
+ * Returns 0 on success, else negative errno.
*/
-static int hub_port_suspend(struct usb_hub *hub, int port1,
- struct usb_device *udev)
+int usb_port_suspend(struct usb_device *udev)
{
- int status;
+ struct usb_hub *hub = hdev_to_hub(udev->parent);
+ int port1 = udev->portnum;
+ int status;
// dev_dbg(hub->intfdev, "suspend port %d\n", port1);
@@ -1614,17 +1577,15 @@ static int hub_port_suspend(struct usb_hub *hub, int port1,
NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (status)
- dev_dbg(&udev->dev,
- "won't remote wakeup, status %d\n",
- status);
+ dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
+ status);
}
/* see 7.1.7.6 */
status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND);
if (status) {
- dev_dbg(hub->intfdev,
- "can't suspend port %d, status %d\n",
- port1, status);
+ dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
+ port1, status);
/* paranoia: "should not happen" */
(void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
@@ -1642,85 +1603,24 @@ static int hub_port_suspend(struct usb_hub *hub, int port1,
}
/*
- * Devices on USB hub ports have only one "suspend" state, corresponding
- * to ACPI D2, "may cause the device to lose some context".
- * State transitions include:
- *
- * - suspend, resume ... when the VBUS power link stays live
- * - suspend, disconnect ... VBUS lost
- *
- * Once VBUS drop breaks the circuit, the port it's using has to go through
- * normal re-enumeration procedures, starting with enabling VBUS power.
- * Other than re-initializing the hub (plug/unplug, except for root hubs),
- * Linux (2.6) currently has NO mechanisms to initiate that: no khubd
- * timer, no SRP, no requests through sysfs.
- *
- * If CONFIG_USB_SUSPEND isn't enabled, devices only really suspend when
- * the root hub for their bus goes into global suspend ... so we don't
- * (falsely) update the device power state to say it suspended.
- */
-static int __usb_port_suspend (struct usb_device *udev, int port1)
-{
- int status = 0;
-
- /* caller owns the udev device lock */
- if (port1 < 0)
- return port1;
-
- /* we change the device's upstream USB link,
- * but root hubs have no upstream USB link.
- */
- if (udev->parent)
- status = hub_port_suspend(hdev_to_hub(udev->parent), port1,
- udev);
- else {
- dev_dbg(&udev->dev, "usb %ssuspend\n",
- udev->auto_pm ? "auto-" : "");
- usb_set_device_state(udev, USB_STATE_SUSPENDED);
- }
- return status;
-}
-
-/*
- * usb_port_suspend - suspend a usb device's upstream port
- * @udev: device that's no longer in active use
- * Context: must be able to sleep; device not locked; pm locks held
- *
- * Suspends a USB device that isn't in active use, conserving power.
- * Devices may wake out of a suspend, if anything important happens,
- * using the remote wakeup mechanism. They may also be taken out of
- * suspend by the host, using usb_port_resume(). It's also routine
- * to disconnect devices while they are suspended.
- *
- * This only affects the USB hardware for a device; its interfaces
- * (and, for hubs, child devices) must already have been suspended.
- *
- * Suspending OTG devices may trigger HNP, if that's been enabled
- * between a pair of dual-role devices. That will change roles, such
- * as from A-Host to A-Peripheral or from B-Host back to B-Peripheral.
- *
- * Returns 0 on success, else negative errno.
- */
-int usb_port_suspend(struct usb_device *udev)
-{
- return __usb_port_suspend(udev, udev->portnum);
-}
-
-/*
* If the USB "suspend" state is in use (rather than "global suspend"),
* many devices will be individually taken out of suspend state using
- * special" resume" signaling. These routines kick in shortly after
+ * special "resume" signaling. This routine kicks in shortly after
* hardware resume signaling is finished, either because of selective
* resume (by host) or remote wakeup (by device) ... now see what changed
* in the tree that's rooted at this device.
+ *
+ * If @udev->reset_resume is set then the device is reset before the
+ * status check is done.
*/
static int finish_port_resume(struct usb_device *udev)
{
- int status;
+ int status = 0;
u16 devstatus;
/* caller owns the udev device lock */
- dev_dbg(&udev->dev, "finish resume\n");
+ dev_dbg(&udev->dev, "finish %sresume\n",
+ udev->reset_resume ? "reset-" : "");
/* usb ch9 identifies four variants of SUSPENDED, based on what
* state the device resumes to. Linux currently won't see the
@@ -1731,22 +1631,30 @@ static int finish_port_resume(struct usb_device *udev)
? USB_STATE_CONFIGURED
: USB_STATE_ADDRESS);
+ /* 10.5.4.5 says not to reset a suspended port if the attached
+ * device is enabled for remote wakeup. Hence the reset
+ * operation is carried out here, after the port has been
+ * resumed.
+ */
+ if (udev->reset_resume)
+ status = usb_reset_device(udev);
+
/* 10.5.4.5 says be sure devices in the tree are still there.
* For now let's assume the device didn't go crazy on resume,
* and device drivers will know about any resume quirks.
*/
- status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
- if (status >= 0)
- status = (status == 2 ? 0 : -ENODEV);
+ if (status == 0) {
+ status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ if (status >= 0)
+ status = (status == 2 ? 0 : -ENODEV);
+ }
- if (status)
- dev_dbg(&udev->dev,
- "gone after usb resume? status %d\n",
- status);
- else if (udev->actconfig) {
+ if (status) {
+ dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
+ status);
+ } else if (udev->actconfig) {
le16_to_cpus(&devstatus);
- if ((devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
- && udev->parent) {
+ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
status = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE,
@@ -1759,19 +1667,52 @@ static int finish_port_resume(struct usb_device *udev)
"wakeup, status %d\n", status);
}
status = 0;
-
- } else if (udev->devnum <= 0) {
- dev_dbg(&udev->dev, "bogus resume!\n");
- status = -EINVAL;
}
return status;
}
-static int
-hub_port_resume(struct usb_hub *hub, int port1, struct usb_device *udev)
+/*
+ * usb_port_resume - re-activate a suspended usb device's upstream port
+ * @udev: device to re-activate, not a root hub
+ * Context: must be able to sleep; device not locked; pm locks held
+ *
+ * This will re-activate the suspended device, increasing power usage
+ * while letting drivers communicate again with its endpoints.
+ * USB resume explicitly guarantees that the power session between
+ * the host and the device is the same as it was when the device
+ * suspended.
+ *
+ * If CONFIG_USB_PERSIST and @udev->reset_resume are both set then this
+ * routine won't check that the port is still enabled. Furthermore,
+ * if @udev->reset_resume is set then finish_port_resume() above will
+ * reset @udev. The end result is that a broken power session can be
+ * recovered and @udev will appear to persist across a loss of VBUS power.
+ *
+ * For example, if a host controller doesn't maintain VBUS suspend current
+ * during a system sleep or is reset when the system wakes up, all the USB
+ * power sessions below it will be broken. This is especially troublesome
+ * for mass-storage devices containing mounted filesystems, since the
+ * device will appear to have disconnected and all the memory mappings
+ * to it will be lost. Using the USB_PERSIST facility, the device can be
+ * made to appear as if it had not disconnected.
+ *
+ * This facility is inherently dangerous. Although usb_reset_device()
+ * makes every effort to ensure that the same device is present after the
+ * reset as before, it cannot provide a 100% guarantee. Furthermore it's
+ * quite possible for a device to remain unaltered but its media to be
+ * changed. If the user replaces a flash memory card while the system is
+ * asleep, he will have only himself to blame when the filesystem on the
+ * new card is corrupted and the system crashes.
+ *
+ * Returns 0 on success, else negative errno.
+ */
+int usb_port_resume(struct usb_device *udev)
{
- int status;
- u16 portchange, portstatus;
+ struct usb_hub *hub = hdev_to_hub(udev->parent);
+ int port1 = udev->portnum;
+ int status;
+ u16 portchange, portstatus;
+ unsigned mask_flags, want_flags;
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
@@ -1786,30 +1727,31 @@ hub_port_resume(struct usb_hub *hub, int port1, struct usb_device *udev)
status = clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_SUSPEND);
if (status) {
- dev_dbg(hub->intfdev,
- "can't resume port %d, status %d\n",
- port1, status);
+ dev_dbg(hub->intfdev, "can't resume port %d, status %d\n",
+ port1, status);
} else {
/* drive resume for at least 20 msec */
- if (udev)
- dev_dbg(&udev->dev, "usb %sresume\n",
- udev->auto_pm ? "auto-" : "");
+ dev_dbg(&udev->dev, "usb %sresume\n",
+ udev->auto_pm ? "auto-" : "");
msleep(25);
-#define LIVE_FLAGS ( USB_PORT_STAT_POWER \
- | USB_PORT_STAT_ENABLE \
- | USB_PORT_STAT_CONNECTION)
-
/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
* sequence.
*/
status = hub_port_status(hub, port1, &portstatus, &portchange);
-SuspendCleared:
- if (status < 0
- || (portstatus & LIVE_FLAGS) != LIVE_FLAGS
- || (portstatus & USB_PORT_STAT_SUSPEND) != 0
- ) {
+
+ SuspendCleared:
+ if (USB_PERSIST && udev->reset_resume)
+ want_flags = USB_PORT_STAT_POWER
+ | USB_PORT_STAT_CONNECTION;
+ else
+ want_flags = USB_PORT_STAT_POWER
+ | USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE;
+ mask_flags = want_flags | USB_PORT_STAT_SUSPEND;
+
+ if (status < 0 || (portstatus & mask_flags) != want_flags) {
dev_dbg(hub->intfdev,
"port %d status %04x.%04x after resume, %d\n",
port1, portchange, portstatus, status);
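The want_flags/mask_flags test above replaces the old LIVE_FLAGS macro: after resume the port must show power and connection (plus enable, unless this is a USB_PERSIST reset-resume) and the suspend bit must have cleared. A self-contained sketch of the same mask-and-compare; the USB_PORT_STAT_* values are copied by hand since the real definitions live in the USB headers:

#include <stdio.h>

/* illustrative copies of the wPortStatus bits (USB 2.0, ch. 11.24.2.7) */
#define STAT_CONNECTION 0x0001
#define STAT_ENABLE     0x0002
#define STAT_SUSPEND    0x0004
#define STAT_POWER      0x0100

static int port_ok_after_resume(unsigned portstatus, int reset_resume)
{
    unsigned want = STAT_POWER | STAT_CONNECTION;
    unsigned mask;

    if (!reset_resume)          /* a normal resume also needs ENABLE */
        want |= STAT_ENABLE;
    mask = want | STAT_SUSPEND; /* SUSPEND must have cleared */

    return (portstatus & mask) == want;
}

int main(void)
{
    printf("%d\n", port_ok_after_resume(STAT_POWER | STAT_CONNECTION |
                                        STAT_ENABLE, 0));               /* 1 */
    printf("%d\n", port_ok_after_resume(STAT_POWER | STAT_CONNECTION |
                                        STAT_ENABLE | STAT_SUSPEND, 0)); /* 0 */
    return 0;
}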
@@ -1821,51 +1763,19 @@ SuspendCleared:
USB_PORT_FEAT_C_SUSPEND);
/* TRSMRCY = 10 msec */
msleep(10);
- if (udev)
- status = finish_port_resume(udev);
}
}
- if (status < 0)
- hub_port_logical_disconnect(hub, port1);
clear_bit(port1, hub->busy_bits);
if (!hub->hdev->parent && !hub->busy_bits[0])
usb_enable_root_hub_irq(hub->hdev->bus);
- return status;
-}
-
-/*
- * usb_port_resume - re-activate a suspended usb device's upstream port
- * @udev: device to re-activate
- * Context: must be able to sleep; device not locked; pm locks held
- *
- * This will re-activate the suspended device, increasing power usage
- * while letting drivers communicate again with its endpoints.
- * USB resume explicitly guarantees that the power session between
- * the host and the device is the same as it was when the device
- * suspended.
- *
- * Returns 0 on success, else negative errno.
- */
-int usb_port_resume(struct usb_device *udev)
-{
- int status;
-
- /* we change the device's upstream USB link,
- * but root hubs have no upstream USB link.
- */
- if (udev->parent) {
- // NOTE this fails if parent is also suspended...
- status = hub_port_resume(hdev_to_hub(udev->parent),
- udev->portnum, udev);
- } else {
- dev_dbg(&udev->dev, "usb %sresume\n",
- udev->auto_pm ? "auto-" : "");
+ if (status == 0)
status = finish_port_resume(udev);
- }
- if (status < 0)
+ if (status < 0) {
dev_dbg(&udev->dev, "can't resume, status %d\n", status);
+ hub_port_logical_disconnect(hub, port1);
+ }
return status;
}
@@ -1892,21 +1802,16 @@ int usb_port_suspend(struct usb_device *udev)
return 0;
}
-static inline int
-finish_port_resume(struct usb_device *udev)
-{
- return 0;
-}
-
-static inline int
-hub_port_resume(struct usb_hub *hub, int port1, struct usb_device *udev)
-{
- return 0;
-}
-
int usb_port_resume(struct usb_device *udev)
{
- return 0;
+ int status = 0;
+
+ /* However we may need to do a reset-resume */
+ if (udev->reset_resume) {
+ dev_dbg(&udev->dev, "reset-resume\n");
+ status = usb_reset_device(udev);
+ }
+ return status;
}
static inline int remote_wakeup(struct usb_device *udev)
@@ -1921,7 +1826,6 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
struct usb_hub *hub = usb_get_intfdata (intf);
struct usb_device *hdev = hub->hdev;
unsigned port1;
- int status = 0;
/* fail if children aren't already suspended */
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
@@ -1947,49 +1851,75 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
/* stop khubd and related activity */
hub_quiesce(hub);
-
- /* "global suspend" of the downstream HC-to-USB interface */
- if (!hdev->parent) {
- status = hcd_bus_suspend(hdev->bus);
- if (status != 0) {
- dev_dbg(&hdev->dev, "'global' suspend %d\n", status);
- hub_activate(hub);
- }
- }
- return status;
+ return 0;
}
static int hub_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata (intf);
- struct usb_device *hdev = hub->hdev;
- int status;
dev_dbg(&intf->dev, "%s\n", __FUNCTION__);
- /* "global resume" of the downstream HC-to-USB interface */
- if (!hdev->parent) {
- struct usb_bus *bus = hdev->bus;
- if (bus) {
- status = hcd_bus_resume (bus);
- if (status) {
- dev_dbg(&intf->dev, "'global' resume %d\n",
- status);
- return status;
+ /* tell khubd to look for changes on this hub */
+ hub_activate(hub);
+ return 0;
+}
+
+static int hub_reset_resume(struct usb_interface *intf)
+{
+ struct usb_hub *hub = usb_get_intfdata(intf);
+ struct usb_device *hdev = hub->hdev;
+ int port1;
+
+ hub_power_on(hub);
+
+ for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
+ struct usb_device *child = hdev->children[port1-1];
+
+ if (child) {
+
+ /* For "USB_PERSIST"-enabled children we must
+ * mark the child device for reset-resume and
+ * turn off the connect-change status to prevent
+ * khubd from disconnecting it later.
+ */
+ if (USB_PERSIST && child->persist_enabled) {
+ child->reset_resume = 1;
+ clear_port_feature(hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+
+ /* Otherwise we must disconnect the child,
+ * but as we may not lock the child device here
+ * we have to do a "logical" disconnect.
+ */
+ } else {
+ hub_port_logical_disconnect(hub, port1);
}
- } else
- return -EOPNOTSUPP;
- if (status == 0) {
- /* TRSMRCY = 10 msec */
- msleep(10);
}
}
- /* tell khubd to look for changes on this hub */
hub_activate(hub);
return 0;
}
+/**
+ * usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power
+ * @rhdev: struct usb_device for the root hub
+ *
+ * The USB host controller driver calls this function when its root hub
+ * is resumed and Vbus power has been interrupted or the controller
+ * has been reset. The routine marks @rhdev as having lost power. When
+ * the hub driver is resumed it will take notice; if CONFIG_USB_PERSIST
+ * is enabled then it will carry out power-session recovery, otherwise
+ * it will disconnect all the child devices.
+ */
+void usb_root_hub_lost_power(struct usb_device *rhdev)
+{
+ dev_warn(&rhdev->dev, "root hub lost power or was reset\n");
+ rhdev->reset_resume = 1;
+}
+EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
+
#else /* CONFIG_PM */
static inline int remote_wakeup(struct usb_device *udev)
@@ -1997,8 +1927,9 @@ static inline int remote_wakeup(struct usb_device *udev)
return 0;
}
-#define hub_suspend NULL
-#define hub_resume NULL
+#define hub_suspend NULL
+#define hub_resume NULL
+#define hub_reset_resume NULL
#endif
@@ -2461,19 +2392,6 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
return;
}
-#ifdef CONFIG_USB_SUSPEND
- /* If something is connected, but the port is suspended, wake it up. */
- if (portstatus & USB_PORT_STAT_SUSPEND) {
- status = hub_port_resume(hub, port1, NULL);
- if (status < 0) {
- dev_dbg(hub_dev,
- "can't clear suspend on port %d; %d\n",
- port1, status);
- goto done;
- }
- }
-#endif
-
for (i = 0; i < SET_CONFIG_TRIES; i++) {
struct usb_device *udev;
@@ -2584,7 +2502,7 @@ loop:
ep0_reinit(udev);
release_address(udev);
usb_put_dev(udev);
- if (status == -ENOTCONN)
+ if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
}
@@ -2625,10 +2543,12 @@ static void hub_events(void)
list_del_init(tmp);
hub = list_entry(tmp, struct usb_hub, event_list);
- hdev = hub->hdev;
- intf = to_usb_interface(hub->intfdev);
- hub_dev = &intf->dev;
+ kref_get(&hub->kref);
+ spin_unlock_irq(&hub_event_lock);
+ hdev = hub->hdev;
+ hub_dev = hub->intfdev;
+ intf = to_usb_interface(hub_dev);
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hub->descriptor
? hub->descriptor->bNbrPorts
@@ -2637,16 +2557,10 @@ static void hub_events(void)
(u16) hub->change_bits[0],
(u16) hub->event_bits[0]);
- usb_get_intf(intf);
- spin_unlock_irq(&hub_event_lock);
-
/* Lock the device, then check to see if we were
* disconnected while waiting for the lock to succeed. */
- if (locktree(hdev) < 0) {
- usb_put_intf(intf);
- continue;
- }
- if (hub != usb_get_intfdata(intf))
+ usb_lock_device(hdev);
+ if (unlikely(hub->disconnected))
goto loop;
/* If the hub has died, clean up after it */
@@ -2809,13 +2723,14 @@ loop_autopm:
usb_autopm_enable(intf);
loop:
usb_unlock_device(hdev);
- usb_put_intf(intf);
+ kref_put(&hub->kref, hub_release);
} /* end while (1) */
}
static int hub_thread(void *__unused)
{
+ set_freezable();
do {
hub_events();
wait_event_interruptible(khubd_wait,
@@ -2844,6 +2759,7 @@ static struct usb_driver hub_driver = {
.disconnect = hub_disconnect,
.suspend = hub_suspend,
.resume = hub_resume,
+ .reset_resume = hub_reset_resume,
.pre_reset = hub_pre_reset,
.post_reset = hub_post_reset,
.ioctl = hub_ioctl,
@@ -2946,6 +2862,11 @@ static int config_descriptors_changed(struct usb_device *udev)
* this from a driver probe() routine after downloading new firmware.
* For calls that might not occur during probe(), drivers should lock
* the device using usb_lock_device_for_reset().
+ *
+ * Locking exception: This routine may also be called from within an
+ * autoresume handler. Such usage won't conflict with other tasks
+ * holding the device lock because these tasks should always call
+ * usb_autopm_resume_device(), thereby preventing any unwanted autoresume.
*/
int usb_reset_device(struct usb_device *udev)
{
@@ -2976,7 +2897,7 @@ int usb_reset_device(struct usb_device *udev)
* Other endpoints will be handled by re-enumeration. */
ep0_reinit(udev);
ret = hub_port_init(parent_hub, udev, port1, i);
- if (ret >= 0)
+ if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
clear_bit(port1, parent_hub->busy_bits);
@@ -3092,6 +3013,7 @@ int usb_reset_composite_device(struct usb_device *udev,
drv = to_usb_driver(cintf->dev.driver);
if (drv->pre_reset)
(drv->pre_reset)(cintf);
+ /* FIXME: Unbind if pre_reset returns an error or isn't defined */
}
}
}
@@ -3110,6 +3032,7 @@ int usb_reset_composite_device(struct usb_device *udev,
drv = to_usb_driver(cintf->dev.driver);
if (drv->post_reset)
(drv->post_reset)(cintf);
+ /* FIXME: Unbind if post_reset returns an error or isn't defined */
}
if (cintf != iface)
up(&cintf->dev.sem);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f9fed34bf7d8..25f63f1096b4 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -34,13 +34,14 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
{
struct completion done;
unsigned long expire;
- int status;
+ int retval;
+ int status = urb->status;
init_completion(&done);
urb->context = &done;
urb->actual_length = 0;
- status = usb_submit_urb(urb, GFP_NOIO);
- if (unlikely(status))
+ retval = usb_submit_urb(urb, GFP_NOIO);
+ if (unlikely(retval))
goto out;
expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
@@ -55,15 +56,15 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
urb->transfer_buffer_length);
usb_kill_urb(urb);
- status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
+ retval = status == -ENOENT ? -ETIMEDOUT : status;
} else
- status = urb->status;
+ retval = status;
out:
if (actual_length)
*actual_length = urb->actual_length;
usb_free_urb(urb);
- return status;
+ return retval;
}
/*-------------------------------------------------------------------*/
@@ -250,6 +251,7 @@ static void sg_clean (struct usb_sg_request *io)
static void sg_complete (struct urb *urb)
{
struct usb_sg_request *io = urb->context;
+ int status = urb->status;
spin_lock (&io->lock);
@@ -265,21 +267,21 @@ static void sg_complete (struct urb *urb)
*/
if (io->status
&& (io->status != -ECONNRESET
- || urb->status != -ECONNRESET)
+ || status != -ECONNRESET)
&& urb->actual_length) {
dev_err (io->dev->bus->controller,
"dev %s ep%d%s scatterlist error %d/%d\n",
io->dev->devpath,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
- urb->status, io->status);
+ status, io->status);
// BUG ();
}
- if (io->status == 0 && urb->status && urb->status != -ECONNRESET) {
- int i, found, status;
+ if (io->status == 0 && status && status != -ECONNRESET) {
+ int i, found, retval;
- io->status = urb->status;
+ io->status = status;
/* the previous urbs, and this one, completed already.
* unlink pending urbs so they won't rx/tx bad data.
@@ -290,13 +292,13 @@ static void sg_complete (struct urb *urb)
if (!io->urbs [i] || !io->urbs [i]->dev)
continue;
if (found) {
- status = usb_unlink_urb (io->urbs [i]);
- if (status != -EINPROGRESS
- && status != -ENODEV
- && status != -EBUSY)
+ retval = usb_unlink_urb (io->urbs [i]);
+ if (retval != -EINPROGRESS &&
+ retval != -ENODEV &&
+ retval != -EBUSY)
dev_err (&io->dev->dev,
"%s, unlink --> %d\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
} else if (urb == io->urbs [i])
found = 1;
}
@@ -404,8 +406,6 @@ int usb_sg_init (
io->urbs [i]->complete = sg_complete;
io->urbs [i]->context = io;
- io->urbs [i]->status = -EINPROGRESS;
- io->urbs [i]->actual_length = 0;
/*
* Some systems need to revert to PIO when DMA is temporarily
@@ -499,7 +499,8 @@ void usb_sg_wait (struct usb_sg_request *io)
/* queue the urbs. */
spin_lock_irq (&io->lock);
- for (i = 0; i < entries && !io->status; i++) {
+ i = 0;
+ while (i < entries && !io->status) {
int retval;
io->urbs [i]->dev = io->dev;
@@ -516,7 +517,6 @@ void usb_sg_wait (struct usb_sg_request *io)
case -ENOMEM:
io->urbs[i]->dev = NULL;
retval = 0;
- i--;
yield ();
break;
@@ -527,6 +527,7 @@ void usb_sg_wait (struct usb_sg_request *io)
* URBs are queued at once; N milliseconds?
*/
case 0:
+ ++i;
cpu_relax ();
break;
@@ -1385,6 +1386,36 @@ struct device_type usb_if_device_type = {
.uevent = usb_if_uevent,
};
+static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
+ struct usb_host_config *config,
+ u8 inum)
+{
+ struct usb_interface_assoc_descriptor *retval = NULL;
+ struct usb_interface_assoc_descriptor *intf_assoc;
+ int first_intf;
+ int last_intf;
+ int i;
+
+ for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) {
+ intf_assoc = config->intf_assoc[i];
+ if (intf_assoc->bInterfaceCount == 0)
+ continue;
+
+ first_intf = intf_assoc->bFirstInterface;
+ last_intf = first_intf + (intf_assoc->bInterfaceCount - 1);
+ if (inum >= first_intf && inum <= last_intf) {
+ if (!retval)
+ retval = intf_assoc;
+ else
+ dev_err(&dev->dev, "Interface #%d referenced"
+ " by multiple IADs\n", inum);
+ }
+ }
+
+ return retval;
+}
+
+
/*
* usb_set_configuration - Makes a particular device setting be current
* @dev: the device whose configuration is being updated
@@ -1531,6 +1562,7 @@ free_interfaces:
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
+ intf->intf_assoc = find_iad(dev, cp, i);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
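For reference, the find_iad() helper added above treats an Interface Association Descriptor as covering the inclusive interface range bFirstInterface through bFirstInterface + bInterfaceCount - 1. A compilable sketch of that range test; the struct below is a simplified stand-in, not the real struct usb_interface_assoc_descriptor:

#include <stdio.h>

/* simplified stand-in for struct usb_interface_assoc_descriptor */
struct iad {
    unsigned char bFirstInterface;
    unsigned char bInterfaceCount;
};

static int iad_covers(const struct iad *a, unsigned inum)
{
    unsigned first = a->bFirstInterface;
    unsigned last  = first + a->bInterfaceCount - 1;

    return a->bInterfaceCount && inum >= first && inum <= last;
}

int main(void)
{
    struct iad audio_iad = { .bFirstInterface = 2, .bInterfaceCount = 3 };

    /* covers interfaces 2, 3 and 4 but not 1 or 5 */
    for (unsigned i = 1; i <= 5; i++)
        printf("intf %u: %s\n", i, iad_covers(&audio_iad, i) ? "yes" : "no");
    return 0;
}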
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 739f520908aa..aa21b38a31ce 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -30,10 +30,28 @@
static const struct usb_device_id usb_quirk_list[] = {
/* HP 5300/5370C scanner */
{ USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 },
+ /* Benq S2W 3300U */
+ { USB_DEVICE(0x04a5, 0x20b0), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+ /* Seiko Epson Corp. Perfection 1200 */
+ { USB_DEVICE(0x04b8, 0x0104), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
/* Seiko Epson Corp - Perfection 1670 */
{ USB_DEVICE(0x04b8, 0x011f), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+ /* Samsung ML-2510 Series printer */
+ { USB_DEVICE(0x04e8, 0x327e), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
/* Elsa MicroLink 56k (V.250) */
{ USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+ /* Ultima Electronics Corp. */
+ { USB_DEVICE(0x05d8, 0x4005), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+ /* Umax [hex] Astra 3400U */
+ { USB_DEVICE(0x1606, 0x0060), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+
+ /* Philips PSC805 audio device */
+ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* RIM Blackberry */
+ { USB_DEVICE(0x0fca, 0x0001), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+ { USB_DEVICE(0x0fca, 0x0004), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
+ { USB_DEVICE(0x0fca, 0x0006), .driver_info = USB_QUIRK_NO_AUTOSUSPEND },
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index be37c863fdfb..2ab222be8fd1 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -169,6 +169,73 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
+
+#if defined(CONFIG_USB_PERSIST) || defined(CONFIG_USB_SUSPEND)
+static const char power_group[] = "power";
+#endif
+
+#ifdef CONFIG_USB_PERSIST
+
+static ssize_t
+show_persist(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev = to_usb_device(dev);
+
+ return sprintf(buf, "%d\n", udev->persist_enabled);
+}
+
+static ssize_t
+set_persist(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ int value;
+
+ /* Hubs are always enabled for USB_PERSIST */
+ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ return -EPERM;
+
+ if (sscanf(buf, "%d", &value) != 1)
+ return -EINVAL;
+ usb_pm_lock(udev);
+ udev->persist_enabled = !!value;
+ usb_pm_unlock(udev);
+ return count;
+}
+
+static DEVICE_ATTR(persist, S_IRUGO | S_IWUSR, show_persist, set_persist);
+
+static int add_persist_attributes(struct device *dev)
+{
+ int rc = 0;
+
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+
+ /* Hubs are automatically enabled for USB_PERSIST */
+ if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
+ udev->persist_enabled = 1;
+ rc = sysfs_add_file_to_group(&dev->kobj,
+ &dev_attr_persist.attr,
+ power_group);
+ }
+ return rc;
+}
+
+static void remove_persist_attributes(struct device *dev)
+{
+ sysfs_remove_file_from_group(&dev->kobj,
+ &dev_attr_persist.attr,
+ power_group);
+}
+
+#else
+
+#define add_persist_attributes(dev) 0
+#define remove_persist_attributes(dev) do {} while (0)
+
+#endif /* CONFIG_USB_PERSIST */
+
#ifdef CONFIG_USB_SUSPEND
static ssize_t
@@ -276,8 +343,6 @@ set_level(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
-static char power_group[] = "power";
-
static int add_power_attributes(struct device *dev)
{
int rc = 0;
@@ -311,6 +376,7 @@ static void remove_power_attributes(struct device *dev)
#endif /* CONFIG_USB_SUSPEND */
+
/* Descriptor fields */
#define usb_descriptor_attr_le16(field, format_string) \
static ssize_t \
@@ -375,6 +441,54 @@ static struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
+/* Binary descriptors */
+
+static ssize_t
+read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct usb_device *udev = to_usb_device(
+ container_of(kobj, struct device, kobj));
+ size_t nleft = count;
+ size_t srclen, n;
+
+ usb_lock_device(udev);
+
+ /* The binary attribute begins with the device descriptor */
+ srclen = sizeof(struct usb_device_descriptor);
+ if (off < srclen) {
+ n = min_t(size_t, nleft, srclen - off);
+ memcpy(buf, off + (char *) &udev->descriptor, n);
+ nleft -= n;
+ buf += n;
+ off = 0;
+ } else {
+ off -= srclen;
+ }
+
+ /* Then follows the raw descriptor entry for the current
+ * configuration (config plus subsidiary descriptors).
+ */
+ if (udev->actconfig) {
+ int cfgno = udev->actconfig - udev->config;
+
+ srclen = __le16_to_cpu(udev->actconfig->desc.wTotalLength);
+ if (off < srclen) {
+ n = min_t(size_t, nleft, srclen - off);
+ memcpy(buf, off + udev->rawdescriptors[cfgno], n);
+ nleft -= n;
+ }
+ }
+ usb_unlock_device(udev);
+ return count - nleft;
+}
+
+static struct bin_attribute dev_bin_attr_descriptors = {
+ .attr = {.name = "descriptors", .mode = 0444},
+ .read = read_descriptors,
+ .size = 18 + 65535, /* dev descr + max-size raw descriptor */
+};
+
int usb_create_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
@@ -384,6 +498,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
if (retval)
return retval;
+ retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
+ if (retval)
+ goto error;
+
+ retval = add_persist_attributes(dev);
+ if (retval)
+ goto error;
+
retval = add_power_attributes(dev);
if (retval)
goto error;
@@ -421,9 +543,30 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
device_remove_file(dev, &dev_attr_product);
device_remove_file(dev, &dev_attr_serial);
remove_power_attributes(dev);
+ remove_persist_attributes(dev);
+ device_remove_bin_file(dev, &dev_bin_attr_descriptors);
sysfs_remove_group(&dev->kobj, &dev_attr_grp);
}
+/* Interface Association Descriptor fields */
+#define usb_intf_assoc_attr(field, format_string) \
+static ssize_t \
+show_iad_##field (struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct usb_interface *intf = to_usb_interface (dev); \
+ \
+ return sprintf (buf, format_string, \
+ intf->intf_assoc->field); \
+} \
+static DEVICE_ATTR(iad_##field, S_IRUGO, show_iad_##field, NULL);
+
+usb_intf_assoc_attr (bFirstInterface, "%02x\n")
+usb_intf_assoc_attr (bInterfaceCount, "%02d\n")
+usb_intf_assoc_attr (bFunctionClass, "%02x\n")
+usb_intf_assoc_attr (bFunctionSubClass, "%02x\n")
+usb_intf_assoc_attr (bFunctionProtocol, "%02x\n")
+
/* Interface fields */
#define usb_intf_attr(field, format_string) \
static ssize_t \
@@ -487,6 +630,18 @@ static ssize_t show_modalias(struct device *dev,
}
static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+static struct attribute *intf_assoc_attrs[] = {
+ &dev_attr_iad_bFirstInterface.attr,
+ &dev_attr_iad_bInterfaceCount.attr,
+ &dev_attr_iad_bFunctionClass.attr,
+ &dev_attr_iad_bFunctionSubClass.attr,
+ &dev_attr_iad_bFunctionProtocol.attr,
+ NULL,
+};
+static struct attribute_group intf_assoc_attr_grp = {
+ .attrs = intf_assoc_attrs,
+};
+
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
&dev_attr_bAlternateSetting.attr,
@@ -538,6 +693,8 @@ int usb_create_sysfs_intf_files(struct usb_interface *intf)
alt->string = usb_cache_string(udev, alt->desc.iInterface);
if (alt->string)
retval = device_create_file(dev, &dev_attr_interface);
+ if (intf->intf_assoc)
+ retval = sysfs_create_group(&dev->kobj, &intf_assoc_attr_grp);
usb_create_intf_ep_files(intf, udev);
return 0;
}
@@ -549,4 +706,5 @@ void usb_remove_sysfs_intf_files(struct usb_interface *intf)
usb_remove_intf_ep_files(intf);
device_remove_file(dev, &dev_attr_interface);
sysfs_remove_group(&dev->kobj, &intf_attr_grp);
+ sysfs_remove_group(&intf->dev.kobj, &intf_assoc_attr_grp);
}
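The new "descriptors" binary attribute added above exposes the device descriptor followed by the raw descriptors of the active configuration. A hedged userspace sketch that reads and partially decodes it; the sysfs path is illustrative and depends on where the device actually sits in the topology:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* illustrative path; substitute the real device directory */
    const char *path = "/sys/bus/usb/devices/1-1/descriptors";
    unsigned char buf[18 + 65535];     /* device descriptor + max config */
    FILE *f = fopen(path, "rb");
    size_t n;

    if (!f) {
        perror("fopen");
        return EXIT_FAILURE;
    }
    n = fread(buf, 1, sizeof(buf), f);
    fclose(f);

    /* bytes 8-11 of the device descriptor are idVendor/idProduct (LE) */
    if (n >= 18)
        printf("read %zu bytes; idVendor=%02x%02x idProduct=%02x%02x\n",
               n, buf[9], buf[8], buf[11], buf[10]);
    return 0;
}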
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 94ea9727ff55..be630228461c 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -4,6 +4,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/usb.h>
+#include <linux/wait.h>
#include "hcd.h"
#define to_urb(d) container_of(d, struct urb, kref)
@@ -11,6 +12,10 @@
static void urb_destroy(struct kref *kref)
{
struct urb *urb = to_urb(kref);
+
+ if (urb->transfer_flags & URB_FREE_BUFFER)
+ kfree(urb->transfer_buffer);
+
kfree(urb);
}
@@ -34,6 +39,7 @@ void usb_init_urb(struct urb *urb)
memset(urb, 0, sizeof(*urb));
kref_init(&urb->kref);
spin_lock_init(&urb->lock);
+ INIT_LIST_HEAD(&urb->anchor_list);
}
}
@@ -100,8 +106,60 @@ struct urb * usb_get_urb(struct urb *urb)
kref_get(&urb->kref);
return urb;
}
-
-
+
+/**
+ * usb_anchor_urb - anchors an URB while it is processed
+ * @urb: pointer to the urb to anchor
+ * @anchor: pointer to the anchor
+ *
+ * This can be called to keep access to URBs which are to be executed
+ * without having to track each of them individually.
+ */
+void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&anchor->lock, flags);
+ usb_get_urb(urb);
+ list_add_tail(&urb->anchor_list, &anchor->urb_list);
+ urb->anchor = anchor;
+ spin_unlock_irqrestore(&anchor->lock, flags);
+}
+EXPORT_SYMBOL_GPL(usb_anchor_urb);
+
+/**
+ * usb_unanchor_urb - unanchors an URB
+ * @urb: pointer to the urb to unanchor
+ *
+ * Call this to stop the system keeping track of this URB
+ */
+void usb_unanchor_urb(struct urb *urb)
+{
+ unsigned long flags;
+ struct usb_anchor *anchor;
+
+ if (!urb)
+ return;
+
+ anchor = urb->anchor;
+ if (!anchor)
+ return;
+
+ spin_lock_irqsave(&anchor->lock, flags);
+ if (unlikely(anchor != urb->anchor)) {
+ /* we've lost the race to another thread */
+ spin_unlock_irqrestore(&anchor->lock, flags);
+ return;
+ }
+ urb->anchor = NULL;
+ list_del(&urb->anchor_list);
+ spin_unlock_irqrestore(&anchor->lock, flags);
+ usb_put_urb(urb);
+ if (list_empty(&anchor->urb_list))
+ wake_up(&anchor->wait);
+}
+EXPORT_SYMBOL_GPL(usb_unanchor_urb);
+
/*-------------------------------------------------------------------*/
/**
@@ -382,55 +440,57 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
* @urb: pointer to urb describing a previously submitted request,
* may be NULL
*
- * This routine cancels an in-progress request. URBs complete only
- * once per submission, and may be canceled only once per submission.
- * Successful cancellation means the requests's completion handler will
- * be called with a status code indicating that the request has been
- * canceled (rather than any other code) and will quickly be removed
- * from host controller data structures.
- *
- * This request is always asynchronous.
- * Success is indicated by returning -EINPROGRESS,
- * at which time the URB will normally have been unlinked but not yet
- * given back to the device driver. When it is called, the completion
- * function will see urb->status == -ECONNRESET. Failure is indicated
- * by any other return value. Unlinking will fail when the URB is not
- * currently "linked" (i.e., it was never submitted, or it was unlinked
- * before, or the hardware is already finished with it), even if the
- * completion handler has not yet run.
+ * This routine cancels an in-progress request. URBs complete only once
+ * per submission, and may be canceled only once per submission.
+ * Successful cancellation means termination of @urb will be expedited
+ * and the completion handler will be called with a status code
+ * indicating that the request has been canceled (rather than any other
+ * code).
+ *
+ * This request is always asynchronous. Success is indicated by
+ * returning -EINPROGRESS, at which time the URB will probably not yet
+ * have been given back to the device driver. When it is eventually
+ * called, the completion function will see @urb->status == -ECONNRESET.
+ * Failure is indicated by usb_unlink_urb() returning any other value.
+ * Unlinking will fail when @urb is not currently "linked" (i.e., it was
+ * never submitted, or it was unlinked before, or the hardware is already
+ * finished with it), even if the completion handler has not yet run.
*
* Unlinking and Endpoint Queues:
*
+ * [The behaviors and guarantees described below do not apply to virtual
+ * root hubs but only to endpoint queues for physical USB devices.]
+ *
* Host Controller Drivers (HCDs) place all the URBs for a particular
* endpoint in a queue. Normally the queue advances as the controller
* hardware processes each request. But when an URB terminates with an
- * error its queue stops, at least until that URB's completion routine
- * returns. It is guaranteed that the queue will not restart until all
- * its unlinked URBs have been fully retired, with their completion
- * routines run, even if that's not until some time after the original
- * completion handler returns. Normally the same behavior and guarantees
- * apply when an URB terminates because it was unlinked; however if an
- * URB is unlinked before the hardware has started to execute it, then
- * its queue is not guaranteed to stop until all the preceding URBs have
- * completed.
- *
- * This means that USB device drivers can safely build deep queues for
- * large or complex transfers, and clean them up reliably after any sort
- * of aborted transfer by unlinking all pending URBs at the first fault.
- *
- * Note that an URB terminating early because a short packet was received
- * will count as an error if and only if the URB_SHORT_NOT_OK flag is set.
- * Also, that all unlinks performed in any URB completion handler must
- * be asynchronous.
- *
- * Queues for isochronous endpoints are treated differently, because they
- * advance at fixed rates. Such queues do not stop when an URB is unlinked.
- * An unlinked URB may leave a gap in the stream of packets. It is undefined
- * whether such gaps can be filled in.
- *
- * When a control URB terminates with an error, it is likely that the
- * status stage of the transfer will not take place, even if it is merely
- * a soft error resulting from a short-packet with URB_SHORT_NOT_OK set.
+ * error its queue generally stops (see below), at least until that URB's
+ * completion routine returns. It is guaranteed that a stopped queue
+ * will not restart until all its unlinked URBs have been fully retired,
+ * with their completion routines run, even if that's not until some time
+ * after the original completion handler returns. The same behavior and
+ * guarantee apply when an URB terminates because it was unlinked.
+ *
+ * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
+ * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
+ * and -EREMOTEIO. Control endpoint queues behave the same way except
+ * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
+ * for isochronous endpoints are treated differently, because they must
+ * advance at fixed rates. Such queues do not stop when an URB
+ * encounters an error or is unlinked. An unlinked isochronous URB may
+ * leave a gap in the stream of packets; it is undefined whether such
+ * gaps can be filled in.
+ *
+ * Note that early termination of an URB because a short packet was
+ * received will generate a -EREMOTEIO error if and only if the
+ * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
+ * drivers can build deep queues for large or complex bulk transfers
+ * and clean them up reliably after any sort of aborted transfer by
+ * unlinking all pending URBs at the first fault.
+ *
+ * When a control URB terminates with an error other than -EREMOTEIO, it
+ * is quite likely that the status stage of the transfer will not take
+ * place.
*/
int usb_unlink_urb(struct urb *urb)
{
@@ -478,6 +538,48 @@ void usb_kill_urb(struct urb *urb)
spin_unlock_irq(&urb->lock);
}
+/**
+ * usb_kill_anchored_urbs - cancel transfer requests en masse
+ * @anchor: anchor the requests are bound to
+ *
+ * This allows all outstanding URBs to be killed, starting
+ * from the back of the queue.
+ */
+void usb_kill_anchored_urbs(struct usb_anchor *anchor)
+{
+ struct urb *victim;
+
+ spin_lock_irq(&anchor->lock);
+ while (!list_empty(&anchor->urb_list)) {
+ victim = list_entry(anchor->urb_list.prev, struct urb, anchor_list);
+ /* we must make sure the URB isn't freed before we kill it */
+ usb_get_urb(victim);
+ spin_unlock_irq(&anchor->lock);
+ /* this will unanchor the URB */
+ usb_kill_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irq(&anchor->lock);
+ }
+ spin_unlock_irq(&anchor->lock);
+}
+EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
+
+/**
+ * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
+ * @anchor: the anchor you want to become unused
+ * @timeout: how long you are willing to wait in milliseconds
+ *
+ * Call this if you want to be sure all of an anchor's
+ * URBs have finished.
+ */
+int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
+ unsigned int timeout)
+{
+ return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
+ msecs_to_jiffies(timeout));
+}
+EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
+
EXPORT_SYMBOL(usb_init_urb);
EXPORT_SYMBOL(usb_alloc_urb);
EXPORT_SYMBOL(usb_free_urb);
@@ -485,4 +587,3 @@ EXPORT_SYMBOL(usb_get_urb);
EXPORT_SYMBOL(usb_submit_urb);
EXPORT_SYMBOL(usb_unlink_urb);
EXPORT_SYMBOL(usb_kill_urb);
-
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 4a6299bd0047..0fee5c66fd64 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -253,6 +253,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
dev->dev.bus = &usb_bus_type;
dev->dev.type = &usb_device_type;
dev->dev.dma_mask = bus->controller->dma_mask;
+ set_dev_node(&dev->dev, dev_to_node(bus->controller));
dev->state = USB_STATE_ATTACHED;
INIT_LIST_HEAD(&dev->ep0.urb_list);
@@ -578,11 +579,12 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
* address (through the pointer provided).
*
* These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags
- * to avoid behaviors like using "DMA bounce buffers", or tying down I/O
- * mapping hardware for long idle periods. The implementation varies between
+ * to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU
+ * hardware during URB completion/resubmit. The implementation varies between
* platforms, depending on details of how DMA will work to this device.
- * Using these buffers also helps prevent cacheline sharing problems on
- * architectures where CPU caches are not DMA-coherent.
+ * Using these buffers also eliminates cacheline sharing problems on
+ * architectures where CPU caches are not DMA-coherent. On systems without
+ * bus-snooping caches, these buffers are uncached.
*
* When the buffer is no longer used, free it with usb_buffer_free().
*/
@@ -607,7 +609,7 @@ void *usb_buffer_alloc(
*
* This reclaims an I/O buffer, letting it be reused. The memory must have
* been allocated using usb_buffer_alloc(), and the parameters must match
- * those provided in that allocation request.
+ * those provided in that allocation request.
*/
void usb_buffer_free(
struct usb_device *dev,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index bf2eb0dae2ec..ad5fa0338f49 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -52,8 +52,16 @@ static inline void usb_pm_unlock(struct usb_device *udev)
#else
-#define usb_port_suspend(dev) 0
-#define usb_port_resume(dev) 0
+static inline int usb_port_suspend(struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline int usb_port_resume(struct usb_device *udev)
+{
+ return 0;
+}
+
static inline void usb_pm_lock(struct usb_device *udev) {}
static inline void usb_pm_unlock(struct usb_device *udev) {}
@@ -100,11 +108,13 @@ static inline int is_usb_device_driver(struct device_driver *drv)
static inline void mark_active(struct usb_interface *f)
{
f->is_active = 1;
+ f->dev.power.power_state.event = PM_EVENT_ON;
}
static inline void mark_quiesced(struct usb_interface *f)
{
f->is_active = 0;
+ f->dev.power.power_state.event = PM_EVENT_SUSPEND;
}
static inline int is_active(const struct usb_interface *f)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index f771a7cae9ec..767aed5b4bea 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -42,6 +42,20 @@ config USB_GADGET
For more information, see <http://www.linux-usb.org/gadget> and
the kernel DocBook documentation for this API.
+config USB_GADGET_DEBUG
+ boolean "Debugging messages"
+ depends on USB_GADGET && DEBUG_KERNEL && EXPERIMENTAL
+ help
+ Many controller and gadget drivers will print some debugging
+ messages if you use this option to ask for those messages.
+
+ Avoid enabling these messages, even if you're actively
+ debugging such a driver. Many drivers will emit so many
+ messages that the driver timings are affected, which will
+ either create new failure modes or remove the one you're
+ trying to track down. Never enable these messages for a
+ production build.
+
config USB_GADGET_DEBUG_FILES
boolean "Debugging information files"
depends on USB_GADGET && PROC_FS
@@ -68,6 +82,27 @@ choice
Many controller drivers are platform-specific; these
often need board-specific hooks.
+config USB_GADGET_AMD5536UDC
+ boolean "AMD5536 UDC"
+ depends on PCI
+ select USB_GADGET_DUALSPEED
+ help
+ The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
+ It is a high-speed, DMA-capable USB device controller. Besides ep0
+ it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+ The UDC port supports OTG operation, and may be used as a host port
+ if it's not being used to implement peripheral or OTG roles.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "amd5536udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_AMD5536UDC
+ tristate
+ depends on USB_GADGET_AMD5536UDC
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
config USB_GADGET_FSL_USB2
boolean "Freescale Highspeed USB DR Peripheral Controller"
depends on MPC834x || PPC_MPC831x
@@ -142,6 +177,24 @@ config USB_PXA2XX_SMALL
default y if USB_ETH
default y if USB_G_SERIAL
+config USB_GADGET_M66592
+ boolean "Renesas M66592 USB Peripheral Controller"
+ select USB_GADGET_DUALSPEED
+ help
+ M66592 is a discrete USB peripheral controller chip that
+ supports both full and high speed USB 2.0 data transfers.
+ It has seven configurable endpoints, and endpoint zero.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "m66592_udc" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_M66592
+ tristate
+ depends on USB_GADGET_M66592
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
config USB_GADGET_GOKU
boolean "Toshiba TC86C001 'Goku-S'"
depends on PCI
@@ -208,6 +261,27 @@ config USB_OTG
Select this only if your OMAP board has a Mini-AB connector.
+config USB_GADGET_S3C2410
+ boolean "S3C2410 USB Device Controller"
+ depends on ARCH_S3C2410
+ help
+ Samsung's S3C2410 is an ARMv4 processor with an integrated
+ full speed USB 1.1 device controller. It has 4 configurable
+ endpoints, as well as endpoint zero (for control transfers).
+
+ This driver has been tested on the S3C2410, S3C2412, and
+ S3C2440 processors.
+
+config USB_S3C2410
+ tristate
+ depends on USB_GADGET_S3C2410
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
+config USB_S3C2410_DEBUG
+ boolean "S3C2410 udc debug messages"
+ depends on USB_GADGET_S3C2410
+
config USB_GADGET_AT91
boolean "AT91 USB Device Port"
depends on ARCH_AT91 && !ARCH_AT91SAM9RL
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 5db19396631c..1bc0f03550ce 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -1,14 +1,21 @@
#
# USB peripheral controller drivers
#
+ifeq ($(CONFIG_USB_GADGET_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
+
obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
obj-$(CONFIG_USB_NET2280) += net2280.o
+obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
obj-$(CONFIG_USB_PXA2XX) += pxa2xx_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
obj-$(CONFIG_USB_OMAP) += omap_udc.o
obj-$(CONFIG_USB_LH7A40X) += lh7a40x_udc.o
+obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
obj-$(CONFIG_USB_AT91) += at91_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
+obj-$(CONFIG_USB_M66592) += m66592-udc.o
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
new file mode 100644
index 000000000000..714156ca8fe4
--- /dev/null
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -0,0 +1,3454 @@
+/*
+ * amd5536udc.c -- AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2007 AMD (http://www.amd.com)
+ * Author: Thomas Dahlmann
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
+ * It is a high-speed, DMA-capable USB device controller. Besides ep0 it
+ * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+ *
+ * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
+ * be used as host port) and UOC bits PAD_EN and APU are set (should be done
+ * by BIOS init).
+ *
+ * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
+ * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
+ * can be used with gadget ether.
+ */
+
+/* debug control */
+/* #define UDC_VERBOSE */
+
+/* Driver strings */
+#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
+#define UDC_DRIVER_VERSION_STRING "01.00.0206 - $Revision: #3 $"
+
+/* system */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/dmapool.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+/* gadget stack */
+#include <linux/usb/ch9.h>
+#include <linux/usb_gadget.h>
+
+/* udc specific */
+#include "amd5536udc.h"
+
+
+static void udc_tasklet_disconnect(unsigned long);
+static void empty_req_queue(struct udc_ep *);
+static int udc_probe(struct udc *dev);
+static void udc_basic_init(struct udc *dev);
+static void udc_setup_endpoints(struct udc *dev);
+static void udc_soft_reset(struct udc *dev);
+static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
+static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
+static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
+static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
+ unsigned long buf_len, gfp_t gfp_flags);
+static int udc_remote_wakeup(struct udc *dev);
+static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void udc_pci_remove(struct pci_dev *pdev);
+
+/* description */
+static const char mod_desc[] = UDC_MOD_DESCRIPTION;
+static const char name[] = "amd5536udc";
+
+/* structure to hold endpoint function pointers */
+static const struct usb_ep_ops udc_ep_ops;
+
+/* received setup data */
+static union udc_setup_data setup_data;
+
+/* pointer to device object */
+static struct udc *udc;
+
+/* irq spin lock for soft reset */
+static DEFINE_SPINLOCK(udc_irq_spinlock);
+/* stall spin lock */
+static DEFINE_SPINLOCK(udc_stall_spinlock);
+
+/*
+* slave mode: pending bytes in rx fifo after nyet,
+* used if EPIN irq came but no req was available
+*/
+static unsigned int udc_rxfifo_pending;
+
+/* count soft resets after suspend to avoid loop */
+static int soft_reset_occured;
+static int soft_reset_after_usbreset_occured;
+
+/* timer */
+static struct timer_list udc_timer;
+static int stop_timer;
+
+/* set_rde -- Is used to control enabling of RX DMA. Problem is
+ * that UDC has only one bit (RDE) to enable/disable RX DMA for
+ * all OUT endpoints. So we have to handle race conditions like
+ * when OUT data reaches the fifo but no request was queued yet.
+ * This cannot be solved by letting the RX DMA disabled until a
+ * request gets queued because there may be other OUT packets
+ * in the FIFO (important for not blocking control traffic).
+ * The value of set_rde controls the corresponding timer.
+ *
+ * set_rde -1 == not used, means it is allowed to be set to 0 or 1
+ * set_rde 0 == do not touch RDE, do not start the RDE timer
+ * set_rde 1 == timer function will look whether FIFO has data
+ * set_rde 2 == set by timer function to enable RX DMA on next call
+ */
+static int set_rde = -1;
+
+static DECLARE_COMPLETION(on_exit);
+static struct timer_list udc_pollstall_timer;
+static int stop_pollstall_timer;
+static DECLARE_COMPLETION(on_pollstall_exit);
+
+/* tasklet for usb disconnect */
+static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
+ (unsigned long) &udc);
+
+
+/* endpoint names used for print */
+static const char ep0_string[] = "ep0in";
+static const char *ep_string[] = {
+ ep0_string,
+ "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
+ "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
+ "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
+ "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
+ "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
+ "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
+ "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
+};
+
+/* DMA usage flag */
+static int use_dma = 1;
+/* packet per buffer dma */
+static int use_dma_ppb = 1;
+/* with per descr. update */
+static int use_dma_ppb_du;
+/* buffer fill mode */
+static int use_dma_bufferfill_mode;
+/* full speed only mode */
+static int use_fullspeed;
+/* tx buffer size for high speed */
+static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
+
+/* module parameters */
+module_param(use_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma, "true for DMA");
+module_param(use_dma_ppb, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
+module_param(use_dma_ppb_du, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb_du,
+ "true for DMA in packet per buffer mode with descriptor update");
+module_param(use_fullspeed, bool, S_IRUGO);
+MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
+
+/*---------------------------------------------------------------------------*/
+/* Prints UDC device registers and endpoint irq registers */
+static void print_regs(struct udc *dev)
+{
+ DBG(dev, "------- Device registers -------\n");
+ DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
+ DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
+ DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
+ DBG(dev, "\n");
+ DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
+ DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
+ DBG(dev, "\n");
+ DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
+ DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
+ DBG(dev, "\n");
+ DBG(dev, "USE DMA = %d\n", use_dma);
+ if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
+ DBG(dev, "DMA mode = PPBNDU (packet per buffer "
+ "WITHOUT desc. update)\n");
+ dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
+ } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
+ DBG(dev, "DMA mode = PPBDU (packet per buffer "
+ "WITH desc. update)\n");
+ dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
+ }
+ if (use_dma && use_dma_bufferfill_mode) {
+ DBG(dev, "DMA mode = BF (buffer fill mode)\n");
+ dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
+ }
+ if (!use_dma) {
+ dev_info(&dev->pdev->dev, "FIFO mode\n");
+ }
+ DBG(dev, "-------------------------------------------------------\n");
+}
+
+/* Masks unused interrupts */
+static int udc_mask_unused_interrupts(struct udc *dev)
+{
+ u32 tmp;
+
+ /* mask all dev interrupts */
+ tmp = AMD_BIT(UDC_DEVINT_SVC) |
+ AMD_BIT(UDC_DEVINT_ENUM) |
+ AMD_BIT(UDC_DEVINT_US) |
+ AMD_BIT(UDC_DEVINT_UR) |
+ AMD_BIT(UDC_DEVINT_ES) |
+ AMD_BIT(UDC_DEVINT_SI) |
+ AMD_BIT(UDC_DEVINT_SOF)|
+ AMD_BIT(UDC_DEVINT_SC);
+ writel(tmp, &dev->regs->irqmsk);
+
+ /* mask all ep interrupts */
+ writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
+
+ return 0;
+}
+
+/* Enables endpoint 0 interrupts */
+static int udc_enable_ep0_interrupts(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "udc_enable_ep0_interrupts()\n");
+
+ /* read irq mask */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ /* enable ep0 irq's */
+ tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
+ & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
+ writel(tmp, &dev->regs->ep_irqmsk);
+
+ return 0;
+}
+
+/* Enables device interrupts for SET_INTF and SET_CONFIG */
+static int udc_enable_dev_setup_interrupts(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "enable device interrupts for setup data\n");
+
+ /* read irq mask */
+ tmp = readl(&dev->regs->irqmsk);
+
+ /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
+ tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
+ & AMD_UNMASK_BIT(UDC_DEVINT_SC)
+ & AMD_UNMASK_BIT(UDC_DEVINT_UR)
+ & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
+ & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
+ writel(tmp, &dev->regs->irqmsk);
+
+ return 0;
+}
+
+/* Calculates fifo start of endpoint based on preceding endpoints */
+static int udc_set_txfifo_addr(struct udc_ep *ep)
+{
+ struct udc *dev;
+ u32 tmp;
+ int i;
+
+ if (!ep || !(ep->in))
+ return -EINVAL;
+
+ dev = ep->dev;
+ ep->txfifo = dev->txfifo;
+
+ /* traverse ep's */
+ for (i = 0; i < ep->num; i++) {
+ if (dev->ep[i].regs) {
+ /* read fifo size */
+ tmp = readl(&dev->ep[i].regs->bufin_framenum);
+ tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
+ ep->txfifo += tmp;
+ }
+ }
+ return 0;
+}
+
+/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
+static u32 cnak_pending;
+
+static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
+{
+ if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
+ DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
+ cnak_pending |= 1 << (num);
+ ep->naking = 1;
+ } else
+ cnak_pending = cnak_pending & (~(1 << (num)));
+}
+
+
+/* Enables endpoint, is called by gadget driver */
+static int
+udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
+{
+ struct udc_ep *ep;
+ struct udc *dev;
+ u32 tmp;
+ unsigned long iflags;
+ u8 udc_csr_epix;
+
+ if (!usbep
+ || usbep->name == ep0_string
+ || !desc
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ dev = ep->dev;
+
+ DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&dev->lock, iflags);
+ ep->desc = desc;
+
+ ep->halted = 0;
+
+ /* set traffic type */
+ tmp = readl(&dev->ep[ep->num].regs->ctl);
+ tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
+ writel(tmp, &dev->ep[ep->num].regs->ctl);
+
+ /* set max packet size */
+ tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
+ tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE);
+ ep->ep.maxpacket = desc->wMaxPacketSize;
+ writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
+
+ /* IN ep */
+ if (ep->in) {
+
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num;
+
+ /* set buffer size (tx fifo entries) */
+ tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
+ /* double buffering: fifo size = 2 x max packet size */
+ tmp = AMD_ADDBITS(
+ tmp,
+ desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT
+ / UDC_DWORD_BYTES,
+ UDC_EPIN_BUFF_SIZE);
+ writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
+
+ /* calc. tx fifo base addr */
+ udc_set_txfifo_addr(ep);
+
+ /* flush fifo */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_F);
+ writel(tmp, &ep->regs->ctl);
+
+ /* OUT ep */
+ } else {
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+
+ /* set max packet size UDC CSR */
+ tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
+ tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize,
+ UDC_CSR_NE_MAX_PKT);
+ writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
+
+ if (use_dma && !ep->in) {
+ /* alloc and init BNA dummy request */
+ ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
+ ep->bna_occurred = 0;
+ }
+
+ if (ep->num != UDC_EP0OUT_IX)
+ dev->data_ep_enabled = 1;
+ }
+
+ /* set ep values */
+ tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ /* max packet */
+ tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT);
+ /* ep number */
+ tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
+ /* ep direction */
+ tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
+ /* ep type */
+ tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
+ /* ep config */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
+ /* ep interface */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
+ /* ep alt */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
+ /* write reg */
+ writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+ /* enable ep irq */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp &= AMD_UNMASK_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
+
+ /*
+ * clear NAK by writing CNAK
+ * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
+ */
+ if (!use_dma || ep->in) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+ tmp = desc->bEndpointAddress;
+ DBG(dev, "%s enabled\n", usbep->name);
+
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ return 0;
+}
+
+/* Resets endpoint */
+static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
+{
+ u32 tmp;
+
+ VDBG(ep->dev, "ep-%d reset\n", ep->num);
+ ep->desc = NULL;
+ ep->ep.ops = &udc_ep_ops;
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->ep.maxpacket = (u16) ~0;
+ /* set NAK */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_SNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 1;
+
+ /* disable interrupt */
+ tmp = readl(&regs->ep_irqmsk);
+ tmp |= AMD_BIT(ep->num);
+ writel(tmp, &regs->ep_irqmsk);
+
+ if (ep->in) {
+ /* unset P and IN bit of potential former DMA */
+ tmp = readl(&ep->regs->ctl);
+ tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
+ writel(tmp, &ep->regs->ctl);
+
+ tmp = readl(&ep->regs->sts);
+ tmp |= AMD_BIT(UDC_EPSTS_IN);
+ writel(tmp, &ep->regs->sts);
+
+ /* flush the fifo */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_F);
+ writel(tmp, &ep->regs->ctl);
+
+ }
+ /* reset desc pointer */
+ writel(0, &ep->regs->desptr);
+}
+
+/* Disables endpoint, is called by gadget driver */
+static int udc_ep_disable(struct usb_ep *usbep)
+{
+ struct udc_ep *ep = NULL;
+ unsigned long iflags;
+
+ if (!usbep)
+ return -EINVAL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (usbep->name == ep0_string || !ep->desc)
+ return -EINVAL;
+
+ DBG(ep->dev, "Disable ep-%d\n", ep->num);
+
+ spin_lock_irqsave(&ep->dev->lock, iflags);
+ udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
+ empty_req_queue(ep);
+ ep_init(ep->dev->regs, ep);
+ spin_unlock_irqrestore(&ep->dev->lock, iflags);
+
+ return 0;
+}
+
+/* Allocates request packet, called by gadget driver */
+static struct usb_request *
+udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
+{
+ struct udc_request *req;
+ struct udc_data_dma *dma_desc;
+ struct udc_ep *ep;
+
+ if (!usbep)
+ return NULL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+
+ VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
+ req = kzalloc(sizeof(struct udc_request), gfp);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_DONT_USE;
+ INIT_LIST_HEAD(&req->queue);
+
+ if (ep->dma) {
+ /* ep0 in requests are allocated from data pool here */
+ dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
+ &req->td_phys);
+ if (!dma_desc) {
+ kfree(req);
+ return NULL;
+ }
+
+ VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
+ "td_phys = %lx\n",
+ req, dma_desc,
+ (unsigned long)req->td_phys);
+ /* prevent from using desc. - set HOST BUSY */
+ dma_desc->status = AMD_ADDBITS(dma_desc->status,
+ UDC_DMA_STP_STS_BS_HOST_BUSY,
+ UDC_DMA_STP_STS_BS);
+ dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
+ req->td_data = dma_desc;
+ req->td_data_last = NULL;
+ req->chain_len = 1;
+ }
+
+ return &req->req;
+}
+
+/* Frees request packet, called by gadget driver */
+static void
+udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
+{
+ struct udc_ep *ep;
+ struct udc_request *req;
+
+ if (!usbep || !usbreq)
+ return;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ req = container_of(usbreq, struct udc_request, req);
+ VDBG(ep->dev, "free_req req=%p\n", req);
+ BUG_ON(!list_empty(&req->queue));
+ if (req->td_data) {
+ VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
+
+ /* free dma chain if created */
+ if (req->chain_len > 1) {
+ udc_free_dma_chain(ep->dev, req);
+ }
+
+ pci_pool_free(ep->dev->data_requests, req->td_data,
+ req->td_phys);
+ }
+ kfree(req);
+}
+
+/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
+static void udc_init_bna_dummy(struct udc_request *req)
+{
+ if (req) {
+ /* set last bit */
+ req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+ /* set next pointer to itself */
+ req->td_data->next = req->td_phys;
+ /* set HOST BUSY */
+ req->td_data->status
+ = AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_STP_STS_BS_DMA_DONE,
+ UDC_DMA_STP_STS_BS);
+#ifdef UDC_VERBOSE
+ pr_debug("bna desc = %p, sts = %08x\n",
+ req->td_data, req->td_data->status);
+#endif
+ }
+}
+
+/* Allocate BNA dummy descriptor */
+static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
+{
+ struct udc_request *req = NULL;
+ struct usb_request *_req = NULL;
+
+ /* alloc the dummy request */
+ _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
+ if (_req) {
+ req = container_of(_req, struct udc_request, req);
+ ep->bna_dummy_req = req;
+ udc_init_bna_dummy(req);
+ }
+ return req;
+}
+
+/* Write data to TX fifo for IN packets */
+static void
+udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
+{
+ u8 *req_buf;
+ u32 *buf;
+ int i, j;
+ unsigned bytes = 0;
+ unsigned remaining = 0;
+
+ if (!req || !ep)
+ return;
+
+ req_buf = req->buf + req->actual;
+ prefetch(req_buf);
+ remaining = req->length - req->actual;
+
+ buf = (u32 *) req_buf;
+
+ bytes = ep->ep.maxpacket;
+ if (bytes > remaining)
+ bytes = remaining;
+
+ /* dwords first */
+ for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
+ writel(*(buf + i), ep->txfifo);
+ }
+
+ /* remaining bytes must be written by byte access */
+ for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
+ writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
+ ep->txfifo);
+ }
+
+ /* dummy write confirm */
+ writel(0, &ep->regs->confirm);
+}
+
+/* Read dwords from RX fifo for OUT transfers */
+static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
+{
+ int i;
+
+ VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
+
+ for (i = 0; i < dwords; i++) {
+ *(buf + i) = readl(dev->rxfifo);
+ }
+ return 0;
+}
+
+/* Read bytes from RX fifo for OUT transfers */
+static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
+{
+ int i, j;
+ u32 tmp;
+
+ VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
+
+ /* dwords first */
+ for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
+ *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
+ }
+
+ /* remaining bytes must be read by byte access */
+ if (bytes % UDC_DWORD_BYTES) {
+ tmp = readl(dev->rxfifo);
+ for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
+ *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
+ tmp = tmp >> UDC_BITS_PER_BYTE;
+ }
+ }
+
+ return 0;
+}
+
+/* Read data from RX fifo for OUT transfers */
+static int
+udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
+{
+ u8 *buf;
+ unsigned buf_space;
+ unsigned bytes = 0;
+ unsigned finished = 0;
+
+ /* received number bytes */
+ bytes = readl(&ep->regs->sts);
+ bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
+
+ buf_space = req->req.length - req->req.actual;
+ buf = req->req.buf + req->req.actual;
+ if (bytes > buf_space) {
+ if ((buf_space % ep->ep.maxpacket) != 0) {
+ DBG(ep->dev,
+ "%s: rx %d bytes, rx-buf space = %d bytesn\n",
+ ep->ep.name, bytes, buf_space);
+ req->req.status = -EOVERFLOW;
+ }
+ bytes = buf_space;
+ }
+ req->req.actual += bytes;
+
+ /* last packet ? */
+ if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
+ || ((req->req.actual == req->req.length) && !req->req.zero))
+ finished = 1;
+
+ /* read rx fifo bytes */
+ VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
+ udc_rxfifo_read_bytes(ep->dev, buf, bytes);
+
+ return finished;
+}
+
+/* create/re-init a DMA descriptor or a DMA descriptor chain */
+static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
+{
+ int retval = 0;
+ u32 tmp;
+
+ VDBG(ep->dev, "prep_dma\n");
+ VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
+ ep->num, req->td_data);
+
+ /* set buffer pointer */
+ req->td_data->bufptr = req->req.dma;
+
+ /* set last bit */
+ req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+
+ /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
+ if (use_dma_ppb) {
+
+ retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
+ if (retval != 0) {
+ if (retval == -ENOMEM)
+ DBG(ep->dev, "Out of DMA memory\n");
+ return retval;
+ }
+ if (ep->in) {
+ if (req->req.length == ep->ep.maxpacket) {
+ /* write tx bytes */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ ep->ep.maxpacket,
+ UDC_DMA_IN_STS_TXBYTES);
+
+ }
+ }
+
+ }
+
+ if (ep->in) {
+ VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
+ "maxpacket=%d ep%d\n",
+ use_dma_ppb, req->req.length,
+ ep->ep.maxpacket, ep->num);
+ /*
+ * if bytes < max packet then tx bytes must
+ * be written in packet per buffer mode
+ */
+ if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
+ || ep->num == UDC_EP0OUT_IX
+ || ep->num == UDC_EP0IN_IX) {
+ /* write tx bytes */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ req->req.length,
+ UDC_DMA_IN_STS_TXBYTES);
+ /* reset frame num */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ 0,
+ UDC_DMA_IN_STS_FRAMENUM);
+ }
+ /* set HOST BUSY */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_STP_STS_BS_HOST_BUSY,
+ UDC_DMA_STP_STS_BS);
+ } else {
+ VDBG(ep->dev, "OUT set host ready\n");
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_STP_STS_BS_HOST_READY,
+ UDC_DMA_STP_STS_BS);
+
+
+ /* clear NAK by writing CNAK */
+ if (ep->naking) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+
+ }
+
+ return retval;
+}
+
+/* Completes request packet ... caller MUST hold lock */
+static void
+complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
+__releases(ep->dev->lock)
+__acquires(ep->dev->lock)
+{
+ struct udc *dev;
+ unsigned halted;
+
+ VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);
+
+ dev = ep->dev;
+ /* unmap DMA */
+ if (req->dma_mapping) {
+ if (ep->in)
+ pci_unmap_single(dev->pdev,
+ req->req.dma,
+ req->req.length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(dev->pdev,
+ req->req.dma,
+ req->req.length,
+ PCI_DMA_FROMDEVICE);
+ req->dma_mapping = 0;
+ req->req.dma = DMA_DONT_USE;
+ }
+
+ halted = ep->halted;
+ ep->halted = 1;
+
+ /* set new status if pending */
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = sts;
+
+ /* remove from ep queue */
+ list_del_init(&req->queue);
+
+ VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
+ &req->req, req->req.length, ep->ep.name, sts);
+
+ spin_unlock(&dev->lock);
+ req->req.complete(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->halted = halted;
+}
+
+/* frees pci pool descriptors of a DMA chain */
+static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
+{
+
+ int ret_val = 0;
+ struct udc_data_dma *td;
+ struct udc_data_dma *td_last = NULL;
+ unsigned int i;
+
+ DBG(dev, "free chain req = %p\n", req);
+
+ /* do not free first desc., will be done by free for request */
+ td_last = req->td_data;
+ td = phys_to_virt(td_last->next);
+
+ for (i = 1; i < req->chain_len; i++) {
+
+ pci_pool_free(dev->data_requests, td,
+ (dma_addr_t) td_last->next);
+ td_last = td;
+ td = phys_to_virt(td_last->next);
+ }
+
+ return ret_val;
+}
+
+/* Iterates to the end of a DMA chain and returns last descriptor */
+static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
+{
+ struct udc_data_dma *td;
+
+ td = req->td_data;
+ while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
+ td = phys_to_virt(td->next);
+ }
+
+ return td;
+
+}
+
+/* Iterates to the end of a DMA chain and counts bytes received */
+static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
+{
+ struct udc_data_dma *td;
+ u32 count;
+
+ td = req->td_data;
+ /* received number bytes */
+ count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
+
+ while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
+ td = phys_to_virt(td->next);
+ /* received number bytes */
+ if (td) {
+ count += AMD_GETBITS(td->status,
+ UDC_DMA_OUT_STS_RXBYTES);
+ }
+ }
+
+ return count;
+
+}
+
+/* Creates or re-inits a DMA chain */
+static int udc_create_dma_chain(
+ struct udc_ep *ep,
+ struct udc_request *req,
+ unsigned long buf_len, gfp_t gfp_flags
+)
+{
+ unsigned long bytes = req->req.length;
+ unsigned int i;
+ dma_addr_t dma_addr;
+ struct udc_data_dma *td = NULL;
+ struct udc_data_dma *last = NULL;
+ unsigned long txbytes;
+ unsigned create_new_chain = 0;
+ unsigned len;
+
+ VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
+ bytes, buf_len);
+ dma_addr = DMA_DONT_USE;
+
+ /* unset L bit in first desc for OUT */
+ if (!ep->in) {
+ req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
+ }
+
+ /* alloc only new desc's if not already available */
+ len = req->req.length / ep->ep.maxpacket;
+ if (req->req.length % ep->ep.maxpacket) {
+ len++;
+ }
+
+ if (len > req->chain_len) {
+ /* shorter chain already allocated before */
+ if (req->chain_len > 1) {
+ udc_free_dma_chain(ep->dev, req);
+ }
+ req->chain_len = len;
+ create_new_chain = 1;
+ }
+
+ td = req->td_data;
+ /* gen. required number of descriptors and buffers */
+ for (i = buf_len; i < bytes; i += buf_len) {
+ /* create or determine next desc. */
+ if (create_new_chain) {
+
+ td = pci_pool_alloc(ep->dev->data_requests,
+ gfp_flags, &dma_addr);
+ if (!td)
+ return -ENOMEM;
+
+ td->status = 0;
+ } else if (i == buf_len) {
+ /* first td */
+ td = (struct udc_data_dma *) phys_to_virt(
+ req->td_data->next);
+ td->status = 0;
+ } else {
+ td = (struct udc_data_dma *) phys_to_virt(last->next);
+ td->status = 0;
+ }
+
+
+ if (td)
+ td->bufptr = req->req.dma + i; /* assign buffer */
+ else
+ break;
+
+ /* short packet ? */
+ if ((bytes - i) >= buf_len) {
+ txbytes = buf_len;
+ } else {
+ /* short packet */
+ txbytes = bytes - i;
+ }
+
+ /* link td and assign tx bytes */
+ if (i == buf_len) {
+ if (create_new_chain) {
+ req->td_data->next = dma_addr;
+ } else {
+ /* req->td_data->next = virt_to_phys(td); */
+ }
+ /* write tx bytes */
+ if (ep->in) {
+ /* first desc */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ ep->ep.maxpacket,
+ UDC_DMA_IN_STS_TXBYTES);
+ /* second desc */
+ td->status = AMD_ADDBITS(td->status,
+ txbytes,
+ UDC_DMA_IN_STS_TXBYTES);
+ }
+ } else {
+ if (create_new_chain) {
+ last->next = dma_addr;
+ } else {
+ /* last->next = virt_to_phys(td); */
+ }
+ if (ep->in) {
+ /* write tx bytes */
+ td->status = AMD_ADDBITS(td->status,
+ txbytes,
+ UDC_DMA_IN_STS_TXBYTES);
+ }
+ }
+ last = td;
+ }
+ /* set last bit */
+ if (td) {
+ td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
+ /* last desc. points to itself */
+ req->td_data_last = td;
+ }
+
+ return 0;
+}
+
+/* Enabling RX DMA */
+static void udc_set_rde(struct udc *dev)
+{
+ u32 tmp;
+
+ VDBG(dev, "udc_set_rde()\n");
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* set RDE */
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_RDE);
+ writel(tmp, &dev->regs->ctl);
+}
+
+/* Queues a request packet, called by gadget driver */
+static int
+udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
+{
+ int retval = 0;
+ u8 open_rxfifo = 0;
+ unsigned long iflags;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ struct udc *dev;
+ u32 tmp;
+
+ /* check the inputs */
+ req = container_of(usbreq, struct udc_request, req);
+
+ if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
+ || !list_empty(&req->queue))
+ return -EINVAL;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
+ return -EINVAL;
+
+ VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
+ dev = ep->dev;
+
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* map dma (usually done before) */
+ if (ep->dma && usbreq->length != 0
+ && (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
+ VDBG(dev, "DMA map req %p\n", req);
+ if (ep->in)
+ usbreq->dma = pci_map_single(dev->pdev,
+ usbreq->buf,
+ usbreq->length,
+ PCI_DMA_TODEVICE);
+ else
+ usbreq->dma = pci_map_single(dev->pdev,
+ usbreq->buf,
+ usbreq->length,
+ PCI_DMA_FROMDEVICE);
+ req->dma_mapping = 1;
+ }
+
+ VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
+ usbep->name, usbreq, usbreq->length,
+ req->td_data, usbreq->buf);
+
+ spin_lock_irqsave(&dev->lock, iflags);
+ usbreq->actual = 0;
+ usbreq->status = -EINPROGRESS;
+ req->dma_done = 0;
+
+ /* on empty queue just do first transfer */
+ if (list_empty(&ep->queue)) {
+ /* zlp */
+ if (usbreq->length == 0) {
+ /* IN zlp's are handled by hardware */
+ complete_req(ep, req, 0);
+ VDBG(dev, "%s: zlp\n", ep->ep.name);
+ /*
+ * if set_config or set_intf is waiting for ack by zlp
+ * then set CSR_DONE
+ */
+ if (dev->set_cfg_not_acked) {
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
+ writel(tmp, &dev->regs->ctl);
+ dev->set_cfg_not_acked = 0;
+ }
+ /* setup command is ACK'ed now by zlp */
+ if (dev->waiting_zlp_ack_ep0in) {
+ /* clear NAK by writing CNAK in EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
+ UDC_EP0IN_IX);
+ dev->waiting_zlp_ack_ep0in = 0;
+ }
+ goto finished;
+ }
+ if (ep->dma) {
+ retval = prep_dma(ep, req, gfp);
+ if (retval != 0)
+ goto finished;
+ /* write desc pointer to enable DMA */
+ if (ep->in) {
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(req->td_data->status,
+ UDC_DMA_IN_STS_BS_HOST_READY,
+ UDC_DMA_IN_STS_BS);
+ }
+
+ /* disable rx dma while descriptor update */
+ if (!ep->in) {
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* clear RDE */
+ tmp = readl(&dev->regs->ctl);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
+ writel(tmp, &dev->regs->ctl);
+ open_rxfifo = 1;
+
+ /*
+ * if BNA occurred then let BNA dummy desc.
+ * point to current desc.
+ */
+ if (ep->bna_occurred) {
+ VDBG(dev, "copy to BNA dummy desc.\n");
+ memcpy(ep->bna_dummy_req->td_data,
+ req->td_data,
+ sizeof(struct udc_data_dma));
+ }
+ }
+ /* write desc pointer */
+ writel(req->td_phys, &ep->regs->desptr);
+
+ /* clear NAK by writing CNAK */
+ if (ep->naking) {
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->naking = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+
+ if (ep->in) {
+ /* enable ep irq */
+ tmp = readl(&dev->regs->ep_irqmsk);
+ tmp &= AMD_UNMASK_BIT(ep->num);
+ writel(tmp, &dev->regs->ep_irqmsk);
+ }
+ }
+
+ } else if (ep->dma) {
+
+ /*
+ * prep_dma not used for OUT ep's, this is not possible
+ * for PPB modes, because of chain creation reasons
+ */
+ if (ep->in) {
+ retval = prep_dma(ep, req, gfp);
+ if (retval != 0)
+ goto finished;
+ }
+ }
+ VDBG(dev, "list_add\n");
+ /* add request to ep queue */
+ if (req) {
+
+ list_add_tail(&req->queue, &ep->queue);
+
+ /* open rxfifo if out data queued */
+ if (open_rxfifo) {
+ /* enable DMA */
+ req->dma_going = 1;
+ udc_set_rde(dev);
+ if (ep->num != UDC_EP0OUT_IX)
+ dev->data_ep_queued = 1;
+ }
+ /* stop OUT naking */
+ if (!ep->in) {
+ if (!use_dma && udc_rxfifo_pending) {
+ DBG(dev, "udc_queue(): pending bytes in"
+ "rxfifo after nyet\n");
+ /*
+ * read pending bytes after nyet:
+ * referring to isr
+ */
+ if (udc_rxfifo_read(ep, req)) {
+ /* finish */
+ complete_req(ep, req, 0);
+ }
+ udc_rxfifo_pending = 0;
+
+ }
+ }
+ }
+
+finished:
+ spin_unlock_irqrestore(&dev->lock, iflags);
+ return retval;
+}
+
+/* Empty request queue of an endpoint; caller holds spinlock */
+static void empty_req_queue(struct udc_ep *ep)
+{
+ struct udc_request *req;
+
+ ep->halted = 1;
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct udc_request,
+ queue);
+ complete_req(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* Dequeues a request packet, called by gadget driver */
+static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
+{
+ struct udc_ep *ep;
+ struct udc_request *req;
+ unsigned halted;
+ unsigned long iflags;
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
+ && ep->num != UDC_EP0OUT_IX)))
+ return -EINVAL;
+
+ req = container_of(usbreq, struct udc_request, req);
+
+ spin_lock_irqsave(&ep->dev->lock, iflags);
+ halted = ep->halted;
+ ep->halted = 1;
+ /* request in processing or next one */
+ if (ep->queue.next == &req->queue) {
+ if (ep->dma && req->dma_going) {
+ if (ep->in)
+ ep->cancel_transfer = 1;
+ else {
+ u32 tmp;
+ u32 dma_sts;
+ /* stop potential receive DMA */
+ tmp = readl(&udc->regs->ctl);
+ writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
+ &udc->regs->ctl);
+ /*
+ * Cancel transfer later in ISR
+ * if descriptor was touched.
+ */
+ dma_sts = AMD_GETBITS(req->td_data->status,
+ UDC_DMA_OUT_STS_BS);
+ if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
+ ep->cancel_transfer = 1;
+ else {
+ udc_init_bna_dummy(ep->req);
+ writel(ep->bna_dummy_req->td_phys,
+ &ep->regs->desptr);
+ }
+ writel(tmp, &udc->regs->ctl);
+ }
+ }
+ }
+ complete_req(ep, req, -ECONNRESET);
+ ep->halted = halted;
+
+ spin_unlock_irqrestore(&ep->dev->lock, iflags);
+ return 0;
+}
+
+/* Halt or clear halt of endpoint */
+static int
+udc_set_halt(struct usb_ep *usbep, int halt)
+{
+ struct udc_ep *ep;
+ u32 tmp;
+ unsigned long iflags;
+ int retval = 0;
+
+ if (!usbep)
+ return -EINVAL;
+
+ pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);
+
+ ep = container_of(usbep, struct udc_ep, ep);
+ if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
+ return -EINVAL;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&udc_stall_spinlock, iflags);
+ /* halt or clear halt */
+ if (halt) {
+ if (ep->num == 0)
+ ep->dev->stall_ep0in = 1;
+ else {
+ /*
+ * set STALL
+ * rxfifo empty not taken into account
+ */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ ep->halted = 1;
+
+ /* setup poll timer */
+ if (!timer_pending(&udc_pollstall_timer)) {
+ udc_pollstall_timer.expires = jiffies +
+ HZ * UDC_POLLSTALL_TIMER_USECONDS
+ / (1000 * 1000);
+ if (!stop_pollstall_timer) {
+ DBG(ep->dev, "start polltimer\n");
+ add_timer(&udc_pollstall_timer);
+ }
+ }
+ }
+ } else {
+ /* ep is halted by set_halt() before */
+ if (ep->halted) {
+ tmp = readl(&ep->regs->ctl);
+ /* clear stall bit */
+ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+ /* clear NAK by writing CNAK */
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->halted = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+ }
+ spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+ return retval;
+}
+
+/* gadget interface */
+static const struct usb_ep_ops udc_ep_ops = {
+ .enable = udc_ep_enable,
+ .disable = udc_ep_disable,
+
+ .alloc_request = udc_alloc_request,
+ .free_request = udc_free_request,
+
+ .queue = udc_queue,
+ .dequeue = udc_dequeue,
+
+ .set_halt = udc_set_halt,
+ /* fifo ops not implemented */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Get frame counter (not implemented) */
+static int udc_get_frame(struct usb_gadget *gadget)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Remote wakeup gadget interface */
+static int udc_wakeup(struct usb_gadget *gadget)
+{
+ struct udc *dev;
+
+ if (!gadget)
+ return -EINVAL;
+ dev = container_of(gadget, struct udc, gadget);
+ udc_remote_wakeup(dev);
+
+ return 0;
+}
+
+/* gadget operations */
+static const struct usb_gadget_ops udc_ops = {
+ .wakeup = udc_wakeup,
+ .get_frame = udc_get_frame,
+};
+
+/* Sets up endpoint parameters, adds endpoints to linked list */
+static void make_ep_lists(struct udc *dev)
+{
+ /* make gadget ep lists */
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+ list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
+ &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
+ &dev->gadget.ep_list);
+ list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
+ &dev->gadget.ep_list);
+
+ /* fifo config */
+ dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
+ dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
+}
+
+/* init registers at driver load time */
+static int startup_registers(struct udc *dev)
+{
+ u32 tmp;
+
+ /* init controller by soft reset */
+ udc_soft_reset(dev);
+
+ /* mask not needed interrupts */
+ udc_mask_unused_interrupts(dev);
+
+ /* put into initial config */
+ udc_basic_init(dev);
+ /* link up all endpoints */
+ udc_setup_endpoints(dev);
+
+ /* program speed */
+ tmp = readl(&dev->regs->cfg);
+ if (use_fullspeed) {
+ tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
+ } else {
+ tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
+ }
+ writel(tmp, &dev->regs->cfg);
+
+ return 0;
+}
+
+/* Inits UDC context */
+static void udc_basic_init(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "udc_basic_init()\n");
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* stop poll stall timer */
+ if (timer_pending(&udc_pollstall_timer)) {
+ mod_timer(&udc_pollstall_timer, jiffies - 1);
+ }
+ /* disable DMA */
+ tmp = readl(&dev->regs->ctl);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
+ writel(tmp, &dev->regs->ctl);
+
+ /* enable dynamic CSR programming */
+ tmp = readl(&dev->regs->cfg);
+ tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
+ /* set self powered */
+ tmp |= AMD_BIT(UDC_DEVCFG_SP);
+ /* set remote wakeupable */
+ tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
+ writel(tmp, &dev->regs->cfg);
+
+ make_ep_lists(dev);
+
+ dev->data_ep_enabled = 0;
+ dev->data_ep_queued = 0;
+}
+
+/* Sets initial endpoint parameters */
+static void udc_setup_endpoints(struct udc *dev)
+{
+ struct udc_ep *ep;
+ u32 tmp;
+ u32 reg;
+
+ DBG(dev, "udc_setup_endpoints()\n");
+
+ /* read enum speed */
+ tmp = readl(&dev->regs->sts);
+ tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
+ if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
+ dev->gadget.speed = USB_SPEED_HIGH;
+ } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
+ dev->gadget.speed = USB_SPEED_FULL;
+ }
+
+ /* set basic ep parameters */
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
+ ep = &dev->ep[tmp];
+ ep->dev = dev;
+ ep->ep.name = ep_string[tmp];
+ ep->num = tmp;
+ /* txfifo size is calculated at enable time */
+ ep->txfifo = dev->txfifo;
+
+ /* fifo size */
+ if (tmp < UDC_EPIN_NUM) {
+ ep->fifo_depth = UDC_TXFIFO_SIZE;
+ ep->in = 1;
+ } else {
+ ep->fifo_depth = UDC_RXFIFO_SIZE;
+ ep->in = 0;
+
+ }
+ ep->regs = &dev->ep_regs[tmp];
+ /*
+ * ep will be reset only if ep was not enabled before to avoid
+ * disabling ep interrupts when ENUM interrupt occurs but ep is
+ * not enabled by gadget driver
+ */
+ if (!ep->desc) {
+ ep_init(dev->regs, ep);
+ }
+
+ if (use_dma) {
+ /*
+ * ep->dma is not really used, just to indicate that
+ * DMA is active: remove this
+ * dma regs = dev control regs
+ */
+ ep->dma = &dev->regs->ctl;
+
+ /* nak OUT endpoints until enable - not for ep0 */
+ if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
+ && tmp > UDC_EPIN_NUM) {
+ /* set NAK */
+ reg = readl(&dev->ep[tmp].regs->ctl);
+ reg |= AMD_BIT(UDC_EPCTL_SNAK);
+ writel(reg, &dev->ep[tmp].regs->ctl);
+ dev->ep[tmp].naking = 1;
+
+ }
+ }
+ }
+ /* EP0 max packet */
+ if (dev->gadget.speed == USB_SPEED_FULL) {
+ dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
+ dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
+ UDC_FS_EP0OUT_MAX_PKT_SIZE;
+ } else if (dev->gadget.speed == USB_SPEED_HIGH) {
+ dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
+ dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
+ }
+
+ /*
+ * with suspend bug workaround, ep0 params for gadget driver
+ * are set at gadget driver bind() call
+ */
+ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
+ dev->ep[UDC_EP0IN_IX].halted = 0;
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+
+ /* init cfg/alt/int */
+ dev->cur_config = 0;
+ dev->cur_intf = 0;
+ dev->cur_alt = 0;
+}
+
+/* Bring-up after Connect event: initial setup to be ready for ep0 events */
+static void usb_connect(struct udc *dev)
+{
+
+ dev_info(&dev->pdev->dev, "USB Connect\n");
+
+ dev->connected = 1;
+
+ /* put into initial config */
+ udc_basic_init(dev);
+
+ /* enable device setup interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+}
+
+/*
+ * Calls gadget with disconnect event, resets the UDC and performs the
+ * initial bring-up to be ready for ep0 events
+ */
+static void usb_disconnect(struct udc *dev)
+{
+
+ dev_info(&dev->pdev->dev, "USB Disconnect\n");
+
+ dev->connected = 0;
+
+ /* mask interrupts */
+ udc_mask_unused_interrupts(dev);
+
+ /* REVISIT there doesn't seem to be a point to having this
+ * talk to a tasklet ... do it directly, we already hold
+ * the spinlock needed to process the disconnect.
+ */
+
+ tasklet_schedule(&disconnect_tasklet);
+}
+
+/* Tasklet for disconnect to be outside of interrupt context */
+static void udc_tasklet_disconnect(unsigned long par)
+{
+ struct udc *dev = (struct udc *)(*((struct udc **) par));
+ u32 tmp;
+
+ DBG(dev, "Tasklet disconnect\n");
+ spin_lock_irq(&dev->lock);
+
+ if (dev->driver) {
+ spin_unlock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+
+ /* empty queues */
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
+ empty_req_queue(&dev->ep[tmp]);
+ }
+
+ }
+
+ /* disable ep0 */
+ ep_init(dev->regs,
+ &dev->ep[UDC_EP0IN_IX]);
+
+
+ if (!soft_reset_occured) {
+ /* init controller by soft reset */
+ udc_soft_reset(dev);
+ soft_reset_occured++;
+ }
+
+ /* re-enable dev interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+ /* back to full speed ? */
+ if (use_fullspeed) {
+ tmp = readl(&dev->regs->cfg);
+ tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
+ writel(tmp, &dev->regs->cfg);
+ }
+
+ spin_unlock_irq(&dev->lock);
+}
+
+/* Reset the UDC core */
+static void udc_soft_reset(struct udc *dev)
+{
+ unsigned long flags;
+
+ DBG(dev, "Soft reset\n");
+ /*
+ * clear possibly pending interrupts, because interrupt
+ * status is lost after soft reset;
+ * ep interrupt status reset
+ */
+ writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
+ /* device int. status reset */
+ writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
+
+ spin_lock_irqsave(&udc_irq_spinlock, flags);
+ writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+ readl(&dev->regs->cfg);
+ spin_unlock_irqrestore(&udc_irq_spinlock, flags);
+
+}
+
+/* RDE timer callback to set RDE bit */
+static void udc_timer_function(unsigned long v)
+{
+ u32 tmp;
+
+ spin_lock_irq(&udc_irq_spinlock);
+
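+ /*
+ * set_rde states: <= 0 means RDE was already set by udc_queue(),
+ * 1 means keep polling while the RX fifo is empty,
+ * > 1 means the fifo was filled on the last expiry, so set RDE now
+ */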
+ if (set_rde > 0) {
+ /*
+ * open the fifo if fifo was filled on last timer call
+ * conditionally
+ */
+ if (set_rde > 1) {
+ /* set RDE to receive setup data */
+ tmp = readl(&udc->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_RDE);
+ writel(tmp, &udc->regs->ctl);
+ set_rde = -1;
+ } else if (readl(&udc->regs->sts)
+ & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
+ /*
+ * if fifo empty setup polling, do not just
+ * open the fifo
+ */
+ udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
+ if (!stop_timer) {
+ add_timer(&udc_timer);
+ }
+ } else {
+ /*
+ * fifo contains data now, setup timer for opening
+ * the fifo when timer expires to be able to receive
+ * setup packets; when data packets get queued by the
+ * gadget layer, the timer is then forced to expire with
+ * set_rde=0 (RDE is set in udc_queue())
+ */
+ set_rde++;
+ udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
+ if (!stop_timer) {
+ add_timer(&udc_timer);
+ }
+ }
+
+ } else
+ set_rde = -1; /* RDE was set by udc_queue() */
+ spin_unlock_irq(&udc_irq_spinlock);
+ if (stop_timer)
+ complete(&on_exit);
+
+}
+
+/* Handle halt state, used in stall poll timer */
+static void udc_handle_halt_state(struct udc_ep *ep)
+{
+ u32 tmp;
+ /* keep handling the stall state as long as the endpoint is halted */
+ if (ep->halted == 1) {
+ tmp = readl(&ep->regs->ctl);
+ /* STALL cleared ? */
+ if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
+ /*
+ * FIXME: MSC spec requires that stall remains
+ * even on receipt of CLEAR_FEATURE HALT. So
+ * we would set STALL again here to be compliant.
+ * But with current mass storage drivers this does
+ * not work (would produce endless host retries).
+ * So we clear halt on CLEAR_FEATURE.
+ *
+ DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);*/
+
+ /* clear NAK by writing CNAK */
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &ep->regs->ctl);
+ ep->halted = 0;
+ UDC_QUEUE_CNAK(ep, ep->num);
+ }
+ }
+}
+
+/* Stall poll timer callback: polls the S bit and re-arms the timer while halted */
+static void udc_pollstall_timer_function(unsigned long v)
+{
+ struct udc_ep *ep;
+ int halted = 0;
+
+ spin_lock_irq(&udc_stall_spinlock);
+ /*
+ * only one IN and one OUT endpoint are handled
+ * IN poll stall
+ */
+ ep = &udc->ep[UDC_EPIN_IX];
+ udc_handle_halt_state(ep);
+ if (ep->halted)
+ halted = 1;
+ /* OUT poll stall */
+ ep = &udc->ep[UDC_EPOUT_IX];
+ udc_handle_halt_state(ep);
+ if (ep->halted)
+ halted = 1;
+
+ /* setup timer again when still halted */
+ if (!stop_pollstall_timer && halted) {
+ udc_pollstall_timer.expires = jiffies +
+ HZ * UDC_POLLSTALL_TIMER_USECONDS
+ / (1000 * 1000);
+ add_timer(&udc_pollstall_timer);
+ }
+ spin_unlock_irq(&udc_stall_spinlock);
+
+ if (stop_pollstall_timer)
+ complete(&on_pollstall_exit);
+}
+
+/* Inits endpoint 0 so that SETUP packets are processed */
+static void activate_control_endpoints(struct udc *dev)
+{
+ u32 tmp;
+
+ DBG(dev, "activate_control_endpoints\n");
+
+ /* flush fifo */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_F);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+
+ /* set ep0 directions */
+ dev->ep[UDC_EP0IN_IX].in = 1;
+ dev->ep[UDC_EP0OUT_IX].in = 0;
+
+ /* set buffer size (tx fifo entries) of EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
+ UDC_EPIN_BUFF_SIZE);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
+ UDC_EPIN_BUFF_SIZE);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
+
+ /* set max packet size of EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
+
+ /* set max packet size of EP0_OUT */
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
+ UDC_EP_MAX_PKT_SIZE);
+ writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
+
+ /* set max packet size of EP0 in UDC CSR */
+ tmp = readl(&dev->csr->ne[0]);
+ if (dev->gadget.speed == USB_SPEED_FULL)
+ tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
+ UDC_CSR_NE_MAX_PKT);
+ else if (dev->gadget.speed == USB_SPEED_HIGH)
+ tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
+ UDC_CSR_NE_MAX_PKT);
+ writel(tmp, &dev->csr->ne[0]);
+
+ if (use_dma) {
+ dev->ep[UDC_EP0OUT_IX].td->status |=
+ AMD_BIT(UDC_DMA_OUT_STS_L);
+ /* write dma desc address */
+ writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
+ &dev->ep[UDC_EP0OUT_IX].regs->subptr);
+ writel(dev->ep[UDC_EP0OUT_IX].td_phys,
+ &dev->ep[UDC_EP0OUT_IX].regs->desptr);
+ /* stop RDE timer */
+ if (timer_pending(&udc_timer)) {
+ set_rde = 0;
+ mod_timer(&udc_timer, jiffies - 1);
+ }
+ /* stop pollstall timer */
+ if (timer_pending(&udc_pollstall_timer)) {
+ mod_timer(&udc_pollstall_timer, jiffies - 1);
+ }
+ /* enable DMA */
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_MODE)
+ | AMD_BIT(UDC_DEVCTL_RDE)
+ | AMD_BIT(UDC_DEVCTL_TDE);
+ if (use_dma_bufferfill_mode) {
+ tmp |= AMD_BIT(UDC_DEVCTL_BF);
+ } else if (use_dma_ppb_du) {
+ tmp |= AMD_BIT(UDC_DEVCTL_DU);
+ }
+ writel(tmp, &dev->regs->ctl);
+ }
+
+ /* clear NAK by writing CNAK for EP0IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
+
+ /* clear NAK by writing CNAK for EP0OUT */
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ dev->ep[UDC_EP0OUT_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
+}
+
+/* Make endpoint 0 ready for control traffic */
+static int setup_ep0(struct udc *dev)
+{
+ activate_control_endpoints(dev);
+ /* enable ep0 interrupts */
+ udc_enable_ep0_interrupts(dev);
+ /* enable device setup interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+
+ return 0;
+}
+
+/* Called by gadget driver to register itself */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct udc *dev = udc;
+ int retval;
+ u32 tmp;
+
+ if (!driver || !driver->bind || !driver->setup
+ || driver->speed != USB_SPEED_HIGH)
+ return -EINVAL;
+ if (!dev)
+ return -ENODEV;
+ if (dev->driver)
+ return -EBUSY;
+
+ driver->driver.bus = NULL;
+ dev->driver = driver;
+ dev->gadget.dev.driver = &driver->driver;
+
+ retval = driver->bind(&dev->gadget);
+
+ /* Some gadget drivers use both ep0 directions.
+ * NOTE: to gadget driver, ep0 is just one endpoint...
+ */
+ dev->ep[UDC_EP0OUT_IX].ep.driver_data =
+ dev->ep[UDC_EP0IN_IX].ep.driver_data;
+
+ if (retval) {
+ DBG(dev, "binding to %s returning %d\n",
+ driver->driver.name, retval);
+ dev->driver = NULL;
+ dev->gadget.dev.driver = NULL;
+ return retval;
+ }
+
+ /* get ready for ep0 traffic */
+ setup_ep0(dev);
+
+ /* clear SD (soft disconnect) so the device attaches to the bus */
+ tmp = readl(&dev->regs->ctl);
+ tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
+ writel(tmp, &dev->regs->ctl);
+
+ usb_connect(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+/* shutdown requests and disconnect from gadget */
+static void
+shutdown(struct udc *dev, struct usb_gadget_driver *driver)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ int tmp;
+
+ /* empty queues and init hardware */
+ udc_basic_init(dev);
+ for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
+ empty_req_queue(&dev->ep[tmp]);
+ }
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ /* init */
+ udc_setup_endpoints(dev);
+}
+
+/* Called by gadget driver to unregister itself */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct udc *dev = udc;
+ unsigned long flags;
+ u32 tmp;
+
+ if (!dev)
+ return -ENODEV;
+ if (!driver || driver != dev->driver || !driver->unbind)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ udc_mask_unused_interrupts(dev);
+ shutdown(dev, driver);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ driver->unbind(&dev->gadget);
+ dev->driver = NULL;
+
+ /* set SD (soft disconnect) to detach from the bus */
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_SD);
+ writel(tmp, &dev->regs->ctl);
+
+
+ DBG(dev, "%s: unregistered\n", driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/* Clear pending NAK bits */
+static void udc_process_cnak_queue(struct udc *dev)
+{
+ u32 tmp;
+ u32 reg;
+
+ /* check epin's */
+ DBG(dev, "CNAK pending queue processing\n");
+ for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
+ if (cnak_pending & (1 << tmp)) {
+ DBG(dev, "CNAK pending for ep%d\n", tmp);
+ /* clear NAK by writing CNAK */
+ reg = readl(&dev->ep[tmp].regs->ctl);
+ reg |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(reg, &dev->ep[tmp].regs->ctl);
+ dev->ep[tmp].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
+ }
+ }
+ /* ... and ep0out */
+ if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
+ DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
+ /* clear NAK by writing CNAK */
+ reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ reg |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ dev->ep[UDC_EP0OUT_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
+ dev->ep[UDC_EP0OUT_IX].num);
+ }
+}
+
+/* Enables RX DMA after a setup packet */
+static void udc_ep0_set_rde(struct udc *dev)
+{
+ if (use_dma) {
+ /*
+ * only enable RXDMA when no data endpoint enabled
+ * or data is queued
+ */
+ if (!dev->data_ep_enabled || dev->data_ep_queued) {
+ udc_set_rde(dev);
+ } else {
+ /*
+ * setup timer for enabling RDE (to not enable
+ * RXFIFO DMA for data endpoints too early)
+ */
+ if (set_rde != 0 && !timer_pending(&udc_timer)) {
+ udc_timer.expires =
+ jiffies + HZ/UDC_RDE_TIMER_DIV;
+ set_rde = 1;
+ if (!stop_timer) {
+ add_timer(&udc_timer);
+ }
+ }
+ }
+ }
+}
+
+
+/* Interrupt handler for data OUT traffic */
+static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ unsigned int count;
+ struct udc_data_dma *td = NULL;
+ unsigned dma_done;
+
+ VDBG(dev, "ep%d irq\n", ep_ix);
+ ep = &dev->ep[ep_ix];
+
+ tmp = readl(&ep->regs->sts);
+ if (use_dma) {
+ /* BNA event ? */
+ if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
+ DBG(dev, "BNA ep%dout occured - DESPTR = %x \n",
+ ep->num, readl(&ep->regs->desptr));
+ /* clear BNA */
+ writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
+ if (!ep->cancel_transfer)
+ ep->bna_occurred = 1;
+ else
+ ep->cancel_transfer = 0;
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+ }
+ /* HE event ? */
+ if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
+ dev_err(&dev->pdev->dev, "HE ep%dout occured\n", ep->num);
+
+ /* clear HE */
+ writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+
+ if (!list_empty(&ep->queue)) {
+
+ /* next request */
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ } else {
+ req = NULL;
+ udc_rxfifo_pending = 1;
+ }
+ VDBG(dev, "req = %p\n", req);
+ /* fifo mode */
+ if (!use_dma) {
+
+ /* read fifo */
+ if (req && udc_rxfifo_read(ep, req)) {
+ ret_val = IRQ_HANDLED;
+
+ /* finish */
+ complete_req(ep, req, 0);
+ /* next request */
+ if (!list_empty(&ep->queue) && !ep->halted) {
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ } else
+ req = NULL;
+ }
+
+ /* DMA */
+ } else if (!ep->cancel_transfer && req != NULL) {
+ ret_val = IRQ_HANDLED;
+
+ /* check for DMA done */
+ if (!use_dma_ppb) {
+ dma_done = AMD_GETBITS(req->td_data->status,
+ UDC_DMA_OUT_STS_BS);
+ /* packet per buffer mode - rx bytes */
+ } else {
+ /*
+ * if BNA occurred then recover desc. from
+ * BNA dummy desc.
+ */
+ if (ep->bna_occurred) {
+ VDBG(dev, "Recover desc. from BNA dummy\n");
+ memcpy(req->td_data, ep->bna_dummy_req->td_data,
+ sizeof(struct udc_data_dma));
+ ep->bna_occurred = 0;
+ udc_init_bna_dummy(ep->req);
+ }
+ td = udc_get_last_dma_desc(req);
+ dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
+ }
+ if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
+ /* buffer fill mode - rx bytes */
+ if (!use_dma_ppb) {
+ /* number of received bytes */
+ count = AMD_GETBITS(req->td_data->status,
+ UDC_DMA_OUT_STS_RXBYTES);
+ VDBG(dev, "rx bytes=%u\n", count);
+ /* packet per buffer mode - rx bytes */
+ } else {
+ VDBG(dev, "req->td_data=%p\n", req->td_data);
+ VDBG(dev, "last desc = %p\n", td);
+ /* number of received bytes */
+ if (use_dma_ppb_du) {
+ /* every desc. counts bytes */
+ count = udc_get_ppbdu_rxbytes(req);
+ } else {
+ /* last desc. counts bytes */
+ count = AMD_GETBITS(td->status,
+ UDC_DMA_OUT_STS_RXBYTES);
+ if (!count && req->req.length
+ == UDC_DMA_MAXPACKET) {
+ /*
+ * on 64k packets the RXBYTES
+ * field is zero
+ */
+ count = UDC_DMA_MAXPACKET;
+ }
+ }
+ VDBG(dev, "last desc rx bytes=%u\n", count);
+ }
+
+ tmp = req->req.length - req->req.actual;
+ if (count > tmp) {
+ if ((tmp % ep->ep.maxpacket) != 0) {
+ DBG(dev, "%s: rx %db, space=%db\n",
+ ep->ep.name, count, tmp);
+ req->req.status = -EOVERFLOW;
+ }
+ count = tmp;
+ }
+ req->req.actual += count;
+ req->dma_going = 0;
+ /* complete request */
+ complete_req(ep, req, 0);
+
+ /* next request */
+ if (!list_empty(&ep->queue) && !ep->halted) {
+ req = list_entry(ep->queue.next,
+ struct udc_request,
+ queue);
+ /*
+ * DMA may be already started by udc_queue()
+ * called by gadget drivers completion
+ * routine. This happens when queue
+ * holds one request only.
+ */
+ if (req->dma_going == 0) {
+ /* next dma */
+ if (prep_dma(ep, req, GFP_ATOMIC) != 0)
+ goto finished;
+ /* write desc pointer */
+ writel(req->td_phys,
+ &ep->regs->desptr);
+ req->dma_going = 1;
+ /* enable DMA */
+ udc_set_rde(dev);
+ }
+ } else {
+ /*
+ * implant BNA dummy descriptor to allow
+ * RXFIFO opening by RDE
+ */
+ if (ep->bna_dummy_req) {
+ /* write desc pointer */
+ writel(ep->bna_dummy_req->td_phys,
+ &ep->regs->desptr);
+ ep->bna_occurred = 0;
+ }
+
+ /*
+ * schedule timer for setting RDE if queue
+ * remains empty to allow ep0 packets pass
+ * through
+ */
+ if (set_rde != 0
+ && !timer_pending(&udc_timer)) {
+ udc_timer.expires =
+ jiffies
+ + HZ*UDC_RDE_TIMER_SECONDS;
+ set_rde = 1;
+ if (!stop_timer) {
+ add_timer(&udc_timer);
+ }
+ }
+ if (ep->num != UDC_EP0OUT_IX)
+ dev->data_ep_queued = 0;
+ }
+
+ } else {
+ /*
+ * RX DMA must be reenabled for each desc in PPBDU mode
+ * and must be enabled for PPBNDU mode in case of BNA
+ */
+ udc_set_rde(dev);
+ }
+
+ } else if (ep->cancel_transfer) {
+ ret_val = IRQ_HANDLED;
+ ep->cancel_transfer = 0;
+ }
+
+ /* check pending CNAKS */
+ if (cnak_pending) {
+ /* CNAK processing only when rxfifo is empty */
+ if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
+ udc_process_cnak_queue(dev);
+ }
+ }
+
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
+finished:
+ return ret_val;
+}
+
+/* Interrupt handler for data IN traffic */
+static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ u32 epsts;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ struct udc_data_dma *td;
+ unsigned dma_done;
+ unsigned len;
+
+ ep = &dev->ep[ep_ix];
+
+ epsts = readl(&ep->regs->sts);
+ if (use_dma) {
+ /* BNA ? */
+ if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
+ dev_err(&dev->pdev->dev,
+ "BNA ep%din occured - DESPTR = %08lx \n",
+ ep->num,
+ (unsigned long) readl(&ep->regs->desptr));
+
+ /* clear BNA */
+ writel(epsts, &ep->regs->sts);
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+ }
+ /* HE event ? */
+ if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
+ dev_err(&dev->pdev->dev,
+ "HE ep%dn occured - DESPTR = %08lx \n",
+ ep->num, (unsigned long) readl(&ep->regs->desptr));
+
+ /* clear HE */
+ writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+
+ /* DMA completion */
+ if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
+ VDBG(dev, "TDC set- completion\n");
+ ret_val = IRQ_HANDLED;
+ if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ if (req) {
+ /*
+ * length bytes transferred
+ * check dma done of last desc. in PPBDU mode
+ */
+ if (use_dma_ppb_du) {
+ td = udc_get_last_dma_desc(req);
+ if (td) {
+ dma_done =
+ AMD_GETBITS(td->status,
+ UDC_DMA_IN_STS_BS);
+ /* don't care about the DMA done status */
+ req->req.actual =
+ req->req.length;
+ }
+ } else {
+ /* assume all bytes transferred */
+ req->req.actual = req->req.length;
+ }
+
+ if (req->req.actual == req->req.length) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ req->dma_going = 0;
+ /* further request available ? */
+ if (list_empty(&ep->queue)) {
+ /* disable interrupt */
+ tmp = readl(
+ &dev->regs->ep_irqmsk);
+ tmp |= AMD_BIT(ep->num);
+ writel(tmp,
+ &dev->regs->ep_irqmsk);
+ }
+
+ }
+ }
+ }
+ ep->cancel_transfer = 0;
+
+ }
+ /*
+ * status reg has IN bit set and TDC not set: if TDC was handled,
+ * IN must not be handled (UDC defect?)
+ */
+ if ((epsts & AMD_BIT(UDC_EPSTS_IN))
+ && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
+ ret_val = IRQ_HANDLED;
+ if (!list_empty(&ep->queue)) {
+ /* next request */
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+ /* FIFO mode */
+ if (!use_dma) {
+ /* write fifo */
+ udc_txfifo_write(ep, &req->req);
+ len = req->req.length - req->req.actual;
+ if (len > ep->ep.maxpacket)
+ len = ep->ep.maxpacket;
+ req->req.actual += len;
+ if (req->req.actual == req->req.length
+ || (len != ep->ep.maxpacket)) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ }
+ /* DMA */
+ } else if (req && !req->dma_going) {
+ VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
+ req, req->td_data);
+ if (req->td_data) {
+
+ req->dma_going = 1;
+
+ /*
+ * unset L bit of first desc.
+ * for chain
+ */
+ if (use_dma_ppb && req->req.length >
+ ep->ep.maxpacket) {
+ req->td_data->status &=
+ AMD_CLEAR_BIT(
+ UDC_DMA_IN_STS_L);
+ }
+
+ /* write desc pointer */
+ writel(req->td_phys, &ep->regs->desptr);
+
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(
+ req->td_data->status,
+ UDC_DMA_IN_STS_BS_HOST_READY,
+ UDC_DMA_IN_STS_BS);
+
+ /* set poll demand bit */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_P);
+ writel(tmp, &ep->regs->ctl);
+ }
+ }
+
+ }
+ }
+ /* clear status bits */
+ writel(epsts, &ep->regs->sts);
+
+finished:
+ return ret_val;
+
+}
+
+/* Interrupt handler for Control OUT traffic */
+static irqreturn_t udc_control_out_isr(struct udc *dev)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ int setup_supported;
+ u32 count;
+ int set = 0;
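+ /*
+ * set != 0 means the SETUP has an OUT (host-to-device) data stage;
+ * the EP0 OUT CNAK below is then skipped and RDE is enabled later
+ * via the RDE timer
+ */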
+ struct udc_ep *ep;
+ struct udc_ep *ep_tmp;
+
+ ep = &dev->ep[UDC_EP0OUT_IX];
+
+ /* clear irq */
+ writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
+
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
+ /* check BNA and clear if set */
+ if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
+ VDBG(dev, "ep0: BNA set\n");
+ writel(AMD_BIT(UDC_EPSTS_BNA),
+ &dev->ep[UDC_EP0OUT_IX].regs->sts);
+ ep->bna_occurred = 1;
+ ret_val = IRQ_HANDLED;
+ goto finished;
+ }
+
+ /* type of data: SETUP or DATA 0 bytes */
+ tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
+ VDBG(dev, "data_typ = %x\n", tmp);
+
+ /* setup data */
+ if (tmp == UDC_EPSTS_OUT_SETUP) {
+ ret_val = IRQ_HANDLED;
+
+ ep->dev->stall_ep0in = 0;
+ dev->waiting_zlp_ack_ep0in = 0;
+
+ /* set NAK for EP0_IN */
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_SNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 1;
+ /* get setup data */
+ if (use_dma) {
+
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR,
+ &dev->ep[UDC_EP0OUT_IX].regs->sts);
+
+ setup_data.data[0] =
+ dev->ep[UDC_EP0OUT_IX].td_stp->data12;
+ setup_data.data[1] =
+ dev->ep[UDC_EP0OUT_IX].td_stp->data34;
+ /* set HOST READY */
+ dev->ep[UDC_EP0OUT_IX].td_stp->status =
+ UDC_DMA_STP_STS_BS_HOST_READY;
+ } else {
+ /* read fifo */
+ udc_rxfifo_read_dwords(dev, setup_data.data, 2);
+ }
+
+ /* determine direction of control data */
+ if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
+ dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
+ /* enable RDE */
+ udc_ep0_set_rde(dev);
+ set = 0;
+ } else {
+ dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
+ /*
+ * implant BNA dummy descriptor to allow RXFIFO opening
+ * by RDE
+ */
+ if (ep->bna_dummy_req) {
+ /* write desc pointer */
+ writel(ep->bna_dummy_req->td_phys,
+ &dev->ep[UDC_EP0OUT_IX].regs->desptr);
+ ep->bna_occurred = 0;
+ }
+
+ set = 1;
+ dev->ep[UDC_EP0OUT_IX].naking = 1;
+ /*
+ * setup timer for enabling RDE (to not enable
+ * RXFIFO DMA for data too early)
+ */
+ set_rde = 1;
+ if (!timer_pending(&udc_timer)) {
+ udc_timer.expires = jiffies +
+ HZ/UDC_RDE_TIMER_DIV;
+ if (!stop_timer) {
+ add_timer(&udc_timer);
+ }
+ }
+ }
+
+ /*
+ * mass storage reset must be processed here because
+ * next packet may be a CLEAR_FEATURE HALT which would not
+ * clear the stall bit when no STALL handshake was received
+ * before (autostall can cause this)
+ */
+ if (setup_data.data[0] == UDC_MSCRES_DWORD0
+ && setup_data.data[1] == UDC_MSCRES_DWORD1) {
+ DBG(dev, "MSC Reset\n");
+ /*
+ * clear stall bits
+ * only one IN and one OUT endpoint are handled
+ */
+ ep_tmp = &udc->ep[UDC_EPIN_IX];
+ udc_set_halt(&ep_tmp->ep, 0);
+ ep_tmp = &udc->ep[UDC_EPOUT_IX];
+ udc_set_halt(&ep_tmp->ep, 0);
+ }
+
+ /* call gadget with setup data received */
+ spin_unlock(&dev->lock);
+ setup_supported = dev->driver->setup(&dev->gadget,
+ &setup_data.request);
+ spin_lock(&dev->lock);
+
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ /* ep0 in returns data (not zlp) on IN phase */
+ if (setup_supported >= 0 && setup_supported <
+ UDC_EP0IN_MAXPACKET) {
+ /* clear NAK by writing CNAK in EP0_IN */
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ dev->ep[UDC_EP0IN_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
+
+ /* if unsupported request then stall */
+ } else if (setup_supported < 0) {
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
+ } else
+ dev->waiting_zlp_ack_ep0in = 1;
+
+
+ /* clear NAK by writing CNAK in EP0_OUT */
+ if (!set) {
+ tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_CNAK);
+ writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
+ dev->ep[UDC_EP0OUT_IX].naking = 0;
+ UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
+ }
+
+ if (!use_dma) {
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR,
+ &dev->ep[UDC_EP0OUT_IX].regs->sts);
+ }
+
+ /* data packet 0 bytes */
+ } else if (tmp == UDC_EPSTS_OUT_DATA) {
+ /* clear OUT bits in ep status */
+ writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
+
+ /* get setup data: only 0 packet */
+ if (use_dma) {
+ /* no req if 0 packet, just reactivate */
+ if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
+ VDBG(dev, "ZLP\n");
+
+ /* set HOST READY */
+ dev->ep[UDC_EP0OUT_IX].td->status =
+ AMD_ADDBITS(
+ dev->ep[UDC_EP0OUT_IX].td->status,
+ UDC_DMA_OUT_STS_BS_HOST_READY,
+ UDC_DMA_OUT_STS_BS);
+ /* enable RDE */
+ udc_ep0_set_rde(dev);
+ ret_val = IRQ_HANDLED;
+
+ } else {
+ /* control write */
+ ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
+ /* re-program desc. pointer for possible ZLPs */
+ writel(dev->ep[UDC_EP0OUT_IX].td_phys,
+ &dev->ep[UDC_EP0OUT_IX].regs->desptr);
+ /* enable RDE */
+ udc_ep0_set_rde(dev);
+ }
+ } else {
+
+ /* number of received bytes */
+ count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
+ count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
+ /* out data for fifo mode not working */
+ count = 0;
+
+ /* 0 packet or real data ? */
+ if (count != 0) {
+ ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
+ } else {
+ /* dummy read confirm */
+ readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
+ ret_val = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* check pending CNAKS */
+ if (cnak_pending) {
+ /* CNAK processing only when rxfifo is empty */
+ if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
+ udc_process_cnak_queue(dev);
+ }
+ }
+
+finished:
+ return ret_val;
+}
+
+/* Interrupt handler for Control IN traffic */
+static irqreturn_t udc_control_in_isr(struct udc *dev)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ struct udc_ep *ep;
+ struct udc_request *req;
+ unsigned len;
+
+ ep = &dev->ep[UDC_EP0IN_IX];
+
+ /* clear irq */
+ writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
+
+ tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
+ /* DMA completion */
+ if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
+ VDBG(dev, "isr: TDC clear \n");
+ ret_val = IRQ_HANDLED;
+
+ /* clear TDC bit */
+ writel(AMD_BIT(UDC_EPSTS_TDC),
+ &dev->ep[UDC_EP0IN_IX].regs->sts);
+
+ /* status reg has IN bit set ? */
+ } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
+ ret_val = IRQ_HANDLED;
+
+ if (ep->dma) {
+ /* clear IN bit */
+ writel(AMD_BIT(UDC_EPSTS_IN),
+ &dev->ep[UDC_EP0IN_IX].regs->sts);
+ }
+ if (dev->stall_ep0in) {
+ DBG(dev, "stall ep0in\n");
+ /* halt ep0in */
+ tmp = readl(&ep->regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ } else {
+ if (!list_empty(&ep->queue)) {
+ /* next request */
+ req = list_entry(ep->queue.next,
+ struct udc_request, queue);
+
+ if (ep->dma) {
+ /* write desc pointer */
+ writel(req->td_phys, &ep->regs->desptr);
+ /* set HOST READY */
+ req->td_data->status =
+ AMD_ADDBITS(
+ req->td_data->status,
+ UDC_DMA_STP_STS_BS_HOST_READY,
+ UDC_DMA_STP_STS_BS);
+
+ /* set poll demand bit */
+ tmp =
+ readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
+ tmp |= AMD_BIT(UDC_EPCTL_P);
+ writel(tmp,
+ &dev->ep[UDC_EP0IN_IX].regs->ctl);
+
+ /* all bytes will be transferred */
+ req->req.actual = req->req.length;
+
+ /* complete req */
+ complete_req(ep, req, 0);
+
+ } else {
+ /* write fifo */
+ udc_txfifo_write(ep, &req->req);
+
+ /* length bytes transferred */
+ len = req->req.length - req->req.actual;
+ if (len > ep->ep.maxpacket)
+ len = ep->ep.maxpacket;
+
+ req->req.actual += len;
+ if (req->req.actual == req->req.length
+ || (len != ep->ep.maxpacket)) {
+ /* complete req */
+ complete_req(ep, req, 0);
+ }
+ }
+
+ }
+ }
+ ep->halted = 0;
+ dev->stall_ep0in = 0;
+ if (!ep->dma) {
+ /* clear IN bit */
+ writel(AMD_BIT(UDC_EPSTS_IN),
+ &dev->ep[UDC_EP0IN_IX].regs->sts);
+ }
+ }
+
+ return ret_val;
+}
+
+
+/* Interrupt handler for global device events */
+static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
+__releases(dev->lock)
+__acquires(dev->lock)
+{
+ irqreturn_t ret_val = IRQ_NONE;
+ u32 tmp;
+ u32 cfg;
+ struct udc_ep *ep;
+ u16 i;
+ u8 udc_csr_epix;
+
+ /* SET_CONFIG irq ? */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
+ ret_val = IRQ_HANDLED;
+
+ /* read config value */
+ tmp = readl(&dev->regs->sts);
+ cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
+ DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
+ dev->cur_config = cfg;
+ dev->set_cfg_not_acked = 1;
+
+ /* make usb request for gadget driver */
+ memset(&setup_data, 0 , sizeof(union udc_setup_data));
+ setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
+ setup_data.request.wValue = dev->cur_config;
+
+ /* program the NE registers */
+ for (i = 0; i < UDC_EP_NUM; i++) {
+ ep = &dev->ep[i];
+ if (ep->in) {
+
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num;
+
+
+ /* OUT ep */
+ } else {
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+ }
+
+ tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ /* ep cfg */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
+ UDC_CSR_NE_CFG);
+ /* write reg */
+ writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+ /* clear stall bits */
+ ep->halted = 0;
+ tmp = readl(&ep->regs->ctl);
+ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ }
+ /* call gadget driver's setup() with the received setup data */
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
+ spin_lock(&dev->lock);
+
+ } /* SET_INTERFACE ? */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
+ ret_val = IRQ_HANDLED;
+
+ dev->set_cfg_not_acked = 1;
+ /* read interface and alt setting values */
+ tmp = readl(&dev->regs->sts);
+ dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
+ dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
+
+ /* make usb request for gadget driver */
+ memset(&setup_data, 0 , sizeof(union udc_setup_data));
+ setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
+ setup_data.request.bRequestType = USB_RECIP_INTERFACE;
+ setup_data.request.wValue = dev->cur_alt;
+ setup_data.request.wIndex = dev->cur_intf;
+
+ DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
+ dev->cur_alt, dev->cur_intf);
+
+ /* program the NE registers */
+ for (i = 0; i < UDC_EP_NUM; i++) {
+ ep = &dev->ep[i];
+ if (ep->in) {
+
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num;
+
+
+ /* OUT ep */
+ } else {
+ /* ep ix in UDC CSR register space */
+ udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
+ }
+
+ /* UDC CSR reg */
+ /* set ep values */
+ tmp = readl(&dev->csr->ne[udc_csr_epix]);
+ /* ep interface */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
+ UDC_CSR_NE_INTF);
+ /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
+ /* ep alt */
+ tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
+ UDC_CSR_NE_ALT);
+ /* write reg */
+ writel(tmp, &dev->csr->ne[udc_csr_epix]);
+
+ /* clear stall bits */
+ ep->halted = 0;
+ tmp = readl(&ep->regs->ctl);
+ tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
+ writel(tmp, &ep->regs->ctl);
+ }
+
+ /* call gadget driver's setup() with the received setup data */
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
+ spin_lock(&dev->lock);
+
+ } /* USB reset */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
+ DBG(dev, "USB Reset interrupt\n");
+ ret_val = IRQ_HANDLED;
+
+ /* allow soft reset when suspend occurs */
+ soft_reset_occured = 0;
+
+ dev->waiting_zlp_ack_ep0in = 0;
+ dev->set_cfg_not_acked = 0;
+
+ /* mask not needed interrupts */
+ udc_mask_unused_interrupts(dev);
+
+ /* call gadget to resume and reset configs etc. */
+ spin_unlock(&dev->lock);
+ if (dev->sys_suspended && dev->driver->resume) {
+ dev->driver->resume(&dev->gadget);
+ dev->sys_suspended = 0;
+ }
+ dev->driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+
+ /* disable ep0 to empty req queue */
+ empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+ /* soft reset when rxfifo not empty */
+ tmp = readl(&dev->regs->sts);
+ if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
+ && !soft_reset_after_usbreset_occured) {
+ udc_soft_reset(dev);
+ soft_reset_after_usbreset_occured++;
+ }
+
+ /*
+ * DMA reset to kill potential old DMA hw hang,
+ * POLL bit is already reset by ep_init() through
+ * disconnect()
+ */
+ DBG(dev, "DMA machine reset\n");
+ tmp = readl(&dev->regs->cfg);
+ writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
+ writel(tmp, &dev->regs->cfg);
+
+ /* put into initial config */
+ udc_basic_init(dev);
+
+ /* enable device setup interrupts */
+ udc_enable_dev_setup_interrupts(dev);
+
+ /* enable suspend interrupt */
+ tmp = readl(&dev->regs->irqmsk);
+ tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
+ writel(tmp, &dev->regs->irqmsk);
+
+ } /* USB suspend */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
+ DBG(dev, "USB Suspend interrupt\n");
+ ret_val = IRQ_HANDLED;
+ if (dev->driver->suspend) {
+ spin_unlock(&dev->lock);
+ dev->sys_suspended = 1;
+ dev->driver->suspend(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+ } /* new speed ? */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
+ DBG(dev, "ENUM interrupt\n");
+ ret_val = IRQ_HANDLED;
+ soft_reset_after_usbreset_occured = 0;
+
+ /* disable ep0 to empty req queue */
+ empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
+
+ /* link up all endpoints */
+ udc_setup_endpoints(dev);
+ if (dev->gadget.speed == USB_SPEED_HIGH) {
+ dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
+ "high");
+ } else if (dev->gadget.speed == USB_SPEED_FULL) {
+ dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
+ "full");
+ }
+
+ /* init ep 0 */
+ activate_control_endpoints(dev);
+
+ /* enable ep0 interrupts */
+ udc_enable_ep0_interrupts(dev);
+ }
+ /* session valid change interrupt */
+ if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
+ DBG(dev, "USB SVC interrupt\n");
+ ret_val = IRQ_HANDLED;
+
+ /* check that session is not valid to detect disconnect */
+ tmp = readl(&dev->regs->sts);
+ if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
+ /* disable suspend interrupt */
+ tmp = readl(&dev->regs->irqmsk);
+ tmp |= AMD_BIT(UDC_DEVINT_US);
+ writel(tmp, &dev->regs->irqmsk);
+ DBG(dev, "USB Disconnect (session valid low)\n");
+ /* cleanup on disconnect */
+ usb_disconnect(udc);
+ }
+
+ }
+
+ return ret_val;
+}
+
+/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
+static irqreturn_t udc_irq(int irq, void *pdev)
+{
+ struct udc *dev = pdev;
+ u32 reg;
+ u16 i;
+ u32 ep_irq;
+ irqreturn_t ret_val = IRQ_NONE;
+
+ spin_lock(&dev->lock);
+
+ /* check for ep irq */
+ reg = readl(&dev->regs->ep_irqsts);
+ if (reg) {
+ if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
+ ret_val |= udc_control_out_isr(dev);
+ if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
+ ret_val |= udc_control_in_isr(dev);
+
+ /*
+ * data endpoint
+ * iterate ep's
+ */
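+ /* irq bits 0..15 are IN endpoints, bits 16..31 are OUT endpoints */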
+ for (i = 1; i < UDC_EP_NUM; i++) {
+ ep_irq = 1 << i;
+ if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
+ continue;
+
+ /* clear irq status */
+ writel(ep_irq, &dev->regs->ep_irqsts);
+
+ /* irq for out ep ? */
+ if (i > UDC_EPIN_NUM)
+ ret_val |= udc_data_out_isr(dev, i);
+ else
+ ret_val |= udc_data_in_isr(dev, i);
+ }
+
+ }
+
+
+ /* check for dev irq */
+ reg = readl(&dev->regs->irqsts);
+ if (reg) {
+ /* clear irq */
+ writel(reg, &dev->regs->irqsts);
+ ret_val |= udc_dev_isr(dev, reg);
+ }
+
+
+ spin_unlock(&dev->lock);
+ return ret_val;
+}
+
+/* Tears down device */
+static void gadget_release(struct device *pdev)
+{
+ struct amd5536udc *dev = dev_get_drvdata(pdev);
+ kfree(dev);
+}
+
+/* Cleanup on device remove */
+static void udc_remove(struct udc *dev)
+{
+ /* remove timer */
+ stop_timer++;
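+ /* a pending expiry sees stop_timer and completes on_exit instead of re-arming */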
+ if (timer_pending(&udc_timer))
+ wait_for_completion(&on_exit);
+ if (udc_timer.data)
+ del_timer_sync(&udc_timer);
+ /* remove pollstall timer */
+ stop_pollstall_timer++;
+ if (timer_pending(&udc_pollstall_timer))
+ wait_for_completion(&on_pollstall_exit);
+ if (udc_pollstall_timer.data)
+ del_timer_sync(&udc_pollstall_timer);
+ udc = NULL;
+}
+
+/* Reset all pci context */
+static void udc_pci_remove(struct pci_dev *pdev)
+{
+ struct udc *dev;
+
+ dev = pci_get_drvdata(pdev);
+
+ /* gadget driver must not be registered */
+ BUG_ON(dev->driver != NULL);
+
+ /* dma pool cleanup */
+ if (dev->data_requests)
+ pci_pool_destroy(dev->data_requests);
+
+ if (dev->stp_requests) {
+ /* cleanup DMA descriptors for ep0out */
+ pci_pool_free(dev->stp_requests,
+ dev->ep[UDC_EP0OUT_IX].td_stp,
+ dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+ pci_pool_free(dev->stp_requests,
+ dev->ep[UDC_EP0OUT_IX].td,
+ dev->ep[UDC_EP0OUT_IX].td_phys);
+
+ pci_pool_destroy(dev->stp_requests);
+ }
+
+ /* reset controller */
+ writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+ if (dev->irq_registered)
+ free_irq(pdev->irq, dev);
+ if (dev->regs)
+ iounmap(dev->regs);
+ if (dev->mem_region)
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (dev->active)
+ pci_disable_device(pdev);
+
+ device_unregister(&dev->gadget.dev);
+ pci_set_drvdata(pdev, NULL);
+
+ udc_remove(dev);
+}
+
+/* create dma pools on init */
+static int init_dma_pools(struct udc *dev)
+{
+ struct udc_stp_dma *td_stp;
+ struct udc_data_dma *td_data;
+ int retval;
+
+ /* make the DMA mode settings consistent: ppb and buffer-fill mode are mutually exclusive */
+ if (use_dma_ppb) {
+ use_dma_bufferfill_mode = 0;
+ } else {
+ use_dma_ppb_du = 0;
+ use_dma_bufferfill_mode = 1;
+ }
+
+ /* DMA setup */
+ dev->data_requests = dma_pool_create("data_requests", NULL,
+ sizeof(struct udc_data_dma), 0, 0);
+ if (!dev->data_requests) {
+ DBG(dev, "can't get request data pool\n");
+ retval = -ENOMEM;
+ goto finished;
+ }
+
+ /* EP0 in dma regs = dev control regs */
+ dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
+
+ /* dma desc for setup data */
+ dev->stp_requests = dma_pool_create("setup requests", NULL,
+ sizeof(struct udc_stp_dma), 0, 0);
+ if (!dev->stp_requests) {
+ DBG(dev, "can't get stp request pool\n");
+ retval = -ENOMEM;
+ goto finished;
+ }
+ /* setup */
+ td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+ &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
+ if (td_stp == NULL) {
+ retval = -ENOMEM;
+ goto finished;
+ }
+ dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
+
+ /* data: 0 packets !? */
+ td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
+ &dev->ep[UDC_EP0OUT_IX].td_phys);
+ if (td_data == NULL) {
+ retval = -ENOMEM;
+ goto finished;
+ }
+ dev->ep[UDC_EP0OUT_IX].td = td_data;
+ return 0;
+
+finished:
+ return retval;
+}
+
+/* Called by pci bus driver to init pci context */
+static int udc_pci_probe(
+ struct pci_dev *pdev,
+ const struct pci_device_id *id
+)
+{
+ struct udc *dev;
+ unsigned long resource;
+ unsigned long len;
+ int retval = 0;
+
+ /* one udc only */
+ if (udc) {
+ dev_dbg(&pdev->dev, "already probed\n");
+ return -EBUSY;
+ }
+
+ /* init */
+ dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto finished;
+ }
+
+ /* pci setup */
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto finished;
+ }
+ dev->active = 1;
+
+ /* PCI resource allocation */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+ if (!request_mem_region(resource, len, name)) {
+ dev_dbg(&pdev->dev, "pci device used already\n");
+ retval = -EBUSY;
+ goto finished;
+ }
+ dev->mem_region = 1;
+
+ dev->virt_addr = ioremap_nocache(resource, len);
+ if (dev->virt_addr == NULL) {
+ dev_dbg(&pdev->dev, "start address cannot be mapped\n");
+ retval = -EFAULT;
+ goto finished;
+ }
+
+ if (!pdev->irq) {
+ dev_err(&dev->pdev->dev, "irq not set\n");
+ retval = -ENODEV;
+ goto finished;
+ }
+
+ if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
+ dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+ retval = -EBUSY;
+ goto finished;
+ }
+ dev->irq_registered = 1;
+
+ pci_set_drvdata(pdev, dev);
+
+ /* chip revision */
+ dev->chiprev = 0;
+
+ pci_set_master(pdev);
+ pci_set_mwi(pdev);
+
+ /* chip rev for Hs AMD5536 */
+ pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) &dev->chiprev);
+ /* init dma pools */
+ if (use_dma) {
+ retval = init_dma_pools(dev);
+ if (retval != 0)
+ goto finished;
+ }
+
+ dev->phys_addr = resource;
+ dev->irq = pdev->irq;
+ dev->pdev = pdev;
+ dev->gadget.dev.parent = &pdev->dev;
+ dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+ /* general probing */
+ if (udc_probe(dev) == 0)
+ return 0;
+
+finished:
+ if (dev)
+ udc_pci_remove(pdev);
+ return retval;
+}
+
+/* general probe */
+static int udc_probe(struct udc *dev)
+{
+ char tmp[128];
+ u32 reg;
+ int retval;
+
+ /* mark timer as not initialized */
+ udc_timer.data = 0;
+ udc_pollstall_timer.data = 0;
+
+ /* device struct setup */
+ spin_lock_init(&dev->lock);
+ dev->gadget.ops = &udc_ops;
+
+ strcpy(dev->gadget.dev.bus_id, "gadget");
+ dev->gadget.dev.release = gadget_release;
+ dev->gadget.name = name;
+ dev->gadget.is_dualspeed = 1;
+
+ /* udc csr registers base */
+ dev->csr = dev->virt_addr + UDC_CSR_ADDR;
+ /* dev registers base */
+ dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
+ /* ep registers base */
+ dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
+ /* fifo's base */
+ dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
+ dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
+
+ /* init registers, interrupts, ... */
+ startup_registers(dev);
+
+ dev_info(&dev->pdev->dev, "%s\n", mod_desc);
+
+ snprintf(tmp, sizeof tmp, "%d", dev->irq);
+ dev_info(&dev->pdev->dev,
+ "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
+ tmp, dev->phys_addr, dev->chiprev,
+ (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
+ strcpy(tmp, UDC_DRIVER_VERSION_STRING);
+ if (dev->chiprev == UDC_HSA0_REV) {
+ dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
+ retval = -ENODEV;
+ goto finished;
+ }
+ dev_info(&dev->pdev->dev,
+ "driver version: %s(for Geode5536 B1)\n", tmp);
+ udc = dev;
+
+ retval = device_register(&dev->gadget.dev);
+ if (retval)
+ goto finished;
+
+ /* timer init */
+ init_timer(&udc_timer);
+ udc_timer.function = udc_timer_function;
+ udc_timer.data = 1;
+ /* timer pollstall init */
+ init_timer(&udc_pollstall_timer);
+ udc_pollstall_timer.function = udc_pollstall_timer_function;
+ udc_pollstall_timer.data = 1;
+
+ /* set SD (soft disconnect) until a gadget driver is registered */
+ reg = readl(&dev->regs->ctl);
+ reg |= AMD_BIT(UDC_DEVCTL_SD);
+ writel(reg, &dev->regs->ctl);
+
+ /* print dev register info */
+ print_regs(dev);
+
+ return 0;
+
+finished:
+ return retval;
+}
+
+/* Initiates a remote wakeup */
+static int udc_remote_wakeup(struct udc *dev)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ DBG(dev, "UDC initiates remote wakeup\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+
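+ /* pulse the RES bit: set it and clear it again to signal remote wakeup */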
+ tmp = readl(&dev->regs->ctl);
+ tmp |= AMD_BIT(UDC_DEVCTL_RES);
+ writel(tmp, &dev->regs->ctl);
+ tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
+ writel(tmp, &dev->regs->ctl);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+/* PCI device parameters */
+static const struct pci_device_id pci_id[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
+ .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class_mask = 0xffffffff,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, pci_id);
+
+/* PCI functions */
+static struct pci_driver udc_pci_driver = {
+ .name = (char *) name,
+ .id_table = pci_id,
+ .probe = udc_pci_probe,
+ .remove = udc_pci_remove,
+};
+
+/* Inits driver */
+static int __init init(void)
+{
+ return pci_register_driver(&udc_pci_driver);
+}
+module_init(init);
+
+/* Cleans driver */
+static void __exit cleanup(void)
+{
+ pci_unregister_driver(&udc_pci_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Thomas Dahlmann");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/usb/gadget/amd5536udc.h b/drivers/usb/gadget/amd5536udc.h
new file mode 100644
index 000000000000..4bbabbbfc93f
--- /dev/null
+++ b/drivers/usb/gadget/amd5536udc.h
@@ -0,0 +1,626 @@
+/*
+ * amd5536udc.h -- header for AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2007 AMD (http://www.amd.com)
+ * Author: Thomas Dahlmann
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef AMD5536UDC_H
+#define AMD5536UDC_H
+
+/* various constants */
+#define UDC_RDE_TIMER_SECONDS 1
+#define UDC_RDE_TIMER_DIV 10
+#define UDC_POLLSTALL_TIMER_USECONDS 500
+
+/* Hs AMD5536 chip rev. */
+#define UDC_HSA0_REV 1
+#define UDC_HSB1_REV 2
+
+/*
+ * SETUP usb commands
+ * needed, because some SETUP's are handled in hw, but must be passed to
+ * gadget driver above
+ * SET_CONFIG
+ */
+#define UDC_SETCONFIG_DWORD0 0x00000900
+#define UDC_SETCONFIG_DWORD0_VALUE_MASK 0xffff0000
+#define UDC_SETCONFIG_DWORD0_VALUE_OFS 16
+
+#define UDC_SETCONFIG_DWORD1 0x00000000
+
+/* SET_INTERFACE */
+#define UDC_SETINTF_DWORD0 0x00000b00
+#define UDC_SETINTF_DWORD0_ALT_MASK 0xffff0000
+#define UDC_SETINTF_DWORD0_ALT_OFS 16
+
+#define UDC_SETINTF_DWORD1 0x00000000
+#define UDC_SETINTF_DWORD1_INTF_MASK 0x0000ffff
+#define UDC_SETINTF_DWORD1_INTF_OFS 0
+
+/* Mass storage reset */
+#define UDC_MSCRES_DWORD0 0x0000ff21
+#define UDC_MSCRES_DWORD1 0x00000000
+
+/* Global CSR's -------------------------------------------------------------*/
+#define UDC_CSR_ADDR 0x500
+
+/* EP NE bits */
+/* EP number */
+#define UDC_CSR_NE_NUM_MASK 0x0000000f
+#define UDC_CSR_NE_NUM_OFS 0
+/* EP direction */
+#define UDC_CSR_NE_DIR_MASK 0x00000010
+#define UDC_CSR_NE_DIR_OFS 4
+/* EP type */
+#define UDC_CSR_NE_TYPE_MASK 0x00000060
+#define UDC_CSR_NE_TYPE_OFS 5
+/* EP config number */
+#define UDC_CSR_NE_CFG_MASK 0x00000780
+#define UDC_CSR_NE_CFG_OFS 7
+/* EP interface number */
+#define UDC_CSR_NE_INTF_MASK 0x00007800
+#define UDC_CSR_NE_INTF_OFS 11
+/* EP alt setting */
+#define UDC_CSR_NE_ALT_MASK 0x00078000
+#define UDC_CSR_NE_ALT_OFS 15
+
+/* max pkt */
+#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
+#define UDC_CSR_NE_MAX_PKT_OFS 19
+
+/* Device Config Register ---------------------------------------------------*/
+#define UDC_DEVCFG_ADDR 0x400
+
+#define UDC_DEVCFG_SOFTRESET 31
+#define UDC_DEVCFG_HNPSFEN 30
+#define UDC_DEVCFG_DMARST 29
+#define UDC_DEVCFG_SET_DESC 18
+#define UDC_DEVCFG_CSR_PRG 17
+#define UDC_DEVCFG_STATUS 7
+#define UDC_DEVCFG_DIR 6
+#define UDC_DEVCFG_PI 5
+#define UDC_DEVCFG_SS 4
+#define UDC_DEVCFG_SP 3
+#define UDC_DEVCFG_RWKP 2
+
+#define UDC_DEVCFG_SPD_MASK 0x3
+#define UDC_DEVCFG_SPD_OFS 0
+#define UDC_DEVCFG_SPD_HS 0x0
+#define UDC_DEVCFG_SPD_FS 0x1
+#define UDC_DEVCFG_SPD_LS 0x2
+/*#define UDC_DEVCFG_SPD_FS 0x3*/
+
+
+/* Device Control Register --------------------------------------------------*/
+#define UDC_DEVCTL_ADDR 0x404
+
+#define UDC_DEVCTL_THLEN_MASK 0xff000000
+#define UDC_DEVCTL_THLEN_OFS 24
+
+#define UDC_DEVCTL_BRLEN_MASK 0x00ff0000
+#define UDC_DEVCTL_BRLEN_OFS 16
+
+#define UDC_DEVCTL_CSR_DONE 13
+#define UDC_DEVCTL_DEVNAK 12
+#define UDC_DEVCTL_SD 10
+#define UDC_DEVCTL_MODE 9
+#define UDC_DEVCTL_BREN 8
+#define UDC_DEVCTL_THE 7
+#define UDC_DEVCTL_BF 6
+#define UDC_DEVCTL_BE 5
+#define UDC_DEVCTL_DU 4
+#define UDC_DEVCTL_TDE 3
+#define UDC_DEVCTL_RDE 2
+#define UDC_DEVCTL_RES 0
+
+
+/* Device Status Register ---------------------------------------------------*/
+#define UDC_DEVSTS_ADDR 0x408
+
+#define UDC_DEVSTS_TS_MASK 0xfffc0000
+#define UDC_DEVSTS_TS_OFS 18
+
+#define UDC_DEVSTS_SESSVLD 17
+#define UDC_DEVSTS_PHY_ERROR 16
+#define UDC_DEVSTS_RXFIFO_EMPTY 15
+
+#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
+#define UDC_DEVSTS_ENUM_SPEED_OFS 13
+#define UDC_DEVSTS_ENUM_SPEED_FULL 1
+#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
+
+#define UDC_DEVSTS_SUSP 12
+
+#define UDC_DEVSTS_ALT_MASK 0x00000f00
+#define UDC_DEVSTS_ALT_OFS 8
+
+#define UDC_DEVSTS_INTF_MASK 0x000000f0
+#define UDC_DEVSTS_INTF_OFS 4
+
+#define UDC_DEVSTS_CFG_MASK 0x0000000f
+#define UDC_DEVSTS_CFG_OFS 0
+
+
+/* Device Interrupt Register ------------------------------------------------*/
+#define UDC_DEVINT_ADDR 0x40c
+
+#define UDC_DEVINT_SVC 7
+#define UDC_DEVINT_ENUM 6
+#define UDC_DEVINT_SOF 5
+#define UDC_DEVINT_US 4
+#define UDC_DEVINT_UR 3
+#define UDC_DEVINT_ES 2
+#define UDC_DEVINT_SI 1
+#define UDC_DEVINT_SC 0
+
+/* Device Interrupt Mask Register -------------------------------------------*/
+#define UDC_DEVINT_MSK_ADDR 0x410
+
+#define UDC_DEVINT_MSK 0x7f
+
+/* Endpoint Interrupt Register ----------------------------------------------*/
+#define UDC_EPINT_ADDR 0x414
+
+#define UDC_EPINT_OUT_MASK 0xffff0000
+#define UDC_EPINT_OUT_OFS 16
+#define UDC_EPINT_IN_MASK 0x0000ffff
+#define UDC_EPINT_IN_OFS 0
+
+#define UDC_EPINT_IN_EP0 0
+#define UDC_EPINT_IN_EP1 1
+#define UDC_EPINT_IN_EP2 2
+#define UDC_EPINT_IN_EP3 3
+#define UDC_EPINT_OUT_EP0 16
+#define UDC_EPINT_OUT_EP1 17
+#define UDC_EPINT_OUT_EP2 18
+#define UDC_EPINT_OUT_EP3 19
+
+#define UDC_EPINT_EP0_ENABLE_MSK 0x001e001e
+
+/* Endpoint Interrupt Mask Register -----------------------------------------*/
+#define UDC_EPINT_MSK_ADDR 0x418
+
+#define UDC_EPINT_OUT_MSK_MASK 0xffff0000
+#define UDC_EPINT_OUT_MSK_OFS 16
+#define UDC_EPINT_IN_MSK_MASK 0x0000ffff
+#define UDC_EPINT_IN_MSK_OFS 0
+
+#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
+/* mask non-EP0 endpoints */
+#define UDC_EPDATAINT_MSK_DISABLE 0xfffefffe
+/* mask all dev interrupts */
+#define UDC_DEV_MSK_DISABLE 0x7f
+
+/* Endpoint-specific CSR's --------------------------------------------------*/
+#define UDC_EPREGS_ADDR 0x0
+#define UDC_EPIN_REGS_ADDR 0x0
+#define UDC_EPOUT_REGS_ADDR 0x200
+
+#define UDC_EPCTL_ADDR 0x0
+
+#define UDC_EPCTL_RRDY 9
+#define UDC_EPCTL_CNAK 8
+#define UDC_EPCTL_SNAK 7
+#define UDC_EPCTL_NAK 6
+
+#define UDC_EPCTL_ET_MASK 0x00000030
+#define UDC_EPCTL_ET_OFS 4
+#define UDC_EPCTL_ET_CONTROL 0
+#define UDC_EPCTL_ET_ISO 1
+#define UDC_EPCTL_ET_BULK 2
+#define UDC_EPCTL_ET_INTERRUPT 3
+
+#define UDC_EPCTL_P 3
+#define UDC_EPCTL_SN 2
+#define UDC_EPCTL_F 1
+#define UDC_EPCTL_S 0
+
+/* Endpoint Status Registers ------------------------------------------------*/
+#define UDC_EPSTS_ADDR 0x4
+
+#define UDC_EPSTS_RX_PKT_SIZE_MASK 0x007ff800
+#define UDC_EPSTS_RX_PKT_SIZE_OFS 11
+
+#define UDC_EPSTS_TDC 10
+#define UDC_EPSTS_HE 9
+#define UDC_EPSTS_BNA 7
+#define UDC_EPSTS_IN 6
+
+#define UDC_EPSTS_OUT_MASK 0x00000030
+#define UDC_EPSTS_OUT_OFS 4
+#define UDC_EPSTS_OUT_DATA 1
+#define UDC_EPSTS_OUT_DATA_CLEAR 0x10
+#define UDC_EPSTS_OUT_SETUP 2
+#define UDC_EPSTS_OUT_SETUP_CLEAR 0x20
+#define UDC_EPSTS_OUT_CLEAR 0x30
+
+/* Endpoint Buffer Size IN/ Receive Packet Frame Number OUT Registers ------*/
+#define UDC_EPIN_BUFF_SIZE_ADDR 0x8
+#define UDC_EPOUT_FRAME_NUMBER_ADDR 0x8
+
+#define UDC_EPIN_BUFF_SIZE_MASK 0x0000ffff
+#define UDC_EPIN_BUFF_SIZE_OFS 0
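+/* note: the buffer size values below are in 32-bit words (e.g. 32 words = 128 bytes) */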
+/* EP0in txfifo = 128 bytes*/
+#define UDC_EPIN0_BUFF_SIZE 32
+/* EP0in fullspeed txfifo = 128 bytes*/
+#define UDC_FS_EPIN0_BUFF_SIZE 32
+
+/* fifo size mult = fifo size / max packet */
+#define UDC_EPIN_BUFF_SIZE_MULT 2
+
+/* EPin data fifo size = 1024 bytes DOUBLE BUFFERING */
+#define UDC_EPIN_BUFF_SIZE 256
+/* EPin small INT data fifo size = 128 bytes */
+#define UDC_EPIN_SMALLINT_BUFF_SIZE 32
+
+/* EPin fullspeed data fifo size = 128 bytes DOUBLE BUFFERING */
+#define UDC_FS_EPIN_BUFF_SIZE 32
+
+#define UDC_EPOUT_FRAME_NUMBER_MASK 0x0000ffff
+#define UDC_EPOUT_FRAME_NUMBER_OFS 0
+
+/* Endpoint Buffer Size OUT/Max Packet Size Registers -----------------------*/
+#define UDC_EPOUT_BUFF_SIZE_ADDR 0x0c
+#define UDC_EP_MAX_PKT_SIZE_ADDR 0x0c
+
+#define UDC_EPOUT_BUFF_SIZE_MASK 0xffff0000
+#define UDC_EPOUT_BUFF_SIZE_OFS 16
+#define UDC_EP_MAX_PKT_SIZE_MASK 0x0000ffff
+#define UDC_EP_MAX_PKT_SIZE_OFS 0
+/* EP0in max packet size = 64 bytes */
+#define UDC_EP0IN_MAX_PKT_SIZE 64
+/* EP0out max packet size = 64 bytes */
+#define UDC_EP0OUT_MAX_PKT_SIZE 64
+/* EP0in fullspeed max packet size = 64 bytes */
+#define UDC_FS_EP0IN_MAX_PKT_SIZE 64
+/* EP0out fullspeed max packet size = 64 bytes */
+#define UDC_FS_EP0OUT_MAX_PKT_SIZE 64
+
+/*
+ * Endpoint dma descriptors ------------------------------------------------
+ *
+ * Setup data, Status dword
+ */
+#define UDC_DMA_STP_STS_CFG_MASK 0x0fff0000
+#define UDC_DMA_STP_STS_CFG_OFS 16
+#define UDC_DMA_STP_STS_CFG_ALT_MASK 0x000f0000
+#define UDC_DMA_STP_STS_CFG_ALT_OFS 16
+#define UDC_DMA_STP_STS_CFG_INTF_MASK 0x00f00000
+#define UDC_DMA_STP_STS_CFG_INTF_OFS 20
+#define UDC_DMA_STP_STS_CFG_NUM_MASK 0x0f000000
+#define UDC_DMA_STP_STS_CFG_NUM_OFS 24
+#define UDC_DMA_STP_STS_RX_MASK 0x30000000
+#define UDC_DMA_STP_STS_RX_OFS 28
+#define UDC_DMA_STP_STS_BS_MASK 0xc0000000
+#define UDC_DMA_STP_STS_BS_OFS 30
+#define UDC_DMA_STP_STS_BS_HOST_READY 0
+#define UDC_DMA_STP_STS_BS_DMA_BUSY 1
+#define UDC_DMA_STP_STS_BS_DMA_DONE 2
+#define UDC_DMA_STP_STS_BS_HOST_BUSY 3
+/* IN data, Status dword */
+#define UDC_DMA_IN_STS_TXBYTES_MASK 0x0000ffff
+#define UDC_DMA_IN_STS_TXBYTES_OFS 0
+#define UDC_DMA_IN_STS_FRAMENUM_MASK 0x07ff0000
+#define UDC_DMA_IN_STS_FRAMENUM_OFS 0
+#define UDC_DMA_IN_STS_L 27
+#define UDC_DMA_IN_STS_TX_MASK 0x30000000
+#define UDC_DMA_IN_STS_TX_OFS 28
+#define UDC_DMA_IN_STS_BS_MASK 0xc0000000
+#define UDC_DMA_IN_STS_BS_OFS 30
+#define UDC_DMA_IN_STS_BS_HOST_READY 0
+#define UDC_DMA_IN_STS_BS_DMA_BUSY 1
+#define UDC_DMA_IN_STS_BS_DMA_DONE 2
+#define UDC_DMA_IN_STS_BS_HOST_BUSY 3
+/* OUT data, Status dword */
+#define UDC_DMA_OUT_STS_RXBYTES_MASK 0x0000ffff
+#define UDC_DMA_OUT_STS_RXBYTES_OFS 0
+#define UDC_DMA_OUT_STS_FRAMENUM_MASK 0x07ff0000
+#define UDC_DMA_OUT_STS_FRAMENUM_OFS 0
+#define UDC_DMA_OUT_STS_L 27
+#define UDC_DMA_OUT_STS_RX_MASK 0x30000000
+#define UDC_DMA_OUT_STS_RX_OFS 28
+#define UDC_DMA_OUT_STS_BS_MASK 0xc0000000
+#define UDC_DMA_OUT_STS_BS_OFS 30
+#define UDC_DMA_OUT_STS_BS_HOST_READY 0
+#define UDC_DMA_OUT_STS_BS_DMA_BUSY 1
+#define UDC_DMA_OUT_STS_BS_DMA_DONE 2
+#define UDC_DMA_OUT_STS_BS_HOST_BUSY 3
+/* max ep0in packet */
+#define UDC_EP0IN_MAXPACKET 1000
+/* max dma packet */
+#define UDC_DMA_MAXPACKET 65536
+
+/* unusable DMA address */
+#define DMA_DONT_USE (~(dma_addr_t) 0)
+
+/* other Endpoint register addresses and values-----------------------------*/
+#define UDC_EP_SUBPTR_ADDR 0x10
+#define UDC_EP_DESPTR_ADDR 0x14
+#define UDC_EP_WRITE_CONFIRM_ADDR 0x1c
+
+/* number of EPs as laid out in AHB space */
+#define UDC_EP_NUM 32
+#define UDC_EPIN_NUM 16
+#define UDC_EPIN_NUM_USED 5
+#define UDC_EPOUT_NUM 16
+/* number of EPs actually used: EP0 + 8 data EPs */
+#define UDC_USED_EP_NUM 9
+/* UDC CSR regs are aligned, but AHB regs are not - index offset for OUT EPs */
+#define UDC_CSR_EP_OUT_IX_OFS 12
+
+#define UDC_EP0OUT_IX 16
+#define UDC_EP0IN_IX 0
+
+/* Rx fifo address and size = 1k -------------------------------------------*/
+#define UDC_RXFIFO_ADDR 0x800
+#define UDC_RXFIFO_SIZE 0x400
+
+/* Tx fifo address and size = 1.5k -----------------------------------------*/
+#define UDC_TXFIFO_ADDR 0xc00
+#define UDC_TXFIFO_SIZE 0x600
+
+/* default data endpoints --------------------------------------------------*/
+#define UDC_EPIN_STATUS_IX 1
+#define UDC_EPIN_IX 2
+#define UDC_EPOUT_IX 18
+
+/* general constants -------------------------------------------------------*/
+#define UDC_DWORD_BYTES 4
+#define UDC_BITS_PER_BYTE_SHIFT 3
+#define UDC_BYTE_MASK 0xff
+#define UDC_BITS_PER_BYTE 8
+
+/*---------------------------------------------------------------------------*/
+/* UDC CSR's */
+struct udc_csrs {
+
+ /* sca - setup command address */
+ u32 sca;
+
+ /* per-endpoint NE registers */
+ u32 ne[UDC_USED_EP_NUM];
+} __attribute__ ((packed));
+
+/* AHB subsystem CSR registers */
+struct udc_regs {
+
+ /* device configuration */
+ u32 cfg;
+
+ /* device control */
+ u32 ctl;
+
+ /* device status */
+ u32 sts;
+
+ /* device interrupt */
+ u32 irqsts;
+
+ /* device interrupt mask */
+ u32 irqmsk;
+
+ /* endpoint interrupt */
+ u32 ep_irqsts;
+
+ /* endpoint interrupt mask */
+ u32 ep_irqmsk;
+} __attribute__ ((packed));
+
+/* endpoint specific registers */
+struct udc_ep_regs {
+
+ /* endpoint control */
+ u32 ctl;
+
+ /* endpoint status */
+ u32 sts;
+
+ /* endpoint buffer size in/ receive packet frame number out */
+ u32 bufin_framenum;
+
+ /* endpoint buffer size out/max packet size */
+ u32 bufout_maxpkt;
+
+ /* endpoint setup buffer pointer */
+ u32 subptr;
+
+ /* endpoint data descriptor pointer */
+ u32 desptr;
+
+ /* reserved */
+ u32 reserved;
+
+ /* write/read confirmation */
+ u32 confirm;
+
+} __attribute__ ((packed));
+
+/* control data DMA desc */
+struct udc_stp_dma {
+ /* status quadlet */
+ u32 status;
+ /* reserved */
+ u32 _reserved;
+ /* first setup word */
+ u32 data12;
+ /* second setup word */
+ u32 data34;
+} __attribute__ ((aligned (16)));
+
+/* normal data DMA desc */
+struct udc_data_dma {
+ /* status quadlet */
+ u32 status;
+ /* reserved */
+ u32 _reserved;
+ /* buffer pointer */
+ u32 bufptr;
+ /* next descriptor pointer */
+ u32 next;
+} __attribute__ ((aligned (16)));
+
+/* request packet */
+struct udc_request {
+ /* embedded gadget request */
+ struct usb_request req;
+
+ /* flags */
+ unsigned dma_going : 1,
+ dma_mapping : 1,
+ dma_done : 1;
+ /* phys. address */
+ dma_addr_t td_phys;
+ /* first dma desc. of chain */
+ struct udc_data_dma *td_data;
+ /* last dma desc. of chain */
+ struct udc_data_dma *td_data_last;
+ struct list_head queue;
+
+ /* chain length */
+ unsigned chain_len;
+
+};
+
+/* UDC specific endpoint parameters */
+struct udc_ep {
+ struct usb_ep ep;
+ struct udc_ep_regs __iomem *regs;
+ u32 __iomem *txfifo;
+ u32 __iomem *dma;
+ dma_addr_t td_phys;
+ dma_addr_t td_stp_dma;
+ struct udc_stp_dma *td_stp;
+ struct udc_data_dma *td;
+ /* temp request */
+ struct udc_request *req;
+ unsigned req_used;
+ unsigned req_completed;
+ /* dummy request used when a BNA (buffer not available) interrupt occurs */
+ struct udc_request *bna_dummy_req;
+ unsigned bna_occurred;
+
+ /* NAK state */
+ unsigned naking;
+
+ struct udc *dev;
+
+ /* queue for requests */
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned halted;
+ unsigned cancel_transfer;
+ unsigned num : 5,
+ fifo_depth : 14,
+ in : 1;
+};
+
+/* device struct */
+struct udc {
+ struct usb_gadget gadget;
+ spinlock_t lock; /* protects all state */
+ /* all endpoints */
+ struct udc_ep ep[UDC_EP_NUM];
+ struct usb_gadget_driver *driver;
+ /* operational flags */
+ unsigned active : 1,
+ stall_ep0in : 1,
+ waiting_zlp_ack_ep0in : 1,
+ set_cfg_not_acked : 1,
+ irq_registered : 1,
+ data_ep_enabled : 1,
+ data_ep_queued : 1,
+ mem_region : 1,
+ sys_suspended : 1,
+ connected;
+
+ u16 chiprev;
+
+ /* registers */
+ struct pci_dev *pdev;
+ struct udc_csrs __iomem *csr;
+ struct udc_regs __iomem *regs;
+ struct udc_ep_regs __iomem *ep_regs;
+ u32 __iomem *rxfifo;
+ u32 __iomem *txfifo;
+
+ /* DMA desc pools */
+ struct pci_pool *data_requests;
+ struct pci_pool *stp_requests;
+
+ /* device data */
+ unsigned long phys_addr;
+ void __iomem *virt_addr;
+ unsigned irq;
+
+ /* states */
+ u16 cur_config;
+ u16 cur_intf;
+ u16 cur_alt;
+};
+
+/* setup request data */
+union udc_setup_data {
+ u32 data[2];
+ struct usb_ctrlrequest request;
+};
+
+/*
+ *---------------------------------------------------------------------------
+ * SET and GET bitfields in u32 values
+ * via constants for mask/offset:
+ * <bit_field_stub_name> is the text between
+ * UDC_ and _MASK|_OFS of the appropriate
+ * constant
+ *
+ * set bitfield value in u32 u32Val
+ */
+#define AMD_ADDBITS(u32Val, bitfield_val, bitfield_stub_name) \
+ (((u32Val) & (((u32) ~((u32) bitfield_stub_name##_MASK)))) \
+ | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \
+ & ((u32) bitfield_stub_name##_MASK)))
+
+/*
+ * set bitfield value in zero-initialized u32 u32Val
+ * => bitfield bits in u32Val are all zero
+ */
+#define AMD_INIT_SETBITS(u32Val, bitfield_val, bitfield_stub_name) \
+ ((u32Val) \
+ | (((bitfield_val) << ((u32) bitfield_stub_name##_OFS)) \
+ & ((u32) bitfield_stub_name##_MASK)))
+
+/* get bitfield value from u32 u32Val */
+#define AMD_GETBITS(u32Val, bitfield_stub_name) \
+ ((u32Val & ((u32) bitfield_stub_name##_MASK)) \
+ >> ((u32) bitfield_stub_name##_OFS))
+
+/* SET and GET bits in u32 values ------------------------------------------*/
+#define AMD_BIT(bit_stub_name) (1 << bit_stub_name)
+#define AMD_UNMASK_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
+#define AMD_CLEAR_BIT(bit_stub_name) (~AMD_BIT(bit_stub_name))
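+
+/*
+ * Illustrative use of the helpers above (example only, not part of the
+ * register definitions; "ep" stands for a hypothetical struct udc_ep *):
+ *
+ *	len = AMD_GETBITS(readl(&ep->regs->sts), UDC_EPSTS_RX_PKT_SIZE);
+ *	writel(readl(&ep->regs->ctl) | AMD_BIT(UDC_EPCTL_S), &ep->regs->ctl);
+ *
+ * The first line extracts the received packet length from the endpoint
+ * status word; the second ORs a single control bit (UDC_EPCTL_S) into the
+ * endpoint control word.
+ */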
+
+/* debug macros ------------------------------------------------------------*/
+
+#define DBG(udc, args...) dev_dbg(&(udc)->pdev->dev, args)
+
+#ifdef UDC_VERBOSE
+#define VDBG DBG
+#else
+#define VDBG(udc, args...) do {} while (0)
+#endif
+
+#endif /* #ifdef AMD5536UDC_H */
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index ba163f35bf21..63d7d6568699 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -601,25 +601,6 @@ static void at91_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-static void *at91_ep_alloc_buffer(
- struct usb_ep *_ep,
- unsigned bytes,
- dma_addr_t *dma,
- gfp_t gfp_flags)
-{
- *dma = ~0;
- return kmalloc(bytes, gfp_flags);
-}
-
-static void at91_ep_free_buffer(
- struct usb_ep *ep,
- void *buf,
- dma_addr_t dma,
- unsigned bytes)
-{
- kfree(buf);
-}
-
static int at91_ep_queue(struct usb_ep *_ep,
struct usb_request *_req, gfp_t gfp_flags)
{
@@ -788,8 +769,6 @@ static const struct usb_ep_ops at91_ep_ops = {
.disable = at91_ep_disable,
.alloc_request = at91_ep_alloc_request,
.free_request = at91_ep_free_request,
- .alloc_buffer = at91_ep_alloc_buffer,
- .free_buffer = at91_ep_free_buffer,
.queue = at91_ep_queue,
.dequeue = at91_ep_dequeue,
.set_halt = at91_ep_set_halt,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index fcb5526cb085..f2fbdc7fe376 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -497,38 +497,6 @@ dummy_free_request (struct usb_ep *_ep, struct usb_request *_req)
kfree (req);
}
-static void *
-dummy_alloc_buffer (
- struct usb_ep *_ep,
- unsigned bytes,
- dma_addr_t *dma,
- gfp_t mem_flags
-) {
- char *retval;
- struct dummy_ep *ep;
- struct dummy *dum;
-
- ep = usb_ep_to_dummy_ep (_ep);
- dum = ep_to_dummy (ep);
-
- if (!dum->driver)
- return NULL;
- retval = kmalloc (bytes, mem_flags);
- *dma = (dma_addr_t) retval;
- return retval;
-}
-
-static void
-dummy_free_buffer (
- struct usb_ep *_ep,
- void *buf,
- dma_addr_t dma,
- unsigned bytes
-) {
- if (bytes)
- kfree (buf);
-}
-
static void
fifo_complete (struct usb_ep *ep, struct usb_request *req)
{
@@ -659,10 +627,6 @@ static const struct usb_ep_ops dummy_ep_ops = {
.alloc_request = dummy_alloc_request,
.free_request = dummy_free_request,
- .alloc_buffer = dummy_alloc_buffer,
- .free_buffer = dummy_free_buffer,
- /* map, unmap, ... eventually hook the "generic" dma calls */
-
.queue = dummy_queue,
.dequeue = dummy_dequeue,
@@ -1784,8 +1748,7 @@ static int dummy_bus_resume (struct usb_hcd *hcd)
spin_lock_irq (&dum->lock);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
- dev_warn (&hcd->self.root_hub->dev, "HC isn't running!\n");
- rc = -ENODEV;
+ rc = -ESHUTDOWN;
} else {
dum->rh_state = DUMMY_RH_RUNNING;
set_link_state (dum);
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 325bf7cfb83f..a3376739a81b 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -277,7 +277,7 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
#define DEV_CONFIG_CDC
#endif
-#ifdef CONFIG_USB_GADGET_HUSB2DEV
+#ifdef CONFIG_USB_GADGET_ATMEL_USBA
#define DEV_CONFIG_CDC
#endif
@@ -292,7 +292,7 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
#define DEV_CONFIG_SUBSET
#endif
-#ifdef CONFIG_USB_GADGET_SH
+#ifdef CONFIG_USB_GADGET_SUPERH
#define DEV_CONFIG_SUBSET
#endif
@@ -301,6 +301,14 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
#define DEV_CONFIG_SUBSET
#endif
+#ifdef CONFIG_USB_GADGET_M66592
+#define DEV_CONFIG_CDC
+#endif
+
+#ifdef CONFIG_USB_GADGET_AMD5536UDC
+#define DEV_CONFIG_CDC
+#endif
+
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 4639b629e60c..be7a1bd2823b 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3434,6 +3434,9 @@ static int fsg_main_thread(void *fsg_)
allow_signal(SIGKILL);
allow_signal(SIGUSR1);
+ /* Allow the thread to be frozen */
+ set_freezable();
+
/* Arrange for userspace references to be interpreted as kernel
* pointers. That way we can pass a kernel pointer to a routine
* that expects a __user pointer and it will work okay. */
@@ -3733,19 +3736,12 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
}
/* Free the data buffers */
- for (i = 0; i < NUM_BUFFERS; ++i) {
- struct fsg_buffhd *bh = &fsg->buffhds[i];
-
- if (bh->buf)
- usb_ep_free_buffer(fsg->bulk_in, bh->buf, bh->dma,
- mod_data.buflen);
- }
+ for (i = 0; i < NUM_BUFFERS; ++i)
+ kfree(fsg->buffhds[i].buf);
/* Free the request and buffer for endpoint 0 */
if (req) {
- if (req->buf)
- usb_ep_free_buffer(fsg->ep0, req->buf,
- req->dma, EP0_BUFSIZE);
+ kfree(req->buf);
usb_ep_free_request(fsg->ep0, req);
}
@@ -3963,8 +3959,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
#endif
if (gadget->is_otg) {
- otg_desc.bmAttributes |= USB_OTG_HNP,
- config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ otg_desc.bmAttributes |= USB_OTG_HNP;
}
rc = -ENOMEM;
@@ -3973,8 +3968,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
if (!req)
goto out;
- req->buf = usb_ep_alloc_buffer(fsg->ep0, EP0_BUFSIZE,
- &req->dma, GFP_KERNEL);
+ req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
if (!req->buf)
goto out;
req->complete = ep0_complete;
@@ -3986,8 +3980,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
/* Allocate for the bulk-in endpoint. We assume that
* the buffer will also work with the bulk-out (and
* interrupt-in) endpoint. */
- bh->buf = usb_ep_alloc_buffer(fsg->bulk_in, mod_data.buflen,
- &bh->dma, GFP_KERNEL);
+ bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
if (!bh->buf)
goto out;
bh->next = bh + 1;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c
index 3ca2b3159f00..10b2b33b8698 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_usb2_udc.c
@@ -228,7 +228,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
/* Config PHY interface */
portctrl = fsl_readl(&dr_regs->portsc1);
- portctrl &= ~(PORTSCX_PHY_TYPE_SEL & PORTSCX_PORT_WIDTH);
+ portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
switch (udc->phy_mode) {
case FSL_USB2_PHY_ULPI:
portctrl |= PORTSCX_PTS_ULPI;
@@ -601,39 +601,6 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/*------------------------------------------------------------------
- * Allocate an I/O buffer
-*---------------------------------------------------------------------*/
-static void *fsl_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
- dma_addr_t *dma, gfp_t gfp_flags)
-{
- struct fsl_ep *ep;
-
- if (!_ep)
- return NULL;
-
- ep = container_of(_ep, struct fsl_ep, ep);
-
- return dma_alloc_coherent(ep->udc->gadget.dev.parent,
- bytes, dma, gfp_flags);
-}
-
-/*------------------------------------------------------------------
- * frees an i/o buffer
-*---------------------------------------------------------------------*/
-static void fsl_free_buffer(struct usb_ep *_ep, void *buf,
- dma_addr_t dma, unsigned bytes)
-{
- struct fsl_ep *ep;
-
- if (!_ep)
- return;
-
- ep = container_of(_ep, struct fsl_ep, ep);
-
- dma_free_coherent(ep->udc->gadget.dev.parent, bytes, buf, dma);
-}
-
/*-------------------------------------------------------------------------*/
static int fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
@@ -1047,9 +1014,6 @@ static struct usb_ep_ops fsl_ep_ops = {
.alloc_request = fsl_alloc_request,
.free_request = fsl_free_request,
- .alloc_buffer = fsl_alloc_buffer,
- .free_buffer = fsl_free_buffer,
-
.queue = fsl_ep_queue,
.dequeue = fsl_ep_dequeue,
@@ -2189,27 +2153,19 @@ static void fsl_udc_release(struct device *dev)
* init resource for globle controller
* Return the udc handle on success or NULL on failure
------------------------------------------------------------------*/
-static struct fsl_udc *__init struct_udc_setup(struct platform_device *pdev)
+static int __init struct_udc_setup(struct fsl_udc *udc,
+ struct platform_device *pdev)
{
- struct fsl_udc *udc;
struct fsl_usb2_platform_data *pdata;
size_t size;
- udc = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
- if (udc == NULL) {
- ERR("malloc udc failed\n");
- return NULL;
- }
-
pdata = pdev->dev.platform_data;
udc->phy_mode = pdata->phy_mode;
- /* max_ep_nr is bidirectional ep number, max_ep doubles the number */
- udc->max_ep = pdata->max_ep_nr * 2;
udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
if (!udc->eps) {
ERR("malloc fsl_ep failed\n");
- goto cleanup;
+ return -1;
}
/* initialized QHs, take care of alignment */
@@ -2225,7 +2181,7 @@ static struct fsl_udc *__init struct_udc_setup(struct platform_device *pdev)
if (!udc->ep_qh) {
ERR("malloc QHs for udc failed\n");
kfree(udc->eps);
- goto cleanup;
+ return -1;
}
udc->ep_qh_size = size;
@@ -2244,11 +2200,7 @@ static struct fsl_udc *__init struct_udc_setup(struct platform_device *pdev)
udc->remote_wakeup = 0; /* default to 0 on reset */
spin_lock_init(&udc->lock);
- return udc;
-
-cleanup:
- kfree(udc);
- return NULL;
+ return 0;
}
/*----------------------------------------------------------------
@@ -2287,35 +2239,37 @@ static int __init struct_ep_setup(struct fsl_udc *udc, unsigned char index,
}
/* Driver probe function
- * all intialize operations implemented here except enabling usb_intr reg
+ * all initialization operations implemented here except enabling usb_intr reg
+ * board setup should have been done in the platform code
*/
static int __init fsl_udc_probe(struct platform_device *pdev)
{
struct resource *res;
int ret = -ENODEV;
unsigned int i;
+ u32 dccparams;
if (strcmp(pdev->name, driver_name)) {
VDBG("Wrong device\n");
return -ENODEV;
}
- /* board setup should have been done in the platform code */
-
- /* Initialize the udc structure including QH member and other member */
- udc_controller = struct_udc_setup(pdev);
- if (!udc_controller) {
- VDBG("udc_controller is NULL \n");
+ udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
+ if (udc_controller == NULL) {
+ ERR("malloc udc failed\n");
return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
+ if (!res) {
+ kfree(udc_controller);
return -ENXIO;
+ }
if (!request_mem_region(res->start, res->end - res->start + 1,
driver_name)) {
ERR("request mem region for %s failed \n", pdev->name);
+ kfree(udc_controller);
return -EBUSY;
}
@@ -2328,13 +2282,24 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
usb_sys_regs = (struct usb_sys_interface *)
((u32)dr_regs + USB_DR_SYS_OFFSET);
+ /* Read Device Controller Capability Parameters register */
+ dccparams = fsl_readl(&dr_regs->dccparams);
+ if (!(dccparams & DCCPARAMS_DC)) {
+ ERR("This SOC doesn't support device role\n");
+ ret = -ENODEV;
+ goto err2;
+ }
+ /* Get max device endpoints */
+ /* DEN is bidirectional ep number, max_ep doubles the number */
+ udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
+
udc_controller->irq = platform_get_irq(pdev, 0);
if (!udc_controller->irq) {
ret = -ENODEV;
goto err2;
}
- ret = request_irq(udc_controller->irq, fsl_udc_irq, SA_SHIRQ,
+ ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
driver_name, udc_controller);
if (ret != 0) {
ERR("cannot request irq %d err %d \n",
@@ -2342,6 +2307,13 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
goto err2;
}
+ /* Initialize the udc structure including QH member and other member */
+ if (struct_udc_setup(udc_controller, pdev)) {
+ ERR("Can't initialize udc data structure\n");
+ ret = -ENOMEM;
+ goto err3;
+ }
+
/* initialize usb hw reg except for regs for EP,
* leave usbintr reg untouched */
dr_controller_setup(udc_controller);
@@ -2403,6 +2375,7 @@ err2:
iounmap(dr_regs);
err1:
release_mem_region(res->start, res->end - res->start + 1);
+ kfree(udc_controller);
return ret;
}
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index c6291e046507..832ab82b4882 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -101,6 +101,10 @@ struct usb_sys_interface {
#define WAIT_FOR_OUT_STATUS 3
#define DATA_STATE_RECV 4
+/* Device Controller Capability Parameter register */
+#define DCCPARAMS_DC 0x00000080
+#define DCCPARAMS_DEN_MASK 0x0000001f
+
/* Frame Index Register Bit Masks */
#define USB_FRINDEX_MASKS 0x3fff
/* USB CMD Register Bit Masks */
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index d041b919e7b8..f7f159c1002b 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -8,6 +8,8 @@
* (And avoiding all runtime comparisons in typical one-choice configs!)
*
* NOTE: some of these controller drivers may not be available yet.
+ * Some are available on 2.4 kernels; several are available, but not
+ * yet pushed in the 2.6 mainline tree.
*/
#ifdef CONFIG_USB_GADGET_NET2280
#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
@@ -15,6 +17,12 @@
#define gadget_is_net2280(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_AMD5536UDC
+#define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name)
+#else
+#define gadget_is_amd5536udc(g) 0
+#endif
+
#ifdef CONFIG_USB_GADGET_DUMMY_HCD
#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
#else
@@ -33,12 +41,14 @@
#define gadget_is_goku(g) 0
#endif
+/* SH3 UDC -- not yet ported 2.4 --> 2.6 */
#ifdef CONFIG_USB_GADGET_SUPERH
#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
#else
#define gadget_is_sh(g) 0
#endif
+/* not yet stable on 2.6 (would help "original Zaurus") */
#ifdef CONFIG_USB_GADGET_SA1100
#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
#else
@@ -51,6 +61,7 @@
#define gadget_is_lh7a40x(g) 0
#endif
+/* handhelds.org tree (?) */
#ifdef CONFIG_USB_GADGET_MQ11XX
#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
#else
@@ -63,22 +74,24 @@
#define gadget_is_omap(g) 0
#endif
+/* not yet ported 2.4 --> 2.6 */
#ifdef CONFIG_USB_GADGET_N9604
#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
#else
#define gadget_is_n9604(g) 0
#endif
+/* various unstable versions available */
#ifdef CONFIG_USB_GADGET_PXA27X
#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
#else
#define gadget_is_pxa27x(g) 0
#endif
-#ifdef CONFIG_USB_GADGET_HUSB2DEV
-#define gadget_is_husb2dev(g) !strcmp("husb2_udc", (g)->name)
+#ifdef CONFIG_USB_GADGET_ATMEL_USBA
+#define gadget_is_atmel_usba(g) !strcmp("atmel_usba_udc", (g)->name)
#else
-#define gadget_is_husb2dev(g) 0
+#define gadget_is_atmel_usba(g) 0
#endif
#ifdef CONFIG_USB_GADGET_S3C2410
@@ -93,6 +106,7 @@
#define gadget_is_at91(g) 0
#endif
+/* status unclear */
#ifdef CONFIG_USB_GADGET_IMX
#define gadget_is_imx(g) !strcmp("imx_udc", (g)->name)
#else
@@ -106,6 +120,7 @@
#endif
/* Mentor high speed function controller */
+/* from Montavista kernel (?) */
#ifdef CONFIG_USB_GADGET_MUSBHSFC
#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
#else
@@ -119,12 +134,20 @@
#define gadget_is_musbhdrc(g) 0
#endif
+/* from Montavista kernel (?) */
#ifdef CONFIG_USB_GADGET_MPC8272
#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
#else
#define gadget_is_mpc8272(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_M66592
+#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
+#else
+#define gadget_is_m66592(g) 0
+#endif
+
+
// CONFIG_USB_GADGET_SX2
// CONFIG_USB_GADGET_AU1X00
// ...
@@ -181,9 +204,13 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x16;
else if (gadget_is_mpc8272(gadget))
return 0x17;
- else if (gadget_is_husb2dev(gadget))
+ else if (gadget_is_atmel_usba(gadget))
return 0x18;
else if (gadget_is_fsl_usb2(gadget))
return 0x19;
+ else if (gadget_is_amd5536udc(gadget))
+ return 0x20;
+ else if (gadget_is_m66592(gadget))
+ return 0x21;
return -ENOENT;
}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index d08a8d0e6427..1c5aa49d7432 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -1248,17 +1248,11 @@ autoconf_fail:
tasklet_init(&dev->tasklet, gmidi_in_tasklet, (unsigned long)dev);
/* preallocate control response and buffer */
- dev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
+ dev->req = alloc_ep_req(gadget->ep0, USB_BUFSIZ);
if (!dev->req) {
err = -ENOMEM;
goto fail;
}
- dev->req->buf = usb_ep_alloc_buffer(gadget->ep0, USB_BUFSIZ,
- &dev->req->dma, GFP_KERNEL);
- if (!dev->req->buf) {
- err = -ENOMEM;
- goto fail;
- }
dev->req->complete = gmidi_setup_complete;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index ae931af05cef..349b8166f34a 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -20,7 +20,6 @@
* - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
*/
-#undef DEBUG
// #define VERBOSE /* extra debug messages (success too) */
// #define USB_TRACE /* packet-level success messages */
@@ -296,51 +295,6 @@ goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
/*-------------------------------------------------------------------------*/
-/* allocating buffers this way eliminates dma mapping overhead, which
- * on some platforms will mean eliminating a per-io buffer copy. with
- * some kinds of system caches, further tweaks may still be needed.
- */
-static void *
-goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
- dma_addr_t *dma, gfp_t gfp_flags)
-{
- void *retval;
- struct goku_ep *ep;
-
- ep = container_of(_ep, struct goku_ep, ep);
- if (!_ep)
- return NULL;
- *dma = DMA_ADDR_INVALID;
-
- if (ep->dma) {
- /* the main problem with this call is that it wastes memory
- * on typical 1/N page allocations: it allocates 1-N pages.
- */
-#warning Using dma_alloc_coherent even with buffers smaller than a page.
- retval = dma_alloc_coherent(&ep->dev->pdev->dev,
- bytes, dma, gfp_flags);
- } else
- retval = kmalloc(bytes, gfp_flags);
- return retval;
-}
-
-static void
-goku_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma, unsigned bytes)
-{
- /* free memory into the right allocator */
- if (dma != DMA_ADDR_INVALID) {
- struct goku_ep *ep;
-
- ep = container_of(_ep, struct goku_ep, ep);
- if (!_ep)
- return;
- dma_free_coherent(&ep->dev->pdev->dev, bytes, buf, dma);
- } else
- kfree (buf);
-}
-
-/*-------------------------------------------------------------------------*/
-
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
@@ -485,7 +439,7 @@ top:
/* use ep1/ep2 double-buffering for OUT */
if (!(size & PACKET_ACTIVE))
size = readl(&regs->EPxSizeLB[ep->num]);
- if (!(size & PACKET_ACTIVE)) // "can't happen"
+ if (!(size & PACKET_ACTIVE)) /* "can't happen" */
break;
size &= DATASIZE; /* EPxSizeH == 0 */
@@ -1026,9 +980,6 @@ static struct usb_ep_ops goku_ep_ops = {
.alloc_request = goku_alloc_request,
.free_request = goku_free_request,
- .alloc_buffer = goku_alloc_buffer,
- .free_buffer = goku_free_buffer,
-
.queue = goku_queue,
.dequeue = goku_dequeue,
@@ -1140,17 +1091,17 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
is_usb_connected
? ((tmp & PW_PULLUP) ? "full speed" : "powered")
: "disconnected",
- ({char *tmp;
+ ({char *state;
switch(dev->ep0state){
- case EP0_DISCONNECT: tmp = "ep0_disconnect"; break;
- case EP0_IDLE: tmp = "ep0_idle"; break;
- case EP0_IN: tmp = "ep0_in"; break;
- case EP0_OUT: tmp = "ep0_out"; break;
- case EP0_STATUS: tmp = "ep0_status"; break;
- case EP0_STALL: tmp = "ep0_stall"; break;
- case EP0_SUSPEND: tmp = "ep0_suspend"; break;
- default: tmp = "ep0_?"; break;
- } tmp; })
+ case EP0_DISCONNECT: state = "ep0_disconnect"; break;
+ case EP0_IDLE: state = "ep0_idle"; break;
+ case EP0_IN: state = "ep0_in"; break;
+ case EP0_OUT: state = "ep0_out"; break;
+ case EP0_STATUS: state = "ep0_status"; break;
+ case EP0_STALL: state = "ep0_stall"; break;
+ case EP0_SUSPEND: state = "ep0_suspend"; break;
+ default: state = "ep0_?"; break;
+ } state; })
);
size -= t;
next += t;
@@ -1195,7 +1146,6 @@ udc_proc_read(char *buffer, char **start, off_t off, int count,
for (i = 0; i < 4; i++) {
struct goku_ep *ep = &dev->ep [i];
struct goku_request *req;
- int t;
if (i && !ep->desc)
continue;
@@ -1283,7 +1233,7 @@ done:
static void udc_reinit (struct goku_udc *dev)
{
static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };
-
+
unsigned i;
INIT_LIST_HEAD (&dev->gadget.ep_list);
@@ -1827,14 +1777,13 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* alloc, and start init */
- dev = kmalloc (sizeof *dev, GFP_KERNEL);
+ dev = kzalloc (sizeof *dev, GFP_KERNEL);
if (dev == NULL){
pr_debug("enomem %s\n", pci_name(pdev));
retval = -ENOMEM;
goto done;
}
- memset(dev, 0, sizeof *dev);
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
@@ -1896,9 +1845,9 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* done */
the_controller = dev;
- device_register(&dev->gadget.dev);
-
- return 0;
+ retval = device_register(&dev->gadget.dev);
+ if (retval == 0)
+ return 0;
done:
if (dev)
@@ -1910,8 +1859,8 @@ done:
/*-------------------------------------------------------------------------*/
static struct pci_device_id pci_ids [] = { {
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
- .class_mask = ~0,
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
.device = 0x0107, /* this UDC */
.subvendor = PCI_ANY_ID,
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index ea8c8e58cabf..bc4eb1e0b507 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -41,8 +41,10 @@ struct goku_udc_regs {
#define INT_SYSERROR 0x40000
#define INT_PWRDETECT 0x80000
-#define INT_DEVWIDE (INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
-#define INT_EP0 (INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
+#define INT_DEVWIDE \
+ (INT_PWRDETECT|INT_SYSERROR/*|INT_ERR*/|INT_USBRESET|INT_SUSPEND)
+#define INT_EP0 \
+ (INT_SETUP|INT_ENDPOINT0/*|INT_STATUS*/|INT_STATUSNAK)
u32 dma_master;
#define MST_EOPB_DIS 0x0800
@@ -231,7 +233,7 @@ struct goku_request {
enum ep0state {
EP0_DISCONNECT, /* no host */
EP0_IDLE, /* between STATUS ack and SETUP report */
- EP0_IN, EP0_OUT, /* data stage */
+ EP0_IN, EP0_OUT, /* data stage */
EP0_STATUS, /* status stage */
EP0_STALL, /* data or status stages */
EP0_SUSPEND, /* usb suspend */
@@ -242,7 +244,7 @@ struct goku_udc {
struct usb_gadget gadget;
spinlock_t lock;
struct goku_ep ep[4];
- struct usb_gadget_driver *driver;
+ struct usb_gadget_driver *driver;
enum ep0state ep0state;
unsigned got_irq:1,
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 46d0e5252744..e60745ffaf8e 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -37,7 +37,7 @@
#include <linux/device.h>
#include <linux/moduleparam.h>
-#include <linux/usb_gadgetfs.h>
+#include <linux/usb/gadgetfs.h>
#include <linux/usb_gadget.h>
@@ -923,7 +923,7 @@ static void clean_req (struct usb_ep *ep, struct usb_request *req)
struct dev_data *dev = ep->driver_data;
if (req->buf != dev->rbuf) {
- usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
+ kfree(req->buf);
req->buf = dev->rbuf;
req->dma = DMA_ADDR_INVALID;
}
@@ -963,7 +963,7 @@ static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
return -EBUSY;
}
if (len > sizeof (dev->rbuf))
- req->buf = usb_ep_alloc_buffer (ep, len, &req->dma, GFP_ATOMIC);
+ req->buf = kmalloc(len, GFP_ATOMIC);
if (req->buf == 0) {
req->buf = dev->rbuf;
return -ENOMEM;
@@ -1505,7 +1505,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
}
break;
-#ifndef CONFIG_USB_GADGETFS_PXA2XX
+#ifndef CONFIG_USB_GADGET_PXA2XX
/* PXA automagically handles this request too */
case USB_REQ_GET_CONFIGURATION:
if (ctrl->bRequestType != 0x80)
diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c
index a0a73c08a344..e78c2ddc1f88 100644
--- a/drivers/usb/gadget/lh7a40x_udc.c
+++ b/drivers/usb/gadget/lh7a40x_udc.c
@@ -75,10 +75,6 @@ static int lh7a40x_ep_enable(struct usb_ep *ep,
static int lh7a40x_ep_disable(struct usb_ep *ep);
static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t);
static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *);
-static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *,
- gfp_t);
-static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t,
- unsigned);
static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t);
static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *);
static int lh7a40x_set_halt(struct usb_ep *ep, int);
@@ -104,9 +100,6 @@ static struct usb_ep_ops lh7a40x_ep_ops = {
.alloc_request = lh7a40x_alloc_request,
.free_request = lh7a40x_free_request,
- .alloc_buffer = lh7a40x_alloc_buffer,
- .free_buffer = lh7a40x_free_buffer,
-
.queue = lh7a40x_queue,
.dequeue = lh7a40x_dequeue,
@@ -1134,26 +1127,6 @@ static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req)
kfree(req);
}
-static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes,
- dma_addr_t * dma, gfp_t gfp_flags)
-{
- char *retval;
-
- DEBUG("%s (%p, %d, %d)\n", __FUNCTION__, ep, bytes, gfp_flags);
-
- retval = kmalloc(bytes, gfp_flags & ~(__GFP_DMA | __GFP_HIGHMEM));
- if (retval)
- *dma = virt_to_bus(retval);
- return retval;
-}
-
-static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma,
- unsigned bytes)
-{
- DEBUG("%s, %p\n", __FUNCTION__, ep);
- kfree(buf);
-}
-
/** Queue one request
* Kickstart transfer if needed
* NOTE: Sets INDEX register
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
new file mode 100644
index 000000000000..700dda8a9157
--- /dev/null
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -0,0 +1,1633 @@
+/*
+ * M66592 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb_gadget.h>
+
+#include "m66592-udc.h"
+
+
+MODULE_DESCRIPTION("M66592 USB gadget driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+
+#define DRIVER_VERSION "29 May 2007"
+
+/* module parameters */
+static unsigned short clock = M66592_XTAL24;
+module_param(clock, ushort, 0644);
+MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
+ "(default=16384)");
+
+static unsigned short vif = M66592_LDRV;
+module_param(vif, ushort, 0644);
+MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0 (default=32768)");
+
+static unsigned short endian;
+module_param(endian, ushort, 0644);
+MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
+
+static unsigned short irq_sense = M66592_INTL;
+module_param(irq_sense, ushort, 0644);
+MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0 "
+ "(default=2)");
+
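+/*
+ * Illustrative only: on a board with a 48 MHz input clock and falling-edge
+ * interrupt wiring, the parameters above could be set at load time, e.g.
+ *	modprobe m66592_udc clock=32768 irq_sense=0
+ */
+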
+static const char udc_name[] = "m66592_udc";
+static const char *m66592_ep_name[] = {
+ "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7"
+};
+
+static void disable_controller(struct m66592 *m66592);
+static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
+static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
+static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags);
+
+static void transfer_complete(struct m66592_ep *ep,
+ struct m66592_request *req, int status);
+
+/*-------------------------------------------------------------------------*/
+static inline u16 get_usb_speed(struct m66592 *m66592)
+{
+ return (m66592_read(m66592, M66592_DVSTCTR) & M66592_RHST);
+}
+
+static void enable_pipe_irq(struct m66592 *m66592, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = m66592_read(m66592, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
+ M66592_INTENB0);
+ m66592_bset(m66592, (1 << pipenum), reg);
+ m66592_write(m66592, tmp, M66592_INTENB0);
+}
+
+static void disable_pipe_irq(struct m66592 *m66592, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = m66592_read(m66592, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_BEMPE | M66592_NRDYE | M66592_BRDYE,
+ M66592_INTENB0);
+ m66592_bclr(m66592, (1 << pipenum), reg);
+ m66592_write(m66592, tmp, M66592_INTENB0);
+}
+
+static void m66592_usb_connect(struct m66592 *m66592)
+{
+ m66592_bset(m66592, M66592_CTRE, M66592_INTENB0);
+ m66592_bset(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
+ M66592_INTENB0);
+ m66592_bset(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
+
+ m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
+}
+
+static void m66592_usb_disconnect(struct m66592 *m66592)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ m66592_bclr(m66592, M66592_CTRE, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_WDST | M66592_RDST | M66592_CMPL,
+ M66592_INTENB0);
+ m66592_bclr(m66592, M66592_BEMPE | M66592_BRDYE, M66592_INTENB0);
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+
+ m66592->gadget.speed = USB_SPEED_UNKNOWN;
+ spin_unlock(&m66592->lock);
+ m66592->driver->disconnect(&m66592->gadget);
+ spin_lock(&m66592->lock);
+
+ disable_controller(m66592);
+ INIT_LIST_HEAD(&m66592->ep[0].queue);
+}
+
+static inline u16 control_reg_get_pid(struct m66592 *m66592, u16 pipenum)
+{
+ u16 pid = 0;
+ unsigned long offset;
+
+ if (pipenum == 0)
+ pid = m66592_read(m66592, M66592_DCPCTR) & M66592_PID;
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ pid = m66592_read(m66592, offset) & M66592_PID;
+ } else
+ printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
+
+ return pid;
+}
+
+static inline void control_reg_set_pid(struct m66592 *m66592, u16 pipenum,
+ u16 pid)
+{
+ unsigned long offset;
+
+ if (pipenum == 0)
+ m66592_mdfy(m66592, pid, M66592_PID, M66592_DCPCTR);
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ m66592_mdfy(m66592, pid, M66592_PID, offset);
+ } else
+ printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
+}
+
+static inline void pipe_start(struct m66592 *m66592, u16 pipenum)
+{
+ control_reg_set_pid(m66592, pipenum, M66592_PID_BUF);
+}
+
+static inline void pipe_stop(struct m66592 *m66592, u16 pipenum)
+{
+ control_reg_set_pid(m66592, pipenum, M66592_PID_NAK);
+}
+
+static inline void pipe_stall(struct m66592 *m66592, u16 pipenum)
+{
+ control_reg_set_pid(m66592, pipenum, M66592_PID_STALL);
+}
+
+static inline u16 control_reg_get(struct m66592 *m66592, u16 pipenum)
+{
+ u16 ret = 0;
+ unsigned long offset;
+
+ if (pipenum == 0)
+ ret = m66592_read(m66592, M66592_DCPCTR);
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ ret = m66592_read(m66592, offset);
+ } else
+ printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
+
+ return ret;
+}
+
+static inline void control_reg_sqclr(struct m66592 *m66592, u16 pipenum)
+{
+ unsigned long offset;
+
+ pipe_stop(m66592, pipenum);
+
+ if (pipenum == 0)
+ m66592_bset(m66592, M66592_SQCLR, M66592_DCPCTR);
+ else if (pipenum < M66592_MAX_NUM_PIPE) {
+ offset = get_pipectr_addr(pipenum);
+ m66592_bset(m66592, M66592_SQCLR, offset);
+ } else
+ printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
+}
+
+static inline int get_buffer_size(struct m66592 *m66592, u16 pipenum)
+{
+ u16 tmp;
+ int size;
+
+ if (pipenum == 0) {
+ tmp = m66592_read(m66592, M66592_DCPCFG);
+ if ((tmp & M66592_CNTMD) != 0)
+ size = 256;
+ else {
+ tmp = m66592_read(m66592, M66592_DCPMAXP);
+ size = tmp & M66592_MAXP;
+ }
+ } else {
+ m66592_write(m66592, pipenum, M66592_PIPESEL);
+ tmp = m66592_read(m66592, M66592_PIPECFG);
+ if ((tmp & M66592_CNTMD) != 0) {
+ tmp = m66592_read(m66592, M66592_PIPEBUF);
+ size = ((tmp >> 10) + 1) * 64;
+ } else {
+ tmp = m66592_read(m66592, M66592_PIPEMAXP);
+ size = tmp & M66592_MXPS;
+ }
+ }
+
+ return size;
+}
+
+static inline void pipe_change(struct m66592 *m66592, u16 pipenum)
+{
+ struct m66592_ep *ep = m66592->pipenum2ep[pipenum];
+
+ if (ep->use_dma)
+ return;
+
+ m66592_mdfy(m66592, pipenum, M66592_CURPIPE, ep->fifosel);
+
+ ndelay(450);
+
+ m66592_bset(m66592, M66592_MBW, ep->fifosel);
+}
+
+static int pipe_buffer_setting(struct m66592 *m66592,
+ struct m66592_pipe_info *info)
+{
+ u16 bufnum = 0, buf_bsize = 0;
+ u16 pipecfg = 0;
+
+ if (info->pipe == 0)
+ return -EINVAL;
+
+ m66592_write(m66592, info->pipe, M66592_PIPESEL);
+
+ if (info->dir_in)
+ pipecfg |= M66592_DIR;
+ pipecfg |= info->type;
+ pipecfg |= info->epnum;
+ switch (info->type) {
+ case M66592_INT:
+ bufnum = 4 + (info->pipe - M66592_BASE_PIPENUM_INT);
+ buf_bsize = 0;
+ break;
+ case M66592_BULK:
+ bufnum = m66592->bi_bufnum +
+ (info->pipe - M66592_BASE_PIPENUM_BULK) * 16;
+ m66592->bi_bufnum += 16;
+ buf_bsize = 7;
+ pipecfg |= M66592_DBLB;
+ if (!info->dir_in)
+ pipecfg |= M66592_SHTNAK;
+ break;
+ case M66592_ISO:
+ bufnum = m66592->bi_bufnum +
+ (info->pipe - M66592_BASE_PIPENUM_ISOC) * 16;
+ m66592->bi_bufnum += 16;
+ buf_bsize = 7;
+ break;
+ }
+ if (m66592->bi_bufnum > M66592_MAX_BUFNUM) {
+ printk(KERN_ERR "m66592 pipe memory is insufficient(%d)\n",
+ m66592->bi_bufnum);
+ return -ENOMEM;
+ }
+
+ m66592_write(m66592, pipecfg, M66592_PIPECFG);
+ m66592_write(m66592, (buf_bsize << 10) | (bufnum), M66592_PIPEBUF);
+ m66592_write(m66592, info->maxpacket, M66592_PIPEMAXP);
+ if (info->interval)
+ info->interval--;
+ m66592_write(m66592, info->interval, M66592_PIPEPERI);
+
+ return 0;
+}
+
+static void pipe_buffer_release(struct m66592 *m66592,
+ struct m66592_pipe_info *info)
+{
+ if (info->pipe == 0)
+ return;
+
+ switch (info->type) {
+ case M66592_BULK:
+ if (is_bulk_pipe(info->pipe))
+ m66592->bi_bufnum -= 16;
+ break;
+ case M66592_ISO:
+ if (is_isoc_pipe(info->pipe))
+ m66592->bi_bufnum -= 16;
+ break;
+ }
+
+ if (is_bulk_pipe(info->pipe)) {
+ m66592->bulk--;
+ } else if (is_interrupt_pipe(info->pipe))
+ m66592->interrupt--;
+ else if (is_isoc_pipe(info->pipe)) {
+ m66592->isochronous--;
+ if (info->type == M66592_BULK)
+ m66592->bulk--;
+ } else
+ printk(KERN_ERR "ep_release: unexpected pipenum (%d)\n",
+ info->pipe);
+}
+
+static void pipe_initialize(struct m66592_ep *ep)
+{
+ struct m66592 *m66592 = ep->m66592;
+
+ m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel);
+
+ m66592_write(m66592, M66592_ACLRM, ep->pipectr);
+ m66592_write(m66592, 0, ep->pipectr);
+ m66592_write(m66592, M66592_SQCLR, ep->pipectr);
+ if (ep->use_dma) {
+ m66592_mdfy(m66592, ep->pipenum, M66592_CURPIPE, ep->fifosel);
+
+ ndelay(450);
+
+ m66592_bset(m66592, M66592_MBW, ep->fifosel);
+ }
+}
+
+static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep,
+ const struct usb_endpoint_descriptor *desc,
+ u16 pipenum, int dma)
+{
+ if ((pipenum != 0) && dma) {
+ if (m66592->num_dma == 0) {
+ m66592->num_dma++;
+ ep->use_dma = 1;
+ ep->fifoaddr = M66592_D0FIFO;
+ ep->fifosel = M66592_D0FIFOSEL;
+ ep->fifoctr = M66592_D0FIFOCTR;
+ ep->fifotrn = M66592_D0FIFOTRN;
+ } else if (m66592->num_dma == 1) {
+ m66592->num_dma++;
+ ep->use_dma = 1;
+ ep->fifoaddr = M66592_D1FIFO;
+ ep->fifosel = M66592_D1FIFOSEL;
+ ep->fifoctr = M66592_D1FIFOCTR;
+ ep->fifotrn = M66592_D1FIFOTRN;
+ } else {
+ ep->use_dma = 0;
+ ep->fifoaddr = M66592_CFIFO;
+ ep->fifosel = M66592_CFIFOSEL;
+ ep->fifoctr = M66592_CFIFOCTR;
+ ep->fifotrn = 0;
+ }
+ } else {
+ ep->use_dma = 0;
+ ep->fifoaddr = M66592_CFIFO;
+ ep->fifosel = M66592_CFIFOSEL;
+ ep->fifoctr = M66592_CFIFOCTR;
+ ep->fifotrn = 0;
+ }
+
+ ep->pipectr = get_pipectr_addr(pipenum);
+ ep->pipenum = pipenum;
+ ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ m66592->pipenum2ep[pipenum] = ep;
+ m66592->epaddr2ep[desc->bEndpointAddress&USB_ENDPOINT_NUMBER_MASK] = ep;
+ INIT_LIST_HEAD(&ep->queue);
+}
+
+static void m66592_ep_release(struct m66592_ep *ep)
+{
+ struct m66592 *m66592 = ep->m66592;
+ u16 pipenum = ep->pipenum;
+
+ if (pipenum == 0)
+ return;
+
+ if (ep->use_dma)
+ m66592->num_dma--;
+ ep->pipenum = 0;
+ ep->busy = 0;
+ ep->use_dma = 0;
+}
+
+static int alloc_pipe_config(struct m66592_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct m66592 *m66592 = ep->m66592;
+ struct m66592_pipe_info info;
+ int dma = 0;
+ int *counter;
+ int ret;
+
+ ep->desc = desc;
+
+ BUG_ON(ep->pipenum);
+
+ switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (m66592->bulk >= M66592_MAX_NUM_BULK) {
+ if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
+ printk(KERN_ERR "bulk pipe is insufficient\n");
+ return -ENODEV;
+ } else {
+ info.pipe = M66592_BASE_PIPENUM_ISOC
+ + m66592->isochronous;
+ counter = &m66592->isochronous;
+ }
+ } else {
+ info.pipe = M66592_BASE_PIPENUM_BULK + m66592->bulk;
+ counter = &m66592->bulk;
+ }
+ info.type = M66592_BULK;
+ dma = 1;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (m66592->interrupt >= M66592_MAX_NUM_INT) {
+ printk(KERN_ERR "interrupt pipe is insufficient\n");
+ return -ENODEV;
+ }
+ info.pipe = M66592_BASE_PIPENUM_INT + m66592->interrupt;
+ info.type = M66592_INT;
+ counter = &m66592->interrupt;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (m66592->isochronous >= M66592_MAX_NUM_ISOC) {
+ printk(KERN_ERR "isochronous pipe is insufficient\n");
+ return -ENODEV;
+ }
+ info.pipe = M66592_BASE_PIPENUM_ISOC + m66592->isochronous;
+ info.type = M66592_ISO;
+ counter = &m66592->isochronous;
+ break;
+ default:
+ printk(KERN_ERR "unexpected xfer type\n");
+ return -EINVAL;
+ }
+ ep->type = info.type;
+
+ info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+ info.interval = desc->bInterval;
+ if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ info.dir_in = 1;
+ else
+ info.dir_in = 0;
+
+ ret = pipe_buffer_setting(m66592, &info);
+ if (ret < 0) {
+ printk(KERN_ERR "pipe_buffer_setting fail\n");
+ return ret;
+ }
+
+ (*counter)++;
+ if ((counter == &m66592->isochronous) && info.type == M66592_BULK)
+ m66592->bulk++;
+
+ m66592_ep_setting(m66592, ep, desc, info.pipe, dma);
+ pipe_initialize(ep);
+
+ return 0;
+}
+
+static int free_pipe_config(struct m66592_ep *ep)
+{
+ struct m66592 *m66592 = ep->m66592;
+ struct m66592_pipe_info info;
+
+ info.pipe = ep->pipenum;
+ info.type = ep->type;
+ pipe_buffer_release(m66592, &info);
+ m66592_ep_release(ep);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+static void pipe_irq_enable(struct m66592 *m66592, u16 pipenum)
+{
+ enable_irq_ready(m66592, pipenum);
+ enable_irq_nrdy(m66592, pipenum);
+}
+
+static void pipe_irq_disable(struct m66592 *m66592, u16 pipenum)
+{
+ disable_irq_ready(m66592, pipenum);
+ disable_irq_nrdy(m66592, pipenum);
+}
+
+/* if ccpl is set, the gadget driver's complete function is not called */
+static void control_end(struct m66592 *m66592, unsigned ccpl)
+{
+ m66592->ep[0].internal_ccpl = ccpl;
+ pipe_start(m66592, 0);
+ m66592_bset(m66592, M66592_CCPL, M66592_DCPCTR);
+}
+
+static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ struct m66592 *m66592 = ep->m66592;
+
+ pipe_change(m66592, ep->pipenum);
+ m66592_mdfy(m66592, M66592_ISEL | M66592_PIPE0,
+ (M66592_ISEL | M66592_CURPIPE),
+ M66592_CFIFOSEL);
+ m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+ if (req->req.length == 0) {
+ m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+ pipe_start(m66592, 0);
+ transfer_complete(ep, req, 0);
+ } else {
+ m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
+ irq_ep0_write(ep, req);
+ }
+}
+
+static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ struct m66592 *m66592 = ep->m66592;
+ u16 tmp;
+
+ pipe_change(m66592, ep->pipenum);
+ disable_irq_empty(m66592, ep->pipenum);
+ pipe_start(m66592, ep->pipenum);
+
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (unlikely((tmp & M66592_FRDY) == 0))
+ pipe_irq_enable(m66592, ep->pipenum);
+ else
+ irq_packet_write(ep, req);
+}
+
+static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req)
+{
+ struct m66592 *m66592 = ep->m66592;
+ u16 pipenum = ep->pipenum;
+
+ if (ep->pipenum == 0) {
+ m66592_mdfy(m66592, M66592_PIPE0,
+ (M66592_ISEL | M66592_CURPIPE),
+ M66592_CFIFOSEL);
+ m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+ pipe_start(m66592, pipenum);
+ pipe_irq_enable(m66592, pipenum);
+ } else {
+ if (ep->use_dma) {
+ m66592_bset(m66592, M66592_TRCLR, ep->fifosel);
+ pipe_change(m66592, pipenum);
+ m66592_bset(m66592, M66592_TRENB, ep->fifosel);
+ m66592_write(m66592,
+ (req->req.length + ep->ep.maxpacket - 1)
+ / ep->ep.maxpacket,
+ ep->fifotrn);
+ }
+ pipe_start(m66592, pipenum); /* trigger once */
+ pipe_irq_enable(m66592, pipenum);
+ }
+}
+
+static void start_packet(struct m66592_ep *ep, struct m66592_request *req)
+{
+ if (ep->desc->bEndpointAddress & USB_DIR_IN)
+ start_packet_write(ep, req);
+ else
+ start_packet_read(ep, req);
+}
+
+static void start_ep0(struct m66592_ep *ep, struct m66592_request *req)
+{
+ u16 ctsq;
+
+ ctsq = m66592_read(ep->m66592, M66592_INTSTS0) & M66592_CTSQ;
+
+ switch (ctsq) {
+ case M66592_CS_RDDS:
+ start_ep0_write(ep, req);
+ break;
+ case M66592_CS_WRDS:
+ start_packet_read(ep, req);
+ break;
+
+ case M66592_CS_WRND:
+ control_end(ep->m66592, 0);
+ break;
+ default:
+ printk(KERN_ERR "start_ep0: unexpected ctsq (%x)\n", ctsq);
+ break;
+ }
+}
+
+static void init_controller(struct m66592 *m66592)
+{
+ m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND),
+ M66592_PINCFG);
+ m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */
+ m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG);
+
+ m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG);
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+ m66592_bset(m66592, M66592_USBE, M66592_SYSCFG);
+
+ m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
+
+ msleep(3);
+
+ m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
+
+ msleep(1);
+
+ m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
+
+ m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1);
+ m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR,
+ M66592_DMA0CFG);
+}
+
+static void disable_controller(struct m66592 *m66592)
+{
+ m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG);
+ udelay(1);
+ m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG);
+ udelay(1);
+ m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG);
+ udelay(1);
+ m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG);
+}
+
+static void m66592_start_xclock(struct m66592 *m66592)
+{
+ u16 tmp;
+
+ tmp = m66592_read(m66592, M66592_SYSCFG);
+ if (!(tmp & M66592_XCKE))
+ m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG);
+}
+
+/*-------------------------------------------------------------------------*/
+static void transfer_complete(struct m66592_ep *ep,
+ struct m66592_request *req, int status)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ int restart = 0;
+
+ if (unlikely(ep->pipenum == 0)) {
+ if (ep->internal_ccpl) {
+ ep->internal_ccpl = 0;
+ return;
+ }
+ }
+
+ list_del_init(&req->queue);
+ if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
+ req->req.status = -ESHUTDOWN;
+ else
+ req->req.status = status;
+
+ if (!list_empty(&ep->queue))
+ restart = 1;
+
+ spin_unlock(&ep->m66592->lock);
+ req->req.complete(&ep->ep, &req->req);
+ spin_lock(&ep->m66592->lock);
+
+ if (restart) {
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ if (ep->desc)
+ start_packet(ep, req);
+ }
+}
+
+static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ int i;
+ u16 tmp;
+ unsigned bufsize;
+ size_t size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct m66592 *m66592 = ep->m66592;
+
+ pipe_change(m66592, pipenum);
+ m66592_bset(m66592, M66592_ISEL, ep->fifosel);
+
+ i = 0;
+ do {
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (i++ > 100000) {
+ printk(KERN_ERR "pipe0 is busy. maybe cpu i/o bus "
+ "conflict. please power off this controller.");
+ return;
+ }
+ ndelay(1);
+ } while ((tmp & M66592_FRDY) == 0);
+
+ /* prepare parameters */
+ bufsize = get_buffer_size(m66592, pipenum);
+ buf = req->req.buf + req->req.actual;
+ size = min(bufsize, req->req.length - req->req.actual);
+
+ /* write fifo */
+ if (req->req.buf) {
+ if (size > 0)
+ m66592_write_fifo(m66592, ep->fifoaddr, buf, size);
+ if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
+ m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+ }
+
+ /* update parameters */
+ req->req.actual += size;
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ disable_irq_ready(m66592, pipenum);
+ disable_irq_empty(m66592, pipenum);
+ } else {
+ disable_irq_ready(m66592, pipenum);
+ enable_irq_empty(m66592, pipenum);
+ }
+ pipe_start(m66592, pipenum);
+}
+
+static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
+{
+ u16 tmp;
+ unsigned bufsize;
+ size_t size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct m66592 *m66592 = ep->m66592;
+
+ pipe_change(m66592, pipenum);
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (unlikely((tmp & M66592_FRDY) == 0)) {
+ pipe_stop(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+ printk(KERN_ERR "write fifo not ready. pipenum=%d\n", pipenum);
+ return;
+ }
+
+ /* prepare parameters */
+ bufsize = get_buffer_size(m66592, pipenum);
+ buf = req->req.buf + req->req.actual;
+ size = min(bufsize, req->req.length - req->req.actual);
+
+ /* write fifo */
+ if (req->req.buf) {
+ m66592_write_fifo(m66592, ep->fifoaddr, buf, size);
+ if ((size == 0)
+ || ((size % ep->ep.maxpacket) != 0)
+ || ((bufsize != ep->ep.maxpacket)
+ && (bufsize > size)))
+ m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
+ }
+
+ /* update parameters */
+ req->req.actual += size;
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ disable_irq_ready(m66592, pipenum);
+ enable_irq_empty(m66592, pipenum);
+ } else {
+ disable_irq_empty(m66592, pipenum);
+ pipe_irq_enable(m66592, pipenum);
+ }
+}
+
+static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req)
+{
+ u16 tmp;
+ int rcv_len, bufsize, req_len;
+ int size;
+ void *buf;
+ u16 pipenum = ep->pipenum;
+ struct m66592 *m66592 = ep->m66592;
+ int finish = 0;
+
+ pipe_change(m66592, pipenum);
+ tmp = m66592_read(m66592, ep->fifoctr);
+ if (unlikely((tmp & M66592_FRDY) == 0)) {
+ req->req.status = -EPIPE;
+ pipe_stop(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+ printk(KERN_ERR "read fifo not ready");
+ return;
+ }
+
+ /* prepare parameters */
+ rcv_len = tmp & M66592_DTLN;
+ bufsize = get_buffer_size(m66592, pipenum);
+
+ buf = req->req.buf + req->req.actual;
+ req_len = req->req.length - req->req.actual;
+ if (rcv_len < bufsize)
+ size = min(rcv_len, req_len);
+ else
+ size = min(bufsize, req_len);
+
+ /* update parameters */
+ req->req.actual += size;
+
+ /* check transfer finish */
+ if ((!req->req.zero && (req->req.actual == req->req.length))
+ || (size % ep->ep.maxpacket)
+ || (size == 0)) {
+ pipe_stop(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+ finish = 1;
+ }
+
+ /* read fifo */
+ if (req->req.buf) {
+ if (size == 0)
+ m66592_write(m66592, M66592_BCLR, ep->fifoctr);
+ else
+ m66592_read_fifo(m66592, ep->fifoaddr, buf, size);
+ }
+
+ if ((ep->pipenum != 0) && finish)
+ transfer_complete(ep, req, 0);
+}
+
+static void irq_pipe_ready(struct m66592 *m66592, u16 status, u16 enb)
+{
+ u16 check;
+ u16 pipenum;
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+
+ if ((status & M66592_BRDY0) && (enb & M66592_BRDY0)) {
+ m66592_write(m66592, ~M66592_BRDY0, M66592_BRDYSTS);
+ m66592_mdfy(m66592, M66592_PIPE0, M66592_CURPIPE,
+ M66592_CFIFOSEL);
+
+ ep = &m66592->ep[0];
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ irq_packet_read(ep, req);
+ } else {
+ for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if ((status & check) && (enb & check)) {
+ m66592_write(m66592, ~check, M66592_BRDYSTS);
+ ep = m66592->pipenum2ep[pipenum];
+ req = list_entry(ep->queue.next,
+ struct m66592_request, queue);
+ if (ep->desc->bEndpointAddress & USB_DIR_IN)
+ irq_packet_write(ep, req);
+ else
+ irq_packet_read(ep, req);
+ }
+ }
+ }
+}
+
+static void irq_pipe_empty(struct m66592 *m66592, u16 status, u16 enb)
+{
+ u16 tmp;
+ u16 check;
+ u16 pipenum;
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+
+ if ((status & M66592_BEMP0) && (enb & M66592_BEMP0)) {
+ m66592_write(m66592, ~M66592_BEMP0, M66592_BEMPSTS);
+
+ ep = &m66592->ep[0];
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ irq_ep0_write(ep, req);
+ } else {
+ for (pipenum = 1; pipenum < M66592_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if ((status & check) && (enb & check)) {
+ m66592_write(m66592, ~check, M66592_BEMPSTS);
+ tmp = control_reg_get(m66592, pipenum);
+ if ((tmp & M66592_INBUFM) == 0) {
+ disable_irq_empty(m66592, pipenum);
+ pipe_irq_disable(m66592, pipenum);
+ pipe_stop(m66592, pipenum);
+ ep = m66592->pipenum2ep[pipenum];
+ req = list_entry(ep->queue.next,
+ struct m66592_request,
+ queue);
+ if (!list_empty(&ep->queue))
+ transfer_complete(ep, req, 0);
+ }
+ }
+ }
+ }
+}
+
+static void get_status(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ struct m66592_ep *ep;
+ u16 pid;
+ u16 status = 0;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ break;
+ case USB_RECIP_INTERFACE:
+ status = 0;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pid = control_reg_get_pid(m66592, ep->pipenum);
+ if (pid == M66592_PID_STALL)
+ status = 1 << USB_ENDPOINT_HALT;
+ else
+ status = 0;
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ return; /* exit */
+ }
+
+ m66592->ep0_data = cpu_to_le16(status);
+ m66592->ep0_req->buf = &m66592->ep0_data;
+ m66592->ep0_req->length = 2;
+ /* AV: what happens if we get called again before that gets through? */
+ spin_unlock(&m66592->lock);
+ m66592_queue(m66592->gadget.ep0, m66592->ep0_req, GFP_KERNEL);
+ spin_lock(&m66592->lock);
+}
+
+static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_INTERFACE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pipe_stop(m66592, ep->pipenum);
+ control_reg_sqclr(m66592, ep->pipenum);
+
+ control_end(m66592, 1);
+
+ req = list_entry(ep->queue.next,
+ struct m66592_request, queue);
+ if (ep->busy) {
+ ep->busy = 0;
+ if (list_empty(&ep->queue))
+ break;
+ start_packet(ep, req);
+ } else if (!list_empty(&ep->queue))
+ pipe_start(m66592, ep->pipenum);
+ }
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ break;
+ }
+}
+
+static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_INTERFACE:
+ control_end(m66592, 1);
+ break;
+ case USB_RECIP_ENDPOINT: {
+ struct m66592_ep *ep;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+
+ ep = m66592->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
+ pipe_stall(m66592, ep->pipenum);
+
+ control_end(m66592, 1);
+ }
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ break;
+ }
+}
+
+/* if return value is true, call class driver's setup() */
+static int setup_packet(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+{
+ u16 *p = (u16 *)ctrl;
+ unsigned long offset = M66592_USBREQ;
+ int i, ret = 0;
+
+ /* read fifo */
+ m66592_write(m66592, ~M66592_VALID, M66592_INTSTS0);
+
+ for (i = 0; i < 4; i++)
+ p[i] = m66592_read(m66592, offset + i*2);
+
+ /* check request */
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ get_status(m66592, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ clear_feature(m66592, ctrl);
+ break;
+ case USB_REQ_SET_FEATURE:
+ set_feature(m66592, ctrl);
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ } else
+ ret = 1;
+ return ret;
+}
+
+static void m66592_update_usb_speed(struct m66592 *m66592)
+{
+ u16 speed = get_usb_speed(m66592);
+
+ switch (speed) {
+ case M66592_HSMODE:
+ m66592->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case M66592_FSMODE:
+ m66592->gadget.speed = USB_SPEED_FULL;
+ break;
+ default:
+ m66592->gadget.speed = USB_SPEED_UNKNOWN;
+ printk(KERN_ERR "USB speed unknown\n");
+ }
+}
+
+static void irq_device_state(struct m66592 *m66592)
+{
+ u16 dvsq;
+
+ dvsq = m66592_read(m66592, M66592_INTSTS0) & M66592_DVSQ;
+ m66592_write(m66592, ~M66592_DVST, M66592_INTSTS0);
+
+ if (dvsq == M66592_DS_DFLT) { /* bus reset */
+ m66592->driver->disconnect(&m66592->gadget);
+ m66592_update_usb_speed(m66592);
+ }
+ if (m66592->old_dvsq == M66592_DS_CNFG && dvsq != M66592_DS_CNFG)
+ m66592_update_usb_speed(m66592);
+ if ((dvsq == M66592_DS_CNFG || dvsq == M66592_DS_ADDS)
+ && m66592->gadget.speed == USB_SPEED_UNKNOWN)
+ m66592_update_usb_speed(m66592);
+
+ m66592->old_dvsq = dvsq;
+}
+
+static void irq_control_stage(struct m66592 *m66592)
+__releases(m66592->lock)
+__acquires(m66592->lock)
+{
+ struct usb_ctrlrequest ctrl;
+ u16 ctsq;
+
+ ctsq = m66592_read(m66592, M66592_INTSTS0) & M66592_CTSQ;
+ m66592_write(m66592, ~M66592_CTRT, M66592_INTSTS0);
+
+ switch (ctsq) {
+ case M66592_CS_IDST: {
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ ep = &m66592->ep[0];
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ transfer_complete(ep, req, 0);
+ }
+ break;
+
+ case M66592_CS_RDDS:
+ case M66592_CS_WRDS:
+ case M66592_CS_WRND:
+ if (setup_packet(m66592, &ctrl)) {
+ spin_unlock(&m66592->lock);
+ if (m66592->driver->setup(&m66592->gadget, &ctrl) < 0)
+ pipe_stall(m66592, 0);
+ spin_lock(&m66592->lock);
+ }
+ break;
+ case M66592_CS_RDSS:
+ case M66592_CS_WRSS:
+ control_end(m66592, 0);
+ break;
+ default:
+ printk(KERN_ERR "ctrl_stage: unexpect ctsq(%x)\n", ctsq);
+ break;
+ }
+}
+
+static irqreturn_t m66592_irq(int irq, void *_m66592)
+{
+ struct m66592 *m66592 = _m66592;
+ u16 intsts0;
+ u16 intenb0;
+ u16 brdysts, nrdysts, bempsts;
+ u16 brdyenb, nrdyenb, bempenb;
+ u16 savepipe;
+ u16 mask0;
+
+ spin_lock(&m66592->lock);
+
+ intsts0 = m66592_read(m66592, M66592_INTSTS0);
+ intenb0 = m66592_read(m66592, M66592_INTENB0);
+
+ savepipe = m66592_read(m66592, M66592_CFIFOSEL);
+
+ mask0 = intsts0 & intenb0;
+ if (mask0) {
+ brdysts = m66592_read(m66592, M66592_BRDYSTS);
+ nrdysts = m66592_read(m66592, M66592_NRDYSTS);
+ bempsts = m66592_read(m66592, M66592_BEMPSTS);
+ brdyenb = m66592_read(m66592, M66592_BRDYENB);
+ nrdyenb = m66592_read(m66592, M66592_NRDYENB);
+ bempenb = m66592_read(m66592, M66592_BEMPENB);
+
+ if (mask0 & M66592_VBINT) {
+ m66592_write(m66592, 0xffff & ~M66592_VBINT,
+ M66592_INTSTS0);
+ m66592_start_xclock(m66592);
+
+ /* start vbus sampling */
+ m66592->old_vbus = m66592_read(m66592, M66592_INTSTS0)
+ & M66592_VBSTS;
+ m66592->scount = M66592_MAX_SAMPLING;
+
+ mod_timer(&m66592->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ if (intsts0 & M66592_DVSQ)
+ irq_device_state(m66592);
+
+ if ((intsts0 & M66592_BRDY) && (intenb0 & M66592_BRDYE)
+ && (brdysts & brdyenb)) {
+ irq_pipe_ready(m66592, brdysts, brdyenb);
+ }
+ if ((intsts0 & M66592_BEMP) && (intenb0 & M66592_BEMPE)
+ && (bempsts & bempenb)) {
+ irq_pipe_empty(m66592, bempsts, bempenb);
+ }
+
+ if (intsts0 & M66592_CTRT)
+ irq_control_stage(m66592);
+ }
+
+ m66592_write(m66592, savepipe, M66592_CFIFOSEL);
+
+ spin_unlock(&m66592->lock);
+ return IRQ_HANDLED;
+}
+
+static void m66592_timer(unsigned long _m66592)
+{
+ struct m66592 *m66592 = (struct m66592 *)_m66592;
+ unsigned long flags;
+ u16 tmp;
+
+ spin_lock_irqsave(&m66592->lock, flags);
+ tmp = m66592_read(m66592, M66592_SYSCFG);
+ if (!(tmp & M66592_RCKE)) {
+ m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG);
+ udelay(10);
+ m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG);
+ }
+ if (m66592->scount > 0) {
+ tmp = m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS;
+ if (tmp == m66592->old_vbus) {
+ m66592->scount--;
+ if (m66592->scount == 0) {
+ if (tmp == M66592_VBSTS)
+ m66592_usb_connect(m66592);
+ else
+ m66592_usb_disconnect(m66592);
+ } else {
+ mod_timer(&m66592->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ } else {
+ m66592->scount = M66592_MAX_SAMPLING;
+ m66592->old_vbus = tmp;
+ mod_timer(&m66592->timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ }
+ spin_unlock_irqrestore(&m66592->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+static int m66592_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct m66592_ep *ep;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ return alloc_pipe_config(ep, desc);
+}
+
+static int m66592_disable(struct usb_ep *_ep)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ BUG_ON(!ep);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ transfer_complete(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+ }
+
+ pipe_irq_disable(ep->m66592, ep->pipenum);
+ return free_pipe_config(ep);
+}
+
+static struct usb_request *m66592_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct m66592_request *req;
+
+ req = kzalloc(sizeof(struct m66592_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void m66592_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct m66592_request *req;
+
+ req = container_of(_req, struct m66592_request, req);
+ kfree(req);
+}
+
+static int m66592_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+ int request = 0;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ req = container_of(_req, struct m66592_request, req);
+
+ if (ep->m66592->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+
+ if (list_empty(&ep->queue))
+ request = 1;
+
+ list_add_tail(&req->queue, &ep->queue);
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+
+ if (ep->desc == 0) /* control */
+ start_ep0(ep, req);
+ else {
+ if (request && !ep->busy)
+ start_packet(ep, req);
+ }
+
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+
+ return 0;
+}
+
+static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ req = container_of(_req, struct m66592_request, req);
+
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ if (!list_empty(&ep->queue))
+ transfer_complete(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+
+ return 0;
+}
+
+static int m66592_set_halt(struct usb_ep *_ep, int value)
+{
+ struct m66592_ep *ep;
+ struct m66592_request *req;
+ unsigned long flags;
+ int ret = 0;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ req = list_entry(ep->queue.next, struct m66592_request, queue);
+
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (value) {
+ ep->busy = 1;
+ pipe_stall(ep->m66592, ep->pipenum);
+ } else {
+ ep->busy = 0;
+ pipe_stop(ep->m66592, ep->pipenum);
+ }
+
+out:
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+ return ret;
+}
+
+static void m66592_fifo_flush(struct usb_ep *_ep)
+{
+ struct m66592_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct m66592_ep, ep);
+ spin_lock_irqsave(&ep->m66592->lock, flags);
+ if (list_empty(&ep->queue) && !ep->busy) {
+ pipe_stop(ep->m66592, ep->pipenum);
+ m66592_bclr(ep->m66592, M66592_BCLR, ep->fifoctr);
+ }
+ spin_unlock_irqrestore(&ep->m66592->lock, flags);
+}
+
+static struct usb_ep_ops m66592_ep_ops = {
+ .enable = m66592_enable,
+ .disable = m66592_disable,
+
+ .alloc_request = m66592_alloc_request,
+ .free_request = m66592_free_request,
+
+ .queue = m66592_queue,
+ .dequeue = m66592_dequeue,
+
+ .set_halt = m66592_set_halt,
+ .fifo_flush = m66592_fifo_flush,
+};
+
+/*-------------------------------------------------------------------------*/
+static struct m66592 *the_controller;
+
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct m66592 *m66592 = the_controller;
+ int retval;
+
+ if (!driver
+ || driver->speed != USB_SPEED_HIGH
+ || !driver->bind
+ || !driver->setup)
+ return -EINVAL;
+ if (!m66592)
+ return -ENODEV;
+ if (m66592->driver)
+ return -EBUSY;
+
+ /* hook up the driver */
+ driver->driver.bus = NULL;
+ m66592->driver = driver;
+ m66592->gadget.dev.driver = &driver->driver;
+
+ retval = device_add(&m66592->gadget.dev);
+ if (retval) {
+ printk(KERN_ERR "device_add error (%d)\n", retval);
+ goto error;
+ }
+
+ retval = driver->bind(&m66592->gadget);
+ if (retval) {
+ printk(KERN_ERR "bind to driver error (%d)\n", retval);
+ device_del(&m66592->gadget.dev);
+ goto error;
+ }
+
+ m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+ if (m66592_read(m66592, M66592_INTSTS0) & M66592_VBSTS) {
+ m66592_start_xclock(m66592);
+ /* start vbus sampling */
+ m66592->old_vbus = m66592_read(m66592,
+ M66592_INTSTS0) & M66592_VBSTS;
+ m66592->scount = M66592_MAX_SAMPLING;
+ mod_timer(&m66592->timer, jiffies + msecs_to_jiffies(50));
+ }
+
+ return 0;
+
+error:
+ m66592->driver = NULL;
+ m66592->gadget.dev.driver = NULL;
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct m66592 *m66592 = the_controller;
+ unsigned long flags;
+
+ if (driver != m66592->driver || !driver->unbind)
+ return -EINVAL;
+
+ spin_lock_irqsave(&m66592->lock, flags);
+ if (m66592->gadget.speed != USB_SPEED_UNKNOWN)
+ m66592_usb_disconnect(m66592);
+ spin_unlock_irqrestore(&m66592->lock, flags);
+
+ m66592_bclr(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+
+ driver->unbind(&m66592->gadget);
+
+ init_controller(m66592);
+ disable_controller(m66592);
+
+ device_del(&m66592->gadget.dev);
+ m66592->driver = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+/*-------------------------------------------------------------------------*/
+static int m66592_get_frame(struct usb_gadget *_gadget)
+{
+ struct m66592 *m66592 = gadget_to_m66592(_gadget);
+ return m66592_read(m66592, M66592_FRMNUM) & 0x03FF;
+}
+
+static struct usb_gadget_ops m66592_gadget_ops = {
+ .get_frame = m66592_get_frame,
+};
+
+static int __exit m66592_remove(struct platform_device *pdev)
+{
+ struct m66592 *m66592 = dev_get_drvdata(&pdev->dev);
+
+ del_timer_sync(&m66592->timer);
+ iounmap(m66592->reg);
+ free_irq(platform_get_irq(pdev, 0), m66592);
+ m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+ kfree(m66592);
+ return 0;
+}
+
+static void nop_completion(struct usb_ep *ep, struct usb_request *r)
+{
+}
+
+#define resource_len(r) (((r)->end - (r)->start) + 1)
+
+static int __init m66592_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int irq;
+ void __iomem *reg = NULL;
+ struct m66592 *m66592 = NULL;
+ int ret = 0;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ (char *)udc_name);
+ if (!res) {
+ ret = -ENODEV;
+ printk(KERN_ERR "platform_get_resource_byname error.\n");
+ goto clean_up;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ printk(KERN_ERR "platform_get_irq error.\n");
+ goto clean_up;
+ }
+
+ reg = ioremap(res->start, resource_len(res));
+ if (reg == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "ioremap error.\n");
+ goto clean_up;
+ }
+
+ /* initialize udc */
+ m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
+ if (m66592 == NULL) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "kzalloc error\n");
+ goto clean_up;
+ }
+
+ spin_lock_init(&m66592->lock);
+ dev_set_drvdata(&pdev->dev, m66592);
+
+ m66592->gadget.ops = &m66592_gadget_ops;
+ device_initialize(&m66592->gadget.dev);
+ strcpy(m66592->gadget.dev.bus_id, "gadget");
+ m66592->gadget.is_dualspeed = 1;
+ m66592->gadget.dev.parent = &pdev->dev;
+ m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
+ m66592->gadget.dev.release = pdev->dev.release;
+ m66592->gadget.name = udc_name;
+
+ init_timer(&m66592->timer);
+ m66592->timer.function = m66592_timer;
+ m66592->timer.data = (unsigned long)m66592;
+ m66592->reg = reg;
+
+ m66592->bi_bufnum = M66592_BASE_BUFNUM;
+
+ ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED,
+ udc_name, m66592);
+ if (ret < 0) {
+ printk(KERN_ERR "request_irq error (%d)\n", ret);
+ goto clean_up;
+ }
+
+ INIT_LIST_HEAD(&m66592->gadget.ep_list);
+ m66592->gadget.ep0 = &m66592->ep[0].ep;
+ INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list);
+ for (i = 0; i < M66592_MAX_NUM_PIPE; i++) {
+ struct m66592_ep *ep = &m66592->ep[i];
+
+ if (i != 0) {
+ INIT_LIST_HEAD(&m66592->ep[i].ep.ep_list);
+ list_add_tail(&m66592->ep[i].ep.ep_list,
+ &m66592->gadget.ep_list);
+ }
+ ep->m66592 = m66592;
+ INIT_LIST_HEAD(&ep->queue);
+ ep->ep.name = m66592_ep_name[i];
+ ep->ep.ops = &m66592_ep_ops;
+ ep->ep.maxpacket = 512;
+ }
+ m66592->ep[0].ep.maxpacket = 64;
+ m66592->ep[0].pipenum = 0;
+ m66592->ep[0].fifoaddr = M66592_CFIFO;
+ m66592->ep[0].fifosel = M66592_CFIFOSEL;
+ m66592->ep[0].fifoctr = M66592_CFIFOCTR;
+ m66592->ep[0].fifotrn = 0;
+ m66592->ep[0].pipectr = get_pipectr_addr(0);
+ m66592->pipenum2ep[0] = &m66592->ep[0];
+ m66592->epaddr2ep[0] = &m66592->ep[0];
+
+ the_controller = m66592;
+
+ m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
+ if (m66592->ep0_req == NULL) {
+ ret = -ENOMEM;
+ goto clean_up2;
+ }
+ m66592->ep0_req->complete = nop_completion;
+
+ init_controller(m66592);
+
+ dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
+ return 0;
+
+clean_up2:
+ free_irq(irq, m66592);
+clean_up:
+ if (m66592) {
+ if (m66592->ep0_req)
+ m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+ kfree(m66592);
+ }
+ if (reg)
+ iounmap(reg);
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+static struct platform_driver m66592_driver = {
+ .remove = __exit_p(m66592_remove),
+ .driver = {
+ .name = (char *) udc_name,
+ },
+};
+
+static int __init m66592_udc_init(void)
+{
+ return platform_driver_probe(&m66592_driver, m66592_probe);
+}
+module_init(m66592_udc_init);
+
+static void __exit m66592_udc_cleanup(void)
+{
+ platform_driver_unregister(&m66592_driver);
+}
+module_exit(m66592_udc_cleanup);
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
new file mode 100644
index 000000000000..bfa0c645f229
--- /dev/null
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -0,0 +1,575 @@
+/*
+ * M66592 UDC (USB gadget)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __M66592_UDC_H__
+#define __M66592_UDC_H__
+
+#define M66592_SYSCFG 0x00
+#define M66592_XTAL 0xC000 /* b15-14: Crystal selection */
+#define M66592_XTAL48 0x8000 /* 48MHz */
+#define M66592_XTAL24 0x4000 /* 24MHz */
+#define M66592_XTAL12 0x0000 /* 12MHz */
+#define M66592_XCKE 0x2000 /* b13: External clock enable */
+#define M66592_RCKE 0x1000 /* b12: Register clock enable */
+#define M66592_PLLC 0x0800 /* b11: PLL control */
+#define M66592_SCKE 0x0400 /* b10: USB clock enable */
+#define M66592_ATCKM 0x0100 /* b8: Automatic clock supply */
+#define M66592_HSE 0x0080 /* b7: Hi-speed enable */
+#define M66592_DCFM 0x0040 /* b6: Controller function select */
+#define M66592_DMRPD 0x0020 /* b5: D- pull down control */
+#define M66592_DPRPU 0x0010 /* b4: D+ pull up control */
+#define M66592_FSRPC 0x0004 /* b2: Full-speed receiver enable */
+#define M66592_PCUT 0x0002 /* b1: Low power sleep enable */
+#define M66592_USBE 0x0001 /* b0: USB module operation enable */
+
+#define M66592_SYSSTS 0x02
+#define M66592_LNST 0x0003 /* b1-0: D+, D- line status */
+#define M66592_SE1 0x0003 /* SE1 */
+#define M66592_KSTS 0x0002 /* K State */
+#define M66592_JSTS 0x0001 /* J State */
+#define M66592_SE0 0x0000 /* SE0 */
+
+#define M66592_DVSTCTR 0x04
+#define M66592_WKUP 0x0100 /* b8: Remote wakeup */
+#define M66592_RWUPE 0x0080 /* b7: Remote wakeup sense */
+#define M66592_USBRST 0x0040 /* b6: USB reset enable */
+#define M66592_RESUME 0x0020 /* b5: Resume enable */
+#define M66592_UACT 0x0010 /* b4: USB bus enable */
+#define M66592_RHST 0x0003 /* b1-0: Reset handshake status */
+#define M66592_HSMODE 0x0003 /* Hi-Speed mode */
+#define M66592_FSMODE 0x0002 /* Full-Speed mode */
+#define M66592_HSPROC 0x0001 /* HS handshake is processing */
+
+#define M66592_TESTMODE 0x06
+#define M66592_UTST 0x000F /* b4-0: Test select */
+#define M66592_H_TST_PACKET 0x000C /* HOST TEST Packet */
+#define M66592_H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */
+#define M66592_H_TST_K 0x000A /* HOST TEST K */
+#define M66592_H_TST_J 0x0009 /* HOST TEST J */
+#define M66592_H_TST_NORMAL 0x0000 /* HOST Normal Mode */
+#define M66592_P_TST_PACKET 0x0004 /* PERI TEST Packet */
+#define M66592_P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */
+#define M66592_P_TST_K 0x0002 /* PERI TEST K */
+#define M66592_P_TST_J 0x0001 /* PERI TEST J */
+#define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */
+
+#define M66592_PINCFG 0x0A
+#define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */
+#define M66592_BIGEND 0x0100 /* b8: Big endian mode */
+
+#define M66592_DMA0CFG 0x0C
+#define M66592_DMA1CFG 0x0E
+#define M66592_DREQA 0x4000 /* b14: Dreq active select */
+#define M66592_BURST 0x2000 /* b13: Burst mode */
+#define M66592_DACKA 0x0400 /* b10: Dack active select */
+#define M66592_DFORM 0x0380 /* b9-7: DMA mode select */
+#define M66592_CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */
+#define M66592_CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */
+#define M66592_CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */
+#define M66592_SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */
+#define M66592_SPLIT_DACK_DSTB 0x0300 /* DACK + DSTB0 mode (SPLIT bus) */
+#define M66592_DENDA 0x0040 /* b6: Dend active select */
+#define M66592_PKTM 0x0020 /* b5: Packet mode */
+#define M66592_DENDE 0x0010 /* b4: Dend enable */
+#define M66592_OBUS 0x0004 /* b2: OUTbus mode */
+
+#define M66592_CFIFO 0x10
+#define M66592_D0FIFO 0x14
+#define M66592_D1FIFO 0x18
+
+#define M66592_CFIFOSEL 0x1E
+#define M66592_D0FIFOSEL 0x24
+#define M66592_D1FIFOSEL 0x2A
+#define M66592_RCNT 0x8000 /* b15: Read count mode */
+#define M66592_REW 0x4000 /* b14: Buffer rewind */
+#define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */
+#define M66592_DREQE 0x1000 /* b12: DREQ output enable */
+#define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO */
+#define M66592_MBW_8 0x0000 /* 8bit */
+#define M66592_MBW_16 0x0400 /* 16bit */
+#define M66592_TRENB 0x0200 /* b9: Transaction counter enable */
+#define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */
+#define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */
+#define M66592_ISEL 0x0020 /* b5: DCP FIFO port direction select */
+#define M66592_CURPIPE 0x0007 /* b2-0: PIPE select */
+
+#define M66592_CFIFOCTR 0x20
+#define M66592_D0FIFOCTR 0x26
+#define M66592_D1FIFOCTR 0x2c
+#define M66592_BVAL 0x8000 /* b15: Buffer valid flag */
+#define M66592_BCLR 0x4000 /* b14: Buffer clear */
+#define M66592_FRDY 0x2000 /* b13: FIFO ready */
+#define M66592_DTLN 0x0FFF /* b11-0: FIFO received data length */
+
+#define M66592_CFIFOSIE 0x22
+#define M66592_TGL 0x8000 /* b15: Buffer toggle */
+#define M66592_SCLR 0x4000 /* b14: Buffer clear */
+#define M66592_SBUSY 0x2000 /* b13: SIE_FIFO busy */
+
+#define M66592_D0FIFOTRN 0x28
+#define M66592_D1FIFOTRN 0x2E
+#define M66592_TRNCNT 0xFFFF /* b15-0: Transaction counter */
+
+#define M66592_INTENB0 0x30
+#define M66592_VBSE 0x8000 /* b15: VBUS interrupt */
+#define M66592_RSME 0x4000 /* b14: Resume interrupt */
+#define M66592_SOFE 0x2000 /* b13: Frame update interrupt */
+#define M66592_DVSE 0x1000 /* b12: Device state transition interrupt */
+#define M66592_CTRE 0x0800 /* b11: Control transfer stage transition irq */
+#define M66592_BEMPE 0x0400 /* b10: Buffer empty interrupt */
+#define M66592_NRDYE 0x0200 /* b9: Buffer not ready interrupt */
+#define M66592_BRDYE 0x0100 /* b8: Buffer ready interrupt */
+#define M66592_URST 0x0080 /* b7: USB reset detected interrupt */
+#define M66592_SADR 0x0040 /* b6: Set address executed interrupt */
+#define M66592_SCFG 0x0020 /* b5: Set configuration executed interrupt */
+#define M66592_SUSP 0x0010 /* b4: Suspend detected interrupt */
+#define M66592_WDST 0x0008 /* b3: Control write data stage completed irq */
+#define M66592_RDST 0x0004 /* b2: Control read data stage completed irq */
+#define M66592_CMPL 0x0002 /* b1: Control transfer complete interrupt */
+#define M66592_SERR 0x0001 /* b0: Sequence error interrupt */
+
+#define M66592_INTENB1 0x32
+#define M66592_BCHGE 0x4000 /* b14: USB bus change interrupt */
+#define M66592_DTCHE 0x1000 /* b12: Detach sense interrupt */
+#define M66592_SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */
+#define M66592_SACKE 0x0010 /* b4: SETUP ACK interrupt */
+#define M66592_BRDYM 0x0004 /* b2: BRDY clear timing */
+#define M66592_INTL 0x0002 /* b1: Interrupt sense select */
+#define M66592_PCSE 0x0001 /* b0: PCUT enable by CS assert */
+
+#define M66592_BRDYENB 0x36
+#define M66592_BRDYSTS 0x46
+#define M66592_BRDY7 0x0080 /* b7: PIPE7 */
+#define M66592_BRDY6 0x0040 /* b6: PIPE6 */
+#define M66592_BRDY5 0x0020 /* b5: PIPE5 */
+#define M66592_BRDY4 0x0010 /* b4: PIPE4 */
+#define M66592_BRDY3 0x0008 /* b3: PIPE3 */
+#define M66592_BRDY2 0x0004 /* b2: PIPE2 */
+#define M66592_BRDY1 0x0002 /* b1: PIPE1 */
+#define M66592_BRDY0 0x0001 /* b0: PIPE0 */
+
+#define M66592_NRDYENB 0x38
+#define M66592_NRDYSTS 0x48
+#define M66592_NRDY7 0x0080 /* b7: PIPE7 */
+#define M66592_NRDY6 0x0040 /* b6: PIPE6 */
+#define M66592_NRDY5 0x0020 /* b5: PIPE5 */
+#define M66592_NRDY4 0x0010 /* b4: PIPE4 */
+#define M66592_NRDY3 0x0008 /* b3: PIPE3 */
+#define M66592_NRDY2 0x0004 /* b2: PIPE2 */
+#define M66592_NRDY1 0x0002 /* b1: PIPE1 */
+#define M66592_NRDY0 0x0001 /* b0: PIPE0 */
+
+#define M66592_BEMPENB 0x3A
+#define M66592_BEMPSTS 0x4A
+#define M66592_BEMP7 0x0080 /* b7: PIPE7 */
+#define M66592_BEMP6 0x0040 /* b6: PIPE6 */
+#define M66592_BEMP5 0x0020 /* b5: PIPE5 */
+#define M66592_BEMP4 0x0010 /* b4: PIPE4 */
+#define M66592_BEMP3 0x0008 /* b3: PIPE3 */
+#define M66592_BEMP2 0x0004 /* b2: PIPE2 */
+#define M66592_BEMP1 0x0002 /* b1: PIPE1 */
+#define M66592_BEMP0 0x0001 /* b0: PIPE0 */
+
+#define M66592_SOFCFG 0x3C
+#define M66592_SOFM 0x000C /* b3-2: SOF pulse mode */
+#define M66592_SOF_125US 0x0008 /* SOF OUT 125us uFrame Signal */
+#define M66592_SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */
+#define M66592_SOF_DISABLE 0x0000 /* SOF OUT Disable */
+
+#define M66592_INTSTS0 0x40
+#define M66592_VBINT 0x8000 /* b15: VBUS interrupt */
+#define M66592_RESM 0x4000 /* b14: Resume interrupt */
+#define M66592_SOFR 0x2000 /* b13: SOF frame update interrupt */
+#define M66592_DVST 0x1000 /* b12: Device state transition */
+#define M66592_CTRT 0x0800 /* b11: Control stage transition */
+#define M66592_BEMP 0x0400 /* b10: Buffer empty interrupt */
+#define M66592_NRDY 0x0200 /* b9: Buffer not ready interrupt */
+#define M66592_BRDY 0x0100 /* b8: Buffer ready interrupt */
+#define M66592_VBSTS 0x0080 /* b7: VBUS input port */
+#define M66592_DVSQ 0x0070 /* b6-4: Device state */
+#define M66592_DS_SPD_CNFG 0x0070 /* Suspend Configured */
+#define M66592_DS_SPD_ADDR 0x0060 /* Suspend Address */
+#define M66592_DS_SPD_DFLT 0x0050 /* Suspend Default */
+#define M66592_DS_SPD_POWR 0x0040 /* Suspend Powered */
+#define M66592_DS_SUSP 0x0040 /* Suspend */
+#define M66592_DS_CNFG 0x0030 /* Configured */
+#define M66592_DS_ADDS 0x0020 /* Address */
+#define M66592_DS_DFLT 0x0010 /* Default */
+#define M66592_DS_POWR 0x0000 /* Powered */
+#define M66592_DVSQS 0x0030 /* b5-4: Device state */
+#define M66592_VALID 0x0008 /* b3: Setup packet detected flag */
+#define M66592_CTSQ 0x0007 /* b2-0: Control transfer stage */
+#define M66592_CS_SQER 0x0006 /* Sequence error */
+#define M66592_CS_WRND 0x0005 /* Control write nodata status */
+#define M66592_CS_WRSS 0x0004 /* Control write status stage */
+#define M66592_CS_WRDS 0x0003 /* Control write data stage */
+#define M66592_CS_RDSS 0x0002 /* Control read status stage */
+#define M66592_CS_RDDS 0x0001 /* Control read data stage */
+#define M66592_CS_IDST 0x0000 /* Idle or setup stage */
+
+#define M66592_INTSTS1 0x42
+#define M66592_BCHG 0x4000 /* b14: USB bus change interrupt */
+#define M66592_DTCH 0x1000 /* b12: Detach sense interrupt */
+#define M66592_SIGN 0x0020 /* b5: SETUP IGNORE interrupt */
+#define M66592_SACK 0x0010 /* b4: SETUP ACK interrupt */
+
+#define M66592_FRMNUM 0x4C
+#define M66592_OVRN 0x8000 /* b15: Overrun error */
+#define M66592_CRCE 0x4000 /* b14: Received data error */
+#define M66592_SOFRM 0x0800 /* b11: SOF output mode */
+#define M66592_FRNM 0x07FF /* b10-0: Frame number */
+
+#define M66592_UFRMNUM 0x4E
+#define M66592_UFRNM 0x0007 /* b2-0: Micro frame number */
+
+#define M66592_RECOVER 0x50
+#define M66592_STSRECOV 0x0700 /* Status recovery */
+#define M66592_STSR_HI 0x0400 /* FULL(0) or HI(1) Speed */
+#define M66592_STSR_DEFAULT 0x0100 /* Default state */
+#define M66592_STSR_ADDRESS 0x0200 /* Address state */
+#define M66592_STSR_CONFIG 0x0300 /* Configured state */
+#define M66592_USBADDR 0x007F /* b6-0: USB address */
+
+#define M66592_USBREQ 0x54
+#define M66592_bRequest 0xFF00 /* b15-8: bRequest */
+#define M66592_GET_STATUS 0x0000
+#define M66592_CLEAR_FEATURE 0x0100
+#define M66592_ReqRESERVED 0x0200
+#define M66592_SET_FEATURE 0x0300
+#define M66592_ReqRESERVED1 0x0400
+#define M66592_SET_ADDRESS 0x0500
+#define M66592_GET_DESCRIPTOR 0x0600
+#define M66592_SET_DESCRIPTOR 0x0700
+#define M66592_GET_CONFIGURATION 0x0800
+#define M66592_SET_CONFIGURATION 0x0900
+#define M66592_GET_INTERFACE 0x0A00
+#define M66592_SET_INTERFACE 0x0B00
+#define M66592_SYNCH_FRAME 0x0C00
+#define M66592_bmRequestType 0x00FF /* b7-0: bmRequestType */
+#define M66592_bmRequestTypeDir 0x0080 /* b7 : Data direction */
+#define M66592_HOST_TO_DEVICE 0x0000
+#define M66592_DEVICE_TO_HOST 0x0080
+#define M66592_bmRequestTypeType 0x0060 /* b6-5: Type */
+#define M66592_STANDARD 0x0000
+#define M66592_CLASS 0x0020
+#define M66592_VENDOR 0x0040
+#define M66592_bmRequestTypeRecip 0x001F /* b4-0: Recipient */
+#define M66592_DEVICE 0x0000
+#define M66592_INTERFACE 0x0001
+#define M66592_ENDPOINT 0x0002
+
+#define M66592_USBVAL 0x56
+#define M66592_wValue 0xFFFF /* b15-0: wValue */
+/* Standard Feature Selector */
+#define M66592_ENDPOINT_HALT 0x0000
+#define M66592_DEVICE_REMOTE_WAKEUP 0x0001
+#define M66592_TEST_MODE 0x0002
+/* Descriptor Types */
+#define M66592_DT_TYPE 0xFF00
+#define M66592_GET_DT_TYPE(v) (((v) & M66592_DT_TYPE) >> 8)
+#define M66592_DT_DEVICE 0x01
+#define M66592_DT_CONFIGURATION 0x02
+#define M66592_DT_STRING 0x03
+#define M66592_DT_INTERFACE 0x04
+#define M66592_DT_ENDPOINT 0x05
+#define M66592_DT_DEVICE_QUALIFIER 0x06
+#define M66592_DT_OTHER_SPEED_CONFIGURATION 0x07
+#define M66592_DT_INTERFACE_POWER 0x08
+#define M66592_DT_INDEX 0x00FF
+#define M66592_CONF_NUM 0x00FF
+#define M66592_ALT_SET 0x00FF
+
+#define M66592_USBINDEX 0x58
+#define M66592_wIndex 0xFFFF /* b15-0: wIndex */
+#define M66592_TEST_SELECT 0xFF00 /* b15-b8: Test Mode */
+#define M66592_TEST_J 0x0100 /* Test_J */
+#define M66592_TEST_K 0x0200 /* Test_K */
+#define M66592_TEST_SE0_NAK 0x0300 /* Test_SE0_NAK */
+#define M66592_TEST_PACKET 0x0400 /* Test_Packet */
+#define M66592_TEST_FORCE_ENABLE 0x0500 /* Test_Force_Enable */
+#define M66592_TEST_STSelectors 0x0600 /* Standard test selectors */
+#define M66592_TEST_Reserved 0x4000 /* Reserved */
+#define M66592_TEST_VSTModes 0xC000 /* Vendor-specific tests */
+#define M66592_EP_DIR 0x0080 /* b7: Endpoint Direction */
+#define M66592_EP_DIR_IN 0x0080
+#define M66592_EP_DIR_OUT 0x0000
+
+#define M66592_USBLENG 0x5A
+#define M66592_wLength 0xFFFF /* b15-0: wLength */
+
+#define M66592_DCPCFG 0x5C
+#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */
+#define M66592_DIR 0x0010 /* b4: Control transfer DIR select */
+
+#define M66592_DCPMAXP 0x5E
+#define M66592_DEVSEL 0xC000 /* b15-14: Device address select */
+#define M66592_DEVICE_0 0x0000 /* Device address 0 */
+#define M66592_DEVICE_1 0x4000 /* Device address 1 */
+#define M66592_DEVICE_2 0x8000 /* Device address 2 */
+#define M66592_DEVICE_3 0xC000 /* Device address 3 */
+#define M66592_MAXP 0x007F /* b6-0: Maxpacket size of ep0 */
+
+#define M66592_DCPCTR 0x60
+#define M66592_BSTS 0x8000 /* b15: Buffer status */
+#define M66592_SUREQ 0x4000 /* b14: Send USB request */
+#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define M66592_CCPL 0x0004 /* b2: control transfer complete */
+#define M66592_PID 0x0003 /* b1-0: Response PID */
+#define M66592_PID_STALL 0x0002 /* STALL */
+#define M66592_PID_BUF 0x0001 /* BUF */
+#define M66592_PID_NAK 0x0000 /* NAK */
+
+#define M66592_PIPESEL 0x64
+#define M66592_PIPENM 0x0007 /* b2-0: Pipe select */
+#define M66592_PIPE0 0x0000 /* PIPE 0 */
+#define M66592_PIPE1 0x0001 /* PIPE 1 */
+#define M66592_PIPE2 0x0002 /* PIPE 2 */
+#define M66592_PIPE3 0x0003 /* PIPE 3 */
+#define M66592_PIPE4 0x0004 /* PIPE 4 */
+#define M66592_PIPE5 0x0005 /* PIPE 5 */
+#define M66592_PIPE6 0x0006 /* PIPE 6 */
+#define M66592_PIPE7 0x0007 /* PIPE 7 */
+
+#define M66592_PIPECFG 0x66
+#define M66592_TYP 0xC000 /* b15-14: Transfer type */
+#define M66592_ISO 0xC000 /* Isochronous */
+#define M66592_INT 0x8000 /* Interrupt */
+#define M66592_BULK 0x4000 /* Bulk */
+#define M66592_BFRE 0x0400 /* b10: Buffer ready interrupt mode */
+#define M66592_DBLB 0x0200 /* b9: Double buffer mode select */
+#define M66592_CNTMD 0x0100 /* b8: Continuous transfer mode */
+#define M66592_SHTNAK 0x0080 /* b7: Transfer end NAK */
+#define M66592_DIR 0x0010 /* b4: Transfer direction select */
+#define M66592_DIR_H_OUT 0x0010 /* HOST OUT */
+#define M66592_DIR_P_IN 0x0010 /* PERI IN */
+#define M66592_DIR_H_IN 0x0000 /* HOST IN */
+#define M66592_DIR_P_OUT 0x0000 /* PERI OUT */
+#define M66592_EPNUM 0x000F /* b3-0: Endpoint number select */
+#define M66592_EP1 0x0001
+#define M66592_EP2 0x0002
+#define M66592_EP3 0x0003
+#define M66592_EP4 0x0004
+#define M66592_EP5 0x0005
+#define M66592_EP6 0x0006
+#define M66592_EP7 0x0007
+#define M66592_EP8 0x0008
+#define M66592_EP9 0x0009
+#define M66592_EP10 0x000A
+#define M66592_EP11 0x000B
+#define M66592_EP12 0x000C
+#define M66592_EP13 0x000D
+#define M66592_EP14 0x000E
+#define M66592_EP15 0x000F
+
+#define M66592_PIPEBUF 0x68
+#define M66592_BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */
+#define M66592_BUF_SIZE(x) ((((x) / 64) - 1) << 10)
+#define M66592_BUFNMB 0x00FF /* b7-0: Pipe buffer number */
+
+#define M66592_PIPEMAXP 0x6A
+#define M66592_MXPS 0x07FF /* b10-0: Maxpacket size */
+
+#define M66592_PIPEPERI 0x6C
+#define M66592_IFIS 0x1000 /* b12: ISO in-buffer flush mode */
+#define M66592_IITV 0x0007 /* b2-0: ISO interval */
+
+#define M66592_PIPE1CTR 0x70
+#define M66592_PIPE2CTR 0x72
+#define M66592_PIPE3CTR 0x74
+#define M66592_PIPE4CTR 0x76
+#define M66592_PIPE5CTR 0x78
+#define M66592_PIPE6CTR 0x7A
+#define M66592_PIPE7CTR 0x7C
+#define M66592_BSTS 0x8000 /* b15: Buffer status */
+#define M66592_INBUFM 0x4000 /* b14: IN buffer monitor (PIPE 1-5) */
+#define M66592_ACLRM 0x0200 /* b9: Out buffer auto clear mode */
+#define M66592_SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define M66592_SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define M66592_SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define M66592_PID 0x0003 /* b1-0: Response PID */
+
+#define M66592_INVALID_REG 0x7E
+
+
+#define get_pipectr_addr(pipenum) (M66592_PIPE1CTR + ((pipenum) - 1) * 2)
+
+#define M66592_MAX_SAMPLING 10
+
+#define M66592_MAX_NUM_PIPE 8
+#define M66592_MAX_NUM_BULK 3
+#define M66592_MAX_NUM_ISOC 2
+#define M66592_MAX_NUM_INT 2
+
+#define M66592_BASE_PIPENUM_BULK 3
+#define M66592_BASE_PIPENUM_ISOC 1
+#define M66592_BASE_PIPENUM_INT 6
+
+#define M66592_BASE_BUFNUM 6
+#define M66592_MAX_BUFNUM 0x4F
+
+struct m66592_pipe_info {
+ u16 pipe;
+ u16 epnum;
+ u16 maxpacket;
+ u16 type;
+ u16 interval;
+ u16 dir_in;
+};
+
+struct m66592_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct m66592_ep {
+ struct usb_ep ep;
+ struct m66592 *m66592;
+
+ struct list_head queue;
+ unsigned busy:1;
+ unsigned internal_ccpl:1; /* used only for control transfers */
+
+ /* the following members are valid only after m66592_enable() */
+ unsigned use_dma:1;
+ u16 pipenum;
+ u16 type;
+ const struct usb_endpoint_descriptor *desc;
+ /* register address */
+ unsigned long fifoaddr;
+ unsigned long fifosel;
+ unsigned long fifoctr;
+ unsigned long fifotrn;
+ unsigned long pipectr;
+};
+
+struct m66592 {
+ spinlock_t lock;
+ void __iomem *reg;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+ struct m66592_ep ep[M66592_MAX_NUM_PIPE];
+ struct m66592_ep *pipenum2ep[M66592_MAX_NUM_PIPE];
+ struct m66592_ep *epaddr2ep[16];
+
+ struct usb_request *ep0_req; /* for internal request */
+ u16 ep0_data; /* for internal request */
+
+ struct timer_list timer;
+
+ u16 old_vbus;
+ int scount;
+
+ int old_dvsq;
+
+ /* pipe config */
+ int bulk;
+ int interrupt;
+ int isochronous;
+ int num_dma;
+ int bi_bufnum; /* buffer number for bulk and isochronous pipes */
+};
+
+#define gadget_to_m66592(_gadget) container_of(_gadget, struct m66592, gadget)
+#define m66592_to_gadget(m66592) (&m66592->gadget)
+
+#define is_bulk_pipe(pipenum) \
+ ((pipenum >= M66592_BASE_PIPENUM_BULK) && \
+ (pipenum < (M66592_BASE_PIPENUM_BULK + M66592_MAX_NUM_BULK)))
+#define is_interrupt_pipe(pipenum) \
+ ((pipenum >= M66592_BASE_PIPENUM_INT) && \
+ (pipenum < (M66592_BASE_PIPENUM_INT + M66592_MAX_NUM_INT)))
+#define is_isoc_pipe(pipenum) \
+ ((pipenum >= M66592_BASE_PIPENUM_ISOC) && \
+ (pipenum < (M66592_BASE_PIPENUM_ISOC + M66592_MAX_NUM_ISOC)))
+
+#define enable_irq_ready(m66592, pipenum) \
+ enable_pipe_irq(m66592, pipenum, M66592_BRDYENB)
+#define disable_irq_ready(m66592, pipenum) \
+ disable_pipe_irq(m66592, pipenum, M66592_BRDYENB)
+#define enable_irq_empty(m66592, pipenum) \
+ enable_pipe_irq(m66592, pipenum, M66592_BEMPENB)
+#define disable_irq_empty(m66592, pipenum) \
+ disable_pipe_irq(m66592, pipenum, M66592_BEMPENB)
+#define enable_irq_nrdy(m66592, pipenum) \
+ enable_pipe_irq(m66592, pipenum, M66592_NRDYENB)
+#define disable_irq_nrdy(m66592, pipenum) \
+ disable_pipe_irq(m66592, pipenum, M66592_NRDYENB)
+
+/*-------------------------------------------------------------------------*/
+static inline u16 m66592_read(struct m66592 *m66592, unsigned long offset)
+{
+ return inw((unsigned long)m66592->reg + offset);
+}
+
+static inline void m66592_read_fifo(struct m66592 *m66592,
+ unsigned long offset,
+ void *buf, unsigned long len)
+{
+ unsigned long fifoaddr = (unsigned long)m66592->reg + offset;
+
+ len = (len + 1) / 2;
+ insw(fifoaddr, buf, len);
+}
+
+static inline void m66592_write(struct m66592 *m66592, u16 val,
+ unsigned long offset)
+{
+ outw(val, (unsigned long)m66592->reg + offset);
+}
+
+static inline void m66592_write_fifo(struct m66592 *m66592,
+ unsigned long offset,
+ void *buf, unsigned long len)
+{
+ unsigned long fifoaddr = (unsigned long)m66592->reg + offset;
+ unsigned long odd = len & 0x0001;
+
+ len = len / 2;
+ outsw(fifoaddr, buf, len);
+ if (odd) {
+ unsigned char *p = buf + len*2;
+ outb(*p, fifoaddr);
+ }
+}
+
+static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat,
+ unsigned long offset)
+{
+ u16 tmp;
+ tmp = m66592_read(m66592, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ m66592_write(m66592, tmp, offset);
+}
+
+#define m66592_bclr(m66592, val, offset) \
+ m66592_mdfy(m66592, 0, val, offset)
+#define m66592_bset(m66592, val, offset) \
+ m66592_mdfy(m66592, val, 0, offset)
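+
+/*
+ * Usage sketch (illustrative only): m66592_bset()/m66592_bclr() are thin
+ * wrappers around m66592_mdfy(), which does a 16-bit read-modify-write of
+ * the register at the given offset.  For example, driver registration
+ * enables the VBUS and USB-reset interrupts without disturbing the other
+ * INTENB0 bits:
+ *
+ *     m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
+ *
+ * and usb_gadget_unregister_driver() masks them again with m66592_bclr().
+ */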
+
+#endif /* ifndef __M66592_UDC_H__ */
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index d975ecf18e00..c3d364ecd4f8 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -450,100 +450,6 @@ net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
/*-------------------------------------------------------------------------*/
-/*
- * dma-coherent memory allocation (for dma-capable endpoints)
- *
- * NOTE: the dma_*_coherent() API calls suck. Most implementations are
- * (a) page-oriented, so small buffers lose big; and (b) asymmetric with
- * respect to calls with irqs disabled: alloc is safe, free is not.
- * We currently work around (b), but not (a).
- */
-
-static void *
-net2280_alloc_buffer (
- struct usb_ep *_ep,
- unsigned bytes,
- dma_addr_t *dma,
- gfp_t gfp_flags
-)
-{
- void *retval;
- struct net2280_ep *ep;
-
- ep = container_of (_ep, struct net2280_ep, ep);
- if (!_ep)
- return NULL;
- *dma = DMA_ADDR_INVALID;
-
- if (ep->dma)
- retval = dma_alloc_coherent(&ep->dev->pdev->dev,
- bytes, dma, gfp_flags);
- else
- retval = kmalloc(bytes, gfp_flags);
- return retval;
-}
-
-static DEFINE_SPINLOCK(buflock);
-static LIST_HEAD(buffers);
-
-struct free_record {
- struct list_head list;
- struct device *dev;
- unsigned bytes;
- dma_addr_t dma;
-};
-
-static void do_free(unsigned long ignored)
-{
- spin_lock_irq(&buflock);
- while (!list_empty(&buffers)) {
- struct free_record *buf;
-
- buf = list_entry(buffers.next, struct free_record, list);
- list_del(&buf->list);
- spin_unlock_irq(&buflock);
-
- dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);
-
- spin_lock_irq(&buflock);
- }
- spin_unlock_irq(&buflock);
-}
-
-static DECLARE_TASKLET(deferred_free, do_free, 0);
-
-static void
-net2280_free_buffer (
- struct usb_ep *_ep,
- void *address,
- dma_addr_t dma,
- unsigned bytes
-) {
- /* free memory into the right allocator */
- if (dma != DMA_ADDR_INVALID) {
- struct net2280_ep *ep;
- struct free_record *buf = address;
- unsigned long flags;
-
- ep = container_of(_ep, struct net2280_ep, ep);
- if (!_ep)
- return;
-
- ep = container_of (_ep, struct net2280_ep, ep);
- buf->dev = &ep->dev->pdev->dev;
- buf->bytes = bytes;
- buf->dma = dma;
-
- spin_lock_irqsave(&buflock, flags);
- list_add_tail(&buf->list, &buffers);
- tasklet_schedule(&deferred_free);
- spin_unlock_irqrestore(&buflock, flags);
- } else
- kfree (address);
-}
-
-/*-------------------------------------------------------------------------*/
-
/* load a packet into the fifo we use for usb IN transfers.
* works for all endpoints.
*
@@ -1392,9 +1298,6 @@ static const struct usb_ep_ops net2280_ep_ops = {
.alloc_request = net2280_alloc_request,
.free_request = net2280_free_request,
- .alloc_buffer = net2280_alloc_buffer,
- .free_buffer = net2280_free_buffer,
-
.queue = net2280_queue,
.dequeue = net2280_dequeue,
@@ -2964,7 +2867,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
, &dev->pci->pcimstctl);
/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
pci_set_master (pdev);
- pci_set_mwi (pdev);
+ pci_try_set_mwi (pdev);
/* ... also flushes any posted pci writes */
dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index c4975a6cf777..9b0f0925dddf 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -296,111 +296,6 @@ omap_free_request(struct usb_ep *ep, struct usb_request *_req)
/*-------------------------------------------------------------------------*/
-/*
- * dma-coherent memory allocation (for dma-capable endpoints)
- *
- * NOTE: the dma_*_coherent() API calls suck. Most implementations are
- * (a) page-oriented, so small buffers lose big; and (b) asymmetric with
- * respect to calls with irqs disabled: alloc is safe, free is not.
- * We currently work around (b), but not (a).
- */
-
-static void *
-omap_alloc_buffer(
- struct usb_ep *_ep,
- unsigned bytes,
- dma_addr_t *dma,
- gfp_t gfp_flags
-)
-{
- void *retval;
- struct omap_ep *ep;
-
- if (!_ep)
- return NULL;
-
- ep = container_of(_ep, struct omap_ep, ep);
- if (use_dma && ep->has_dma) {
- static int warned;
- if (!warned && bytes < PAGE_SIZE) {
- dev_warn(ep->udc->gadget.dev.parent,
- "using dma_alloc_coherent for "
- "small allocations wastes memory\n");
- warned++;
- }
- return dma_alloc_coherent(ep->udc->gadget.dev.parent,
- bytes, dma, gfp_flags);
- }
-
- retval = kmalloc(bytes, gfp_flags);
- if (retval)
- *dma = virt_to_phys(retval);
- return retval;
-}
-
-static DEFINE_SPINLOCK(buflock);
-static LIST_HEAD(buffers);
-
-struct free_record {
- struct list_head list;
- struct device *dev;
- unsigned bytes;
- dma_addr_t dma;
-};
-
-static void do_free(unsigned long ignored)
-{
- spin_lock_irq(&buflock);
- while (!list_empty(&buffers)) {
- struct free_record *buf;
-
- buf = list_entry(buffers.next, struct free_record, list);
- list_del(&buf->list);
- spin_unlock_irq(&buflock);
-
- dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);
-
- spin_lock_irq(&buflock);
- }
- spin_unlock_irq(&buflock);
-}
-
-static DECLARE_TASKLET(deferred_free, do_free, 0);
-
-static void omap_free_buffer(
- struct usb_ep *_ep,
- void *buf,
- dma_addr_t dma,
- unsigned bytes
-)
-{
- if (!_ep) {
- WARN_ON(1);
- return;
- }
-
- /* free memory into the right allocator */
- if (dma != DMA_ADDR_INVALID) {
- struct omap_ep *ep;
- struct free_record *rec = buf;
- unsigned long flags;
-
- ep = container_of(_ep, struct omap_ep, ep);
-
- rec->dev = ep->udc->gadget.dev.parent;
- rec->bytes = bytes;
- rec->dma = dma;
-
- spin_lock_irqsave(&buflock, flags);
- list_add_tail(&rec->list, &buffers);
- tasklet_schedule(&deferred_free);
- spin_unlock_irqrestore(&buflock, flags);
- } else
- kfree(buf);
-}
-
-/*-------------------------------------------------------------------------*/
-
static void
done(struct omap_ep *ep, struct omap_req *req, int status)
{
@@ -1271,9 +1166,6 @@ static struct usb_ep_ops omap_ep_ops = {
.alloc_request = omap_alloc_request,
.free_request = omap_free_request,
- .alloc_buffer = omap_alloc_buffer,
- .free_buffer = omap_free_buffer,
-
.queue = omap_ep_queue,
.dequeue = omap_ep_dequeue,
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 84392e835d5f..63b9521c1322 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -24,9 +24,9 @@
*
*/
-#undef DEBUG
// #define VERBOSE DBG_VERBOSE
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
@@ -46,19 +46,17 @@
#include <asm/byteorder.h>
#include <asm/dma.h>
+#include <asm/gpio.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <asm/unaligned.h>
#include <asm/hardware.h>
-#ifdef CONFIG_ARCH_PXA
-#include <asm/arch/pxa-regs.h>
-#endif
#include <linux/usb/ch9.h>
#include <linux/usb_gadget.h>
-#include <asm/arch/udc.h>
+#include <asm/mach/udc_pxa2xx.h>
/*
@@ -76,9 +74,17 @@
* it constrains the sorts of USB configuration change events that work.
* The errata for these chips are misleading; some "fixed" bugs from
* pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
+ *
+ * Note that the UDC hardware supports DMA (except on IXP) but that's
+ * not used here. IN-DMA (to host) is simple enough, when the data is
+ * suitably aligned (16 bytes) ... the network stack doesn't do that,
+ * other software can. OUT-DMA is buggy in most chip versions, as well
+ * as poorly designed (data toggle not automatic). So this driver won't
+ * bother using DMA. (Mostly-working IN-DMA support was available in
+ * kernels before 2.6.23, but was never enabled or well tested.)
*/
-#define DRIVER_VERSION "4-May-2005"
+#define DRIVER_VERSION "30-June-2007"
#define DRIVER_DESC "PXA 25x USB Device Controller driver"
@@ -87,12 +93,9 @@ static const char driver_name [] = "pxa2xx_udc";
static const char ep0name [] = "ep0";
-// #define USE_DMA
-// #define USE_OUT_DMA
// #define DISABLE_TEST_MODE
#ifdef CONFIG_ARCH_IXP4XX
-#undef USE_DMA
/* cpu-specific register addresses are compiled in to this code */
#ifdef CONFIG_ARCH_PXA
@@ -104,25 +107,6 @@ static const char ep0name [] = "ep0";
#include "pxa2xx_udc.h"
-#ifdef USE_DMA
-static int use_dma = 1;
-module_param(use_dma, bool, 0);
-MODULE_PARM_DESC (use_dma, "true to use dma");
-
-static void dma_nodesc_handler (int dmach, void *_ep);
-static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req);
-
-#ifdef USE_OUT_DMA
-#define DMASTR " (dma support)"
-#else
-#define DMASTR " (dma in)"
-#endif
-
-#else /* !USE_DMA */
-#define DMASTR " (pio only)"
-#undef USE_OUT_DMA
-#endif
-
#ifdef CONFIG_USB_PXA2XX_SMALL
#define SIZE_STR " (small)"
#else
@@ -155,7 +139,7 @@ static int is_vbus_present(void)
struct pxa2xx_udc_mach_info *mach = the_controller->mach;
if (mach->gpio_vbus)
- return udc_gpio_get(mach->gpio_vbus);
+ return gpio_get_value(mach->gpio_vbus);
if (mach->udc_is_connected)
return mach->udc_is_connected();
return 1;
@@ -167,7 +151,7 @@ static void pullup_off(void)
struct pxa2xx_udc_mach_info *mach = the_controller->mach;
if (mach->gpio_pullup)
- udc_gpio_set(mach->gpio_pullup, 0);
+ gpio_set_value(mach->gpio_pullup, 0);
else if (mach->udc_command)
mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}
@@ -177,7 +161,7 @@ static void pullup_on(void)
struct pxa2xx_udc_mach_info *mach = the_controller->mach;
if (mach->gpio_pullup)
- udc_gpio_set(mach->gpio_pullup, 1);
+ gpio_set_value(mach->gpio_pullup, 1);
else if (mach->udc_command)
mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}
@@ -281,9 +265,8 @@ static int pxa2xx_ep_enable (struct usb_ep *_ep,
}
ep->desc = desc;
- ep->dma = -1;
ep->stopped = 0;
- ep->pio_irqs = ep->dma_irqs = 0;
+ ep->pio_irqs = 0;
ep->ep.maxpacket = le16_to_cpu (desc->wMaxPacketSize);
/* flush fifo (mostly for OUT buffers) */
@@ -291,30 +274,6 @@ static int pxa2xx_ep_enable (struct usb_ep *_ep,
/* ... reset halt state too, if we could ... */
-#ifdef USE_DMA
- /* for (some) bulk and ISO endpoints, try to get a DMA channel and
- * bind it to the endpoint. otherwise use PIO.
- */
- switch (ep->bmAttributes) {
- case USB_ENDPOINT_XFER_ISOC:
- if (le16_to_cpu(desc->wMaxPacketSize) % 32)
- break;
- // fall through
- case USB_ENDPOINT_XFER_BULK:
- if (!use_dma || !ep->reg_drcmr)
- break;
- ep->dma = pxa_request_dma ((char *)_ep->name,
- (le16_to_cpu (desc->wMaxPacketSize) > 64)
- ? DMA_PRIO_MEDIUM /* some iso */
- : DMA_PRIO_LOW,
- dma_nodesc_handler, ep);
- if (ep->dma >= 0) {
- *ep->reg_drcmr = DRCMR_MAPVLD | ep->dma;
- DMSG("%s using dma%d\n", _ep->name, ep->dma);
- }
- }
-#endif
-
DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
return 0;
}
@@ -334,14 +293,6 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep)
nuke (ep, -ESHUTDOWN);
-#ifdef USE_DMA
- if (ep->dma >= 0) {
- *ep->reg_drcmr = 0;
- pxa_free_dma (ep->dma);
- ep->dma = -1;
- }
-#endif
-
/* flush fifo (mostly for IN buffers) */
pxa2xx_ep_fifo_flush (_ep);
@@ -390,35 +341,6 @@ pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-
-/* PXA cache needs flushing with DMA I/O (it's dma-incoherent), but there's
- * no device-affinity and the heap works perfectly well for i/o buffers.
- * It wastes much less memory than dma_alloc_coherent() would, and even
- * prevents cacheline (32 bytes wide) sharing problems.
- */
-static void *
-pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes,
- dma_addr_t *dma, gfp_t gfp_flags)
-{
- char *retval;
-
- retval = kmalloc (bytes, gfp_flags & ~(__GFP_DMA|__GFP_HIGHMEM));
- if (retval)
-#ifdef USE_DMA
- *dma = virt_to_bus (retval);
-#else
- *dma = (dma_addr_t)~0;
-#endif
- return retval;
-}
-
-static void
-pxa2xx_ep_free_buffer(struct usb_ep *_ep, void *buf, dma_addr_t dma,
- unsigned bytes)
-{
- kfree (buf);
-}
-
/*-------------------------------------------------------------------------*/
/*
@@ -518,18 +440,8 @@ write_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
/* requests complete when all IN data is in the FIFO */
if (is_last) {
done (ep, req, 0);
- if (list_empty(&ep->queue) || unlikely(ep->dma >= 0)) {
+ if (list_empty(&ep->queue))
pio_irq_disable (ep->bEndpointAddress);
-#ifdef USE_DMA
- /* unaligned data and zlps couldn't use dma */
- if (unlikely(!list_empty(&ep->queue))) {
- req = list_entry(ep->queue.next,
- struct pxa2xx_request, queue);
- kick_dma(ep,req);
- return 0;
- }
-#endif
- }
return 1;
}
@@ -728,182 +640,6 @@ read_ep0_fifo (struct pxa2xx_ep *ep, struct pxa2xx_request *req)
return 0;
}
-#ifdef USE_DMA
-
-#define MAX_IN_DMA ((DCMD_LENGTH + 1) - BULK_FIFO_SIZE)
-
-static void
-start_dma_nodesc(struct pxa2xx_ep *ep, struct pxa2xx_request *req, int is_in)
-{
- u32 dcmd = req->req.length;
- u32 buf = req->req.dma;
- u32 fifo = io_v2p ((u32)ep->reg_uddr);
-
- /* caller guarantees there's a packet or more remaining
- * - IN may end with a short packet (TSP set separately),
- * - OUT is always full length
- */
- buf += req->req.actual;
- dcmd -= req->req.actual;
- ep->dma_fixup = 0;
-
- /* no-descriptor mode can be simple for bulk-in, iso-in, iso-out */
- DCSR(ep->dma) = DCSR_NODESC;
- if (is_in) {
- DSADR(ep->dma) = buf;
- DTADR(ep->dma) = fifo;
- if (dcmd > MAX_IN_DMA)
- dcmd = MAX_IN_DMA;
- else
- ep->dma_fixup = (dcmd % ep->ep.maxpacket) != 0;
- dcmd |= DCMD_BURST32 | DCMD_WIDTH1
- | DCMD_FLOWTRG | DCMD_INCSRCADDR;
- } else {
-#ifdef USE_OUT_DMA
- DSADR(ep->dma) = fifo;
- DTADR(ep->dma) = buf;
- if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
- dcmd = ep->ep.maxpacket;
- dcmd |= DCMD_BURST32 | DCMD_WIDTH1
- | DCMD_FLOWSRC | DCMD_INCTRGADDR;
-#endif
- }
- DCMD(ep->dma) = dcmd;
- DCSR(ep->dma) = DCSR_RUN | DCSR_NODESC
- | (unlikely(is_in)
- ? DCSR_STOPIRQEN /* use dma_nodesc_handler() */
- : 0); /* use handle_ep() */
-}
-
-static void kick_dma(struct pxa2xx_ep *ep, struct pxa2xx_request *req)
-{
- int is_in = ep->bEndpointAddress & USB_DIR_IN;
-
- if (is_in) {
- /* unaligned tx buffers and zlps only work with PIO */
- if ((req->req.dma & 0x0f) != 0
- || unlikely((req->req.length - req->req.actual)
- == 0)) {
- pio_irq_enable(ep->bEndpointAddress);
- if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0)
- (void) write_fifo(ep, req);
- } else {
- start_dma_nodesc(ep, req, USB_DIR_IN);
- }
- } else {
- if ((req->req.length - req->req.actual) < ep->ep.maxpacket) {
- DMSG("%s short dma read...\n", ep->ep.name);
- /* we're always set up for pio out */
- read_fifo (ep, req);
- } else {
- *ep->reg_udccs = UDCCS_BO_DME
- | (*ep->reg_udccs & UDCCS_BO_FST);
- start_dma_nodesc(ep, req, USB_DIR_OUT);
- }
- }
-}
-
-static void cancel_dma(struct pxa2xx_ep *ep)
-{
- struct pxa2xx_request *req;
- u32 tmp;
-
- if (DCSR(ep->dma) == 0 || list_empty(&ep->queue))
- return;
-
- DCSR(ep->dma) = 0;
- while ((DCSR(ep->dma) & DCSR_STOPSTATE) == 0)
- cpu_relax();
-
- req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
- tmp = DCMD(ep->dma) & DCMD_LENGTH;
- req->req.actual = req->req.length - (tmp & DCMD_LENGTH);
-
- /* the last tx packet may be incomplete, so flush the fifo.
- * FIXME correct req.actual if we can
- */
- if (ep->bEndpointAddress & USB_DIR_IN)
- *ep->reg_udccs = UDCCS_BI_FTF;
-}
-
-/* dma channel stopped ... normal tx end (IN), or on error (IN/OUT) */
-static void dma_nodesc_handler(int dmach, void *_ep)
-{
- struct pxa2xx_ep *ep = _ep;
- struct pxa2xx_request *req;
- u32 tmp, completed;
-
- local_irq_disable();
-
- req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
-
- ep->dma_irqs++;
- ep->dev->stats.irqs++;
- HEX_DISPLAY(ep->dev->stats.irqs);
-
- /* ack/clear */
- tmp = DCSR(ep->dma);
- DCSR(ep->dma) = tmp;
- if ((tmp & DCSR_STOPSTATE) == 0
- || (DDADR(ep->dma) & DDADR_STOP) != 0) {
- DBG(DBG_VERBOSE, "%s, dcsr %08x ddadr %08x\n",
- ep->ep.name, DCSR(ep->dma), DDADR(ep->dma));
- goto done;
- }
- DCSR(ep->dma) = 0; /* clear DCSR_STOPSTATE */
-
- /* update transfer status */
- completed = tmp & DCSR_BUSERR;
- if (ep->bEndpointAddress & USB_DIR_IN)
- tmp = DSADR(ep->dma);
- else
- tmp = DTADR(ep->dma);
- req->req.actual = tmp - req->req.dma;
-
- /* FIXME seems we sometimes see partial transfers... */
-
- if (unlikely(completed != 0))
- req->req.status = -EIO;
- else if (req->req.actual) {
- /* these registers have zeroes in low bits; they miscount
- * some (end-of-transfer) short packets: tx 14 as tx 12
- */
- if (ep->dma_fixup)
- req->req.actual = min(req->req.actual + 3,
- req->req.length);
-
- tmp = (req->req.length - req->req.actual);
- completed = (tmp == 0);
- if (completed && (ep->bEndpointAddress & USB_DIR_IN)) {
-
- /* maybe validate final short packet ... */
- if ((req->req.actual % ep->ep.maxpacket) != 0)
- *ep->reg_udccs = UDCCS_BI_TSP/*|UDCCS_BI_TPC*/;
-
- /* ... or zlp, using pio fallback */
- else if (ep->bmAttributes == USB_ENDPOINT_XFER_BULK
- && req->req.zero) {
- DMSG("%s zlp terminate ...\n", ep->ep.name);
- completed = 0;
- }
- }
- }
-
- if (likely(completed)) {
- done(ep, req, 0);
-
- /* maybe re-activate after completion */
- if (ep->stopped || list_empty(&ep->queue))
- goto done;
- req = list_entry(ep->queue.next, struct pxa2xx_request, queue);
- }
- kick_dma(ep, req);
-done:
- local_irq_enable();
-}
-
-#endif
-
/*-------------------------------------------------------------------------*/
static int
@@ -942,19 +678,8 @@ pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
(ep->desc->wMaxPacketSize)))
return -EMSGSIZE;
-#ifdef USE_DMA
- // FIXME caller may already have done the dma mapping
- if (ep->dma >= 0) {
- _req->dma = dma_map_single(dev->dev,
- _req->buf, _req->length,
- ((ep->bEndpointAddress & USB_DIR_IN) != 0)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
-#endif
-
DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
- _ep->name, _req, _req->length, _req->buf);
+ _ep->name, _req, _req->length, _req->buf);
local_irq_save(flags);
@@ -1002,11 +727,6 @@ pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
local_irq_restore (flags);
return -EL2HLT;
}
-#ifdef USE_DMA
- /* either start dma or prime pio pump */
- } else if (ep->dma >= 0) {
- kick_dma(ep, req);
-#endif
/* can the FIFO satisfy the request immediately? */
} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
@@ -1017,7 +737,7 @@ pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
req = NULL;
}
- if (likely (req && ep->desc) && ep->dma < 0)
+ if (likely (req && ep->desc))
pio_irq_enable(ep->bEndpointAddress);
}
@@ -1038,10 +758,6 @@ static void nuke(struct pxa2xx_ep *ep, int status)
struct pxa2xx_request *req;
/* called with irqs blocked */
-#ifdef USE_DMA
- if (ep->dma >= 0 && !ep->stopped)
- cancel_dma(ep);
-#endif
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct pxa2xx_request,
@@ -1076,19 +792,7 @@ static int pxa2xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
return -EINVAL;
}
-#ifdef USE_DMA
- if (ep->dma >= 0 && ep->queue.next == &req->queue && !ep->stopped) {
- cancel_dma(ep);
- done(ep, req, -ECONNRESET);
- /* restart i/o */
- if (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next,
- struct pxa2xx_request, queue);
- kick_dma(ep, req);
- }
- } else
-#endif
- done(ep, req, -ECONNRESET);
+ done(ep, req, -ECONNRESET);
local_irq_restore(flags);
return 0;
@@ -1203,9 +907,6 @@ static struct usb_ep_ops pxa2xx_ep_ops = {
.alloc_request = pxa2xx_ep_alloc_request,
.free_request = pxa2xx_ep_free_request,
- .alloc_buffer = pxa2xx_ep_alloc_buffer,
- .free_buffer = pxa2xx_ep_free_buffer,
-
.queue = pxa2xx_ep_queue,
.dequeue = pxa2xx_ep_dequeue,
@@ -1325,7 +1026,7 @@ udc_proc_read(char *page, char **start, off_t off, int count,
/* basic device status */
t = scnprintf(next, size, DRIVER_DESC "\n"
"%s version: %s\nGadget driver: %s\nHost %s\n\n",
- driver_name, DRIVER_VERSION SIZE_STR DMASTR,
+ driver_name, DRIVER_VERSION SIZE_STR "(pio)",
dev->driver ? dev->driver->driver.name : "(none)",
is_vbus_present() ? "full speed" : "disconnected");
size -= t;
@@ -1390,7 +1091,6 @@ udc_proc_read(char *page, char **start, off_t off, int count,
for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
struct pxa2xx_ep *ep = &dev->ep [i];
struct pxa2xx_request *req;
- int t;
if (i != 0) {
const struct usb_endpoint_descriptor *d;
@@ -1400,10 +1100,9 @@ udc_proc_read(char *page, char **start, off_t off, int count,
continue;
tmp = *dev->ep [i].reg_udccs;
t = scnprintf(next, size,
- "%s max %d %s udccs %02x irqs %lu/%lu\n",
+ "%s max %d %s udccs %02x irqs %lu\n",
ep->ep.name, le16_to_cpu (d->wMaxPacketSize),
- (ep->dma >= 0) ? "dma" : "pio", tmp,
- ep->pio_irqs, ep->dma_irqs);
+ "pio", tmp, ep->pio_irqs);
/* TODO translate all five groups of udccs bits! */
} else /* ep0 should only have one transfer queued */
@@ -1423,19 +1122,7 @@ udc_proc_read(char *page, char **start, off_t off, int count,
continue;
}
list_for_each_entry(req, &ep->queue, queue) {
-#ifdef USE_DMA
- if (ep->dma >= 0 && req->queue.prev == &ep->queue)
- t = scnprintf(next, size,
- "\treq %p len %d/%d "
- "buf %p (dma%d dcmd %08x)\n",
- &req->req, req->req.actual,
- req->req.length, req->req.buf,
- ep->dma, DCMD(ep->dma)
- // low 13 bits == bytes-to-go
- );
- else
-#endif
- t = scnprintf(next, size,
+ t = scnprintf(next, size,
"\treq %p len %d/%d buf %p\n",
&req->req, req->req.actual,
req->req.length, req->req.buf);
@@ -1488,7 +1175,6 @@ static void udc_disable(struct pxa2xx_udc *dev)
ep0_idle (dev);
dev->gadget.speed = USB_SPEED_UNKNOWN;
- LED_CONNECTED_OFF;
}
@@ -1514,7 +1200,7 @@ static void udc_reinit(struct pxa2xx_udc *dev)
ep->desc = NULL;
ep->stopped = 0;
INIT_LIST_HEAD (&ep->queue);
- ep->pio_irqs = ep->dma_irqs = 0;
+ ep->pio_irqs = 0;
}
/* the rest was statically initialized, and is read-only */
@@ -1666,7 +1352,6 @@ stop_activity(struct pxa2xx_udc *dev, struct usb_gadget_driver *driver)
del_timer_sync(&dev->timer);
/* report disconnect; the driver is already quiesced */
- LED_CONNECTED_OFF;
if (driver)
driver->disconnect(&dev->gadget);
@@ -1715,16 +1400,13 @@ lubbock_vbus_irq(int irq, void *_dev)
int vbus;
dev->stats.irqs++;
- HEX_DISPLAY(dev->stats.irqs);
switch (irq) {
case LUBBOCK_USB_IRQ:
- LED_CONNECTED_ON;
vbus = 1;
disable_irq(LUBBOCK_USB_IRQ);
enable_irq(LUBBOCK_USB_DISC_IRQ);
break;
case LUBBOCK_USB_DISC_IRQ:
- LED_CONNECTED_OFF;
vbus = 0;
disable_irq(LUBBOCK_USB_DISC_IRQ);
enable_irq(LUBBOCK_USB_IRQ);
@@ -1742,7 +1424,7 @@ lubbock_vbus_irq(int irq, void *_dev)
static irqreturn_t udc_vbus_irq(int irq, void *_dev)
{
struct pxa2xx_udc *dev = _dev;
- int vbus = udc_gpio_get(dev->mach->gpio_vbus);
+ int vbus = gpio_get_value(dev->mach->gpio_vbus);
pxa2xx_udc_vbus_session(&dev->gadget, vbus);
return IRQ_HANDLED;
@@ -2040,18 +1722,6 @@ static void handle_ep(struct pxa2xx_ep *ep)
/* fifos can hold packets, ready for reading... */
if (likely(req)) {
-#ifdef USE_OUT_DMA
-// TODO didn't yet debug out-dma. this approach assumes
-// the worst about short packets and RPC; it might be better.
-
- if (likely(ep->dma >= 0)) {
- if (!(udccs & UDCCS_BO_RSP)) {
- *ep->reg_udccs = UDCCS_BO_RPC;
- ep->dma_irqs++;
- return;
- }
- }
-#endif
completed = read_fifo(ep, req);
} else
pio_irq_disable (ep->bEndpointAddress);
@@ -2074,7 +1744,6 @@ pxa2xx_udc_irq(int irq, void *_dev)
int handled;
dev->stats.irqs++;
- HEX_DISPLAY(dev->stats.irqs);
do {
u32 udccr = UDCCR;
@@ -2125,7 +1794,6 @@ pxa2xx_udc_irq(int irq, void *_dev)
} else {
DBG(DBG_VERBOSE, "USB reset end\n");
dev->gadget.speed = USB_SPEED_FULL;
- LED_CONNECTED_ON;
memset(&dev->stats, 0, sizeof dev->stats);
/* driver and endpoints are still reset */
}
@@ -2217,7 +1885,6 @@ static struct pxa2xx_udc memory = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.reg_udccs = &UDCCS1,
.reg_uddr = &UDDR1,
- drcmr (25)
},
.ep[2] = {
.ep = {
@@ -2232,7 +1899,6 @@ static struct pxa2xx_udc memory = {
.reg_udccs = &UDCCS2,
.reg_ubcr = &UBCR2,
.reg_uddr = &UDDR2,
- drcmr (26)
},
#ifndef CONFIG_USB_PXA2XX_SMALL
.ep[3] = {
@@ -2247,7 +1913,6 @@ static struct pxa2xx_udc memory = {
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.reg_udccs = &UDCCS3,
.reg_uddr = &UDDR3,
- drcmr (27)
},
.ep[4] = {
.ep = {
@@ -2262,7 +1927,6 @@ static struct pxa2xx_udc memory = {
.reg_udccs = &UDCCS4,
.reg_ubcr = &UBCR4,
.reg_uddr = &UDDR4,
- drcmr (28)
},
.ep[5] = {
.ep = {
@@ -2291,7 +1955,6 @@ static struct pxa2xx_udc memory = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.reg_udccs = &UDCCS6,
.reg_uddr = &UDDR6,
- drcmr (30)
},
.ep[7] = {
.ep = {
@@ -2306,7 +1969,6 @@ static struct pxa2xx_udc memory = {
.reg_udccs = &UDCCS7,
.reg_ubcr = &UBCR7,
.reg_uddr = &UDDR7,
- drcmr (31)
},
.ep[8] = {
.ep = {
@@ -2320,7 +1982,6 @@ static struct pxa2xx_udc memory = {
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.reg_udccs = &UDCCS8,
.reg_uddr = &UDDR8,
- drcmr (32)
},
.ep[9] = {
.ep = {
@@ -2335,7 +1996,6 @@ static struct pxa2xx_udc memory = {
.reg_udccs = &UDCCS9,
.reg_ubcr = &UBCR9,
.reg_uddr = &UDDR9,
- drcmr (33)
},
.ep[10] = {
.ep = {
@@ -2364,7 +2024,6 @@ static struct pxa2xx_udc memory = {
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.reg_udccs = &UDCCS11,
.reg_uddr = &UDDR11,
- drcmr (35)
},
.ep[12] = {
.ep = {
@@ -2379,7 +2038,6 @@ static struct pxa2xx_udc memory = {
.reg_udccs = &UDCCS12,
.reg_ubcr = &UBCR12,
.reg_uddr = &UDDR12,
- drcmr (36)
},
.ep[13] = {
.ep = {
@@ -2393,7 +2051,6 @@ static struct pxa2xx_udc memory = {
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
.reg_udccs = &UDCCS13,
.reg_uddr = &UDDR13,
- drcmr (37)
},
.ep[14] = {
.ep = {
@@ -2408,7 +2065,6 @@ static struct pxa2xx_udc memory = {
.reg_udccs = &UDCCS14,
.reg_ubcr = &UBCR14,
.reg_uddr = &UDDR14,
- drcmr (38)
},
.ep[15] = {
.ep = {
@@ -2466,7 +2122,7 @@ static struct pxa2xx_udc memory = {
static int __init pxa2xx_udc_probe(struct platform_device *pdev)
{
struct pxa2xx_udc *dev = &memory;
- int retval, out_dma = 1, vbus_irq, irq;
+ int retval, vbus_irq, irq;
u32 chiprev;
/* insist on Intel/ARM/XScale */
@@ -2489,7 +2145,7 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
case PXA250_B2: case PXA210_B2:
case PXA250_B1: case PXA210_B1:
case PXA250_B0: case PXA210_B0:
- out_dma = 0;
+ /* OUT-DMA is broken ... */
/* fall through */
case PXA250_C0: case PXA210_C0:
break;
@@ -2498,11 +2154,9 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
case IXP425_B0:
case IXP465_AD:
dev->has_cfr = 1;
- out_dma = 0;
break;
#endif
default:
- out_dma = 0;
printk(KERN_ERR "%s: unrecognized processor: %08x\n",
driver_name, chiprev);
/* iop3xx, ixp4xx, ... */
@@ -2513,36 +2167,41 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- pr_debug("%s: IRQ %d%s%s%s\n", driver_name, irq,
+ pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
dev->has_cfr ? "" : " (!cfr)",
- out_dma ? "" : " (broken dma-out)",
- SIZE_STR DMASTR
+ SIZE_STR "(pio)"
);
-#ifdef USE_DMA
-#ifndef USE_OUT_DMA
- out_dma = 0;
-#endif
- /* pxa 250 erratum 130 prevents using OUT dma (fixed C0) */
- if (!out_dma) {
- DMSG("disabled OUT dma\n");
- dev->ep[ 2].reg_drcmr = dev->ep[ 4].reg_drcmr = 0;
- dev->ep[ 7].reg_drcmr = dev->ep[ 9].reg_drcmr = 0;
- dev->ep[12].reg_drcmr = dev->ep[14].reg_drcmr = 0;
- }
-#endif
-
/* other non-static parts of init */
dev->dev = &pdev->dev;
dev->mach = pdev->dev.platform_data;
+
if (dev->mach->gpio_vbus) {
- udc_gpio_init_vbus(dev->mach->gpio_vbus);
- vbus_irq = udc_gpio_to_irq(dev->mach->gpio_vbus);
+ if ((retval = gpio_request(dev->mach->gpio_vbus,
+ "pxa2xx_udc GPIO VBUS"))) {
+ dev_dbg(&pdev->dev,
+ "can't get vbus gpio %d, err: %d\n",
+ dev->mach->gpio_vbus, retval);
+ return -EBUSY;
+ }
+ gpio_direction_input(dev->mach->gpio_vbus);
+ vbus_irq = gpio_to_irq(dev->mach->gpio_vbus);
set_irq_type(vbus_irq, IRQT_BOTHEDGE);
} else
vbus_irq = 0;
- if (dev->mach->gpio_pullup)
- udc_gpio_init_pullup(dev->mach->gpio_pullup);
+
+ if (dev->mach->gpio_pullup) {
+ if ((retval = gpio_request(dev->mach->gpio_pullup,
+ "pca2xx_udc GPIO PULLUP"))) {
+ dev_dbg(&pdev->dev,
+ "can't get pullup gpio %d, err: %d\n",
+ dev->mach->gpio_pullup, retval);
+ if (dev->mach->gpio_vbus)
+ gpio_free(dev->mach->gpio_vbus);
+ return -EBUSY;
+ }
+ gpio_direction_output(dev->mach->gpio_pullup, 0);
+ }
init_timer(&dev->timer);
dev->timer.function = udc_watchdog;
@@ -2566,6 +2225,10 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
if (retval != 0) {
printk(KERN_ERR "%s: can't get irq %d, err %d\n",
driver_name, irq, retval);
+ if (dev->mach->gpio_pullup)
+ gpio_free(dev->mach->gpio_pullup);
+ if (dev->mach->gpio_vbus)
+ gpio_free(dev->mach->gpio_vbus);
return -EBUSY;
}
dev->got_irq = 1;
@@ -2581,6 +2244,10 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
driver_name, LUBBOCK_USB_DISC_IRQ, retval);
lubbock_fail0:
free_irq(irq, dev);
+ if (dev->mach->gpio_pullup)
+ gpio_free(dev->mach->gpio_pullup);
+ if (dev->mach->gpio_vbus)
+ gpio_free(dev->mach->gpio_vbus);
return -EBUSY;
}
retval = request_irq(LUBBOCK_USB_IRQ,
@@ -2593,11 +2260,6 @@ lubbock_fail0:
free_irq(LUBBOCK_USB_DISC_IRQ, dev);
goto lubbock_fail0;
}
-#ifdef DEBUG
- /* with U-Boot (but not BLOB), hex is off by default */
- HEX_DISPLAY(dev->stats.irqs);
- LUB_DISC_BLNK_LED &= 0xff;
-#endif
} else
#endif
if (vbus_irq) {
@@ -2608,6 +2270,10 @@ lubbock_fail0:
printk(KERN_ERR "%s: can't get irq %i, err %d\n",
driver_name, vbus_irq, retval);
free_irq(irq, dev);
+ if (dev->mach->gpio_pullup)
+ gpio_free(dev->mach->gpio_pullup);
+ if (dev->mach->gpio_vbus)
+ gpio_free(dev->mach->gpio_vbus);
return -EBUSY;
}
}
@@ -2641,8 +2307,13 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
free_irq(LUBBOCK_USB_IRQ, dev);
}
#endif
- if (dev->mach->gpio_vbus)
- free_irq(IRQ_GPIO(dev->mach->gpio_vbus), dev);
+ if (dev->mach->gpio_vbus) {
+ free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev);
+ gpio_free(dev->mach->gpio_vbus);
+ }
+ if (dev->mach->gpio_pullup)
+ gpio_free(dev->mach->gpio_pullup);
+
platform_set_drvdata(pdev, NULL);
the_controller = NULL;
return 0;
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
index 773e549aff3f..0e5d0e6fb0e2 100644
--- a/drivers/usb/gadget/pxa2xx_udc.h
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -54,8 +54,6 @@ struct pxa2xx_ep {
const struct usb_endpoint_descriptor *desc;
struct list_head queue;
unsigned long pio_irqs;
- unsigned long dma_irqs;
- short dma;
unsigned short fifo_size;
u8 bEndpointAddress;
@@ -63,7 +61,7 @@ struct pxa2xx_ep {
unsigned stopped : 1;
unsigned dma_fixup : 1;
-
+
/* UDCCS = UDC Control/Status for this EP
* UBCR = UDC Byte Count Remaining (contents of OUT fifo)
* UDDR = UDC Endpoint Data Register (the fifo)
@@ -72,12 +70,6 @@ struct pxa2xx_ep {
volatile u32 *reg_udccs;
volatile u32 *reg_ubcr;
volatile u32 *reg_uddr;
-#ifdef USE_DMA
- volatile u32 *reg_drcmr;
-#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
-#else
-#define drcmr(n)
-#endif
};
struct pxa2xx_request {
@@ -85,7 +77,7 @@ struct pxa2xx_request {
struct list_head queue;
};
-enum ep0_state {
+enum ep0_state {
EP0_IDLE,
EP0_IN_DATA_PHASE,
EP0_OUT_DATA_PHASE,
@@ -108,7 +100,6 @@ struct udc_stats {
#ifdef CONFIG_USB_PXA2XX_SMALL
/* when memory's tight, SMALL config saves code+data. */
-#undef USE_DMA
#define PXA_UDC_NUM_ENDPOINTS 3
#endif
@@ -144,37 +135,8 @@ struct pxa2xx_udc {
#ifdef CONFIG_ARCH_LUBBOCK
#include <asm/arch/lubbock.h>
/* lubbock can also report usb connect/disconnect irqs */
-
-#ifdef DEBUG
-#define HEX_DISPLAY(n) if (machine_is_lubbock()) { LUB_HEXLED = (n); }
#endif
-#endif
-
-/*-------------------------------------------------------------------------*/
-
-/* LEDs are only for debug */
-#ifndef HEX_DISPLAY
-#define HEX_DISPLAY(n) do {} while(0)
-#endif
-
-#ifdef DEBUG
-#include <asm/leds.h>
-
-#define LED_CONNECTED_ON leds_event(led_green_on)
-#define LED_CONNECTED_OFF do { \
- leds_event(led_green_off); \
- HEX_DISPLAY(0); \
- } while(0)
-#endif
-
-#ifndef LED_CONNECTED_ON
-#define LED_CONNECTED_ON do {} while(0)
-#define LED_CONNECTED_OFF do {} while(0)
-#endif
-
-/*-------------------------------------------------------------------------*/
-
static struct pxa2xx_udc *the_controller;
/*-------------------------------------------------------------------------*/
@@ -204,7 +166,7 @@ static const char *state_name[] = {
# define UDC_DEBUG DBG_NORMAL
#endif
-static void __attribute__ ((__unused__))
+static void __maybe_unused
dump_udccr(const char *label)
{
u32 udccr = UDCCR;
@@ -220,7 +182,7 @@ dump_udccr(const char *label)
(udccr & UDCCR_UDE) ? " ude" : "");
}
-static void __attribute__ ((__unused__))
+static void __maybe_unused
dump_udccs0(const char *label)
{
u32 udccs0 = UDCCS0;
@@ -237,7 +199,7 @@ dump_udccs0(const char *label)
(udccs0 & UDCCS0_OPR) ? " opr" : "");
}
-static void __attribute__ ((__unused__))
+static void __maybe_unused
dump_state(struct pxa2xx_udc *dev)
{
u32 tmp;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 708657c89132..db1b2bfcee4e 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -53,7 +53,7 @@
*/
#if 0
-#define DEBUG(str,args...) do { \
+#define DBG(str,args...) do { \
if (rndis_debug) \
printk(KERN_DEBUG str , ## args ); \
} while (0)
@@ -65,7 +65,7 @@ MODULE_PARM_DESC (rndis_debug, "enable debugging");
#else
#define rndis_debug 0
-#define DEBUG(str,args...) do{}while(0)
+#define DBG(str,args...) do{}while(0)
#endif
#define RNDIS_MAX_CONFIGS 1
@@ -183,9 +183,9 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
if (!resp) return -ENOMEM;
if (buf_len && rndis_debug > 1) {
- DEBUG("query OID %08x value, len %d:\n", OID, buf_len);
+ DBG("query OID %08x value, len %d:\n", OID, buf_len);
for (i = 0; i < buf_len; i += 16) {
- DEBUG ("%03d: %08x %08x %08x %08x\n", i,
+ DBG("%03d: %08x %08x %08x %08x\n", i,
le32_to_cpu(get_unaligned((__le32 *)
&buf[i])),
le32_to_cpu(get_unaligned((__le32 *)
@@ -207,7 +207,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_SUPPORTED_LIST:
- DEBUG ("%s: OID_GEN_SUPPORTED_LIST\n", __FUNCTION__);
+ DBG("%s: OID_GEN_SUPPORTED_LIST\n", __FUNCTION__);
length = sizeof (oid_supported_list);
count = length / sizeof (u32);
for (i = 0; i < count; i++)
@@ -217,7 +217,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_HARDWARE_STATUS:
- DEBUG("%s: OID_GEN_HARDWARE_STATUS\n", __FUNCTION__);
+ DBG("%s: OID_GEN_HARDWARE_STATUS\n", __FUNCTION__);
/* Bogus question!
* Hardware must be ready to receive high level protocols.
* BTW:
@@ -230,14 +230,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_MEDIA_SUPPORTED:
- DEBUG("%s: OID_GEN_MEDIA_SUPPORTED\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MEDIA_SUPPORTED\n", __FUNCTION__);
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr].medium);
retval = 0;
break;
/* mandatory */
case OID_GEN_MEDIA_IN_USE:
- DEBUG("%s: OID_GEN_MEDIA_IN_USE\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MEDIA_IN_USE\n", __FUNCTION__);
/* one medium, one transport... (maybe you do it better) */
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr].medium);
retval = 0;
@@ -245,7 +245,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_MAXIMUM_FRAME_SIZE:
- DEBUG("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MAXIMUM_FRAME_SIZE\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].dev) {
*outbuf = cpu_to_le32 (
rndis_per_dev_params [configNr].dev->mtu);
@@ -256,7 +256,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_LINK_SPEED:
if (rndis_debug > 1)
- DEBUG("%s: OID_GEN_LINK_SPEED\n", __FUNCTION__);
+ DBG("%s: OID_GEN_LINK_SPEED\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].media_state
== NDIS_MEDIA_STATE_DISCONNECTED)
*outbuf = __constant_cpu_to_le32 (0);
@@ -268,7 +268,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_TRANSMIT_BLOCK_SIZE:
- DEBUG("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __FUNCTION__);
+ DBG("%s: OID_GEN_TRANSMIT_BLOCK_SIZE\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].dev) {
*outbuf = cpu_to_le32 (
rndis_per_dev_params [configNr].dev->mtu);
@@ -278,7 +278,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_RECEIVE_BLOCK_SIZE:
- DEBUG("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __FUNCTION__);
+ DBG("%s: OID_GEN_RECEIVE_BLOCK_SIZE\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].dev) {
*outbuf = cpu_to_le32 (
rndis_per_dev_params [configNr].dev->mtu);
@@ -288,7 +288,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_VENDOR_ID:
- DEBUG("%s: OID_GEN_VENDOR_ID\n", __FUNCTION__);
+ DBG("%s: OID_GEN_VENDOR_ID\n", __FUNCTION__);
*outbuf = cpu_to_le32 (
rndis_per_dev_params [configNr].vendorID);
retval = 0;
@@ -296,7 +296,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_VENDOR_DESCRIPTION:
- DEBUG("%s: OID_GEN_VENDOR_DESCRIPTION\n", __FUNCTION__);
+ DBG("%s: OID_GEN_VENDOR_DESCRIPTION\n", __FUNCTION__);
length = strlen (rndis_per_dev_params [configNr].vendorDescr);
memcpy (outbuf,
rndis_per_dev_params [configNr].vendorDescr, length);
@@ -304,7 +304,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_VENDOR_DRIVER_VERSION:
- DEBUG("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __FUNCTION__);
+ DBG("%s: OID_GEN_VENDOR_DRIVER_VERSION\n", __FUNCTION__);
/* Created as LE */
*outbuf = rndis_driver_version;
retval = 0;
@@ -312,14 +312,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_CURRENT_PACKET_FILTER:
- DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __FUNCTION__);
+ DBG("%s: OID_GEN_CURRENT_PACKET_FILTER\n", __FUNCTION__);
*outbuf = cpu_to_le32 (*rndis_per_dev_params[configNr].filter);
retval = 0;
break;
/* mandatory */
case OID_GEN_MAXIMUM_TOTAL_SIZE:
- DEBUG("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MAXIMUM_TOTAL_SIZE\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
retval = 0;
break;
@@ -327,14 +327,14 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_MEDIA_CONNECT_STATUS:
if (rndis_debug > 1)
- DEBUG("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MEDIA_CONNECT_STATUS\n", __FUNCTION__);
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.media_state);
retval = 0;
break;
case OID_GEN_PHYSICAL_MEDIUM:
- DEBUG("%s: OID_GEN_PHYSICAL_MEDIUM\n", __FUNCTION__);
+ DBG("%s: OID_GEN_PHYSICAL_MEDIUM\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32 (0);
retval = 0;
break;
@@ -344,7 +344,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
* versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
*/
case OID_GEN_MAC_OPTIONS: /* from WinME */
- DEBUG("%s: OID_GEN_MAC_OPTIONS\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MAC_OPTIONS\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32(
NDIS_MAC_OPTION_RECEIVE_SERIALIZED
| NDIS_MAC_OPTION_FULL_DUPLEX);
@@ -356,7 +356,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_XMIT_OK:
if (rndis_debug > 1)
- DEBUG("%s: OID_GEN_XMIT_OK\n", __FUNCTION__);
+ DBG("%s: OID_GEN_XMIT_OK\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (
rndis_per_dev_params [configNr].stats->tx_packets -
@@ -369,7 +369,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_RCV_OK:
if (rndis_debug > 1)
- DEBUG("%s: OID_GEN_RCV_OK\n", __FUNCTION__);
+ DBG("%s: OID_GEN_RCV_OK\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (
rndis_per_dev_params [configNr].stats->rx_packets -
@@ -382,7 +382,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_XMIT_ERROR:
if (rndis_debug > 1)
- DEBUG("%s: OID_GEN_XMIT_ERROR\n", __FUNCTION__);
+ DBG("%s: OID_GEN_XMIT_ERROR\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->tx_errors);
@@ -393,7 +393,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_RCV_ERROR:
if (rndis_debug > 1)
- DEBUG("%s: OID_GEN_RCV_ERROR\n", __FUNCTION__);
+ DBG("%s: OID_GEN_RCV_ERROR\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->rx_errors);
@@ -403,7 +403,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_GEN_RCV_NO_BUFFER:
- DEBUG("%s: OID_GEN_RCV_NO_BUFFER\n", __FUNCTION__);
+ DBG("%s: OID_GEN_RCV_NO_BUFFER\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->rx_dropped);
@@ -413,7 +413,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
#ifdef RNDIS_OPTIONAL_STATS
case OID_GEN_DIRECTED_BYTES_XMIT:
- DEBUG("%s: OID_GEN_DIRECTED_BYTES_XMIT\n", __FUNCTION__);
+ DBG("%s: OID_GEN_DIRECTED_BYTES_XMIT\n", __FUNCTION__);
/*
* Aunt Tilly's size of shoes
* minus antarctica count of penguins
@@ -433,7 +433,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_DIRECTED_FRAMES_XMIT:
- DEBUG("%s: OID_GEN_DIRECTED_FRAMES_XMIT\n", __FUNCTION__);
+ DBG("%s: OID_GEN_DIRECTED_FRAMES_XMIT\n", __FUNCTION__);
/* dito */
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (
@@ -449,7 +449,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_MULTICAST_BYTES_XMIT:
- DEBUG("%s: OID_GEN_MULTICAST_BYTES_XMIT\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MULTICAST_BYTES_XMIT\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->multicast*1234);
@@ -458,7 +458,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_MULTICAST_FRAMES_XMIT:
- DEBUG("%s: OID_GEN_MULTICAST_FRAMES_XMIT\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MULTICAST_FRAMES_XMIT\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->multicast);
@@ -467,7 +467,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_BROADCAST_BYTES_XMIT:
- DEBUG("%s: OID_GEN_BROADCAST_BYTES_XMIT\n", __FUNCTION__);
+ DBG("%s: OID_GEN_BROADCAST_BYTES_XMIT\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->tx_packets/42*255);
@@ -476,7 +476,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_BROADCAST_FRAMES_XMIT:
- DEBUG("%s: OID_GEN_BROADCAST_FRAMES_XMIT\n", __FUNCTION__);
+ DBG("%s: OID_GEN_BROADCAST_FRAMES_XMIT\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->tx_packets/42);
@@ -485,19 +485,19 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_DIRECTED_BYTES_RCV:
- DEBUG("%s: OID_GEN_DIRECTED_BYTES_RCV\n", __FUNCTION__);
+ DBG("%s: OID_GEN_DIRECTED_BYTES_RCV\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32 (0);
retval = 0;
break;
case OID_GEN_DIRECTED_FRAMES_RCV:
- DEBUG("%s: OID_GEN_DIRECTED_FRAMES_RCV\n", __FUNCTION__);
+ DBG("%s: OID_GEN_DIRECTED_FRAMES_RCV\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32 (0);
retval = 0;
break;
case OID_GEN_MULTICAST_BYTES_RCV:
- DEBUG("%s: OID_GEN_MULTICAST_BYTES_RCV\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MULTICAST_BYTES_RCV\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->multicast * 1111);
@@ -506,7 +506,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_MULTICAST_FRAMES_RCV:
- DEBUG("%s: OID_GEN_MULTICAST_FRAMES_RCV\n", __FUNCTION__);
+ DBG("%s: OID_GEN_MULTICAST_FRAMES_RCV\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->multicast);
@@ -515,7 +515,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_BROADCAST_BYTES_RCV:
- DEBUG("%s: OID_GEN_BROADCAST_BYTES_RCV\n", __FUNCTION__);
+ DBG("%s: OID_GEN_BROADCAST_BYTES_RCV\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->rx_packets/42*255);
@@ -524,7 +524,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_BROADCAST_FRAMES_RCV:
- DEBUG("%s: OID_GEN_BROADCAST_FRAMES_RCV\n", __FUNCTION__);
+ DBG("%s: OID_GEN_BROADCAST_FRAMES_RCV\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->rx_packets/42);
@@ -533,7 +533,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_RCV_CRC_ERROR:
- DEBUG("%s: OID_GEN_RCV_CRC_ERROR\n", __FUNCTION__);
+ DBG("%s: OID_GEN_RCV_CRC_ERROR\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->rx_crc_errors);
@@ -542,7 +542,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
break;
case OID_GEN_TRANSMIT_QUEUE_LENGTH:
- DEBUG("%s: OID_GEN_TRANSMIT_QUEUE_LENGTH\n", __FUNCTION__);
+ DBG("%s: OID_GEN_TRANSMIT_QUEUE_LENGTH\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32 (0);
retval = 0;
break;
@@ -552,7 +552,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_802_3_PERMANENT_ADDRESS:
- DEBUG("%s: OID_802_3_PERMANENT_ADDRESS\n", __FUNCTION__);
+ DBG("%s: OID_802_3_PERMANENT_ADDRESS\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].dev) {
length = ETH_ALEN;
memcpy (outbuf,
@@ -564,7 +564,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_802_3_CURRENT_ADDRESS:
- DEBUG("%s: OID_802_3_CURRENT_ADDRESS\n", __FUNCTION__);
+ DBG("%s: OID_802_3_CURRENT_ADDRESS\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].dev) {
length = ETH_ALEN;
memcpy (outbuf,
@@ -576,7 +576,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_802_3_MULTICAST_LIST:
- DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
+ DBG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
/* Multicast base address only */
*outbuf = __constant_cpu_to_le32 (0xE0000000);
retval = 0;
@@ -584,21 +584,21 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_802_3_MAXIMUM_LIST_SIZE:
- DEBUG("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __FUNCTION__);
+ DBG("%s: OID_802_3_MAXIMUM_LIST_SIZE\n", __FUNCTION__);
/* Multicast base address only */
*outbuf = __constant_cpu_to_le32 (1);
retval = 0;
break;
case OID_802_3_MAC_OPTIONS:
- DEBUG("%s: OID_802_3_MAC_OPTIONS\n", __FUNCTION__);
+ DBG("%s: OID_802_3_MAC_OPTIONS\n", __FUNCTION__);
break;
/* ieee802.3 statistics OIDs (table 4-4) */
/* mandatory */
case OID_802_3_RCV_ERROR_ALIGNMENT:
- DEBUG("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __FUNCTION__);
+ DBG("%s: OID_802_3_RCV_ERROR_ALIGNMENT\n", __FUNCTION__);
if (rndis_per_dev_params [configNr].stats) {
*outbuf = cpu_to_le32 (rndis_per_dev_params [configNr]
.stats->rx_frame_errors);
@@ -608,51 +608,51 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
/* mandatory */
case OID_802_3_XMIT_ONE_COLLISION:
- DEBUG("%s: OID_802_3_XMIT_ONE_COLLISION\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_ONE_COLLISION\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32 (0);
retval = 0;
break;
/* mandatory */
case OID_802_3_XMIT_MORE_COLLISIONS:
- DEBUG("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_MORE_COLLISIONS\n", __FUNCTION__);
*outbuf = __constant_cpu_to_le32 (0);
retval = 0;
break;
#ifdef RNDIS_OPTIONAL_STATS
case OID_802_3_XMIT_DEFERRED:
- DEBUG("%s: OID_802_3_XMIT_DEFERRED\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_DEFERRED\n", __FUNCTION__);
/* TODO */
break;
case OID_802_3_XMIT_MAX_COLLISIONS:
- DEBUG("%s: OID_802_3_XMIT_MAX_COLLISIONS\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_MAX_COLLISIONS\n", __FUNCTION__);
/* TODO */
break;
case OID_802_3_RCV_OVERRUN:
- DEBUG("%s: OID_802_3_RCV_OVERRUN\n", __FUNCTION__);
+ DBG("%s: OID_802_3_RCV_OVERRUN\n", __FUNCTION__);
/* TODO */
break;
case OID_802_3_XMIT_UNDERRUN:
- DEBUG("%s: OID_802_3_XMIT_UNDERRUN\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_UNDERRUN\n", __FUNCTION__);
/* TODO */
break;
case OID_802_3_XMIT_HEARTBEAT_FAILURE:
- DEBUG("%s: OID_802_3_XMIT_HEARTBEAT_FAILURE\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_HEARTBEAT_FAILURE\n", __FUNCTION__);
/* TODO */
break;
case OID_802_3_XMIT_TIMES_CRS_LOST:
- DEBUG("%s: OID_802_3_XMIT_TIMES_CRS_LOST\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_TIMES_CRS_LOST\n", __FUNCTION__);
/* TODO */
break;
case OID_802_3_XMIT_LATE_COLLISIONS:
- DEBUG("%s: OID_802_3_XMIT_LATE_COLLISIONS\n", __FUNCTION__);
+ DBG("%s: OID_802_3_XMIT_LATE_COLLISIONS\n", __FUNCTION__);
/* TODO */
break;
#endif /* RNDIS_OPTIONAL_STATS */
@@ -660,7 +660,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
#ifdef RNDIS_PM
/* power management OIDs (table 4-5) */
case OID_PNP_CAPABILITIES:
- DEBUG("%s: OID_PNP_CAPABILITIES\n", __FUNCTION__);
+ DBG("%s: OID_PNP_CAPABILITIES\n", __FUNCTION__);
/* for now, no wakeup capabilities */
length = sizeof (struct NDIS_PNP_CAPABILITIES);
@@ -668,7 +668,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
retval = 0;
break;
case OID_PNP_QUERY_POWER:
- DEBUG("%s: OID_PNP_QUERY_POWER D%d\n", __FUNCTION__,
+ DBG("%s: OID_PNP_QUERY_POWER D%d\n", __FUNCTION__,
le32_to_cpu(get_unaligned((__le32 *)buf)) - 1);
/* only suspend is a real power state, and
* it can't be entered by OID_PNP_SET_POWER...
@@ -705,9 +705,9 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
return -ENOMEM;
if (buf_len && rndis_debug > 1) {
- DEBUG("set OID %08x value, len %d:\n", OID, buf_len);
+ DBG("set OID %08x value, len %d:\n", OID, buf_len);
for (i = 0; i < buf_len; i += 16) {
- DEBUG ("%03d: %08x %08x %08x %08x\n", i,
+ DBG("%03d: %08x %08x %08x %08x\n", i,
le32_to_cpu(get_unaligned((__le32 *)
&buf[i])),
le32_to_cpu(get_unaligned((__le32 *)
@@ -731,7 +731,7 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
*/
*params->filter = (u16) le32_to_cpu(get_unaligned(
(__le32 *)buf));
- DEBUG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
+ DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
__FUNCTION__, *params->filter);
/* this call has a significant side effect: it's
@@ -756,7 +756,7 @@ update_linkstate:
case OID_802_3_MULTICAST_LIST:
/* I think we can ignore this */
- DEBUG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
+ DBG("%s: OID_802_3_MULTICAST_LIST\n", __FUNCTION__);
retval = 0;
break;
#if 0
@@ -764,7 +764,7 @@ update_linkstate:
{
struct rndis_config_parameter *param;
param = (struct rndis_config_parameter *) buf;
- DEBUG("%s: OID_GEN_RNDIS_CONFIG_PARAMETER '%*s'\n",
+ DBG("%s: OID_GEN_RNDIS_CONFIG_PARAMETER '%*s'\n",
__FUNCTION__,
min(cpu_to_le32(param->ParameterNameLength),80),
buf + param->ParameterNameOffset);
@@ -781,7 +781,7 @@ update_linkstate:
* FIXME ... then things go batty; Windows wedges itself.
*/
i = le32_to_cpu(get_unaligned((__le32 *)buf));
- DEBUG("%s: OID_PNP_SET_POWER D%d\n", __FUNCTION__, i - 1);
+ DBG("%s: OID_PNP_SET_POWER D%d\n", __FUNCTION__, i - 1);
switch (i) {
case NdisDeviceStateD0:
*params->filter = params->saved_filter;
@@ -858,7 +858,7 @@ static int rndis_query_response (int configNr, rndis_query_msg_type *buf)
rndis_query_cmplt_type *resp;
rndis_resp_t *r;
- // DEBUG("%s: OID = %08X\n", __FUNCTION__, cpu_to_le32(buf->OID));
+ // DBG("%s: OID = %08X\n", __FUNCTION__, cpu_to_le32(buf->OID));
if (!rndis_per_dev_params [configNr].dev) return -ENOTSUPP;
/*
@@ -911,15 +911,15 @@ static int rndis_set_response (int configNr, rndis_set_msg_type *buf)
BufOffset = le32_to_cpu (buf->InformationBufferOffset);
#ifdef VERBOSE
- DEBUG("%s: Length: %d\n", __FUNCTION__, BufLength);
- DEBUG("%s: Offset: %d\n", __FUNCTION__, BufOffset);
- DEBUG("%s: InfoBuffer: ", __FUNCTION__);
+ DBG("%s: Length: %d\n", __FUNCTION__, BufLength);
+ DBG("%s: Offset: %d\n", __FUNCTION__, BufOffset);
+ DBG("%s: InfoBuffer: ", __FUNCTION__);
for (i = 0; i < BufLength; i++) {
- DEBUG ("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
+ DBG("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
}
- DEBUG ("\n");
+ DBG("\n");
#endif
resp->MessageType = __constant_cpu_to_le32 (REMOTE_NDIS_SET_CMPLT);
@@ -1082,14 +1082,14 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
/* For USB: responses may take up to 10 seconds */
switch (MsgType) {
case REMOTE_NDIS_INITIALIZE_MSG:
- DEBUG("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
+ DBG("%s: REMOTE_NDIS_INITIALIZE_MSG\n",
__FUNCTION__ );
params->state = RNDIS_INITIALIZED;
return rndis_init_response (configNr,
(rndis_init_msg_type *) buf);
case REMOTE_NDIS_HALT_MSG:
- DEBUG("%s: REMOTE_NDIS_HALT_MSG\n",
+ DBG("%s: REMOTE_NDIS_HALT_MSG\n",
__FUNCTION__ );
params->state = RNDIS_UNINITIALIZED;
if (params->dev) {
@@ -1107,7 +1107,7 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
(rndis_set_msg_type *) buf);
case REMOTE_NDIS_RESET_MSG:
- DEBUG("%s: REMOTE_NDIS_RESET_MSG\n",
+ DBG("%s: REMOTE_NDIS_RESET_MSG\n",
__FUNCTION__ );
return rndis_reset_response (configNr,
(rndis_reset_msg_type *) buf);
@@ -1115,7 +1115,7 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
case REMOTE_NDIS_KEEPALIVE_MSG:
/* For USB: host does this every 5 seconds */
if (rndis_debug > 1)
- DEBUG("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
+ DBG("%s: REMOTE_NDIS_KEEPALIVE_MSG\n",
__FUNCTION__ );
return rndis_keepalive_response (configNr,
(rndis_keepalive_msg_type *)
@@ -1132,7 +1132,7 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
{
unsigned i;
for (i = 0; i < MsgLength; i += 16) {
- DEBUG ("%03d: "
+ DBG("%03d: "
" %02x %02x %02x %02x"
" %02x %02x %02x %02x"
" %02x %02x %02x %02x"
@@ -1163,18 +1163,18 @@ int rndis_register (int (* rndis_control_ack) (struct net_device *))
if (!rndis_per_dev_params [i].used) {
rndis_per_dev_params [i].used = 1;
rndis_per_dev_params [i].ack = rndis_control_ack;
- DEBUG("%s: configNr = %d\n", __FUNCTION__, i);
+ DBG("%s: configNr = %d\n", __FUNCTION__, i);
return i;
}
}
- DEBUG("failed\n");
+ DBG("failed\n");
return -1;
}
void rndis_deregister (int configNr)
{
- DEBUG("%s: \n", __FUNCTION__ );
+ DBG("%s: \n", __FUNCTION__ );
if (configNr >= RNDIS_MAX_CONFIGS) return;
rndis_per_dev_params [configNr].used = 0;
@@ -1186,7 +1186,7 @@ int rndis_set_param_dev (u8 configNr, struct net_device *dev,
struct net_device_stats *stats,
u16 *cdc_filter)
{
- DEBUG("%s:\n", __FUNCTION__ );
+ DBG("%s:\n", __FUNCTION__ );
if (!dev || !stats) return -1;
if (configNr >= RNDIS_MAX_CONFIGS) return -1;
@@ -1199,7 +1199,7 @@ int rndis_set_param_dev (u8 configNr, struct net_device *dev,
int rndis_set_param_vendor (u8 configNr, u32 vendorID, const char *vendorDescr)
{
- DEBUG("%s:\n", __FUNCTION__ );
+ DBG("%s:\n", __FUNCTION__ );
if (!vendorDescr) return -1;
if (configNr >= RNDIS_MAX_CONFIGS) return -1;
@@ -1211,7 +1211,7 @@ int rndis_set_param_vendor (u8 configNr, u32 vendorID, const char *vendorDescr)
int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed)
{
- DEBUG("%s: %u %u\n", __FUNCTION__, medium, speed);
+ DBG("%s: %u %u\n", __FUNCTION__, medium, speed);
if (configNr >= RNDIS_MAX_CONFIGS) return -1;
rndis_per_dev_params [configNr].medium = medium;
@@ -1390,7 +1390,7 @@ static int rndis_proc_write (struct file *file, const char __user *buffer,
break;
default:
if (fl_speed) p->speed = speed;
- else DEBUG ("%c is not valid\n", c);
+ else DBG("%c is not valid\n", c);
break;
}
@@ -1419,12 +1419,12 @@ int __devinit rndis_init (void)
if (!(rndis_connect_state [i]
= create_proc_entry (name, 0660, NULL)))
{
- DEBUG ("%s :remove entries", __FUNCTION__);
+ DBG("%s :remove entries", __FUNCTION__);
while (i) {
sprintf (name, NAME_TEMPLATE, --i);
remove_proc_entry (name, NULL);
}
- DEBUG ("\n");
+ DBG("\n");
return -EIO;
}
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
new file mode 100644
index 000000000000..0be80c635c48
--- /dev/null
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -0,0 +1,2045 @@
+/*
+ * linux/drivers/usb/gadget/s3c2410_udc.c
+ *
+ * Samsung S3C24xx series on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <linux/clk.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <linux/usb.h>
+#include <linux/usb_gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+#include <asm/arch/irqs.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/regs-clock.h>
+#include <asm/arch/regs-gpio.h>
+#include <asm/arch/regs-udc.h>
+#include <asm/arch/udc.h>
+
+#include <asm/mach-types.h>
+
+#include "s3c2410_udc.h"
+
+#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
+#define DRIVER_VERSION "29 Apr 2007"
+#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
+ "Arnaud Patard <arnaud.patard@rtp-net.org>"
+
+static const char gadget_name[] = "s3c2410_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+static struct s3c2410_udc *the_controller;
+static struct clk *udc_clock;
+static struct clk *usb_bus_clock;
+static void __iomem *base_addr;
+static u64 rsrc_start;
+static u64 rsrc_len;
+static struct dentry *s3c2410_udc_debugfs_root;
+
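+/* the UDC registers are byte wide; these helpers wrap readb/writeb
+ * against the ioremapped register base.
+ */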
+static inline u32 udc_read(u32 reg)
+{
+ return readb(base_addr + reg);
+}
+
+static inline void udc_write(u32 value, u32 reg)
+{
+ writeb(value, base_addr + reg);
+}
+
+static inline void udc_writeb(void __iomem *base, u32 value, u32 reg)
+{
+ writeb(value, base + reg);
+}
+
+static struct s3c2410_udc_mach_info *udc_info;
+
+/*************************** DEBUG FUNCTION ***************************/
+#define DEBUG_NORMAL 1
+#define DEBUG_VERBOSE 2
+
+#ifdef CONFIG_USB_S3C2410_DEBUG
+#define USB_S3C2410_DEBUG_LEVEL 0
+
+static uint32_t s3c2410_ticks = 0;
+
+static int dprintk(int level, const char *fmt, ...)
+{
+ static char printk_buf[1024];
+ static long prevticks;
+ static int invocation;
+ va_list args;
+ int len;
+
+ if (level > USB_S3C2410_DEBUG_LEVEL)
+ return 0;
+
+ if (s3c2410_ticks != prevticks) {
+ prevticks = s3c2410_ticks;
+ invocation = 0;
+ }
+
+ len = scnprintf(printk_buf,
+ sizeof(printk_buf), "%1lu.%02d USB: ",
+ prevticks, invocation++);
+
+ va_start(args, fmt);
+ len = vscnprintf(printk_buf+len,
+ sizeof(printk_buf)-len, fmt, args);
+ va_end(args);
+
+ return printk(KERN_DEBUG "%s", printk_buf);
+}
+#else
+static int dprintk(int level, const char *fmt, ...)
+{
+ return 0;
+}
+#endif
+static int s3c2410_udc_debugfs_seq_show(struct seq_file *m, void *p)
+{
+ u32 addr_reg,pwr_reg,ep_int_reg,usb_int_reg;
+ u32 ep_int_en_reg, usb_int_en_reg, ep0_csr;
+ u32 ep1_i_csr1,ep1_i_csr2,ep1_o_csr1,ep1_o_csr2;
+ u32 ep2_i_csr1,ep2_i_csr2,ep2_o_csr1,ep2_o_csr2;
+
+ addr_reg = udc_read(S3C2410_UDC_FUNC_ADDR_REG);
+ pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
+ ep_int_reg = udc_read(S3C2410_UDC_EP_INT_REG);
+ usb_int_reg = udc_read(S3C2410_UDC_USB_INT_REG);
+ ep_int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+ usb_int_en_reg = udc_read(S3C2410_UDC_USB_INT_EN_REG);
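+ /* the endpoint CSR registers are banked: INDEX_REG selects which
+ * endpoint's CSRs the subsequent reads refer to.
+ */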
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ ep0_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ udc_write(1, S3C2410_UDC_INDEX_REG);
+ ep1_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ ep1_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
+ ep1_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ ep1_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
+ udc_write(2, S3C2410_UDC_INDEX_REG);
+ ep2_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ ep2_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
+ ep2_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ ep2_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
+
+ seq_printf(m, "FUNC_ADDR_REG : 0x%04X\n"
+ "PWR_REG : 0x%04X\n"
+ "EP_INT_REG : 0x%04X\n"
+ "USB_INT_REG : 0x%04X\n"
+ "EP_INT_EN_REG : 0x%04X\n"
+ "USB_INT_EN_REG : 0x%04X\n"
+ "EP0_CSR : 0x%04X\n"
+ "EP1_I_CSR1 : 0x%04X\n"
+ "EP1_I_CSR2 : 0x%04X\n"
+ "EP1_O_CSR1 : 0x%04X\n"
+ "EP1_O_CSR2 : 0x%04X\n"
+ "EP2_I_CSR1 : 0x%04X\n"
+ "EP2_I_CSR2 : 0x%04X\n"
+ "EP2_O_CSR1 : 0x%04X\n"
+ "EP2_O_CSR2 : 0x%04X\n",
+ addr_reg,pwr_reg,ep_int_reg,usb_int_reg,
+ ep_int_en_reg, usb_int_en_reg, ep0_csr,
+ ep1_i_csr1,ep1_i_csr2,ep1_o_csr1,ep1_o_csr2,
+ ep2_i_csr1,ep2_i_csr2,ep2_o_csr1,ep2_o_csr2
+ );
+
+ return 0;
+}
+
+static int s3c2410_udc_debugfs_fops_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, s3c2410_udc_debugfs_seq_show, NULL);
+}
+
+static const struct file_operations s3c2410_udc_debugfs_fops = {
+ .open = s3c2410_udc_debugfs_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+/* io macros */
+
+static inline void s3c2410_udc_clear_ep0_opr(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_SOPKTRDY,
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_clear_ep0_sst(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ writeb(0x00, base + S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_clear_ep0_se(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_SSE, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_ipr(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_IPKRDY, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG);
+}
+
+inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
+{
+ udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+
+ udc_writeb(base,(S3C2410_UDC_EP0_CSR_SOPKTRDY
+ | S3C2410_UDC_EP0_CSR_DE),
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_sse_out(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY
+ | S3C2410_UDC_EP0_CSR_SSE),
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base)
+{
+ udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ udc_writeb(base, (S3C2410_UDC_EP0_CSR_IPKRDY
+ | S3C2410_UDC_EP0_CSR_DE),
+ S3C2410_UDC_EP0_CSR_REG);
+}
+
+/*------------------------- I/O ----------------------------------*/
+
+/*
+ * s3c2410_udc_done
+ */
+static void s3c2410_udc_done(struct s3c2410_ep *ep,
+ struct s3c2410_request *req, int status)
+{
+ unsigned halted = ep->halted;
+
+ list_del_init(&req->queue);
+
+ if (likely (req->req.status == -EINPROGRESS))
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ ep->halted = 1;
+ req->req.complete(&ep->ep, &req->req);
+ ep->halted = halted;
+}
+
+static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
+ struct s3c2410_ep *ep, int status)
+{
+ /* Sanity check */
+ if (&ep->queue == NULL)
+ return;
+
+ while (!list_empty (&ep->queue)) {
+ struct s3c2410_request *req;
+ req = list_entry (ep->queue.next, struct s3c2410_request,
+ queue);
+ s3c2410_udc_done(ep, req, status);
+ }
+}
+
+static inline void s3c2410_udc_clear_ep_state(struct s3c2410_udc *dev)
+{
+ unsigned i;
+
+ /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
+ * fifos, and pending transactions mustn't be continued in any case.
+ */
+
+ for (i = 1; i < S3C2410_ENDPOINTS; i++)
+ s3c2410_udc_nuke(dev, &dev->ep[i], -ECONNABORTED);
+}
+
+static inline int s3c2410_udc_fifo_count_out(void)
+{
+ int tmp;
+
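+ /* the OUT FIFO byte count is split across two byte-wide registers:
+ * CNT2 holds the high byte, CNT1 the low byte.
+ */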
+ tmp = udc_read(S3C2410_UDC_OUT_FIFO_CNT2_REG) << 8;
+ tmp |= udc_read(S3C2410_UDC_OUT_FIFO_CNT1_REG);
+ return tmp;
+}
+
+/*
+ * s3c2410_udc_write_packet
+ */
+static inline int s3c2410_udc_write_packet(int fifo,
+ struct s3c2410_request *req,
+ unsigned max)
+{
+ unsigned len = min(req->req.length - req->req.actual, max);
+ u8 *buf = req->req.buf + req->req.actual;
+
+ prefetch(buf);
+
+ dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__,
+ req->req.actual, req->req.length, len, req->req.actual + len);
+
+ req->req.actual += len;
+
+ udelay(5);
+ writesb(base_addr + fifo, buf, len);
+ return len;
+}
+
+/*
+ * s3c2410_udc_write_fifo
+ *
+ * return: 0 = still running, 1 = completed, negative = errno
+ */
+static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep,
+ struct s3c2410_request *req)
+{
+ unsigned count;
+ int is_last;
+ u32 idx;
+ int fifo_reg;
+ u32 ep_csr;
+
+ idx = ep->bEndpointAddress & 0x7F;
+ switch (idx) {
+ default:
+ idx = 0;
+ case 0:
+ fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
+ break;
+ case 1:
+ fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
+ break;
+ case 2:
+ fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
+ break;
+ case 3:
+ fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
+ break;
+ case 4:
+ fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
+ break;
+ }
+
+ count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket);
+
+ /* last packet is often short (sometimes a zlp) */
+ if (count != ep->ep.maxpacket)
+ is_last = 1;
+ else if (req->req.length != req->req.actual || req->req.zero)
+ is_last = 0;
+ else
+ is_last = 2;
+
+ /* Only ep0 debug messages are interesting */
+ if (idx == 0)
+ dprintk(DEBUG_NORMAL,
+ "Written ep%d %d.%d of %d b [last %d,z %d]\n",
+ idx, count, req->req.actual, req->req.length,
+ is_last, req->req.zero);
+
+ if (is_last) {
+ /* The order is important. It prevents sending 2 packets
+ * at the same time */
+
+ if (idx == 0) {
+ /* Reset signal => no need to say 'data sent' */
+ if (! (udc_read(S3C2410_UDC_USB_INT_REG)
+ & S3C2410_UDC_USBINT_RESET))
+ s3c2410_udc_set_ep0_de_in(base_addr);
+ ep->dev->ep0state=EP0_IDLE;
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
+ S3C2410_UDC_IN_CSR1_REG);
+ }
+
+ s3c2410_udc_done(ep, req, 0);
+ is_last = 1;
+ } else {
+ if (idx == 0) {
+ /* Reset signal => no need to say 'data sent' */
+ if (! (udc_read(S3C2410_UDC_USB_INT_REG)
+ & S3C2410_UDC_USBINT_RESET))
+ s3c2410_udc_set_ep0_ipr(base_addr);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
+ S3C2410_UDC_IN_CSR1_REG);
+ }
+ }
+
+ return is_last;
+}
+
+static inline int s3c2410_udc_read_packet(int fifo, u8 *buf,
+ struct s3c2410_request *req, unsigned avail)
+{
+ unsigned len;
+
+ len = min(req->req.length - req->req.actual, avail);
+ req->req.actual += len;
+
+ readsb(fifo + base_addr, buf, len);
+ return len;
+}
+
+/*
+ * return: 0 = still running, 1 = queue empty, negative = errno
+ */
+static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep,
+ struct s3c2410_request *req)
+{
+ u8 *buf;
+ u32 ep_csr;
+ unsigned bufferspace;
+ int is_last=1;
+ unsigned avail;
+ int fifo_count = 0;
+ u32 idx;
+ int fifo_reg;
+
+ idx = ep->bEndpointAddress & 0x7F;
+
+ switch (idx) {
+ default:
+ idx = 0;
+ case 0:
+ fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
+ break;
+ case 1:
+ fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
+ break;
+ case 2:
+ fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
+ break;
+ case 3:
+ fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
+ break;
+ case 4:
+ fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
+ break;
+ }
+
+ if (!req->req.length)
+ return 1;
+
+ buf = req->req.buf + req->req.actual;
+ bufferspace = req->req.length - req->req.actual;
+ if (!bufferspace) {
+ dprintk(DEBUG_NORMAL, "%s: buffer full!\n", __func__);
+ return -1;
+ }
+
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+
+ fifo_count = s3c2410_udc_fifo_count_out();
+ dprintk(DEBUG_NORMAL, "%s fifo count : %d\n", __func__, fifo_count);
+
+ if (fifo_count > ep->ep.maxpacket)
+ avail = ep->ep.maxpacket;
+ else
+ avail = fifo_count;
+
+ fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail);
+
+ /* checking this with ep0 is not accurate as we already
+ * read a control request
+ */
+ if (idx != 0 && fifo_count < ep->ep.maxpacket) {
+ is_last = 1;
+ /* overflowed this request? flush extra data */
+ if (fifo_count != avail)
+ req->req.status = -EOVERFLOW;
+ } else {
+ is_last = (req->req.length <= req->req.actual) ? 1 : 0;
+ }
+
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ fifo_count = s3c2410_udc_fifo_count_out();
+
+ /* Only ep0 debug messages are interesting */
+ if (idx == 0)
+ dprintk(DEBUG_VERBOSE, "%s fifo count : %d [last %d]\n",
+ __func__, fifo_count,is_last);
+
+ if (is_last) {
+ if (idx == 0) {
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ ep->dev->ep0state = EP0_IDLE;
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
+ S3C2410_UDC_OUT_CSR1_REG);
+ }
+
+ s3c2410_udc_done(ep, req, 0);
+ } else {
+ if (idx == 0) {
+ s3c2410_udc_clear_ep0_opr(base_addr);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
+ S3C2410_UDC_OUT_CSR1_REG);
+ }
+ }
+
+ return is_last;
+}
+
+static int s3c2410_udc_read_fifo_crq(struct usb_ctrlrequest *crq)
+{
+ unsigned char *outbuf = (unsigned char*)crq;
+ int bytes_read = 0;
+
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+
+ bytes_read = s3c2410_udc_fifo_count_out();
+
+ dprintk(DEBUG_NORMAL, "%s: fifo_count=%d\n", __func__, bytes_read);
+
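+ /* a SETUP transaction carries at most 8 bytes, the size of
+ * struct usb_ctrlrequest.
+ */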
+ if (bytes_read > sizeof(struct usb_ctrlrequest))
+ bytes_read = sizeof(struct usb_ctrlrequest);
+
+ readsb(S3C2410_UDC_EP0_FIFO_REG + base_addr, outbuf, bytes_read);
+
+ dprintk(DEBUG_VERBOSE, "%s: len=%d %02x:%02x {%x,%x,%x}\n", __func__,
+ bytes_read, crq->bRequest, crq->bRequestType,
+ crq->wValue, crq->wIndex, crq->wLength);
+
+ return bytes_read;
+}
+
+static int s3c2410_udc_get_status(struct s3c2410_udc *dev,
+ struct usb_ctrlrequest *crq)
+{
+ u16 status = 0;
+ u8 ep_num = crq->wIndex & 0x7F;
+ u8 is_in = crq->wIndex & USB_DIR_IN;
+
+ switch (crq->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_DEVICE:
+ status = dev->devstatus;
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ if (ep_num > 4 || crq->wLength > 2)
+ return 1;
+
+ if (ep_num == 0) {
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ status = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ status = status & S3C2410_UDC_EP0_CSR_SENDSTL;
+ } else {
+ udc_write(ep_num, S3C2410_UDC_INDEX_REG);
+ if (is_in) {
+ status = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ status = status & S3C2410_UDC_ICSR1_SENDSTL;
+ } else {
+ status = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ status = status & S3C2410_UDC_OCSR1_SENDSTL;
+ }
+ }
+
+ status = status ? 1 : 0;
+ break;
+
+ default:
+ return 1;
+ }
+
+ /* Seems to be needed to get it working. ouch :( */
+ udelay(5);
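+ /* GET_STATUS data is two bytes, written to the EP0 FIFO least
+ * significant byte first before ending the IN data phase.
+ */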
+ udc_write(status & 0xFF, S3C2410_UDC_EP0_FIFO_REG);
+ udc_write(status >> 8, S3C2410_UDC_EP0_FIFO_REG);
+ s3c2410_udc_set_ep0_de_in(base_addr);
+
+ return 0;
+}
+/*------------------------- usb state machine -------------------------------*/
+static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value);
+
+static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
+ struct s3c2410_ep *ep,
+ struct usb_ctrlrequest *crq,
+ u32 ep0csr)
+{
+ int len, ret, tmp;
+
+ /* start control request? */
+ if (!(ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY))
+ return;
+
+ s3c2410_udc_nuke(dev, ep, -EPROTO);
+
+ len = s3c2410_udc_read_fifo_crq(crq);
+ if (len != sizeof(*crq)) {
+ dprintk(DEBUG_NORMAL, "setup begin: fifo READ ERROR"
+ " wanted %d bytes got %d. Stalling out...\n",
+ sizeof(*crq), len);
+ s3c2410_udc_set_ep0_ss(base_addr);
+ return;
+ }
+
+ dprintk(DEBUG_NORMAL, "bRequest = %d bRequestType %d wLength = %d\n",
+ crq->bRequest, crq->bRequestType, crq->wLength);
+
+ /* cope with automagic for some standard requests. */
+ dev->req_std = (crq->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD;
+ dev->req_config = 0;
+ dev->req_pending = 1;
+
+ switch (crq->bRequest) {
+ case USB_REQ_SET_CONFIGURATION:
+ dprintk(DEBUG_NORMAL, "USB_REQ_SET_CONFIGURATION ... \n");
+
+ if (crq->bRequestType == USB_RECIP_DEVICE) {
+ dev->req_config = 1;
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ }
+ break;
+
+ case USB_REQ_SET_INTERFACE:
+ dprintk(DEBUG_NORMAL, "USB_REQ_SET_INTERFACE ... \n");
+
+ if (crq->bRequestType == USB_RECIP_INTERFACE) {
+ dev->req_config = 1;
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ }
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ dprintk(DEBUG_NORMAL, "USB_REQ_SET_ADDRESS ... \n");
+
+ if (crq->bRequestType == USB_RECIP_DEVICE) {
+ tmp = crq->wValue & 0x7F;
+ dev->address = tmp;
+ udc_write((tmp | S3C2410_UDC_FUNCADDR_UPDATE),
+ S3C2410_UDC_FUNC_ADDR_REG);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ return;
+ }
+ break;
+
+ case USB_REQ_GET_STATUS:
+ dprintk(DEBUG_NORMAL, "USB_REQ_GET_STATUS ... \n");
+ s3c2410_udc_clear_ep0_opr(base_addr);
+
+ if (dev->req_std) {
+ if (!s3c2410_udc_get_status(dev, crq)) {
+ return;
+ }
+ }
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ s3c2410_udc_clear_ep0_opr(base_addr);
+
+ if (crq->bRequestType != USB_RECIP_ENDPOINT)
+ break;
+
+ if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
+ break;
+
+ s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ return;
+
+ case USB_REQ_SET_FEATURE:
+ s3c2410_udc_clear_ep0_opr(base_addr);
+
+ if (crq->bRequestType != USB_RECIP_ENDPOINT)
+ break;
+
+ if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
+ break;
+
+ s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ return;
+
+ default:
+ s3c2410_udc_clear_ep0_opr(base_addr);
+ break;
+ }
+
+ if (crq->bRequestType & USB_DIR_IN)
+ dev->ep0state = EP0_IN_DATA_PHASE;
+ else
+ dev->ep0state = EP0_OUT_DATA_PHASE;
+
+ ret = dev->driver->setup(&dev->gadget, crq);
+ if (ret < 0) {
+ if (dev->req_config) {
+ dprintk(DEBUG_NORMAL, "config change %02x fail %d?\n",
+ crq->bRequest, ret);
+ return;
+ }
+
+ if (ret == -EOPNOTSUPP)
+ dprintk(DEBUG_NORMAL, "Operation not supported\n");
+ else
+ dprintk(DEBUG_NORMAL,
+ "dev->driver->setup failed. (%d)\n", ret);
+
+ udelay(5);
+ s3c2410_udc_set_ep0_ss(base_addr);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ dev->ep0state = EP0_IDLE;
+ /* deferred i/o == no response yet */
+ } else if (dev->req_pending) {
+ dprintk(DEBUG_VERBOSE, "dev->req_pending... what now?\n");
+ dev->req_pending=0;
+ }
+
+ dprintk(DEBUG_VERBOSE, "ep0state %s\n", ep0states[dev->ep0state]);
+}
+
+static void s3c2410_udc_handle_ep0(struct s3c2410_udc *dev)
+{
+ u32 ep0csr;
+ struct s3c2410_ep *ep = &dev->ep[0];
+ struct s3c2410_request *req;
+ struct usb_ctrlrequest crq;
+
+ if (list_empty(&ep->queue))
+ req = NULL;
+ else
+ req = list_entry(ep->queue.next, struct s3c2410_request, queue);
+
+	/* We make the assumption that S3C2410_UDC_IN_CSR1_REG is equal to
+	 * S3C2410_UDC_EP0_CSR_REG when the index is zero */
+
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+
+ dprintk(DEBUG_NORMAL, "ep0csr %x ep0state %s\n",
+ ep0csr, ep0states[dev->ep0state]);
+
+ /* clear stall status */
+ if (ep0csr & S3C2410_UDC_EP0_CSR_SENTSTL) {
+ s3c2410_udc_nuke(dev, ep, -EPIPE);
+ dprintk(DEBUG_NORMAL, "... clear SENT_STALL ...\n");
+ s3c2410_udc_clear_ep0_sst(base_addr);
+ dev->ep0state = EP0_IDLE;
+ return;
+ }
+
+ /* clear setup end */
+ if (ep0csr & S3C2410_UDC_EP0_CSR_SE) {
+ dprintk(DEBUG_NORMAL, "... serviced SETUP_END ...\n");
+ s3c2410_udc_nuke(dev, ep, 0);
+ s3c2410_udc_clear_ep0_se(base_addr);
+ dev->ep0state = EP0_IDLE;
+ }
+
+ switch (dev->ep0state) {
+ case EP0_IDLE:
+ s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr);
+ break;
+
+ case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
+ dprintk(DEBUG_NORMAL, "EP0_IN_DATA_PHASE ... what now?\n");
+ if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req) {
+ s3c2410_udc_write_fifo(ep, req);
+ }
+ break;
+
+ case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
+ dprintk(DEBUG_NORMAL, "EP0_OUT_DATA_PHASE ... what now?\n");
+		if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req) {
+			s3c2410_udc_read_fifo(ep, req);
+		}
+ break;
+
+ case EP0_END_XFER:
+ dprintk(DEBUG_NORMAL, "EP0_END_XFER ... what now?\n");
+ dev->ep0state = EP0_IDLE;
+ break;
+
+ case EP0_STALL:
+ dprintk(DEBUG_NORMAL, "EP0_STALL ... what now?\n");
+ dev->ep0state = EP0_IDLE;
+ break;
+ }
+}
+
+/*
+ * handle_ep - Manage I/O endpoints
+ */
+
+static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
+{
+ struct s3c2410_request *req;
+ int is_in = ep->bEndpointAddress & USB_DIR_IN;
+ u32 ep_csr1;
+ u32 idx;
+
+ if (likely (!list_empty(&ep->queue)))
+ req = list_entry(ep->queue.next,
+ struct s3c2410_request, queue);
+ else
+ req = NULL;
+
+ idx = ep->bEndpointAddress & 0x7F;
+
+ if (is_in) {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n",
+ idx, ep_csr1, req ? 1 : 0);
+
+ if (ep_csr1 & S3C2410_UDC_ICSR1_SENTSTL) {
+ dprintk(DEBUG_VERBOSE, "st\n");
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr1 & ~S3C2410_UDC_ICSR1_SENTSTL,
+ S3C2410_UDC_IN_CSR1_REG);
+ return;
+ }
+
+ if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req) {
+ s3c2410_udc_write_fifo(ep,req);
+ }
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
+ dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1);
+
+ if (ep_csr1 & S3C2410_UDC_OCSR1_SENTSTL) {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ udc_write(ep_csr1 & ~S3C2410_UDC_OCSR1_SENTSTL,
+ S3C2410_UDC_OUT_CSR1_REG);
+ return;
+ }
+
+ if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req) {
+ s3c2410_udc_read_fifo(ep,req);
+ }
+ }
+}
+
+#include <asm/arch/regs-irq.h>
+
+/*
+ * s3c2410_udc_irq - interrupt handler
+ */
+static irqreturn_t s3c2410_udc_irq(int irq, void *_dev)
+{
+ struct s3c2410_udc *dev = _dev;
+ int usb_status;
+ int usbd_status;
+ int pwr_reg;
+ int ep0csr;
+ int i;
+ u32 idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /* Driver connected ? */
+ if (!dev->driver) {
+ /* Clear interrupts */
+ udc_write(udc_read(S3C2410_UDC_USB_INT_REG),
+ S3C2410_UDC_USB_INT_REG);
+ udc_write(udc_read(S3C2410_UDC_EP_INT_REG),
+ S3C2410_UDC_EP_INT_REG);
+ }
+
+ /* Save index */
+ idx = udc_read(S3C2410_UDC_INDEX_REG);
+
+ /* Read status registers */
+ usb_status = udc_read(S3C2410_UDC_USB_INT_REG);
+ usbd_status = udc_read(S3C2410_UDC_EP_INT_REG);
+ pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
+
+ udc_writeb(base_addr, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
+ ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+
+ dprintk(DEBUG_NORMAL, "usbs=%02x, usbds=%02x, pwr=%02x ep0csr=%02x\n",
+ usb_status, usbd_status, pwr_reg, ep0csr);
+
+ /*
+	 * Now, handle interrupts. There are two types:
+ * - Reset, Resume, Suspend coming -> usb_int_reg
+ * - EP -> ep_int_reg
+ */
+
+ /* RESET */
+ if (usb_status & S3C2410_UDC_USBINT_RESET) {
+		/* two kinds of reset:
+		 * - reset start -> pwr reg = 8
+		 * - reset end -> pwr reg = 0
+		 */
+ dprintk(DEBUG_NORMAL, "USB reset csr %x pwr %x\n",
+ ep0csr, pwr_reg);
+
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+ udc_write(0x00, S3C2410_UDC_INDEX_REG);
+ udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3,
+ S3C2410_UDC_MAXP_REG);
+ dev->address = 0;
+
+ dev->ep0state = EP0_IDLE;
+ dev->gadget.speed = USB_SPEED_FULL;
+
+ /* clear interrupt */
+ udc_write(S3C2410_UDC_USBINT_RESET,
+ S3C2410_UDC_USB_INT_REG);
+
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ /* RESUME */
+ if (usb_status & S3C2410_UDC_USBINT_RESUME) {
+ dprintk(DEBUG_NORMAL, "USB resume\n");
+
+ /* clear interrupt */
+ udc_write(S3C2410_UDC_USBINT_RESUME,
+ S3C2410_UDC_USB_INT_REG);
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ }
+
+ /* SUSPEND */
+ if (usb_status & S3C2410_UDC_USBINT_SUSPEND) {
+ dprintk(DEBUG_NORMAL, "USB suspend\n");
+
+ /* clear interrupt */
+ udc_write(S3C2410_UDC_USBINT_SUSPEND,
+ S3C2410_UDC_USB_INT_REG);
+
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ && dev->driver
+ && dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+
+ dev->ep0state = EP0_IDLE;
+ }
+
+ /* EP */
+ /* control traffic */
+	/* checking ep0csr != 0 is not a good idea, as clearing in_pkt_ready
+	 * generates an interrupt
+	 */
+ if (usbd_status & S3C2410_UDC_INT_EP0) {
+ dprintk(DEBUG_VERBOSE, "USB ep0 irq\n");
+ /* Clear the interrupt bit by setting it to 1 */
+ udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_REG);
+ s3c2410_udc_handle_ep0(dev);
+ }
+
+ /* endpoint data transfers */
+ for (i = 1; i < S3C2410_ENDPOINTS; i++) {
+ u32 tmp = 1 << i;
+ if (usbd_status & tmp) {
+ dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i);
+
+ /* Clear the interrupt bit by setting it to 1 */
+ udc_write(tmp, S3C2410_UDC_EP_INT_REG);
+ s3c2410_udc_handle_ep(&dev->ep[i]);
+ }
+ }
+
+ dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", irq);
+
+ /* Restore old index */
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return IRQ_HANDLED;
+}
+/*------------------------- s3c2410_ep_ops ----------------------------------*/
+
+static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct s3c2410_ep, ep);
+}
+
+static inline struct s3c2410_udc *to_s3c2410_udc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct s3c2410_udc, gadget);
+}
+
+static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req)
+{
+ return container_of(req, struct s3c2410_request, req);
+}
+
+/*
+ * s3c2410_udc_ep_enable
+ */
+static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct s3c2410_udc *dev;
+ struct s3c2410_ep *ep;
+ u32 max, tmp;
+ unsigned long flags;
+ u32 csr1,csr2;
+ u32 int_en_reg;
+
+ ep = to_s3c2410_ep(_ep);
+
+ if (!_ep || !desc || ep->desc
+ || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
+
+ local_irq_save (flags);
+ _ep->maxpacket = max & 0x7ff;
+ ep->desc = desc;
+ ep->halted = 0;
+ ep->bEndpointAddress = desc->bEndpointAddress;
+
+ /* set max packet */
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(max >> 3, S3C2410_UDC_MAXP_REG);
+
+ /* set type, direction, address; reset fifo counters */
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ csr1 = S3C2410_UDC_ICSR1_FFLUSH|S3C2410_UDC_ICSR1_CLRDT;
+ csr2 = S3C2410_UDC_ICSR2_MODEIN|S3C2410_UDC_ICSR2_DMAIEN;
+
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
+ } else {
+ /* don't flush in fifo or it will cause endpoint interrupt */
+ csr1 = S3C2410_UDC_ICSR1_CLRDT;
+ csr2 = S3C2410_UDC_ICSR2_DMAIEN;
+
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
+
+ csr1 = S3C2410_UDC_OCSR1_FFLUSH | S3C2410_UDC_OCSR1_CLRDT;
+ csr2 = S3C2410_UDC_OCSR2_DMAIEN;
+
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr1, S3C2410_UDC_OUT_CSR1_REG);
+ udc_write(ep->num, S3C2410_UDC_INDEX_REG);
+ udc_write(csr2, S3C2410_UDC_OUT_CSR2_REG);
+ }
+
+ /* enable irqs */
+ int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+ udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG);
+
+ /* print some debug message */
+ tmp = desc->bEndpointAddress;
+ dprintk (DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n",
+ _ep->name,ep->num, tmp,
+ desc->bEndpointAddress & USB_DIR_IN ? "in" : "out", max);
+
+ local_irq_restore (flags);
+ s3c2410_udc_set_halt(_ep, 0);
+
+ return 0;
+}
+
+/*
+ * s3c2410_udc_ep_disable
+ */
+static int s3c2410_udc_ep_disable(struct usb_ep *_ep)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ unsigned long flags;
+ u32 int_en_reg;
+
+ if (!_ep || !ep->desc) {
+ dprintk(DEBUG_NORMAL, "%s not enabled\n",
+ _ep ? ep->ep.name : NULL);
+ return -EINVAL;
+ }
+
+ local_irq_save(flags);
+
+ dprintk(DEBUG_NORMAL, "ep_disable: %s\n", _ep->name);
+
+ ep->desc = NULL;
+ ep->halted = 1;
+
+ s3c2410_udc_nuke (ep->dev, ep, -ESHUTDOWN);
+
+ /* disable irqs */
+ int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
+ udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG);
+
+ local_irq_restore(flags);
+
+ dprintk(DEBUG_NORMAL, "%s disabled\n", _ep->name);
+
+ return 0;
+}
+
+/*
+ * s3c2410_udc_alloc_request
+ */
+static struct usb_request *
+s3c2410_udc_alloc_request(struct usb_ep *_ep, gfp_t mem_flags)
+{
+ struct s3c2410_request *req;
+
+	dprintk(DEBUG_VERBOSE, "%s(%p,%d)\n", __func__, _ep, mem_flags);
+
+ if (!_ep)
+ return NULL;
+
+ req = kzalloc (sizeof(struct s3c2410_request), mem_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD (&req->queue);
+ return &req->req;
+}
+
+/*
+ * s3c2410_udc_free_request
+ */
+static void
+s3c2410_udc_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ struct s3c2410_request *req = to_s3c2410_req(_req);
+
+ dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
+
+ if (!ep || !_req || (!ep->desc && _ep->name != ep0name))
+ return;
+
+ WARN_ON (!list_empty (&req->queue));
+ kfree(req);
+}
+
+/*
+ * s3c2410_udc_queue
+ */
+static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct s3c2410_request *req = to_s3c2410_req(_req);
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ struct s3c2410_udc *dev;
+ u32 ep_csr = 0;
+ int fifo_count = 0;
+ unsigned long flags;
+
+ if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+ dprintk(DEBUG_NORMAL, "%s: invalid args\n", __func__);
+ return -EINVAL;
+ }
+
+ dev = ep->dev;
+ if (unlikely (!dev->driver
+ || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
+ return -ESHUTDOWN;
+ }
+
+ local_irq_save (flags);
+
+ if (unlikely(!_req || !_req->complete
+ || !_req->buf || !list_empty(&req->queue))) {
+ if (!_req)
+ dprintk(DEBUG_NORMAL, "%s: 1 X X X\n", __func__);
+ else {
+ dprintk(DEBUG_NORMAL, "%s: 0 %01d %01d %01d\n",
+ __func__, !_req->complete,!_req->buf,
+ !list_empty(&req->queue));
+ }
+
+ local_irq_restore(flags);
+ return -EINVAL;
+ }
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n",
+ __func__, ep->bEndpointAddress, _req->length);
+
+ if (ep->bEndpointAddress) {
+ udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG);
+
+ ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
+ ? S3C2410_UDC_IN_CSR1_REG
+ : S3C2410_UDC_OUT_CSR1_REG);
+ fifo_count = s3c2410_udc_fifo_count_out();
+ } else {
+ udc_write(0, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
+ fifo_count = s3c2410_udc_fifo_count_out();
+ }
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->halted) {
+ if (ep->bEndpointAddress == 0 /* ep0 */) {
+ switch (dev->ep0state) {
+ case EP0_IN_DATA_PHASE:
+ if (!(ep_csr&S3C2410_UDC_EP0_CSR_IPKRDY)
+ && s3c2410_udc_write_fifo(ep,
+ req)) {
+ dev->ep0state = EP0_IDLE;
+ req = NULL;
+ }
+ break;
+
+ case EP0_OUT_DATA_PHASE:
+ if ((!_req->length)
+ || ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
+ && s3c2410_udc_read_fifo(ep,
+ req))) {
+ dev->ep0state = EP0_IDLE;
+ req = NULL;
+ }
+ break;
+
+ default:
+ local_irq_restore(flags);
+ return -EL2HLT;
+ }
+ } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
+ && (!(ep_csr&S3C2410_UDC_OCSR1_PKTRDY))
+ && s3c2410_udc_write_fifo(ep, req)) {
+ req = NULL;
+ } else if ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
+ && fifo_count
+ && s3c2410_udc_read_fifo(ep, req)) {
+ req = NULL;
+ }
+ }
+
+ /* pio or dma irq handler advances the queue. */
+ if (likely (req != 0))
+ list_add_tail(&req->queue, &ep->queue);
+
+ local_irq_restore(flags);
+
+ dprintk(DEBUG_VERBOSE, "%s ok\n", __func__);
+ return 0;
+}
+
+/*
+ * s3c2410_udc_dequeue
+ */
+static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ struct s3c2410_udc *udc;
+ int retval = -EINVAL;
+ unsigned long flags;
+ struct s3c2410_request *req = NULL;
+
+ dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
+
+ if (!the_controller->driver)
+ return -ESHUTDOWN;
+
+ if (!_ep || !_req)
+ return retval;
+
+ udc = to_s3c2410_udc(ep->gadget);
+
+ local_irq_save (flags);
+
+ list_for_each_entry (req, &ep->queue, queue) {
+ if (&req->req == _req) {
+ list_del_init (&req->queue);
+ _req->status = -ECONNRESET;
+ retval = 0;
+ break;
+ }
+ }
+
+ if (retval == 0) {
+ dprintk(DEBUG_VERBOSE,
+ "dequeued req %p from %s, len %d buf %p\n",
+ req, _ep->name, _req->length, _req->buf);
+
+ s3c2410_udc_done(ep, req, -ECONNRESET);
+ }
+
+ local_irq_restore (flags);
+ return retval;
+}
+
+/*
+ * s3c2410_udc_set_halt
+ */
+static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value)
+{
+ struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
+ u32 ep_csr = 0;
+ unsigned long flags;
+ u32 idx;
+
+ if (unlikely (!_ep || (!ep->desc && ep->ep.name != ep0name))) {
+ dprintk(DEBUG_NORMAL, "%s: inval 2\n", __func__);
+ return -EINVAL;
+ }
+
+ local_irq_save (flags);
+
+ idx = ep->bEndpointAddress & 0x7F;
+
+ if (idx == 0) {
+ s3c2410_udc_set_ep0_ss(base_addr);
+ s3c2410_udc_set_ep0_de_out(base_addr);
+ } else {
+ udc_write(idx, S3C2410_UDC_INDEX_REG);
+ ep_csr = udc_read((ep->bEndpointAddress &USB_DIR_IN)
+ ? S3C2410_UDC_IN_CSR1_REG
+ : S3C2410_UDC_OUT_CSR1_REG);
+
+ if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
+ if (value)
+ udc_write(ep_csr | S3C2410_UDC_ICSR1_SENDSTL,
+ S3C2410_UDC_IN_CSR1_REG);
+ else {
+ ep_csr &= ~S3C2410_UDC_ICSR1_SENDSTL;
+ udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
+ ep_csr |= S3C2410_UDC_ICSR1_CLRDT;
+ udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
+ }
+ } else {
+ if (value)
+ udc_write(ep_csr | S3C2410_UDC_OCSR1_SENDSTL,
+ S3C2410_UDC_OUT_CSR1_REG);
+ else {
+ ep_csr &= ~S3C2410_UDC_OCSR1_SENDSTL;
+ udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
+ ep_csr |= S3C2410_UDC_OCSR1_CLRDT;
+ udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
+ }
+ }
+ }
+
+ ep->halted = value ? 1 : 0;
+ local_irq_restore (flags);
+
+ return 0;
+}
+
+static const struct usb_ep_ops s3c2410_ep_ops = {
+ .enable = s3c2410_udc_ep_enable,
+ .disable = s3c2410_udc_ep_disable,
+
+ .alloc_request = s3c2410_udc_alloc_request,
+ .free_request = s3c2410_udc_free_request,
+
+ .queue = s3c2410_udc_queue,
+ .dequeue = s3c2410_udc_dequeue,
+
+ .set_halt = s3c2410_udc_set_halt,
+};
+
+/*------------------------- usb_gadget_ops ----------------------------------*/
+
+/*
+ * s3c2410_udc_get_frame
+ */
+static int s3c2410_udc_get_frame(struct usb_gadget *_gadget)
+{
+ int tmp;
+
+ dprintk(DEBUG_VERBOSE, "%s()\n", __func__);
+
+ tmp = udc_read(S3C2410_UDC_FRAME_NUM2_REG) << 8;
+ tmp |= udc_read(S3C2410_UDC_FRAME_NUM1_REG);
+ return tmp;
+}
+
+/*
+ * s3c2410_udc_wakeup
+ */
+static int s3c2410_udc_wakeup(struct usb_gadget *_gadget)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+ return 0;
+}
+
+/*
+ * s3c2410_udc_set_selfpowered
+ */
+static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
+{
+ struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ if (value)
+ udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+ else
+ udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ return 0;
+}
+
+static void s3c2410_udc_disable(struct s3c2410_udc *dev);
+static void s3c2410_udc_enable(struct s3c2410_udc *dev);
+
+static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ if (udc_info && udc_info->udc_command) {
+ if (is_on)
+ s3c2410_udc_enable(udc);
+ else {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ if (udc->driver && udc->driver->disconnect)
+ udc->driver->disconnect(&udc->gadget);
+
+ }
+ s3c2410_udc_disable(udc);
+ }
+	} else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int s3c2410_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ udc->vbus = (is_active != 0);
+ s3c2410_udc_set_pullup(udc, is_active);
+ return 0;
+}
+
+static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ s3c2410_udc_set_pullup(udc, is_on ? 0 : 1);
+ return 0;
+}
+
+static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
+{
+ struct s3c2410_udc *dev = _dev;
+ unsigned int value;
+
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+ value = s3c2410_gpio_getpin(udc_info->vbus_pin);
+
+ if (udc_info->vbus_pin_inverted)
+ value = !value;
+
+ if (value != dev->vbus)
+ s3c2410_udc_vbus_session(&dev->gadget, value);
+
+ return IRQ_HANDLED;
+}
+
+static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ if (udc_info && udc_info->vbus_draw) {
+ udc_info->vbus_draw(ma);
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static const struct usb_gadget_ops s3c2410_ops = {
+ .get_frame = s3c2410_udc_get_frame,
+ .wakeup = s3c2410_udc_wakeup,
+ .set_selfpowered = s3c2410_udc_set_selfpowered,
+ .pullup = s3c2410_udc_pullup,
+ .vbus_session = s3c2410_udc_vbus_session,
+ .vbus_draw = s3c2410_vbus_draw,
+};
+
+/*------------------------- gadget driver handling---------------------------*/
+/*
+ * s3c2410_udc_disable
+ */
+static void s3c2410_udc_disable(struct s3c2410_udc *dev)
+{
+ dprintk(DEBUG_NORMAL, "%s()\n", __func__);
+
+ /* Disable all interrupts */
+ udc_write(0x00, S3C2410_UDC_USB_INT_EN_REG);
+ udc_write(0x00, S3C2410_UDC_EP_INT_EN_REG);
+
+ /* Clear the interrupt registers */
+ udc_write(S3C2410_UDC_USBINT_RESET
+ | S3C2410_UDC_USBINT_RESUME
+ | S3C2410_UDC_USBINT_SUSPEND,
+ S3C2410_UDC_USB_INT_REG);
+
+ udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
+
+ /* Good bye, cruel world */
+ if (udc_info && udc_info->udc_command)
+ udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+
+ /* Set speed to unknown */
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+}
+
+/*
+ * s3c2410_udc_reinit
+ */
+static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
+{
+ u32 i;
+
+ /* device/ep0 records init */
+ INIT_LIST_HEAD (&dev->gadget.ep_list);
+ INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
+ dev->ep0state = EP0_IDLE;
+
+ for (i = 0; i < S3C2410_ENDPOINTS; i++) {
+ struct s3c2410_ep *ep = &dev->ep[i];
+
+ if (i != 0)
+ list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
+
+ ep->dev = dev;
+ ep->desc = NULL;
+ ep->halted = 0;
+ INIT_LIST_HEAD (&ep->queue);
+ }
+}
+
+/*
+ * s3c2410_udc_enable
+ */
+static void s3c2410_udc_enable(struct s3c2410_udc *dev)
+{
+ int i;
+
+ dprintk(DEBUG_NORMAL, "s3c2410_udc_enable called\n");
+
+ /* dev->gadget.speed = USB_SPEED_UNKNOWN; */
+ dev->gadget.speed = USB_SPEED_FULL;
+
+ /* Set MAXP for all endpoints */
+ for (i = 0; i < S3C2410_ENDPOINTS; i++) {
+ udc_write(i, S3C2410_UDC_INDEX_REG);
+ udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3,
+ S3C2410_UDC_MAXP_REG);
+ }
+
+ /* Set default power state */
+ udc_write(DEFAULT_POWER_STATE, S3C2410_UDC_PWR_REG);
+
+	/* Enable reset and suspend interrupts */
+ udc_write(S3C2410_UDC_USBINT_RESET | S3C2410_UDC_USBINT_SUSPEND,
+ S3C2410_UDC_USB_INT_EN_REG);
+
+ /* Enable ep0 interrupt */
+ udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
+
+ /* time to say "hello, world" */
+ if (udc_info && udc_info->udc_command)
+ udc_info->udc_command(S3C2410_UDC_P_ENABLE);
+}
+
+/*
+ * usb_gadget_register_driver
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ struct s3c2410_udc *udc = the_controller;
+ int retval;
+
+ dprintk(DEBUG_NORMAL, "usb_gadget_register_driver() '%s'\n",
+ driver->driver.name);
+
+ /* Sanity checks */
+ if (!udc)
+ return -ENODEV;
+
+ if (udc->driver)
+ return -EBUSY;
+
+ if (!driver->bind || !driver->setup
+ || driver->speed != USB_SPEED_FULL) {
+ printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
+ driver->bind, driver->setup, driver->speed);
+ return -EINVAL;
+ }
+#if defined(MODULE)
+ if (!driver->unbind) {
+ printk(KERN_ERR "Invalid driver: no unbind method\n");
+ return -EINVAL;
+ }
+#endif
+
+ /* Hook the driver */
+ udc->driver = driver;
+ udc->gadget.dev.driver = &driver->driver;
+
+ /* Bind the driver */
+ if ((retval = device_add(&udc->gadget.dev)) != 0) {
+		printk(KERN_ERR "Error in device_add() : %d\n", retval);
+ goto register_error;
+ }
+
+ dprintk(DEBUG_NORMAL, "binding gadget driver '%s'\n",
+ driver->driver.name);
+
+ if ((retval = driver->bind (&udc->gadget)) != 0) {
+ device_del(&udc->gadget.dev);
+ goto register_error;
+ }
+
+ /* Enable udc */
+ s3c2410_udc_enable(udc);
+
+ return 0;
+
+register_error:
+ udc->driver = NULL;
+ udc->gadget.dev.driver = NULL;
+ return retval;
+}
+
+/*
+ * usb_gadget_unregister_driver
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct s3c2410_udc *udc = the_controller;
+
+ if (!udc)
+ return -ENODEV;
+
+ if (!driver || driver != udc->driver || !driver->unbind)
+ return -EINVAL;
+
+	dprintk(DEBUG_NORMAL, "usb_gadget_unregister_driver() '%s'\n",
+		driver->driver.name);
+
+ if (driver->disconnect)
+ driver->disconnect(&udc->gadget);
+
+ device_del(&udc->gadget.dev);
+ udc->driver = NULL;
+
+ /* Disable udc */
+ s3c2410_udc_disable(udc);
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+static struct s3c2410_udc memory = {
+ .gadget = {
+ .ops = &s3c2410_ops,
+ .ep0 = &memory.ep[0].ep,
+ .name = gadget_name,
+ .dev = {
+ .bus_id = "gadget",
+ },
+ },
+
+ /* control endpoint */
+ .ep[0] = {
+ .num = 0,
+ .ep = {
+ .name = ep0name,
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP0_FIFO_SIZE,
+ },
+ .dev = &memory,
+ },
+
+ /* first group of endpoints */
+ .ep[1] = {
+ .num = 1,
+ .ep = {
+ .name = "ep1-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 1,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .ep[2] = {
+ .num = 2,
+ .ep = {
+ .name = "ep2-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 2,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .ep[3] = {
+ .num = 3,
+ .ep = {
+ .name = "ep3-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 3,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ },
+ .ep[4] = {
+ .num = 4,
+ .ep = {
+ .name = "ep4-bulk",
+ .ops = &s3c2410_ep_ops,
+ .maxpacket = EP_FIFO_SIZE,
+ },
+ .dev = &memory,
+ .fifo_size = EP_FIFO_SIZE,
+ .bEndpointAddress = 4,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ }
+
+};
+
+/*
+ * probe - binds to the platform device
+ */
+static int s3c2410_udc_probe(struct platform_device *pdev)
+{
+ struct s3c2410_udc *udc = &memory;
+ struct device *dev = &pdev->dev;
+ int retval;
+ unsigned int irq;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ usb_bus_clock = clk_get(NULL, "usb-bus-gadget");
+ if (IS_ERR(usb_bus_clock)) {
+ dev_err(dev, "failed to get usb bus clock source\n");
+ return PTR_ERR(usb_bus_clock);
+ }
+
+ clk_enable(usb_bus_clock);
+
+ udc_clock = clk_get(NULL, "usb-device");
+ if (IS_ERR(udc_clock)) {
+ dev_err(dev, "failed to get udc clock source\n");
+ return PTR_ERR(udc_clock);
+ }
+
+ clk_enable(udc_clock);
+
+ mdelay(10);
+
+ dev_dbg(dev, "got and enabled clocks\n");
+
+ if (strncmp(pdev->name, "s3c2440", 7) == 0) {
+ dev_info(dev, "S3C2440: increasing FIFO to 128 bytes\n");
+ memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE;
+ memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE;
+ memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE;
+ memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE;
+ }
+
+ spin_lock_init (&udc->lock);
+ udc_info = pdev->dev.platform_data;
+
+ rsrc_start = S3C2410_PA_USBDEV;
+ rsrc_len = S3C24XX_SZ_USBDEV;
+
+ if (!request_mem_region(rsrc_start, rsrc_len, gadget_name))
+ return -EBUSY;
+
+ base_addr = ioremap(rsrc_start, rsrc_len);
+ if (!base_addr) {
+ retval = -ENOMEM;
+ goto err_mem;
+ }
+
+ device_initialize(&udc->gadget.dev);
+ udc->gadget.dev.parent = &pdev->dev;
+ udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
+
+ the_controller = udc;
+ platform_set_drvdata(pdev, udc);
+
+ s3c2410_udc_disable(udc);
+ s3c2410_udc_reinit(udc);
+
+ /* irq setup after old hardware state is cleaned up */
+ retval = request_irq(IRQ_USBD, s3c2410_udc_irq,
+ IRQF_DISABLED, gadget_name, udc);
+
+ if (retval != 0) {
+ dev_err(dev, "cannot get irq %i, err %d\n", IRQ_USBD, retval);
+ retval = -EBUSY;
+ goto err_map;
+ }
+
+ dev_dbg(dev, "got irq %i\n", IRQ_USBD);
+
+ if (udc_info && udc_info->vbus_pin > 0) {
+ irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+ retval = request_irq(irq, s3c2410_udc_vbus_irq,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING
+ | IRQF_TRIGGER_FALLING,
+ gadget_name, udc);
+
+ if (retval != 0) {
+ dev_err(dev, "can't get vbus irq %i, err %d\n",
+ irq, retval);
+ retval = -EBUSY;
+ goto err_int;
+ }
+
+ dev_dbg(dev, "got irq %i\n", irq);
+ } else {
+ udc->vbus = 1;
+ }
+
+ if (s3c2410_udc_debugfs_root) {
+ udc->regs_info = debugfs_create_file("registers", S_IRUGO,
+ s3c2410_udc_debugfs_root,
+ udc, &s3c2410_udc_debugfs_fops);
+ if (IS_ERR(udc->regs_info)) {
+ dev_warn(dev, "debugfs file creation failed %ld\n",
+ PTR_ERR(udc->regs_info));
+ udc->regs_info = NULL;
+ }
+ }
+
+ dev_dbg(dev, "probe ok\n");
+
+ return 0;
+
+err_int:
+ free_irq(IRQ_USBD, udc);
+err_map:
+ iounmap(base_addr);
+err_mem:
+ release_mem_region(rsrc_start, rsrc_len);
+
+ return retval;
+}
+
+/*
+ * s3c2410_udc_remove
+ */
+static int s3c2410_udc_remove(struct platform_device *pdev)
+{
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+ unsigned int irq;
+
+ dev_dbg(&pdev->dev, "%s()\n", __func__);
+ if (udc->driver)
+ return -EBUSY;
+
+ debugfs_remove(udc->regs_info);
+
+ if (udc_info && udc_info->vbus_pin > 0) {
+ irq = s3c2410_gpio_getirq(udc_info->vbus_pin);
+ free_irq(irq, udc);
+ }
+
+ free_irq(IRQ_USBD, udc);
+
+ iounmap(base_addr);
+ release_mem_region(rsrc_start, rsrc_len);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (!IS_ERR(udc_clock) && udc_clock != NULL) {
+ clk_disable(udc_clock);
+ clk_put(udc_clock);
+ udc_clock = NULL;
+ }
+
+ if (!IS_ERR(usb_bus_clock) && usb_bus_clock != NULL) {
+ clk_disable(usb_bus_clock);
+ clk_put(usb_bus_clock);
+ usb_bus_clock = NULL;
+ }
+
+ dev_dbg(&pdev->dev, "%s: remove ok\n", __func__);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ if (udc_info && udc_info->udc_command)
+ udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+
+ return 0;
+}
+
+static int s3c2410_udc_resume(struct platform_device *pdev)
+{
+ if (udc_info && udc_info->udc_command)
+ udc_info->udc_command(S3C2410_UDC_P_ENABLE);
+
+ return 0;
+}
+#else
+#define s3c2410_udc_suspend NULL
+#define s3c2410_udc_resume NULL
+#endif
+
+static struct platform_driver udc_driver_2410 = {
+ .driver = {
+ .name = "s3c2410-usbgadget",
+ .owner = THIS_MODULE,
+ },
+ .probe = s3c2410_udc_probe,
+ .remove = s3c2410_udc_remove,
+ .suspend = s3c2410_udc_suspend,
+ .resume = s3c2410_udc_resume,
+};
+
+static struct platform_driver udc_driver_2440 = {
+ .driver = {
+ .name = "s3c2440-usbgadget",
+ .owner = THIS_MODULE,
+ },
+ .probe = s3c2410_udc_probe,
+ .remove = s3c2410_udc_remove,
+ .suspend = s3c2410_udc_suspend,
+ .resume = s3c2410_udc_resume,
+};
+
+static int __init udc_init(void)
+{
+ int retval;
+
+ dprintk(DEBUG_NORMAL, "%s: version %s\n", gadget_name, DRIVER_VERSION);
+
+ s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
+ if (IS_ERR(s3c2410_udc_debugfs_root)) {
+ printk(KERN_ERR "%s: debugfs dir creation failed %ld\n",
+ gadget_name, PTR_ERR(s3c2410_udc_debugfs_root));
+ s3c2410_udc_debugfs_root = NULL;
+ }
+
+ retval = platform_driver_register(&udc_driver_2410);
+ if (retval)
+ goto err;
+
+ retval = platform_driver_register(&udc_driver_2440);
+ if (retval)
+ goto err;
+
+ return 0;
+
+err:
+ debugfs_remove(s3c2410_udc_debugfs_root);
+ return retval;
+}
+
+static void __exit udc_exit(void)
+{
+ platform_driver_unregister(&udc_driver_2410);
+ platform_driver_unregister(&udc_driver_2440);
+ debugfs_remove(s3c2410_udc_debugfs_root);
+}
+
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+module_init(udc_init);
+module_exit(udc_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
new file mode 100644
index 000000000000..9e0bece4f241
--- /dev/null
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -0,0 +1,110 @@
+/*
+ * linux/drivers/usb/gadget/s3c2410_udc.h
+ * Samsung on-chip full speed USB device controllers
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
+ * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _S3C2410_UDC_H
+#define _S3C2410_UDC_H
+
+struct s3c2410_ep {
+ struct list_head queue;
+ unsigned long last_io; /* jiffies timestamp */
+ struct usb_gadget *gadget;
+ struct s3c2410_udc *dev;
+ const struct usb_endpoint_descriptor *desc;
+ struct usb_ep ep;
+ u8 num;
+
+ unsigned short fifo_size;
+ u8 bEndpointAddress;
+ u8 bmAttributes;
+
+ unsigned halted : 1;
+ unsigned already_seen : 1;
+ unsigned setup_stage : 1;
+};
+
+
+/* Warning: ep0 has a fifo of 16 bytes. Don't try to set
+ * 32 or 64; also testusb 14 fails with 16 but is fine with 8.
+ */
+#define EP0_FIFO_SIZE 8
+#define EP_FIFO_SIZE 64
+#define DEFAULT_POWER_STATE 0x00
+
+#define S3C2440_EP_FIFO_SIZE 128
+
+static const char ep0name [] = "ep0";
+
+static const char *const ep_name[] = {
+ ep0name, /* everyone has ep0 */
+ /* s3c2410 four bidirectional bulk endpoints */
+ "ep1-bulk", "ep2-bulk", "ep3-bulk", "ep4-bulk",
+};
+
+#define S3C2410_ENDPOINTS ARRAY_SIZE(ep_name)
+
+struct s3c2410_request {
+ struct list_head queue; /* ep's requests */
+ struct usb_request req;
+};
+
+enum ep0_state {
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE,
+ EP0_OUT_DATA_PHASE,
+ EP0_END_XFER,
+ EP0_STALL,
+};
+
+static const char *ep0states[]= {
+ "EP0_IDLE",
+ "EP0_IN_DATA_PHASE",
+ "EP0_OUT_DATA_PHASE",
+ "EP0_END_XFER",
+ "EP0_STALL",
+};
+
+struct s3c2410_udc {
+ spinlock_t lock;
+
+ struct s3c2410_ep ep[S3C2410_ENDPOINTS];
+ int address;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct s3c2410_request fifo_req;
+ u8 fifo_buf[EP_FIFO_SIZE];
+ u16 devstatus;
+
+ u32 port_status;
+ int ep0state;
+
+ unsigned got_irq : 1;
+
+ unsigned req_std : 1;
+ unsigned req_config : 1;
+ unsigned req_pending : 1;
+ u8 vbus;
+ struct dentry *regs_info;
+};
+
+#endif
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index f847c3414be3..9cd98e73dc1d 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -33,6 +33,7 @@
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -258,7 +259,7 @@ static const char *EP_IN_NAME;
static const char *EP_OUT_NAME;
static const char *EP_NOTIFY_NAME;
-static struct semaphore gs_open_close_sem[GS_NUM_PORTS];
+static struct mutex gs_open_close_lock[GS_NUM_PORTS];
static unsigned int read_q_size = GS_DEFAULT_READ_Q_SIZE;
static unsigned int write_q_size = GS_DEFAULT_WRITE_Q_SIZE;
@@ -595,7 +596,7 @@ static int __init gs_module_init(void)
tty_set_operations(gs_tty_driver, &gs_tty_ops);
for (i=0; i < GS_NUM_PORTS; i++)
- sema_init(&gs_open_close_sem[i], 1);
+ mutex_init(&gs_open_close_lock[i]);
retval = tty_register_driver(gs_tty_driver);
if (retval) {
@@ -635,7 +636,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
struct gs_port *port;
struct gs_dev *dev;
struct gs_buf *buf;
- struct semaphore *sem;
+ struct mutex *mtx;
int ret;
port_num = tty->index;
@@ -656,10 +657,10 @@ static int gs_open(struct tty_struct *tty, struct file *file)
return -ENODEV;
}
- sem = &gs_open_close_sem[port_num];
- if (down_interruptible(sem)) {
+ mtx = &gs_open_close_lock[port_num];
+ if (mutex_lock_interruptible(mtx)) {
printk(KERN_ERR
- "gs_open: (%d,%p,%p) interrupted waiting for semaphore\n",
+ "gs_open: (%d,%p,%p) interrupted waiting for mutex\n",
port_num, tty, file);
return -ERESTARTSYS;
}
@@ -754,12 +755,12 @@ static int gs_open(struct tty_struct *tty, struct file *file)
exit_unlock_port:
spin_unlock_irqrestore(&port->port_lock, flags);
- up(sem);
+ mutex_unlock(mtx);
return ret;
exit_unlock_dev:
spin_unlock_irqrestore(&dev->dev_lock, flags);
- up(sem);
+ mutex_unlock(mtx);
return ret;
}
@@ -781,7 +782,7 @@ exit_unlock_dev:
static void gs_close(struct tty_struct *tty, struct file *file)
{
struct gs_port *port = tty->driver_data;
- struct semaphore *sem;
+ struct mutex *mtx;
if (port == NULL) {
printk(KERN_ERR "gs_close: NULL port pointer\n");
@@ -790,8 +791,8 @@ static void gs_close(struct tty_struct *tty, struct file *file)
gs_debug("gs_close: (%d,%p,%p)\n", port->port_num, tty, file);
- sem = &gs_open_close_sem[port->port_num];
- down(sem);
+ mtx = &gs_open_close_lock[port->port_num];
+ mutex_lock(mtx);
spin_lock_irq(&port->port_lock);
@@ -846,7 +847,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
exit:
spin_unlock_irq(&port->port_lock);
- up(sem);
+ mutex_unlock(mtx);
}
/*
@@ -1427,7 +1428,7 @@ static int __init gs_bind(struct usb_gadget *gadget)
gs_acm_config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- gs_device = dev = kmalloc(sizeof(struct gs_dev), GFP_KERNEL);
+ gs_device = dev = kzalloc(sizeof(struct gs_dev), GFP_KERNEL);
if (dev == NULL)
return -ENOMEM;
@@ -1435,7 +1436,6 @@ static int __init gs_bind(struct usb_gadget *gadget)
init_utsname()->sysname, init_utsname()->release,
gadget->name);
- memset(dev, 0, sizeof(struct gs_dev));
dev->dev_gadget = gadget;
spin_lock_init(&dev->dev_lock);
INIT_LIST_HEAD(&dev->dev_req_list);
@@ -2215,7 +2215,7 @@ static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags)
*
* Free the buffer and all associated memory.
*/
-void gs_buf_free(struct gs_buf *gb)
+static void gs_buf_free(struct gs_buf *gb)
{
if (gb) {
kfree(gb->buf_buf);
@@ -2228,7 +2228,7 @@ void gs_buf_free(struct gs_buf *gb)
*
* Clear out all data in the circular buffer.
*/
-void gs_buf_clear(struct gs_buf *gb)
+static void gs_buf_clear(struct gs_buf *gb)
{
if (gb != NULL)
gb->buf_get = gb->buf_put;
@@ -2241,7 +2241,7 @@ void gs_buf_clear(struct gs_buf *gb)
* Return the number of bytes of data available in the circular
* buffer.
*/
-unsigned int gs_buf_data_avail(struct gs_buf *gb)
+static unsigned int gs_buf_data_avail(struct gs_buf *gb)
{
if (gb != NULL)
return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
@@ -2255,7 +2255,7 @@ unsigned int gs_buf_data_avail(struct gs_buf *gb)
* Return the number of bytes of space available in the circular
* buffer.
*/
-unsigned int gs_buf_space_avail(struct gs_buf *gb)
+static unsigned int gs_buf_space_avail(struct gs_buf *gb)
{
if (gb != NULL)
return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
@@ -2271,7 +2271,8 @@ unsigned int gs_buf_space_avail(struct gs_buf *gb)
*
* Return the number of bytes copied.
*/
-unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
+static unsigned int
+gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
{
unsigned int len;
@@ -2309,7 +2310,8 @@ unsigned int gs_buf_put(struct gs_buf *gb, const char *buf, unsigned int count)
*
* Return the number of bytes copied.
*/
-unsigned int gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
+static unsigned int
+gs_buf_get(struct gs_buf *gb, char *buf, unsigned int count)
{
unsigned int len;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 7078374d0b79..a2e6e3fc8c8d 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -481,8 +481,7 @@ alloc_ep_req (struct usb_ep *ep, unsigned length)
req = usb_ep_alloc_request (ep, GFP_ATOMIC);
if (req) {
req->length = length;
- req->buf = usb_ep_alloc_buffer (ep, length,
- &req->dma, GFP_ATOMIC);
+ req->buf = kmalloc(length, GFP_ATOMIC);
if (!req->buf) {
usb_ep_free_request (ep, req);
req = NULL;
@@ -493,8 +492,7 @@ alloc_ep_req (struct usb_ep *ep, unsigned length)
static void free_ep_req (struct usb_ep *ep, struct usb_request *req)
{
- if (req->buf)
- usb_ep_free_buffer (ep, req->buf, req->dma, req->length);
+ kfree(req->buf);
usb_ep_free_request (ep, req);
}
@@ -1199,8 +1197,7 @@ autoconf_fail:
dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
if (!dev->req)
goto enomem;
- dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ,
- &dev->req->dma, GFP_KERNEL);
+ dev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
if (!dev->req->buf)
goto enomem;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 62711870f8ee..2f529828c74d 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -69,8 +69,20 @@ config USB_EHCI_TT_NEWSCHED
config USB_EHCI_BIG_ENDIAN_MMIO
bool
- depends on USB_EHCI_HCD
- default n
+ depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX)
+ default y
+
+config USB_EHCI_BIG_ENDIAN_DESC
+ bool
+ depends on USB_EHCI_HCD && 440EPX
+ default y
+
+config USB_EHCI_FSL
+ bool
+ select USB_EHCI_ROOT_HUB_TT
+ default y if MPC834x || PPC_MPC831x
+ ---help---
+ Variation of ARC USB block used in some Freescale chips.
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
@@ -224,3 +236,15 @@ config USB_SL811_CS
To compile this driver as a module, choose M here: the
module will be called "sl811_cs".
+config USB_R8A66597_HCD
+	tristate "R8A66597 HCD support"
+ depends on USB
+ help
+ The R8A66597 is a USB 2.0 host and peripheral controller.
+
+ Enable this option if your board has this chip, and you want
+ to use it as a host controller. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called r8a66597-hcd.
+
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 2ff396bd180f..bb8e9d44f371 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -15,3 +15,5 @@ obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
+obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
+
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 43eddaecc3dd..c9cc4413198e 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -52,7 +52,7 @@ static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
HCS_INDICATOR (params) ? " ind" : "",
HCS_N_CC (params),
HCS_N_PCC (params),
- HCS_PORTROUTED (params) ? "" : " ordered",
+ HCS_PORTROUTED (params) ? "" : " ordered",
HCS_PPC (params) ? "" : " !ppc",
HCS_N_PORTS (params)
);
@@ -91,20 +91,20 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
if (HCC_ISOC_CACHE (params)) {
ehci_dbg (ehci,
- "%s hcc_params %04x caching frame %s%s%s\n",
- label, params,
- HCC_PGM_FRAMELISTLEN (params) ? "256/512/1024" : "1024",
- HCC_CANPARK (params) ? " park" : "",
- HCC_64BIT_ADDR (params) ? " 64 bit addr" : "");
+ "%s hcc_params %04x caching frame %s%s%s\n",
+ label, params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "",
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
} else {
ehci_dbg (ehci,
- "%s hcc_params %04x thresh %d uframes %s%s%s\n",
- label,
- params,
- HCC_ISOC_THRES (params),
- HCC_PGM_FRAMELISTLEN (params) ? "256/512/1024" : "1024",
- HCC_CANPARK (params) ? " park" : "",
- HCC_64BIT_ADDR (params) ? " 64 bit addr" : "");
+ "%s hcc_params %04x thresh %d uframes %s%s%s\n",
+ label,
+ params,
+ HCC_ISOC_THRES(params),
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "",
+ HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
}
}
#else
@@ -115,23 +115,23 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
#ifdef DEBUG
-static void __attribute__((__unused__))
+static void __maybe_unused
dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
- ehci_dbg (ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
- le32_to_cpup (&qtd->hw_next),
- le32_to_cpup (&qtd->hw_alt_next),
- le32_to_cpup (&qtd->hw_token),
- le32_to_cpup (&qtd->hw_buf [0]));
+ ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ hc32_to_cpup(ehci, &qtd->hw_next),
+ hc32_to_cpup(ehci, &qtd->hw_alt_next),
+ hc32_to_cpup(ehci, &qtd->hw_token),
+ hc32_to_cpup(ehci, &qtd->hw_buf [0]));
if (qtd->hw_buf [1])
- ehci_dbg (ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
- le32_to_cpup (&qtd->hw_buf [1]),
- le32_to_cpup (&qtd->hw_buf [2]),
- le32_to_cpup (&qtd->hw_buf [3]),
- le32_to_cpup (&qtd->hw_buf [4]));
+ ehci_dbg(ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+ hc32_to_cpup(ehci, &qtd->hw_buf[1]),
+ hc32_to_cpup(ehci, &qtd->hw_buf[2]),
+ hc32_to_cpup(ehci, &qtd->hw_buf[3]),
+ hc32_to_cpup(ehci, &qtd->hw_buf[4]));
}
-static void __attribute__((__unused__))
+static void __maybe_unused
dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
@@ -140,51 +140,53 @@ dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next);
}
-static void __attribute__((__unused__))
+static void __maybe_unused
dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
{
ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n",
- label, itd->frame, itd, le32_to_cpu(itd->hw_next), itd->urb);
+ label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
+ itd->urb);
ehci_dbg (ehci,
" trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- le32_to_cpu(itd->hw_transaction[0]),
- le32_to_cpu(itd->hw_transaction[1]),
- le32_to_cpu(itd->hw_transaction[2]),
- le32_to_cpu(itd->hw_transaction[3]),
- le32_to_cpu(itd->hw_transaction[4]),
- le32_to_cpu(itd->hw_transaction[5]),
- le32_to_cpu(itd->hw_transaction[6]),
- le32_to_cpu(itd->hw_transaction[7]));
+ hc32_to_cpu(ehci, itd->hw_transaction[0]),
+ hc32_to_cpu(ehci, itd->hw_transaction[1]),
+ hc32_to_cpu(ehci, itd->hw_transaction[2]),
+ hc32_to_cpu(ehci, itd->hw_transaction[3]),
+ hc32_to_cpu(ehci, itd->hw_transaction[4]),
+ hc32_to_cpu(ehci, itd->hw_transaction[5]),
+ hc32_to_cpu(ehci, itd->hw_transaction[6]),
+ hc32_to_cpu(ehci, itd->hw_transaction[7]));
ehci_dbg (ehci,
" buf: %08x %08x %08x %08x %08x %08x %08x\n",
- le32_to_cpu(itd->hw_bufp[0]),
- le32_to_cpu(itd->hw_bufp[1]),
- le32_to_cpu(itd->hw_bufp[2]),
- le32_to_cpu(itd->hw_bufp[3]),
- le32_to_cpu(itd->hw_bufp[4]),
- le32_to_cpu(itd->hw_bufp[5]),
- le32_to_cpu(itd->hw_bufp[6]));
+ hc32_to_cpu(ehci, itd->hw_bufp[0]),
+ hc32_to_cpu(ehci, itd->hw_bufp[1]),
+ hc32_to_cpu(ehci, itd->hw_bufp[2]),
+ hc32_to_cpu(ehci, itd->hw_bufp[3]),
+ hc32_to_cpu(ehci, itd->hw_bufp[4]),
+ hc32_to_cpu(ehci, itd->hw_bufp[5]),
+ hc32_to_cpu(ehci, itd->hw_bufp[6]));
ehci_dbg (ehci, " index: %d %d %d %d %d %d %d %d\n",
itd->index[0], itd->index[1], itd->index[2],
itd->index[3], itd->index[4], itd->index[5],
itd->index[6], itd->index[7]);
}
-static void __attribute__((__unused__))
+static void __maybe_unused
dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
- label, sitd->frame, sitd, le32_to_cpu(sitd->hw_next), sitd->urb);
+ label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
+ sitd->urb);
ehci_dbg (ehci,
" addr %08x sched %04x result %08x buf %08x %08x\n",
- le32_to_cpu(sitd->hw_fullspeed_ep),
- le32_to_cpu(sitd->hw_uframe),
- le32_to_cpu(sitd->hw_results),
- le32_to_cpu(sitd->hw_buf [0]),
- le32_to_cpu(sitd->hw_buf [1]));
+ hc32_to_cpu(ehci, sitd->hw_fullspeed_ep),
+ hc32_to_cpu(ehci, sitd->hw_uframe),
+ hc32_to_cpu(ehci, sitd->hw_results),
+ hc32_to_cpu(ehci, sitd->hw_buf[0]),
+ hc32_to_cpu(ehci, sitd->hw_buf[1]));
}
-static int __attribute__((__unused__))
+static int __maybe_unused
dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
{
return scnprintf (buf, len,
@@ -203,7 +205,7 @@ dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
);
}
-static int __attribute__((__unused__))
+static int __maybe_unused
dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
{
return scnprintf (buf, len,
@@ -267,28 +269,27 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
(status & PORT_PEC) ? " PEC" : "",
(status & PORT_PE) ? " PE" : "",
(status & PORT_CSC) ? " CSC" : "",
- (status & PORT_CONNECT) ? " CONNECT" : ""
- );
+ (status & PORT_CONNECT) ? " CONNECT" : "");
}
#else
-static inline void __attribute__((__unused__))
+static inline void __maybe_unused
dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{}
-static inline int __attribute__((__unused__))
+static inline int __maybe_unused
dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
{ return 0; }
-static inline int __attribute__((__unused__))
+static inline int __maybe_unused
dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
{ return 0; }
-static inline int __attribute__((__unused__))
+static inline int __maybe_unused
dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }
-static inline int __attribute__((__unused__))
+static inline int __maybe_unused
dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
@@ -332,9 +333,10 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
default: tmp = '?'; break; \
}; tmp; })
-static inline char token_mark (__le32 token)
+static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
{
- __u32 v = le32_to_cpu (token);
+ __u32 v = hc32_to_cpu(ehci, token);
+
if (v & QTD_STS_ACTIVE)
return '*';
if (v & QTD_STS_HALT)
@@ -360,46 +362,48 @@ static void qh_lines (
unsigned size = *sizep;
char *next = *nextp;
char mark;
+ u32 list_end = EHCI_LIST_END(ehci);
- if (qh->hw_qtd_next == EHCI_LIST_END) /* NEC does this */
+ if (qh->hw_qtd_next == list_end) /* NEC does this */
mark = '@';
else
- mark = token_mark (qh->hw_token);
+ mark = token_mark(ehci, qh->hw_token);
if (mark == '/') { /* qh_alt_next controls qh advance? */
- if ((qh->hw_alt_next & QTD_MASK) == ehci->async->hw_alt_next)
+ if ((qh->hw_alt_next & QTD_MASK(ehci))
+ == ehci->async->hw_alt_next)
mark = '#'; /* blocked */
- else if (qh->hw_alt_next == EHCI_LIST_END)
+ else if (qh->hw_alt_next == list_end)
mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
- scratch = le32_to_cpup (&qh->hw_info1);
- hw_curr = (mark == '*') ? le32_to_cpup (&qh->hw_current) : 0;
+ scratch = hc32_to_cpup(ehci, &qh->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0;
temp = scnprintf (next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
- scratch, le32_to_cpup (&qh->hw_info2),
- le32_to_cpup (&qh->hw_token), mark,
- (__constant_cpu_to_le32 (QTD_TOGGLE) & qh->hw_token)
+ scratch, hc32_to_cpup(ehci, &qh->hw_info2),
+ hc32_to_cpup(ehci, &qh->hw_token), mark,
+ (cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token)
? "data1" : "data0",
- (le32_to_cpup (&qh->hw_alt_next) >> 1) & 0x0f);
+ (hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f);
size -= temp;
next += temp;
/* hc may be modifying the list as we read it ... */
list_for_each (entry, &qh->qtd_list) {
td = list_entry (entry, struct ehci_qtd, qtd_list);
- scratch = le32_to_cpup (&td->hw_token);
+ scratch = hc32_to_cpup(ehci, &td->hw_token);
mark = ' ';
if (hw_curr == td->qtd_dma)
mark = '*';
- else if (qh->hw_qtd_next == cpu_to_le32(td->qtd_dma))
+ else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
mark = '+';
else if (QTD_LENGTH (scratch)) {
if (td->hw_alt_next == ehci->async->hw_alt_next)
mark = '#';
- else if (td->hw_alt_next != EHCI_LIST_END)
+ else if (td->hw_alt_next != list_end)
mark = '/';
}
temp = snprintf (next, size,
@@ -490,7 +494,7 @@ show_periodic (struct class_device *class_dev, char *buf)
unsigned temp, size, seen_count;
char *next;
unsigned i;
- __le32 tag;
+ __hc32 tag;
if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
return 0;
@@ -514,18 +518,19 @@ show_periodic (struct class_device *class_dev, char *buf)
p = ehci->pshadow [i];
if (likely (!p.ptr))
continue;
- tag = Q_NEXT_TYPE (ehci->periodic [i]);
+ tag = Q_NEXT_TYPE(ehci, ehci->periodic [i]);
temp = scnprintf (next, size, "%4d: ", i);
size -= temp;
next += temp;
do {
- switch (tag) {
+ switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
temp = scnprintf (next, size, " qh%d-%04x/%p",
p.qh->period,
- le32_to_cpup (&p.qh->hw_info2)
+ hc32_to_cpup(ehci,
+ &p.qh->hw_info2)
/* uframe masks */
& (QH_CMASK | QH_SMASK),
p.qh);
@@ -543,7 +548,7 @@ show_periodic (struct class_device *class_dev, char *buf)
}
/* show more info the first time around */
if (temp == seen_count && p.ptr) {
- u32 scratch = le32_to_cpup (
+ u32 scratch = hc32_to_cpup(ehci,
&p.qh->hw_info1);
struct ehci_qtd *qtd;
char *type = "";
@@ -554,7 +559,8 @@ show_periodic (struct class_device *class_dev, char *buf)
&p.qh->qtd_list,
qtd_list) {
temp++;
- switch (0x03 & (le32_to_cpu (
+ switch (0x03 & (hc32_to_cpu(
+ ehci,
qtd->hw_token) >> 8)) {
case 0: type = "out"; continue;
case 1: type = "in"; continue;
@@ -576,7 +582,7 @@ show_periodic (struct class_device *class_dev, char *buf)
} else
temp = 0;
if (p.qh) {
- tag = Q_NEXT_TYPE (p.qh->hw_next);
+ tag = Q_NEXT_TYPE(ehci, p.qh->hw_next);
p = p.qh->qh_next;
}
break;
@@ -584,23 +590,23 @@ show_periodic (struct class_device *class_dev, char *buf)
temp = scnprintf (next, size,
" fstn-%8x/%p", p.fstn->hw_prev,
p.fstn);
- tag = Q_NEXT_TYPE (p.fstn->hw_next);
+ tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
temp = scnprintf (next, size,
" itd/%p", p.itd);
- tag = Q_NEXT_TYPE (p.itd->hw_next);
+ tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
p = p.itd->itd_next;
break;
case Q_TYPE_SITD:
temp = scnprintf (next, size,
" sitd%d-%04x/%p",
p.sitd->stream->interval,
- le32_to_cpup (&p.sitd->hw_uframe)
+ hc32_to_cpup(ehci, &p.sitd->hw_uframe)
& 0x0000ffff,
p.sitd);
- tag = Q_NEXT_TYPE (p.sitd->hw_next);
+ tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next);
p = p.sitd->sitd_next;
break;
}
@@ -673,7 +679,8 @@ show_registers (struct class_device *class_dev, char *buf)
unsigned count = 256/4;
pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
- offset = HCC_EXT_CAPS (ehci_readl(ehci, &ehci->caps->hcc_params));
+ offset = HCC_EXT_CAPS(ehci_readl(ehci,
+ &ehci->caps->hcc_params));
while (offset && count--) {
pci_read_config_dword (pdev, offset, &cap);
switch (cap & 0xff) {
@@ -740,14 +747,16 @@ show_registers (struct class_device *class_dev, char *buf)
for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) {
temp = dbg_port_buf (scratch, sizeof scratch, label, i,
- ehci_readl(ehci, &ehci->regs->port_status [i - 1]));
+ ehci_readl(ehci,
+ &ehci->regs->port_status[i - 1]));
temp = scnprintf (next, size, fmt, temp, scratch);
size -= temp;
next += temp;
if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
temp = scnprintf (next, size,
" debug control %08x\n",
- ehci_readl(ehci, &ehci->debug->control));
+ ehci_readl(ehci,
+ &ehci->debug->control));
size -= temp;
next += temp;
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index c7a7c590426f..b7b7bfbce527 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -67,7 +67,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
* in host mode.
*/
if (!((pdata->operating_mode == FSL_USB2_DR_HOST) ||
- (pdata->operating_mode == FSL_USB2_MPH_HOST))) {
+ (pdata->operating_mode == FSL_USB2_MPH_HOST) ||
+ (pdata->operating_mode == FSL_USB2_DR_OTG))) {
dev_err(&pdev->dev,
"Non Host Mode configured for %s. Wrong driver linked.\n",
pdev->dev.bus_id);
@@ -185,12 +186,14 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct fsl_usb2_platform_data *pdata;
void __iomem *non_ehci = hcd->regs;
+ u32 temp;
pdata =
(struct fsl_usb2_platform_data *)hcd->self.controller->
platform_data;
/* Enable PHY interface in the control reg. */
- out_be32(non_ehci + FSL_SOC_USB_CTRL, 0x00000004);
+ temp = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+ out_be32(non_ehci + FSL_SOC_USB_CTRL, temp | 0x00000004);
out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
@@ -206,7 +209,8 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
#endif
- if (pdata->operating_mode == FSL_USB2_DR_HOST)
+ if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
+ (pdata->operating_mode == FSL_USB2_DR_OTG))
mpc83xx_setup_phy(ehci, pdata->phy_mode, 0);
if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 099aff64f536..c4e15ed1405a 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -41,10 +41,6 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
-#ifdef CONFIG_PPC_PS3
-#include <asm/firmware.h>
-#endif
-
/*-------------------------------------------------------------------------*/
@@ -201,9 +197,15 @@ static void tdi_reset (struct ehci_hcd *ehci)
u32 __iomem *reg_ptr;
u32 tmp;
- reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
+ reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
tmp = ehci_readl(ehci, reg_ptr);
- tmp |= 0x3;
+ tmp |= USBMODE_CM_HC;
+ /* The default byte access to MMR space is LE after
+ * controller reset. Set the required endian mode
+ * for transfer buffers to match the host microprocessor
+ */
+ if (ehci_big_endian_mmio(ehci))
+ tmp |= USBMODE_BE;
ehci_writel(ehci, tmp, reg_ptr);
}
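
USBMODE, USBMODE_CM_HC and USBMODE_BE are the named replacements for the raw 0x68 register offset and the 0x3 mode value that used to be hard-coded here. A hedged sketch of the definitions this relies on; the offset and host-mode value follow directly from the old code, while the exact bit position of USBMODE_BE is an assumption to check against ehci.h:

/* TDI/Transdimension "USB mode" register, per the constants removed above.
 * USBMODE_BE's bit position is an assumption, not copied from ehci.h.
 */
#define USBMODE		0x68		/* USB device/host mode register */
#define USBMODE_CM_HC	(3 << 0)	/* controller mode: host controller */
#define USBMODE_BE	(1 << 2)	/* big-endian byte ordering (assumed) */
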
@@ -273,6 +275,58 @@ static void ehci_work(struct ehci_hcd *ehci);
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_CPU_FREQ
+
+#include <linux/cpufreq.h>
+
+static void ehci_cpufreq_pause (struct ehci_hcd *ehci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+ if (!ehci->cpufreq_changing++)
+ qh_inactivate_split_intr_qhs(ehci);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+}
+
+static void ehci_cpufreq_unpause (struct ehci_hcd *ehci)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+ if (!--ehci->cpufreq_changing)
+ qh_reactivate_split_intr_qhs(ehci);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+}
+
+/*
+ * ehci_cpufreq_notifier is needed to avoid MMF (missed microframe) errors
+ * that occur when EHCI controllers that don't cache many uframes get delayed trying to
+ * read main memory during CPU frequency transitions. This can cause
+ * split interrupt transactions to not be completed in the required uframe.
+ * This has been observed on the Broadcom/ServerWorks HT1000 controller.
+ */
+static int ehci_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct ehci_hcd *ehci = container_of(nb, struct ehci_hcd,
+ cpufreq_transition);
+
+ switch (val) {
+ case CPUFREQ_PRECHANGE:
+ ehci_cpufreq_pause(ehci);
+ break;
+ case CPUFREQ_POSTCHANGE:
+ ehci_cpufreq_unpause(ehci);
+ break;
+ }
+ return 0;
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
static void ehci_watchdog (unsigned long param)
{
struct ehci_hcd *ehci = (struct ehci_hcd *) param;
@@ -347,6 +401,8 @@ static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
is_on ? SetPortFeature : ClearPortFeature,
USB_PORT_FEAT_POWER,
port--, NULL, 0);
+ /* Flush those writes */
+ ehci_readl(ehci, &ehci->regs->command);
msleep(20);
}
@@ -404,6 +460,10 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
spin_unlock_irq(&ehci->lock);
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&ehci->cpufreq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
/* let companion controllers work when we aren't */
ehci_writel(ehci, 0, &ehci->regs->configured_flag);
@@ -470,12 +530,12 @@ static int ehci_init(struct usb_hcd *hcd)
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = NULL;
- ehci->async->hw_next = QH_NEXT(ehci->async->qh_dma);
- ehci->async->hw_info1 = cpu_to_le32(QH_HEAD);
- ehci->async->hw_token = cpu_to_le32(QTD_STS_HALT);
- ehci->async->hw_qtd_next = EHCI_LIST_END;
+ ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
+ ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+ ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+ ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
- ehci->async->hw_alt_next = QTD_NEXT(ehci->async->dummy->qtd_dma);
+ ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
@@ -509,6 +569,17 @@ static int ehci_init(struct usb_hcd *hcd)
}
ehci->command = temp;
+#ifdef CONFIG_CPU_FREQ
+ INIT_LIST_HEAD(&ehci->split_intr_qhs);
+ /*
+ * If the EHCI controller caches enough uframes, this probably
+ * isn't needed unless there are so many low/full speed devices
+ * that the controller can't cache it all.
+ */
+ ehci->cpufreq_transition.notifier_call = ehci_cpufreq_notifier;
+ cpufreq_register_notifier(&ehci->cpufreq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+#endif
return 0;
}
@@ -925,7 +996,7 @@ MODULE_LICENSE ("GPL");
#define PCI_DRIVER ehci_pci_driver
#endif
-#ifdef CONFIG_MPC834x
+#ifdef CONFIG_USB_EHCI_FSL
#include "ehci-fsl.c"
#define PLATFORM_DRIVER ehci_fsl_driver
#endif
@@ -937,7 +1008,12 @@ MODULE_LICENSE ("GPL");
#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
-#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_sb_driver
+#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
+#endif
+
+#ifdef CONFIG_440EPX
+#include "ehci-ppc-soc.c"
+#define PLATFORM_DRIVER ehci_ppc_soc_driver
#endif
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
@@ -971,18 +1047,15 @@ static int __init ehci_hcd_init(void)
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
- if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- retval = ps3_system_bus_driver_register(
- &PS3_SYSTEM_BUS_DRIVER);
- if (retval < 0) {
+ retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
+ if (retval < 0) {
#ifdef PLATFORM_DRIVER
- platform_driver_unregister(&PLATFORM_DRIVER);
+ platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
- pci_unregister_driver(&PCI_DRIVER);
+ pci_unregister_driver(&PCI_DRIVER);
#endif
- return retval;
- }
+ return retval;
}
#endif
@@ -999,8 +1072,7 @@ static void __exit ehci_hcd_cleanup(void)
pci_unregister_driver(&PCI_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
- if (firmware_has_feature(FW_FEATURE_PS3_LV1))
- ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
}
module_exit(ehci_hcd_cleanup);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index f4d301bc83b9..0dcb4164dc83 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -28,6 +28,89 @@
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_PERSIST
+
+static int ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+);
+
+/* After a power loss, ports that were owned by the companion must be
+ * reset so that the companion can still own them.
+ */
+static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
+{
+ u32 __iomem *reg;
+ u32 status;
+ int port;
+ __le32 buf;
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
+
+ if (!ehci->owned_ports)
+ return;
+
+ /* Give the connections some time to appear */
+ msleep(20);
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ if (test_bit(port, &ehci->owned_ports)) {
+ reg = &ehci->regs->port_status[port];
+ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+
+ /* Port already owned by companion? */
+ if (status & PORT_OWNER)
+ clear_bit(port, &ehci->owned_ports);
+ else if (test_bit(port, &ehci->companion_ports))
+ ehci_writel(ehci, status & ~PORT_PE, reg);
+ else
+ ehci_hub_control(hcd, SetPortFeature,
+ USB_PORT_FEAT_RESET, port + 1,
+ NULL, 0);
+ }
+ }
+
+ if (!ehci->owned_ports)
+ return;
+ msleep(90); /* Wait for resets to complete */
+
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ if (test_bit(port, &ehci->owned_ports)) {
+ ehci_hub_control(hcd, GetPortStatus,
+ 0, port + 1,
+ (char *) &buf, sizeof(buf));
+
+ /* The companion should now own the port,
+ * but if something went wrong the port must not
+ * remain enabled.
+ */
+ reg = &ehci->regs->port_status[port];
+ status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+ if (status & PORT_OWNER)
+ ehci_writel(ehci, status | PORT_CSC, reg);
+ else {
+ ehci_dbg(ehci, "failed handover port %d: %x\n",
+ port + 1, status);
+ ehci_writel(ehci, status & ~PORT_PE, reg);
+ }
+ }
+ }
+
+ ehci->owned_ports = 0;
+}
+
+#else /* CONFIG_USB_PERSIST */
+
+static inline void ehci_handover_companion_ports(struct ehci_hcd *ehci)
+{ }
+
+#endif
+
#ifdef CONFIG_PM
static int ehci_bus_suspend (struct usb_hcd *hcd)
@@ -60,14 +143,16 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
* then manually resume them in the bus_resume() routine.
*/
ehci->bus_suspended = 0;
+ ehci->owned_ports = 0;
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
/* keep track of which ports we suspend */
- if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
- !(t1 & PORT_SUSPEND)) {
+ if (t1 & PORT_OWNER)
+ set_bit(port, &ehci->owned_ports);
+ else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
t2 |= PORT_SUSPEND;
set_bit(port, &ehci->bus_suspended);
}
@@ -108,11 +193,16 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
+ u32 power_okay;
int i;
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ spin_unlock_irq(&ehci->lock);
+ return -ESHUTDOWN;
+ }
/* Ideally we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
@@ -120,8 +210,9 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
* the last user of the controller, not reset/pm hardware keeping
* state we gave to it.
*/
- temp = ehci_readl(ehci, &ehci->regs->intr_enable);
- ehci_dbg(ehci, "resume root hub%s\n", temp ? "" : " after power loss");
+ power_okay = ehci_readl(ehci, &ehci->regs->intr_enable);
+ ehci_dbg(ehci, "resume root hub%s\n",
+ power_okay ? "" : " after power loss");
/* at least some APM implementations will try to deliver
* IRQs right away, so delay them until we're ready.
@@ -184,6 +275,9 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
spin_unlock_irq (&ehci->lock);
+
+ if (!power_okay)
+ ehci_handover_companion_ports(ehci);
return 0;
}
@@ -448,7 +542,8 @@ static int ehci_hub_control (
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int ports = HCS_N_PORTS (ehci->hcs_params);
- u32 __iomem *status_reg = &ehci->regs->port_status[wIndex - 1];
+ u32 __iomem *status_reg = &ehci->regs->port_status[
+ (wIndex & 0xff) - 1];
u32 temp, status;
unsigned long flags;
int retval = 0;
@@ -556,9 +651,24 @@ static int ehci_hub_control (
status |= 1 << USB_PORT_FEAT_C_CONNECTION;
if (temp & PORT_PEC)
status |= 1 << USB_PORT_FEAT_C_ENABLE;
- if ((temp & PORT_OCC) && !ignore_oc)
+
+ if ((temp & PORT_OCC) && !ignore_oc) {
status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
+ /*
+ * Hubs should disable port power on over-current.
+ * However, not all EHCI implementations do this
+ * automatically, even if they _do_ support per-port
+ * power switching; they're allowed to just limit the
+ * current. khubd will turn the power back on.
+ */
+ if (HCS_PPC (ehci->hcs_params)) {
+ ehci_writel(ehci,
+ temp & ~(PORT_RWC_BITS | PORT_POWER),
+ status_reg);
+ }
+ }
+
/* whoever resumes must GetPortStatus to complete it!! */
if (temp & PORT_RESUME) {
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index a8ba2e1497a4..8816d09903d0 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -27,7 +27,7 @@
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... single shot DMA mapped
*
- * There's also PCI "register" data, which is memory mapped.
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
* No memory seen by this driver is pageable.
*/
@@ -35,13 +35,14 @@
/* Allocate the key transfer structures from the previously allocated pool */
-static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma)
+static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
+ dma_addr_t dma)
{
memset (qtd, 0, sizeof *qtd);
qtd->qtd_dma = dma;
qtd->hw_token = cpu_to_le32 (QTD_STS_HALT);
- qtd->hw_next = EHCI_LIST_END;
- qtd->hw_alt_next = EHCI_LIST_END;
+ qtd->hw_next = EHCI_LIST_END(ehci);
+ qtd->hw_alt_next = EHCI_LIST_END(ehci);
INIT_LIST_HEAD (&qtd->qtd_list);
}
@@ -52,7 +53,7 @@ static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags)
qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma);
if (qtd != NULL) {
- ehci_qtd_init (qtd, dma);
+ ehci_qtd_init(ehci, qtd, dma);
}
return qtd;
}
@@ -63,9 +64,8 @@ static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
}
-static void qh_destroy (struct kref *kref)
+static void qh_destroy(struct ehci_qh *qh)
{
- struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
struct ehci_hcd *ehci = qh->ehci;
/* clean qtds first, and know this is not linked */
@@ -89,11 +89,14 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
return qh;
memset (qh, 0, sizeof *qh);
- kref_init(&qh->kref);
+ qh->refcount = 1;
qh->ehci = ehci;
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
+#ifdef CONFIG_CPU_FREQ
+ INIT_LIST_HEAD (&qh->split_intr_qhs);
+#endif
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc (ehci, flags);
@@ -108,13 +111,15 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
{
- kref_get(&qh->kref);
+ WARN_ON(!qh->refcount);
+ qh->refcount++;
return qh;
}
static inline void qh_put (struct ehci_qh *qh)
{
- kref_put(&qh->kref, qh_destroy);
+ if (!--qh->refcount)
+ qh_destroy(qh);
}
/*-------------------------------------------------------------------------*/
@@ -217,7 +222,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
goto fail;
}
for (i = 0; i < ehci->periodic_size; i++)
- ehci->periodic [i] = EHCI_LIST_END;
+ ehci->periodic [i] = EHCI_LIST_END(ehci);
/* software shadow of hardware table */
ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 12edc723ec73..a7816e392a85 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -149,8 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
* fixed in newer silicon.
*/
case 0x0068:
- pci_read_config_dword(pdev, PCI_REVISION_ID, &temp);
- if ((temp & 0xff) < 0xa4)
+ if (pdev->revision < 0xa4)
ehci->no_selective_suspend = 1;
break;
}
@@ -313,13 +312,14 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
ehci_work(ehci);
spin_unlock_irq(&ehci->lock);
- /* here we "know" root ports should always stay powered */
- ehci_port_power(ehci, 1);
-
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+ /* here we "know" root ports should always stay powered */
+ ehci_port_power(ehci, 1);
+ ehci_handover_companion_ports(ehci);
+
hcd->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ehci-ppc-soc.c b/drivers/usb/host/ehci-ppc-soc.c
new file mode 100644
index 000000000000..c2cedb09ed8b
--- /dev/null
+++ b/drivers/usb/host/ehci-ppc-soc.c
@@ -0,0 +1,182 @@
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * (C) Copyright 2006-2007 Stefan Roese <sr@denx.de>, DENX Software Engineering
+ *
+ * Bus Glue for PPC On-Chip EHCI driver
+ * Tested on AMCC 440EPx
+ *
+ * Based on "ehci-au12xx.c" by David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This file is licenced under the GPL.
+ */
+
+#include <linux/platform_device.h>
+
+extern int usb_disabled(void);
+
+/**
+ * usb_ehci_ppc_soc_probe - initialize PPC-SoC-based HCDs
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ *
+ */
+int usb_ehci_ppc_soc_probe(const struct hc_driver *driver,
+ struct usb_hcd **hcd_out,
+ struct platform_device *dev)
+{
+ int retval;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+
+ if (dev->resource[1].flags != IORESOURCE_IRQ) {
+ pr_debug("resource[1] is not IORESOURCE_IRQ");
+ return -ENOMEM;
+ }
+ hcd = usb_create_hcd(driver, &dev->dev, "PPC-SOC EHCI");
+ if (!hcd)
+ return -ENOMEM;
+ hcd->rsrc_start = dev->resource[0].start;
+ hcd->rsrc_len = dev->resource[0].end - dev->resource[0].start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ pr_debug("request_mem_region failed");
+ retval = -EBUSY;
+ goto err1;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ pr_debug("ioremap failed");
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+ ehci->caps = hcd->regs;
+ ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+#if defined(CONFIG_440EPX)
+ /*
+ * 440EPx Errata USBH_3
+ * Fix: Enable Break Memory Transfer (BMT) in INSNREG3
+ */
+ out_be32((void *)((ulong)(&ehci->regs->command) + 0x8c), (1 << 0));
+ ehci_dbg(ehci, "Break Memory Transfer (BMT) has been enabled!\n");
+#endif
+
+ retval = usb_add_hcd(hcd, dev->resource[1].start, IRQF_DISABLED);
+ if (retval == 0)
+ return retval;
+
+ iounmap(hcd->regs);
+err2:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+ usb_put_hcd(hcd);
+ return retval;
+}
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * usb_ehci_hcd_ppc_soc_remove - shutdown processing for PPC-SoC-based HCDs
+ * @dev: USB Host Controller being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_ehci_hcd_ppc_soc_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ *
+ */
+void usb_ehci_ppc_soc_remove(struct usb_hcd *hcd, struct platform_device *dev)
+{
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+static const struct hc_driver ehci_ppc_soc_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "PPC-SOC EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_init,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+ .hub_suspend = ehci_hub_suspend,
+ .hub_resume = ehci_hub_resume,
+#endif
+};
+
+static int ehci_hcd_ppc_soc_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = NULL;
+ int ret;
+
+ pr_debug("In ehci_hcd_ppc_soc_drv_probe\n");
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ ret = usb_ehci_ppc_soc_probe(&ehci_ppc_soc_hc_driver, &hcd, pdev);
+ return ret;
+}
+
+static int ehci_hcd_ppc_soc_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ usb_ehci_ppc_soc_remove(hcd, pdev);
+ return 0;
+}
+
+MODULE_ALIAS("ppc-soc-ehci");
+static struct platform_driver ehci_ppc_soc_driver = {
+ .probe = ehci_hcd_ppc_soc_drv_probe,
+ .remove = ehci_hcd_ppc_soc_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "ppc-soc-ehci",
+ .bus = &platform_bus_type
+ }
+};
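
usb_ehci_ppc_soc_probe() expects resource[0] to be the controller's register window and resource[1] to be its interrupt, so board code must register a platform device named "ppc-soc-ehci" that provides both. An illustrative registration with a placeholder base address and IRQ number, not taken from any real 440EPx board file:

/* Hypothetical board-setup fragment; 0xe0000300 and IRQ 35 are placeholders. */
#include <linux/platform_device.h>

static struct resource ppc_soc_ehci_resources[] = {
	[0] = {
		.start	= 0xe0000300,
		.end	= 0xe0000300 + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 35,
		.end	= 35,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device ppc_soc_ehci_device = {
	.name		= "ppc-soc-ehci",
	.id		= 0,
	.resource	= ppc_soc_ehci_resources,
	.num_resources	= ARRAY_SIZE(ppc_soc_ehci_resources),
};

/* called from the board's arch setup code */
static int __init board_add_ehci(void)
{
	return platform_device_register(&ppc_soc_ehci_device);
}
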
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 37b83ba09969..829fe649a981 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <asm/firmware.h>
#include <asm/ps3.h>
static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
@@ -73,7 +74,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
#endif
};
-static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev)
+static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
{
int result;
struct usb_hcd *hcd;
@@ -85,13 +86,30 @@ static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev)
goto fail_start;
}
+ result = ps3_open_hv_device(dev);
+
+ if (result) {
+ dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
+ __func__, __LINE__);
+ goto fail_open;
+ }
+
+ result = ps3_dma_region_create(dev->d_region);
+
+ if (result) {
+ dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
+ "(%d)\n", __func__, __LINE__, result);
+ BUG_ON("check region type");
+ goto fail_dma_region;
+ }
+
result = ps3_mmio_region_create(dev->m_region);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
__func__, __LINE__);
result = -EPERM;
- goto fail_mmio;
+ goto fail_mmio_region;
}
dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
@@ -120,6 +138,11 @@ static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev)
hcd->rsrc_start = dev->m_region->lpar_addr;
hcd->rsrc_len = dev->m_region->len;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
+ dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
+ __func__, __LINE__);
+
hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
if (!hcd->regs) {
@@ -153,34 +176,73 @@ static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev)
fail_add_hcd:
iounmap(hcd->regs);
fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
fail_create_hcd:
ps3_io_irq_destroy(virq);
fail_irq:
ps3_free_mmio_region(dev->m_region);
-fail_mmio:
+fail_mmio_region:
+ ps3_dma_region_free(dev->d_region);
+fail_dma_region:
+ ps3_close_hv_device(dev);
+fail_open:
fail_start:
return result;
}
-static int ps3_ehci_sb_remove(struct ps3_system_bus_device *dev)
+static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
{
+ unsigned int tmp;
struct usb_hcd *hcd =
(struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
- usb_put_hcd(hcd);
+ BUG_ON(!hcd);
+
+ dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
+ dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);
+
+ tmp = hcd->irq;
+
+ usb_remove_hcd(hcd);
+
ps3_system_bus_set_driver_data(dev, NULL);
+ BUG_ON(!hcd->regs);
+ iounmap(hcd->regs);
+
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ ps3_io_irq_destroy(tmp);
+ ps3_free_mmio_region(dev->m_region);
+
+ ps3_dma_region_free(dev->d_region);
+ ps3_close_hv_device(dev);
+
return 0;
}
-MODULE_ALIAS("ps3-ehci");
+static int ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
+{
+ return firmware_has_feature(FW_FEATURE_PS3_LV1)
+ ? ps3_system_bus_driver_register(drv)
+ : 0;
+}
+
+static void ps3_ehci_driver_unregister(struct ps3_system_bus_driver *drv)
+{
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1))
+ ps3_system_bus_driver_unregister(drv);
+}
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_EHCI);
-static struct ps3_system_bus_driver ps3_ehci_sb_driver = {
+static struct ps3_system_bus_driver ps3_ehci_driver = {
+ .core.name = "ps3-ehci-driver",
+ .core.owner = THIS_MODULE,
.match_id = PS3_MATCH_ID_EHCI,
- .core = {
- .name = "ps3-ehci-driver",
- },
- .probe = ps3_ehci_sb_probe,
- .remove = ps3_ehci_sb_remove,
+ .probe = ps3_ehci_probe,
+ .remove = ps3_ehci_remove,
+ .shutdown = ps3_ehci_remove,
};
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index e7fbbd00e7cd..2284028f8aa5 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -43,15 +43,15 @@
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
-qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
- int token, int maxpacket)
+qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
- qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
- qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
+ qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely (len < count)) /* ... iff needed */
count = len;
@@ -62,8 +62,9 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
- qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
- qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
+ qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
+ (u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
@@ -75,7 +76,7 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
if (count != len)
count -= (count % maxpacket);
}
- qtd->hw_token = cpu_to_le32 ((count << 16) | token);
+ qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
qtd->length = count;
return count;
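
To make the "from 16K to 20K" comment concrete: the first buffer entry only covers the remainder of the page that buf starts in, 0x1000 - (buf & 0x0fff) bytes, and the other four entries cover one full 4 KB page each. A page-aligned buffer therefore fits 4096 + 4 * 4096 = 20480 bytes in one qTD, while the worst alignment (buf at offset 0xfff within its page) fits only 1 + 16384 bytes. For any qTD that does not finish the transfer, count is then rounded down to a multiple of maxpacket so no packet is split across qTDs.
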
@@ -89,28 +90,28 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
- qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
- qh->hw_alt_next = EHCI_LIST_END;
+ qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ qh->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
- if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+ if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
unsigned is_out, epnum;
- is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
- epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+ is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
+ epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
+ qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle (qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
- qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
+ qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
@@ -128,7 +129,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
qtd = list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
- if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
+ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
qtd = NULL;
}
@@ -222,7 +223,7 @@ __acquires(ehci->lock)
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
- if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
+ if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@@ -277,7 +278,6 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
-#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
@@ -287,6 +287,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
unsigned count = 0;
int do_status = 0;
u8 state;
+ u32 halt = HALT_BIT(ehci);
if (unlikely (list_empty (&qh->qtd_list)))
return count;
@@ -311,6 +312,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
struct urb *urb;
u32 token = 0;
+ /* ignore QHs that are currently inactive */
+ if (qh->hw_info1 & __constant_cpu_to_le32(QH_INACTIVATE))
+ break;
+
qtd = list_entry (entry, struct ehci_qtd, qtd_list);
urb = qtd->urb;
@@ -330,7 +335,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* hardware copies qtd out of qh overlay */
rmb ();
- token = le32_to_cpu (qtd->hw_token);
+ token = hc32_to_cpu(ehci, qtd->hw_token);
/* always clean up qtds the hc de-activated */
if ((token & QTD_STS_ACTIVE) == 0) {
@@ -342,7 +347,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
* that silicon quirk can kick in with this dummy too.
*/
} else if (IS_SHORT_READ (token)
- && !(qtd->hw_alt_next & EHCI_LIST_END)) {
+ && !(qtd->hw_alt_next
+ & EHCI_LIST_END(ehci))) {
stopped = 1;
goto halt;
}
@@ -374,17 +380,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* token in overlay may be most current */
if (state == QH_STATE_IDLE
- && cpu_to_le32 (qtd->qtd_dma)
+ && cpu_to_hc32(ehci, qtd->qtd_dma)
== qh->hw_current)
- token = le32_to_cpu (qh->hw_token);
+ token = hc32_to_cpu(ehci, qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
- if ((HALT_BIT & qh->hw_token) == 0) {
+ if ((halt & qh->hw_token) == 0) {
halt:
- qh->hw_token |= HALT_BIT;
+ qh->hw_token |= halt;
wmb ();
}
}
@@ -419,7 +425,7 @@ halt:
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
- if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+ if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(ehci, qh);
@@ -428,7 +434,7 @@ halt:
/* should be rare for periodic transfers,
* except maybe high bandwidth ...
*/
- if ((__constant_cpu_to_le32 (QH_SMASK)
+ if ((cpu_to_hc32(ehci, QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule (ehci, qh);
(void) qh_schedule (ehci, qh);
@@ -502,8 +508,9 @@ qh_urb_transaction (
is_input = usb_pipein (urb->pipe);
if (usb_pipecontrol (urb->pipe)) {
/* SETUP pid */
- qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
- token | (2 /* "setup" */ << 8), 8);
+ qtd_fill(ehci, qtd, urb->setup_dma,
+ sizeof (struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
@@ -512,7 +519,7 @@ qh_urb_transaction (
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
@@ -539,7 +546,7 @@ qh_urb_transaction (
for (;;) {
int this_qtd_len;
- this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
+ this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
len -= this_qtd_len;
buf += this_qtd_len;
if (is_input)
@@ -557,7 +564,7 @@ qh_urb_transaction (
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
}
@@ -566,7 +573,7 @@ qh_urb_transaction (
*/
if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol (urb->pipe)))
- qtd->hw_alt_next = EHCI_LIST_END;
+ qtd->hw_alt_next = EHCI_LIST_END(ehci);
/*
* control requests may need a terminating data "status" ack;
@@ -590,17 +597,17 @@ qh_urb_transaction (
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* never any data in such packets */
- qtd_fill (qtd, 0, 0, token, 0);
+ qtd_fill(ehci, qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
- qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
+ qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
return head;
cleanup:
@@ -769,8 +776,8 @@ done:
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
- qh->hw_info1 = cpu_to_le32 (info1);
- qh->hw_info2 = cpu_to_le32 (info2);
+ qh->hw_info1 = cpu_to_hc32(ehci, info1);
+ qh->hw_info2 = cpu_to_hc32(ehci, info2);
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
@@ -782,7 +789,7 @@ done:
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- __le32 dma = QH_NEXT (qh->qh_dma);
+ __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
struct ehci_qh *head;
/* (re)start the async schedule? */
@@ -820,8 +827,6 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
-#define QH_ADDR_MASK __constant_cpu_to_le32(0x7f)
-
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
@@ -837,6 +842,7 @@ static struct ehci_qh *qh_append_tds (
)
{
struct ehci_qh *qh = NULL;
+ u32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
qh = (struct ehci_qh *) *ptr;
if (unlikely (qh == NULL)) {
@@ -858,7 +864,7 @@ static struct ehci_qh *qh_append_tds (
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
- qh->hw_info1 &= ~QH_ADDR_MASK;
+ qh->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
@@ -867,7 +873,7 @@ static struct ehci_qh *qh_append_tds (
if (likely (qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
- __le32 token;
+ __hc32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
@@ -875,7 +881,7 @@ static struct ehci_qh *qh_append_tds (
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
- qtd->hw_token = HALT_BIT;
+ qtd->hw_token = HALT_BIT(ehci);
wmb ();
dummy = qh->dummy;
@@ -887,14 +893,14 @@ static struct ehci_qh *qh_append_tds (
list_add (&dummy->qtd_list, qtd_list);
__list_splice (qtd_list, qh->qtd_list.prev);
- ehci_qtd_init (qtd, qtd->qtd_dma);
+ ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
- qtd->hw_next = QTD_NEXT (dma);
+ qtd->hw_next = QTD_NEXT(ehci, dma);
/* let the hc process these next qtds */
wmb ();
@@ -970,7 +976,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
timer_action_done (ehci, TIMER_IAA_WATCHDOG);
- // qh->hw_next = cpu_to_le32 (qh->qh_dma);
+ // qh->hw_next = cpu_to_hc32(qh->qh_dma);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put (qh); // refcount from reclaim
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 7b5ae7111f23..d4a8ace49676 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -44,9 +44,10 @@ static int ehci_get_frame (struct usb_hcd *hcd);
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *
-periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
+periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
+ __hc32 tag)
{
- switch (tag) {
+ switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
return &periodic->qh->qh_next;
case Q_TYPE_FSTN:
@@ -62,13 +63,14 @@ periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
- union ehci_shadow *prev_p = &ehci->pshadow [frame];
- __le32 *hw_p = &ehci->periodic [frame];
+ union ehci_shadow *prev_p = &ehci->pshadow[frame];
+ __hc32 *hw_p = &ehci->periodic[frame];
union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
- prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
+ prev_p = periodic_next_shadow(ehci, prev_p,
+ Q_NEXT_TYPE(ehci, *hw_p));
hw_p = here.hw_next;
here = *prev_p;
}
@@ -79,7 +81,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
/* update shadow and hardware lists ... the old "next" pointers
* from ptr may still be in use, the caller updates them.
*/
- *prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
+ *prev_p = *periodic_next_shadow(ehci, &here,
+ Q_NEXT_TYPE(ehci, *hw_p));
*hw_p = *here.hw_next;
}
@@ -87,18 +90,19 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
- __le32 *hw_p = &ehci->periodic [frame];
+ __hc32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned usecs = 0;
while (q->ptr) {
- switch (Q_NEXT_TYPE (*hw_p)) {
+ switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
case Q_TYPE_QH:
/* is it in the S-mask? */
- if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
+ if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
- if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
+ if (q->qh->hw_info2 & cpu_to_hc32(ehci,
+ 1 << (8 + uframe)))
usecs += q->qh->c_usecs;
hw_p = &q->qh->hw_next;
q = &q->qh->qh_next;
@@ -108,7 +112,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
/* for "save place" FSTNs, count the relevant INTR
* bandwidth from the previous frame
*/
- if (q->fstn->hw_prev != EHCI_LIST_END) {
+ if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
ehci_dbg (ehci, "ignoring FSTN cost ...\n");
}
hw_p = &q->fstn->hw_next;
@@ -121,9 +125,10 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
break;
case Q_TYPE_SITD:
/* is it in the S-mask? (count SPLIT, DATA) */
- if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
+ if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
+ 1 << uframe)) {
if (q->sitd->hw_fullspeed_ep &
- __constant_cpu_to_le32 (1<<31))
+ cpu_to_hc32(ehci, 1<<31))
usecs += q->sitd->stream->usecs;
else /* worst case for OUT start-split */
usecs += HS_USECS_ISO (188);
@@ -131,7 +136,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
/* ... C-mask? (count CSPLIT, DATA) */
if (q->sitd->hw_uframe &
- cpu_to_le32 (1 << (8 + uframe))) {
+ cpu_to_hc32(ehci, 1 << (8 + uframe))) {
/* worst case for IN complete-split */
usecs += q->sitd->stream->c_usecs;
}
@@ -173,9 +178,9 @@ static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
* will cause a transfer in "B-frame" uframe 0. "B-frames" lag
* "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
*/
-static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __le32 mask)
+static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
- unsigned char smask = QH_SMASK & le32_to_cpu(mask);
+ unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
if (!smask) {
ehci_err(ehci, "invalid empty smask!\n");
/* uframe 7 can't have bw so this will indicate failure */
@@ -217,14 +222,14 @@ periodic_tt_usecs (
unsigned short tt_usecs[8]
)
{
- __le32 *hw_p = &ehci->periodic [frame];
+ __hc32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned char uf;
memset(tt_usecs, 0, 16);
while (q->ptr) {
- switch (Q_NEXT_TYPE(*hw_p)) {
+ switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
case Q_TYPE_ITD:
hw_p = &q->itd->hw_next;
q = &q->itd->itd_next;
@@ -247,8 +252,8 @@ periodic_tt_usecs (
continue;
// case Q_TYPE_FSTN:
default:
- ehci_dbg(ehci,
- "ignoring periodic frame %d FSTN\n", frame);
+ ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
+ frame);
hw_p = &q->fstn->hw_next;
q = &q->fstn->fstn_next;
}
@@ -368,41 +373,42 @@ static int tt_no_collision (
*/
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
- __le32 type;
+ __hc32 type;
here = ehci->pshadow [frame];
- type = Q_NEXT_TYPE (ehci->periodic [frame]);
+ type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
while (here.ptr) {
- switch (type) {
+ switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_ITD:
- type = Q_NEXT_TYPE (here.itd->hw_next);
+ type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
here = here.itd->itd_next;
continue;
case Q_TYPE_QH:
if (same_tt (dev, here.qh->dev)) {
u32 mask;
- mask = le32_to_cpu (here.qh->hw_info2);
+ mask = hc32_to_cpu(ehci,
+ here.qh->hw_info2);
/* "knows" no gap is needed */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
- type = Q_NEXT_TYPE (here.qh->hw_next);
+ type = Q_NEXT_TYPE(ehci, here.qh->hw_next);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
if (same_tt (dev, here.sitd->urb->dev)) {
u16 mask;
- mask = le32_to_cpu (here.sitd
+ mask = hc32_to_cpu(ehci, here.sitd
->hw_uframe);
/* FIXME assumes no gap for IN! */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
- type = Q_NEXT_TYPE (here.sitd->hw_next);
+ type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
here = here.sitd->sitd_next;
continue;
// case Q_TYPE_FSTN:
@@ -473,6 +479,109 @@ static int disable_periodic (struct ehci_hcd *ehci)
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_CPU_FREQ
+
+static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ int now; /* current (frame * 8) + uframe */
+ int prev_start, next_start; /* uframes from/to split start */
+ int start_uframe = ffs(le32_to_cpup (&qh->hw_info2) & QH_SMASK);
+ int end_uframe = fls((le32_to_cpup (&qh->hw_info2) & QH_CMASK) >> 8);
+ int split_duration = end_uframe - start_uframe;
+
+ now = readl(&ehci->regs->frame_index) % (ehci->periodic_size << 3);
+
+ next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now)
+ % (qh->period << 3);
+ prev_start = (qh->period << 3) - next_start;
+
+ /*
+ * Make sure there will be at least one uframe when qh is safe.
+ */
+ if ((qh->period << 3) <= (ehci->i_thresh + 2 + split_duration))
+ /* never safe */
+ return -EINVAL;
+
+ /*
+ * Wait 1 uframe after transaction should have started, to make
+ * sure controller has time to write back overlay, so we can
+ * check QTD_STS_STS to see if transaction is in progress.
+ */
+ if ((next_start > ehci->i_thresh) && (prev_start > 1))
+ /* safe to set "i" bit if split isn't in progress */
+ return (qh->hw_token & STATUS_BIT(ehci)) ? 0 : 1;
+ else
+ return 0;
+}
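
A worked example with assumed numbers: take qh->period = 4 frames (32 uframes), qh->start = 0, an S-mask whose lowest set bit gives start_uframe = 1 and a C-mask giving end_uframe = 3, with now = 10 and ehci->i_thresh = 2. Then split_duration = 2 and the "never safe" test 32 <= 2 + 2 + 2 fails, next_start = (8192 + 0 + 1 - 10) % 32 = 23 uframes until the next start-split, and prev_start = 32 - 23 = 9 uframes since the previous one. Since 23 > i_thresh and 9 > 1, the QH is safe to mark inactive provided its overlay's split-transaction status bit (STATUS_BIT) is clear.
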
+
+/* Set inactivate bit for all the split interrupt QHs. */
+static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ int not_done, safe;
+ u32 inactivate = INACTIVATE_BIT(ehci);
+ u32 active = ACTIVE_BIT(ehci);
+
+ do {
+ not_done = 0;
+ list_for_each_entry(qh, &ehci->split_intr_qhs,
+ split_intr_qhs) {
+ if (qh->hw_info1 & inactivate)
+ /* already off */
+ continue;
+ /*
+ * To avoid setting "I" after the start split happens,
+ * don't set it if the QH might be cached in the
+ * controller. Some HCs (Broadcom/ServerWorks HT1000)
+ * will stop in the middle of a split transaction when
+ * the "I" bit is set.
+ */
+ safe = safe_to_modify_i(ehci, qh);
+ if (safe == 0) {
+ not_done = 1;
+ } else if (safe > 0) {
+ qh->was_active = qh->hw_token & active;
+ qh->hw_info1 |= inactivate;
+ }
+ }
+ } while (not_done);
+ wmb();
+}
+
+static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ u32 token;
+ int not_done, safe;
+ u32 inactivate = INACTIVATE_BIT(ehci);
+ u32 active = ACTIVE_BIT(ehci);
+ u32 halt = HALT_BIT(ehci);
+
+ do {
+ not_done = 0;
+ list_for_each_entry(qh, &ehci->split_intr_qhs, split_intr_qhs) {
+ if (!(qh->hw_info1 & inactivate)) /* already on */
+ continue;
+ /*
+ * Don't reactivate if cached, or controller might
+ * overwrite overlay after we modify it!
+ */
+ safe = safe_to_modify_i(ehci, qh);
+ if (safe == 0) {
+ not_done = 1;
+ } else if (safe > 0) {
+ /* See EHCI 1.0 section 4.15.2.4. */
+ token = qh->hw_token;
+ qh->hw_token = (token | halt) & ~active;
+ wmb();
+ qh->hw_info1 &= ~inactivate;
+ wmb();
+ qh->hw_token = (token & ~halt) | qh->was_active;
+ }
+ }
+ } while (not_done);
+}
+#endif
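
INACTIVATE_BIT(), ACTIVE_BIT(), HALT_BIT() and STATUS_BIT() are not part of this excerpt; they wrap the usual qTD/QH status bits so the comparisons work in the controller's descriptor byte order. A plausible sketch, assuming the bit names already used elsewhere in this patch (QTD_STS_ACTIVE, QTD_STS_HALT, QTD_STS_STS, QH_INACTIVATE); the actual ehci.h definitions may differ:

/* Illustrative sketch: per-controller status bits in descriptor byte order. */
#define ACTIVE_BIT(ehci)	cpu_to_hc32(ehci, QTD_STS_ACTIVE)
#define HALT_BIT(ehci)		cpu_to_hc32(ehci, QTD_STS_HALT)
#define STATUS_BIT(ehci)	cpu_to_hc32(ehci, QTD_STS_STS)
#define INACTIVATE_BIT(ehci)	cpu_to_hc32(ehci, QH_INACTIVATE)
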
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
@@ -487,25 +596,36 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
dev_dbg (&qh->dev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
- period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
+#ifdef CONFIG_CPU_FREQ
+ /*
+ * If low/full speed interrupt QHs are inactive (because of
+ * cpufreq changing processor speeds), start QH with I flag set--
+ * it will automatically be cleared when cpufreq is done.
+ */
+ if (ehci->cpufreq_changing)
+ if (!(qh->hw_info1 & (cpu_to_le32(1 << 13))))
+ qh->hw_info1 |= INACTIVATE_BIT(ehci);
+#endif
+
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->start; i < ehci->periodic_size; i += period) {
- union ehci_shadow *prev = &ehci->pshadow [i];
- __le32 *hw_p = &ehci->periodic [i];
+ union ehci_shadow *prev = &ehci->pshadow[i];
+ __hc32 *hw_p = &ehci->periodic[i];
union ehci_shadow here = *prev;
- __le32 type = 0;
+ __hc32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
- type = Q_NEXT_TYPE (*hw_p);
- if (type == Q_TYPE_QH)
+ type = Q_NEXT_TYPE(ehci, *hw_p);
+ if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
- prev = periodic_next_shadow (prev, type);
+ prev = periodic_next_shadow(ehci, prev, type);
hw_p = &here.qh->hw_next;
here = *prev;
}
@@ -527,7 +647,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->hw_next = *hw_p;
wmb ();
prev->qh = qh;
- *hw_p = QH_NEXT (qh->qh_dma);
+ *hw_p = QH_NEXT (ehci, qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
@@ -538,6 +658,12 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
+#ifdef CONFIG_CPU_FREQ
+ /* add qh to list of low/full speed interrupt QHs, if applicable */
+ if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
+ list_add(&qh->split_intr_qhs, &ehci->split_intr_qhs);
+ }
+#endif
/* maybe enable periodic schedule processing */
if (!ehci->periodic_sched++)
return enable_periodic (ehci);
@@ -555,7 +681,14 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
// and this qh is active in the current uframe
// (and overlay token SplitXstate is false?)
// THEN
- // qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);
+ // qh->hw_info1 |= __constant_cpu_to_hc32(1 << 7 /* "ignore" */);
+
+#ifdef CONFIG_CPU_FREQ
+ /* remove qh from list of low/full speed interrupt QHs */
+ if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
+ list_del_init(&qh->split_intr_qhs);
+ }
+#endif
/* high bandwidth, or otherwise part of every microframe */
if ((period = qh->period) == 0)
@@ -572,7 +705,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
dev_dbg (&qh->dev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
- le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* qh->qh_next still "live" to HC */
@@ -598,7 +731,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
* active high speed queues may need bigger delays...
*/
if (list_empty (&qh->qtd_list)
- || (__constant_cpu_to_le32 (QH_CMASK)
+ || (cpu_to_hc32(ehci, QH_CMASK)
& qh->hw_info2) != 0)
wait = 2;
else
@@ -606,7 +739,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
udelay (wait);
qh->qh_state = QH_STATE_IDLE;
- qh->hw_next = EHCI_LIST_END;
+ qh->hw_next = EHCI_LIST_END(ehci);
wmb ();
}
@@ -663,7 +796,7 @@ static int check_intr_schedule (
unsigned frame,
unsigned uframe,
const struct ehci_qh *qh,
- __le32 *c_maskp
+ __hc32 *c_maskp
)
{
int retval = -ENOSPC;
@@ -695,7 +828,7 @@ static int check_intr_schedule (
retval = 0;
- *c_maskp = cpu_to_le32 (mask << 8);
+ *c_maskp = cpu_to_hc32(ehci, mask << 8);
}
#else
/* Make sure this tt's buffer is also available for CSPLITs.
@@ -706,7 +839,7 @@ static int check_intr_schedule (
* one smart pass...
*/
mask = 0x03 << (uframe + qh->gap_uf);
- *c_maskp = cpu_to_le32 (mask << 8);
+ *c_maskp = cpu_to_hc32(ehci, mask << 8);
mask |= 1 << uframe;
if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
@@ -726,20 +859,20 @@ done:
/* "first fit" scheduling policy used the first time through,
* or when the previous schedule slot can't be re-used.
*/
-static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
int status;
unsigned uframe;
- __le32 c_mask;
+ __hc32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
qh_refresh(ehci, qh);
- qh->hw_next = EHCI_LIST_END;
+ qh->hw_next = EHCI_LIST_END(ehci);
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
- uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
+ uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK);
status = check_intr_schedule (ehci, frame, --uframe,
qh, &c_mask);
} else {
@@ -775,10 +908,10 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
- qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
+ qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
qh->hw_info2 |= qh->period
- ? cpu_to_le32 (1 << uframe)
- : __constant_cpu_to_le32 (QH_SMASK);
+ ? cpu_to_hc32(ehci, 1 << uframe)
+ : cpu_to_hc32(ehci, QH_SMASK);
qh->hw_info2 |= c_mask;
} else
ehci_dbg (ehci, "reused qh %p schedule\n", qh);
@@ -808,7 +941,7 @@ static int intr_submit (
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags))) {
+ &ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
goto done;
}
@@ -898,9 +1031,9 @@ iso_stream_init (
buf1 |= maxp;
maxp *= multi;
- stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
- stream->buf1 = cpu_to_le32 (buf1);
- stream->buf2 = cpu_to_le32 (multi);
+ stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
+ stream->buf1 = cpu_to_hc32(ehci, buf1);
+ stream->buf2 = cpu_to_hc32(ehci, multi);
/* usbfs wants to report the average usecs per frame tied up
* when transfers on this endpoint are scheduled ...
@@ -943,7 +1076,7 @@ iso_stream_init (
bandwidth /= 1 << (interval + 2);
/* stream->splits gets created from raw_mask later */
- stream->address = cpu_to_le32 (addr);
+ stream->address = cpu_to_hc32(ehci, addr);
}
stream->bandwidth = bandwidth;
@@ -1077,7 +1210,8 @@ iso_sched_alloc (unsigned packets, gfp_t mem_flags)
}
static inline void
-itd_sched_init (
+itd_sched_init(
+ struct ehci_hcd *ehci,
struct ehci_iso_sched *iso_sched,
struct ehci_iso_stream *stream,
struct urb *urb
@@ -1107,7 +1241,7 @@ itd_sched_init (
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= EHCI_ITD_IOC;
trans |= length << 16;
- uframe->transaction = cpu_to_le32 (trans);
+ uframe->transaction = cpu_to_hc32(ehci, trans);
/* might need to cross a buffer page within a uframe */
uframe->bufp = (buf & ~(u64)0x0fff);
@@ -1149,7 +1283,7 @@ itd_urb_transaction (
if (unlikely (sched == NULL))
return -ENOMEM;
- itd_sched_init (sched, stream, urb);
+ itd_sched_init(ehci, sched, stream, urb);
if (urb->interval < 8)
num_itds = 1 + (sched->span + 7) / 8;
@@ -1167,7 +1301,7 @@ itd_urb_transaction (
/* prefer previously-allocated itds */
if (likely (!list_empty(&stream->free_list))) {
itd = list_entry (stream->free_list.prev,
- struct ehci_itd, itd_list);
+ struct ehci_itd, itd_list);
list_del (&itd->itd_list);
itd_dma = itd->itd_dma;
} else
@@ -1294,7 +1428,7 @@ sitd_slot_ok (
uframe += period_uframes;
} while (uframe < mod);
- stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
+ stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
return 1;
}
@@ -1415,12 +1549,13 @@ ready:
/*-------------------------------------------------------------------------*/
static inline void
-itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
+itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
+ struct ehci_itd *itd)
{
int i;
/* it's been recently zeroed */
- itd->hw_next = EHCI_LIST_END;
+ itd->hw_next = EHCI_LIST_END(ehci);
itd->hw_bufp [0] = stream->buf0;
itd->hw_bufp [1] = stream->buf1;
itd->hw_bufp [2] = stream->buf2;
@@ -1432,7 +1567,8 @@ itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
}
static inline void
-itd_patch (
+itd_patch(
+ struct ehci_hcd *ehci,
struct ehci_itd *itd,
struct ehci_iso_sched *iso_sched,
unsigned index,
@@ -1447,17 +1583,18 @@ itd_patch (
uframe &= 0x07;
itd->index [uframe] = index;
- itd->hw_transaction [uframe] = uf->transaction;
- itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
- itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
- itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));
+ itd->hw_transaction[uframe] = uf->transaction;
+ itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
+ itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
/* iso_frame_desc[].offset must be strictly increasing */
if (unlikely (uf->cross)) {
u64 bufp = uf->bufp + 4096;
+
itd->pg = ++pg;
- itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
- itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
+ itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
}
}
@@ -1470,7 +1607,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
ehci->pshadow [frame].itd = itd;
itd->frame = frame;
wmb ();
- ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
+ ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
/* fit urb's itds into the selected schedule slot; activate as needed */
@@ -1515,14 +1652,14 @@ itd_link_urb (
list_move_tail (&itd->itd_list, &stream->td_list);
itd->stream = iso_stream_get (stream);
itd->urb = usb_get_urb (urb);
- itd_init (stream, itd);
+ itd_init (ehci, stream, itd);
}
uframe = next_uframe & 0x07;
frame = next_uframe >> 3;
itd->usecs [uframe] = stream->usecs;
- itd_patch (itd, iso_sched, packet, uframe);
+ itd_patch(ehci, itd, iso_sched, packet, uframe);
next_uframe += stream->interval;
stream->depth += stream->interval;
@@ -1570,7 +1707,7 @@ itd_complete (
urb_index = itd->index[uframe];
desc = &urb->iso_frame_desc [urb_index];
- t = le32_to_cpup (&itd->hw_transaction [uframe]);
+ t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
itd->hw_transaction [uframe] = 0;
stream->depth -= stream->interval;
@@ -1700,7 +1837,8 @@ done:
*/
static inline void
-sitd_sched_init (
+sitd_sched_init(
+ struct ehci_hcd *ehci,
struct ehci_iso_sched *iso_sched,
struct ehci_iso_stream *stream,
struct urb *urb
@@ -1729,7 +1867,7 @@ sitd_sched_init (
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= SITD_IOC;
trans |= length << 16;
- packet->transaction = cpu_to_le32 (trans);
+ packet->transaction = cpu_to_hc32(ehci, trans);
/* might need to cross a buffer page within a td */
packet->bufp = buf;
@@ -1765,7 +1903,7 @@ sitd_urb_transaction (
if (iso_sched == NULL)
return -ENOMEM;
- sitd_sched_init (iso_sched, stream, urb);
+ sitd_sched_init(ehci, iso_sched, stream, urb);
/* allocate/init sITDs */
spin_lock_irqsave (&ehci->lock, flags);
@@ -1817,7 +1955,8 @@ sitd_urb_transaction (
/*-------------------------------------------------------------------------*/
static inline void
-sitd_patch (
+sitd_patch(
+ struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct ehci_sitd *sitd,
struct ehci_iso_sched *iso_sched,
@@ -1827,20 +1966,20 @@ sitd_patch (
struct ehci_iso_packet *uf = &iso_sched->packet [index];
u64 bufp = uf->bufp;
- sitd->hw_next = EHCI_LIST_END;
+ sitd->hw_next = EHCI_LIST_END(ehci);
sitd->hw_fullspeed_ep = stream->address;
sitd->hw_uframe = stream->splits;
sitd->hw_results = uf->transaction;
- sitd->hw_backpointer = EHCI_LIST_END;
+ sitd->hw_backpointer = EHCI_LIST_END(ehci);
bufp = uf->bufp;
- sitd->hw_buf [0] = cpu_to_le32 (bufp);
- sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);
+ sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
+ sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
- sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
+ sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
if (uf->cross)
bufp += 4096;
- sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
+ sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
sitd->index = index;
}
@@ -1853,7 +1992,7 @@ sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
ehci->pshadow [frame].sitd = sitd;
sitd->frame = frame;
wmb ();
- ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD;
+ ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
@@ -1881,7 +2020,7 @@ sitd_link_urb (
urb->dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
(next_uframe >> 3) % ehci->periodic_size,
- stream->interval, le32_to_cpu (stream->splits));
+ stream->interval, hc32_to_cpu(ehci, stream->splits));
stream->start = jiffies;
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
@@ -1902,7 +2041,7 @@ sitd_link_urb (
sitd->stream = iso_stream_get (stream);
sitd->urb = usb_get_urb (urb);
- sitd_patch (stream, sitd, sched, packet);
+ sitd_patch(ehci, stream, sitd, sched, packet);
sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
sitd);
@@ -1940,7 +2079,7 @@ sitd_complete (
urb_index = sitd->index;
desc = &urb->iso_frame_desc [urb_index];
- t = le32_to_cpup (&sitd->hw_results);
+ t = hc32_to_cpup(ehci, &sitd->hw_results);
/* report transfer status */
if (t & SITD_ERRS) {
@@ -2095,7 +2234,7 @@ scan_periodic (struct ehci_hcd *ehci)
for (;;) {
union ehci_shadow q, *q_p;
- __le32 type, *hw_p;
+ __hc32 type, *hw_p;
unsigned uframes;
/* don't scan past the live uframe */
@@ -2113,7 +2252,7 @@ restart:
q_p = &ehci->pshadow [frame];
hw_p = &ehci->periodic [frame];
q.ptr = q_p->ptr;
- type = Q_NEXT_TYPE (*hw_p);
+ type = Q_NEXT_TYPE(ehci, *hw_p);
modified = 0;
while (q.ptr != NULL) {
@@ -2122,11 +2261,11 @@ restart:
int live;
live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
- switch (type) {
+ switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get (q.qh);
- type = Q_NEXT_TYPE (q.qh->hw_next);
+ type = Q_NEXT_TYPE(ehci, q.qh->hw_next);
q = q.qh->qh_next;
modified = qh_completions (ehci, temp.qh);
if (unlikely (list_empty (&temp.qh->qtd_list)))
@@ -2137,10 +2276,10 @@ restart:
/* for "save place" FSTNs, look at QH entries
* in the previous frame for completions.
*/
- if (q.fstn->hw_prev != EHCI_LIST_END) {
+ if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) {
dbg ("ignoring completions from FSTNs");
}
- type = Q_NEXT_TYPE (q.fstn->hw_next);
+ type = Q_NEXT_TYPE(ehci, q.fstn->hw_next);
q = q.fstn->fstn_next;
break;
case Q_TYPE_ITD:
@@ -2148,11 +2287,12 @@ restart:
rmb ();
for (uf = live ? uframes : 8; uf < 8; uf++) {
if (0 == (q.itd->hw_transaction [uf]
- & ITD_ACTIVE))
+ & ITD_ACTIVE(ehci)))
continue;
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
- type = Q_NEXT_TYPE (q.itd->hw_next);
+ type = Q_NEXT_TYPE(ehci,
+ q.itd->hw_next);
q = *q_p;
break;
}
@@ -2164,23 +2304,24 @@ restart:
*/
*q_p = q.itd->itd_next;
*hw_p = q.itd->hw_next;
- type = Q_NEXT_TYPE (q.itd->hw_next);
+ type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
wmb();
modified = itd_complete (ehci, q.itd);
q = *q_p;
break;
case Q_TYPE_SITD:
- if ((q.sitd->hw_results & SITD_ACTIVE)
+ if ((q.sitd->hw_results & SITD_ACTIVE(ehci))
&& live) {
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
- type = Q_NEXT_TYPE (q.sitd->hw_next);
+ type = Q_NEXT_TYPE(ehci,
+ q.sitd->hw_next);
q = *q_p;
break;
}
*q_p = q.sitd->sitd_next;
*hw_p = q.sitd->hw_next;
- type = Q_NEXT_TYPE (q.sitd->hw_next);
+ type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
wmb();
modified = sitd_complete (ehci, q.sitd);
q = *q_p;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 46fa57a520d0..2c68a04230c1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -21,6 +21,22 @@
/* definitions used for the EHCI driver */
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given EHCI_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+#else
+#define __hc32 __le32
+#define __hc16 __le16
+#endif
+
/* statistics can be kept for tuning/monitoring */
struct ehci_stats {
/* irq usage */
@@ -55,6 +71,12 @@ struct ehci_hcd { /* one per controller */
__u32 hcs_params; /* cached register copy */
spinlock_t lock;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block cpufreq_transition;
+ int cpufreq_changing;
+ struct list_head split_intr_qhs;
+#endif
+
/* async schedule support */
struct ehci_qh *async;
struct ehci_qh *reclaim;
@@ -64,7 +86,7 @@ struct ehci_hcd { /* one per controller */
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
unsigned periodic_size;
- __le32 *periodic; /* hw periodic table */
+ __hc32 *periodic; /* hw periodic table */
dma_addr_t periodic_dma;
unsigned i_thresh; /* uframes HC might cache */
@@ -74,11 +96,14 @@ struct ehci_hcd { /* one per controller */
/* per root hub port */
unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
+
/* bit vectors (one bit per port) */
unsigned long bus_suspended; /* which ports were
already suspended at the start of a bus suspend */
unsigned long companion_ports; /* which ports are
dedicated to the companion controller */
+ unsigned long owned_ports; /* which ports are
+ owned by the companion during a bus suspend */
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
@@ -97,6 +122,7 @@ struct ehci_hcd { /* one per controller */
unsigned no_selective_suspend:1;
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned big_endian_mmio:1;
+ unsigned big_endian_desc:1;
u8 sbrn; /* packed release number */
@@ -276,6 +302,12 @@ struct ehci_regs {
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
} __attribute__ ((packed));
+#define USBMODE 0x68 /* USB Device mode */
+#define USBMODE_SDIS (1<<3) /* Stream disable */
+#define USBMODE_BE (1<<2) /* BE/LE endianness select */
+#define USBMODE_CM_HC (3<<0) /* host controller mode */
+#define USBMODE_CM_IDLE (0<<0) /* idle state */
+
/* Appendix C, Debug port ... intended for use with special "debug devices"
* that can help if there's no serial console. (nonstandard enumeration.)
*/
@@ -303,7 +335,7 @@ struct ehci_dbg_port {
/*-------------------------------------------------------------------------*/
-#define QTD_NEXT(dma) cpu_to_le32((u32)dma)
+#define QTD_NEXT(ehci, dma) cpu_to_hc32(ehci, (u32)dma)
/*
* EHCI Specification 0.95 Section 3.5
@@ -315,9 +347,9 @@ struct ehci_dbg_port {
*/
struct ehci_qtd {
/* first part defined by EHCI spec */
- __le32 hw_next; /* see EHCI 3.5.1 */
- __le32 hw_alt_next; /* see EHCI 3.5.2 */
- __le32 hw_token; /* see EHCI 3.5.3 */
+ __hc32 hw_next; /* see EHCI 3.5.1 */
+ __hc32 hw_alt_next; /* see EHCI 3.5.2 */
+ __hc32 hw_token; /* see EHCI 3.5.3 */
#define QTD_TOGGLE (1 << 31) /* data toggle */
#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
#define QTD_IOC (1 << 15) /* interrupt on complete */
@@ -331,8 +363,13 @@ struct ehci_qtd {
#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
#define QTD_STS_STS (1 << 1) /* split transaction state */
#define QTD_STS_PING (1 << 0) /* issue PING? */
- __le32 hw_buf [5]; /* see EHCI 3.5.4 */
- __le32 hw_buf_hi [5]; /* Appendix B */
+
+#define ACTIVE_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_ACTIVE)
+#define HALT_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_HALT)
+#define STATUS_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_STS)
+
+ __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi [5]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t qtd_dma; /* qtd address */
@@ -342,26 +379,33 @@ struct ehci_qtd {
} __attribute__ ((aligned (32)));
/* mask NakCnt+T in qh->hw_alt_next */
-#define QTD_MASK __constant_cpu_to_le32 (~0x1f)
+#define QTD_MASK(ehci) cpu_to_hc32 (ehci, ~0x1f)
#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
/*-------------------------------------------------------------------------*/
/* type tag from {qh,itd,sitd,fstn}->hw_next */
-#define Q_NEXT_TYPE(dma) ((dma) & __constant_cpu_to_le32 (3 << 1))
+#define Q_NEXT_TYPE(ehci,dma) ((dma) & cpu_to_hc32(ehci, 3 << 1))
+/*
+ * Now the following defines are not converted using the
+ * __constant_cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
/* values for that type tag */
-#define Q_TYPE_ITD __constant_cpu_to_le32 (0 << 1)
-#define Q_TYPE_QH __constant_cpu_to_le32 (1 << 1)
-#define Q_TYPE_SITD __constant_cpu_to_le32 (2 << 1)
-#define Q_TYPE_FSTN __constant_cpu_to_le32 (3 << 1)
+#define Q_TYPE_ITD (0 << 1)
+#define Q_TYPE_QH (1 << 1)
+#define Q_TYPE_SITD (2 << 1)
+#define Q_TYPE_FSTN (3 << 1)
/* next async queue entry, or pointer to interrupt/periodic QH */
-#define QH_NEXT(dma) (cpu_to_le32(((u32)dma)&~0x01f)|Q_TYPE_QH)
+#define QH_NEXT(ehci,dma) (cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH))
/* for periodic/async schedules and qtd lists, mark end of list */
-#define EHCI_LIST_END __constant_cpu_to_le32(1) /* "null pointer" to hw */
+#define EHCI_LIST_END(ehci) cpu_to_hc32(ehci, 1) /* "null pointer" to hw */
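Since the type tags above are now plain CPU-order constants, each use site folds the tag into the DMA pointer before a single cpu_to_hc32() conversion and swaps back before comparing. A brief sketch of that pattern, mirroring the periodic-link and scan_periodic changes earlier in this patch (ehci, frame and itd assumed to be in scope):

	/* one conversion covers both the address and the type tag */
	ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);

	/* the tag is recovered in CPU order before being compared */
	switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, ehci->periodic[frame]))) {
	case Q_TYPE_ITD:
		break;
	}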
/*
* Entries in periodic shadow table are pointers to one of four kinds
@@ -376,7 +420,7 @@ union ehci_shadow {
struct ehci_itd *itd; /* Q_TYPE_ITD */
struct ehci_sitd *sitd; /* Q_TYPE_SITD */
struct ehci_fstn *fstn; /* Q_TYPE_FSTN */
- __le32 *hw_next; /* (all types) */
+ __hc32 *hw_next; /* (all types) */
void *ptr;
};
@@ -392,23 +436,27 @@ union ehci_shadow {
struct ehci_qh {
/* first part defined by EHCI spec */
- __le32 hw_next; /* see EHCI 3.6.1 */
- __le32 hw_info1; /* see EHCI 3.6.2 */
+ __hc32 hw_next; /* see EHCI 3.6.1 */
+ __hc32 hw_info1; /* see EHCI 3.6.2 */
#define QH_HEAD 0x00008000
- __le32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_INACTIVATE 0x00000080
+
+#define INACTIVATE_BIT(ehci) cpu_to_hc32(ehci, QH_INACTIVATE)
+
+ __hc32 hw_info2; /* see EHCI 3.6.2 */
#define QH_SMASK 0x000000ff
#define QH_CMASK 0x0000ff00
#define QH_HUBADDR 0x007f0000
#define QH_HUBPORT 0x3f800000
#define QH_MULT 0xc0000000
- __le32 hw_current; /* qtd list - see EHCI 3.6.4 */
+ __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
/* qtd overlay (hardware parts of a struct ehci_qtd) */
- __le32 hw_qtd_next;
- __le32 hw_alt_next;
- __le32 hw_token;
- __le32 hw_buf [5];
- __le32 hw_buf_hi [5];
+ __hc32 hw_qtd_next;
+ __hc32 hw_alt_next;
+ __hc32 hw_token;
+ __hc32 hw_buf [5];
+ __hc32 hw_buf_hi [5];
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
@@ -418,7 +466,14 @@ struct ehci_qh {
struct ehci_qh *reclaim; /* next to reclaim */
struct ehci_hcd *ehci;
- struct kref kref;
+
+ /*
+ * Do NOT use atomic operations for QH refcounting. On some CPUs
+ * (PPC7448 for example), atomic operations cannot be performed on
+ * memory that is cache-inhibited (i.e. being used for DMA).
+ * Spinlocks are used to protect all QH fields.
+ */
+ u32 refcount;
unsigned stamp;
u8 qh_state;
@@ -437,6 +492,10 @@ struct ehci_qh {
unsigned short start; /* where polling starts */
#define NO_FRAME ((unsigned short)~0) /* pick new start */
struct usb_device *dev; /* access to TT */
+#ifdef CONFIG_CPU_FREQ
+ struct list_head split_intr_qhs; /* list of split qhs */
+ __le32 was_active; /* active bit before "i" set */
+#endif
} __attribute__ ((aligned (32)));
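The refcount comment above implies get/put helpers that rely on ehci->lock instead of atomic operations. A minimal sketch of that pattern (example_qh_get/example_qh_put are hypothetical names; the real helpers live elsewhere in the driver and are not part of this hunk, and callers are assumed to hold ehci->lock):

static struct ehci_qh *example_qh_get(struct ehci_qh *qh)
{
	qh->refcount++;		/* plain increment; no atomics on memory used for DMA */
	return qh;
}

static void example_qh_put(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (--qh->refcount == 0)	/* last reference: return the QH to its pool */
		dma_pool_free(ehci->qh_pool, qh, qh->qh_dma);
}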
/*-------------------------------------------------------------------------*/
@@ -445,7 +504,7 @@ struct ehci_qh {
struct ehci_iso_packet {
/* These will be copied to iTD when scheduling */
u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
- __le32 transaction; /* itd->hw_transaction[i] |= */
+ __hc32 transaction; /* itd->hw_transaction[i] |= */
u8 cross; /* buf crosses pages */
/* for full speed OUT splits */
u32 buf1;
@@ -467,8 +526,8 @@ struct ehci_iso_sched {
*/
struct ehci_iso_stream {
/* first two fields match QH, but info1 == 0 */
- __le32 hw_next;
- __le32 hw_info1;
+ __hc32 hw_next;
+ __hc32 hw_info1;
u32 refcount;
u8 bEndpointAddress;
@@ -483,7 +542,7 @@ struct ehci_iso_stream {
unsigned long start; /* jiffies */
unsigned long rescheduled;
int next_uframe;
- __le32 splits;
+ __hc32 splits;
/* the rest is derived from the endpoint descriptor,
* trusting urb->interval == f(epdesc->bInterval) and
@@ -497,12 +556,12 @@ struct ehci_iso_stream {
unsigned bandwidth;
/* This is used to initialize iTD's hw_bufp fields */
- __le32 buf0;
- __le32 buf1;
- __le32 buf2;
+ __hc32 buf0;
+ __hc32 buf1;
+ __hc32 buf2;
/* this is used to initialize sITD's tt info */
- __le32 address;
+ __hc32 address;
};
/*-------------------------------------------------------------------------*/
@@ -515,8 +574,8 @@ struct ehci_iso_stream {
*/
struct ehci_itd {
/* first part defined by EHCI spec */
- __le32 hw_next; /* see EHCI 3.3.1 */
- __le32 hw_transaction [8]; /* see EHCI 3.3.2 */
+ __hc32 hw_next; /* see EHCI 3.3.1 */
+ __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
#define EHCI_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
#define EHCI_ISOC_BUF_ERR (1<<30) /* Data buffer error */
#define EHCI_ISOC_BABBLE (1<<29) /* babble detected */
@@ -524,10 +583,10 @@ struct ehci_itd {
#define EHCI_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
#define EHCI_ITD_IOC (1 << 15) /* interrupt on complete */
-#define ITD_ACTIVE __constant_cpu_to_le32(EHCI_ISOC_ACTIVE)
+#define ITD_ACTIVE(ehci) cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE)
- __le32 hw_bufp [7]; /* see EHCI 3.3.3 */
- __le32 hw_bufp_hi [7]; /* Appendix B */
+ __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi [7]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t itd_dma; /* for this itd */
@@ -554,11 +613,11 @@ struct ehci_itd {
*/
struct ehci_sitd {
/* first part defined by EHCI spec */
- __le32 hw_next;
+ __hc32 hw_next;
/* uses bit field macros above - see EHCI 0.95 Table 3-8 */
- __le32 hw_fullspeed_ep; /* EHCI table 3-9 */
- __le32 hw_uframe; /* EHCI table 3-10 */
- __le32 hw_results; /* EHCI table 3-11 */
+ __hc32 hw_fullspeed_ep; /* EHCI table 3-9 */
+ __hc32 hw_uframe; /* EHCI table 3-10 */
+ __hc32 hw_results; /* EHCI table 3-11 */
#define SITD_IOC (1 << 31) /* interrupt on completion */
#define SITD_PAGE (1 << 30) /* buffer 0/1 */
#define SITD_LENGTH(x) (0x3ff & ((x)>>16))
@@ -570,11 +629,11 @@ struct ehci_sitd {
#define SITD_STS_MMF (1 << 2) /* incomplete split transaction */
#define SITD_STS_STS (1 << 1) /* split transaction state */
-#define SITD_ACTIVE __constant_cpu_to_le32(SITD_STS_ACTIVE)
+#define SITD_ACTIVE(ehci) cpu_to_hc32(ehci, SITD_STS_ACTIVE)
- __le32 hw_buf [2]; /* EHCI table 3-12 */
- __le32 hw_backpointer; /* EHCI table 3-13 */
- __le32 hw_buf_hi [2]; /* Appendix B */
+ __hc32 hw_buf [2]; /* EHCI table 3-12 */
+ __hc32 hw_backpointer; /* EHCI table 3-13 */
+ __hc32 hw_buf_hi [2]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t sitd_dma;
@@ -599,8 +658,8 @@ struct ehci_sitd {
* it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
*/
struct ehci_fstn {
- __le32 hw_next; /* any periodic q entry */
- __le32 hw_prev; /* qh or EHCI_LIST_END */
+ __hc32 hw_next; /* any periodic q entry */
+ __hc32 hw_prev; /* qh or EHCI_LIST_END */
/* the rest is HCD-private */
dma_addr_t fstn_dma;
@@ -672,8 +731,21 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#define ehci_big_endian_mmio(e) 0
#endif
-static inline unsigned int ehci_readl (const struct ehci_hcd *ehci,
- __u32 __iomem * regs)
+/*
+ * Big-endian read/write functions are arch-specific.
+ * Other arches can be added if/when they're needed.
+ *
+ * REVISIT: arch/powerpc now has readl/writel_be, so the
+ * definition below can die once the 4xx support is
+ * finally ported over.
+ */
+#if defined(CONFIG_PPC)
+#define readl_be(addr) in_be32((__force unsigned *)addr)
+#define writel_be(val, addr) out_be32((__force unsigned *)addr, val)
+#endif
+
+static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
+ __u32 __iomem * regs)
{
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
return ehci_big_endian_mmio(ehci) ?
@@ -684,8 +756,8 @@ static inline unsigned int ehci_readl (const struct ehci_hcd *ehci,
#endif
}
-static inline void ehci_writel (const struct ehci_hcd *ehci,
- const unsigned int val, __u32 __iomem *regs)
+static inline void ehci_writel(const struct ehci_hcd *ehci,
+ const unsigned int val, __u32 __iomem *regs)
{
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
ehci_big_endian_mmio(ehci) ?
@@ -698,6 +770,62 @@ static inline void ehci_writel (const struct ehci_hcd *ehci,
/*-------------------------------------------------------------------------*/
+/*
+ * The AMCC 440EPx not only implements its EHCI registers in big-endian
+ * format, but also its DMA data structures (descriptors).
+ *
+ * EHCI controllers accessed through PCI work normally (little-endian
+ * everywhere), so we won't bother supporting a BE-only mode for now.
+ */
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
+#define ehci_big_endian_desc(e) ((e)->big_endian_desc)
+
+/* cpu to ehci */
+static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
+{
+ return ehci_big_endian_desc(ehci)
+ ? (__force __hc32)cpu_to_be32(x)
+ : (__force __hc32)cpu_to_le32(x);
+}
+
+/* ehci to cpu */
+static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
+{
+ return ehci_big_endian_desc(ehci)
+ ? be32_to_cpu((__force __be32)x)
+ : le32_to_cpu((__force __le32)x);
+}
+
+static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
+{
+ return ehci_big_endian_desc(ehci)
+ ? be32_to_cpup((__force __be32 *)x)
+ : le32_to_cpup((__force __le32 *)x);
+}
+
+#else
+
+/* cpu to ehci */
+static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* ehci to cpu */
+static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
+{
+ return le32_to_cpup(x);
+}
+
+#endif
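A short usage sketch of these helpers, with the conversion done once on the way in and once on the way out (example_fill_next is a hypothetical name, not part of the patch; ehci and qtd are assumed valid):

static void example_fill_next(struct ehci_hcd *ehci, struct ehci_qtd *qtd,
		dma_addr_t next_dma)
{
	/* store the pointer in whatever byte order this controller's descriptors use */
	qtd->hw_next = QTD_NEXT(ehci, next_dma);

	/* hardware-format constants compare without any swapping */
	if (qtd->hw_next == EHCI_LIST_END(ehci))
		return;

	/* swap back to CPU order only when software needs the raw value */
	ehci_dbg(ehci, "next qtd at %08x\n", hc32_to_cpu(ehci, qtd->hw_next));
}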
+
+/*-------------------------------------------------------------------------*/
+
#ifndef DEBUG
#define STUB_DEBUG_FILES
#endif /* DEBUG */
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 46873f2534b5..5c851a36de72 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -228,7 +228,6 @@ static void preproc_atl_queue(struct isp116x *isp116x)
struct urb, urb_list);
ptd = &ep->ptd;
len = ep->length;
- spin_lock(&urb->lock);
ep->data = (unsigned char *)urb->transfer_buffer
+ urb->actual_length;
@@ -264,7 +263,6 @@ static void preproc_atl_queue(struct isp116x *isp116x)
| PTD_EP(ep->epnum);
ptd->len = PTD_LEN(len) | PTD_DIR(dir);
ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
- spin_unlock(&urb->lock);
if (!ep->active) {
ptd->mps |= PTD_LAST_MSK;
isp116x->atl_last_dir = dir;
@@ -275,6 +273,61 @@ static void preproc_atl_queue(struct isp116x *isp116x)
}
/*
+ Take done or failed requests out of schedule. Give back
+ processed urbs.
+*/
+static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep,
+ struct urb *urb)
+__releases(isp116x->lock) __acquires(isp116x->lock)
+{
+ unsigned i;
+
+ urb->hcpriv = NULL;
+ ep->error_count = 0;
+
+ if (usb_pipecontrol(urb->pipe))
+ ep->nextpid = USB_PID_SETUP;
+
+ urb_dbg(urb, "Finish");
+
+ spin_unlock(&isp116x->lock);
+ usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb);
+ spin_lock(&isp116x->lock);
+
+ /* take idle endpoints out of the schedule */
+ if (!list_empty(&ep->hep->urb_list))
+ return;
+
+ /* async deschedule */
+ if (!list_empty(&ep->schedule)) {
+ list_del_init(&ep->schedule);
+ return;
+ }
+
+ /* periodic deschedule */
+ DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+ for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
+ struct isp116x_ep *temp;
+ struct isp116x_ep **prev = &isp116x->periodic[i];
+
+ while (*prev && ((temp = *prev) != ep))
+ prev = &temp->next;
+ if (*prev)
+ *prev = ep->next;
+ isp116x->load[i] -= ep->load;
+ }
+ ep->branch = PERIODIC_SIZE;
+ isp116x_to_hcd(isp116x)->self.bandwidth_allocated -=
+ ep->load / ep->period;
+
+ /* switch irq type? */
+ if (!--isp116x->periodic_count) {
+ isp116x->irqenb &= ~HCuPINT_SOF;
+ isp116x->irqenb |= HCuPINT_ATL;
+ }
+}
+
+/*
Analyze transfer results, handle partial transfers and errors
*/
static void postproc_atl_queue(struct isp116x *isp116x)
@@ -284,6 +337,7 @@ static void postproc_atl_queue(struct isp116x *isp116x)
struct usb_device *udev;
struct ptd *ptd;
int short_not_ok;
+ int status;
u8 cc;
for (ep = isp116x->atl_active; ep; ep = ep->active) {
@@ -294,7 +348,7 @@ static void postproc_atl_queue(struct isp116x *isp116x)
ptd = &ep->ptd;
cc = PTD_GET_CC(ptd);
short_not_ok = 1;
- spin_lock(&urb->lock);
+ status = -EINPROGRESS;
/* Data underrun is special. For allowed underrun
we clear the error and continue as normal. For
@@ -302,47 +356,36 @@ static void postproc_atl_queue(struct isp116x *isp116x)
immediately while for control transfer,
we do a STATUS stage. */
if (cc == TD_DATAUNDERRUN) {
- if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) {
- DBG("Allowed data underrun\n");
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK) ||
+ usb_pipecontrol(urb->pipe)) {
+ DBG("Allowed or control data underrun\n");
cc = TD_CC_NOERROR;
short_not_ok = 0;
} else {
ep->error_count = 1;
- if (usb_pipecontrol(urb->pipe))
- ep->nextpid = USB_PID_ACK;
- else
- usb_settoggle(udev, ep->epnum,
- ep->nextpid ==
- USB_PID_OUT,
- PTD_GET_TOGGLE(ptd));
+ usb_settoggle(udev, ep->epnum,
+ ep->nextpid == USB_PID_OUT,
+ PTD_GET_TOGGLE(ptd));
urb->actual_length += PTD_GET_COUNT(ptd);
- urb->status = cc_to_error[TD_DATAUNDERRUN];
- spin_unlock(&urb->lock);
- continue;
+ status = cc_to_error[TD_DATAUNDERRUN];
+ goto done;
}
}
- /* Keep underrun error through the STATUS stage */
- if (urb->status == cc_to_error[TD_DATAUNDERRUN])
- cc = TD_DATAUNDERRUN;
if (cc != TD_CC_NOERROR && cc != TD_NOTACCESSED
&& (++ep->error_count >= 3 || cc == TD_CC_STALL
|| cc == TD_DATAOVERRUN)) {
- if (urb->status == -EINPROGRESS)
- urb->status = cc_to_error[cc];
+ status = cc_to_error[cc];
if (ep->nextpid == USB_PID_ACK)
ep->nextpid = 0;
- spin_unlock(&urb->lock);
- continue;
+ goto done;
}
/* According to usb spec, zero-length Int transfer signals
finishing of the urb. Hey, does this apply only
for IN endpoints? */
if (usb_pipeint(urb->pipe) && !PTD_GET_LEN(ptd)) {
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
- spin_unlock(&urb->lock);
- continue;
+ status = 0;
+ goto done;
}
/* Relax after previously failed, but later succeeded
@@ -381,8 +424,8 @@ static void postproc_atl_queue(struct isp116x *isp116x)
/* All data for this URB is transferred, let's finish */
if (usb_pipecontrol(urb->pipe))
ep->nextpid = USB_PID_ACK;
- else if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ else
+ status = 0;
break;
case USB_PID_SETUP:
if (PTD_GET_ACTIVE(ptd)
@@ -402,69 +445,27 @@ static void postproc_atl_queue(struct isp116x *isp116x)
if (PTD_GET_ACTIVE(ptd)
|| (cc != TD_CC_NOERROR && cc < 0x0E))
break;
- if (urb->status == -EINPROGRESS)
- urb->status = 0;
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+ urb->actual_length <
+ urb->transfer_buffer_length)
+ status = -EREMOTEIO;
+ else
+ status = 0;
ep->nextpid = 0;
break;
default:
BUG();
}
- spin_unlock(&urb->lock);
- }
-}
-
-/*
- Take done or failed requests out of schedule. Give back
- processed urbs.
-*/
-static void finish_request(struct isp116x *isp116x, struct isp116x_ep *ep,
- struct urb *urb)
-__releases(isp116x->lock) __acquires(isp116x->lock)
-{
- unsigned i;
-
- urb->hcpriv = NULL;
- ep->error_count = 0;
-
- if (usb_pipecontrol(urb->pipe))
- ep->nextpid = USB_PID_SETUP;
-
- urb_dbg(urb, "Finish");
-
- spin_unlock(&isp116x->lock);
- usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb);
- spin_lock(&isp116x->lock);
-
- /* take idle endpoints out of the schedule */
- if (!list_empty(&ep->hep->urb_list))
- return;
-
- /* async deschedule */
- if (!list_empty(&ep->schedule)) {
- list_del_init(&ep->schedule);
- return;
- }
- /* periodic deschedule */
- DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
- for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
- struct isp116x_ep *temp;
- struct isp116x_ep **prev = &isp116x->periodic[i];
-
- while (*prev && ((temp = *prev) != ep))
- prev = &temp->next;
- if (*prev)
- *prev = ep->next;
- isp116x->load[i] -= ep->load;
- }
- ep->branch = PERIODIC_SIZE;
- isp116x_to_hcd(isp116x)->self.bandwidth_allocated -=
- ep->load / ep->period;
-
- /* switch irq type? */
- if (!--isp116x->periodic_count) {
- isp116x->irqenb &= ~HCuPINT_SOF;
- isp116x->irqenb |= HCuPINT_ATL;
+ done:
+ if (status != -EINPROGRESS) {
+ spin_lock(&urb->lock);
+ if (urb->status == -EINPROGRESS)
+ urb->status = status;
+ spin_unlock(&urb->lock);
+ }
+ if (urb->status != -EINPROGRESS)
+ finish_request(isp116x, ep, urb);
}
}
@@ -570,9 +571,6 @@ static void start_atl_transfers(struct isp116x *isp116x)
*/
static void finish_atl_transfers(struct isp116x *isp116x)
{
- struct isp116x_ep *ep;
- struct urb *urb;
-
if (!isp116x->atl_active)
return;
/* Fifo not ready? */
@@ -582,16 +580,6 @@ static void finish_atl_transfers(struct isp116x *isp116x)
atomic_inc(&isp116x->atl_finishing);
unpack_fifo(isp116x);
postproc_atl_queue(isp116x);
- for (ep = isp116x->atl_active; ep; ep = ep->active) {
- urb =
- container_of(ep->hep->urb_list.next, struct urb, urb_list);
- /* USB_PID_ACK check here avoids finishing of
- control transfers, for which TD_DATAUNDERRUN
- occured, while URB_SHORT_NOT_OK was set */
- if (urb && urb->status != -EINPROGRESS
- && ep->nextpid != USB_PID_ACK)
- finish_request(isp116x, ep, urb);
- }
atomic_dec(&isp116x->atl_finishing);
}
@@ -821,15 +809,12 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
}
/* in case of unlink-during-submit */
- spin_lock(&urb->lock);
if (urb->status != -EINPROGRESS) {
- spin_unlock(&urb->lock);
finish_request(isp116x, ep, urb);
ret = 0;
goto fail;
}
urb->hcpriv = hep;
- spin_unlock(&urb->lock);
start_atl_transfers(isp116x);
fail:
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 273d5ddb72be..6f9e43e9a6ca 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -23,7 +23,7 @@
/* debug| print the main components of an URB
* small: 0) header + data packets 1) just header
*/
-static void __attribute__((unused))
+static void __maybe_unused
urb_print (struct urb * urb, char * str, int small)
{
unsigned int pipe= urb->pipe;
@@ -338,7 +338,7 @@ static void ohci_dump_td (const struct ohci_hcd *ohci, const char *label,
}
/* caller MUST own hcd spinlock if verbose is set! */
-static void __attribute__((unused))
+static void __maybe_unused
ohci_dump_ed (const struct ohci_hcd *ohci, const char *label,
const struct ed *ed, int verbose)
{
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index a66637e725f3..6edf4097d2d2 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -35,15 +35,13 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/reboot.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_PPC_PS3
-#include <asm/firmware.h>
-#endif
#include "../core/hcd.h"
@@ -82,6 +80,8 @@ static const char hcd_name [] = "ohci_hcd";
static void ohci_dump (struct ohci_hcd *ohci, int verbose);
static int ohci_init (struct ohci_hcd *ohci);
static void ohci_stop (struct usb_hcd *hcd);
+static int ohci_restart (struct ohci_hcd *ohci);
+static void ohci_quirk_nec_worker (struct work_struct *work);
#include "ohci-hub.c"
#include "ohci-dbg.c"
@@ -171,11 +171,10 @@ static int ohci_urb_enqueue (
}
/* allocate the private part of the URB */
- urb_priv = kmalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
+ urb_priv = kzalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
mem_flags);
if (!urb_priv)
return -ENOMEM;
- memset (urb_priv, 0, sizeof (urb_priv_t) + size * sizeof (struct td *));
INIT_LIST_HEAD (&urb_priv->pending);
urb_priv->length = size;
urb_priv->ed = ed;
@@ -510,15 +509,7 @@ static int ohci_run (struct ohci_hcd *ohci)
// flush the writes
(void) ohci_readl (ohci, &ohci->regs->control);
msleep(temp);
- temp = roothub_a (ohci);
- if (!(temp & RH_A_NPS)) {
- /* power down each port */
- for (temp = 0; temp < ohci->num_ports; temp++)
- ohci_writel (ohci, RH_PS_LSDA,
- &ohci->regs->roothub.portstatus [temp]);
- }
- // flush those writes
- (void) ohci_readl (ohci, &ohci->regs->control);
+
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
/* 2msec timelimit here means no irqs/preempt */
@@ -659,9 +650,20 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
}
if (ints & OHCI_INTR_UE) {
- disable (ohci);
- ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
// e.g. due to PCI Master/Target Abort
+ if (ohci->flags & OHCI_QUIRK_NEC) {
+ /* Workaround for a silicon bug in some NEC chips used
+ * in Apple's PowerBooks. Adapted from Darwin code.
+ */
+ ohci_err (ohci, "OHCI Unrecoverable Error, scheduling NEC chip restart\n");
+
+ ohci_writel (ohci, OHCI_INTR_UE, &regs->intrdisable);
+
+ schedule_work (&ohci->nec_work);
+ } else {
+ disable (ohci);
+ ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+ }
ohci_dump (ohci, 1);
ohci_usb_reset (ohci);
@@ -763,23 +765,16 @@ static void ohci_stop (struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
/* must not be called from interrupt context */
-
-#ifdef CONFIG_PM
-
static int ohci_restart (struct ohci_hcd *ohci)
{
int temp;
int i;
struct urb_priv *priv;
- /* mark any devices gone, so they do nothing till khubd disconnects.
- * recycle any "live" eds/tds (and urbs) right away.
- * later, khubd disconnect processing will recycle the other state,
- * (either as disconnect/reconnect, or maybe someday as a reset).
- */
spin_lock_irq(&ohci->lock);
disable (ohci);
- usb_root_hub_lost_power(ohci_to_hcd(ohci)->self.root_hub);
+
+ /* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
ohci_dbg(ohci, "abort schedule...\n");
list_for_each_entry (priv, &ohci->pending, pending) {
@@ -826,20 +821,31 @@ static int ohci_restart (struct ohci_hcd *ohci)
if ((temp = ohci_run (ohci)) < 0) {
ohci_err (ohci, "can't restart, %d\n", temp);
return temp;
- } else {
- /* here we "know" root ports should always stay powered,
- * and that if we try to turn them back on the root hub
- * will respond to CSC processing.
- */
- i = ohci->num_ports;
- while (i--)
- ohci_writel (ohci, RH_PS_PSS,
- &ohci->regs->roothub.portstatus [i]);
- ohci_dbg (ohci, "restart complete\n");
}
+ ohci_dbg(ohci, "restart complete\n");
return 0;
}
-#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* NEC workaround */
+static void ohci_quirk_nec_worker(struct work_struct *work)
+{
+ struct ohci_hcd *ohci = container_of(work, struct ohci_hcd, nec_work);
+ int status;
+
+ status = ohci_init(ohci);
+ if (status != 0) {
+ ohci_err(ohci, "Restarting NEC controller failed "
+ "in ohci_init, %d\n", status);
+ return;
+ }
+
+ status = ohci_restart(ohci);
+ if (status != 0)
+ ohci_err(ohci, "Restarting NEC controller failed "
+ "in ohci_restart, %d\n", status);
+}
/*-------------------------------------------------------------------------*/
@@ -917,7 +923,7 @@ MODULE_LICENSE ("GPL");
#ifdef CONFIG_PPC_PS3
#include "ohci-ps3.c"
-#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_sb_driver
+#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
#endif
#if !defined(PCI_DRIVER) && \
@@ -940,12 +946,9 @@ static int __init ohci_hcd_mod_init(void)
sizeof (struct ed), sizeof (struct td));
#ifdef PS3_SYSTEM_BUS_DRIVER
- if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- retval = ps3_system_bus_driver_register(
- &PS3_SYSTEM_BUS_DRIVER);
- if (retval < 0)
- goto error_ps3;
- }
+ retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
+ if (retval < 0)
+ goto error_ps3;
#endif
#ifdef PLATFORM_DRIVER
@@ -991,8 +994,7 @@ static int __init ohci_hcd_mod_init(void)
error_platform:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
- if (firmware_has_feature(FW_FEATURE_PS3_LV1))
- ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
#endif
return retval;
@@ -1014,8 +1016,7 @@ static void __exit ohci_hcd_mod_exit(void)
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
- if (firmware_has_feature(FW_FEATURE_PS3_LV1))
- ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+ ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
}
module_exit(ohci_hcd_mod_exit);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index bb9cc595219e..48e4b11f4d3e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -55,8 +55,6 @@ static void dl_done_list (struct ohci_hcd *);
static void finish_unlinks (struct ohci_hcd *, u16);
#ifdef CONFIG_PM
-static int ohci_restart(struct ohci_hcd *ohci);
-
static int ohci_rh_suspend (struct ohci_hcd *ohci, int autostop)
__releases(ohci->lock)
__acquires(ohci->lock)
@@ -191,6 +189,9 @@ __acquires(ohci->lock)
spin_unlock_irq (&ohci->lock);
(void) ohci_init (ohci);
status = ohci_restart (ohci);
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
spin_lock_irq (&ohci->lock);
}
return status;
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index 2f20d3dc895b..450c7b460c5a 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -28,6 +28,7 @@ static void ohci_hcd_init (struct ohci_hcd *ohci)
ohci->next_statechange = jiffies;
spin_lock_init (&ohci->lock);
INIT_LIST_HEAD (&ohci->pending);
+ INIT_WORK (&ohci->nec_work, ohci_quirk_nec_worker);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index ca62cb583221..a5e2eb85d073 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -111,6 +111,18 @@ static int ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
#endif
}
+/* Check for NEC chip and apply quirk for allegedly lost interrupts.
+ */
+static int ohci_quirk_nec(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+
+ ohci->flags |= OHCI_QUIRK_NEC;
+ ohci_dbg (ohci, "enabled NEC chipset lost interrupt quirk\n");
+
+ return 0;
+}
+
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
@@ -134,6 +146,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
.driver_data = (unsigned long)ohci_quirk_toshiba_scc,
},
{
+ PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB),
+ .driver_data = (unsigned long)ohci_quirk_nec,
+ },
+ {
/* Toshiba portege 4000 */
.vendor = PCI_VENDOR_ID_AL,
.device = 0x5237,
@@ -202,6 +218,42 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
return ret;
}
+#if defined(CONFIG_USB_PERSIST) && (defined(CONFIG_USB_EHCI_HCD) || \
+ defined(CONFIG_USB_EHCI_HCD_MODULE))
+
+/* Following a power loss, we must prepare to regain control of the ports
+ * we used to own. This means turning on the port power before ehci-hcd
+ * tries to switch ownership.
+ *
+ * This isn't a 100% perfect solution. On most systems the OHCI controllers
+ * lie at lower PCI addresses than the EHCI controller, so they will be
+ * discovered (and hence resumed) first. But there is no guarantee things
+ * will always work this way. If the EHCI controller is resumed first and
+ * the OHCI ports are unpowered, then the handover will fail.
+ */
+static void prepare_for_handover(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int port;
+
+ /* Here we "know" root ports should always stay powered */
+ ohci_dbg(ohci, "powerup ports\n");
+ for (port = 0; port < ohci->num_ports; port++)
+ ohci_writel(ohci, RH_PS_PPS,
+ &ohci->regs->roothub.portstatus[port]);
+
+ /* Flush those writes */
+ ohci_readl(ohci, &ohci->regs->control);
+ msleep(20);
+}
+
+#else
+
+static inline void prepare_for_handover(struct usb_hcd *hcd)
+{ }
+
+#endif /* CONFIG_USB_PERSIST etc. */
+
#ifdef CONFIG_PM
static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
@@ -241,7 +293,10 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
static int ohci_pci_resume (struct usb_hcd *hcd)
{
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- usb_hcd_resume_root_hub(hcd);
+
+ /* FIXME: we should try to detect loss of VBUS power here */
+ prepare_for_handover(hcd);
+
return 0;
}
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index d601bbb9387b..ca2a6abbc117 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -134,7 +134,7 @@ static int isp1301_attach(struct i2c_adapter *adap, int addr, int kind)
{
struct i2c_client *c;
- c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index d7cf07288b0b..01a0caeaa6bc 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <asm/firmware.h>
#include <asm/ps3.h>
static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
@@ -75,7 +76,7 @@ static const struct hc_driver ps3_ohci_hc_driver = {
#endif
};
-static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev)
+static int ps3_ohci_probe(struct ps3_system_bus_device *dev)
{
int result;
struct usb_hcd *hcd;
@@ -87,13 +88,31 @@ static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev)
goto fail_start;
}
+ result = ps3_open_hv_device(dev);
+
+ if (result) {
+ dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ result = -EPERM;
+ goto fail_open;
+ }
+
+ result = ps3_dma_region_create(dev->d_region);
+
+ if (result) {
+ dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
+ "(%d)\n", __func__, __LINE__, result);
+ BUG_ON("check region type");
+ goto fail_dma_region;
+ }
+
result = ps3_mmio_region_create(dev->m_region);
if (result) {
dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
__func__, __LINE__);
result = -EPERM;
- goto fail_mmio;
+ goto fail_mmio_region;
}
dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
@@ -122,6 +141,11 @@ static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev)
hcd->rsrc_start = dev->m_region->lpar_addr;
hcd->rsrc_len = dev->m_region->len;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
+ dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
+ __func__, __LINE__);
+
hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
if (!hcd->regs) {
@@ -155,34 +179,73 @@ static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev)
fail_add_hcd:
iounmap(hcd->regs);
fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
fail_create_hcd:
ps3_io_irq_destroy(virq);
fail_irq:
ps3_free_mmio_region(dev->m_region);
-fail_mmio:
+fail_mmio_region:
+ ps3_dma_region_free(dev->d_region);
+fail_dma_region:
+ ps3_close_hv_device(dev);
+fail_open:
fail_start:
return result;
}
-static int ps3_ohci_sb_remove (struct ps3_system_bus_device *dev)
+static int ps3_ohci_remove (struct ps3_system_bus_device *dev)
{
+ unsigned int tmp;
struct usb_hcd *hcd =
(struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
- usb_put_hcd(hcd);
+ BUG_ON(!hcd);
+
+ dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs);
+ dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq);
+
+ tmp = hcd->irq;
+
+ usb_remove_hcd(hcd);
+
ps3_system_bus_set_driver_data(dev, NULL);
+ BUG_ON(!hcd->regs);
+ iounmap(hcd->regs);
+
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ ps3_io_irq_destroy(tmp);
+ ps3_free_mmio_region(dev->m_region);
+
+ ps3_dma_region_free(dev->d_region);
+ ps3_close_hv_device(dev);
+
return 0;
}
-MODULE_ALIAS("ps3-ohci");
+static int ps3_ohci_driver_register(struct ps3_system_bus_driver *drv)
+{
+ return firmware_has_feature(FW_FEATURE_PS3_LV1)
+ ? ps3_system_bus_driver_register(drv)
+ : 0;
+}
+
+static void ps3_ohci_driver_unregister(struct ps3_system_bus_driver *drv)
+{
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1))
+ ps3_system_bus_driver_unregister(drv);
+}
+
+MODULE_ALIAS(PS3_MODULE_ALIAS_OHCI);
-static struct ps3_system_bus_driver ps3_ohci_sb_driver = {
+static struct ps3_system_bus_driver ps3_ohci_driver = {
+ .core.name = "ps3-ohci-driver",
+ .core.owner = THIS_MODULE,
.match_id = PS3_MATCH_ID_OHCI,
- .core = {
- .name = "ps3-ohci-driver",
- },
- .probe = ps3_ohci_sb_probe,
- .remove = ps3_ohci_sb_remove,
+ .probe = ps3_ohci_probe,
+ .remove = ps3_ohci_remove,
+ .shutdown = ps3_ohci_remove,
};
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index c2b5ecfe5e9f..4ada43cf1387 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -397,8 +397,10 @@ struct ohci_hcd {
#define OHCI_QUIRK_BE_DESC 0x08 /* BE descriptors */
#define OHCI_QUIRK_BE_MMIO 0x10 /* BE registers */
#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
+#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
// there are also chip quirks/bugs in init logic
+ struct work_struct nec_work; /* Worker for NEC quirk */
};
/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
new file mode 100644
index 000000000000..d60f1985320c
--- /dev/null
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -0,0 +1,2242 @@
+/*
+ * R8A66597 HCD (Host Controller Driver)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ * Portions Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Portions Copyright (C) 2004-2005 David Brownell
+ * Portions Copyright (C) 1999 Roman Weissgaerber
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include "../core/hcd.h"
+#include "r8a66597.h"
+
+MODULE_DESCRIPTION("R8A66597 USB Host Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+
+#define DRIVER_VERSION "29 May 2007"
+
+static const char hcd_name[] = "r8a66597_hcd";
+
+/* module parameters */
+static unsigned short clock = XTAL12;
+module_param(clock, ushort, 0644);
+MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
+ "(default=0)");
+
+static unsigned short vif = LDRV;
+module_param(vif, ushort, 0644);
+MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
+
+static unsigned short endian;
+module_param(endian, ushort, 0644);
+MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
+
+static unsigned short irq_sense = INTL;
+module_param(irq_sense, ushort, 0644);
+MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
+ "(default=32)");
+
+static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
+static int r8a66597_get_frame(struct usb_hcd *hcd);
+
+/* this function must be called with interrupt disabled */
+static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, INTENB0);
+ r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
+ r8a66597_bset(r8a66597, 1 << pipenum, reg);
+ r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+/* this function must be called with interrupt disabled */
+static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
+ unsigned long reg)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, INTENB0);
+ r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
+ r8a66597_bclr(r8a66597, 1 << pipenum, reg);
+ r8a66597_write(r8a66597, tmp, INTENB0);
+}
+
+static void set_devadd_reg(struct r8a66597 *r8a66597, u8 r8a66597_address,
+ u16 usbspd, u8 upphub, u8 hubport, int port)
+{
+ u16 val;
+ unsigned long devadd_reg = get_devadd_addr(r8a66597_address);
+
+ val = (upphub << 11) | (hubport << 8) | (usbspd << 6) | (port & 0x0001);
+ r8a66597_write(r8a66597, val, devadd_reg);
+}
+
+static int enable_controller(struct r8a66597 *r8a66597)
+{
+ u16 tmp;
+ int i = 0;
+
+ do {
+ r8a66597_write(r8a66597, USBE, SYSCFG0);
+ tmp = r8a66597_read(r8a66597, SYSCFG0);
+ if (i++ > 1000) {
+ err("register access fail.");
+ return -ENXIO;
+ }
+ } while ((tmp & USBE) != USBE);
+ r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+ r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0);
+
+ i = 0;
+ r8a66597_bset(r8a66597, XCKE, SYSCFG0);
+ do {
+ msleep(1);
+ tmp = r8a66597_read(r8a66597, SYSCFG0);
+ if (i++ > 500) {
+ err("register access fail.");
+ return -ENXIO;
+ }
+ } while ((tmp & SCKE) != SCKE);
+
+ r8a66597_bset(r8a66597, DCFM | DRPD, SYSCFG0);
+ r8a66597_bset(r8a66597, DRPD, SYSCFG1);
+
+ r8a66597_bset(r8a66597, vif & LDRV, PINCFG);
+ r8a66597_bset(r8a66597, HSE, SYSCFG0);
+ r8a66597_bset(r8a66597, HSE, SYSCFG1);
+ r8a66597_bset(r8a66597, USBE, SYSCFG0);
+
+ r8a66597_bset(r8a66597, BEMPE | NRDYE | BRDYE, INTENB0);
+ r8a66597_bset(r8a66597, irq_sense & INTL, SOFCFG);
+ r8a66597_bset(r8a66597, BRDY0, BRDYENB);
+ r8a66597_bset(r8a66597, BEMP0, BEMPENB);
+
+ r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR, DMA0CFG);
+ r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR, DMA1CFG);
+
+ r8a66597_bset(r8a66597, endian & BIGEND, CFIFOSEL);
+ r8a66597_bset(r8a66597, endian & BIGEND, D0FIFOSEL);
+ r8a66597_bset(r8a66597, endian & BIGEND, D1FIFOSEL);
+
+ r8a66597_bset(r8a66597, TRNENSEL, SOFCFG);
+
+ r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1);
+ r8a66597_bclr(r8a66597, DTCHE, INTENB1);
+ r8a66597_bset(r8a66597, ATTCHE, INTENB1);
+ r8a66597_bclr(r8a66597, DTCHE, INTENB2);
+ r8a66597_bset(r8a66597, ATTCHE, INTENB2);
+
+ return 0;
+}
+
+static void disable_controller(struct r8a66597 *r8a66597)
+{
+ u16 tmp;
+
+ r8a66597_write(r8a66597, 0, INTENB0);
+ r8a66597_write(r8a66597, 0, INTENB1);
+ r8a66597_write(r8a66597, 0, INTENB2);
+ r8a66597_write(r8a66597, 0, INTSTS0);
+ r8a66597_write(r8a66597, 0, INTSTS1);
+ r8a66597_write(r8a66597, 0, INTSTS2);
+
+ r8a66597_port_power(r8a66597, 0, 0);
+ r8a66597_port_power(r8a66597, 1, 0);
+
+ do {
+ tmp = r8a66597_read(r8a66597, SOFCFG) & EDGESTS;
+ udelay(640);
+ } while (tmp == EDGESTS);
+
+ r8a66597_bclr(r8a66597, DCFM | DRPD, SYSCFG0);
+ r8a66597_bclr(r8a66597, DRPD, SYSCFG1);
+ r8a66597_bclr(r8a66597, HSE, SYSCFG0);
+ r8a66597_bclr(r8a66597, HSE, SYSCFG1);
+
+ r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
+ udelay(1);
+ r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
+ r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
+ r8a66597_bclr(r8a66597, USBE, SYSCFG0);
+}
+
+static int get_parent_r8a66597_address(struct r8a66597 *r8a66597,
+ struct usb_device *udev)
+{
+ struct r8a66597_device *dev;
+
+ if (udev->parent && udev->parent->devnum != 1)
+ udev = udev->parent;
+
+ dev = dev_get_drvdata(&udev->dev);
+ if (dev)
+ return dev->address;
+ else
+ return 0;
+}
+
+static int is_child_device(char *devpath)
+{
+ return (devpath[2] ? 1 : 0);
+}
+
+static int is_hub_limit(char *devpath)
+{
+ return ((strlen(devpath) >= 4) ? 1 : 0);
+}
+
+static void get_port_number(char *devpath, u16 *root_port, u16 *hub_port)
+{
+ if (root_port) {
+ *root_port = (devpath[0] & 0x0F) - 1;
+ if (*root_port >= R8A66597_MAX_ROOT_HUB)
+ err("illegal root port number");
+ }
+ if (hub_port)
+ *hub_port = devpath[2] & 0x0F;
+}
+
+static u16 get_r8a66597_usb_speed(enum usb_device_speed speed)
+{
+ u16 usbspd = 0;
+
+ switch (speed) {
+ case USB_SPEED_LOW:
+ usbspd = LSMODE;
+ break;
+ case USB_SPEED_FULL:
+ usbspd = FSMODE;
+ break;
+ case USB_SPEED_HIGH:
+ usbspd = HSMODE;
+ break;
+ default:
+ err("unknown speed");
+ break;
+ }
+
+ return usbspd;
+}
+
+static void set_child_connect_map(struct r8a66597 *r8a66597, int address)
+{
+ int idx;
+
+ idx = address / 32;
+ r8a66597->child_connect_map[idx] |= 1 << (address % 32);
+}
+
+static void put_child_connect_map(struct r8a66597 *r8a66597, int address)
+{
+ int idx;
+
+ idx = address / 32;
+ r8a66597->child_connect_map[idx] &= ~(1 << (address % 32));
+}
+
+static void set_pipe_reg_addr(struct r8a66597_pipe *pipe, u8 dma_ch)
+{
+ u16 pipenum = pipe->info.pipenum;
+ unsigned long fifoaddr[] = {D0FIFO, D1FIFO, CFIFO};
+ unsigned long fifosel[] = {D0FIFOSEL, D1FIFOSEL, CFIFOSEL};
+ unsigned long fifoctr[] = {D0FIFOCTR, D1FIFOCTR, CFIFOCTR};
+
+ if (dma_ch > R8A66597_PIPE_NO_DMA) /* dma fifo not use? */
+ dma_ch = R8A66597_PIPE_NO_DMA;
+
+ pipe->fifoaddr = fifoaddr[dma_ch];
+ pipe->fifosel = fifosel[dma_ch];
+ pipe->fifoctr = fifoctr[dma_ch];
+
+ if (pipenum == 0)
+ pipe->pipectr = DCPCTR;
+ else
+ pipe->pipectr = get_pipectr_addr(pipenum);
+
+ if (check_bulk_or_isoc(pipenum)) {
+ pipe->pipetre = get_pipetre_addr(pipenum);
+ pipe->pipetrn = get_pipetrn_addr(pipenum);
+ } else {
+ pipe->pipetre = 0;
+ pipe->pipetrn = 0;
+ }
+}
+
+static struct r8a66597_device *
+get_urb_to_r8a66597_dev(struct r8a66597 *r8a66597, struct urb *urb)
+{
+ if (usb_pipedevice(urb->pipe) == 0)
+ return &r8a66597->device0;
+
+ return dev_get_drvdata(&urb->dev->dev);
+}
+
+static int make_r8a66597_device(struct r8a66597 *r8a66597,
+ struct urb *urb, u8 addr)
+{
+ struct r8a66597_device *dev;
+ int usb_address = urb->setup_packet[2]; /* urb->pipe is address 0 */
+
+ dev = kzalloc(sizeof(struct r8a66597_device), GFP_ATOMIC);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&urb->dev->dev, dev);
+ dev->udev = urb->dev;
+ dev->address = addr;
+ dev->usb_address = usb_address;
+ dev->state = USB_STATE_ADDRESS;
+ dev->ep_in_toggle = 0;
+ dev->ep_out_toggle = 0;
+ INIT_LIST_HEAD(&dev->device_list);
+ list_add_tail(&dev->device_list, &r8a66597->child_device);
+
+ get_port_number(urb->dev->devpath, &dev->root_port, &dev->hub_port);
+ if (!is_child_device(urb->dev->devpath))
+ r8a66597->root_hub[dev->root_port].dev = dev;
+
+ set_devadd_reg(r8a66597, dev->address,
+ get_r8a66597_usb_speed(urb->dev->speed),
+ get_parent_r8a66597_address(r8a66597, urb->dev),
+ dev->hub_port, dev->root_port);
+
+ return 0;
+}
+
+/* this function must be called with interrupt disabled */
+static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
+{
+ u8 addr; /* R8A66597's address */
+ struct r8a66597_device *dev;
+
+ if (is_hub_limit(urb->dev->devpath)) {
+ err("Externel hub limit reached.");
+ return 0;
+ }
+
+ dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+ if (dev && dev->state >= USB_STATE_ADDRESS)
+ return dev->address;
+
+ for (addr = 1; addr <= R8A66597_MAX_DEVICE; addr++) {
+ if (r8a66597->address_map & (1 << addr))
+ continue;
+
+ dbg("alloc_address: r8a66597_addr=%d", addr);
+ r8a66597->address_map |= 1 << addr;
+
+ if (make_r8a66597_device(r8a66597, urb, addr) < 0)
+ return 0;
+
+ return addr;
+ }
+
+ err("cannot communicate with a USB device more than 10.(%x)",
+ r8a66597->address_map);
+
+ return 0;
+}
+
+/* this function must be called with interrupt disabled */
+static void free_usb_address(struct r8a66597 *r8a66597,
+ struct r8a66597_device *dev)
+{
+ int port;
+
+ if (!dev)
+ return;
+
+ dbg("free_addr: addr=%d", dev->address);
+
+ dev->state = USB_STATE_DEFAULT;
+ r8a66597->address_map &= ~(1 << dev->address);
+ dev->address = 0;
+ dev_set_drvdata(&dev->udev->dev, NULL);
+ list_del(&dev->device_list);
+ kfree(dev);
+
+ for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) {
+ if (r8a66597->root_hub[port].dev == dev) {
+ r8a66597->root_hub[port].dev = NULL;
+ break;
+ }
+ }
+}
+
+static void r8a66597_reg_wait(struct r8a66597 *r8a66597, unsigned long reg,
+ u16 mask, u16 loop)
+{
+ u16 tmp;
+ int i = 0;
+
+ do {
+ tmp = r8a66597_read(r8a66597, reg);
+ if (i++ > 1000000) {
+ err("register%lx, loop %x is timeout", reg, loop);
+ break;
+ }
+ ndelay(1);
+ } while ((tmp & mask) != loop);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_start(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, pipe->pipectr) & PID;
+ if ((pipe->info.pipenum != 0) & ((tmp & PID_STALL) != 0)) /* stall? */
+ r8a66597_mdfy(r8a66597, PID_NAK, PID, pipe->pipectr);
+ r8a66597_mdfy(r8a66597, PID_BUF, PID, pipe->pipectr);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_stop(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe)
+{
+ u16 tmp;
+
+ tmp = r8a66597_read(r8a66597, pipe->pipectr) & PID;
+ if ((tmp & PID_STALL11) != PID_STALL11) /* force stall? */
+ r8a66597_mdfy(r8a66597, PID_STALL, PID, pipe->pipectr);
+ r8a66597_mdfy(r8a66597, PID_NAK, PID, pipe->pipectr);
+ r8a66597_reg_wait(r8a66597, pipe->pipectr, PBUSY, 0);
+}
+
+/* this function must be called with interrupt disabled */
+static void clear_all_buffer(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe *pipe)
+{
+ u16 tmp;
+
+ if (!pipe || pipe->info.pipenum == 0)
+ return;
+
+ pipe_stop(r8a66597, pipe);
+ r8a66597_bset(r8a66597, ACLRM, pipe->pipectr);
+ tmp = r8a66597_read(r8a66597, pipe->pipectr);
+ tmp = r8a66597_read(r8a66597, pipe->pipectr);
+ tmp = r8a66597_read(r8a66597, pipe->pipectr);
+ r8a66597_bclr(r8a66597, ACLRM, pipe->pipectr);
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_pipe_toggle(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe *pipe, int toggle)
+{
+ if (toggle)
+ r8a66597_bset(r8a66597, SQSET, pipe->pipectr);
+ else
+ r8a66597_bset(r8a66597, SQCLR, pipe->pipectr);
+}
+
+/* this function must be called with interrupt disabled */
+static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ r8a66597_mdfy(r8a66597, MBW | pipenum, MBW | CURPIPE, CFIFOSEL);
+ r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum);
+}
+
+/* this function must be called with interrupt disabled */
+static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe *pipe)
+{
+ cfifo_change(r8a66597, 0);
+ r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D0FIFOSEL);
+ r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D1FIFOSEL);
+
+ r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, MBW | CURPIPE,
+ pipe->fifosel);
+ r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum);
+}
+
+static u16 r8a66597_get_pipenum(struct urb *urb, struct usb_host_endpoint *hep)
+{
+ struct r8a66597_pipe *pipe = hep->hcpriv;
+
+ if (usb_pipeendpoint(urb->pipe) == 0)
+ return 0;
+ else
+ return pipe->info.pipenum;
+}
+
+static u16 get_urb_to_r8a66597_addr(struct r8a66597 *r8a66597, struct urb *urb)
+{
+ struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+
+ return (usb_pipedevice(urb->pipe) == 0) ? 0 : dev->address;
+}
+
+static unsigned short *get_toggle_pointer(struct r8a66597_device *dev,
+ int urb_pipe)
+{
+ if (!dev)
+ return NULL;
+
+ return usb_pipein(urb_pipe) ? &dev->ep_in_toggle : &dev->ep_out_toggle;
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_toggle_set(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe *pipe,
+ struct urb *urb, int set)
+{
+ struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+ unsigned char endpoint = usb_pipeendpoint(urb->pipe);
+ unsigned short *toggle = get_toggle_pointer(dev, urb->pipe);
+
+ if (!toggle)
+ return;
+
+ if (set)
+ *toggle |= 1 << endpoint;
+ else
+ *toggle &= ~(1 << endpoint);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_toggle_save(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe *pipe,
+ struct urb *urb)
+{
+ if (r8a66597_read(r8a66597, pipe->pipectr) & SQMON)
+ pipe_toggle_set(r8a66597, pipe, urb, 1);
+ else
+ pipe_toggle_set(r8a66597, pipe, urb, 0);
+}
+
+/* this function must be called with interrupt disabled */
+static void pipe_toggle_restore(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe *pipe,
+ struct urb *urb)
+{
+ struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+ unsigned char endpoint = usb_pipeendpoint(urb->pipe);
+ unsigned short *toggle = get_toggle_pointer(dev, urb->pipe);
+
+ if (!toggle)
+ return;
+
+ r8a66597_pipe_toggle(r8a66597, pipe, *toggle & (1 << endpoint));
+}
+
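+/*
+ * Program the hardware pipe described by @info: reset its FIFO with
+ * ACLRM, then set PIPECFG (type, direction, endpoint), PIPEBUF (buffer
+ * number and size), PIPEMAXP (device address and max packet size) and
+ * PIPEPERI (interval).
+ */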
+/* this function must be called with interrupt disabled */
+static void pipe_buffer_setting(struct r8a66597 *r8a66597,
+ struct r8a66597_pipe_info *info)
+{
+ u16 val = 0;
+
+ if (info->pipenum == 0)
+ return;
+
+ r8a66597_bset(r8a66597, ACLRM, get_pipectr_addr(info->pipenum));
+ r8a66597_bclr(r8a66597, ACLRM, get_pipectr_addr(info->pipenum));
+ r8a66597_write(r8a66597, info->pipenum, PIPESEL);
+ if (!info->dir_in)
+ val |= R8A66597_DIR;
+ if (info->type == R8A66597_BULK && info->dir_in)
+ val |= R8A66597_DBLB | R8A66597_SHTNAK;
+ val |= info->type | info->epnum;
+ r8a66597_write(r8a66597, val, PIPECFG);
+
+ r8a66597_write(r8a66597, (info->buf_bsize << 10) | (info->bufnum),
+ PIPEBUF);
+ r8a66597_write(r8a66597, make_devsel(info->address) | info->maxpacket,
+ PIPEMAXP);
+ if (info->interval)
+ info->interval--;
+ r8a66597_write(r8a66597, info->interval, PIPEPERI);
+}
+
+
+
+/* this function must be called with interrupt disabled */
+static void pipe_setting(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+{
+ struct r8a66597_pipe_info *info;
+ struct urb *urb = td->urb;
+
+ if (td->pipenum > 0) {
+ info = &td->pipe->info;
+ cfifo_change(r8a66597, 0);
+ pipe_buffer_setting(r8a66597, info);
+
+ if (!usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe)) &&
+ !usb_pipecontrol(urb->pipe)) {
+ r8a66597_pipe_toggle(r8a66597, td->pipe, 0);
+ pipe_toggle_set(r8a66597, td->pipe, urb, 0);
+ clear_all_buffer(r8a66597, td->pipe);
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe), 1);
+ }
+ pipe_toggle_restore(r8a66597, td->pipe, urb);
+ }
+}
+
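+/*
+ * The controller's pipes have fixed roles: 1/2 isochronous, 3-5 bulk,
+ * 6-9 interrupt.  Collect the pipes usable for this endpoint's type
+ * and direction and return the one with the lowest usage count.
+ */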
+/* this function must be called with interrupt disabled */
+static u16 get_empty_pipenum(struct r8a66597 *r8a66597,
+ struct usb_endpoint_descriptor *ep)
+{
+ u16 array[R8A66597_MAX_NUM_PIPE], i = 0, min;
+
+ memset(array, 0, sizeof(array));
+ switch (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ array[i++] = 4;
+ else {
+ array[i++] = 3;
+ array[i++] = 5;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
+ array[i++] = 6;
+ array[i++] = 7;
+ array[i++] = 8;
+ } else
+ array[i++] = 9;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ array[i++] = 2;
+ else
+ array[i++] = 1;
+ break;
+ default:
+ err("Illegal type");
+ return 0;
+ }
+
+ i = 1;
+ min = array[0];
+ while (array[i] != 0) {
+ if (r8a66597->pipe_cnt[min] > r8a66597->pipe_cnt[array[i]])
+ min = array[i];
+ i++;
+ }
+
+ return min;
+}
+
+static u16 get_r8a66597_type(__u8 type)
+{
+ u16 r8a66597_type;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_BULK:
+ r8a66597_type = R8A66597_BULK;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ r8a66597_type = R8A66597_INT;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ r8a66597_type = R8A66597_ISO;
+ break;
+ default:
+ err("Illegal type");
+ r8a66597_type = 0x0000;
+ break;
+ }
+
+ return r8a66597_type;
+}
+
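+/*
+ * FIFO buffer layout: bulk and isochronous pipes each get a double
+ * buffer of R8A66597_BUF_BSIZE 64-byte blocks starting at block 8;
+ * interrupt pipes use single 64-byte buffers in blocks 4-7.
+ */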
+static u16 get_bufnum(u16 pipenum)
+{
+ u16 bufnum = 0;
+
+ if (pipenum == 0)
+ bufnum = 0;
+ else if (check_bulk_or_isoc(pipenum))
+		bufnum = 8 + (pipenum - 1) * R8A66597_BUF_BSIZE * 2;
+ else if (check_interrupt(pipenum))
+ bufnum = 4 + (pipenum - 6);
+ else
+ err("Illegal pipenum (%d)", pipenum);
+
+ return bufnum;
+}
+
+static u16 get_buf_bsize(u16 pipenum)
+{
+ u16 buf_bsize = 0;
+
+ if (pipenum == 0)
+ buf_bsize = 3;
+ else if (check_bulk_or_isoc(pipenum))
+ buf_bsize = R8A66597_BUF_BSIZE - 1;
+ else if (check_interrupt(pipenum))
+ buf_bsize = 0;
+ else
+ err("Illegal pipenum (%d)", pipenum);
+
+ return buf_bsize;
+}
+
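+/*
+ * Bulk and isochronous pipes may be attached to one of the two D0/D1
+ * FIFO ports so they can be served by DMA.  The channel is marked busy
+ * in both the controller-wide and the per-device dma_map and released
+ * again in disable_r8a66597_pipe_all().
+ */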
+/* this function must be called with interrupt disabled */
+static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597,
+ struct r8a66597_device *dev,
+ struct r8a66597_pipe *pipe,
+ struct urb *urb)
+{
+ int i;
+ struct r8a66597_pipe_info *info = &pipe->info;
+
+ if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) {
+ for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) {
+ if ((r8a66597->dma_map & (1 << i)) != 0)
+ continue;
+
+ info("address %d, EndpointAddress 0x%02x use DMA FIFO",
+ usb_pipedevice(urb->pipe),
+ info->dir_in ? USB_ENDPOINT_DIR_MASK + info->epnum
+ : info->epnum);
+
+ r8a66597->dma_map |= 1 << i;
+ dev->dma_map |= 1 << i;
+ set_pipe_reg_addr(pipe, i);
+
+ cfifo_change(r8a66597, 0);
+ r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum,
+ MBW | CURPIPE, pipe->fifosel);
+
+ r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE,
+ pipe->info.pipenum);
+ r8a66597_bset(r8a66597, BCLR, pipe->fifoctr);
+ break;
+ }
+ }
+}
+
+/* this function must be called with interrupt disabled */
+static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
+ struct usb_host_endpoint *hep,
+ struct r8a66597_pipe_info *info)
+{
+ struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+ struct r8a66597_pipe *pipe = hep->hcpriv;
+
+ dbg("enable_pipe:");
+
+ pipe->info = *info;
+ set_pipe_reg_addr(pipe, R8A66597_PIPE_NO_DMA);
+ r8a66597->pipe_cnt[pipe->info.pipenum]++;
+ dev->pipe_cnt[pipe->info.pipenum]++;
+
+ enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
+}
+
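+/*
+ * Drop a transfer descriptor queued on @pipenum that belongs to the
+ * device at @address and give its URB back with -ENODEV.  Used when a
+ * device disappears while requests are still pending.
+ */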
+/* this function must be called with interrupt disabled */
+static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
+{
+ struct r8a66597_td *td, *next;
+ struct urb *urb;
+ struct list_head *list = &r8a66597->pipe_queue[pipenum];
+
+ if (list_empty(list))
+ return;
+
+ list_for_each_entry_safe(td, next, list, queue) {
+ if (!td)
+ continue;
+ if (td->address != address)
+ continue;
+
+ urb = td->urb;
+ list_del(&td->queue);
+ kfree(td);
+
+ if (urb) {
+ urb->status = -ENODEV;
+ urb->hcpriv = NULL;
+ spin_unlock(&r8a66597->lock);
+ usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb);
+ spin_lock(&r8a66597->lock);
+ }
+ break;
+ }
+}
+
+/* this function must be called with interrupt disabled */
+static void disable_r8a66597_pipe_all(struct r8a66597 *r8a66597,
+ struct r8a66597_device *dev)
+{
+ int check_ep0 = 0;
+ u16 pipenum;
+
+ if (!dev)
+ return;
+
+ for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ if (!dev->pipe_cnt[pipenum])
+ continue;
+
+ if (!check_ep0) {
+ check_ep0 = 1;
+ force_dequeue(r8a66597, 0, dev->address);
+ }
+
+ r8a66597->pipe_cnt[pipenum] -= dev->pipe_cnt[pipenum];
+ dev->pipe_cnt[pipenum] = 0;
+ force_dequeue(r8a66597, pipenum, dev->address);
+ }
+
+ dbg("disable_pipe");
+
+ r8a66597->dma_map &= ~(dev->dma_map);
+ dev->dma_map = 0;
+}
+
+/* this function must be called with interrupt disabled */
+static void init_pipe_info(struct r8a66597 *r8a66597, struct urb *urb,
+ struct usb_host_endpoint *hep,
+ struct usb_endpoint_descriptor *ep)
+{
+ struct r8a66597_pipe_info info;
+
+ info.pipenum = get_empty_pipenum(r8a66597, ep);
+ info.address = get_urb_to_r8a66597_addr(r8a66597, urb);
+ info.epnum = ep->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ info.maxpacket = ep->wMaxPacketSize;
+ info.type = get_r8a66597_type(ep->bmAttributes
+ & USB_ENDPOINT_XFERTYPE_MASK);
+ info.bufnum = get_bufnum(info.pipenum);
+ info.buf_bsize = get_buf_bsize(info.pipenum);
+ info.interval = ep->bInterval;
+ if (ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ info.dir_in = 1;
+ else
+ info.dir_in = 0;
+
+ enable_r8a66597_pipe(r8a66597, urb, hep, &info);
+}
+
+static void init_pipe_config(struct r8a66597 *r8a66597, struct urb *urb)
+{
+ struct r8a66597_device *dev;
+
+ dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+ dev->state = USB_STATE_CONFIGURED;
+}
+
+static void pipe_irq_enable(struct r8a66597 *r8a66597, struct urb *urb,
+ u16 pipenum)
+{
+ if (pipenum == 0 && usb_pipeout(urb->pipe))
+ enable_irq_empty(r8a66597, pipenum);
+ else
+ enable_irq_ready(r8a66597, pipenum);
+
+ if (!usb_pipeisoc(urb->pipe))
+ enable_irq_nrdy(r8a66597, pipenum);
+}
+
+static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ disable_irq_ready(r8a66597, pipenum);
+ disable_irq_nrdy(r8a66597, pipenum);
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_usb_preconnect(struct r8a66597 *r8a66597, int port)
+{
+ r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
+ | (1 << USB_PORT_FEAT_C_CONNECTION);
+ r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port));
+ r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port));
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port)
+{
+ u16 speed = get_rh_usb_speed(r8a66597, port);
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+ if (speed == HSMODE)
+ rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ else if (speed == LSMODE)
+ rh->port |= (1 << USB_PORT_FEAT_LOWSPEED);
+
+ rh->port &= ~(1 << USB_PORT_FEAT_RESET);
+ rh->port |= 1 << USB_PORT_FEAT_ENABLE;
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
+{
+ struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
+
+ r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
+ r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
+
+ disable_r8a66597_pipe_all(r8a66597, dev);
+ free_usb_address(r8a66597, dev);
+
+ r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+}
+
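+/*
+ * Load the 8-byte SETUP packet into USBREQ/USBVAL/USBINDX/USBLENG,
+ * clear stale SIGN/SACK status and start the setup stage by writing
+ * SUREQ to DCPCTR.
+ */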
+/* this function must be called with interrupt disabled */
+static void prepare_setup_packet(struct r8a66597 *r8a66597,
+ struct r8a66597_td *td)
+{
+ int i;
+ u16 *p = (u16 *)td->urb->setup_packet;
+ unsigned long setup_addr = USBREQ;
+
+ r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket,
+ DCPMAXP);
+ r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1);
+
+ for (i = 0; i < 4; i++) {
+ r8a66597_write(r8a66597, p[i], setup_addr);
+ setup_addr += 2;
+ }
+ r8a66597_write(r8a66597, SUREQ, DCPCTR);
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_packet_read(struct r8a66597 *r8a66597,
+ struct r8a66597_td *td)
+{
+ struct urb *urb = td->urb;
+
+ if (usb_pipecontrol(urb->pipe)) {
+ r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG);
+ r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL);
+ r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+ if (urb->actual_length == 0) {
+ r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
+ r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+ }
+ pipe_irq_disable(r8a66597, td->pipenum);
+ pipe_start(r8a66597, td->pipe);
+ pipe_irq_enable(r8a66597, urb, td->pipenum);
+ } else {
+ if (urb->actual_length == 0) {
+ pipe_irq_disable(r8a66597, td->pipenum);
+ pipe_setting(r8a66597, td);
+ pipe_stop(r8a66597, td->pipe);
+ r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS);
+
+ if (td->pipe->pipetre) {
+ r8a66597_write(r8a66597, TRCLR,
+ td->pipe->pipetre);
+ r8a66597_write(r8a66597,
+ (urb->transfer_buffer_length
+ + td->maxpacket - 1)
+ / td->maxpacket,
+ td->pipe->pipetrn);
+ r8a66597_bset(r8a66597, TRENB,
+ td->pipe->pipetre);
+ }
+
+ pipe_start(r8a66597, td->pipe);
+ pipe_irq_enable(r8a66597, urb, td->pipenum);
+ }
+ }
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_packet_write(struct r8a66597 *r8a66597,
+ struct r8a66597_td *td)
+{
+ u16 tmp;
+ struct urb *urb = td->urb;
+
+ if (usb_pipecontrol(urb->pipe)) {
+ pipe_stop(r8a66597, td->pipe);
+ r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG);
+ r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL);
+ r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+ if (urb->actual_length == 0) {
+ r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
+ r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+ }
+ } else {
+ if (urb->actual_length == 0)
+ pipe_setting(r8a66597, td);
+ if (td->pipe->pipetre)
+ r8a66597_bclr(r8a66597, TRENB, td->pipe->pipetre);
+ }
+ r8a66597_write(r8a66597, ~(1 << td->pipenum), BRDYSTS);
+
+ fifo_change_from_pipe(r8a66597, td->pipe);
+ tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
+ if (unlikely((tmp & FRDY) == 0))
+ pipe_irq_enable(r8a66597, urb, td->pipenum);
+ else
+ packet_write(r8a66597, td->pipenum);
+ pipe_start(r8a66597, td->pipe);
+}
+
+/* this function must be called with interrupt disabled */
+static void prepare_status_packet(struct r8a66597 *r8a66597,
+ struct r8a66597_td *td)
+{
+ struct urb *urb = td->urb;
+
+ r8a66597_pipe_toggle(r8a66597, td->pipe, 1);
+ pipe_stop(r8a66597, td->pipe);
+
+ if (urb->setup_packet[0] & USB_ENDPOINT_DIR_MASK) {
+ r8a66597_bset(r8a66597, R8A66597_DIR, DCPCFG);
+ r8a66597_mdfy(r8a66597, ISEL, ISEL | CURPIPE, CFIFOSEL);
+ r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+ r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
+ r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+ r8a66597_write(r8a66597, BVAL, CFIFOCTR);
+ enable_irq_empty(r8a66597, 0);
+ } else {
+ r8a66597_bclr(r8a66597, R8A66597_DIR, DCPCFG);
+ r8a66597_mdfy(r8a66597, 0, ISEL | CURPIPE, CFIFOSEL);
+ r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, 0);
+ r8a66597_write(r8a66597, BCLR, CFIFOCTR);
+ enable_irq_ready(r8a66597, 0);
+ }
+ enable_irq_nrdy(r8a66597, 0);
+ pipe_start(r8a66597, td->pipe);
+}
+
+/* this function must be called with interrupt disabled */
+static int start_transfer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+{
+ BUG_ON(!td);
+
+ switch (td->type) {
+ case USB_PID_SETUP:
+ if (td->urb->setup_packet[1] == USB_REQ_SET_ADDRESS) {
+ td->set_address = 1;
+ td->urb->setup_packet[2] = alloc_usb_address(r8a66597,
+ td->urb);
+ if (td->urb->setup_packet[2] == 0)
+ return -EPIPE;
+ }
+ prepare_setup_packet(r8a66597, td);
+ break;
+ case USB_PID_IN:
+ prepare_packet_read(r8a66597, td);
+ break;
+ case USB_PID_OUT:
+ prepare_packet_write(r8a66597, td);
+ break;
+ case USB_PID_ACK:
+ prepare_status_packet(r8a66597, td);
+ break;
+ default:
+ err("invalid type.");
+ break;
+ }
+
+ return 0;
+}
+
+static int check_transfer_finish(struct r8a66597_td *td, struct urb *urb)
+{
+ if (usb_pipeisoc(urb->pipe)) {
+ if (urb->number_of_packets == td->iso_cnt)
+ return 1;
+ }
+
+ /* control or bulk or interrupt */
+ if ((urb->transfer_buffer_length <= urb->actual_length) ||
+ (td->short_packet) || (td->zero_packet))
+ return 1;
+
+ return 0;
+}
+
+/* this function must be called with interrupt disabled */
+static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
+{
+ unsigned long time;
+
+ BUG_ON(!td);
+
+ if (!list_empty(&r8a66597->pipe_queue[td->pipenum]) &&
+ !usb_pipecontrol(td->urb->pipe) && usb_pipein(td->urb->pipe)) {
+ r8a66597->timeout_map |= 1 << td->pipenum;
+ switch (usb_pipetype(td->urb->pipe)) {
+ case PIPE_INTERRUPT:
+ case PIPE_ISOCHRONOUS:
+ time = 30;
+ break;
+ default:
+ time = 300;
+ break;
+ }
+
+ mod_timer(&r8a66597->td_timer[td->pipenum],
+ jiffies + msecs_to_jiffies(time));
+ }
+}
+
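+/*
+ * Complete a transfer descriptor: save the data toggle, free the td,
+ * give the URB back to usbcore (the driver lock is dropped around
+ * usb_hcd_giveback_urb()) and start the next request queued on the
+ * same pipe, if any.
+ */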
+/* this function must be called with interrupt disabled */
+static void done(struct r8a66597 *r8a66597, struct r8a66597_td *td,
+ u16 pipenum, struct urb *urb)
+{
+ int restart = 0;
+ struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
+
+ r8a66597->timeout_map &= ~(1 << pipenum);
+
+ if (likely(td)) {
+ if (td->set_address && urb->status != 0)
+ r8a66597->address_map &= ~(1 << urb->setup_packet[2]);
+
+ pipe_toggle_save(r8a66597, td->pipe, urb);
+ list_del(&td->queue);
+ kfree(td);
+ }
+
+ if (!list_empty(&r8a66597->pipe_queue[pipenum]))
+ restart = 1;
+
+ if (likely(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ urb->start_frame = r8a66597_get_frame(hcd);
+
+ urb->hcpriv = NULL;
+ spin_unlock(&r8a66597->lock);
+ usb_hcd_giveback_urb(hcd, urb);
+ spin_lock(&r8a66597->lock);
+ }
+
+ if (restart) {
+ td = r8a66597_get_td(r8a66597, pipenum);
+ if (unlikely(!td))
+ return;
+
+ start_transfer(r8a66597, td);
+ set_td_timer(r8a66597, td);
+ }
+}
+
+/* this function must be called with interrupt disabled */
+static void finish_request(struct r8a66597 *r8a66597, struct r8a66597_td *td,
+ u16 pipenum, struct urb *urb)
+__releases(r8a66597->lock) __acquires(r8a66597->lock)
+{
+ done(r8a66597, td, pipenum, urb);
+}
+
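+/*
+ * BRDY handler for IN transfers: get the received length from the FIFO
+ * control register, copy the data into the URB buffer and finish the
+ * request once a short or final packet has arrived.
+ */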
+static void packet_read(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ u16 tmp;
+ int rcv_len, bufsize, urb_len, size;
+ u16 *buf;
+ struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
+ struct urb *urb;
+ int finish = 0;
+
+ if (unlikely(!td))
+ return;
+ urb = td->urb;
+
+ fifo_change_from_pipe(r8a66597, td->pipe);
+ tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
+ if (unlikely((tmp & FRDY) == 0)) {
+ urb->status = -EPIPE;
+ pipe_stop(r8a66597, td->pipe);
+ pipe_irq_disable(r8a66597, pipenum);
+		err("IN fifo not ready (pipe %d)", pipenum);
+ finish_request(r8a66597, td, pipenum, td->urb);
+ return;
+ }
+
+ /* prepare parameters */
+ rcv_len = tmp & DTLN;
+ bufsize = td->maxpacket;
+ if (usb_pipeisoc(urb->pipe)) {
+ buf = (u16 *)(urb->transfer_buffer +
+ urb->iso_frame_desc[td->iso_cnt].offset);
+ urb_len = urb->iso_frame_desc[td->iso_cnt].length;
+ } else {
+ buf = (void *)urb->transfer_buffer + urb->actual_length;
+ urb_len = urb->transfer_buffer_length - urb->actual_length;
+ }
+ if (rcv_len < bufsize)
+ size = min(rcv_len, urb_len);
+ else
+ size = min(bufsize, urb_len);
+
+ /* update parameters */
+ urb->actual_length += size;
+ if (rcv_len == 0)
+ td->zero_packet = 1;
+ if ((size % td->maxpacket) > 0) {
+ td->short_packet = 1;
+ if (urb->transfer_buffer_length != urb->actual_length &&
+ urb->transfer_flags & URB_SHORT_NOT_OK)
+ td->urb->status = -EREMOTEIO;
+ }
+ if (usb_pipeisoc(urb->pipe)) {
+ urb->iso_frame_desc[td->iso_cnt].actual_length = size;
+ urb->iso_frame_desc[td->iso_cnt].status = 0;
+ td->iso_cnt++;
+ }
+
+ /* check transfer finish */
+ if (check_transfer_finish(td, urb)) {
+ pipe_stop(r8a66597, td->pipe);
+ pipe_irq_disable(r8a66597, pipenum);
+ finish = 1;
+ }
+
+ /* read fifo */
+ if (urb->transfer_buffer) {
+ if (size == 0)
+ r8a66597_write(r8a66597, BCLR, td->pipe->fifoctr);
+ else
+ r8a66597_read_fifo(r8a66597, td->pipe->fifoaddr,
+ buf, size);
+ }
+
+ if (finish && pipenum != 0) {
+ if (td->urb->status == -EINPROGRESS)
+ td->urb->status = 0;
+ finish_request(r8a66597, td, pipenum, urb);
+ }
+}
+
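+/*
+ * BRDY handler for OUT transfers: copy the next chunk of the URB
+ * buffer into the pipe FIFO.  After the last chunk the driver switches
+ * from the BRDY to the BEMP (buffer empty) interrupt to detect
+ * completion.
+ */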
+static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ u16 tmp;
+ int bufsize, size;
+ u16 *buf;
+ struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
+ struct urb *urb;
+
+ if (unlikely(!td))
+ return;
+ urb = td->urb;
+
+ fifo_change_from_pipe(r8a66597, td->pipe);
+ tmp = r8a66597_read(r8a66597, td->pipe->fifoctr);
+ if (unlikely((tmp & FRDY) == 0)) {
+ urb->status = -EPIPE;
+ pipe_stop(r8a66597, td->pipe);
+ pipe_irq_disable(r8a66597, pipenum);
+		err("OUT fifo not ready (pipe %d)", pipenum);
+ finish_request(r8a66597, td, pipenum, td->urb);
+ return;
+ }
+
+ /* prepare parameters */
+ bufsize = td->maxpacket;
+ if (usb_pipeisoc(urb->pipe)) {
+ buf = (u16 *)(urb->transfer_buffer +
+ urb->iso_frame_desc[td->iso_cnt].offset);
+ size = min(bufsize,
+ (int)urb->iso_frame_desc[td->iso_cnt].length);
+ } else {
+ buf = (u16 *)(urb->transfer_buffer + urb->actual_length);
+ size = min((int)bufsize,
+ urb->transfer_buffer_length - urb->actual_length);
+ }
+
+ /* write fifo */
+ if (pipenum > 0)
+ r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS);
+ if (urb->transfer_buffer) {
+ r8a66597_write_fifo(r8a66597, td->pipe->fifoaddr, buf, size);
+ if (!usb_pipebulk(urb->pipe) || td->maxpacket != size)
+ r8a66597_write(r8a66597, BVAL, td->pipe->fifoctr);
+ }
+
+ /* update parameters */
+ urb->actual_length += size;
+ if (usb_pipeisoc(urb->pipe)) {
+ urb->iso_frame_desc[td->iso_cnt].actual_length = size;
+ urb->iso_frame_desc[td->iso_cnt].status = 0;
+ td->iso_cnt++;
+ }
+
+ /* check transfer finish */
+ if (check_transfer_finish(td, urb)) {
+ disable_irq_ready(r8a66597, pipenum);
+ enable_irq_empty(r8a66597, pipenum);
+ if (!usb_pipeisoc(urb->pipe))
+ enable_irq_nrdy(r8a66597, pipenum);
+ } else
+ pipe_irq_enable(r8a66597, urb, pipenum);
+}
+
+
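+/*
+ * Control transfer state machine for pipe 0: SETUP is followed by an
+ * IN or OUT data stage (skipped when there is no data) and finally by
+ * the ACK status stage, after which the URB is given back.
+ */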
+static void check_next_phase(struct r8a66597 *r8a66597)
+{
+ struct r8a66597_td *td = r8a66597_get_td(r8a66597, 0);
+ struct urb *urb;
+ u8 finish = 0;
+
+ if (unlikely(!td))
+ return;
+ urb = td->urb;
+
+ switch (td->type) {
+ case USB_PID_IN:
+ case USB_PID_OUT:
+ if (urb->status != -EINPROGRESS) {
+ finish = 1;
+ break;
+ }
+ if (check_transfer_finish(td, urb))
+ td->type = USB_PID_ACK;
+ break;
+ case USB_PID_SETUP:
+ if (urb->status != -EINPROGRESS)
+ finish = 1;
+ else if (urb->transfer_buffer_length == urb->actual_length) {
+ td->type = USB_PID_ACK;
+ urb->status = 0;
+ } else if (usb_pipeout(urb->pipe))
+ td->type = USB_PID_OUT;
+ else
+ td->type = USB_PID_IN;
+ break;
+ case USB_PID_ACK:
+ finish = 1;
+ if (urb->status == -EINPROGRESS)
+ urb->status = 0;
+ break;
+ }
+
+ if (finish)
+ finish_request(r8a66597, td, 0, urb);
+ else
+ start_transfer(r8a66597, td);
+}
+
+static void set_urb_error(struct r8a66597 *r8a66597, u16 pipenum)
+{
+ struct r8a66597_td *td = r8a66597_get_td(r8a66597, pipenum);
+
+ if (td && td->urb) {
+ u16 pid = r8a66597_read(r8a66597, td->pipe->pipectr) & PID;
+
+ if (pid == PID_NAK)
+ td->urb->status = -ECONNRESET;
+ else
+ td->urb->status = -EPIPE;
+ }
+}
+
+static void irq_pipe_ready(struct r8a66597 *r8a66597)
+{
+ u16 check;
+ u16 pipenum;
+ u16 mask;
+ struct r8a66597_td *td;
+
+ mask = r8a66597_read(r8a66597, BRDYSTS)
+ & r8a66597_read(r8a66597, BRDYENB);
+ r8a66597_write(r8a66597, ~mask, BRDYSTS);
+ if (mask & BRDY0) {
+ td = r8a66597_get_td(r8a66597, 0);
+ if (td && td->type == USB_PID_IN)
+ packet_read(r8a66597, 0);
+ else
+ pipe_irq_disable(r8a66597, 0);
+ check_next_phase(r8a66597);
+ }
+
+ for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if (mask & check) {
+ td = r8a66597_get_td(r8a66597, pipenum);
+ if (unlikely(!td))
+ continue;
+
+ if (td->type == USB_PID_IN)
+ packet_read(r8a66597, pipenum);
+ else if (td->type == USB_PID_OUT)
+ packet_write(r8a66597, pipenum);
+ }
+ }
+}
+
+static void irq_pipe_empty(struct r8a66597 *r8a66597)
+{
+ u16 tmp;
+ u16 check;
+ u16 pipenum;
+ u16 mask;
+ struct r8a66597_td *td;
+
+ mask = r8a66597_read(r8a66597, BEMPSTS)
+ & r8a66597_read(r8a66597, BEMPENB);
+ r8a66597_write(r8a66597, ~mask, BEMPSTS);
+ if (mask & BEMP0) {
+ cfifo_change(r8a66597, 0);
+ td = r8a66597_get_td(r8a66597, 0);
+ if (td && td->type != USB_PID_OUT)
+ disable_irq_empty(r8a66597, 0);
+ check_next_phase(r8a66597);
+ }
+
+ for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if (mask & check) {
+ struct r8a66597_td *td;
+ td = r8a66597_get_td(r8a66597, pipenum);
+ if (unlikely(!td))
+ continue;
+
+ tmp = r8a66597_read(r8a66597, td->pipe->pipectr);
+ if ((tmp & INBUFM) == 0) {
+ disable_irq_empty(r8a66597, pipenum);
+ pipe_irq_disable(r8a66597, pipenum);
+ if (td->urb->status == -EINPROGRESS)
+ td->urb->status = 0;
+ finish_request(r8a66597, td, pipenum, td->urb);
+ }
+ }
+ }
+}
+
+static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
+{
+ u16 check;
+ u16 pipenum;
+ u16 mask;
+
+ mask = r8a66597_read(r8a66597, NRDYSTS)
+ & r8a66597_read(r8a66597, NRDYENB);
+ r8a66597_write(r8a66597, ~mask, NRDYSTS);
+ if (mask & NRDY0) {
+ cfifo_change(r8a66597, 0);
+ set_urb_error(r8a66597, 0);
+ pipe_irq_disable(r8a66597, 0);
+ check_next_phase(r8a66597);
+ }
+
+ for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ check = 1 << pipenum;
+ if (mask & check) {
+ struct r8a66597_td *td;
+ td = r8a66597_get_td(r8a66597, pipenum);
+ if (unlikely(!td))
+ continue;
+
+ set_urb_error(r8a66597, pipenum);
+ pipe_irq_disable(r8a66597, pipenum);
+ pipe_stop(r8a66597, td->pipe);
+ finish_request(r8a66597, td, pipenum, td->urb);
+ }
+ }
+}
+
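+/*
+ * An attach interrupt only signals a change on the port.  The line
+ * state is then sampled every 50ms until it has been stable for
+ * R8A66597_MAX_SAMPLING consecutive samples (see
+ * r8a66597_root_hub_control()); only then is the connection reported.
+ */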
+static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
+{
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+ rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+ rh->scount = R8A66597_MAX_SAMPLING;
+ mod_timer(&r8a66597->rh_timer, jiffies + msecs_to_jiffies(50));
+}
+
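+/*
+ * Top-level interrupt handler.  Only sources that are both pending and
+ * enabled are serviced: attach/detach on the two root hub ports
+ * (INTSTS1 for port 0, INTSTS2 for port 1), SETUP errors/acks on
+ * pipe 0, and the BRDY/BEMP/NRDY pipe interrupts.
+ */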
+static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ u16 intsts0, intsts1, intsts2;
+ u16 intenb0, intenb1, intenb2;
+ u16 mask0, mask1, mask2;
+
+ spin_lock(&r8a66597->lock);
+
+ intsts0 = r8a66597_read(r8a66597, INTSTS0);
+ intsts1 = r8a66597_read(r8a66597, INTSTS1);
+ intsts2 = r8a66597_read(r8a66597, INTSTS2);
+ intenb0 = r8a66597_read(r8a66597, INTENB0);
+ intenb1 = r8a66597_read(r8a66597, INTENB1);
+ intenb2 = r8a66597_read(r8a66597, INTENB2);
+
+ mask2 = intsts2 & intenb2;
+ mask1 = intsts1 & intenb1;
+ mask0 = intsts0 & intenb0 & (BEMP | NRDY | BRDY);
+ if (mask2) {
+ if (mask2 & ATTCH) {
+ r8a66597_write(r8a66597, ~ATTCH, INTSTS2);
+ r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
+
+ /* start usb bus sampling */
+ start_root_hub_sampling(r8a66597, 1);
+ }
+ if (mask2 & DTCH) {
+ r8a66597_write(r8a66597, ~DTCH, INTSTS2);
+ r8a66597_bclr(r8a66597, DTCHE, INTENB2);
+ r8a66597_usb_disconnect(r8a66597, 1);
+ }
+ }
+
+ if (mask1) {
+ if (mask1 & ATTCH) {
+ r8a66597_write(r8a66597, ~ATTCH, INTSTS1);
+ r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
+
+ /* start usb bus sampling */
+ start_root_hub_sampling(r8a66597, 0);
+ }
+ if (mask1 & DTCH) {
+ r8a66597_write(r8a66597, ~DTCH, INTSTS1);
+ r8a66597_bclr(r8a66597, DTCHE, INTENB1);
+ r8a66597_usb_disconnect(r8a66597, 0);
+ }
+ if (mask1 & SIGN) {
+ r8a66597_write(r8a66597, ~SIGN, INTSTS1);
+ set_urb_error(r8a66597, 0);
+ check_next_phase(r8a66597);
+ }
+ if (mask1 & SACK) {
+ r8a66597_write(r8a66597, ~SACK, INTSTS1);
+ check_next_phase(r8a66597);
+ }
+ }
+ if (mask0) {
+ if (mask0 & BRDY)
+ irq_pipe_ready(r8a66597);
+ if (mask0 & BEMP)
+ irq_pipe_empty(r8a66597);
+ if (mask0 & NRDY)
+ irq_pipe_nrdy(r8a66597);
+ }
+
+ spin_unlock(&r8a66597->lock);
+ return IRQ_HANDLED;
+}
+
+/* this function must be called with interrupt disabled */
+static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
+{
+ u16 tmp;
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+ if (rh->port & (1 << USB_PORT_FEAT_RESET)) {
+ unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+ tmp = r8a66597_read(r8a66597, dvstctr_reg);
+ if ((tmp & USBRST) == USBRST) {
+ r8a66597_mdfy(r8a66597, UACT, USBRST | UACT,
+ dvstctr_reg);
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(50));
+ } else
+ r8a66597_usb_connect(r8a66597, port);
+ }
+
+ if (rh->scount > 0) {
+ tmp = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+ if (tmp == rh->old_syssts) {
+ rh->scount--;
+ if (rh->scount == 0) {
+ if (tmp == FS_JSTS) {
+ r8a66597_bset(r8a66597, HSE,
+ get_syscfg_reg(port));
+ r8a66597_usb_preconnect(r8a66597, port);
+ } else if (tmp == LS_JSTS) {
+ r8a66597_bclr(r8a66597, HSE,
+ get_syscfg_reg(port));
+ r8a66597_usb_preconnect(r8a66597, port);
+ } else if (tmp == SE0)
+ r8a66597_bset(r8a66597, ATTCHE,
+ get_intenb_reg(port));
+ } else {
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ } else {
+ rh->scount = R8A66597_MAX_SAMPLING;
+ rh->old_syssts = tmp;
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ }
+}
+
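+/*
+ * Per-pipe watchdog for IN transfers: if a queued transfer has made no
+ * progress when the timer fires, stop the pipe and rotate its queue so
+ * requests for a different device address get a chance to run.
+ */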
+static void r8a66597_td_timer(unsigned long _r8a66597)
+{
+ struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ unsigned long flags;
+ u16 pipenum;
+ struct r8a66597_td *td, *new_td = NULL;
+ struct r8a66597_pipe *pipe;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
+ if (!(r8a66597->timeout_map & (1 << pipenum)))
+ continue;
+ if (timer_pending(&r8a66597->td_timer[pipenum]))
+ continue;
+
+ td = r8a66597_get_td(r8a66597, pipenum);
+ if (!td) {
+ r8a66597->timeout_map &= ~(1 << pipenum);
+ continue;
+ }
+
+ if (td->urb->actual_length) {
+ set_td_timer(r8a66597, td);
+ break;
+ }
+
+ pipe = td->pipe;
+ pipe_stop(r8a66597, pipe);
+
+ new_td = td;
+ do {
+ list_move_tail(&new_td->queue,
+ &r8a66597->pipe_queue[pipenum]);
+ new_td = r8a66597_get_td(r8a66597, pipenum);
+ if (!new_td) {
+ new_td = td;
+ break;
+ }
+ } while (td != new_td && td->address == new_td->address);
+
+ start_transfer(r8a66597, new_td);
+
+ if (td == new_td)
+ r8a66597->timeout_map &= ~(1 << pipenum);
+ else
+ set_td_timer(r8a66597, new_td);
+ break;
+ }
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static void r8a66597_timer(unsigned long _r8a66597)
+{
+ struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+
+ r8a66597_root_hub_control(r8a66597, 0);
+ r8a66597_root_hub_control(r8a66597, 1);
+
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static int check_pipe_config(struct r8a66597 *r8a66597, struct urb *urb)
+{
+ struct r8a66597_device *dev = get_urb_to_r8a66597_dev(r8a66597, urb);
+
+ if (dev && dev->address && dev->state != USB_STATE_CONFIGURED &&
+ (urb->dev->state == USB_STATE_CONFIGURED))
+ return 1;
+ else
+ return 0;
+}
+
+static int r8a66597_start(struct usb_hcd *hcd)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+
+ hcd->state = HC_STATE_RUNNING;
+ return enable_controller(r8a66597);
+}
+
+static void r8a66597_stop(struct usb_hcd *hcd)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+
+ disable_controller(r8a66597);
+}
+
+static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb)
+{
+ unsigned int usb_address = usb_pipedevice(urb->pipe);
+ u16 root_port, hub_port;
+
+ if (usb_address == 0) {
+ get_port_number(urb->dev->devpath,
+ &root_port, &hub_port);
+ set_devadd_reg(r8a66597, 0,
+ get_r8a66597_usb_speed(urb->dev->speed),
+ get_parent_r8a66597_address(r8a66597, urb->dev),
+ hub_port, root_port);
+ }
+}
+
+static struct r8a66597_td *r8a66597_make_td(struct r8a66597 *r8a66597,
+ struct urb *urb,
+ struct usb_host_endpoint *hep)
+{
+ struct r8a66597_td *td;
+ u16 pipenum;
+
+ td = kzalloc(sizeof(struct r8a66597_td), GFP_ATOMIC);
+ if (td == NULL)
+ return NULL;
+
+ pipenum = r8a66597_get_pipenum(urb, hep);
+ td->pipenum = pipenum;
+ td->pipe = hep->hcpriv;
+ td->urb = urb;
+ td->address = get_urb_to_r8a66597_addr(r8a66597, urb);
+ td->maxpacket = usb_maxpacket(urb->dev, urb->pipe,
+ !usb_pipein(urb->pipe));
+ if (usb_pipecontrol(urb->pipe))
+ td->type = USB_PID_SETUP;
+ else if (usb_pipein(urb->pipe))
+ td->type = USB_PID_IN;
+ else
+ td->type = USB_PID_OUT;
+ INIT_LIST_HEAD(&td->queue);
+
+ return td;
+}
+
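+/*
+ * On the first URB for an endpoint a struct r8a66597_pipe is allocated
+ * into hep->hcpriv and a hardware pipe is assigned.  The URB is then
+ * wrapped in a transfer descriptor and queued; it is started right
+ * away only if the pipe queue was empty, otherwise done() starts it
+ * when the current request completes.
+ */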
+static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
+ struct usb_host_endpoint *hep,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ struct r8a66597_td *td = NULL;
+ int ret = 0, request = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ if (!get_urb_to_r8a66597_dev(r8a66597, urb)) {
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (!hep->hcpriv) {
+ hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe),
+ GFP_ATOMIC);
+ if (!hep->hcpriv) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ set_pipe_reg_addr(hep->hcpriv, R8A66597_PIPE_NO_DMA);
+ if (usb_pipeendpoint(urb->pipe))
+ init_pipe_info(r8a66597, urb, hep, &hep->desc);
+ }
+
+ if (unlikely(check_pipe_config(r8a66597, urb)))
+ init_pipe_config(r8a66597, urb);
+
+ set_address_zero(r8a66597, urb);
+ td = r8a66597_make_td(r8a66597, urb, hep);
+ if (td == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ if (list_empty(&r8a66597->pipe_queue[td->pipenum]))
+ request = 1;
+ list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
+
+ spin_lock(&urb->lock);
+ if (urb->status != -EINPROGRESS) {
+ spin_unlock(&urb->lock);
+ ret = -EPIPE;
+ goto error;
+ }
+ urb->hcpriv = td;
+ spin_unlock(&urb->lock);
+
+ if (request) {
+ ret = start_transfer(r8a66597, td);
+ if (ret < 0) {
+ list_del(&td->queue);
+ kfree(td);
+ }
+ } else
+ set_td_timer(r8a66597, td);
+
+error:
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+ return ret;
+}
+
+static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ struct r8a66597_td *td;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ if (urb->hcpriv) {
+ td = urb->hcpriv;
+ pipe_stop(r8a66597, td->pipe);
+ pipe_irq_disable(r8a66597, td->pipenum);
+ disable_irq_empty(r8a66597, td->pipenum);
+ done(r8a66597, td, td->pipenum, urb);
+ }
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+ return 0;
+}
+
+static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *hep)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
+ struct r8a66597_td *td;
+ struct urb *urb = NULL;
+ u16 pipenum;
+ unsigned long flags;
+
+ if (pipe == NULL)
+ return;
+ pipenum = pipe->info.pipenum;
+
+ if (pipenum == 0) {
+ kfree(hep->hcpriv);
+ hep->hcpriv = NULL;
+ return;
+ }
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ pipe_stop(r8a66597, pipe);
+ pipe_irq_disable(r8a66597, pipenum);
+ disable_irq_empty(r8a66597, pipenum);
+ td = r8a66597_get_td(r8a66597, pipenum);
+ if (td)
+ urb = td->urb;
+ done(r8a66597, td, pipenum, urb);
+ kfree(hep->hcpriv);
+ hep->hcpriv = NULL;
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+}
+
+static int r8a66597_get_frame(struct usb_hcd *hcd)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
+}
+
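+/*
+ * Devices behind an external hub do not trigger the root port
+ * attach/detach interrupts.  hub_status_data therefore walks the USB
+ * tree, builds a bitmap of configured devices sitting behind hubs and
+ * compares it with the previous scan so the pipes and address of a
+ * vanished device can be released.
+ */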
+static void collect_usb_address_map(struct usb_device *udev, unsigned long *map)
+{
+ int chix;
+
+ if (udev->state == USB_STATE_CONFIGURED &&
+ udev->parent && udev->parent->devnum > 1 &&
+ udev->parent->descriptor.bDeviceClass == USB_CLASS_HUB)
+ map[udev->devnum/32] |= (1 << (udev->devnum % 32));
+
+ for (chix = 0; chix < udev->maxchild; chix++) {
+ struct usb_device *childdev = udev->children[chix];
+
+ if (childdev)
+ collect_usb_address_map(childdev, map);
+ }
+}
+
+/* this function must be called with interrupt disabled */
+static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597,
+ int addr)
+{
+ struct r8a66597_device *dev;
+ struct list_head *list = &r8a66597->child_device;
+
+ list_for_each_entry(dev, list, device_list) {
+ if (!dev)
+ continue;
+ if (dev->usb_address != addr)
+ continue;
+
+ return dev;
+ }
+
+	err("get_r8a66597_device failed (address %d)", addr);
+ return NULL;
+}
+
+static void update_usb_address_map(struct r8a66597 *r8a66597,
+ struct usb_device *root_hub,
+ unsigned long *map)
+{
+ int i, j, addr;
+ unsigned long diff;
+ unsigned long flags;
+
+ for (i = 0; i < 4; i++) {
+ diff = r8a66597->child_connect_map[i] ^ map[i];
+ if (!diff)
+ continue;
+
+ for (j = 0; j < 32; j++) {
+ if (!(diff & (1 << j)))
+ continue;
+
+ addr = i * 32 + j;
+ if (map[i] & (1 << j))
+ set_child_connect_map(r8a66597, addr);
+ else {
+ struct r8a66597_device *dev;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ dev = get_r8a66597_device(r8a66597, addr);
+ disable_r8a66597_pipe_all(r8a66597, dev);
+ free_usb_address(r8a66597, dev);
+ put_child_connect_map(r8a66597, addr);
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+ }
+ }
+ }
+}
+
+static void r8a66597_check_detect_child(struct r8a66597 *r8a66597,
+ struct usb_hcd *hcd)
+{
+ struct usb_bus *bus;
+ unsigned long now_map[4];
+
+ memset(now_map, 0, sizeof(now_map));
+
+ list_for_each_entry(bus, &usb_bus_list, bus_list) {
+ if (!bus->root_hub)
+ continue;
+
+ if (bus->busnum != hcd->self.busnum)
+ continue;
+
+ collect_usb_address_map(bus->root_hub, now_map);
+ update_usb_address_map(r8a66597, bus->root_hub, now_map);
+ }
+}
+
+static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ unsigned long flags;
+ int i;
+
+ r8a66597_check_detect_child(r8a66597, hcd);
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+
+ *buf = 0; /* initialize (no change) */
+
+ for (i = 0; i < R8A66597_MAX_ROOT_HUB; i++) {
+ if (r8a66597->root_hub[i].port & 0xffff0000)
+ *buf |= 1 << (i + 1);
+ }
+
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+ return (*buf != 0);
+}
+
+static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597,
+ struct usb_hub_descriptor *desc)
+{
+ desc->bDescriptorType = 0x29;
+ desc->bHubContrCurrent = 0;
+ desc->bNbrPorts = R8A66597_MAX_ROOT_HUB;
+ desc->bDescLength = 9;
+ desc->bPwrOn2PwrGood = 0;
+ desc->wHubCharacteristics = cpu_to_le16(0x0011);
+ desc->bitmap[0] = ((1 << R8A66597_MAX_ROOT_HUB) - 1) << 1;
+ desc->bitmap[1] = ~0;
+}
+
+static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
+ int ret;
+ int port = (wIndex & 0x00FF) - 1;
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+ unsigned long flags;
+
+ ret = 0;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (wIndex > R8A66597_MAX_ROOT_HUB)
+ goto error;
+ if (wLength != 0)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ rh->port &= (1 << USB_PORT_FEAT_POWER);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ break;
+ case USB_PORT_FEAT_POWER:
+ r8a66597_port_power(r8a66597, port, 0);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ break;
+ default:
+ goto error;
+ }
+ rh->port &= ~(1 << wValue);
+ break;
+ case GetHubDescriptor:
+ r8a66597_hub_descriptor(r8a66597,
+ (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+ *buf = 0x00;
+ break;
+ case GetPortStatus:
+ if (wIndex > R8A66597_MAX_ROOT_HUB)
+ goto error;
+ *(u32 *)buf = rh->port;
+ break;
+ case SetPortFeature:
+ if (wIndex > R8A66597_MAX_ROOT_HUB)
+ goto error;
+ if (wLength != 0)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ break;
+ case USB_PORT_FEAT_POWER:
+ r8a66597_port_power(r8a66597, port, 1);
+ rh->port |= (1 << USB_PORT_FEAT_POWER);
+ break;
+ case USB_PORT_FEAT_RESET: {
+ struct r8a66597_device *dev = rh->dev;
+
+ rh->port |= (1 << USB_PORT_FEAT_RESET);
+
+ disable_r8a66597_pipe_all(r8a66597, dev);
+ free_usb_address(r8a66597, dev);
+
+ r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
+ get_dvstctr_reg(port));
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(50));
+ }
+ break;
+ default:
+ goto error;
+ }
+ rh->port |= 1 << wValue;
+ break;
+ default:
+error:
+ ret = -EPIPE;
+ break;
+ }
+
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+ return ret;
+}
+
+static struct hc_driver r8a66597_hc_driver = {
+ .description = hcd_name,
+ .hcd_priv_size = sizeof(struct r8a66597),
+ .irq = r8a66597_irq,
+
+ /*
+ * generic hardware linkage
+ */
+ .flags = HCD_USB2,
+
+ .start = r8a66597_start,
+ .stop = r8a66597_stop,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = r8a66597_urb_enqueue,
+ .urb_dequeue = r8a66597_urb_dequeue,
+ .endpoint_disable = r8a66597_endpoint_disable,
+
+ /*
+ * periodic schedule support
+ */
+ .get_frame_number = r8a66597_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = r8a66597_hub_status_data,
+ .hub_control = r8a66597_hub_control,
+};
+
+#if defined(CONFIG_PM)
+static int r8a66597_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ pdev->dev.power.power_state = state;
+ return 0;
+}
+
+static int r8a66597_resume(struct platform_device *pdev)
+{
+ pdev->dev.power.power_state = PMSG_ON;
+ return 0;
+}
+#else /* if defined(CONFIG_PM) */
+#define r8a66597_suspend NULL
+#define r8a66597_resume NULL
+#endif
+
+static int __init_or_module r8a66597_remove(struct platform_device *pdev)
+{
+ struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);
+ struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
+
+ del_timer_sync(&r8a66597->rh_timer);
+	usb_remove_hcd(hcd);
+	iounmap((void *)r8a66597->reg);
+ usb_put_hcd(hcd);
+ return 0;
+}
+
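+/*
+ * The probe routine below expects the board code to register a
+ * platform device that carries one memory resource named after
+ * hcd_name plus one IRQ, and that leaves dev.dma_mask unset.  A
+ * minimal board-file sketch (illustrative only: the base address and
+ * IRQ number are placeholders, and the name strings must match the
+ * driver's hcd_name):
+ *
+ *	static struct resource r8a66597_usb_resources[] = {
+ *		[0] = {
+ *			.name	= "r8a66597_hcd",
+ *			.start	= 0xa4d80000,
+ *			.end	= 0xa4d800ff,
+ *			.flags	= IORESOURCE_MEM,
+ *		},
+ *		[1] = {
+ *			.start	= 65,
+ *			.flags	= IORESOURCE_IRQ,
+ *		},
+ *	};
+ *
+ *	static struct platform_device r8a66597_usb_device = {
+ *		.name		= "r8a66597_hcd",
+ *		.id		= -1,
+ *		.resource	= r8a66597_usb_resources,
+ *		.num_resources	= ARRAY_SIZE(r8a66597_usb_resources),
+ *	};
+ *
+ *	platform_device_register(&r8a66597_usb_device);
+ */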
+#define resource_len(r) (((r)->end - (r)->start) + 1)
+static int __init r8a66597_probe(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ int irq = -1;
+ void __iomem *reg = NULL;
+ struct usb_hcd *hcd = NULL;
+ struct r8a66597 *r8a66597;
+ int ret = 0;
+ int i;
+
+ if (pdev->dev.dma_mask) {
+ ret = -EINVAL;
+		err("dma not supported");
+ goto clean_up;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ (char *)hcd_name);
+ if (!res) {
+ ret = -ENODEV;
+ err("platform_get_resource_byname error.");
+ goto clean_up;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENODEV;
+ err("platform_get_irq error.");
+ goto clean_up;
+ }
+
+ reg = ioremap(res->start, resource_len(res));
+ if (reg == NULL) {
+ ret = -ENOMEM;
+ err("ioremap error.");
+ goto clean_up;
+ }
+
+ /* initialize hcd */
+ hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
+ if (!hcd) {
+ ret = -ENOMEM;
+ err("Failed to create hcd");
+ goto clean_up;
+ }
+ r8a66597 = hcd_to_r8a66597(hcd);
+ memset(r8a66597, 0, sizeof(struct r8a66597));
+ dev_set_drvdata(&pdev->dev, r8a66597);
+
+ spin_lock_init(&r8a66597->lock);
+ init_timer(&r8a66597->rh_timer);
+ r8a66597->rh_timer.function = r8a66597_timer;
+ r8a66597->rh_timer.data = (unsigned long)r8a66597;
+ r8a66597->reg = (unsigned long)reg;
+
+ for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
+ INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
+ init_timer(&r8a66597->td_timer[i]);
+ r8a66597->td_timer[i].function = r8a66597_td_timer;
+ r8a66597->td_timer[i].data = (unsigned long)r8a66597;
+ }
+ INIT_LIST_HEAD(&r8a66597->child_device);
+
+ hcd->rsrc_start = res->start;
+ ret = usb_add_hcd(hcd, irq, 0);
+ if (ret != 0) {
+ err("Failed to add hcd");
+ goto clean_up;
+ }
+
+ return 0;
+
+clean_up:
+ if (reg)
+ iounmap(reg);
+ if (res)
+ release_mem_region(res->start, 1);
+
+ return ret;
+}
+
+static struct platform_driver r8a66597_driver = {
+ .probe = r8a66597_probe,
+ .remove = r8a66597_remove,
+ .suspend = r8a66597_suspend,
+ .resume = r8a66597_resume,
+ .driver = {
+ .name = (char *) hcd_name,
+ },
+};
+
+static int __init r8a66597_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ info("driver %s, %s", hcd_name, DRIVER_VERSION);
+ return platform_driver_register(&r8a66597_driver);
+}
+module_init(r8a66597_init);
+
+static void __exit r8a66597_cleanup(void)
+{
+ platform_driver_unregister(&r8a66597_driver);
+}
+module_exit(r8a66597_cleanup);
+
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
new file mode 100644
index 000000000000..fe9ceb077d9b
--- /dev/null
+++ b/drivers/usb/host/r8a66597.h
@@ -0,0 +1,631 @@
+/*
+ * R8A66597 HCD (Host Controller Driver)
+ *
+ * Copyright (C) 2006-2007 Renesas Solutions Corp.
+ * Portions Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
+ * Portions Copyright (C) 2004-2005 David Brownell
+ * Portions Copyright (C) 1999 Roman Weissgaerber
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __R8A66597_H__
+#define __R8A66597_H__
+
+#define SYSCFG0 0x00
+#define SYSCFG1 0x02
+#define SYSSTS0 0x04
+#define SYSSTS1 0x06
+#define DVSTCTR0 0x08
+#define DVSTCTR1 0x0A
+#define TESTMODE 0x0C
+#define PINCFG 0x0E
+#define DMA0CFG 0x10
+#define DMA1CFG 0x12
+#define CFIFO 0x14
+#define D0FIFO 0x18
+#define D1FIFO 0x1C
+#define CFIFOSEL 0x20
+#define CFIFOCTR 0x22
+#define CFIFOSIE 0x24
+#define D0FIFOSEL 0x28
+#define D0FIFOCTR 0x2A
+#define D1FIFOSEL 0x2C
+#define D1FIFOCTR 0x2E
+#define INTENB0 0x30
+#define INTENB1 0x32
+#define INTENB2 0x34
+#define BRDYENB 0x36
+#define NRDYENB 0x38
+#define BEMPENB 0x3A
+#define SOFCFG 0x3C
+#define INTSTS0 0x40
+#define INTSTS1 0x42
+#define INTSTS2 0x44
+#define BRDYSTS 0x46
+#define NRDYSTS 0x48
+#define BEMPSTS 0x4A
+#define FRMNUM 0x4C
+#define UFRMNUM 0x4E
+#define USBADDR 0x50
+#define USBREQ 0x54
+#define USBVAL 0x56
+#define USBINDX 0x58
+#define USBLENG 0x5A
+#define DCPCFG 0x5C
+#define DCPMAXP 0x5E
+#define DCPCTR 0x60
+#define PIPESEL 0x64
+#define PIPECFG 0x68
+#define PIPEBUF 0x6A
+#define PIPEMAXP 0x6C
+#define PIPEPERI 0x6E
+#define PIPE1CTR 0x70
+#define PIPE2CTR 0x72
+#define PIPE3CTR 0x74
+#define PIPE4CTR 0x76
+#define PIPE5CTR 0x78
+#define PIPE6CTR 0x7A
+#define PIPE7CTR 0x7C
+#define PIPE8CTR 0x7E
+#define PIPE9CTR 0x80
+#define PIPE1TRE 0x90
+#define PIPE1TRN 0x92
+#define PIPE2TRE 0x94
+#define PIPE2TRN 0x96
+#define PIPE3TRE 0x98
+#define PIPE3TRN 0x9A
+#define PIPE4TRE 0x9C
+#define PIPE4TRN 0x9E
+#define PIPE5TRE 0xA0
+#define PIPE5TRN 0xA2
+#define DEVADD0 0xD0
+#define DEVADD1 0xD2
+#define DEVADD2 0xD4
+#define DEVADD3 0xD6
+#define DEVADD4 0xD8
+#define DEVADD5 0xDA
+#define DEVADD6 0xDC
+#define DEVADD7 0xDE
+#define DEVADD8 0xE0
+#define DEVADD9 0xE2
+#define DEVADDA 0xE4
+
+/* System Configuration Control Register */
+#define XTAL 0xC000 /* b15-14: Crystal selection */
+#define XTAL48 0x8000 /* 48MHz */
+#define XTAL24 0x4000 /* 24MHz */
+#define XTAL12 0x0000 /* 12MHz */
+#define XCKE 0x2000 /* b13: External clock enable */
+#define PLLC 0x0800 /* b11: PLL control */
+#define SCKE 0x0400 /* b10: USB clock enable */
+#define PCSDIS 0x0200 /* b9: not CS wakeup */
+#define LPSME 0x0100 /* b8: Low power sleep mode */
+#define HSE 0x0080 /* b7: Hi-speed enable */
+#define DCFM 0x0040 /* b6: Controller function select */
+#define DRPD 0x0020 /* b5: D+/- pull down control */
+#define DPRPU 0x0010 /* b4: D+ pull up control */
+#define USBE 0x0001 /* b0: USB module operation enable */
+
+/* System Configuration Status Register */
+#define	OVCBIT		0x8000	/* b15: Over-current bit */
+#define OVCMON 0xC000 /* b15-14: Over-current monitor */
+#define SOFEA 0x0020 /* b5: SOF monitor */
+#define IDMON 0x0004 /* b3: ID-pin monitor */
+#define LNST 0x0003 /* b1-0: D+, D- line status */
+#define SE1 0x0003 /* SE1 */
+#define FS_KSTS 0x0002 /* Full-Speed K State */
+#define FS_JSTS 0x0001 /* Full-Speed J State */
+#define LS_JSTS 0x0002 /* Low-Speed J State */
+#define LS_KSTS 0x0001 /* Low-Speed K State */
+#define SE0 0x0000 /* SE0 */
+
+/* Device State Control Register */
+#define EXTLP0 0x0400 /* b10: External port */
+#define VBOUT 0x0200 /* b9: VBUS output */
+#define WKUP 0x0100 /* b8: Remote wakeup */
+#define RWUPE 0x0080 /* b7: Remote wakeup sense */
+#define USBRST 0x0040 /* b6: USB reset enable */
+#define RESUME 0x0020 /* b5: Resume enable */
+#define UACT 0x0010 /* b4: USB bus enable */
+#define	RHST		0x0007	/* b2-0: Reset handshake status */
+#define HSPROC 0x0004 /* HS handshake is processing */
+#define HSMODE 0x0003 /* Hi-Speed mode */
+#define FSMODE 0x0002 /* Full-Speed mode */
+#define LSMODE 0x0001 /* Low-Speed mode */
+#define UNDECID 0x0000 /* Undecided */
+
+/* Test Mode Register */
+#define UTST 0x000F /* b3-0: Test select */
+#define H_TST_PACKET 0x000C /* HOST TEST Packet */
+#define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */
+#define H_TST_K 0x000A /* HOST TEST K */
+#define H_TST_J 0x0009 /* HOST TEST J */
+#define H_TST_NORMAL 0x0000 /* HOST Normal Mode */
+#define P_TST_PACKET 0x0004 /* PERI TEST Packet */
+#define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */
+#define P_TST_K 0x0002 /* PERI TEST K */
+#define P_TST_J 0x0001 /* PERI TEST J */
+#define P_TST_NORMAL 0x0000 /* PERI Normal Mode */
+
+/* Data Pin Configuration Register */
+#define LDRV 0x8000 /* b15: Drive Current Adjust */
+#define VIF1 0x0000 /* VIF = 1.8V */
+#define VIF3 0x8000 /* VIF = 3.3V */
+#define INTA 0x0001 /* b1: USB INT-pin active */
+
+/* DMAx Pin Configuration Register */
+#define DREQA 0x4000 /* b14: Dreq active select */
+#define BURST 0x2000 /* b13: Burst mode */
+#define DACKA 0x0400 /* b10: Dack active select */
+#define DFORM 0x0380 /* b9-7: DMA mode select */
+#define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */
+#define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */
+#define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */
+#define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */
+#define DENDA 0x0040 /* b6: Dend active select */
+#define PKTM 0x0020 /* b5: Packet mode */
+#define DENDE 0x0010 /* b4: Dend enable */
+#define OBUS 0x0004 /* b2: OUTbus mode */
+
+/* CFIFO/DxFIFO Port Select Register */
+#define RCNT 0x8000 /* b15: Read count mode */
+#define REW 0x4000 /* b14: Buffer rewind */
+#define DCLRM 0x2000 /* b13: DMA buffer clear mode */
+#define DREQE 0x1000 /* b12: DREQ output enable */
+#define MBW 0x0400 /* b10: Maximum bit width for FIFO access */
+#define MBW_8 0x0000 /* 8bit */
+#define MBW_16 0x0400 /* 16bit */
+#define BIGEND 0x0100 /* b8: Big endian mode */
+#define	  BYTE_LITTLE	 0x0000		/* little endian */
+#define	  BYTE_BIG	 0x0100		/* big endian */
+#define ISEL 0x0020 /* b5: DCP FIFO port direction select */
+#define CURPIPE 0x000F /* b2-0: PIPE select */
+
+/* CFIFO/DxFIFO Port Control Register */
+#define BVAL 0x8000 /* b15: Buffer valid flag */
+#define BCLR 0x4000 /* b14: Buffer clear */
+#define FRDY 0x2000 /* b13: FIFO ready */
+#define DTLN 0x0FFF /* b11-0: FIFO received data length */
+
+/* Interrupt Enable Register 0 */
+#define VBSE 0x8000 /* b15: VBUS interrupt */
+#define RSME 0x4000 /* b14: Resume interrupt */
+#define SOFE 0x2000 /* b13: Frame update interrupt */
+#define DVSE 0x1000 /* b12: Device state transition interrupt */
+#define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */
+#define BEMPE 0x0400 /* b10: Buffer empty interrupt */
+#define NRDYE 0x0200 /* b9: Buffer not ready interrupt */
+#define BRDYE 0x0100 /* b8: Buffer ready interrupt */
+
+/* Interrupt Enable Register 1 */
+#define OVRCRE 0x8000 /* b15: Over-current interrupt */
+#define	BCHGE	0x4000	/* b14: USB bus change interrupt */
+#define DTCHE 0x1000 /* b12: Detach sense interrupt */
+#define ATTCHE 0x0800 /* b11: Attach sense interrupt */
+#define EOFERRE 0x0040 /* b6: EOF error interrupt */
+#define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */
+#define SACKE 0x0010 /* b4: SETUP ACK interrupt */
+
+/* BRDY Interrupt Enable/Status Register */
+#define BRDY9 0x0200 /* b9: PIPE9 */
+#define BRDY8 0x0100 /* b8: PIPE8 */
+#define BRDY7 0x0080 /* b7: PIPE7 */
+#define BRDY6 0x0040 /* b6: PIPE6 */
+#define BRDY5 0x0020 /* b5: PIPE5 */
+#define BRDY4 0x0010 /* b4: PIPE4 */
+#define BRDY3 0x0008 /* b3: PIPE3 */
+#define BRDY2 0x0004 /* b2: PIPE2 */
+#define BRDY1 0x0002 /* b1: PIPE1 */
+#define	BRDY0	0x0001	/* b0: PIPE0 */
+
+/* NRDY Interrupt Enable/Status Register */
+#define NRDY9 0x0200 /* b9: PIPE9 */
+#define NRDY8 0x0100 /* b8: PIPE8 */
+#define NRDY7 0x0080 /* b7: PIPE7 */
+#define NRDY6 0x0040 /* b6: PIPE6 */
+#define NRDY5 0x0020 /* b5: PIPE5 */
+#define NRDY4 0x0010 /* b4: PIPE4 */
+#define NRDY3 0x0008 /* b3: PIPE3 */
+#define NRDY2 0x0004 /* b2: PIPE2 */
+#define NRDY1 0x0002 /* b1: PIPE1 */
+#define	NRDY0	0x0001	/* b0: PIPE0 */
+
+/* BEMP Interrupt Enable/Status Register */
+#define BEMP9 0x0200 /* b9: PIPE9 */
+#define BEMP8 0x0100 /* b8: PIPE8 */
+#define BEMP7 0x0080 /* b7: PIPE7 */
+#define BEMP6 0x0040 /* b6: PIPE6 */
+#define BEMP5 0x0020 /* b5: PIPE5 */
+#define BEMP4 0x0010 /* b4: PIPE4 */
+#define BEMP3 0x0008 /* b3: PIPE3 */
+#define BEMP2 0x0004 /* b2: PIPE2 */
+#define BEMP1 0x0002 /* b1: PIPE1 */
+#define BEMP0 0x0001 /* b0: PIPE0 */
+
+/* SOF Pin Configuration Register */
+#define TRNENSEL 0x0100 /* b8: Select transaction enable period */
+#define BRDYM 0x0040 /* b6: BRDY clear timing */
+#define INTL 0x0020 /* b5: Interrupt sense select */
+#define EDGESTS 0x0010 /* b4: */
+#define SOFMODE 0x000C /* b3-2: SOF pin select */
+#define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */
+#define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */
+#define SOF_DISABLE 0x0000 /* SOF OUT Disable */
+
+/* Interrupt Status Register 0 */
+#define VBINT 0x8000 /* b15: VBUS interrupt */
+#define RESM 0x4000 /* b14: Resume interrupt */
+#define SOFR 0x2000 /* b13: SOF frame update interrupt */
+#define DVST 0x1000 /* b12: Device state transition interrupt */
+#define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */
+#define BEMP 0x0400 /* b10: Buffer empty interrupt */
+#define NRDY 0x0200 /* b9: Buffer not ready interrupt */
+#define BRDY 0x0100 /* b8: Buffer ready interrupt */
+#define VBSTS 0x0080 /* b7: VBUS input port */
+#define DVSQ 0x0070 /* b6-4: Device state */
+#define DS_SPD_CNFG 0x0070 /* Suspend Configured */
+#define DS_SPD_ADDR 0x0060 /* Suspend Address */
+#define DS_SPD_DFLT 0x0050 /* Suspend Default */
+#define DS_SPD_POWR 0x0040 /* Suspend Powered */
+#define DS_SUSP 0x0040 /* Suspend */
+#define DS_CNFG 0x0030 /* Configured */
+#define DS_ADDS 0x0020 /* Address */
+#define DS_DFLT 0x0010 /* Default */
+#define DS_POWR 0x0000 /* Powered */
+#define DVSQS 0x0030 /* b5-4: Device state */
+#define VALID 0x0008 /* b3: Setup packet detected flag */
+#define CTSQ 0x0007 /* b2-0: Control transfer stage */
+#define CS_SQER 0x0006 /* Sequence error */
+#define CS_WRND 0x0005 /* Control write nodata status stage */
+#define CS_WRSS 0x0004 /* Control write status stage */
+#define CS_WRDS 0x0003 /* Control write data stage */
+#define CS_RDSS 0x0002 /* Control read status stage */
+#define CS_RDDS 0x0001 /* Control read data stage */
+#define CS_IDST 0x0000 /* Idle or setup stage */
+
+/* Interrupt Status Register 1 */
+#define OVRCR 0x8000 /* b15: Over-current interrupt */
+#define	BCHG	0x4000	/* b14: USB bus change interrupt */
+#define DTCH 0x1000 /* b12: Detach sense interrupt */
+#define ATTCH 0x0800 /* b11: Attach sense interrupt */
+#define EOFERR 0x0040 /* b6: EOF-error interrupt */
+#define SIGN 0x0020 /* b5: Setup ignore interrupt */
+#define SACK 0x0010 /* b4: Setup acknowledge interrupt */
+
+/* Frame Number Register */
+#define OVRN 0x8000 /* b15: Overrun error */
+#define CRCE 0x4000 /* b14: Received data error */
+#define FRNM 0x07FF /* b10-0: Frame number */
+
+/* Micro Frame Number Register */
+#define UFRNM 0x0007 /* b2-0: Micro frame number */
+
+/* Default Control Pipe Maxpacket Size Register */
+/* Pipe Maxpacket Size Register */
+#define	DEVSEL	0xF000	/* b15-12: Device address select */
+#define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */
+
+/* Default Control Pipe Control Register */
+#define BSTS 0x8000 /* b15: Buffer status */
+#define SUREQ 0x4000 /* b14: Send USB request */
+#define CSCLR 0x2000 /* b13: complete-split status clear */
+#define CSSTS 0x1000 /* b12: complete-split status */
+#define SUREQCLR 0x0800 /* b11: stop setup request */
+#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define PBUSY 0x0020 /* b5: pipe busy */
+#define PINGE 0x0010 /* b4: ping enable */
+#define CCPL 0x0004 /* b2: Enable control transfer complete */
+#define PID 0x0003 /* b1-0: Response PID */
+#define PID_STALL11 0x0003 /* STALL */
+#define PID_STALL 0x0002 /* STALL */
+#define PID_BUF 0x0001 /* BUF */
+#define PID_NAK 0x0000 /* NAK */
+
+/* Pipe Window Select Register */
+#define PIPENM 0x0007 /* b2-0: Pipe select */
+
+/* Pipe Configuration Register */
+#define R8A66597_TYP 0xC000 /* b15-14: Transfer type */
+#define R8A66597_ISO 0xC000 /* Isochronous */
+#define R8A66597_INT 0x8000 /* Interrupt */
+#define R8A66597_BULK 0x4000 /* Bulk */
+#define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */
+#define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */
+#define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */
+#define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */
+#define R8A66597_DIR 0x0010 /* b4: Transfer direction select */
+#define R8A66597_EPNUM 0x000F /* b3-0: Endpoint number select */
+
+/* Pipe Buffer Configuration Register */
+#define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */
+#define BUFNMB 0x007F /* b6-0: Pipe buffer number */
+#define PIPE0BUF 256
+#define PIPExBUF 64
+
+/* Pipe Maxpacket Size Register */
+#define MXPS 0x07FF /* b10-0: Maxpacket size */
+
+/* Pipe Cycle Configuration Register */
+#define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */
+#define IITV 0x0007 /* b2-0: Isochronous interval */
+
+/* Pipex Control Register */
+#define BSTS 0x8000 /* b15: Buffer status */
+#define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */
+#define CSCLR 0x2000 /* b13: complete-split status clear */
+#define CSSTS 0x1000 /* b12: complete-split status */
+#define ATREPM 0x0400 /* b10: Auto repeat mode */
+#define ACLRM 0x0200 /* b9: Out buffer auto clear mode */
+#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define PBUSY 0x0020 /* b5: pipe busy */
+#define PID 0x0003 /* b1-0: Response PID */
+
+/* PIPExTRE */
+#define TRENB 0x0200 /* b9: Transaction counter enable */
+#define TRCLR 0x0100 /* b8: Transaction counter clear */
+
+/* PIPExTRN */
+#define TRNCNT 0xFFFF /* b15-0: Transaction counter */
+
+/* DEVADDx */
+#define UPPHUB 0x7800 /* b14-11: Upstream hub address */
+#define HUBPORT 0x0700 /* b10-8: Hub port number */
+#define USBSPD 0x00C0 /* b7-6: Device transfer speed */
+#define RTPORT 0x0001 /* b0: Root port select */
+
+#define R8A66597_MAX_NUM_PIPE 10
+#define R8A66597_BUF_BSIZE 8
+#define R8A66597_MAX_DEVICE 10
+#define R8A66597_MAX_ROOT_HUB 2
+#define R8A66597_MAX_SAMPLING 10
+#define R8A66597_MAX_DMA_CHANNEL 2
+#define R8A66597_PIPE_NO_DMA R8A66597_MAX_DMA_CHANNEL
+#define check_bulk_or_isoc(pipenum) ((pipenum >= 1 && pipenum <= 5))
+#define check_interrupt(pipenum) ((pipenum >= 6 && pipenum <= 9))
+#define make_devsel(addr) (addr << 12)
+
+struct r8a66597_pipe_info {
+ u16 pipenum;
+ u16 address; /* R8A66597 HCD USB address */
+ u16 epnum;
+ u16 maxpacket;
+ u16 type;
+ u16 bufnum;
+ u16 buf_bsize;
+ u16 interval;
+ u16 dir_in;
+};
+
+struct r8a66597_pipe {
+ struct r8a66597_pipe_info info;
+
+ unsigned long fifoaddr;
+ unsigned long fifosel;
+ unsigned long fifoctr;
+ unsigned long pipectr;
+ unsigned long pipetre;
+ unsigned long pipetrn;
+};
+
+struct r8a66597_td {
+ struct r8a66597_pipe *pipe;
+ struct urb *urb;
+ struct list_head queue;
+
+ u16 type;
+ u16 pipenum;
+ int iso_cnt;
+
+ u16 address; /* R8A66597's USB address */
+ u16 maxpacket;
+
+ unsigned zero_packet:1;
+ unsigned short_packet:1;
+ unsigned set_address:1;
+};
+
+struct r8a66597_device {
+ u16 address; /* R8A66597's USB address */
+ u16 hub_port;
+ u16 root_port;
+
+ unsigned short ep_in_toggle;
+ unsigned short ep_out_toggle;
+ unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE];
+ unsigned char dma_map;
+
+ enum usb_device_state state;
+
+ struct usb_device *udev;
+ int usb_address;
+ struct list_head device_list;
+};
+
+struct r8a66597_root_hub {
+ u32 port;
+ u16 old_syssts;
+ int scount;
+
+ struct r8a66597_device *dev;
+};
+
+struct r8a66597 {
+ spinlock_t lock;
+ unsigned long reg;
+
+ struct r8a66597_device device0;
+ struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
+ struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
+
+ struct timer_list rh_timer;
+ struct timer_list td_timer[R8A66597_MAX_NUM_PIPE];
+
+ unsigned short address_map;
+ unsigned short timeout_map;
+ unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE];
+ unsigned char dma_map;
+
+ struct list_head child_device;
+ unsigned long child_connect_map[4];
+};
+
+static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
+{
+ return (struct r8a66597 *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *r8a66597_to_hcd(struct r8a66597 *r8a66597)
+{
+ return container_of((void *)r8a66597, struct usb_hcd, hcd_priv);
+}
+
+static inline struct r8a66597_td *r8a66597_get_td(struct r8a66597 *r8a66597,
+ u16 pipenum)
+{
+ if (unlikely(list_empty(&r8a66597->pipe_queue[pipenum])))
+ return NULL;
+
+ return list_entry(r8a66597->pipe_queue[pipenum].next,
+ struct r8a66597_td, queue);
+}
+
+static inline struct urb *r8a66597_get_urb(struct r8a66597 *r8a66597,
+ u16 pipenum)
+{
+ struct r8a66597_td *td;
+
+ td = r8a66597_get_td(r8a66597, pipenum);
+ return (td ? td->urb : NULL);
+}
+
+static inline u16 r8a66597_read(struct r8a66597 *r8a66597, unsigned long offset)
+{
+ return inw(r8a66597->reg + offset);
+}
+
+static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597,
+ unsigned long offset, u16 *buf,
+ int len)
+{
+ len = (len + 1) / 2;
+ insw(r8a66597->reg + offset, buf, len);
+}
+
+static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
+ unsigned long offset)
+{
+ outw(val, r8a66597->reg + offset);
+}
+
+static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
+ unsigned long offset, u16 *buf,
+ int len)
+{
+ unsigned long fifoaddr = r8a66597->reg + offset;
+ int odd = len & 0x0001;
+
+ len = len / 2;
+ outsw(fifoaddr, buf, len);
+ if (unlikely(odd)) {
+ buf = &buf[len];
+ outb((unsigned char)*buf, fifoaddr);
+ }
+}
+
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+ u16 val, u16 pat, unsigned long offset)
+{
+ u16 tmp;
+ tmp = r8a66597_read(r8a66597, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, val, 0, offset)
+
+static inline unsigned long get_syscfg_reg(int port)
+{
+ return port == 0 ? SYSCFG0 : SYSCFG1;
+}
+
+static inline unsigned long get_syssts_reg(int port)
+{
+ return port == 0 ? SYSSTS0 : SYSSTS1;
+}
+
+static inline unsigned long get_dvstctr_reg(int port)
+{
+ return port == 0 ? DVSTCTR0 : DVSTCTR1;
+}
+
+static inline unsigned long get_intenb_reg(int port)
+{
+ return port == 0 ? INTENB1 : INTENB2;
+}
+
+static inline unsigned long get_intsts_reg(int port)
+{
+ return port == 0 ? INTSTS1 : INTSTS2;
+}
+
+static inline u16 get_rh_usb_speed(struct r8a66597 *r8a66597, int port)
+{
+ unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+ return r8a66597_read(r8a66597, dvstctr_reg) & RHST;
+}
+
+static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
+ int power)
+{
+ unsigned long dvstctr_reg = get_dvstctr_reg(port);
+
+ if (power)
+ r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
+ else
+ r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
+}
+
+#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2)
+#define get_pipetre_addr(pipenum) (PIPE1TRE + (pipenum - 1) * 4)
+#define get_pipetrn_addr(pipenum) (PIPE1TRN + (pipenum - 1) * 4)
+#define get_devadd_addr(address) (DEVADD0 + address * 2)
+
+#define enable_irq_ready(r8a66597, pipenum) \
+ enable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define disable_irq_ready(r8a66597, pipenum) \
+ disable_pipe_irq(r8a66597, pipenum, BRDYENB)
+#define enable_irq_empty(r8a66597, pipenum) \
+ enable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define disable_irq_empty(r8a66597, pipenum) \
+ disable_pipe_irq(r8a66597, pipenum, BEMPENB)
+#define enable_irq_nrdy(r8a66597, pipenum) \
+ enable_pipe_irq(r8a66597, pipenum, NRDYENB)
+#define disable_irq_nrdy(r8a66597, pipenum) \
+ disable_pipe_irq(r8a66597, pipenum, NRDYENB)
+
+#endif /* __R8A66597_H__ */
+
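For orientation, a minimal sketch (not part of the patch) of how the helpers defined above compose: per-port registers are resolved through the get_*_reg() selectors, and individual bits are set or cleared with r8a66597_bset()/r8a66597_bclr(), thin wrappers around the read-modify-write r8a66597_mdfy(). The function names example_power_and_probe and example_clear_toggle are illustrative only; everything else is taken from the header above.

/* Illustrative sketch only -- not part of this patch. */
static u16 example_power_and_probe(struct r8a66597 *r8a66597, int port)
{
	/* Drive VBUS on the requested root port (DVSTCTRx.VBOUT). */
	r8a66597_port_power(r8a66597, port, 1);

	/* Read back the negotiated speed bits from the same DVSTCTRx register. */
	return get_rh_usb_speed(r8a66597, port);
}

/* Clearing a pipe's data toggle uses the same read-modify-write helpers. */
static void example_clear_toggle(struct r8a66597 *r8a66597, u16 pipenum)
{
	r8a66597_bset(r8a66597, SQCLR, get_pipectr_addr(pipenum));
}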
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 2d0e73b20099..5da63f535005 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -278,10 +278,9 @@ static int sl811_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
- local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
+ local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local)
return -ENOMEM;
- memset(local, 0, sizeof(local_info_t));
local->p_dev = link;
link->priv = local;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index e98df2ee9901..7f765ec038cd 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -52,6 +52,7 @@
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/pci_ids.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -83,7 +84,7 @@ static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
* u132_module_lock exists to protect access to global variables
*
*/
-static struct semaphore u132_module_lock;
+static struct mutex u132_module_lock;
static int u132_exiting = 0;
static int u132_instances = 0;
static struct list_head u132_static_list;
@@ -258,10 +259,10 @@ static void u132_hcd_delete(struct kref *kref)
struct platform_device *pdev = u132->platform_dev;
struct usb_hcd *hcd = u132_to_hcd(u132);
u132->going += 1;
- down(&u132_module_lock);
+ mutex_lock(&u132_module_lock);
list_del_init(&u132->u132_list);
u132_instances -= 1;
- up(&u132_module_lock);
+ mutex_unlock(&u132_module_lock);
dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
"2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev);
usb_put_hcd(hcd);
@@ -3111,10 +3112,10 @@ static int __devinit u132_probe(struct platform_device *pdev)
int retval = 0;
struct u132 *u132 = hcd_to_u132(hcd);
hcd->rsrc_start = 0;
- down(&u132_module_lock);
+ mutex_lock(&u132_module_lock);
list_add_tail(&u132->u132_list, &u132_static_list);
u132->sequence_num = ++u132_instances;
- up(&u132_module_lock);
+ mutex_unlock(&u132_module_lock);
u132_u132_init_kref(u132);
u132_initialise(u132, pdev);
hcd->product_desc = "ELAN U132 Host Controller";
@@ -3216,7 +3217,7 @@ static int __init u132_hcd_init(void)
INIT_LIST_HEAD(&u132_static_list);
u132_instances = 0;
u132_exiting = 0;
- init_MUTEX(&u132_module_lock);
+ mutex_init(&u132_module_lock);
if (usb_disabled())
return -ENODEV;
printk(KERN_INFO "driver %s built at %s on %s\n", hcd_name, __TIME__,
@@ -3232,9 +3233,9 @@ static void __exit u132_hcd_exit(void)
{
struct u132 *u132;
struct u132 *temp;
- down(&u132_module_lock);
+ mutex_lock(&u132_module_lock);
u132_exiting += 1;
- up(&u132_module_lock);
+ mutex_unlock(&u132_module_lock);
list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
platform_device_unregister(u132->platform_dev);
} platform_driver_unregister(&u132_platform_driver);
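The u132-hcd hunks above are the standard semaphore-to-mutex conversion. A condensed sketch of the idiom (example_module_lock, example_init and example_touch_globals are illustrative names, not the driver's own):

#include <linux/init.h>
#include <linux/mutex.h>

static struct mutex example_module_lock;	/* was: static struct semaphore ... */

static int __init example_init(void)
{
	mutex_init(&example_module_lock);	/* was: init_MUTEX(&...) */
	return 0;
}

static void example_touch_globals(void)
{
	mutex_lock(&example_module_lock);	/* was: down(&...) */
	/* ... walk the static list, bump the instance counter ... */
	mutex_unlock(&example_module_lock);	/* was: up(&...) */
}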
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index d22da26ff167..805e5fc5f5db 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -730,10 +730,9 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
int rc = 0;
spin_lock_irq(&uhci->lock);
- if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
- dev_warn(&hcd->self.root_hub->dev, "HC isn't running!\n");
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
rc = -ESHUTDOWN;
- } else if (!uhci->dead)
+ else if (!uhci->dead)
wakeup_rh(uhci);
spin_unlock_irq(&uhci->lock);
return rc;
@@ -934,7 +933,7 @@ static int __init uhci_hcd_init(void)
}
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
- sizeof(struct urb_priv), 0, 0, NULL, NULL);
+ sizeof(struct urb_priv), 0, 0, NULL);
if (!uhci_up_cachep)
goto up_failed;
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 4aed305982ec..3bb908ca38e9 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -827,8 +827,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
* If direction is "send", change the packet ID from SETUP (0x2D)
* to OUT (0xE1). Else change it from SETUP to IN (0x69) and
* set Short Packet Detect (SPD) for all data packets.
+ *
+ * 0-length transfers always get treated as "send".
*/
- if (usb_pipeout(urb->pipe))
+ if (usb_pipeout(urb->pipe) || len == 0)
destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
else {
destination ^= (USB_PID_SETUP ^ USB_PID_IN);
@@ -839,7 +841,12 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
* Build the DATA TDs
*/
while (len > 0) {
- int pktsze = min(len, maxsze);
+ int pktsze = maxsze;
+
+ if (len <= pktsze) { /* The last data packet */
+ pktsze = len;
+ status &= ~TD_CTRL_SPD;
+ }
td = uhci_alloc_td(uhci);
if (!td)
@@ -866,20 +873,10 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
goto nomem;
*plink = LINK_TO_TD(td);
- /*
- * It's IN if the pipe is an output pipe or we're not expecting
- * data back.
- */
- destination &= ~TD_TOKEN_PID_MASK;
- if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
- destination |= USB_PID_IN;
- else
- destination |= USB_PID_OUT;
-
+ /* Change direction for the status transaction */
+ destination ^= (USB_PID_IN ^ USB_PID_OUT);
destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
- status &= ~TD_CTRL_SPD;
-
uhci_add_td_to_urbp(td, urbp);
uhci_fill_td(td, status | TD_CTRL_IOC,
destination | uhci_explen(0), 0);
@@ -1185,10 +1182,18 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
}
}
+ /* Did we receive a short packet? */
} else if (len < uhci_expected_length(td_token(td))) {
- /* We received a short packet */
- if (urb->transfer_flags & URB_SHORT_NOT_OK)
+ /* For control transfers, go to the status TD if
+ * this isn't already the last data TD */
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+ if (td->list.next != urbp->td_list.prev)
+ ret = 1;
+ }
+
+ /* For bulk and interrupt, this may be an error */
+ else if (urb->transfer_flags & URB_SHORT_NOT_OK)
ret = -EREMOTEIO;
/* Fixup needed only if this isn't the URB's last TD */
@@ -1208,10 +1213,6 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
err:
if (ret < 0) {
- /* In case a control transfer gets an error
- * during the setup stage */
- urb->actual_length = max(urb->actual_length, 0);
-
/* Note that the queue has stopped and save
* the next toggle value */
qh->element = UHCI_PTR_TERM;
@@ -1489,9 +1490,25 @@ __acquires(uhci->lock)
{
struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
+ if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+
+ /* urb->actual_length < 0 means the setup transaction didn't
+ * complete successfully. Either it failed or the URB was
+ * unlinked first. Regardless, don't confuse people with a
+ * negative length. */
+ urb->actual_length = max(urb->actual_length, 0);
+
+ /* Report erroneous short transfers */
+ if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+ urb->actual_length <
+ urb->transfer_buffer_length &&
+ urb->status == 0))
+ urb->status = -EREMOTEIO;
+ }
+
/* When giving back the first URB in an Isochronous queue,
* reinitialize the QH's iso-related members for the next URB. */
- if (qh->type == USB_ENDPOINT_XFER_ISOC &&
+ else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
urbp->node.prev == &qh->queue &&
urbp->node.next != &qh->queue) {
struct urb *nurb = list_entry(urbp->node.next,
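A short worked note on the PID arithmetic used in uhci_submit_control() above; the PID values are the ones quoted in the driver's own comment (SETUP = 0x2d, OUT = 0xe1, IN = 0x69):

/*
 * destination ^= (USB_PID_SETUP ^ USB_PID_OUT) flips the token PID between
 * two known values without a branch:
 *
 *     0x2d ^ (0x2d ^ 0xe1) == 0xe1     SETUP -> OUT   (data stage, send)
 *     0x2d ^ (0x2d ^ 0x69) == 0x69     SETUP -> IN    (data stage, receive)
 *     0xe1 ^ (0xe1 ^ 0x69) == 0x69     OUT   -> IN    (status stage flip)
 *     0x69 ^ (0xe1 ^ 0x69) == 0xe1     IN    -> OUT   (status stage flip)
 *
 * This is why the status transaction can reuse one XOR with
 * (USB_PID_IN ^ USB_PID_OUT) regardless of the data-stage direction.
 */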
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 36502a06f73a..d1131a87a5b1 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -284,9 +284,9 @@ static void mdc800_usb_irq (struct urb *urb)
int data_received=0, wake_up;
unsigned char* b=urb->transfer_buffer;
struct mdc800_data* mdc800=urb->context;
+ int status = urb->status;
- if (urb->status >= 0)
- {
+ if (status >= 0) {
//dbg ("%i %i %i %i %i %i %i %i \n",b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7]);
@@ -324,7 +324,7 @@ static void mdc800_usb_irq (struct urb *urb)
||
((mdc800->camera_request_ready == 3) && (mdc800->camera_busy))
||
- (urb->status < 0)
+ (status < 0)
);
if (wake_up)
@@ -376,15 +376,12 @@ static int mdc800_usb_waitForIRQ (int mode, int msec)
static void mdc800_usb_write_notify (struct urb *urb)
{
struct mdc800_data* mdc800=urb->context;
+ int status = urb->status;
- if (urb->status != 0)
- {
- err ("writing command fails (status=%i)", urb->status);
- }
+ if (status != 0)
+ err ("writing command fails (status=%i)", status);
else
- {
mdc800->state=READY;
- }
mdc800->written = 1;
wake_up (&mdc800->write_wait);
}
@@ -396,9 +393,9 @@ static void mdc800_usb_write_notify (struct urb *urb)
static void mdc800_usb_download_notify (struct urb *urb)
{
struct mdc800_data* mdc800=urb->context;
+ int status = urb->status;
- if (urb->status == 0)
- {
+ if (status == 0) {
/* Fill output buffer with these data */
memcpy (mdc800->out, urb->transfer_buffer, 64);
mdc800->out_count=64;
@@ -408,10 +405,8 @@ static void mdc800_usb_download_notify (struct urb *urb)
{
mdc800->state=READY;
}
- }
- else
- {
- err ("request bytes fails (status:%i)", urb->status);
+ } else {
+ err ("request bytes fails (status:%i)", status);
}
mdc800->downloaded = 1;
wake_up (&mdc800->download_wait);
@@ -649,9 +644,9 @@ static int mdc800_device_open (struct inode* inode, struct file *file)
retval=0;
mdc800->irq_urb->dev = mdc800->dev;
- if (usb_submit_urb (mdc800->irq_urb, GFP_KERNEL))
- {
- err ("request USB irq fails (submit_retval=%i urb_status=%i).",retval, mdc800->irq_urb->status);
+ retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL);
+ if (retval) {
+ err ("request USB irq fails (submit_retval=%i).", retval);
errn = -EIO;
goto error_out;
}
@@ -698,6 +693,7 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
{
size_t left=len, sts=len; /* single transfer size */
char __user *ptr = buf;
+ int retval;
mutex_lock(&mdc800->io_lock);
if (mdc800->state == NOT_CONNECTED)
@@ -737,9 +733,9 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
/* Download -> Request new bytes */
mdc800->download_urb->dev = mdc800->dev;
- if (usb_submit_urb (mdc800->download_urb, GFP_KERNEL))
- {
- err ("Can't submit download urb (status=%i)",mdc800->download_urb->status);
+ retval = usb_submit_urb (mdc800->download_urb, GFP_KERNEL);
+ if (retval) {
+ err ("Can't submit download urb (retval=%i)",retval);
mutex_unlock(&mdc800->io_lock);
return len-left;
}
@@ -788,6 +784,7 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
static ssize_t mdc800_device_write (struct file *file, const char __user *buf, size_t len, loff_t *pos)
{
size_t i=0;
+ int retval;
mutex_lock(&mdc800->io_lock);
if (mdc800->state != READY)
@@ -854,9 +851,9 @@ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, s
mdc800->state=WORKING;
memcpy (mdc800->write_urb->transfer_buffer, mdc800->in,8);
mdc800->write_urb->dev = mdc800->dev;
- if (usb_submit_urb (mdc800->write_urb, GFP_KERNEL))
- {
- err ("submitting write urb fails (status=%i)", mdc800->write_urb->status);
+ retval = usb_submit_urb (mdc800->write_urb, GFP_KERNEL);
+ if (retval) {
+ err ("submitting write urb fails (retval=%i)", retval);
mutex_unlock(&mdc800->io_lock);
return -EIO;
}
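The mdc800 hunks above, and most of the completion-handler changes in the drivers that follow, simply read urb->status once into a local variable at the top of the handler. A condensed sketch of the resulting pattern (example_complete is an illustrative name):

static void example_complete(struct urb *urb)
{
	int status = urb->status;	/* snapshot the status once */

	switch (status) {
	case 0:				/* success */
		break;
	case -ECONNRESET:		/* URB was unlinked or the  */
	case -ENOENT:			/* device went away: do not */
	case -ESHUTDOWN:		/* resubmit, just bail out  */
		return;
	default:
		dbg("nonzero urb status received: %d", status);
		break;
	}
	/* ... consume urb->transfer_buffer / urb->actual_length ... */
}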
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 51bd80d2b8cc..768b2c11a231 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -189,7 +189,7 @@ static struct usb_driver mts_usb_driver = {
#define MTS_DEBUG_INT() \
do { MTS_DEBUG_GOT_HERE(); \
MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \
- MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",(int)transfer->status,(int)context->data_length, (int)transfer->actual_length ); \
+ MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \
mts_debug_dump(context->instance);\
} while(0)
#else
@@ -393,8 +393,6 @@ void mts_int_submit_urb (struct urb* transfer,
context
);
- transfer->status = 0;
-
res = usb_submit_urb( transfer, GFP_ATOMIC );
if ( unlikely(res) ) {
MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res );
@@ -444,12 +442,13 @@ static void mts_get_status( struct urb *transfer )
static void mts_data_done( struct urb* transfer )
/* Interrupt context! */
{
+ int status = transfer->status;
MTS_INT_INIT();
if ( context->data_length != transfer->actual_length ) {
context->srb->resid = context->data_length - transfer->actual_length;
- } else if ( unlikely(transfer->status) ) {
- context->srb->result = (transfer->status == -ENOENT ? DID_ABORT : DID_ERROR)<<16;
+ } else if ( unlikely(status) ) {
+ context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16;
}
mts_get_status(transfer);
@@ -461,10 +460,11 @@ static void mts_data_done( struct urb* transfer )
static void mts_command_done( struct urb *transfer )
/* Interrupt context! */
{
+ int status = transfer->status;
MTS_INT_INIT();
- if ( unlikely(transfer->status) ) {
- if (transfer->status == -ENOENT) {
+ if ( unlikely(status) ) {
+ if (status == -ENOENT) {
/* We are being killed */
MTS_DEBUG_GOT_HERE();
context->srb->result = DID_ABORT<<16;
@@ -502,12 +502,13 @@ static void mts_command_done( struct urb *transfer )
static void mts_do_sg (struct urb* transfer)
{
struct scatterlist * sg;
+ int status = transfer->status;
MTS_INT_INIT();
MTS_DEBUG("Processing fragment %d of %d\n", context->fragment,context->srb->use_sg);
- if (unlikely(transfer->status)) {
- context->srb->result = (transfer->status == -ENOENT ? DID_ABORT : DID_ERROR)<<16;
+ if (unlikely(status)) {
+ context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16;
mts_transfer_cleanup(transfer);
}
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 77145f9db043..e9fdbc8997b3 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#ifdef CONFIG_USB_DEBUG
@@ -80,7 +81,7 @@ MODULE_DEVICE_TABLE(usb, device_table);
/* Structure to hold all of our device specific stuff */
struct adu_device {
- struct semaphore sem; /* locks this structure */
+ struct mutex mtx; /* locks this structure */
struct usb_device* udev; /* save off the usb device pointer */
struct usb_interface* interface;
unsigned char minor; /* the starting minor number for this device */
@@ -108,8 +109,6 @@ struct adu_device {
struct urb* interrupt_out_urb;
};
-/* prevent races between open() and disconnect */
-static DEFINE_MUTEX(disconnect_mutex);
static struct usb_driver adu_driver;
static void adu_debug_data(int level, const char *function, int size,
@@ -180,17 +179,18 @@ static void adu_delete(struct adu_device *dev)
static void adu_interrupt_in_callback(struct urb *urb)
{
struct adu_device *dev = urb->context;
+ int status = urb->status;
- dbg(4," %s : enter, status %d", __FUNCTION__, urb->status);
+ dbg(4," %s : enter, status %d", __FUNCTION__, status);
adu_debug_data(5, __FUNCTION__, urb->actual_length,
urb->transfer_buffer);
spin_lock(&dev->buflock);
- if (urb->status != 0) {
- if ((urb->status != -ENOENT) && (urb->status != -ECONNRESET)) {
+ if (status != 0) {
+ if ((status != -ENOENT) && (status != -ECONNRESET)) {
dbg(1," %s : nonzero status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
}
goto exit;
}
@@ -218,21 +218,22 @@ exit:
wake_up_interruptible(&dev->read_wait);
adu_debug_data(5, __FUNCTION__, urb->actual_length,
urb->transfer_buffer);
- dbg(4," %s : leave, status %d", __FUNCTION__, urb->status);
+ dbg(4," %s : leave, status %d", __FUNCTION__, status);
}
static void adu_interrupt_out_callback(struct urb *urb)
{
struct adu_device *dev = urb->context;
+ int status = urb->status;
- dbg(4," %s : enter, status %d", __FUNCTION__, urb->status);
+ dbg(4," %s : enter, status %d", __FUNCTION__, status);
adu_debug_data(5,__FUNCTION__, urb->actual_length, urb->transfer_buffer);
- if (urb->status != 0) {
- if ((urb->status != -ENOENT) &&
- (urb->status != -ECONNRESET)) {
+ if (status != 0) {
+ if ((status != -ENOENT) &&
+ (status != -ECONNRESET)) {
dbg(1, " %s :nonzero status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
}
goto exit;
}
@@ -242,7 +243,7 @@ exit:
adu_debug_data(5, __FUNCTION__, urb->actual_length,
urb->transfer_buffer);
- dbg(4," %s : leave, status %d", __FUNCTION__, urb->status);
+ dbg(4," %s : leave, status %d", __FUNCTION__, status);
}
static int adu_open(struct inode *inode, struct file *file)
@@ -256,8 +257,6 @@ static int adu_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
- mutex_lock(&disconnect_mutex);
-
interface = usb_find_interface(&adu_driver, subminor);
if (!interface) {
err("%s - error, can't find device for minor %d",
@@ -273,8 +272,8 @@ static int adu_open(struct inode *inode, struct file *file)
}
/* lock this device */
- if ((retval = down_interruptible(&dev->sem))) {
- dbg(2, "%s : sem down failed", __FUNCTION__);
+ if ((retval = mutex_lock_interruptible(&dev->mtx))) {
+ dbg(2, "%s : mutex lock failed", __FUNCTION__);
goto exit_no_device;
}
@@ -303,10 +302,9 @@ static int adu_open(struct inode *inode, struct file *file)
if (retval)
--dev->open_count;
}
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit_no_device:
- mutex_unlock(&disconnect_mutex);
dbg(2,"%s : leave, return value %d ", __FUNCTION__, retval);
return retval;
@@ -318,12 +316,6 @@ static int adu_release_internal(struct adu_device *dev)
dbg(2," %s : enter", __FUNCTION__);
- if (dev->udev == NULL) {
- /* the device was unplugged before the file was released */
- adu_delete(dev);
- goto exit;
- }
-
/* decrement our usage count for the device */
--dev->open_count;
dbg(2," %s : open count %d", __FUNCTION__, dev->open_count);
@@ -332,7 +324,6 @@ static int adu_release_internal(struct adu_device *dev)
dev->open_count = 0;
}
-exit:
dbg(2," %s : leave", __FUNCTION__);
return retval;
}
@@ -359,7 +350,7 @@ static int adu_release(struct inode *inode, struct file *file)
}
/* lock our device */
- down(&dev->sem); /* not interruptible */
+ mutex_lock(&dev->mtx); /* not interruptible */
if (dev->open_count <= 0) {
dbg(1," %s : device not opened", __FUNCTION__);
@@ -367,12 +358,19 @@ static int adu_release(struct inode *inode, struct file *file)
goto exit;
}
- /* do the work */
- retval = adu_release_internal(dev);
+ if (dev->udev == NULL) {
+ /* the device was unplugged before the file was released */
+ mutex_unlock(&dev->mtx);
+ adu_delete(dev);
+ dev = NULL;
+ } else {
+ /* do the work */
+ retval = adu_release_internal(dev);
+ }
exit:
if (dev)
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
dbg(2," %s : leave, return value %d", __FUNCTION__, retval);
return retval;
}
@@ -395,7 +393,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
dev = file->private_data;
dbg(2," %s : dev=%p", __FUNCTION__, dev);
/* lock this object */
- if (down_interruptible(&dev->sem))
+ if (mutex_lock_interruptible(&dev->mtx))
return -ERESTARTSYS;
/* verify that the device wasn't unplugged */
@@ -527,7 +525,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
exit:
/* unlock the device */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
dbg(2," %s : leave, return value %d", __FUNCTION__, retval);
return retval;
@@ -548,7 +546,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
dev = file->private_data;
/* lock this object */
- retval = down_interruptible(&dev->sem);
+ retval = mutex_lock_interruptible(&dev->mtx);
if (retval)
goto exit_nolock;
@@ -576,9 +574,9 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
retval = -EINTR;
goto exit;
}
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
timeout = interruptible_sleep_on_timeout(&dev->write_wait, timeout);
- retval = down_interruptible(&dev->sem);
+ retval = mutex_lock_interruptible(&dev->mtx);
if (retval) {
retval = bytes_written ? bytes_written : retval;
goto exit_nolock;
@@ -643,7 +641,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
exit:
/* unlock the device */
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
exit_nolock:
dbg(2," %s : leave, return value %d", __FUNCTION__, retval);
@@ -703,7 +701,7 @@ static int adu_probe(struct usb_interface *interface,
goto exit;
}
- init_MUTEX(&dev->sem);
+ mutex_init(&dev->mtx);
spin_lock_init(&dev->buflock);
dev->udev = udev;
init_waitqueue_head(&dev->read_wait);
@@ -831,31 +829,27 @@ static void adu_disconnect(struct usb_interface *interface)
dbg(2," %s : enter", __FUNCTION__);
- mutex_lock(&disconnect_mutex); /* not interruptible */
-
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
- down(&dev->sem); /* not interruptible */
-
minor = dev->minor;
/* give back our minor */
usb_deregister_dev(interface, &adu_class);
dev->minor = 0;
+ mutex_lock(&dev->mtx); /* not interruptible */
+
/* if the device is not opened, then we clean up right now */
dbg(2," %s : open count %d", __FUNCTION__, dev->open_count);
if (!dev->open_count) {
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
adu_delete(dev);
} else {
dev->udev = NULL;
- up(&dev->sem);
+ mutex_unlock(&dev->mtx);
}
- mutex_unlock(&disconnect_mutex);
-
dev_info(&interface->dev, "ADU device adutux%d now disconnected",
(minor - ADU_MINOR_BASE));
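adutux and the drivers that follow drop their file-scope disconnect lock and instead call usb_deregister_dev() before taking the per-device lock, so no new open() can race with teardown. A condensed sketch of the resulting disconnect() shape (example_disconnect, struct example_dev, example_class and example_delete are illustrative names):

static void example_disconnect(struct usb_interface *interface)
{
	struct example_dev *dev = usb_get_intfdata(interface);

	usb_set_intfdata(interface, NULL);

	/* Give back the minor first, so no new open() can find us. */
	usb_deregister_dev(interface, &example_class);

	mutex_lock(&dev->mtx);		/* per-device lock, not a global one */
	if (!dev->open_count) {
		mutex_unlock(&dev->mtx);
		example_delete(dev);	/* nothing has it open: free now */
	} else {
		dev->udev = NULL;	/* open file cleans up on release() */
		mutex_unlock(&dev->mtx);
	}
}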
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index cf70c16f0e3f..1cb56f2d5c84 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -88,9 +88,10 @@ static void appledisplay_complete(struct urb *urb)
{
struct appledisplay *pdata = urb->context;
unsigned long flags;
+ int status = urb->status;
int retval;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -102,12 +103,12 @@ static void appledisplay_complete(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* This urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d",
- __FUNCTION__, urb->status);
+ dbg("%s - urb shuttingdown with status: %d",
+ __FUNCTION__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
goto exit;
}
@@ -137,7 +138,7 @@ exit:
static int appledisplay_bl_update_status(struct backlight_device *bd)
{
- struct appledisplay *pdata = class_get_devdata(&bd->class_dev);
+ struct appledisplay *pdata = bl_get_data(bd);
int retval;
pdata->msgdata[0] = 0x10;
@@ -158,7 +159,7 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
static int appledisplay_bl_get_brightness(struct backlight_device *bd)
{
- struct appledisplay *pdata = class_get_devdata(&bd->class_dev);
+ struct appledisplay *pdata = bl_get_data(bd);
int retval;
retval = usb_control_msg(
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index cac1500cba62..df7e1ecc810a 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -630,7 +630,7 @@ static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int time
} else
status = urb->status;
- if (actual_length)
+ if (status >= 0)
*actual_length = urb->actual_length;
return status;
@@ -664,7 +664,7 @@ static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsi
int ret;
struct usb_ctrlrequest *dr;
struct urb *urb;
- int length;
+ int uninitialized_var(length);
dbg ("auerchain_control_msg");
dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
@@ -862,14 +862,16 @@ static void auerswald_ctrlread_wretcomplete (struct urb * urb)
pauerbuf_t bp = (pauerbuf_t) urb->context;
pauerswald_t cp;
int ret;
+ int status = urb->status;
+
dbg ("auerswald_ctrlread_wretcomplete called");
- dbg ("complete with status: %d", urb->status);
+ dbg ("complete with status: %d", status);
cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
/* check if it is possible to advance */
- if (!auerswald_status_retry (urb->status) || !cp->usbdev) {
+ if (!auerswald_status_retry(status) || !cp->usbdev) {
/* reuse the buffer */
- err ("control dummy: transmission error %d, can not retry", urb->status);
+ err ("control dummy: transmission error %d, can not retry", status);
auerbuf_releasebuf (bp);
/* Wake up all processes waiting for a buffer */
wake_up (&cp->bufferwait);
@@ -902,21 +904,23 @@ static void auerswald_ctrlread_complete (struct urb * urb)
pauerswald_t cp;
pauerscon_t scp;
pauerbuf_t bp = (pauerbuf_t) urb->context;
+ int status = urb->status;
int ret;
+
dbg ("auerswald_ctrlread_complete called");
cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
/* check if there is valid data in this urb */
- if (urb->status) {
- dbg ("complete with non-zero status: %d", urb->status);
+ if (status) {
+ dbg ("complete with non-zero status: %d", status);
/* should we do a retry? */
- if (!auerswald_status_retry (urb->status)
+ if (!auerswald_status_retry(status)
|| !cp->usbdev
|| (cp->version < AUV_RETRY)
|| (bp->retries >= AU_RETRIES)) {
/* reuse the buffer */
- err ("control read: transmission error %d, can not retry", urb->status);
+ err ("control read: transmission error %d, can not retry", status);
auerbuf_releasebuf (bp);
/* Wake up all processes waiting for a buffer */
wake_up (&cp->bufferwait);
@@ -974,12 +978,13 @@ static void auerswald_int_complete (struct urb * urb)
unsigned int channelid;
unsigned int bytecount;
int ret;
+ int status = urb->status;
pauerbuf_t bp = NULL;
pauerswald_t cp = (pauerswald_t) urb->context;
dbg ("%s called", __FUNCTION__);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -987,10 +992,10 @@ static void auerswald_int_complete (struct urb * urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d", __FUNCTION__, status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero urb status received: %d", __FUNCTION__, status);
goto exit;
}
@@ -2034,12 +2039,12 @@ static void auerswald_disconnect (struct usb_interface *intf)
if (!cp)
return;
- down (&cp->mutex);
- info ("device /dev/%s now disconnecting", cp->name);
-
/* give back our USB minor number */
usb_deregister_dev(intf, &auerswald_class);
+ down (&cp->mutex);
+ info ("device /dev/%s now disconnecting", cp->name);
+
/* Stop the interrupt endpoint */
auerswald_int_release (cp);
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
index b15f2fd8dab4..92c1d2768df9 100644
--- a/drivers/usb/misc/berry_charge.c
+++ b/drivers/usb/misc/berry_charge.c
@@ -26,8 +26,11 @@
#define RIM_VENDOR 0x0fca
#define BLACKBERRY 0x0001
+#define BLACKBERRY_PEARL_DUAL 0x0004
+#define BLACKBERRY_PEARL 0x0006
static int debug;
+static int pearl_dual_mode = 1;
#ifdef dbg
#undef dbg
@@ -38,6 +41,8 @@ static int debug;
static struct usb_device_id id_table [] = {
{ USB_DEVICE(RIM_VENDOR, BLACKBERRY) },
+ { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) },
+ { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) },
{ }, /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -86,6 +91,30 @@ static int magic_charge(struct usb_device *udev)
return retval;
}
+static int magic_dual_mode(struct usb_device *udev)
+{
+ char *dummy_buffer = kzalloc(2, GFP_KERNEL);
+ int retval;
+
+ if (!dummy_buffer)
+ return -ENOMEM;
+
+ /* send magic command so that the Blackberry Pearl device exposes
+ * two interfaces: both the USB mass-storage one and one which can
+ * be used for database access. */
+ dbg(&udev->dev, "Sending magic pearl command\n");
+ retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100);
+ dbg(&udev->dev, "Magic pearl command returned %d\n", retval);
+
+ dbg(&udev->dev, "Calling set_configuration\n");
+ retval = usb_driver_set_configuration(udev, 1);
+ if (retval)
+ dev_err(&udev->dev, "Set Configuration failed: %d.\n", retval);
+
+ return retval;
+}
+
static int berry_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -105,6 +134,10 @@ static int berry_probe(struct usb_interface *intf,
/* turn the power on */
magic_charge(udev);
+ if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) &&
+ (pearl_dual_mode))
+ magic_dual_mode(udev);
+
/* we don't really want to bind to the device, userspace programs can
* handle the syncing just fine, so get outta here. */
return -ENODEV;
@@ -138,3 +171,5 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
+module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode");
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index e0f122e131d7..538b535e955b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@@ -64,7 +65,7 @@ static struct workqueue_struct *respond_queue;
* ftdi_module_lock exists to protect access to global variables
*
*/
-static struct semaphore ftdi_module_lock;
+static struct mutex ftdi_module_lock;
static int ftdi_instances = 0;
static struct list_head ftdi_static_list;
/*
@@ -199,10 +200,10 @@ static void ftdi_elan_delete(struct kref *kref)
dev_warn(&ftdi->udev->dev, "FREEING ftdi=%p\n", ftdi);
usb_put_dev(ftdi->udev);
ftdi->disconnected += 1;
- down(&ftdi_module_lock);
+ mutex_lock(&ftdi_module_lock);
list_del_init(&ftdi->ftdi_list);
ftdi_instances -= 1;
- up(&ftdi_module_lock);
+ mutex_unlock(&ftdi_module_lock);
kfree(ftdi->bulk_in_buffer);
ftdi->bulk_in_buffer = NULL;
}
@@ -746,10 +747,12 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
static void ftdi_elan_write_bulk_callback(struct urb *urb)
{
struct usb_ftdi *ftdi = (struct usb_ftdi *)urb->context;
- if (urb->status && !(urb->status == -ENOENT || urb->status ==
- -ECONNRESET || urb->status == -ESHUTDOWN)) {
+ int status = urb->status;
+
+ if (status && !(status == -ENOENT || status == -ECONNRESET ||
+ status == -ESHUTDOWN)) {
dev_err(&ftdi->udev->dev, "urb=%p write bulk status received: %"
- "d\n", urb, urb->status);
+ "d\n", urb, status);
}
usb_buffer_free(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
@@ -2780,10 +2783,10 @@ static int ftdi_elan_probe(struct usb_interface *interface,
return -ENOMEM;
}
memset(ftdi, 0x00, sizeof(struct usb_ftdi));
- down(&ftdi_module_lock);
+ mutex_lock(&ftdi_module_lock);
list_add_tail(&ftdi->ftdi_list, &ftdi_static_list);
ftdi->sequence_num = ++ftdi_instances;
- up(&ftdi_module_lock);
+ mutex_unlock(&ftdi_module_lock);
ftdi_elan_init_kref(ftdi);
init_MUTEX(&ftdi->sw_lock);
ftdi->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -2909,7 +2912,7 @@ static int __init ftdi_elan_init(void)
int result;
printk(KERN_INFO "driver %s built at %s on %s\n", ftdi_elan_driver.name,
__TIME__, __DATE__);
- init_MUTEX(&ftdi_module_lock);
+ mutex_init(&ftdi_module_lock);
INIT_LIST_HEAD(&ftdi_static_list);
status_queue = create_singlethread_workqueue("ftdi-status-control");
if (!status_queue)
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 8d0e360636e6..e6fd024024f5 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -119,9 +119,6 @@ static struct usb_driver idmouse_driver = {
.id_table = idmouse_table,
};
-/* prevent races between open() and disconnect() */
-static DEFINE_MUTEX(disconnect_mutex);
-
static int idmouse_create_image(struct usb_idmouse *dev)
{
int bytes_read;
@@ -211,21 +208,15 @@ static int idmouse_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int result;
- /* prevent disconnects */
- mutex_lock(&disconnect_mutex);
-
/* get the interface from minor number and driver information */
interface = usb_find_interface (&idmouse_driver, iminor (inode));
- if (!interface) {
- mutex_unlock(&disconnect_mutex);
+ if (!interface)
return -ENODEV;
- }
+
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&disconnect_mutex);
+ if (!dev)
return -ENODEV;
- }
/* lock this device */
down(&dev->sem);
@@ -255,9 +246,6 @@ error:
/* unlock this device */
up(&dev->sem);
-
- /* unlock the disconnect semaphore */
- mutex_unlock(&disconnect_mutex);
return result;
}
@@ -265,15 +253,10 @@ static int idmouse_release(struct inode *inode, struct file *file)
{
struct usb_idmouse *dev;
- /* prevent a race condition with open() */
- mutex_lock(&disconnect_mutex);
-
dev = file->private_data;
- if (dev == NULL) {
- mutex_unlock(&disconnect_mutex);
+ if (dev == NULL)
return -ENODEV;
- }
/* lock our device */
down(&dev->sem);
@@ -281,7 +264,6 @@ static int idmouse_release(struct inode *inode, struct file *file)
/* are we really open? */
if (dev->open <= 0) {
up(&dev->sem);
- mutex_unlock(&disconnect_mutex);
return -ENODEV;
}
@@ -291,12 +273,9 @@ static int idmouse_release(struct inode *inode, struct file *file)
/* the device was unplugged before the file was released */
up(&dev->sem);
idmouse_delete(dev);
- mutex_unlock(&disconnect_mutex);
- return 0;
+ } else {
+ up(&dev->sem);
}
-
- up(&dev->sem);
- mutex_unlock(&disconnect_mutex);
return 0;
}
@@ -391,30 +370,27 @@ static void idmouse_disconnect(struct usb_interface *interface)
{
struct usb_idmouse *dev;
- /* prevent races with open() */
- mutex_lock(&disconnect_mutex);
-
/* get device structure */
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
- /* lock it */
- down(&dev->sem);
-
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
+ /* lock it */
+ down(&dev->sem);
+
/* prevent device read, write and ioctl */
dev->present = 0;
- /* unlock */
- up(&dev->sem);
-
/* if the device is opened, idmouse_release will clean this up */
- if (!dev->open)
+ if (!dev->open) {
+ up(&dev->sem);
idmouse_delete(dev);
-
- mutex_unlock(&disconnect_mutex);
+ } else {
+ /* unlock */
+ up(&dev->sem);
+ }
info("%s disconnected", DRIVER_DESC);
}
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 3bb33f7bfa36..46d9f27ec173 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -100,8 +100,6 @@ struct iowarrior {
/*--------------*/
/* globals */
/*--------------*/
-/* prevent races between open() and disconnect() */
-static DECLARE_MUTEX(disconnect_sem);
/*
* USB spec identifies 5 second timeouts.
@@ -160,9 +158,10 @@ static void iowarrior_callback(struct urb *urb)
int read_idx;
int aux_idx;
int offset;
- int status;
+ int status = urb->status;
+ int retval;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -215,10 +214,10 @@ static void iowarrior_callback(struct urb *urb)
wake_up_interruptible(&dev->read_wait);
exit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
@@ -228,13 +227,15 @@ exit:
static void iowarrior_write_callback(struct urb *urb)
{
struct iowarrior *dev;
+ int status = urb->status;
+
dev = (struct iowarrior *)urb->context;
/* sync/async unlink faults aren't errors */
- if (urb->status &&
- !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) {
+ if (status &&
+ !(status == -ENOENT ||
+ status == -ECONNRESET || status == -ESHUTDOWN)) {
dbg("%s - nonzero write bulk status received: %d",
- __func__, urb->status);
+ __func__, status);
}
/* free up our allocated buffer */
usb_buffer_free(urb->dev, urb->transfer_buffer_length,
@@ -600,22 +601,18 @@ static int iowarrior_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
- /* prevent disconnects */
- down(&disconnect_sem);
-
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
err("%s - error, can't find device for minor %d", __FUNCTION__,
subminor);
- retval = -ENODEV;
- goto out;
+ return -ENODEV;
}
dev = usb_get_intfdata(interface);
- if (!dev) {
- retval = -ENODEV;
- goto out;
- }
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->mutex);
/* Only one process can open each device, no sharing. */
if (dev->opened) {
@@ -636,7 +633,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
retval = 0;
out:
- up(&disconnect_sem);
+ mutex_unlock(&dev->mutex);
return retval;
}
@@ -868,19 +865,16 @@ static void iowarrior_disconnect(struct usb_interface *interface)
struct iowarrior *dev;
int minor;
- /* prevent races with open() */
- down(&disconnect_sem);
-
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
- mutex_lock(&dev->mutex);
-
minor = dev->minor;
/* give back our minor */
usb_deregister_dev(interface, &iowarrior_class);
+ mutex_lock(&dev->mutex);
+
/* prevent device read, write and ioctl */
dev->present = 0;
@@ -898,7 +892,6 @@ static void iowarrior_disconnect(struct usb_interface *interface)
/* no process is using the device, cleanup now */
iowarrior_delete(dev);
}
- up(&disconnect_sem);
 dev_info(&interface->dev, "I/O-Warrior #%d now disconnected\n",
minor - IOWARRIOR_MINOR_BASE);
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 7bad49404762..8208496dfc63 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -176,9 +176,6 @@ struct ld_usb {
int interrupt_out_busy;
};
-/* prevent races between open() and disconnect() */
-static DEFINE_MUTEX(disconnect_mutex);
-
static struct usb_driver ld_usb_driver;
/**
@@ -222,16 +219,17 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
struct ld_usb *dev = urb->context;
size_t *actual_buffer;
unsigned int next_ring_head;
+ int status = urb->status;
int retval;
- if (urb->status) {
- if (urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN) {
+ if (status) {
+ if (status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -ESHUTDOWN) {
goto exit;
} else {
dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
spin_lock(&dev->rbsl);
goto resubmit; /* maybe we can recover */
}
@@ -278,14 +276,15 @@ exit:
static void ld_usb_interrupt_out_callback(struct urb *urb)
{
struct ld_usb *dev = urb->context;
+ int status = urb->status;
/* sync/async unlink faults aren't errors */
- if (urb->status && !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN))
+ if (status && !(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -ESHUTDOWN))
dbg_info(&dev->intf->dev,
"%s - nonzero write interrupt status received: %d\n",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
dev->interrupt_out_busy = 0;
wake_up_interruptible(&dev->write_wait);
@@ -298,35 +297,28 @@ static int ld_usb_open(struct inode *inode, struct file *file)
{
struct ld_usb *dev;
int subminor;
- int retval = 0;
+ int retval;
struct usb_interface *interface;
nonseekable_open(inode, file);
subminor = iminor(inode);
- mutex_lock(&disconnect_mutex);
-
interface = usb_find_interface(&ld_usb_driver, subminor);
if (!interface) {
err("%s - error, can't find device for minor %d\n",
__FUNCTION__, subminor);
- retval = -ENODEV;
- goto unlock_disconnect_exit;
+ return -ENODEV;
}
dev = usb_get_intfdata(interface);
- if (!dev) {
- retval = -ENODEV;
- goto unlock_disconnect_exit;
- }
+ if (!dev)
+ return -ENODEV;
/* lock this device */
- if (down_interruptible(&dev->sem)) {
- retval = -ERESTARTSYS;
- goto unlock_disconnect_exit;
- }
+ if (down_interruptible(&dev->sem))
+ return -ERESTARTSYS;
/* allow opening only once */
if (dev->open_count) {
@@ -366,9 +358,6 @@ static int ld_usb_open(struct inode *inode, struct file *file)
unlock_exit:
up(&dev->sem);
-unlock_disconnect_exit:
- mutex_unlock(&disconnect_mutex);
-
return retval;
}
@@ -766,18 +755,16 @@ static void ld_usb_disconnect(struct usb_interface *intf)
struct ld_usb *dev;
int minor;
- mutex_lock(&disconnect_mutex);
-
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
- down(&dev->sem);
-
minor = intf->minor;
/* give back our minor */
usb_deregister_dev(intf, &ld_usb_class);
+ down(&dev->sem);
+
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
up(&dev->sem);
@@ -787,8 +774,6 @@ static void ld_usb_disconnect(struct usb_interface *intf)
up(&dev->sem);
}
- mutex_unlock(&disconnect_mutex);
-
dev_info(&intf->dev, "LD USB Device #%d now disconnected\n",
(minor - USB_LD_MINOR_BASE));
}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 1713e19a7899..561970b889a5 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -254,9 +254,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_devic
static void tower_disconnect (struct usb_interface *interface);
-/* prevent races between open() and disconnect */
-static DEFINE_MUTEX (disconnect_mutex);
-
/* file operations needed when we register this driver */
static const struct file_operations tower_fops = {
.owner = THIS_MODULE,
@@ -344,28 +341,26 @@ static int tower_open (struct inode *inode, struct file *file)
nonseekable_open(inode, file);
subminor = iminor(inode);
- mutex_lock (&disconnect_mutex);
-
interface = usb_find_interface (&tower_driver, subminor);
if (!interface) {
err ("%s - error, can't find device for minor %d",
__FUNCTION__, subminor);
retval = -ENODEV;
- goto unlock_disconnect_exit;
+ goto exit;
}
dev = usb_get_intfdata(interface);
if (!dev) {
retval = -ENODEV;
- goto unlock_disconnect_exit;
+ goto exit;
}
/* lock this device */
if (down_interruptible (&dev->sem)) {
retval = -ERESTARTSYS;
- goto unlock_disconnect_exit;
+ goto exit;
}
/* allow opening only once */
@@ -421,9 +416,7 @@ static int tower_open (struct inode *inode, struct file *file)
unlock_exit:
up (&dev->sem);
-unlock_disconnect_exit:
- mutex_unlock (&disconnect_mutex);
-
+exit:
dbg(2, "%s: leave, return value %d ", __FUNCTION__, retval);
return retval;
@@ -749,19 +742,20 @@ exit:
static void tower_interrupt_in_callback (struct urb *urb)
{
struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context;
+ int status = urb->status;
int retval;
- dbg(4, "%s: enter, status %d", __FUNCTION__, urb->status);
+ dbg(4, "%s: enter, status %d", __FUNCTION__, status);
lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer);
- if (urb->status) {
- if (urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN) {
+ if (status) {
+ if (status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -ESHUTDOWN) {
goto exit;
} else {
- dbg(1, "%s: nonzero status received: %d", __FUNCTION__, urb->status);
+ dbg(1, "%s: nonzero status received: %d", __FUNCTION__, status);
goto resubmit; /* maybe we can recover */
}
}
@@ -795,7 +789,7 @@ exit:
wake_up_interruptible (&dev->read_wait);
lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer);
- dbg(4, "%s: leave, status %d", __FUNCTION__, urb->status);
+ dbg(4, "%s: leave, status %d", __FUNCTION__, status);
}
@@ -805,23 +799,24 @@ exit:
static void tower_interrupt_out_callback (struct urb *urb)
{
struct lego_usb_tower *dev = (struct lego_usb_tower *)urb->context;
+ int status = urb->status;
- dbg(4, "%s: enter, status %d", __FUNCTION__, urb->status);
+ dbg(4, "%s: enter, status %d", __FUNCTION__, status);
lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer);
/* sync/async unlink faults aren't errors */
- if (urb->status && !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN)) {
+ if (status && !(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -ESHUTDOWN)) {
dbg(1, "%s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
}
dev->interrupt_out_busy = 0;
wake_up_interruptible(&dev->write_wait);
lego_usb_tower_debug_data(5, __FUNCTION__, urb->actual_length, urb->transfer_buffer);
- dbg(4, "%s: leave, status %d", __FUNCTION__, urb->status);
+ dbg(4, "%s: leave, status %d", __FUNCTION__, status);
}
@@ -993,19 +988,16 @@ static void tower_disconnect (struct usb_interface *interface)
dbg(2, "%s: enter", __FUNCTION__);
- mutex_lock (&disconnect_mutex);
-
dev = usb_get_intfdata (interface);
usb_set_intfdata (interface, NULL);
-
- down (&dev->sem);
-
minor = dev->minor;
/* give back our minor */
usb_deregister_dev (interface, &tower_class);
+ down (&dev->sem);
+
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
up (&dev->sem);
@@ -1015,8 +1007,6 @@ static void tower_disconnect (struct usb_interface *interface)
up (&dev->sem);
}
- mutex_unlock (&disconnect_mutex);
-
info("LEGO USB Tower #%d now disconnected", (minor - LEGO_USB_TOWER_MINOR_BASE));
dbg(2, "%s: leave", __FUNCTION__);
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c
index 371bf2b1197d..aa9bcceabe74 100644
--- a/drivers/usb/misc/phidgetkit.c
+++ b/drivers/usb/misc/phidgetkit.c
@@ -305,9 +305,10 @@ static void interfacekit_irq(struct urb *urb)
struct interfacekit *kit = urb->context;
unsigned char *buffer = kit->data;
int i, level, sensor;
- int status;
+ int retval;
+ int status = urb->status;
- switch (urb->status) {
+ switch (status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
@@ -377,11 +378,11 @@ static void interfacekit_irq(struct urb *urb)
schedule_delayed_work(&kit->do_notify, 0);
resubmit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
- err("can't resubmit intr, %s-%s/interfacekit0, status %d",
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ err("can't resubmit intr, %s-%s/interfacekit0, retval %d",
kit->udev->bus->bus_name,
- kit->udev->devpath, status);
+ kit->udev->devpath, retval);
}
static void do_notify(struct work_struct *work)
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c
index 5727e1ea2f91..df0ebcdb9d6a 100644
--- a/drivers/usb/misc/phidgetmotorcontrol.c
+++ b/drivers/usb/misc/phidgetmotorcontrol.c
@@ -95,9 +95,10 @@ static void motorcontrol_irq(struct urb *urb)
struct motorcontrol *mc = urb->context;
unsigned char *buffer = mc->data;
int i, level;
- int status;
+ int retval;
+ int status = urb->status;
- switch (urb->status) {
+ switch (status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
@@ -151,12 +152,12 @@ static void motorcontrol_irq(struct urb *urb)
schedule_delayed_work(&mc->do_notify, 0);
resubmit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
dev_err(&mc->intf->dev,
- "can't resubmit intr, %s-%s/motorcontrol0, status %d",
+ "can't resubmit intr, %s-%s/motorcontrol0, retval %d",
mc->udev->bus->bus_name,
- mc->udev->devpath, status);
+ mc->udev->devpath, retval);
}
static void do_notify(struct work_struct *work)
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 6f8b134a79cb..9f37ba44c132 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -72,8 +72,6 @@ MODULE_PARM_DESC(last, "Number of last console to take over (1 - MAX_NR_CONSOLES
static struct usb_driver sisusb_driver;
-DEFINE_MUTEX(disconnect_mutex);
-
static void
sisusb_free_buffers(struct sisusb_usb_data *sisusb)
{
@@ -2511,31 +2509,24 @@ sisusb_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor = iminor(inode);
- mutex_lock(&disconnect_mutex);
-
if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
printk(KERN_ERR "sisusb[%d]: Failed to find interface\n",
subminor);
- mutex_unlock(&disconnect_mutex);
return -ENODEV;
}
- if (!(sisusb = usb_get_intfdata(interface))) {
- mutex_unlock(&disconnect_mutex);
+ if (!(sisusb = usb_get_intfdata(interface)))
return -ENODEV;
- }
mutex_lock(&sisusb->lock);
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
return -EBUSY;
}
@@ -2543,7 +2534,6 @@ sisusb_open(struct inode *inode, struct file *file)
if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH) {
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
printk(KERN_ERR
"sisusbvga[%d]: Failed to initialize "
"device\n",
@@ -2552,7 +2542,6 @@ sisusb_open(struct inode *inode, struct file *file)
}
} else {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
printk(KERN_ERR
"sisusbvga[%d]: Device not attached to "
"USB 2.0 hub\n",
@@ -2570,8 +2559,6 @@ sisusb_open(struct inode *inode, struct file *file)
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
-
return 0;
}
@@ -2601,12 +2588,8 @@ sisusb_release(struct inode *inode, struct file *file)
struct sisusb_usb_data *sisusb;
int myminor;
- mutex_lock(&disconnect_mutex);
-
- if (!(sisusb = (struct sisusb_usb_data *)file->private_data)) {
- mutex_unlock(&disconnect_mutex);
+ if (!(sisusb = (struct sisusb_usb_data *)file->private_data))
return -ENODEV;
- }
mutex_lock(&sisusb->lock);
@@ -2626,8 +2609,6 @@ sisusb_release(struct inode *inode, struct file *file)
/* decrement the usage count on our device */
kref_put(&sisusb->kref, sisusb_delete);
- mutex_unlock(&disconnect_mutex);
-
return 0;
}
@@ -3383,12 +3364,9 @@ static void sisusb_disconnect(struct usb_interface *intf)
sisusb_console_exit(sisusb);
#endif
- /* The above code doesn't need the disconnect
- * semaphore to be down; its meaning is to
- * protect all other routines from the disconnect
- * case, not the other way round.
- */
- mutex_lock(&disconnect_mutex);
+ minor = sisusb->minor;
+
+ usb_deregister_dev(intf, &usb_sisusb_class);
mutex_lock(&sisusb->lock);
@@ -3396,12 +3374,8 @@ static void sisusb_disconnect(struct usb_interface *intf)
if (!sisusb_wait_all_out_complete(sisusb))
sisusb_kill_all_busy(sisusb);
- minor = sisusb->minor;
-
usb_set_intfdata(intf, NULL);
- usb_deregister_dev(intf, &usb_sisusb_class);
-
#ifdef SISUSB_OLD_CONFIG_COMPAT
if (sisusb->ioctl32registered) {
int ret;
@@ -3426,8 +3400,6 @@ static void sisusb_disconnect(struct usb_interface *intf)
/* decrement our usage count */
kref_put(&sisusb->kref, sisusb_delete);
- mutex_unlock(&disconnect_mutex);
-
printk(KERN_INFO "sisusbvga[%d]: Disconnected\n", minor);
}
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 5947afb0017e..8d0edc867f33 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -214,18 +214,13 @@ sisusbcon_init(struct vc_data *c, int init)
* are set up/restored.
*/
- mutex_lock(&disconnect_mutex);
-
- if (!(sisusb = sisusb_get_sisusb(c->vc_num))) {
- mutex_unlock(&disconnect_mutex);
+ if (!(sisusb = sisusb_get_sisusb(c->vc_num)))
return;
- }
mutex_lock(&sisusb->lock);
if (!sisusb_sisusb_valid(sisusb)) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
return;
}
@@ -264,8 +259,6 @@ sisusbcon_init(struct vc_data *c, int init)
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
-
if (init) {
c->vc_cols = cols;
c->vc_rows = rows;
@@ -284,12 +277,8 @@ sisusbcon_deinit(struct vc_data *c)
* and others, ie not under our control.
*/
- mutex_lock(&disconnect_mutex);
-
- if (!(sisusb = sisusb_get_sisusb(c->vc_num))) {
- mutex_unlock(&disconnect_mutex);
+ if (!(sisusb = sisusb_get_sisusb(c->vc_num)))
return;
- }
mutex_lock(&sisusb->lock);
@@ -314,8 +303,6 @@ sisusbcon_deinit(struct vc_data *c)
/* decrement the usage count on our sisusb */
kref_put(&sisusb->kref, sisusb_delete);
-
- mutex_unlock(&disconnect_mutex);
}
/* interface routine */
@@ -1490,14 +1477,11 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
{
int i, ret, minor = sisusb->minor;
- mutex_lock(&disconnect_mutex);
-
mutex_lock(&sisusb->lock);
/* Erm.. that should not happen */
if (sisusb->haveconsole || !sisusb->SiS_Pr) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
return 1;
}
@@ -1508,14 +1492,12 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
first > MAX_NR_CONSOLES ||
last > MAX_NR_CONSOLES) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
return 1;
}
/* If gfxcore not initialized or no consoles given, quit graciously */
if (!sisusb->gfxinit || first < 1 || last < 1) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
return 0;
}
@@ -1526,7 +1508,6 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
/* Set up text mode (and upload default font) */
if (sisusb_reset_text_mode(sisusb, 1)) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
printk(KERN_ERR
"sisusbvga[%d]: Failed to set up text mode\n",
minor);
@@ -1550,7 +1531,6 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
/* Allocate screen buffer */
if (!(sisusb->scrbuf = (unsigned long)vmalloc(sisusb->scrbuf_size))) {
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
printk(KERN_ERR
"sisusbvga[%d]: Failed to allocate screen buffer\n",
minor);
@@ -1558,7 +1538,6 @@ sisusb_console_init(struct sisusb_usb_data *sisusb, int first, int last)
}
mutex_unlock(&sisusb->lock);
- mutex_unlock(&disconnect_mutex);
/* Now grab the desired console(s) */
ret = take_over_console(&sisusb_con, first - 1, last - 1, 0);
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index f05f83268af4..864bc0e96591 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -808,8 +808,6 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] =
{ 0x2b,0xc2, 35} /* 0x71 768@576@60 */
};
-extern struct mutex disconnect_mutex;
-
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 12bad8a205a7..719842032712 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -45,13 +45,13 @@ struct usb_lcd {
struct kref kref;
struct semaphore limit_sem; /* to stop writes at full throttle from
* using up all RAM */
+ struct usb_anchor submitted; /* URBs to wait for before suspend */
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
#define USB_LCD_CONCURRENT_WRITES 5
static struct usb_driver lcd_driver;
-static DEFINE_MUTEX(usb_lcd_open_mutex);
static void lcd_delete(struct kref *kref)
@@ -68,35 +68,35 @@ static int lcd_open(struct inode *inode, struct file *file)
{
struct usb_lcd *dev;
struct usb_interface *interface;
- int subminor;
- int retval = 0;
+ int subminor, r;
subminor = iminor(inode);
- mutex_lock(&usb_lcd_open_mutex);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
err ("USBLCD: %s - error, can't find device for minor %d",
__FUNCTION__, subminor);
- retval = -ENODEV;
- goto exit;
+ return -ENODEV;
}
dev = usb_get_intfdata(interface);
- if (!dev) {
- retval = -ENODEV;
- goto exit;
- }
+ if (!dev)
+ return -ENODEV;
/* increment our usage count for the device */
kref_get(&dev->kref);
+ /* grab a power reference */
+ r = usb_autopm_get_interface(interface);
+ if (r < 0) {
+ kref_put(&dev->kref, lcd_delete);
+ return r;
+ }
+
/* save our object in the file's private structure */
file->private_data = dev;
-exit:
- mutex_unlock(&usb_lcd_open_mutex);
- return retval;
+ return 0;
}
static int lcd_release(struct inode *inode, struct file *file)
@@ -108,6 +108,7 @@ static int lcd_release(struct inode *inode, struct file *file)
return -ENODEV;
/* decrement the count on our device */
+ usb_autopm_put_interface(dev->interface);
kref_put(&dev->kref, lcd_delete);
return 0;
}
@@ -175,16 +176,17 @@ static int lcd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
static void lcd_write_bulk_callback(struct urb *urb)
{
struct usb_lcd *dev;
+ int status = urb->status;
dev = (struct usb_lcd *)urb->context;
/* sync/async unlink faults aren't errors */
- if (urb->status &&
- !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN)) {
+ if (status &&
+ !(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -ESHUTDOWN)) {
dbg("USBLCD: %s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
}
/* free up our allocated buffer */
@@ -233,12 +235,14 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, siz
usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
buf, count, lcd_write_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &dev->submitted);
/* send the data out the bulk port */
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
err("USBLCD: %s - failed submitting write urb, error %d", __FUNCTION__, retval);
- goto error;
+ goto error_unanchor;
}
/* release our reference to this urb, the USB core will eventually free it entirely */
@@ -246,7 +250,8 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer, siz
exit:
return count;
-
+error_unanchor:
+ usb_unanchor_urb(urb);
error:
usb_buffer_free(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
@@ -291,6 +296,7 @@ static int lcd_probe(struct usb_interface *interface, const struct usb_device_id
}
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+ init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
@@ -358,22 +364,41 @@ error:
return retval;
}
+static void lcd_draw_down(struct usb_lcd *dev)
+{
+ int time;
+
+ time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
+ if (!time)
+ usb_kill_anchored_urbs(&dev->submitted);
+}
+
+static int lcd_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usb_lcd *dev = usb_get_intfdata(intf);
+
+ if (!dev)
+ return 0;
+ lcd_draw_down(dev);
+ return 0;
+}
+
+static int lcd_resume(struct usb_interface *intf)
+{
+ return 0;
+}
+
static void lcd_disconnect(struct usb_interface *interface)
{
struct usb_lcd *dev;
int minor = interface->minor;
- /* prevent skel_open() from racing skel_disconnect() */
- mutex_lock(&usb_lcd_open_mutex);
-
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
/* give back our minor */
usb_deregister_dev(interface, &lcd_class);
- mutex_unlock(&usb_lcd_open_mutex);
-
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
@@ -384,7 +409,10 @@ static struct usb_driver lcd_driver = {
.name = "usblcd",
.probe = lcd_probe,
.disconnect = lcd_disconnect,
+ .suspend = lcd_suspend,
+ .resume = lcd_resume,
.id_table = id_table,
+ .supports_autosuspend = 1,
};
static int __init usb_lcd_init(void)
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index fb321864a92d..e901d31e051b 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -768,8 +768,8 @@ static void ctrl_complete (struct urb *urb)
/* some faults are allowed, not required */
if (subcase->expected > 0 && (
- ((urb->status == -subcase->expected /* happened */
- || urb->status == 0)))) /* didn't */
+ ((status == -subcase->expected /* happened */
+ || status == 0)))) /* didn't */
status = 0;
/* sometimes more than one fault is allowed */
else if (subcase->number == 12 && status == -EPIPE)
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 1a60f9c473ad..2734fe2b9c43 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -111,12 +111,13 @@ static void async_complete(struct urb *urb)
struct uss720_async_request *rq;
struct parport *pp;
struct parport_uss720_private *priv;
+ int status = urb->status;
rq = urb->context;
priv = rq->priv;
pp = priv->pp;
- if (urb->status) {
- err("async_complete: urb error %d", urb->status);
+ if (status) {
+ err("async_complete: urb error %d", status);
} else if (rq->dr.bRequest == 3) {
memcpy(priv->reg, rq->reg, sizeof(priv->reg));
#if 0
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 0af11a66207c..c03dfd7a9d36 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -4,7 +4,7 @@
* This is a binary format reader.
*
* Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
- * Copyright (C) 2006 Pete Zaitcev (zaitcev@redhat.com)
+ * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
*/
#include <linux/kernel.h>
@@ -172,6 +172,7 @@ static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0)
+static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;
@@ -1144,10 +1145,38 @@ static void mon_free_buff(struct mon_pgmap *map, int npages)
free_page((unsigned long) map[n].ptr);
}
+int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
+{
+ struct device *dev;
+ unsigned minor = ubus ? ubus->busnum : 0;
+
+ if (minor >= MON_BIN_MAX_MINOR)
+ return 0;
+
+ dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
+ MKDEV(MAJOR(mon_bin_dev0), minor), "usbmon%d", minor);
+ if (IS_ERR(dev))
+ return 0;
+
+ mbus->classdev = dev;
+ return 1;
+}
+
+void mon_bin_del(struct mon_bus *mbus)
+{
+ device_destroy(mon_bin_class, mbus->classdev->devt);
+}
+
int __init mon_bin_init(void)
{
int rc;
+ mon_bin_class = class_create(THIS_MODULE, "usbmon");
+ if (IS_ERR(mon_bin_class)) {
+ rc = PTR_ERR(mon_bin_class);
+ goto err_class;
+ }
+
rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
if (rc < 0)
goto err_dev;
@@ -1164,6 +1193,8 @@ int __init mon_bin_init(void)
err_add:
unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
+ class_destroy(mon_bin_class);
+err_class:
return rc;
}
@@ -1171,4 +1202,5 @@ void mon_bin_exit(void)
{
cdev_del(&mon_bin_cdev);
unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
+ class_destroy(mon_bin_class);
}
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 8977ec0d0f99..ce61d8b0fd86 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -220,6 +220,8 @@ static void mon_bus_remove(struct usb_bus *ubus)
list_del(&mbus->bus_link);
if (mbus->text_inited)
mon_text_del(mbus);
+ if (mbus->bin_inited)
+ mon_bin_del(mbus);
mon_dissolve(mbus, ubus);
kref_put(&mbus->ref, mon_bus_drop);
@@ -301,8 +303,8 @@ static void mon_bus_init(struct usb_bus *ubus)
mbus->u_bus = ubus;
ubus->mon_bus = mbus;
- mbus->text_inited = mon_text_add(mbus, ubus->busnum);
- // mon_bin_add(...)
+ mbus->text_inited = mon_text_add(mbus, ubus);
+ mbus->bin_inited = mon_bin_add(mbus, ubus);
mutex_lock(&mon_lock);
list_add_tail(&mbus->bus_link, &mon_buses);
@@ -321,8 +323,8 @@ static void mon_bus0_init(void)
spin_lock_init(&mbus->lock);
INIT_LIST_HEAD(&mbus->r_list);
- mbus->text_inited = mon_text_add(mbus, 0);
- // mbus->bin_inited = mon_bin_add(mbus, 0);
+ mbus->text_inited = mon_text_add(mbus, NULL);
+ mbus->bin_inited = mon_bin_add(mbus, NULL);
}
/*
@@ -403,6 +405,8 @@ static void __exit mon_exit(void)
if (mbus->text_inited)
mon_text_del(mbus);
+ if (mbus->bin_inited)
+ mon_bin_del(mbus);
/*
* This never happens, because the open/close paths in
@@ -423,6 +427,8 @@ static void __exit mon_exit(void)
mbus = &mon_bus0;
if (mbus->text_inited)
mon_text_del(mbus);
+ if (mbus->bin_inited)
+ mon_bin_del(mbus);
mutex_unlock(&mon_lock);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index ec0cc51e39ac..8f27a9e1c36b 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -340,7 +340,7 @@ static int mon_text_open(struct inode *inode, struct file *file)
snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
rp->e_slab = kmem_cache_create(rp->slab_name,
sizeof(struct mon_event_text), sizeof(long), 0,
- mon_text_ctor, NULL);
+ mon_text_ctor);
if (rp->e_slab == NULL) {
rc = -ENOMEM;
goto err_slab;
@@ -655,20 +655,24 @@ static const struct file_operations mon_fops_text_u = {
.release = mon_text_release,
};
-int mon_text_add(struct mon_bus *mbus, int busnum)
+int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
struct dentry *d;
enum { NAMESZ = 10 };
char name[NAMESZ];
+ int busnum = ubus ? ubus->busnum : 0;
int rc;
- rc = snprintf(name, NAMESZ, "%dt", busnum);
- if (rc <= 0 || rc >= NAMESZ)
- goto err_print_t;
- d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text_t);
- if (d == NULL)
- goto err_create_t;
- mbus->dent_t = d;
+ if (ubus != NULL) {
+ rc = snprintf(name, NAMESZ, "%dt", busnum);
+ if (rc <= 0 || rc >= NAMESZ)
+ goto err_print_t;
+ d = debugfs_create_file(name, 0600, mon_dir, mbus,
+ &mon_fops_text_t);
+ if (d == NULL)
+ goto err_create_t;
+ mbus->dent_t = d;
+ }
rc = snprintf(name, NAMESZ, "%du", busnum);
if (rc <= 0 || rc >= NAMESZ)
@@ -694,8 +698,10 @@ err_print_s:
mbus->dent_u = NULL;
err_create_u:
err_print_u:
- debugfs_remove(mbus->dent_t);
- mbus->dent_t = NULL;
+ if (ubus != NULL) {
+ debugfs_remove(mbus->dent_t);
+ mbus->dent_t = NULL;
+ }
err_create_t:
err_print_t:
return 0;
@@ -704,7 +710,8 @@ err_print_t:
void mon_text_del(struct mon_bus *mbus)
{
debugfs_remove(mbus->dent_u);
- debugfs_remove(mbus->dent_t);
+ if (mbus->dent_t != NULL)
+ debugfs_remove(mbus->dent_t);
debugfs_remove(mbus->dent_s);
}
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index 13d63255283e..f68ad6d99ad7 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -20,9 +20,11 @@ struct mon_bus {
struct usb_bus *u_bus;
int text_inited;
+ int bin_inited;
struct dentry *dent_s; /* Debugging file */
struct dentry *dent_t; /* Text interface file */
struct dentry *dent_u; /* Second text interface file */
+ struct device *classdev; /* Device in usbmon class */
/* Ref */
int nreaders; /* Under mon_lock AND mbus->lock */
@@ -52,9 +54,10 @@ void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r);
struct mon_bus *mon_bus_lookup(unsigned int num);
-int /*bool*/ mon_text_add(struct mon_bus *mbus, int busnum);
+int /*bool*/ mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus);
void mon_text_del(struct mon_bus *mbus);
-// void mon_bin_add(struct mon_bus *);
+int /*bool*/ mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus);
+void mon_bin_del(struct mon_bus *mbus);
int __init mon_text_init(void);
void mon_text_exit(void);
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 3efe67092f15..43d6db696f90 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -464,6 +464,16 @@ config USB_SERIAL_PL2303
To compile this driver as a module, choose M here: the
module will be called pl2303.
+config USB_SERIAL_OTI6858
+ tristate "USB Ours Technology Inc. OTi-6858 USB To RS232 Bridge Controller (EXPERIMENTAL)"
+ depends on USB_SERIAL
+ help
+ Say Y here if you want to use the OTi-6858 single port USB to serial
+ converter device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called oti6858.
+
config USB_SERIAL_HP4X
tristate "USB HP4x Calculators support"
depends on USB_SERIAL
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 61166ad450e6..07a976eca6b7 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
+obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o
obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index fbc8c27d5d99..1cd29cd6bd00 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -411,12 +411,13 @@ static int aircable_write(struct usb_serial_port *port,
static void aircable_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
+ int status = urb->status;
int result;
- dbg("%s - urb->status: %d", __FUNCTION__ , urb->status);
+ dbg("%s - urb status: %d", __FUNCTION__ , status);
/* This has been taken from cypress_m8.c cypress_write_int_callback */
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -425,14 +426,14 @@ static void aircable_write_bulk_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
port->write_urb_busy = 0;
return;
default:
/* error in the urb, so we have to resubmit it */
dbg("%s - Overflow in write", __FUNCTION__);
dbg("%s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
port->write_urb->transfer_buffer_length = 1;
port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -457,16 +458,17 @@ static void aircable_read_bulk_callback(struct urb *urb)
unsigned long no_packages, remaining, package_length, i;
int result, shift = 0;
unsigned char *temp;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - urb->status = %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - urb status = %d", __FUNCTION__, status);
if (!port->open_count) {
dbg("%s - port is closed, exiting.", __FUNCTION__);
return;
}
- if (urb->status == -EPROTO) {
+ if (status == -EPROTO) {
dbg("%s - caught -EPROTO, resubmitting the urb",
__FUNCTION__);
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index 39a498362594..cff6fd190a28 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -82,12 +82,13 @@ static void airprime_read_bulk_callback(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero read bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
return;
}
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data);
@@ -109,6 +110,7 @@ static void airprime_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct airprime_private *priv = usb_get_serial_port_data(port);
+ int status = urb->status;
unsigned long flags;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -116,9 +118,9 @@ static void airprime_write_bulk_callback(struct urb *urb)
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree (urb->transfer_buffer);
- if (urb->status)
+ if (status)
dbg("%s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index fe437125f14b..c9fd486c1c7d 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -172,7 +172,7 @@ static void ark3116_set_termios(struct usb_serial_port *port,
dbg("%s - port %d", __FUNCTION__, port->number);
- if ((!port->tty) || (!port->tty->termios)) {
+ if (!port->tty || !port->tty->termios) {
dbg("%s - no tty structures", __FUNCTION__);
return;
}
@@ -188,16 +188,6 @@ static void ark3116_set_termios(struct usb_serial_port *port,
cflag = port->tty->termios->c_cflag;
- /* check that they really want us to change something: */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(port->tty->termios->c_iflag) ==
- RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("%s - nothing to change...", __FUNCTION__);
- return;
- }
- }
-
buf = kmalloc(1, GFP_KERNEL);
if (!buf) {
dbg("error kmalloc");
@@ -220,7 +210,7 @@ static void ark3116_set_termios(struct usb_serial_port *port,
dbg("setting CS7");
break;
default:
- err("CSIZE was set but not CS5-CS8, using CS8!");
+ dbg("CSIZE was set but not CS5-CS8, using CS8!");
/* fall through */
case CS8:
config |= 0x03;
@@ -251,38 +241,33 @@ static void ark3116_set_termios(struct usb_serial_port *port,
}
/* set baudrate */
- baud = 0;
- switch (cflag & CBAUD) {
- case B0:
- err("can't set 0 baud, using 9600 instead");
+ baud = tty_get_baud_rate(port->tty);
+
+ switch (baud) {
+ case 75:
+ case 150:
+ case 300:
+ case 600:
+ case 1200:
+ case 1800:
+ case 2400:
+ case 4800:
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ case 230400:
+ case 460800:
break;
- case B75: baud = 75; break;
- case B150: baud = 150; break;
- case B300: baud = 300; break;
- case B600: baud = 600; break;
- case B1200: baud = 1200; break;
- case B1800: baud = 1800; break;
- case B2400: baud = 2400; break;
- case B4800: baud = 4800; break;
- case B9600: baud = 9600; break;
- case B19200: baud = 19200; break;
- case B38400: baud = 38400; break;
- case B57600: baud = 57600; break;
- case B115200: baud = 115200; break;
- case B230400: baud = 230400; break;
- case B460800: baud = 460800; break;
+ /* set 9600 as default (if given baudrate is invalid for example) */
default:
- dbg("does not support the baudrate requested (fix it)");
- break;
+ baud = 9600;
}
- /* set 9600 as default (if given baudrate is invalid for example) */
- if (baud == 0)
- baud = 9600;
-
/*
* found by try'n'error, be careful, maybe there are other options
- * for multiplicator etc!
+ * for multiplicator etc! (3.5 for example)
*/
if (baud == 460800)
/* strange, for 460800 the formula is wrong
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 3b800d277c4b..e67ce25f7512 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -255,9 +255,10 @@ static void belkin_sa_read_int_callback (struct urb *urb)
struct belkin_sa_private *priv;
unsigned char *data = urb->transfer_buffer;
int retval;
+ int status = urb->status;
unsigned long flags;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -265,10 +266,12 @@ static void belkin_sa_read_int_callback (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero urb status received: %d",
+ __FUNCTION__, status);
goto exit;
}
@@ -346,6 +349,7 @@ static void belkin_sa_set_termios (struct usb_serial_port *port, struct ktermios
unsigned long flags;
unsigned long control_state;
int bad_flow_control;
+ speed_t baud;
if ((!port->tty) || (!port->tty->termios)) {
dbg ("%s - no tty or termios structure", __FUNCTION__);
@@ -361,16 +365,8 @@ static void belkin_sa_set_termios (struct usb_serial_port *port, struct ktermios
bad_flow_control = priv->bad_flow_control;
spin_unlock_irqrestore(&priv->lock, flags);
- /* check that they really want us to change something */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(port->tty->termios->c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("%s - nothing to change...", __FUNCTION__);
- return;
- }
- old_iflag = old_termios->c_iflag;
- old_cflag = old_termios->c_cflag;
- }
+ old_iflag = old_termios->c_iflag;
+ old_cflag = old_termios->c_cflag;
/* Set the baud rate */
if( (cflag&CBAUD) != (old_cflag&CBAUD) ) {
@@ -384,38 +380,30 @@ static void belkin_sa_set_termios (struct usb_serial_port *port, struct ktermios
if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 1) < 0)
err("Set RTS error");
}
+ }
- switch(cflag & CBAUD) {
- case B0: /* handled below */ break;
- case B300: urb_value = BELKIN_SA_BAUD(300); break;
- case B600: urb_value = BELKIN_SA_BAUD(600); break;
- case B1200: urb_value = BELKIN_SA_BAUD(1200); break;
- case B2400: urb_value = BELKIN_SA_BAUD(2400); break;
- case B4800: urb_value = BELKIN_SA_BAUD(4800); break;
- case B9600: urb_value = BELKIN_SA_BAUD(9600); break;
- case B19200: urb_value = BELKIN_SA_BAUD(19200); break;
- case B38400: urb_value = BELKIN_SA_BAUD(38400); break;
- case B57600: urb_value = BELKIN_SA_BAUD(57600); break;
- case B115200: urb_value = BELKIN_SA_BAUD(115200); break;
- case B230400: urb_value = BELKIN_SA_BAUD(230400); break;
- default: err("BELKIN USB Serial Adapter: unsupported baudrate request, using default of 9600");
- urb_value = BELKIN_SA_BAUD(9600); break;
- }
- if ((cflag & CBAUD) != B0 ) {
- if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0)
- err("Set baudrate error");
- } else {
- /* Disable flow control */
- if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, BELKIN_SA_FLOW_NONE) < 0)
- err("Disable flowcontrol error");
-
- /* Drop RTS and DTR */
- control_state &= ~(TIOCM_DTR | TIOCM_RTS);
- if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0)
- err("DTR LOW error");
- if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0)
- err("RTS LOW error");
- }
+ baud = tty_get_baud_rate(port->tty);
+ urb_value = BELKIN_SA_BAUD(baud);
+ /* Clip to maximum speed */
+ if (urb_value == 0)
+ urb_value = 1;
+ /* Turn it back into a resulting real baud rate */
+ baud = BELKIN_SA_BAUD(urb_value);
+ /* FIXME: Once the tty updates are done then push this back to the tty */
+
+ if ((cflag & CBAUD) != B0 ) {
+ if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0)
+ err("Set baudrate error");
+ } else {
+ /* Disable flow control */
+ if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, BELKIN_SA_FLOW_NONE) < 0)
+ err("Disable flowcontrol error");
+ /* Drop RTS and DTR */
+ control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+ if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0)
+ err("DTR LOW error");
+ if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0)
+ err("RTS LOW error");
}
/* set the parity */
@@ -435,7 +423,7 @@ static void belkin_sa_set_termios (struct usb_serial_port *port, struct ktermios
case CS6: urb_value = BELKIN_SA_DATA_BITS(6); break;
case CS7: urb_value = BELKIN_SA_DATA_BITS(7); break;
case CS8: urb_value = BELKIN_SA_DATA_BITS(8); break;
- default: err("CSIZE was not CS5-CS8, using default of 8");
+ default: dbg("CSIZE was not CS5-CS8, using default of 8");
urb_value = BELKIN_SA_DATA_BITS(8);
break;
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 4167753ed31f..4353df92487f 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -305,12 +305,13 @@ static void cyberjack_read_int_callback( struct urb *urb )
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
/* the urb might have been killed. */
- if (urb->status)
+ if (status)
return;
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data);
@@ -365,12 +366,14 @@ static void cyberjack_read_bulk_callback (struct urb *urb)
unsigned char *data = urb->transfer_buffer;
short todo;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
-
+
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -411,12 +414,14 @@ static void cyberjack_write_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
port->write_urb_busy = 0;
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 57b8e27285fc..163386336a5d 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1275,10 +1275,11 @@ static void cypress_read_int_callback(struct urb *urb)
int bytes = 0;
int result;
int i = 0;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- switch (urb->status) {
+ switch (status) {
case 0: /* success */
break;
case -ECONNRESET:
@@ -1292,7 +1293,7 @@ static void cypress_read_int_callback(struct urb *urb)
default:
/* something ugly is going on... */
dev_err(&urb->dev->dev,"%s - unexpected nonzero read status received: %d\n",
- __FUNCTION__,urb->status);
+ __FUNCTION__, status);
cypress_set_dead(port);
return;
}
@@ -1419,10 +1420,11 @@ static void cypress_write_int_callback(struct urb *urb)
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct cypress_private *priv = usb_get_serial_port_data(port);
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
-
- switch (urb->status) {
+
+ switch (status) {
case 0:
/* success */
break;
@@ -1430,7 +1432,8 @@ static void cypress_write_int_callback(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
priv->write_urb_in_use = 0;
return;
case -EPIPE: /* no break needed; clear halt and resubmit */
@@ -1438,7 +1441,8 @@ static void cypress_write_int_callback(struct urb *urb)
break;
usb_clear_halt(port->serial->dev, 0x02);
/* error in the urb, so we have to resubmit it */
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
port->interrupt_out_urb->transfer_buffer_length = 1;
port->interrupt_out_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
@@ -1450,7 +1454,7 @@ static void cypress_write_int_callback(struct urb *urb)
break;
default:
dev_err(&urb->dev->dev,"%s - unexpected nonzero write status received: %d\n",
- __FUNCTION__,urb->status);
+ __FUNCTION__, status);
cypress_set_dead(port);
break;
}
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index d78692c01cfa..976f54ec26e6 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -416,9 +416,6 @@ struct digi_port {
int dp_port_num;
int dp_out_buf_len;
unsigned char dp_out_buf[DIGI_OUT_BUF_SIZE];
- int dp_in_buf_len;
- unsigned char dp_in_buf[DIGI_IN_BUF_SIZE];
- unsigned char dp_in_flag_buf[DIGI_IN_BUF_SIZE];
int dp_write_urb_in_use;
unsigned int dp_modem_signals;
wait_queue_head_t dp_modem_change_wait;
@@ -920,7 +917,6 @@ dbg( "digi_rx_throttle: TOP: port=%d", priv->dp_port_num );
spin_lock_irqsave( &priv->dp_port_lock, flags );
priv->dp_throttled = 1;
priv->dp_throttle_restart = 0;
- priv->dp_in_buf_len = 0;
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
}
@@ -930,23 +926,16 @@ static void digi_rx_unthrottle( struct usb_serial_port *port )
{
int ret = 0;
- int len;
unsigned long flags;
struct digi_port *priv = usb_get_serial_port_data(port);
- struct tty_struct *tty = port->tty;
-
dbg( "digi_rx_unthrottle: TOP: port=%d", priv->dp_port_num );
spin_lock_irqsave( &priv->dp_port_lock, flags );
- /* send any buffered chars from throttle time on to tty subsystem */
-
- len = tty_buffer_request_room(tty, priv->dp_in_buf_len);
- if( len > 0 ) {
- tty_insert_flip_string_flags(tty, priv->dp_in_buf, priv->dp_in_flag_buf, len);
- tty_flip_buffer_push( tty );
- }
+ /* turn throttle off */
+ priv->dp_throttled = 0;
+ priv->dp_throttle_restart = 0;
/* restart read chain */
if( priv->dp_throttle_restart ) {
@@ -954,11 +943,6 @@ dbg( "digi_rx_unthrottle: TOP: port=%d", priv->dp_port_num );
ret = usb_submit_urb( port->read_urb, GFP_ATOMIC );
}
- /* turn throttle off */
- priv->dp_throttled = 0;
- priv->dp_in_buf_len = 0;
- priv->dp_throttle_restart = 0;
-
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
if( ret ) {
@@ -1340,19 +1324,21 @@ static void digi_write_bulk_callback( struct urb *urb )
struct digi_port *priv;
struct digi_serial *serial_priv;
int ret = 0;
+ int status = urb->status;
-dbg( "digi_write_bulk_callback: TOP, urb->status=%d", urb->status );
+ dbg("digi_write_bulk_callback: TOP, urb status=%d", status);
/* port and serial sanity check */
if( port == NULL || (priv=usb_get_serial_port_data(port)) == NULL ) {
- err("%s: port or port->private is NULL, status=%d", __FUNCTION__,
- urb->status );
+ err("%s: port or port->private is NULL, status=%d",
+ __FUNCTION__, status);
return;
}
serial = port->serial;
if( serial == NULL || (serial_priv=usb_get_serial_data(serial)) == NULL ) {
- err("%s: serial or serial->private is NULL, status=%d", __FUNCTION__, urb->status );
+ err("%s: serial or serial->private is NULL, status=%d",
+ __FUNCTION__, status);
return;
}
@@ -1687,7 +1673,6 @@ dbg( "digi_startup: TOP" );
spin_lock_init( &priv->dp_port_lock );
priv->dp_port_num = i;
priv->dp_out_buf_len = 0;
- priv->dp_in_buf_len = 0;
priv->dp_write_urb_in_use = 0;
priv->dp_modem_signals = 0;
init_waitqueue_head( &priv->dp_modem_change_wait );
@@ -1757,25 +1742,28 @@ static void digi_read_bulk_callback( struct urb *urb )
struct digi_port *priv;
struct digi_serial *serial_priv;
int ret;
+ int status = urb->status;
dbg( "digi_read_bulk_callback: TOP" );
/* port sanity check, do not resubmit if port is not valid */
if( port == NULL || (priv=usb_get_serial_port_data(port)) == NULL ) {
- err("%s: port or port->private is NULL, status=%d", __FUNCTION__,
- urb->status );
+ err("%s: port or port->private is NULL, status=%d",
+ __FUNCTION__, status);
return;
}
if( port->serial == NULL
|| (serial_priv=usb_get_serial_data(port->serial)) == NULL ) {
- err("%s: serial is bad or serial->private is NULL, status=%d", __FUNCTION__, urb->status );
+ err("%s: serial is bad or serial->private is NULL, status=%d",
+ __FUNCTION__, status);
return;
}
/* do not resubmit urb if it has any status error */
- if( urb->status ) {
- err("%s: nonzero read bulk status: status=%d, port=%d", __FUNCTION__, urb->status, priv->dp_port_num );
+ if (status) {
+ err("%s: nonzero read bulk status: status=%d, port=%d",
+ __FUNCTION__, status, priv->dp_port_num);
return;
}
@@ -1816,10 +1804,11 @@ static int digi_read_inb_callback( struct urb *urb )
struct digi_port *priv = usb_get_serial_port_data(port);
int opcode = ((unsigned char *)urb->transfer_buffer)[0];
int len = ((unsigned char *)urb->transfer_buffer)[1];
- int status = ((unsigned char *)urb->transfer_buffer)[2];
+ int port_status = ((unsigned char *)urb->transfer_buffer)[2];
unsigned char *data = ((unsigned char *)urb->transfer_buffer)+3;
int flag,throttled;
int i;
+ int status = urb->status;
/* do not process callbacks on closed ports */
/* but do continue the read chain */
@@ -1828,7 +1817,10 @@ static int digi_read_inb_callback( struct urb *urb )
/* short/multiple packet check */
if( urb->actual_length != len + 2 ) {
- err("%s: INCOMPLETE OR MULTIPLE PACKET, urb->status=%d, port=%d, opcode=%d, len=%d, actual_length=%d, status=%d", __FUNCTION__, urb->status, priv->dp_port_num, opcode, len, urb->actual_length, status );
+ err("%s: INCOMPLETE OR MULTIPLE PACKET, urb status=%d, "
+ "port=%d, opcode=%d, len=%d, actual_length=%d, "
+ "port_status=%d", __FUNCTION__, status, priv->dp_port_num,
+ opcode, len, urb->actual_length, port_status);
return( -1 );
}
@@ -1843,52 +1835,37 @@ static int digi_read_inb_callback( struct urb *urb )
/* receive data */
if( opcode == DIGI_CMD_RECEIVE_DATA ) {
- /* get flag from status */
+ /* get flag from port_status */
flag = 0;
/* overrun is special, not associated with a char */
- if( status & DIGI_OVERRUN_ERROR ) {
+ if (port_status & DIGI_OVERRUN_ERROR) {
tty_insert_flip_char( tty, 0, TTY_OVERRUN );
}
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
- if( status & DIGI_BREAK_ERROR ) {
+ if (port_status & DIGI_BREAK_ERROR) {
flag = TTY_BREAK;
- } else if( status & DIGI_PARITY_ERROR ) {
+ } else if (port_status & DIGI_PARITY_ERROR) {
flag = TTY_PARITY;
- } else if( status & DIGI_FRAMING_ERROR ) {
+ } else if (port_status & DIGI_FRAMING_ERROR) {
flag = TTY_FRAME;
}
- /* data length is len-1 (one byte of len is status) */
+ /* data length is len-1 (one byte of len is port_status) */
--len;
- if( throttled ) {
-
- len = min( len,
- DIGI_IN_BUF_SIZE - priv->dp_in_buf_len );
-
- if( len > 0 ) {
- memcpy( priv->dp_in_buf + priv->dp_in_buf_len,
- data, len );
- memset( priv->dp_in_flag_buf
- + priv->dp_in_buf_len, flag, len );
- priv->dp_in_buf_len += len;
- }
-
- } else {
- len = tty_buffer_request_room(tty, len);
- if( len > 0 ) {
- /* Hot path */
- if(flag == TTY_NORMAL)
- tty_insert_flip_string(tty, data, len);
- else {
- for(i = 0; i < len; i++)
- tty_insert_flip_char(tty, data[i], flag);
- }
- tty_flip_buffer_push( tty );
+ len = tty_buffer_request_room(tty, len);
+ if( len > 0 ) {
+ /* Hot path */
+ if(flag == TTY_NORMAL)
+ tty_insert_flip_string(tty, data, len);
+ else {
+ for(i = 0; i < len; i++)
+ tty_insert_flip_char(tty, data[i], flag);
}
+ tty_flip_buffer_push( tty );
}
}
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 4703c8f85383..050fcc996f56 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -326,12 +326,14 @@ static int empeg_chars_in_buffer (struct usb_serial_port *port)
static void empeg_write_bulk_callback (struct urb *urb)
{
- struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
+ struct usb_serial_port *port = urb->context;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -345,11 +347,13 @@ static void empeg_read_bulk_callback (struct urb *urb)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index da1c6f7f82b8..7b1673a44077 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -271,26 +271,58 @@ static int debug;
static __u16 vendor = FTDI_VID;
static __u16 product;
+struct ftdi_private {
+ ftdi_chip_type_t chip_type;
+ /* type of the device, either SIO or FT8U232AM */
+ int baud_base; /* baud base clock for divisor setting */
+ int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */
+ __u16 last_set_data_urb_value ;
+ /* the last data state set - needed for doing a break */
+ int write_offset; /* This is the offset in the usb data block to write the serial data -
+ * it is different between devices
+ */
+ int flags; /* some ASYNC_xxxx flags are supported */
+ unsigned long last_dtr_rts; /* saved modem control outputs */
+ wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+ char prev_status, diff_status; /* Used for TIOCMIWAIT */
+ __u8 rx_flags; /* receive state flags (throttling) */
+ spinlock_t rx_lock; /* spinlock for receive state */
+ struct delayed_work rx_work;
+ struct usb_serial_port *port;
+ int rx_processed;
+ unsigned long rx_bytes;
+
+ __u16 interface; /* FT2232C port interface (0 for FT232/245) */
+
+ int force_baud; /* if non-zero, force the baud rate to this value */
+ int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */
+
+ spinlock_t tx_lock; /* spinlock for transmit state */
+ unsigned long tx_bytes;
+ unsigned long tx_outstanding_bytes;
+ unsigned long tx_outstanding_urbs;
+};
+
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
struct ftdi_sio_quirk {
int (*probe)(struct usb_serial *);
- void (*setup)(struct usb_serial *); /* Special settings during startup. */
+ void (*port_probe)(struct ftdi_private *); /* Special settings for probed ports. */
};
static int ftdi_olimex_probe (struct usb_serial *serial);
-static void ftdi_USB_UIRT_setup (struct usb_serial *serial);
-static void ftdi_HE_TIRA1_setup (struct usb_serial *serial);
+static void ftdi_USB_UIRT_setup (struct ftdi_private *priv);
+static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv);
static struct ftdi_sio_quirk ftdi_olimex_quirk = {
.probe = ftdi_olimex_probe,
};
static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
- .setup = ftdi_USB_UIRT_setup,
+ .port_probe = ftdi_USB_UIRT_setup,
};
static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
- .setup = ftdi_HE_TIRA1_setup,
+ .port_probe = ftdi_HE_TIRA1_setup,
};
/*
@@ -567,38 +599,6 @@ static const char *ftdi_chip_name[] = {
#define THROTTLED 0x01
#define ACTUALLY_THROTTLED 0x02
-struct ftdi_private {
- ftdi_chip_type_t chip_type;
- /* type of the device, either SIO or FT8U232AM */
- int baud_base; /* baud base clock for divisor setting */
- int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */
- __u16 last_set_data_urb_value ;
- /* the last data state set - needed for doing a break */
- int write_offset; /* This is the offset in the usb data block to write the serial data -
- * it is different between devices
- */
- int flags; /* some ASYNC_xxxx flags are supported */
- unsigned long last_dtr_rts; /* saved modem control outputs */
- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
- char prev_status, diff_status; /* Used for TIOCMIWAIT */
- __u8 rx_flags; /* receive state flags (throttling) */
- spinlock_t rx_lock; /* spinlock for receive state */
- struct delayed_work rx_work;
- struct usb_serial_port *port;
- int rx_processed;
- unsigned long rx_bytes;
-
- __u16 interface; /* FT2232C port interface (0 for FT232/245) */
-
- int force_baud; /* if non-zero, force the baud rate to this value */
- int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */
-
- spinlock_t tx_lock; /* spinlock for transmit state */
- unsigned long tx_bytes;
- unsigned long tx_outstanding_bytes;
- unsigned long tx_outstanding_urbs;
-};
-
/* Used for TIOCMIWAIT */
#define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
@@ -609,7 +609,6 @@ struct ftdi_private {
/* function prototypes for a FTDI serial converter */
static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id *id);
-static int ftdi_sio_attach (struct usb_serial *serial);
static void ftdi_shutdown (struct usb_serial *serial);
static int ftdi_sio_port_probe (struct usb_serial_port *port);
static int ftdi_sio_port_remove (struct usb_serial_port *port);
@@ -663,7 +662,6 @@ static struct usb_serial_driver ftdi_sio_device = {
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
- .attach = ftdi_sio_attach,
.shutdown = ftdi_shutdown,
};
@@ -1149,7 +1147,9 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
dbg("sysfs attributes for %s", ftdi_chip_name[priv->chip_type]);
retval = device_create_file(&port->dev, &dev_attr_event_char);
if ((!retval) &&
- (priv->chip_type == FT232BM || priv->chip_type == FT2232C)) {
+ (priv->chip_type == FT232BM ||
+ priv->chip_type == FT2232C ||
+ priv->chip_type == FT232RL)) {
retval = device_create_file(&port->dev,
&dev_attr_latency_timer);
}
@@ -1198,6 +1198,8 @@ static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id
static int ftdi_sio_port_probe(struct usb_serial_port *port)
{
struct ftdi_private *priv;
+ struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
+
dbg("%s",__FUNCTION__);
@@ -1214,6 +1216,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
than queue a task to deliver them */
priv->flags = ASYNC_LOW_LATENCY;
+ if (quirk && quirk->port_probe)
+ quirk->port_probe(priv);
+
/* Increase the size of read buffers */
kfree(port->bulk_in_buffer);
port->bulk_in_buffer = kmalloc (BUFSZ, GFP_KERNEL);
@@ -1244,29 +1249,13 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
return 0;
}
-/* attach subroutine */
-static int ftdi_sio_attach (struct usb_serial *serial)
-{
- /* Check for device requiring special set up. */
- struct ftdi_sio_quirk *quirk = usb_get_serial_data(serial);
-
- if (quirk && quirk->setup)
- quirk->setup(serial);
-
- return 0;
-} /* ftdi_sio_attach */
-
-
/* Setup for the USB-UIRT device, which requires hardwired
* baudrate (38400 gets mapped to 312500) */
/* Called from usbserial:serial_probe */
-static void ftdi_USB_UIRT_setup (struct usb_serial *serial)
+static void ftdi_USB_UIRT_setup (struct ftdi_private *priv)
{
- struct ftdi_private *priv;
-
dbg("%s",__FUNCTION__);
- priv = usb_get_serial_port_data(serial->port[0]);
priv->flags |= ASYNC_SPD_CUST;
priv->custom_divisor = 77;
priv->force_baud = B38400;
@@ -1274,13 +1263,10 @@ static void ftdi_USB_UIRT_setup (struct usb_serial *serial)
/* Setup for the HE-TIRA1 device, which requires hardwired
* baudrate (38400 gets mapped to 100000) and RTS-CTS enabled. */
-static void ftdi_HE_TIRA1_setup (struct usb_serial *serial)
+static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv)
{
- struct ftdi_private *priv;
-
dbg("%s",__FUNCTION__);
- priv = usb_get_serial_port_data(serial->port[0]);
priv->flags |= ASYNC_SPD_CUST;
priv->custom_divisor = 240;
priv->force_baud = B38400;
@@ -1574,14 +1560,15 @@ static void ftdi_write_bulk_callback (struct urb *urb)
struct ftdi_private *priv;
int data_offset; /* will be 1 for the SIO and 0 otherwise */
unsigned long countback;
+ int status = urb->status;
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree (urb->transfer_buffer);
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("nonzero write bulk status received: %d", urb->status);
+ if (status) {
+ dbg("nonzero write bulk status received: %d", status);
return;
}
@@ -1657,6 +1644,7 @@ static void ftdi_read_bulk_callback (struct urb *urb)
struct ftdi_private *priv;
unsigned long countread;
unsigned long flags;
+ int status = urb->status;
if (urb->number_of_packets > 0) {
err("%s transfer_buffer_length %d actual_length %d number of packets %d",__FUNCTION__,
@@ -1685,9 +1673,10 @@ static void ftdi_read_bulk_callback (struct urb *urb)
err("%s - Not my urb!", __FUNCTION__);
}
- if (urb->status) {
+ if (status) {
/* This will happen at close every time so it is a dbg not an err */
- dbg("(this is ok on close) nonzero read bulk status received: %d", urb->status);
+ dbg("(this is ok on close) nonzero read bulk status received: "
+ "%d", status);
return;
}
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 74660a3aa670..04bd3b7a2985 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1036,15 +1036,16 @@ static void garmin_write_bulk_callback (struct urb *urb)
unsigned long flags;
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct garmin_data * garmin_data_p = usb_get_serial_port_data(port);
+ int status = urb->status;
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree (urb->transfer_buffer);
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= CLEAR_HALT_REQUIRED;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
@@ -1281,7 +1282,8 @@ static void garmin_read_bulk_callback (struct urb *urb)
struct usb_serial *serial = port->serial;
struct garmin_data * garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
- int status;
+ int status = urb->status;
+ int retval;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -1290,9 +1292,9 @@ static void garmin_read_bulk_callback (struct urb *urb)
return;
}
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero read bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
return;
}
@@ -1306,19 +1308,19 @@ static void garmin_read_bulk_callback (struct urb *urb)
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_BULK_IN_RESTART;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
- status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (retval)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
} else if (urb->actual_length > 0) {
/* Continue trying to read until nothing more is received */
if (0 == (garmin_data_p->flags & FLAGS_THROTTLED)) {
- status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (retval)
dev_err(&port->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __FUNCTION__, status);
+ "%s - failed resubmitting read urb, "
+ "error %d\n", __FUNCTION__, retval);
}
} else {
dbg("%s - end of bulk data", __FUNCTION__);
@@ -1333,13 +1335,14 @@ static void garmin_read_bulk_callback (struct urb *urb)
static void garmin_read_int_callback (struct urb *urb)
{
unsigned long flags;
- int status;
+ int retval;
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct usb_serial *serial = port->serial;
struct garmin_data * garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -1348,11 +1351,11 @@ static void garmin_read_int_callback (struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
return;
}
@@ -1374,11 +1377,11 @@ static void garmin_read_int_callback (struct urb *urb)
port->read_urb->transfer_buffer,
port->read_urb->transfer_buffer_length,
garmin_read_bulk_callback, port);
- status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (status) {
+ retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (retval) {
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
} else {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
@@ -1422,11 +1425,11 @@ static void garmin_read_int_callback (struct urb *urb)
}
port->interrupt_in_urb->dev = port->serial->dev;
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb (urb, GFP_ATOMIC);
+ if (retval)
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 4f8282ad7720..88a2c7dce335 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -69,6 +69,7 @@ struct usb_serial_driver usb_serial_generic_device = {
.shutdown = usb_serial_generic_shutdown,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
+ .resume = usb_serial_generic_resume,
};
static int generic_probe(struct usb_interface *interface,
@@ -169,6 +170,23 @@ static void generic_cleanup (struct usb_serial_port *port)
}
}
+int usb_serial_generic_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port;
+ int i, c = 0, r;
+
+ for (i = 0; i < serial->num_ports; i++) {
+ port = serial->port[i];
+ if (port->open_count && port->read_urb) {
+ r = usb_submit_urb(port->read_urb, GFP_NOIO);
+ if (r < 0)
+ c++;
+ }
+ }
+
+ return c ? -EIO : 0;
+}
+
void usb_serial_generic_close (struct usb_serial_port *port, struct file * filp)
{
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -263,79 +281,82 @@ int usb_serial_generic_chars_in_buffer (struct usb_serial_port *port)
return (chars);
}
-/* Push data to tty layer and resubmit the bulk read URB */
-static void flush_and_resubmit_read_urb (struct usb_serial_port *port)
+
+static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
{
- struct usb_serial *serial = port->serial;
struct urb *urb = port->read_urb;
- struct tty_struct *tty = port->tty;
+ struct usb_serial *serial = port->serial;
int result;
- /* Push data to tty */
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
- tty_insert_flip_string(tty, urb->transfer_buffer, urb->actual_length);
- tty_flip_buffer_push(tty); /* is this allowed from an URB callback ? */
- }
-
/* Continue reading from device */
- usb_fill_bulk_urb (port->read_urb, serial->dev,
+ usb_fill_bulk_urb (urb, serial->dev,
usb_rcvbulkpipe (serial->dev,
port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
((serial->type->read_bulk_callback) ?
serial->type->read_bulk_callback :
usb_serial_generic_read_bulk_callback), port);
- result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ result = usb_submit_urb(urb, mem_flags);
if (result)
dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
}
+/* Push data to tty layer and resubmit the bulk read URB */
+static void flush_and_resubmit_read_urb (struct usb_serial_port *port)
+{
+ struct urb *urb = port->read_urb;
+ struct tty_struct *tty = port->tty;
+ int room;
+
+ /* Push data to tty */
+ if (tty && urb->actual_length) {
+ room = tty_buffer_request_room(tty, urb->actual_length);
+ if (room) {
+ tty_insert_flip_string(tty, urb->transfer_buffer, room);
+ tty_flip_buffer_push(tty); /* is this allowed from an URB callback ? */
+ }
+ }
+
+ resubmit_read_urb(port, GFP_ATOMIC);
+}
+
void usb_serial_generic_read_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
unsigned char *data = urb->transfer_buffer;
- int is_throttled;
- unsigned long flags;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (unlikely(status != 0)) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
usb_serial_debug_data(debug, &port->dev, __FUNCTION__, urb->actual_length, data);
/* Throttle the device if requested by tty */
- if (urb->actual_length) {
- spin_lock_irqsave(&port->lock, flags);
- is_throttled = port->throttled = port->throttle_req;
- spin_unlock_irqrestore(&port->lock, flags);
- if (is_throttled) {
- /* Let the received data linger in the read URB;
- * usb_serial_generic_unthrottle() will pick it
- * up later. */
- dbg("%s - throttling device", __FUNCTION__);
- return;
- }
- }
-
- /* Handle data and continue reading from device */
- flush_and_resubmit_read_urb(port);
+ spin_lock(&port->lock);
+ if (!(port->throttled = port->throttle_req))
+ /* Handle data and continue reading from device */
+ flush_and_resubmit_read_urb(port);
+ spin_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
void usb_serial_generic_write_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
port->write_urb_busy = 0;
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -370,8 +391,8 @@ void usb_serial_generic_unthrottle (struct usb_serial_port *port)
spin_unlock_irqrestore(&port->lock, flags);
if (was_throttled) {
- /* Handle pending data and resume reading from device */
- flush_and_resubmit_read_urb(port);
+ /* Resume reading from device */
+ resubmit_read_urb(port, GFP_KERNEL);
}
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 056e1923c4de..dd42f57089ff 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -599,10 +599,11 @@ static void edge_interrupt_callback (struct urb *urb)
int txCredits;
int portNumber;
int result;
+ int status = urb->status;
dbg("%s", __FUNCTION__);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -610,10 +611,12 @@ static void edge_interrupt_callback (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero urb status received: %d",
+ __FUNCTION__, status);
goto exit;
}
@@ -688,13 +691,15 @@ static void edge_bulk_in_callback (struct urb *urb)
{
struct edgeport_serial *edge_serial = (struct edgeport_serial *)urb->context;
unsigned char *data = urb->transfer_buffer;
- int status;
+ int retval;
__u16 raw_data_length;
+ int status = urb->status;
dbg("%s", __FUNCTION__);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
edge_serial->read_in_progress = false;
return;
}
@@ -722,9 +727,11 @@ static void edge_bulk_in_callback (struct urb *urb)
if (edge_serial->rxBytesAvail > 0) {
dbg("%s - posting a read", __FUNCTION__);
edge_serial->read_urb->dev = edge_serial->serial->dev;
- status = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
- if (status) {
- dev_err(&urb->dev->dev, "%s - usb_submit_urb(read bulk) failed, status = %d\n", __FUNCTION__, status);
+ retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
+ if (retval) {
+ dev_err(&urb->dev->dev,
+ "%s - usb_submit_urb(read bulk) failed, "
+ "retval = %d\n", __FUNCTION__, retval);
edge_serial->read_in_progress = false;
}
} else {
@@ -744,11 +751,13 @@ static void edge_bulk_out_data_callback (struct urb *urb)
{
struct edgeport_port *edge_port = (struct edgeport_port *)urb->context;
struct tty_struct *tty;
+ int status = urb->status;
dbg("%s", __FUNCTION__);
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
}
tty = edge_port->port->tty;
@@ -1504,15 +1513,6 @@ static void edge_set_termios (struct usb_serial_port *port, struct ktermios *old
}
cflag = tty->termios->c_cflag;
- /* check that they really want us to change something */
- if (old_termios) {
- if (cflag == old_termios->c_cflag &&
- tty->termios->c_iflag == old_termios->c_iflag) {
- dbg("%s - nothing to change", __FUNCTION__);
- return;
- }
- }
-
dbg("%s - clfag %08x iflag %08x", __FUNCTION__,
tty->termios->c_cflag, tty->termios->c_iflag);
if (old_termios) {
diff --git a/drivers/usb/serial/io_fw_down3.h b/drivers/usb/serial/io_fw_down3.h
index 93b56d68a27b..4496b068c50f 100644
--- a/drivers/usb/serial/io_fw_down3.h
+++ b/drivers/usb/serial/io_fw_down3.h
@@ -5,7 +5,7 @@
//**************************************************************
-static int IMAGE_SIZE = 12749;
+static int IMAGE_SIZE = 12938;
struct EDGE_FIRMWARE_VERSION_INFO
{
@@ -16,7 +16,7 @@ struct EDGE_FIRMWARE_VERSION_INFO
static struct EDGE_FIRMWARE_VERSION_INFO IMAGE_VERSION_NAME =
{
- 4, 10, 0 // Major, Minor, Build
+ 4, 80, 0 // Major, Minor, Build
};
@@ -27,16 +27,16 @@ static unsigned char IMAGE_ARRAY_NAME[] =
// WORD Length;
// BYTE CheckSum;
// };
-0xca, 0x31,
-0xa8,
+0x87, 0x32,
+0x9a,
-0x02, 0x26, 0xfe, 0x02, 0x21, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1e, 0x00, 0x00,
+0x02, 0x27, 0xbf, 0x02, 0x21, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1e, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x1a, 0x85, 0x3f,
0x8c, 0x85, 0x40, 0x8a, 0xc0, 0xe0, 0xc0, 0xd0, 0xc0, 0xf0, 0xc0, 0x82, 0xc0, 0x83, 0xc0, 0x00,
0xc0, 0x01, 0xc0, 0x02, 0xc0, 0x03, 0xc0, 0x04, 0xc0, 0x05, 0xc0, 0x06, 0xc0, 0x07, 0xe5, 0x3e,
0x24, 0x08, 0xf8, 0xe6, 0x60, 0x2b, 0xe5, 0x3e, 0x24, 0x10, 0xf8, 0xa6, 0x81, 0xe5, 0x3e, 0x75,
0xf0, 0x21, 0xa4, 0x24, 0x05, 0xf5, 0x82, 0xe4, 0x34, 0xf8, 0xf5, 0x83, 0x78, 0x8c, 0xe5, 0x81,
-0x04, 0xc3, 0x98, 0xf9, 0x94, 0x22, 0x40, 0x03, 0x02, 0x11, 0x94, 0xe6, 0xf0, 0x08, 0xa3, 0xd9,
+0x04, 0xc3, 0x98, 0xf9, 0x94, 0x22, 0x40, 0x03, 0x02, 0x11, 0xdc, 0xe6, 0xf0, 0x08, 0xa3, 0xd9,
0xfa, 0x74, 0x08, 0x25, 0x3e, 0xf8, 0x05, 0x3e, 0x08, 0xe6, 0x54, 0x80, 0x70, 0x0c, 0xe5, 0x3e,
0xb4, 0x07, 0xf3, 0x78, 0x08, 0x75, 0x3e, 0x00, 0x80, 0xef, 0xe5, 0x3e, 0x24, 0x10, 0xf8, 0x86,
0x81, 0xe5, 0x3e, 0x75, 0xf0, 0x21, 0xa4, 0x24, 0x05, 0xf5, 0x82, 0xe4, 0x34, 0xf8, 0xf5, 0x83,
@@ -49,387 +49,398 @@ static unsigned char IMAGE_ARRAY_NAME[] =
0xc9, 0xf0, 0x69, 0x60, 0x02, 0x7e, 0x04, 0xa3, 0xe0, 0xca, 0xf0, 0x6a, 0x60, 0x02, 0x7e, 0x04,
0xa3, 0xe0, 0xcb, 0xf0, 0x6b, 0x60, 0x02, 0x7e, 0x04, 0x22, 0xc0, 0xe0, 0xc0, 0xd0, 0xc0, 0xf0,
0xc0, 0x82, 0xc0, 0x83, 0xc0, 0x00, 0xc0, 0x01, 0xc0, 0x02, 0xc0, 0x03, 0xc0, 0x04, 0xc0, 0x05,
-0xc0, 0x06, 0xc0, 0x07, 0x90, 0xff, 0x93, 0x74, 0x01, 0xf0, 0xe5, 0x81, 0x94, 0xfd, 0x40, 0x03,
-0x02, 0x11, 0x94, 0x85, 0x41, 0x8d, 0x85, 0x42, 0x8b, 0x74, 0xaf, 0xf5, 0x82, 0x74, 0xfa, 0xf5,
-0x83, 0xe0, 0xb4, 0x01, 0x1b, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x4a, 0xe0, 0x30, 0xe7, 0x2c,
-0x90, 0xff, 0x4e, 0xe0, 0x30, 0xe7, 0x25, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x02, 0xf0, 0x80, 0x20,
-0xb4, 0x02, 0x1d, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x7a, 0xe0, 0x30, 0xe7, 0x05, 0x12, 0x27,
-0x8d, 0x80, 0x09, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x03, 0xf0, 0x80, 0x04, 0xd0, 0x83, 0xd0, 0x82,
-0xa3, 0xe0, 0xb4, 0x01, 0x1b, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x52, 0xe0, 0x30, 0xe7, 0x2c,
-0x90, 0xff, 0x56, 0xe0, 0x30, 0xe7, 0x25, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x02, 0xf0, 0x80, 0x25,
-0xb4, 0x02, 0x22, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x7a, 0xe0, 0x30, 0xe7, 0x05, 0x12, 0x27,
-0x8d, 0x80, 0x09, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x03, 0xf0, 0x80, 0x09, 0xd0, 0x83, 0xd0, 0x82,
-0x80, 0x03, 0x02, 0x02, 0x62, 0x74, 0x15, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0, 0x20, 0x04,
-0xf1, 0x20, 0x02, 0x03, 0x30, 0x01, 0xeb, 0x74, 0x18, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0,
-0x14, 0xfc, 0xf0, 0xa3, 0xe0, 0xfd, 0xa3, 0xe0, 0xfe, 0x64, 0x04, 0x70, 0x0f, 0xec, 0x70, 0x62,
-0x7e, 0x01, 0x12, 0x00, 0xc9, 0x7c, 0x0a, 0x7d, 0xfa, 0x02, 0x02, 0x33, 0x12, 0x00, 0xc9, 0xee,
-0x64, 0x04, 0x60, 0x1d, 0xec, 0x70, 0x4b, 0x7c, 0x0a, 0xed, 0x14, 0xfd, 0x70, 0x15, 0xee, 0x64,
-0x02, 0x60, 0x07, 0x7e, 0x02, 0x7d, 0x32, 0x02, 0x02, 0x33, 0x7e, 0x01, 0x7d, 0xfa, 0x02, 0x02,
-0x33, 0x7c, 0x0a, 0x74, 0x18, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xec, 0xf0, 0xa3, 0xed, 0xf0,
-0xa3, 0xee, 0xf0, 0x14, 0x60, 0x18, 0x20, 0xe1, 0x0f, 0x20, 0x01, 0x06, 0xd2, 0xb1, 0xc2, 0xb0,
-0x80, 0x10, 0xc2, 0xb1, 0xd2, 0xb0, 0x80, 0x0a, 0xc2, 0xb1, 0xc2, 0xb0, 0x80, 0x04, 0xd2, 0xb0,
-0xd2, 0xb1, 0x78, 0x19, 0x79, 0x09, 0x7a, 0x07, 0xe7, 0x70, 0x04, 0xa6, 0x00, 0x80, 0x0b, 0xe6,
-0x60, 0x08, 0x16, 0xe6, 0x70, 0x04, 0xe7, 0x44, 0x80, 0xf7, 0x08, 0x09, 0xda, 0xea, 0xe5, 0x3d,
-0x60, 0x13, 0x14, 0xf5, 0x3d, 0x70, 0x0e, 0xe5, 0x3e, 0x24, 0x08, 0xf8, 0x76, 0x00, 0x12, 0x11,
-0x0f, 0xd2, 0x8c, 0xd2, 0x8d, 0xd0, 0x07, 0xd0, 0x06, 0xd0, 0x05, 0xd0, 0x04, 0xd0, 0x03, 0xd0,
-0x02, 0xd0, 0x01, 0xd0, 0x00, 0xd0, 0x83, 0xd0, 0x82, 0xd0, 0xf0, 0xd0, 0xd0, 0xd0, 0xe0, 0x32,
-0x90, 0xff, 0x04, 0xe0, 0x90, 0xfa, 0xb6, 0xf0, 0x90, 0xff, 0x06, 0xe0, 0xfc, 0xa3, 0xe0, 0xfa,
-0xec, 0xff, 0xea, 0xfe, 0xef, 0xc3, 0x94, 0x08, 0xee, 0x94, 0x01, 0x50, 0x02, 0x80, 0x04, 0x7e,
-0x01, 0x7f, 0x08, 0x8e, 0x3b, 0x8f, 0x3c, 0x90, 0xff, 0x02, 0xe0, 0xfc, 0xa3, 0xe0, 0xfa, 0xec,
-0xff, 0xea, 0x90, 0xfa, 0xba, 0xf0, 0xef, 0xa3, 0xf0, 0x12, 0x1c, 0x30, 0xe4, 0xf5, 0x4d, 0xe5,
-0x4d, 0xc3, 0x94, 0x02, 0x50, 0x0f, 0x12, 0x1c, 0x11, 0xe4, 0x12, 0x1a, 0x38, 0x05, 0x4d, 0x04,
-0x12, 0x1c, 0x02, 0x80, 0xea, 0x12, 0x1c, 0x30, 0x90, 0xff, 0x00, 0xe0, 0xff, 0x54, 0x60, 0x24,
-0xc0, 0x70, 0x03, 0x02, 0x08, 0xc5, 0x24, 0x40, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6,
-0xe0, 0xfe, 0x54, 0x0f, 0xf5, 0x4d, 0xee, 0x30, 0xe7, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x92, 0x0a,
-0x90, 0xff, 0x01, 0xe0, 0x12, 0x1b, 0x4c, 0x03, 0x56, 0x00, 0x04, 0x29, 0x01, 0x05, 0x3c, 0x03,
-0x06, 0x03, 0x05, 0x06, 0x45, 0x06, 0x07, 0xa7, 0x08, 0x07, 0xef, 0x09, 0x08, 0x4b, 0x0a, 0x08,
-0x8b, 0x0b, 0x00, 0x00, 0x0f, 0x26, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa,
-0xba, 0xe0, 0x70, 0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x64, 0x02, 0x45,
-0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xef, 0x54, 0x1f, 0x14, 0x60, 0x2b, 0x14, 0x60, 0x47, 0x24,
-0x02, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xee, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0x11, 0x74,
-0x01, 0x12, 0x1a, 0x38, 0x78, 0x67, 0xe6, 0x30, 0xe0, 0x08, 0x12, 0x1c, 0x11, 0x74, 0x02, 0x12,
-0x1a, 0x38, 0x7f, 0x02, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x20, 0xe1, 0x09, 0x90, 0xfa, 0xb6, 0xe0,
-0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6, 0xe0, 0xd3, 0x94, 0x01, 0x40, 0x03, 0x02, 0x0f,
-0x26, 0x7f, 0x02, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x20, 0xe1, 0x0e, 0x90, 0xfa, 0xb6, 0xe0, 0xff,
-0x60, 0x07, 0x64, 0x80, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x0f, 0xb2, 0x40, 0x03, 0x02, 0x0f,
-0x26, 0xe5, 0x4d, 0x70, 0x19, 0x30, 0x0a, 0x0b, 0x90, 0xff, 0x80, 0x12, 0x1c, 0x0e, 0x12, 0x1a,
-0x38, 0x80, 0x24, 0x90, 0xff, 0x82, 0x12, 0x1c, 0x0e, 0x12, 0x1a, 0x38, 0x80, 0x19, 0x15, 0x4d,
-0x30, 0x0a, 0x0b, 0x12, 0x1c, 0xa5, 0x12, 0x1c, 0x0c, 0x12, 0x1a, 0x38, 0x80, 0x09, 0x12, 0x1c,
-0xb3, 0x12, 0x1c, 0x0c, 0x12, 0x1a, 0x38, 0x12, 0x1c, 0x11, 0x12, 0x19, 0xf2, 0x60, 0x05, 0x74,
-0x01, 0x12, 0x1a, 0x38, 0x7f, 0x02, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f,
-0x26, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0xc9, 0x14, 0x60, 0x2d,
-0x14, 0x60, 0x59, 0x24, 0x02, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xba, 0xe0, 0x70, 0x04,
-0xa3, 0xe0, 0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6, 0xe0, 0x60, 0x03, 0x02,
-0x0f, 0x26, 0x78, 0x67, 0xe6, 0x54, 0xfe, 0xf6, 0xe4, 0xff, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x20,
-0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x30, 0xe0, 0x09, 0x90, 0xfa, 0xb6,
-0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x30, 0xe1, 0x0c, 0x90, 0xfa, 0xb6, 0xe0, 0xd3,
-0x94, 0x01, 0x40, 0x03, 0x02, 0x0f, 0x26, 0xe4, 0xff, 0x02, 0x31, 0xb1, 0x90, 0xfa, 0xba, 0xe0,
-0x70, 0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x0f, 0xb2, 0x40, 0x03, 0x02, 0x0f,
-0x26, 0xe5, 0x35, 0x20, 0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x30, 0xe0,
-0x07, 0xe5, 0x4d, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x4d, 0x70, 0x0f, 0x90, 0xff, 0x82, 0xe0,
-0x54, 0xf7, 0xf0, 0x90, 0xff, 0x80, 0xe0, 0x54, 0xf7, 0xf0, 0x22, 0xe5, 0x4d, 0x24, 0xfe, 0x60,
-0x20, 0x24, 0xfb, 0x60, 0x34, 0x24, 0x06, 0x70, 0x35, 0x30, 0x0a, 0x0c, 0xa2, 0x0a, 0xe4, 0x33,
-0xfd, 0x7f, 0x03, 0x12, 0x2d, 0xa8, 0x80, 0x26, 0xe4, 0xfd, 0x7f, 0x03, 0x12, 0x2d, 0xa8, 0x80,
-0x1d, 0x30, 0x0a, 0x0c, 0xa2, 0x0a, 0xe4, 0x33, 0xfd, 0x7f, 0x04, 0x12, 0x2d, 0xa8, 0x80, 0x0e,
-0xe4, 0xfd, 0x7f, 0x04, 0x12, 0x2d, 0xa8, 0x80, 0x05, 0x7f, 0x87, 0x12, 0x31, 0x32, 0x15, 0x4d,
-0x30, 0x0a, 0x0b, 0x12, 0x1c, 0xa5, 0xf5, 0x83, 0xe0, 0x54, 0xf7, 0xf0, 0x80, 0x09, 0x12, 0x1c,
-0xb3, 0xf5, 0x83, 0xe0, 0x54, 0xf7, 0xf0, 0xe4, 0xff, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x30, 0xe7,
-0x03, 0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0xc9,
-0x14, 0x60, 0x2d, 0x14, 0x60, 0x55, 0x24, 0x02, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xba,
-0xe0, 0x70, 0x04, 0xa3, 0xe0, 0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6, 0xe0,
-0x60, 0x03, 0x02, 0x0f, 0x26, 0x78, 0x67, 0xe6, 0x44, 0x01, 0xf6, 0xe4, 0xff, 0x02, 0x31, 0xb1,
-0xe5, 0x35, 0x20, 0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x30, 0xe0, 0x07,
-0xe5, 0x4d, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x30, 0xe1, 0x0a, 0xe5, 0x4d, 0xd3, 0x94,
-0x01, 0x40, 0x03, 0x02, 0x0f, 0x26, 0xe4, 0xff, 0x02, 0x31, 0xb1, 0x90, 0xfa, 0xba, 0xe0, 0x70,
-0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6, 0xe0, 0xff, 0x12, 0x31, 0x82,
-0x40, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x20, 0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x26,
-0xe5, 0x4d, 0x70, 0x09, 0x30, 0x0a, 0x03, 0x02, 0x1d, 0x64, 0x02, 0x1d, 0x2f, 0xe5, 0x35, 0x20,
-0xe1, 0x03, 0x02, 0x0f, 0x26, 0x15, 0x4d, 0x30, 0x0a, 0x0b, 0x12, 0x1c, 0xa5, 0xf5, 0x83, 0xe0,
-0x44, 0x08, 0xf0, 0x80, 0x09, 0x12, 0x1c, 0xb3, 0xf5, 0x83, 0xe0, 0x44, 0x08, 0xf0, 0xe4, 0xff,
-0x02, 0x31, 0xb1, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x45, 0x3b, 0x60,
-0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0xc9,
-0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x30, 0xe1, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xbb,
-0xe0, 0x90, 0xff, 0xff, 0xf0, 0xe0, 0x60, 0x05, 0x43, 0x35, 0x01, 0x80, 0x03, 0x53, 0x35, 0xfe,
-0xe4, 0xff, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x45,
-0x3b, 0x70, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0xc9, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa,
-0xba, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0xec, 0x24, 0xfe, 0x60, 0x3a, 0x14, 0x60, 0x75, 0x24, 0x02,
-0x60, 0x03, 0x02, 0x0f, 0x26, 0xed, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0x30, 0x12, 0x1d,
-0x5d, 0x7d, 0x03, 0x12, 0x0f, 0x6d, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x0f, 0x2a, 0x90, 0xfa,
-0xb3, 0xe0, 0xfd, 0xa3, 0x12, 0x1c, 0x7b, 0x12, 0x0f, 0x89, 0x50, 0x02, 0x80, 0x04, 0xae, 0x3b,
-0xaf, 0x3c, 0x02, 0x0f, 0xba, 0x12, 0x1c, 0x30, 0x90, 0xf9, 0x15, 0xe0, 0x30, 0xe4, 0x0d, 0x12,
-0x1d, 0x5d, 0x7d, 0x14, 0x12, 0x0f, 0x6d, 0x60, 0x10, 0x02, 0x0f, 0x26, 0x12, 0x1d, 0x5d, 0x7d,
-0x04, 0x12, 0x0f, 0xc1, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x0f, 0x2a, 0x90, 0xfa, 0xb3, 0xe0,
-0xfd, 0xa3, 0x12, 0x1c, 0x7b, 0x12, 0x0f, 0x89, 0x50, 0x02, 0x80, 0x04, 0xae, 0x3b, 0xaf, 0x3c,
-0x02, 0x0f, 0xba, 0x12, 0x1d, 0x5d, 0x7d, 0x05, 0x12, 0x0f, 0xc1, 0x60, 0x03, 0x02, 0x0f, 0x26,
-0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xb3, 0x12, 0x1c, 0x78, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x90, 0xfa,
-0xb4, 0xe4, 0x75, 0xf0, 0x03, 0x12, 0x1a, 0x6c, 0x90, 0xfa, 0xbb, 0xe0, 0x90, 0xfa, 0xb2, 0xf0,
-0xe4, 0xf5, 0x4c, 0x90, 0xfa, 0xb2, 0xe0, 0xff, 0xe5, 0x4c, 0xc3, 0x9f, 0x50, 0x24, 0x12, 0x1c,
-0x72, 0x12, 0x0f, 0xcc, 0xff, 0xfd, 0x90, 0xfa, 0xb4, 0xe4, 0x8d, 0xf0, 0x12, 0x1a, 0x6c, 0x90,
-0xfa, 0xb3, 0xe0, 0xc3, 0x9f, 0xf0, 0xd3, 0x94, 0x00, 0x50, 0x03, 0x02, 0x0f, 0x26, 0x05, 0x4c,
-0x80, 0xd1, 0x12, 0x1c, 0x72, 0x12, 0x0f, 0xcc, 0x24, 0xfe, 0xff, 0x90, 0xfa, 0xb3, 0xf0, 0xfd,
-0xa3, 0xe4, 0x75, 0xf0, 0x02, 0x12, 0x1a, 0x6c, 0x7a, 0xf9, 0x79, 0x6f, 0x7b, 0x01, 0x8b, 0x36,
-0x8a, 0x37, 0x89, 0x38, 0xe9, 0x24, 0x02, 0xf9, 0xe4, 0x3a, 0xfa, 0x12, 0x1c, 0x78, 0x12, 0x25,
-0xd7, 0x8f, 0x4c, 0x05, 0x4c, 0x05, 0x4c, 0x12, 0x1c, 0x11, 0xe5, 0x4c, 0x12, 0x1a, 0x38, 0x12,
-0x1c, 0x11, 0x90, 0x00, 0x01, 0x74, 0x03, 0x12, 0x1a, 0x4a, 0xaf, 0x4c, 0x7e, 0x00, 0xc3, 0xef,
-0x95, 0x3c, 0xee, 0x95, 0x3b, 0x50, 0x02, 0x80, 0x04, 0xae, 0x3b, 0xaf, 0x3c, 0x8e, 0x39, 0x8f,
-0x3a, 0x02, 0x2c, 0x07, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f, 0x26, 0xe5,
-0x3c, 0x64, 0x01, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xb6, 0xe0, 0x60, 0x03,
-0x02, 0x0f, 0x26, 0x90, 0xfa, 0xba, 0xe0, 0x70, 0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26,
-0x12, 0x1c, 0xc9, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x20, 0xe0, 0x06, 0x20, 0xe1, 0x03,
-0x02, 0x0f, 0x26, 0x75, 0x36, 0x00, 0x75, 0x37, 0x00, 0x75, 0x38, 0x32, 0x02, 0x0f, 0xa9, 0xe5,
-0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26,
-0x90, 0xfa, 0xb6, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xd3, 0x90, 0xfa, 0xbb, 0xe0, 0x94, 0x01,
-0x90, 0xfa, 0xba, 0xe0, 0x94, 0x00, 0x40, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0xc9, 0x60, 0x03,
-0x02, 0x0f, 0x26, 0xe5, 0x35, 0x20, 0xe0, 0x06, 0x20, 0xe1, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa,
-0xbb, 0xe0, 0xf5, 0x32, 0xe5, 0x32, 0x70, 0x08, 0x43, 0x35, 0x01, 0x53, 0x35, 0xfd, 0x80, 0x06,
-0x53, 0x35, 0xfe, 0x43, 0x35, 0x02, 0xe4, 0xff, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x20, 0xe7, 0x03,
-0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x64, 0x01, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa,
-0xb6, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x26, 0x90, 0xfa, 0xba, 0xe0, 0x70, 0x02, 0xa3, 0xe0, 0x60,
-0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c, 0xc9, 0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35,
-0x20, 0xe1, 0x03, 0x02, 0x0f, 0x26, 0x7f, 0x01, 0x02, 0x31, 0xb1, 0xe5, 0x35, 0x30, 0xe7, 0x03,
-0x02, 0x0f, 0x26, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xd3, 0x90, 0xfa, 0xbb,
-0xe0, 0x94, 0x00, 0x90, 0xfa, 0xba, 0xe0, 0x94, 0x00, 0x40, 0x03, 0x02, 0x0f, 0x26, 0x12, 0x1c,
-0xc9, 0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x26, 0xe5, 0x35, 0x20, 0xe1, 0x03, 0x02, 0x0f, 0x26,
-0xe4, 0xff, 0x02, 0x31, 0xb1, 0x90, 0xff, 0x01, 0x12, 0x1d, 0x74, 0xef, 0x12, 0x1a, 0x38, 0x90,
-0xfa, 0xb6, 0x12, 0x1d, 0x74, 0x90, 0x00, 0x01, 0xef, 0x12, 0x1a, 0x4a, 0x90, 0x00, 0x02, 0xe4,
-0x12, 0x1a, 0x4a, 0x74, 0x03, 0x12, 0x1c, 0x02, 0x90, 0xfa, 0xba, 0xe0, 0xff, 0xa3, 0xe0, 0x85,
-0x38, 0x82, 0x85, 0x37, 0x83, 0xcf, 0xf0, 0xa3, 0xef, 0xf0, 0x90, 0xff, 0x01, 0xe0, 0x12, 0x1b,
-0x4c, 0x09, 0x4a, 0x02, 0x09, 0x6c, 0x04, 0x09, 0x8e, 0x05, 0x09, 0xba, 0x06, 0x09, 0xd8, 0x07,
-0x09, 0xf6, 0x08, 0x0a, 0x14, 0x09, 0x0a, 0x32, 0x0b, 0x0a, 0xe7, 0x80, 0x0d, 0x6f, 0x81, 0x0d,
-0xa0, 0x82, 0x0b, 0x2e, 0x83, 0x0b, 0x77, 0x84, 0x0b, 0x96, 0x85, 0x0b, 0xdb, 0x86, 0x0c, 0x26,
-0x87, 0x0c, 0xb7, 0x88, 0x0d, 0x42, 0x89, 0x0a, 0x50, 0x92, 0x0a, 0x50, 0x93, 0x0e, 0x53, 0xc0,
-0x0e, 0x7f, 0xc1, 0x0e, 0x90, 0xc2, 0x00, 0x00, 0x0f, 0x15, 0xe5, 0x35, 0x20, 0xe7, 0x05, 0x7f,
-0x05, 0x02, 0x30, 0xec, 0x12, 0x1c, 0xc1, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00,
-0x7f, 0x07, 0x02, 0x11, 0x16, 0xe4, 0xfd, 0x7f, 0x07, 0x02, 0x2f, 0x18, 0xe5, 0x35, 0x20, 0xe7,
-0x05, 0x7f, 0x05, 0x02, 0x30, 0xec, 0x12, 0x1c, 0xc1, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd,
-0x7c, 0x00, 0x7f, 0x0c, 0x02, 0x11, 0x16, 0xe4, 0xfd, 0x7f, 0x07, 0x02, 0x2f, 0x18, 0xe5, 0x35,
-0x30, 0xe7, 0x03, 0x02, 0x0f, 0x29, 0x12, 0x1d, 0x92, 0x50, 0x06, 0xe5, 0x3c, 0x45, 0x3b, 0x70,
-0x05, 0x7f, 0x02, 0x02, 0x30, 0xec, 0x90, 0xfa, 0xb6, 0xe0, 0x24, 0xfe, 0x24, 0xfd, 0x50, 0x02,
-0x80, 0x03, 0x02, 0x31, 0x6f, 0x7f, 0x07, 0x02, 0x30, 0xec, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02,
-0x0f, 0x29, 0x12, 0x1c, 0xc1, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x08,
-0x02, 0x11, 0x16, 0x7f, 0x07, 0x02, 0x30, 0xec, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x29,
-0x12, 0x1c, 0xc1, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x09, 0x02, 0x11,
-0x16, 0x7f, 0x07, 0x02, 0x30, 0xec, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x29, 0x12, 0x1c,
-0xc1, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0a, 0x02, 0x11, 0x16, 0x7f,
-0x07, 0x02, 0x30, 0xec, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x29, 0x12, 0x1c, 0xc1, 0x60,
-0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0b, 0x02, 0x11, 0x16, 0x7f, 0x07, 0x02,
-0x30, 0xec, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x29, 0x12, 0x1c, 0xc1, 0x60, 0x03, 0x04,
-0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0e, 0x02, 0x11, 0x16, 0x7f, 0x07, 0x02, 0x30, 0xec,
-0xe5, 0x35, 0x30, 0xe7, 0x56, 0x12, 0x1c, 0xc9, 0x70, 0x4a, 0x90, 0xff, 0x02, 0xe0, 0xf5, 0x4c,
-0xe5, 0x4c, 0xb4, 0x82, 0x05, 0x75, 0x4c, 0x61, 0x80, 0x12, 0xe5, 0x4c, 0xb4, 0x83, 0x05, 0x75,
-0x4c, 0x62, 0x80, 0x08, 0xe5, 0x4c, 0xc4, 0x54, 0xf0, 0x04, 0xf5, 0x4c, 0x12, 0x1b, 0x72, 0x12,
-0x1d, 0x8b, 0x12, 0x25, 0x39, 0x12, 0x1c, 0xd9, 0x12, 0x1a, 0x0b, 0x60, 0x05, 0x12, 0x31, 0xbd,
-0x80, 0x06, 0x85, 0x33, 0x39, 0x85, 0x34, 0x3a, 0x75, 0x36, 0x01, 0x75, 0x37, 0xf9, 0x75, 0x38,
-0x72, 0x02, 0x2c, 0x07, 0xe4, 0xfd, 0x7f, 0x05, 0x02, 0x2f, 0x18, 0x12, 0x1c, 0xc9, 0x60, 0x05,
-0x7f, 0x05, 0x02, 0x30, 0xec, 0x12, 0x1d, 0x92, 0x40, 0x05, 0x7f, 0x03, 0x02, 0x30, 0xec, 0x90,
-0xff, 0x02, 0xe0, 0xf5, 0x4c, 0xe5, 0x4c, 0xb4, 0x82, 0x05, 0x75, 0x4c, 0x61, 0x80, 0x12, 0xe5,
-0x4c, 0xb4, 0x83, 0x05, 0x75, 0x4c, 0x62, 0x80, 0x08, 0xe5, 0x4c, 0xc4, 0x54, 0xf0, 0x04, 0xf5,
-0x4c, 0x12, 0x1b, 0x72, 0x02, 0x31, 0x6f, 0x12, 0x1d, 0x9c, 0x12, 0x2a, 0x06, 0x12, 0x1c, 0x83,
-0xe0, 0x54, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0xe0, 0x90, 0xfa, 0xb7, 0xf0, 0x78, 0x68, 0x12, 0x1b,
-0x28, 0x90, 0x00, 0x02, 0x12, 0x1a, 0x0b, 0x30, 0xe7, 0xf2, 0x90, 0x00, 0x02, 0xe4, 0x12, 0x1a,
-0x4a, 0x90, 0xfa, 0xb7, 0xe0, 0x44, 0x80, 0xff, 0xf0, 0x78, 0x7c, 0xe6, 0xfc, 0x08, 0xe6, 0x8c,
-0x83, 0x12, 0x1c, 0x8b, 0xef, 0xf0, 0x12, 0x31, 0xc7, 0xe4, 0xff, 0x02, 0x30, 0xec, 0x90, 0xfa,
-0xb6, 0xe0, 0x64, 0x01, 0x70, 0x1f, 0x90, 0xfa, 0xba, 0xe0, 0xff, 0x7e, 0x00, 0x70, 0x06, 0xa3,
-0xe0, 0xf5, 0x90, 0x80, 0x2d, 0xc2, 0xaf, 0xef, 0xf4, 0x52, 0x90, 0x90, 0xfa, 0xbb, 0xe0, 0x42,
-0x90, 0xd2, 0xaf, 0x80, 0x1d, 0x90, 0xfa, 0xba, 0xe0, 0xff, 0x7e, 0x00, 0x70, 0x06, 0xa3, 0xe0,
-0xf5, 0xb0, 0x80, 0x0e, 0xc2, 0xaf, 0xef, 0xf4, 0x52, 0xb0, 0x90, 0xfa, 0xbb, 0xe0, 0x42, 0xb0,
-0xd2, 0xaf, 0xe4, 0xff, 0x02, 0x30, 0xec, 0x12, 0x1c, 0x30, 0x90, 0xfa, 0xb6, 0xe0, 0xb4, 0x01,
-0x0a, 0x12, 0x1c, 0x11, 0xe5, 0x90, 0x12, 0x1a, 0x38, 0x80, 0x08, 0x12, 0x1c, 0x11, 0xe5, 0xb0,
-0x12, 0x1a, 0x38, 0x02, 0x0f, 0xa9, 0x90, 0xfa, 0xb6, 0xe0, 0xff, 0x24, 0x12, 0x12, 0x1c, 0x41,
-0x20, 0xe1, 0x33, 0x12, 0x1c, 0xd0, 0xef, 0x24, 0xfc, 0x60, 0x18, 0x04, 0x70, 0x28, 0x90, 0xfa,
-0xb7, 0xe0, 0x60, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x10, 0xf0, 0x80, 0x19, 0x12, 0x1d, 0xa6,
-0xf0, 0x80, 0x13, 0x90, 0xfa, 0xb7, 0xe0, 0x60, 0x09, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x10, 0xf0,
-0x80, 0x04, 0x12, 0x1d, 0xad, 0xf0, 0xe4, 0xff, 0x02, 0x30, 0xec, 0x90, 0xfa, 0xb6, 0xe0, 0xff,
-0x24, 0x12, 0x12, 0x1c, 0x41, 0x20, 0xe1, 0x39, 0x12, 0x1c, 0xd0, 0xef, 0x24, 0xfc, 0x60, 0x1b,
-0x04, 0x70, 0x2e, 0x90, 0xfa, 0xb7, 0xe0, 0x60, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x20, 0xf0,
-0x80, 0x1f, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xdf, 0xf0, 0x80, 0x16, 0x90, 0xfa, 0xb7, 0xe0, 0x60,
-0x09, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x20, 0xf0, 0x80, 0x07, 0x90, 0xff, 0xb4, 0xe0, 0x54, 0xdf,
-0xf0, 0xe4, 0xff, 0x02, 0x30, 0xec, 0x12, 0x1c, 0xd0, 0x12, 0x1c, 0xc1, 0x60, 0x4d, 0x04, 0x60,
-0x03, 0x02, 0x0c, 0xb2, 0x90, 0xfa, 0xb7, 0xe0, 0x60, 0x0f, 0x90, 0xff, 0xa4, 0x12, 0x1c, 0x3a,
-0x30, 0xe1, 0x6f, 0x12, 0x1d, 0x7c, 0x02, 0x0c, 0xb2, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xfb, 0x12,
-0x1c, 0x3d, 0xfe, 0x30, 0xe1, 0x5c, 0x30, 0xe2, 0x11, 0x30, 0xb4, 0x05, 0x12, 0x1d, 0x7c, 0x80,
-0x51, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xfd, 0xf0, 0x80, 0x48, 0x30, 0x95, 0x05, 0x12, 0x1d, 0x7c,
-0x80, 0x40, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xfd, 0xf0, 0x80, 0x37, 0x90, 0xfa, 0xb7, 0xe0, 0x60,
-0x12, 0x90, 0xff, 0xb4, 0x12, 0x1c, 0x3a, 0x30, 0xe1, 0x28, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x02,
-0xf0, 0x80, 0x1f, 0x90, 0xff, 0xb4, 0xe0, 0x54, 0xfb, 0x12, 0x1c, 0x3d, 0x30, 0xe1, 0x13, 0x30,
-0x93, 0x09, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x02, 0xf0, 0x80, 0x07, 0x90, 0xff, 0xb4, 0xe0, 0x54,
-0xfd, 0xf0, 0xe4, 0xff, 0x02, 0x30, 0xec, 0x12, 0x1c, 0xd0, 0x90, 0xfa, 0xb6, 0xe0, 0x24, 0xfc,
-0x60, 0x40, 0x04, 0x70, 0x78, 0x90, 0xfa, 0xb7, 0xe0, 0x60, 0x1d, 0x90, 0xff, 0xa2, 0xe0, 0x44,
-0x40, 0xf0, 0xa3, 0xe0, 0xff, 0x30, 0xe7, 0x65, 0xd2, 0x03, 0xa3, 0xe0, 0x54, 0xdf, 0xf0, 0x90,
-0xff, 0xa3, 0xef, 0x54, 0x7f, 0xf0, 0x80, 0x55, 0x30, 0x03, 0x0e, 0x90, 0xff, 0xa3, 0xe0, 0x44,
-0x80, 0xf0, 0xc2, 0x03, 0xa3, 0xe0, 0x44, 0x20, 0xf0, 0x90, 0xff, 0xa2, 0xe0, 0x54, 0xbf, 0xf0,
-0x80, 0x3b, 0x90, 0xfa, 0xb7, 0xe0, 0x60, 0x1d, 0x90, 0xff, 0xb2, 0xe0, 0x44, 0x40, 0xf0, 0xa3,
-0xe0, 0xff, 0x30, 0xe7, 0x28, 0xd2, 0x04, 0xa3, 0xe0, 0x54, 0xdf, 0xf0, 0x90, 0xff, 0xb3, 0xef,
-0x54, 0x7f, 0xf0, 0x80, 0x18, 0x30, 0x04, 0x0e, 0x90, 0xff, 0xb3, 0xe0, 0x44, 0x80, 0xf0, 0xc2,
-0x04, 0xa3, 0xe0, 0x44, 0x20, 0xf0, 0x90, 0xff, 0xb2, 0xe0, 0x54, 0xbf, 0xf0, 0xe4, 0xff, 0x02,
-0x30, 0xec, 0x12, 0x1c, 0x30, 0x90, 0xfa, 0xb6, 0xe0, 0x24, 0xfc, 0x60, 0x0f, 0x04, 0x70, 0x16,
-0x90, 0xff, 0xa6, 0xe0, 0x12, 0x1c, 0x11, 0x12, 0x1a, 0x38, 0x80, 0x0a, 0x90, 0xff, 0xb6, 0xe0,
-0x12, 0x1c, 0x11, 0x12, 0x1a, 0x38, 0x75, 0x39, 0x00, 0x75, 0x3a, 0x01, 0x02, 0x2c, 0x07, 0xe4,
-0xff, 0x12, 0x30, 0xec, 0x12, 0x1d, 0x37, 0x7f, 0x03, 0x12, 0x12, 0x19, 0x90, 0xf9, 0x15, 0xe0,
-0x30, 0xe4, 0x08, 0x90, 0xff, 0x93, 0x74, 0x80, 0xf0, 0x80, 0x10, 0x90, 0xff, 0xfc, 0xe0, 0x54,
-0x7f, 0xf0, 0x7f, 0xff, 0x7e, 0x00, 0x12, 0x30, 0x16, 0xc2, 0x90, 0xc2, 0xaf, 0x00, 0x80, 0xfd,
-0xe4, 0xf5, 0x4e, 0xf5, 0x4f, 0x90, 0xfa, 0xbc, 0x74, 0x3e, 0xf0, 0xa3, 0xe4, 0xf0, 0x90, 0xfa,
-0xb4, 0xf0, 0xa3, 0x74, 0x15, 0xf0, 0xe0, 0x54, 0x3f, 0xff, 0xc3, 0x74, 0x40, 0x9f, 0x90, 0xfa,
-0xb9, 0xf0, 0xd3, 0x94, 0x00, 0xe4, 0x94, 0x3e, 0x40, 0x08, 0x90, 0xfa, 0xbd, 0xe0, 0x90, 0xfa,
-0xb9, 0xf0, 0x12, 0x0f, 0x50, 0xe5, 0x31, 0x45, 0x30, 0x70, 0x73, 0x12, 0x1c, 0x4a, 0x90, 0xfa,
-0xbc, 0x12, 0x1d, 0x56, 0x60, 0x27, 0xd3, 0xef, 0x94, 0x40, 0xee, 0x94, 0x00, 0x40, 0x08, 0x90,
-0xfa, 0xb9, 0x74, 0x40, 0xf0, 0x80, 0x08, 0x90, 0xfa, 0xbd, 0xe0, 0x90, 0xfa, 0xb9, 0xf0, 0x12,
-0x0f, 0x50, 0xe5, 0x31, 0x45, 0x30, 0x70, 0x46, 0x12, 0x1c, 0x4a, 0x80, 0xd1, 0x75, 0x4c, 0x02,
-0x90, 0xfa, 0xbc, 0xe4, 0xf0, 0xa3, 0x04, 0xf0, 0x90, 0xfa, 0xb4, 0xe4, 0xf0, 0xa3, 0x74, 0x0f,
-0xf0, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x4c, 0x90, 0xfa, 0xbd, 0xe0, 0xf5, 0x4a, 0x7d, 0x0f, 0x7c,
-0x00, 0x12, 0x28, 0x9f, 0x75, 0x30, 0x00, 0x8f, 0x31, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x4c, 0xe4,
-0xf5, 0x2d, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0xe4, 0xf5, 0x30, 0xf5, 0x31, 0xaf, 0x31,
-0x02, 0x30, 0xec, 0x12, 0x1c, 0xd0, 0x30, 0xe7, 0x10, 0xe0, 0x54, 0x0f, 0x90, 0xf9, 0x64, 0xf0,
-0xd3, 0x94, 0x00, 0x40, 0x15, 0xc2, 0x95, 0x80, 0x11, 0x90, 0xfa, 0xb7, 0xe0, 0x54, 0x0f, 0x90,
-0xf9, 0x63, 0xf0, 0xd3, 0x94, 0x00, 0x40, 0x02, 0xc2, 0x94, 0xe4, 0xff, 0x02, 0x30, 0xec, 0x12,
-0x1d, 0x9c, 0xbf, 0x01, 0x04, 0xd2, 0x93, 0x80, 0x02, 0xc2, 0x93, 0xe4, 0xff, 0x02, 0x30, 0xec,
-0x12, 0x1c, 0xd0, 0x54, 0x03, 0x14, 0x60, 0x0a, 0x14, 0x60, 0x0f, 0x14, 0x60, 0x08, 0x24, 0x03,
-0x70, 0x2b, 0xd2, 0x91, 0x80, 0x27, 0xc2, 0x91, 0x80, 0x23, 0x12, 0x1d, 0xa6, 0x12, 0x0f, 0x78,
-0x60, 0x04, 0xd2, 0x91, 0x80, 0x17, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x10, 0x12, 0x0f, 0x78, 0xff,
-0xbf, 0xa0, 0x04, 0xc2, 0x91, 0x80, 0x02, 0xd2, 0x91, 0x12, 0x1d, 0xa6, 0xf0, 0x90, 0xfa, 0xb7,
-0xe0, 0x54, 0x0c, 0xff, 0x13, 0x13, 0x54, 0x3f, 0x14, 0x60, 0x0a, 0x14, 0x60, 0x0f, 0x14, 0x60,
-0x08, 0x24, 0x03, 0x70, 0x2b, 0xd2, 0x92, 0x80, 0x27, 0xc2, 0x92, 0x80, 0x23, 0x12, 0x1d, 0xad,
-0x12, 0x0f, 0x98, 0x60, 0x04, 0xd2, 0x92, 0x80, 0x17, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x10, 0x12,
-0x0f, 0x98, 0xff, 0xbf, 0xa0, 0x04, 0xc2, 0x92, 0x80, 0x02, 0xd2, 0x92, 0x12, 0x1d, 0xad, 0xf0,
-0xe4, 0xff, 0x02, 0x30, 0xec, 0xe5, 0x35, 0x30, 0xe7, 0x07, 0xe4, 0xfd, 0x7f, 0x05, 0x02, 0x2f,
-0x18, 0x7f, 0x05, 0x02, 0x30, 0xec, 0x12, 0x31, 0xbd, 0x22, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xb3,
-0x90, 0xfa, 0xb4, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x90,
-0xfa, 0xb4, 0xe4, 0x75, 0xf0, 0x03, 0x12, 0x1a, 0x6c, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x22,
-0xaa, 0x4e, 0xa9, 0x4f, 0x7b, 0xff, 0x90, 0xfa, 0xb4, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0x90, 0xfa,
-0xb9, 0xe0, 0xf5, 0x4a, 0x12, 0x28, 0x9f, 0x75, 0x30, 0x00, 0x8f, 0x31, 0x22, 0x12, 0x22, 0xa0,
-0x7e, 0x00, 0x8e, 0x30, 0x8f, 0x31, 0xef, 0x22, 0xf0, 0x7f, 0x01, 0x12, 0x12, 0x19, 0x90, 0xff,
-0xa6, 0xe0, 0x90, 0xfa, 0xb8, 0xf0, 0x54, 0xa0, 0x22, 0x12, 0x25, 0xd7, 0x8f, 0x4c, 0x7e, 0x00,
-0xc3, 0xef, 0x95, 0x3c, 0xee, 0x95, 0x3b, 0x22, 0xf0, 0x7f, 0x01, 0x12, 0x12, 0x19, 0x90, 0xff,
-0xb6, 0xe0, 0x90, 0xfa, 0xb8, 0xf0, 0x54, 0xa0, 0x22, 0x75, 0x39, 0x00, 0x75, 0x3a, 0x01, 0x02,
-0x2c, 0x07, 0x90, 0xfa, 0xb6, 0xe0, 0xff, 0x02, 0x31, 0x82, 0x8e, 0x39, 0x8f, 0x3a, 0x02, 0x2c,
-0x07, 0x12, 0x22, 0xa0, 0x7e, 0x00, 0x8e, 0x30, 0x8f, 0x31, 0xef, 0x22, 0x7d, 0x01, 0x12, 0x25,
-0xd7, 0x90, 0xfa, 0xb1, 0xe0, 0x22, 0xef, 0x90, 0xf8, 0x04, 0xf0, 0x22, 0xc0, 0xa8, 0xc2, 0xaf,
-0xee, 0x60, 0x0a, 0xc0, 0x05, 0x7d, 0x7f, 0xdd, 0xfe, 0xde, 0xfa, 0xd0, 0x05, 0xef, 0xc3, 0x94,
-0x15, 0x50, 0x03, 0xd0, 0xa8, 0x22, 0x13, 0x70, 0x03, 0xd0, 0xa8, 0x22, 0xff, 0xd5, 0x07, 0xfd,
-0xd0, 0xa8, 0x22, 0xc0, 0x00, 0xc0, 0x01, 0xc0, 0x02, 0xc0, 0x04, 0xc0, 0x05, 0xe5, 0x3e, 0x24,
-0x08, 0xf8, 0x86, 0x05, 0x53, 0x05, 0x7f, 0x7c, 0xff, 0x12, 0x10, 0x78, 0x7f, 0x00, 0x7e, 0x00,
-0xe5, 0x43, 0x60, 0x46, 0xfc, 0x90, 0xf9, 0x1b, 0xe0, 0x54, 0x7f, 0x6d, 0x70, 0x0f, 0xc0, 0x83,
-0xc0, 0x82, 0xa3, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0xa3, 0x15, 0x43, 0x80, 0x07, 0xa3, 0xa3, 0xa3,
-0xdc, 0xe6, 0x80, 0x26, 0xdc, 0x06, 0xd0, 0x82, 0xd0, 0x83, 0x80, 0x1e, 0xe0, 0xf8, 0xa3, 0xe0,
-0xf9, 0xa3, 0xe0, 0xfa, 0xd0, 0x82, 0xd0, 0x83, 0xe8, 0xf0, 0xa3, 0xe9, 0xf0, 0xa3, 0xea, 0xf0,
-0xa3, 0xc0, 0x83, 0xc0, 0x82, 0xa3, 0xa3, 0xa3, 0x80, 0xda, 0x12, 0x11, 0x0f, 0xd0, 0x05, 0xd0,
-0x04, 0xd0, 0x02, 0xd0, 0x01, 0xd0, 0x00, 0x22, 0x85, 0xa8, 0x44, 0x75, 0xa8, 0x88, 0xec, 0x70,
-0x02, 0x7c, 0x3f, 0x8c, 0x3d, 0x22, 0xe5, 0x3e, 0x24, 0x08, 0xf8, 0x76, 0x00, 0x12, 0x11, 0x66,
-0x80, 0xfb, 0xc0, 0x00, 0xc0, 0x01, 0xc0, 0x02, 0xc0, 0x04, 0xc0, 0x06, 0x7c, 0xff, 0x12, 0x10,
-0x78, 0xe5, 0x43, 0x60, 0x42, 0xfe, 0x90, 0xf9, 0x1b, 0xe0, 0x54, 0x7f, 0x6f, 0x70, 0x0b, 0xc0,
-0x83, 0xc0, 0x82, 0xa3, 0xa3, 0xa3, 0x15, 0x43, 0x80, 0x07, 0xa3, 0xa3, 0xa3, 0xde, 0xea, 0x80,
-0x26, 0xde, 0x06, 0xd0, 0x82, 0xd0, 0x83, 0x80, 0xd8, 0xe0, 0xf8, 0xa3, 0xe0, 0xf9, 0xa3, 0xe0,
-0xfa, 0xd0, 0x82, 0xd0, 0x83, 0xe8, 0xf0, 0xa3, 0xe9, 0xf0, 0xa3, 0xea, 0xf0, 0xa3, 0xc0, 0x83,
-0xc0, 0x82, 0xa3, 0xa3, 0xa3, 0x80, 0xda, 0x78, 0x08, 0x08, 0x79, 0x18, 0x09, 0x7c, 0x01, 0xe6,
-0x54, 0x7f, 0x6f, 0x70, 0x06, 0x76, 0x00, 0x77, 0x00, 0x80, 0x06, 0x08, 0x09, 0x0c, 0xbc, 0x08,
-0xee, 0x12, 0x11, 0x0f, 0xd0, 0x06, 0xd0, 0x04, 0xd0, 0x02, 0xd0, 0x01, 0xd0, 0x00, 0x22, 0x75,
-0x3d, 0x00, 0x85, 0x44, 0xa8, 0x22, 0xc0, 0xf0, 0xc0, 0x82, 0xc0, 0x83, 0xc3, 0xe5, 0x43, 0x24,
-0xe8, 0x50, 0x05, 0x12, 0x11, 0x66, 0x80, 0xf4, 0xef, 0x60, 0x31, 0x90, 0x30, 0x54, 0xe4, 0x93,
-0xc3, 0x9f, 0x40, 0x2f, 0xc0, 0x04, 0x7c, 0xff, 0x12, 0x10, 0x78, 0xd0, 0x04, 0x43, 0x07, 0x80,
-0xe5, 0x43, 0x75, 0xf0, 0x03, 0xa4, 0x24, 0x1b, 0xf5, 0x82, 0xe4, 0x34, 0xf9, 0xf5, 0x83, 0xef,
-0xf0, 0xec, 0xa3, 0xf0, 0xed, 0xa3, 0xf0, 0x05, 0x43, 0x12, 0x11, 0x0f, 0xd0, 0x83, 0xd0, 0x82,
-0xd0, 0xf0, 0x22, 0x02, 0x11, 0x94, 0xc0, 0x04, 0x7c, 0x20, 0xd2, 0x8c, 0xd2, 0x8d, 0xd5, 0x04,
-0xfd, 0xd0, 0x04, 0x22, 0x75, 0xa8, 0x00, 0x75, 0x88, 0x00, 0x75, 0xb8, 0x00, 0x75, 0xf0, 0x00,
-0x75, 0xd0, 0x00, 0xe4, 0xf8, 0x90, 0xf8, 0x04, 0xf0, 0x90, 0x00, 0x00, 0xf6, 0x08, 0xb8, 0x00,
-0xfb, 0x02, 0x00, 0x00, 0xc2, 0xaf, 0xe4, 0x90, 0xff, 0x48, 0xf0, 0x90, 0xff, 0x50, 0xf0, 0x90,
-0xff, 0x08, 0xf0, 0x90, 0xff, 0x10, 0xf0, 0x90, 0xff, 0x80, 0xf0, 0xa3, 0xa3, 0xf0, 0xd2, 0xb1,
-0xc2, 0xb0, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x0f, 0xdc, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x0f, 0xdc,
-0x7e, 0xff, 0x7f, 0xff, 0x12, 0x0f, 0xdc, 0xd2, 0xb0, 0xd2, 0xb1, 0x7e, 0xff, 0x7f, 0xff, 0x12,
-0x0f, 0xdc, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x0f, 0xdc, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x0f, 0xdc,
-0x80, 0xcc, 0xc3, 0xee, 0x94, 0x02, 0x50, 0x04, 0x7e, 0x03, 0x7f, 0xe8, 0xef, 0xf4, 0xff, 0xee,
-0xf4, 0xfe, 0x0f, 0xbf, 0x00, 0x01, 0x0e, 0x8f, 0x42, 0x8e, 0x41, 0x22, 0xc3, 0xef, 0x94, 0xbc,
-0xee, 0x94, 0x02, 0x50, 0x04, 0x7e, 0x07, 0x7f, 0xd0, 0xef, 0xf4, 0xff, 0xee, 0xf4, 0xfe, 0x0f,
-0xbf, 0x00, 0x01, 0x0e, 0x8f, 0x40, 0x8e, 0x3f, 0x22, 0xef, 0x70, 0x01, 0x22, 0xc0, 0x00, 0xc0,
-0xa8, 0xc2, 0xaf, 0xe5, 0x3e, 0x24, 0x18, 0xf8, 0xa6, 0x07, 0xe5, 0x3e, 0x24, 0x08, 0xf8, 0xc6,
-0x54, 0x7f, 0xf6, 0xd0, 0xa8, 0xe6, 0x30, 0xe7, 0x03, 0xd0, 0x00, 0x22, 0x12, 0x11, 0x66, 0x80,
-0xf4, 0xc0, 0x00, 0x7f, 0x01, 0xef, 0x24, 0x08, 0xf8, 0xe6, 0x60, 0x09, 0x0f, 0xbf, 0x08, 0xf5,
-0x12, 0x11, 0x66, 0x80, 0xee, 0xd0, 0x00, 0x22, 0xc0, 0xf0, 0xc0, 0x82, 0xc0, 0x83, 0xc0, 0x00,
-0xc0, 0x06, 0xc0, 0x04, 0xed, 0x24, 0x10, 0xf8, 0x76, 0x9a, 0xed, 0x75, 0xf0, 0x21, 0xa4, 0x24,
-0x05, 0xf5, 0x82, 0xe4, 0x34, 0xf8, 0xf5, 0x83, 0xc0, 0x82, 0xc0, 0x83, 0xa3, 0xa3, 0xe4, 0x78,
-0x0d, 0xf0, 0xa3, 0xd8, 0xfc, 0xef, 0x54, 0x7f, 0x75, 0xf0, 0x02, 0xa4, 0x24, 0x36, 0xf5, 0x82,
-0xe5, 0xf0, 0x34, 0x30, 0xf5, 0x83, 0xe4, 0x93, 0xfe, 0x74, 0x01, 0x93, 0xfc, 0xd0, 0x83, 0xd0,
-0x82, 0xec, 0xf0, 0xa3, 0xee, 0xf0, 0xed, 0x24, 0x08, 0xf8, 0xef, 0x44, 0x80, 0xf6, 0xd0, 0x04,
-0xd0, 0x06, 0xd0, 0x00, 0xd0, 0x83, 0xd0, 0x82, 0xd0, 0xf0, 0x22, 0x75, 0x3e, 0x00, 0x75, 0x43,
-0x00, 0x7a, 0x08, 0x79, 0x18, 0x78, 0x08, 0x76, 0x00, 0x77, 0x00, 0x08, 0x09, 0xda, 0xf8, 0x90,
-0xf8, 0x04, 0xe0, 0xfc, 0x90, 0x30, 0x54, 0xe4, 0x93, 0xc3, 0x9c, 0x50, 0x05, 0xe4, 0x90, 0xf8,
-0x04, 0xf0, 0x78, 0x08, 0x74, 0x80, 0x44, 0x7f, 0xf6, 0x74, 0x01, 0x44, 0x10, 0xf5, 0x89, 0x75,
-0xb8, 0x00, 0xd2, 0xab, 0xd2, 0xa9, 0x22, 0x75, 0x81, 0x8b, 0xd2, 0x8e, 0xd2, 0x8c, 0xd2, 0xaf,
-0xe5, 0x43, 0x60, 0x36, 0xff, 0x90, 0xf9, 0x1b, 0xe0, 0x54, 0x80, 0x60, 0x28, 0x78, 0x08, 0x79,
-0x08, 0xe0, 0x54, 0x7f, 0xfa, 0x7b, 0x00, 0xe6, 0x54, 0x7f, 0xb5, 0x02, 0x02, 0x7b, 0xff, 0x08,
-0xd9, 0xf5, 0xeb, 0x70, 0x10, 0xea, 0xf0, 0xc0, 0x07, 0x12, 0x12, 0x41, 0xad, 0x07, 0xaf, 0x02,
-0x12, 0x12, 0x58, 0xd0, 0x07, 0xa3, 0xa3, 0xa3, 0xdf, 0xce, 0x12, 0x11, 0x66, 0x80, 0xc1, 0x8f,
-0x24, 0x12, 0x2a, 0x06, 0x78, 0x80, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0x8e, 0x83, 0x24, 0x08, 0x12,
-0x21, 0xf3, 0xe0, 0xfd, 0x12, 0x22, 0x8a, 0x8a, 0x83, 0x24, 0x0a, 0x12, 0x21, 0xf3, 0xed, 0xf0,
-0x12, 0x22, 0x56, 0x24, 0x07, 0x12, 0x21, 0xf3, 0xe0, 0xff, 0x12, 0x22, 0x99, 0x24, 0x09, 0x12,
-0x21, 0xf3, 0xef, 0xf0, 0x90, 0xf9, 0x15, 0xe0, 0x30, 0xe4, 0x20, 0x08, 0x12, 0x22, 0x09, 0xc0,
-0x83, 0xc0, 0x82, 0xa3, 0xe0, 0x25, 0xe0, 0xff, 0x05, 0x82, 0xd5, 0x82, 0x02, 0x15, 0x83, 0x15,
-0x82, 0xe0, 0x33, 0xd0, 0x82, 0xd0, 0x83, 0xf0, 0xa3, 0xef, 0xf0, 0x78, 0x80, 0x12, 0x22, 0x09,
-0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0xec, 0xff, 0x12, 0x22, 0x8a, 0x8a, 0x83, 0x24, 0x08, 0x12, 0x21,
-0xf3, 0xef, 0xf0, 0xed, 0x12, 0x22, 0x99, 0x24, 0x07, 0x12, 0x21, 0xf3, 0xed, 0xf0, 0x12, 0x21,
-0xfb, 0xe0, 0xff, 0x30, 0xe7, 0x19, 0x12, 0x22, 0x6e, 0x12, 0x21, 0xf3, 0xe0, 0x60, 0x09, 0x12,
-0x21, 0xfb, 0xef, 0x44, 0x02, 0xf0, 0x80, 0x07, 0x12, 0x21, 0xfb, 0xef, 0x54, 0xfd, 0xf0, 0x78,
-0x7e, 0x12, 0x22, 0x09, 0xa3, 0xa3, 0xe0, 0xff, 0x53, 0x07, 0xc7, 0x08, 0xe6, 0xfc, 0x08, 0xe6,
-0xfd, 0x12, 0x22, 0x43, 0xa3, 0xe0, 0x30, 0xe3, 0x12, 0x8d, 0x82, 0x8c, 0x83, 0xe5, 0x82, 0x24,
-0x05, 0x12, 0x21, 0xf3, 0xe0, 0x90, 0x31, 0x94, 0x93, 0x42, 0x07, 0x53, 0x07, 0xfb, 0x78, 0x80,
-0xe6, 0xfc, 0x08, 0xe6, 0x8c, 0x83, 0x24, 0x06, 0x12, 0x21, 0xf3, 0xe0, 0x60, 0x03, 0x43, 0x07,
-0x04, 0x53, 0x07, 0xfc, 0x78, 0x80, 0x12, 0x22, 0x7a, 0x24, 0x04, 0x12, 0x21, 0xf3, 0xe0, 0x42,
-0x07, 0x43, 0x07, 0x80, 0x12, 0x22, 0x8a, 0xf5, 0x82, 0x8a, 0x83, 0xa3, 0xa3, 0xef, 0xf0, 0x12,
-0x22, 0x99, 0x24, 0x04, 0x12, 0x21, 0xf3, 0xe0, 0xff, 0x8d, 0x82, 0x8c, 0x83, 0xa3, 0xa3, 0xe0,
-0xfc, 0xa3, 0xe0, 0xfd, 0x30, 0xe1, 0x05, 0x53, 0x07, 0xdf, 0x80, 0x03, 0x43, 0x07, 0x20, 0xec,
-0x30, 0xe4, 0x05, 0x53, 0x07, 0xef, 0x80, 0x03, 0x43, 0x07, 0x10, 0x12, 0x21, 0xfb, 0xe0, 0xfe,
-0x54, 0x03, 0x60, 0x73, 0x53, 0x07, 0xdf, 0xee, 0x30, 0xe1, 0x69, 0x78, 0x80, 0x12, 0x22, 0x6f,
-0x12, 0x21, 0xf3, 0xe0, 0x12, 0x1b, 0x4c, 0x14, 0xa6, 0x00, 0x14, 0xda, 0x01, 0x14, 0xdf, 0x03,
-0x14, 0xda, 0x05, 0x14, 0xdf, 0x07, 0x14, 0xda, 0x09, 0x14, 0xdf, 0x0b, 0x14, 0xda, 0x0d, 0x14,
-0xdf, 0x0f, 0x00, 0x00, 0x14, 0xe7, 0xe5, 0x24, 0x64, 0x03, 0x70, 0x21, 0x90, 0xf9, 0x15, 0xe0,
-0x30, 0xe2, 0x0d, 0x30, 0xb4, 0x05, 0x43, 0x07, 0x02, 0x80, 0x2c, 0x53, 0x07, 0xfd, 0x80, 0x27,
-0x30, 0x95, 0x05, 0x43, 0x07, 0x02, 0x80, 0x1f, 0x53, 0x07, 0xfd, 0x80, 0x1a, 0x30, 0x93, 0x05,
-0x43, 0x07, 0x02, 0x80, 0x12, 0x53, 0x07, 0xfd, 0x80, 0x0d, 0x43, 0x07, 0x02, 0x80, 0x08, 0x53,
-0x07, 0xfd, 0x80, 0x03, 0x53, 0x07, 0xfd, 0x12, 0x22, 0x78, 0x24, 0x04, 0x12, 0x21, 0xf3, 0xef,
-0xf0, 0x8d, 0x82, 0x8c, 0x83, 0xa3, 0xa3, 0xa3, 0xe0, 0xff, 0x12, 0x21, 0xfb, 0xe0, 0xfe, 0x54,
-0x03, 0x70, 0x03, 0x02, 0x15, 0xd7, 0xee, 0x20, 0xe1, 0x03, 0x02, 0x15, 0xd4, 0x12, 0x22, 0x6e,
-0x12, 0x21, 0xf3, 0xe0, 0x12, 0x1b, 0x4c, 0x15, 0x36, 0x00, 0x15, 0x6c, 0x01, 0x15, 0x6c, 0x03,
-0x15, 0xa0, 0x05, 0x15, 0xa0, 0x07, 0x15, 0x86, 0x09, 0x15, 0x86, 0x0b, 0x15, 0xba, 0x0d, 0x15,
-0xba, 0x0f, 0x00, 0x00, 0x15, 0xd7, 0xe5, 0x24, 0x64, 0x03, 0x70, 0x23, 0x90, 0xf9, 0x15, 0xe0,
-0x30, 0xe2, 0x0f, 0x30, 0xb1, 0x06, 0x53, 0x07, 0x7f, 0x02, 0x15, 0xd7, 0x43, 0x07, 0x80, 0x02,
-0x15, 0xd7, 0x30, 0x94, 0x05, 0x53, 0x07, 0x7f, 0x80, 0x7d, 0x43, 0x07, 0x80, 0x80, 0x78, 0x30,
-0x92, 0x05, 0x53, 0x07, 0x7f, 0x80, 0x70, 0x43, 0x07, 0x80, 0x80, 0x6b, 0xe5, 0x24, 0xb4, 0x03,
-0x09, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xef, 0xf0, 0x80, 0x07, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xdf,
-0xf0, 0x53, 0x07, 0x7f, 0x80, 0x51, 0xe5, 0x24, 0xb4, 0x03, 0x09, 0x90, 0xff, 0x9e, 0xe0, 0x44,
-0x10, 0xf0, 0x80, 0x07, 0x90, 0xff, 0x9e, 0xe0, 0x44, 0x20, 0xf0, 0x53, 0x07, 0x7f, 0x80, 0x37,
-0xe5, 0x24, 0xb4, 0x03, 0x09, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xef, 0xf0, 0x80, 0x07, 0x90, 0xff,
-0x9e, 0xe0, 0x54, 0xdf, 0xf0, 0x43, 0x07, 0x80, 0x80, 0x1d, 0xe5, 0x24, 0xb4, 0x03, 0x09, 0x90,
-0xff, 0x9e, 0xe0, 0x44, 0x10, 0xf0, 0x80, 0x07, 0x90, 0xff, 0x9e, 0xe0, 0x44, 0x20, 0xf0, 0x43,
-0x07, 0x80, 0x80, 0x03, 0x53, 0x07, 0x7f, 0x78, 0x80, 0x12, 0x22, 0x3f, 0xe0, 0xfc, 0xa3, 0xe0,
-0xfd, 0x30, 0xe0, 0x05, 0x43, 0x07, 0x20, 0x80, 0x03, 0x53, 0x07, 0xdf, 0xec, 0x30, 0xe3, 0x05,
-0x43, 0x07, 0x40, 0x80, 0x03, 0x53, 0x07, 0xbf, 0xec, 0x30, 0xe0, 0x05, 0x43, 0x07, 0x10, 0x80,
-0x03, 0x53, 0x07, 0xef, 0xed, 0x30, 0xe4, 0x05, 0x43, 0x07, 0x08, 0x80, 0x03, 0x53, 0x07, 0xf7,
-0xed, 0x30, 0xe5, 0x05, 0x43, 0x07, 0x04, 0x80, 0x03, 0x53, 0x07, 0xfb, 0xed, 0x30, 0xe6, 0x05,
-0x43, 0x07, 0x01, 0x80, 0x03, 0x53, 0x07, 0xfe, 0xed, 0x30, 0xe7, 0x05, 0x43, 0x07, 0x02, 0x80,
-0x03, 0x53, 0x07, 0xfd, 0x78, 0x7e, 0x12, 0x22, 0x3f, 0xa3, 0xef, 0xf0, 0x12, 0x31, 0xc7, 0x7f,
-0x00, 0x22, 0x90, 0xff, 0xfa, 0x74, 0x08, 0xf0, 0xa3, 0x74, 0x16, 0xf0, 0x90, 0xff, 0xf9, 0x74,
-0x02, 0xf0, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xcc, 0xe4, 0xfd, 0x12, 0x22, 0xa0, 0x90, 0xfa, 0xcc,
-0xe4, 0x75, 0xf0, 0x03, 0x12, 0x1a, 0x6c, 0x12, 0x18, 0xe2, 0xe5, 0x23, 0x30, 0xe7, 0x02, 0xd2,
-0x02, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x24, 0x90, 0xfa, 0xcc, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0, 0xf5,
-0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x90, 0xfa, 0xcc, 0xe4, 0xf0, 0xa3, 0x74, 0x0b, 0xf0, 0x7b,
-0x00, 0x7a, 0x00, 0x79, 0x23, 0x75, 0x2d, 0x00, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0xe5,
-0x23, 0x24, 0x80, 0x90, 0xff, 0xf8, 0xf0, 0xe5, 0x23, 0x64, 0x07, 0x60, 0x1e, 0xe5, 0x23, 0x64,
-0x06, 0x60, 0x18, 0xe5, 0x23, 0x64, 0x14, 0x60, 0x12, 0xe5, 0x23, 0x64, 0x41, 0x60, 0x0c, 0xe5,
-0x23, 0x64, 0x1a, 0x70, 0x46, 0xe5, 0x24, 0x64, 0x02, 0x70, 0x40, 0xe5, 0x23, 0xb4, 0x07, 0x16,
-0xd2, 0x94, 0xd2, 0x95, 0xd2, 0x92, 0xd2, 0x93, 0x90, 0xf9, 0x15, 0xe0, 0x44, 0x02, 0xf0, 0xa3,
-0xe0, 0x44, 0x02, 0xf0, 0x80, 0x1e, 0xe5, 0x23, 0xb4, 0x41, 0x12, 0x90, 0xf9, 0x15, 0xe0, 0x44,
-0x06, 0xf0, 0xa3, 0xe0, 0x44, 0x06, 0xf0, 0xd2, 0xb1, 0xd2, 0xb4, 0x80, 0x07, 0x90, 0xf9, 0x15,
-0xe0, 0x44, 0x01, 0xf0, 0x90, 0xf9, 0x16, 0xe0, 0x44, 0x01, 0xf0, 0xe5, 0x23, 0x64, 0x42, 0x60,
-0x05, 0xe5, 0x23, 0xb4, 0x43, 0x0c, 0x90, 0xf9, 0x15, 0xe0, 0x44, 0x80, 0xf0, 0xa3, 0xe0, 0x44,
-0x80, 0xf0, 0x90, 0xfa, 0xcc, 0xe4, 0xf0, 0xa3, 0x74, 0x0d, 0xf0, 0x12, 0x18, 0xe2, 0x90, 0xff,
-0xf5, 0xe5, 0x23, 0xf0, 0xe4, 0xf5, 0x35, 0xf5, 0x33, 0xf5, 0x34, 0xf5, 0x32, 0x12, 0x1d, 0x84,
-0x12, 0x1c, 0x30, 0x12, 0x1d, 0x8b, 0x90, 0xf9, 0x67, 0x12, 0x1b, 0x43, 0x90, 0xf9, 0x6c, 0x12,
-0x1b, 0x43, 0x90, 0xff, 0xff, 0xe4, 0xf0, 0x90, 0xff, 0x83, 0xe0, 0xe4, 0xf0, 0x90, 0xff, 0x81,
+0xc0, 0x06, 0xc0, 0x07, 0x74, 0x15, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0, 0x60, 0x23, 0x74,
+0x66, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0, 0x14, 0xf0, 0x70, 0x16, 0x74, 0xff, 0xf0, 0x74,
+0x1c, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0, 0x60, 0x04, 0x14, 0xf0, 0x70, 0x04, 0xc2, 0x90,
+0x80, 0xfc, 0x90, 0xff, 0x93, 0x74, 0x81, 0xf0, 0xe5, 0x81, 0x94, 0xfd, 0x40, 0x03, 0x02, 0x11,
+0xdc, 0x85, 0x41, 0x8d, 0x85, 0x42, 0x8b, 0x74, 0xb2, 0xf5, 0x82, 0x74, 0xfa, 0xf5, 0x83, 0xe0,
+0xb4, 0x01, 0x1b, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x4a, 0xe0, 0x30, 0xe7, 0x2c, 0x90, 0xff,
+0x4e, 0xe0, 0x30, 0xe7, 0x25, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x02, 0xf0, 0x80, 0x20, 0xb4, 0x02,
+0x1d, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x7a, 0xe0, 0x30, 0xe7, 0x05, 0x12, 0x28, 0x4e, 0x80,
+0x09, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x03, 0xf0, 0x80, 0x04, 0xd0, 0x83, 0xd0, 0x82, 0xa3, 0xe0,
+0xb4, 0x01, 0x1b, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x52, 0xe0, 0x30, 0xe7, 0x2c, 0x90, 0xff,
+0x56, 0xe0, 0x30, 0xe7, 0x25, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x02, 0xf0, 0x80, 0x25, 0xb4, 0x02,
+0x22, 0xc0, 0x82, 0xc0, 0x83, 0x90, 0xff, 0x7a, 0xe0, 0x30, 0xe7, 0x05, 0x12, 0x28, 0x4e, 0x80,
+0x09, 0xd0, 0x83, 0xd0, 0x82, 0x74, 0x03, 0xf0, 0x80, 0x09, 0xd0, 0x83, 0xd0, 0x82, 0x80, 0x03,
+0x02, 0x02, 0x90, 0x74, 0x16, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0, 0x20, 0x04, 0xf1, 0x20,
+0x02, 0x03, 0x30, 0x01, 0xeb, 0x74, 0x19, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xe0, 0x14, 0xfc,
+0xf0, 0xa3, 0xe0, 0xfd, 0xa3, 0xe0, 0xfe, 0x64, 0x04, 0x70, 0x0f, 0xec, 0x70, 0x62, 0x7e, 0x01,
+0x12, 0x00, 0xc9, 0x7c, 0x0a, 0x7d, 0xfa, 0x02, 0x02, 0x61, 0x12, 0x00, 0xc9, 0xee, 0x64, 0x04,
+0x60, 0x1d, 0xec, 0x70, 0x4b, 0x7c, 0x0a, 0xed, 0x14, 0xfd, 0x70, 0x15, 0xee, 0x64, 0x02, 0x60,
+0x07, 0x7e, 0x02, 0x7d, 0x32, 0x02, 0x02, 0x61, 0x7e, 0x01, 0x7d, 0xfa, 0x02, 0x02, 0x61, 0x7c,
+0x0a, 0x74, 0x19, 0xf5, 0x82, 0x74, 0xf9, 0xf5, 0x83, 0xec, 0xf0, 0xa3, 0xed, 0xf0, 0xa3, 0xee,
+0xf0, 0x14, 0x60, 0x18, 0x20, 0xe1, 0x0f, 0x20, 0x01, 0x06, 0xd2, 0xb1, 0xc2, 0xb0, 0x80, 0x10,
+0xc2, 0xb1, 0xd2, 0xb0, 0x80, 0x0a, 0xc2, 0xb1, 0xc2, 0xb0, 0x80, 0x04, 0xd2, 0xb0, 0xd2, 0xb1,
+0x78, 0x19, 0x79, 0x09, 0x7a, 0x07, 0xe7, 0x70, 0x04, 0xa6, 0x00, 0x80, 0x0b, 0xe6, 0x60, 0x08,
+0x16, 0xe6, 0x70, 0x04, 0xe7, 0x44, 0x80, 0xf7, 0x08, 0x09, 0xda, 0xea, 0xe5, 0x3d, 0x60, 0x13,
+0x14, 0xf5, 0x3d, 0x70, 0x0e, 0xe5, 0x3e, 0x24, 0x08, 0xf8, 0x76, 0x00, 0x12, 0x11, 0x57, 0xd2,
+0x8c, 0xd2, 0x8d, 0xd0, 0x07, 0xd0, 0x06, 0xd0, 0x05, 0xd0, 0x04, 0xd0, 0x03, 0xd0, 0x02, 0xd0,
+0x01, 0xd0, 0x00, 0xd0, 0x83, 0xd0, 0x82, 0xd0, 0xf0, 0xd0, 0xd0, 0xd0, 0xe0, 0x32, 0x90, 0xff,
+0x04, 0xe0, 0x90, 0xfa, 0xb9, 0xf0, 0x90, 0xff, 0x06, 0xe0, 0xfc, 0xa3, 0xe0, 0xfa, 0xec, 0xff,
+0xea, 0xfe, 0xef, 0xc3, 0x94, 0x08, 0xee, 0x94, 0x01, 0x50, 0x02, 0x80, 0x04, 0x7e, 0x01, 0x7f,
+0x08, 0x8e, 0x3b, 0x8f, 0x3c, 0x90, 0xff, 0x02, 0xe0, 0xfc, 0xa3, 0xe0, 0xfa, 0xec, 0xff, 0xea,
+0x90, 0xfa, 0xbd, 0xf0, 0xef, 0xa3, 0xf0, 0x12, 0x1c, 0xe0, 0xe4, 0xf5, 0x4d, 0xe5, 0x4d, 0xc3,
+0x94, 0x02, 0x50, 0x0f, 0x12, 0x1c, 0xc1, 0xe4, 0x12, 0x1a, 0xe8, 0x05, 0x4d, 0x04, 0x12, 0x1c,
+0xb2, 0x80, 0xea, 0x12, 0x1c, 0xe0, 0x90, 0xff, 0x00, 0xe0, 0xff, 0x54, 0x60, 0x24, 0xc0, 0x70,
+0x03, 0x02, 0x08, 0xf3, 0x24, 0x40, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0xfe,
+0x54, 0x0f, 0xf5, 0x4d, 0xee, 0x30, 0xe7, 0x03, 0xd3, 0x80, 0x01, 0xc3, 0x92, 0x0a, 0x90, 0xff,
+0x01, 0xe0, 0x12, 0x1b, 0xfc, 0x03, 0x84, 0x00, 0x04, 0x57, 0x01, 0x05, 0x6a, 0x03, 0x06, 0x31,
+0x05, 0x06, 0x73, 0x06, 0x07, 0xd5, 0x08, 0x08, 0x1d, 0x09, 0x08, 0x79, 0x0a, 0x08, 0xb9, 0x0b,
+0x00, 0x00, 0x0f, 0x6e, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbd, 0xe0,
+0x70, 0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x3c, 0x64, 0x02, 0x45, 0x3b, 0x60,
+0x03, 0x02, 0x0f, 0x6e, 0xef, 0x54, 0x1f, 0x14, 0x60, 0x2b, 0x14, 0x60, 0x47, 0x24, 0x02, 0x60,
+0x03, 0x02, 0x0f, 0x6e, 0xee, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1c, 0xc1, 0x74, 0x01, 0x12,
+0x1a, 0xe8, 0x78, 0x67, 0xe6, 0x30, 0xe0, 0x08, 0x12, 0x1c, 0xc1, 0x74, 0x02, 0x12, 0x1a, 0xe8,
+0x7f, 0x02, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x20, 0xe1, 0x09, 0x90, 0xfa, 0xb9, 0xe0, 0x60, 0x03,
+0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0xd3, 0x94, 0x01, 0x40, 0x03, 0x02, 0x0f, 0x6e, 0x7f,
+0x02, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x20, 0xe1, 0x0e, 0x90, 0xfa, 0xb9, 0xe0, 0xff, 0x60, 0x07,
+0x64, 0x80, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x0f, 0xfa, 0x40, 0x03, 0x02, 0x0f, 0x6e, 0xe5,
+0x4d, 0x70, 0x19, 0x30, 0x0a, 0x0b, 0x90, 0xff, 0x80, 0x12, 0x1c, 0xbe, 0x12, 0x1a, 0xe8, 0x80,
+0x24, 0x90, 0xff, 0x82, 0x12, 0x1c, 0xbe, 0x12, 0x1a, 0xe8, 0x80, 0x19, 0x15, 0x4d, 0x30, 0x0a,
+0x0b, 0x12, 0x1d, 0x55, 0x12, 0x1c, 0xbc, 0x12, 0x1a, 0xe8, 0x80, 0x09, 0x12, 0x1d, 0x63, 0x12,
+0x1c, 0xbc, 0x12, 0x1a, 0xe8, 0x12, 0x1c, 0xc1, 0x12, 0x1a, 0xa2, 0x60, 0x05, 0x74, 0x01, 0x12,
+0x1a, 0xe8, 0x7f, 0x02, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x6e, 0xe5,
+0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x14, 0x60, 0x2d, 0x14, 0x60,
+0x59, 0x24, 0x02, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbd, 0xe0, 0x70, 0x04, 0xa3, 0xe0,
+0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e,
+0x78, 0x67, 0xe6, 0x54, 0xfe, 0xf6, 0xe4, 0xff, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x20, 0xe1, 0x06,
+0x20, 0xe0, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x30, 0xe0, 0x09, 0x90, 0xfa, 0xb9, 0xe0, 0x60,
+0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x30, 0xe1, 0x0c, 0x90, 0xfa, 0xb9, 0xe0, 0xd3, 0x94, 0x01,
+0x40, 0x03, 0x02, 0x0f, 0x6e, 0xe4, 0xff, 0x02, 0x32, 0x6e, 0x90, 0xfa, 0xbd, 0xe0, 0x70, 0x02,
+0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x0f, 0xfa, 0x40, 0x03, 0x02, 0x0f, 0x6e, 0xe5,
+0x35, 0x20, 0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x30, 0xe0, 0x07, 0xe5,
+0x4d, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x4d, 0x70, 0x0f, 0x90, 0xff, 0x82, 0xe0, 0x54, 0xf7,
+0xf0, 0x90, 0xff, 0x80, 0xe0, 0x54, 0xf7, 0xf0, 0x22, 0xe5, 0x4d, 0x24, 0xfe, 0x60, 0x20, 0x24,
+0xfb, 0x60, 0x34, 0x24, 0x06, 0x70, 0x35, 0x30, 0x0a, 0x0c, 0xa2, 0x0a, 0xe4, 0x33, 0xfd, 0x7f,
+0x03, 0x12, 0x2e, 0x79, 0x80, 0x26, 0xe4, 0xfd, 0x7f, 0x03, 0x12, 0x2e, 0x79, 0x80, 0x1d, 0x30,
+0x0a, 0x0c, 0xa2, 0x0a, 0xe4, 0x33, 0xfd, 0x7f, 0x04, 0x12, 0x2e, 0x79, 0x80, 0x0e, 0xe4, 0xfd,
+0x7f, 0x04, 0x12, 0x2e, 0x79, 0x80, 0x05, 0x7f, 0x87, 0x12, 0x31, 0xef, 0x15, 0x4d, 0x30, 0x0a,
+0x0b, 0x12, 0x1d, 0x55, 0xf5, 0x83, 0xe0, 0x54, 0xf7, 0xf0, 0x80, 0x09, 0x12, 0x1d, 0x63, 0xf5,
+0x83, 0xe0, 0x54, 0xf7, 0xf0, 0xe4, 0xff, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02,
+0x0f, 0x6e, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x14, 0x60,
+0x2d, 0x14, 0x60, 0x55, 0x24, 0x02, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbd, 0xe0, 0x70,
+0x04, 0xa3, 0xe0, 0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0x60, 0x03,
+0x02, 0x0f, 0x6e, 0x78, 0x67, 0xe6, 0x44, 0x01, 0xf6, 0xe4, 0xff, 0x02, 0x32, 0x6e, 0xe5, 0x35,
+0x20, 0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x30, 0xe0, 0x07, 0xe5, 0x4d,
+0x60, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x30, 0xe1, 0x0a, 0xe5, 0x4d, 0xd3, 0x94, 0x01, 0x40,
+0x03, 0x02, 0x0f, 0x6e, 0xe4, 0xff, 0x02, 0x32, 0x6e, 0x90, 0xfa, 0xbd, 0xe0, 0x70, 0x02, 0xa3,
+0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0xff, 0x12, 0x32, 0x3f, 0x40, 0x03,
+0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x20, 0xe1, 0x06, 0x20, 0xe0, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x4d,
+0x70, 0x09, 0x30, 0x0a, 0x03, 0x02, 0x1e, 0x14, 0x02, 0x1d, 0xdf, 0xe5, 0x35, 0x20, 0xe1, 0x03,
+0x02, 0x0f, 0x6e, 0x15, 0x4d, 0x30, 0x0a, 0x0b, 0x12, 0x1d, 0x55, 0xf5, 0x83, 0xe0, 0x44, 0x08,
+0xf0, 0x80, 0x09, 0x12, 0x1d, 0x63, 0xf5, 0x83, 0xe0, 0x44, 0x08, 0xf0, 0xe4, 0xff, 0x02, 0x32,
+0x6e, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02,
+0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x60, 0x03,
+0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x30, 0xe1, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbe, 0xe0, 0x90,
+0xff, 0xff, 0xf0, 0xe0, 0x60, 0x05, 0x43, 0x35, 0x01, 0x80, 0x03, 0x53, 0x35, 0xfe, 0xe4, 0xff,
+0x02, 0x32, 0x6e, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x3c, 0x45, 0x3b, 0x70,
+0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbd, 0xe0,
+0xfc, 0xa3, 0xe0, 0xfd, 0xec, 0x24, 0xfe, 0x60, 0x3a, 0x14, 0x60, 0x75, 0x24, 0x02, 0x60, 0x03,
+0x02, 0x0f, 0x6e, 0xed, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1c, 0xe0, 0x12, 0x1e, 0x0d, 0x7d,
+0x03, 0x12, 0x0f, 0xb5, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x0f, 0x72, 0x90, 0xfa, 0xb6, 0xe0,
+0xfd, 0xa3, 0x12, 0x1d, 0x2b, 0x12, 0x0f, 0xd1, 0x50, 0x02, 0x80, 0x04, 0xae, 0x3b, 0xaf, 0x3c,
+0x02, 0x10, 0x02, 0x12, 0x1c, 0xe0, 0x90, 0xf9, 0x16, 0xe0, 0x30, 0xe4, 0x0d, 0x12, 0x1e, 0x0d,
+0x7d, 0x14, 0x12, 0x0f, 0xb5, 0x60, 0x10, 0x02, 0x0f, 0x6e, 0x12, 0x1e, 0x0d, 0x7d, 0x04, 0x12,
+0x10, 0x09, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x0f, 0x72, 0x90, 0xfa, 0xb6, 0xe0, 0xfd, 0xa3,
+0x12, 0x1d, 0x2b, 0x12, 0x0f, 0xd1, 0x50, 0x02, 0x80, 0x04, 0xae, 0x3b, 0xaf, 0x3c, 0x02, 0x10,
+0x02, 0x12, 0x1e, 0x0d, 0x7d, 0x05, 0x12, 0x10, 0x09, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x7b, 0x01,
+0x7a, 0xfa, 0x79, 0xb6, 0x12, 0x1d, 0x28, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x90, 0xfa, 0xb7, 0xe4,
+0x75, 0xf0, 0x03, 0x12, 0x1b, 0x1c, 0x90, 0xfa, 0xbe, 0xe0, 0x90, 0xfa, 0xb5, 0xf0, 0xe4, 0xf5,
+0x4c, 0x90, 0xfa, 0xb5, 0xe0, 0xff, 0xe5, 0x4c, 0xc3, 0x9f, 0x50, 0x24, 0x12, 0x1d, 0x22, 0x12,
+0x10, 0x14, 0xff, 0xfd, 0x90, 0xfa, 0xb7, 0xe4, 0x8d, 0xf0, 0x12, 0x1b, 0x1c, 0x90, 0xfa, 0xb6,
+0xe0, 0xc3, 0x9f, 0xf0, 0xd3, 0x94, 0x00, 0x50, 0x03, 0x02, 0x0f, 0x6e, 0x05, 0x4c, 0x80, 0xd1,
+0x12, 0x1d, 0x22, 0x12, 0x10, 0x14, 0x24, 0xfe, 0xff, 0x90, 0xfa, 0xb6, 0xf0, 0xfd, 0xa3, 0xe4,
+0x75, 0xf0, 0x02, 0x12, 0x1b, 0x1c, 0x7a, 0xf9, 0x79, 0x72, 0x7b, 0x01, 0x8b, 0x36, 0x8a, 0x37,
+0x89, 0x38, 0xe9, 0x24, 0x02, 0xf9, 0xe4, 0x3a, 0xfa, 0x12, 0x1d, 0x28, 0x12, 0x26, 0x98, 0x8f,
+0x4c, 0x05, 0x4c, 0x05, 0x4c, 0x12, 0x1c, 0xc1, 0xe5, 0x4c, 0x12, 0x1a, 0xe8, 0x12, 0x1c, 0xc1,
+0x90, 0x00, 0x01, 0x74, 0x03, 0x12, 0x1a, 0xfa, 0xaf, 0x4c, 0x7e, 0x00, 0xc3, 0xef, 0x95, 0x3c,
+0xee, 0x95, 0x3b, 0x50, 0x02, 0x80, 0x04, 0xae, 0x3b, 0xaf, 0x3c, 0x8e, 0x39, 0x8f, 0x3a, 0x02,
+0x2c, 0xd8, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x3c, 0x64,
+0x01, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0, 0x60, 0x03, 0x02, 0x0f,
+0x6e, 0x90, 0xfa, 0xbd, 0xe0, 0x70, 0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d,
+0x79, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x20, 0xe0, 0x06, 0x20, 0xe1, 0x03, 0x02, 0x0f,
+0x6e, 0x75, 0x36, 0x00, 0x75, 0x37, 0x00, 0x75, 0x38, 0x32, 0x02, 0x0f, 0xf1, 0xe5, 0x35, 0x30,
+0xe7, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa,
+0xb9, 0xe0, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xd3, 0x90, 0xfa, 0xbe, 0xe0, 0x94, 0x01, 0x90, 0xfa,
+0xbd, 0xe0, 0x94, 0x00, 0x40, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x60, 0x03, 0x02, 0x0f,
+0x6e, 0xe5, 0x35, 0x20, 0xe0, 0x06, 0x20, 0xe1, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbe, 0xe0,
+0xf5, 0x32, 0xe5, 0x32, 0x70, 0x08, 0x43, 0x35, 0x01, 0x53, 0x35, 0xfd, 0x80, 0x06, 0x53, 0x35,
+0xfe, 0x43, 0x35, 0x02, 0xe4, 0xff, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x20, 0xe7, 0x03, 0x02, 0x0f,
+0x6e, 0xe5, 0x3c, 0x64, 0x01, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xb9, 0xe0,
+0x60, 0x03, 0x02, 0x0f, 0x6e, 0x90, 0xfa, 0xbd, 0xe0, 0x70, 0x02, 0xa3, 0xe0, 0x60, 0x03, 0x02,
+0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x64, 0x01, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x20, 0xe1,
+0x03, 0x02, 0x0f, 0x6e, 0x7f, 0x01, 0x02, 0x32, 0x6e, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f,
+0x6e, 0xe5, 0x3c, 0x45, 0x3b, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xd3, 0x90, 0xfa, 0xbe, 0xe0, 0x94,
+0x00, 0x90, 0xfa, 0xbd, 0xe0, 0x94, 0x00, 0x40, 0x03, 0x02, 0x0f, 0x6e, 0x12, 0x1d, 0x79, 0x64,
+0x01, 0x60, 0x03, 0x02, 0x0f, 0x6e, 0xe5, 0x35, 0x20, 0xe1, 0x03, 0x02, 0x0f, 0x6e, 0xe4, 0xff,
+0x02, 0x32, 0x6e, 0x90, 0xff, 0x01, 0x12, 0x1e, 0x24, 0xef, 0x12, 0x1a, 0xe8, 0x90, 0xfa, 0xb9,
+0x12, 0x1e, 0x24, 0x90, 0x00, 0x01, 0xef, 0x12, 0x1a, 0xfa, 0x90, 0x00, 0x02, 0xe4, 0x12, 0x1a,
+0xfa, 0x74, 0x03, 0x12, 0x1c, 0xb2, 0x90, 0xfa, 0xbd, 0xe0, 0xff, 0xa3, 0xe0, 0x85, 0x38, 0x82,
+0x85, 0x37, 0x83, 0xcf, 0xf0, 0xa3, 0xef, 0xf0, 0x90, 0xff, 0x01, 0xe0, 0x12, 0x1b, 0xfc, 0x09,
+0x7b, 0x02, 0x09, 0x9d, 0x04, 0x09, 0xbf, 0x05, 0x09, 0xeb, 0x06, 0x0a, 0x09, 0x07, 0x0a, 0x27,
+0x08, 0x0a, 0x45, 0x09, 0x0a, 0x63, 0x0b, 0x0b, 0x18, 0x80, 0x0d, 0xb7, 0x81, 0x0d, 0xe8, 0x82,
+0x0b, 0x5f, 0x83, 0x0b, 0xa8, 0x84, 0x0b, 0xc7, 0x85, 0x0c, 0x0c, 0x86, 0x0c, 0x57, 0x87, 0x0c,
+0xe8, 0x88, 0x0d, 0x73, 0x89, 0x0a, 0x81, 0x92, 0x0a, 0x81, 0x93, 0x0d, 0xa0, 0xb0, 0x0e, 0x9b,
+0xc0, 0x0e, 0xc7, 0xc1, 0x0e, 0xd8, 0xc2, 0x00, 0x00, 0x0f, 0x5d, 0xe5, 0x35, 0x20, 0xe7, 0x05,
+0x7f, 0x05, 0x02, 0x31, 0xa9, 0x12, 0x1d, 0x71, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c,
+0x00, 0x7f, 0x07, 0x02, 0x11, 0x5e, 0xe4, 0xfd, 0x7f, 0x07, 0x02, 0x2f, 0xb4, 0xe5, 0x35, 0x20,
+0xe7, 0x05, 0x7f, 0x05, 0x02, 0x31, 0xa9, 0x12, 0x1d, 0x71, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef,
+0xfd, 0x7c, 0x00, 0x7f, 0x0c, 0x02, 0x11, 0x5e, 0xe4, 0xfd, 0x7f, 0x07, 0x02, 0x2f, 0xb4, 0xe5,
+0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x71, 0x12, 0x1e, 0x42, 0x50, 0x06, 0xe5, 0x3c, 0x45, 0x3b,
+0x70, 0x05, 0x7f, 0x02, 0x02, 0x31, 0xa9, 0x90, 0xfa, 0xb9, 0xe0, 0x24, 0xfe, 0x24, 0xfd, 0x50,
+0x02, 0x80, 0x03, 0x02, 0x32, 0x2c, 0x7f, 0x07, 0x02, 0x31, 0xa9, 0xe5, 0x35, 0x30, 0xe7, 0x03,
+0x02, 0x0f, 0x71, 0x12, 0x1d, 0x71, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f,
+0x08, 0x02, 0x11, 0x5e, 0x7f, 0x07, 0x02, 0x31, 0xa9, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f,
+0x71, 0x12, 0x1d, 0x71, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x09, 0x02,
+0x11, 0x5e, 0x7f, 0x07, 0x02, 0x31, 0xa9, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x71, 0x12,
+0x1d, 0x71, 0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0a, 0x02, 0x11, 0x5e,
+0x7f, 0x07, 0x02, 0x31, 0xa9, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x71, 0x12, 0x1d, 0x71,
+0x60, 0x03, 0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0b, 0x02, 0x11, 0x5e, 0x7f, 0x07,
+0x02, 0x31, 0xa9, 0xe5, 0x35, 0x30, 0xe7, 0x03, 0x02, 0x0f, 0x71, 0x12, 0x1d, 0x71, 0x60, 0x03,
+0x04, 0x70, 0x09, 0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0e, 0x02, 0x11, 0x5e, 0x7f, 0x07, 0x02, 0x31,
+0xa9, 0xe5, 0x35, 0x30, 0xe7, 0x56, 0x12, 0x1d, 0x79, 0x70, 0x4a, 0x90, 0xff, 0x02, 0xe0, 0xf5,
+0x4c, 0xe5, 0x4c, 0xb4, 0x82, 0x05, 0x75, 0x4c, 0x61, 0x80, 0x12, 0xe5, 0x4c, 0xb4, 0x83, 0x05,
+0x75, 0x4c, 0x62, 0x80, 0x08, 0xe5, 0x4c, 0xc4, 0x54, 0xf0, 0x04, 0xf5, 0x4c, 0x12, 0x1c, 0x22,
+0x12, 0x1e, 0x3b, 0x12, 0x25, 0xfa, 0x12, 0x1d, 0x89, 0x12, 0x1a, 0xbb, 0x60, 0x05, 0x12, 0x32,
+0x7a, 0x80, 0x06, 0x85, 0x33, 0x39, 0x85, 0x34, 0x3a, 0x75, 0x36, 0x01, 0x75, 0x37, 0xf9, 0x75,
+0x38, 0x75, 0x02, 0x2c, 0xd8, 0xe4, 0xfd, 0x7f, 0x05, 0x02, 0x2f, 0xb4, 0x12, 0x1d, 0x79, 0x60,
+0x05, 0x7f, 0x05, 0x02, 0x31, 0xa9, 0x12, 0x1e, 0x42, 0x40, 0x05, 0x7f, 0x03, 0x02, 0x31, 0xa9,
+0x90, 0xff, 0x02, 0xe0, 0xf5, 0x4c, 0xe5, 0x4c, 0xb4, 0x82, 0x05, 0x75, 0x4c, 0x61, 0x80, 0x12,
+0xe5, 0x4c, 0xb4, 0x83, 0x05, 0x75, 0x4c, 0x62, 0x80, 0x08, 0xe5, 0x4c, 0xc4, 0x54, 0xf0, 0x04,
+0xf5, 0x4c, 0x12, 0x1c, 0x22, 0x02, 0x32, 0x2c, 0x12, 0x1e, 0x4c, 0x12, 0x2a, 0xc7, 0x12, 0x1d,
+0x33, 0xe0, 0x54, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0xe0, 0x90, 0xfa, 0xba, 0xf0, 0x78, 0x68, 0x12,
+0x1b, 0xd8, 0x90, 0x00, 0x02, 0x12, 0x1a, 0xbb, 0x30, 0xe7, 0xf2, 0x90, 0x00, 0x02, 0xe4, 0x12,
+0x1a, 0xfa, 0x90, 0xfa, 0xba, 0xe0, 0x44, 0x80, 0xff, 0xf0, 0x78, 0x7c, 0xe6, 0xfc, 0x08, 0xe6,
+0x8c, 0x83, 0x12, 0x1d, 0x3b, 0xef, 0xf0, 0x12, 0x32, 0x84, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x90,
+0xfa, 0xb9, 0xe0, 0x64, 0x01, 0x70, 0x1f, 0x90, 0xfa, 0xbd, 0xe0, 0xff, 0x7e, 0x00, 0x70, 0x06,
+0xa3, 0xe0, 0xf5, 0x90, 0x80, 0x2d, 0xc2, 0xaf, 0xef, 0xf4, 0x52, 0x90, 0x90, 0xfa, 0xbe, 0xe0,
+0x42, 0x90, 0xd2, 0xaf, 0x80, 0x1d, 0x90, 0xfa, 0xbd, 0xe0, 0xff, 0x7e, 0x00, 0x70, 0x06, 0xa3,
+0xe0, 0xf5, 0xb0, 0x80, 0x0e, 0xc2, 0xaf, 0xef, 0xf4, 0x52, 0xb0, 0x90, 0xfa, 0xbe, 0xe0, 0x42,
+0xb0, 0xd2, 0xaf, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x12, 0x1c, 0xe0, 0x90, 0xfa, 0xb9, 0xe0, 0xb4,
+0x01, 0x0a, 0x12, 0x1c, 0xc1, 0xe5, 0x90, 0x12, 0x1a, 0xe8, 0x80, 0x08, 0x12, 0x1c, 0xc1, 0xe5,
+0xb0, 0x12, 0x1a, 0xe8, 0x02, 0x0f, 0xf1, 0x90, 0xfa, 0xb9, 0xe0, 0xff, 0x24, 0x13, 0x12, 0x1c,
+0xf1, 0x20, 0xe1, 0x33, 0x12, 0x1d, 0x80, 0xef, 0x24, 0xfc, 0x60, 0x18, 0x04, 0x70, 0x28, 0x90,
+0xfa, 0xba, 0xe0, 0x60, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x10, 0xf0, 0x80, 0x19, 0x12, 0x1e,
+0x56, 0xf0, 0x80, 0x13, 0x90, 0xfa, 0xba, 0xe0, 0x60, 0x09, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x10,
+0xf0, 0x80, 0x04, 0x12, 0x1e, 0x5d, 0xf0, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x90, 0xfa, 0xb9, 0xe0,
+0xff, 0x24, 0x13, 0x12, 0x1c, 0xf1, 0x20, 0xe1, 0x39, 0x12, 0x1d, 0x80, 0xef, 0x24, 0xfc, 0x60,
+0x1b, 0x04, 0x70, 0x2e, 0x90, 0xfa, 0xba, 0xe0, 0x60, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x20,
+0xf0, 0x80, 0x1f, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xdf, 0xf0, 0x80, 0x16, 0x90, 0xfa, 0xba, 0xe0,
+0x60, 0x09, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x20, 0xf0, 0x80, 0x07, 0x90, 0xff, 0xb4, 0xe0, 0x54,
+0xdf, 0xf0, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x12, 0x1d, 0x80, 0x12, 0x1d, 0x71, 0x60, 0x4d, 0x04,
+0x60, 0x03, 0x02, 0x0c, 0xe3, 0x90, 0xfa, 0xba, 0xe0, 0x60, 0x0f, 0x90, 0xff, 0xa4, 0x12, 0x1c,
+0xea, 0x30, 0xe1, 0x6f, 0x12, 0x1e, 0x2c, 0x02, 0x0c, 0xe3, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xfb,
+0x12, 0x1c, 0xed, 0xfe, 0x30, 0xe1, 0x5c, 0x30, 0xe2, 0x11, 0x30, 0xb4, 0x05, 0x12, 0x1e, 0x2c,
+0x80, 0x51, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xfd, 0xf0, 0x80, 0x48, 0x30, 0x95, 0x05, 0x12, 0x1e,
+0x2c, 0x80, 0x40, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xfd, 0xf0, 0x80, 0x37, 0x90, 0xfa, 0xba, 0xe0,
+0x60, 0x12, 0x90, 0xff, 0xb4, 0x12, 0x1c, 0xea, 0x30, 0xe1, 0x28, 0x90, 0xff, 0xb4, 0xe0, 0x44,
+0x02, 0xf0, 0x80, 0x1f, 0x90, 0xff, 0xb4, 0xe0, 0x54, 0xfb, 0x12, 0x1c, 0xed, 0x30, 0xe1, 0x13,
+0x30, 0x93, 0x09, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x02, 0xf0, 0x80, 0x07, 0x90, 0xff, 0xb4, 0xe0,
+0x54, 0xfd, 0xf0, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x12, 0x1d, 0x80, 0x90, 0xfa, 0xb9, 0xe0, 0x24,
+0xfc, 0x60, 0x40, 0x04, 0x70, 0x78, 0x90, 0xfa, 0xba, 0xe0, 0x60, 0x1d, 0x90, 0xff, 0xa2, 0xe0,
+0x44, 0x40, 0xf0, 0xa3, 0xe0, 0xff, 0x30, 0xe7, 0x65, 0xd2, 0x03, 0xa3, 0xe0, 0x54, 0xdf, 0xf0,
+0x90, 0xff, 0xa3, 0xef, 0x54, 0x7f, 0xf0, 0x80, 0x55, 0x30, 0x03, 0x0e, 0x90, 0xff, 0xa3, 0xe0,
+0x44, 0x80, 0xf0, 0xc2, 0x03, 0xa3, 0xe0, 0x44, 0x20, 0xf0, 0x90, 0xff, 0xa2, 0xe0, 0x54, 0xbf,
+0xf0, 0x80, 0x3b, 0x90, 0xfa, 0xba, 0xe0, 0x60, 0x1d, 0x90, 0xff, 0xb2, 0xe0, 0x44, 0x40, 0xf0,
+0xa3, 0xe0, 0xff, 0x30, 0xe7, 0x28, 0xd2, 0x04, 0xa3, 0xe0, 0x54, 0xdf, 0xf0, 0x90, 0xff, 0xb3,
+0xef, 0x54, 0x7f, 0xf0, 0x80, 0x18, 0x30, 0x04, 0x0e, 0x90, 0xff, 0xb3, 0xe0, 0x44, 0x80, 0xf0,
+0xc2, 0x04, 0xa3, 0xe0, 0x44, 0x20, 0xf0, 0x90, 0xff, 0xb2, 0xe0, 0x54, 0xbf, 0xf0, 0xe4, 0xff,
+0x02, 0x31, 0xa9, 0x12, 0x1c, 0xe0, 0x90, 0xfa, 0xb9, 0xe0, 0x24, 0xfc, 0x60, 0x0f, 0x04, 0x70,
+0x16, 0x90, 0xff, 0xa6, 0xe0, 0x12, 0x1c, 0xc1, 0x12, 0x1a, 0xe8, 0x80, 0x0a, 0x90, 0xff, 0xb6,
+0xe0, 0x12, 0x1c, 0xc1, 0x12, 0x1a, 0xe8, 0x75, 0x39, 0x00, 0x75, 0x3a, 0x01, 0x02, 0x2c, 0xd8,
+0x90, 0xf9, 0x15, 0x74, 0x01, 0xf0, 0x90, 0xf9, 0x1c, 0x74, 0x19, 0xf0, 0x90, 0xf9, 0x66, 0x74,
+0xff, 0xf0, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0xe4, 0xff, 0x12, 0x31, 0xa9, 0x12, 0x1d, 0xe7, 0x7f,
+0x03, 0x12, 0x12, 0x61, 0x90, 0xf9, 0x16, 0xe0, 0x30, 0xe4, 0x08, 0x90, 0xff, 0x93, 0x74, 0x80,
+0xf0, 0x80, 0x10, 0x90, 0xff, 0xfc, 0xe0, 0x54, 0x7f, 0xf0, 0x7f, 0xff, 0x7e, 0x00, 0x12, 0x30,
+0xd3, 0xc2, 0x90, 0xc2, 0xaf, 0x00, 0x80, 0xfd, 0xe4, 0xf5, 0x4e, 0xf5, 0x4f, 0x90, 0xfa, 0xbf,
+0x74, 0x3e, 0xf0, 0xa3, 0xe4, 0xf0, 0x90, 0xfa, 0xb7, 0xf0, 0xa3, 0x74, 0x15, 0xf0, 0xe0, 0x54,
+0x3f, 0xff, 0xc3, 0x74, 0x40, 0x9f, 0x90, 0xfa, 0xbc, 0xf0, 0xd3, 0x94, 0x00, 0xe4, 0x94, 0x3e,
+0x40, 0x08, 0x90, 0xfa, 0xc0, 0xe0, 0x90, 0xfa, 0xbc, 0xf0, 0x12, 0x0f, 0x98, 0xe5, 0x31, 0x45,
+0x30, 0x70, 0x73, 0x12, 0x1c, 0xfa, 0x90, 0xfa, 0xbf, 0x12, 0x1e, 0x06, 0x60, 0x27, 0xd3, 0xef,
+0x94, 0x40, 0xee, 0x94, 0x00, 0x40, 0x08, 0x90, 0xfa, 0xbc, 0x74, 0x40, 0xf0, 0x80, 0x08, 0x90,
+0xfa, 0xc0, 0xe0, 0x90, 0xfa, 0xbc, 0xf0, 0x12, 0x0f, 0x98, 0xe5, 0x31, 0x45, 0x30, 0x70, 0x46,
+0x12, 0x1c, 0xfa, 0x80, 0xd1, 0x75, 0x4c, 0x02, 0x90, 0xfa, 0xbf, 0xe4, 0xf0, 0xa3, 0x04, 0xf0,
+0x90, 0xfa, 0xb7, 0xe4, 0xf0, 0xa3, 0x74, 0x0f, 0xf0, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x4c, 0x90,
+0xfa, 0xc0, 0xe0, 0xf5, 0x4a, 0x7d, 0x0f, 0x7c, 0x00, 0x12, 0x29, 0x60, 0x75, 0x30, 0x00, 0x8f,
+0x31, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x4c, 0xe4, 0xf5, 0x2d, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x26,
+0x98, 0xe4, 0xf5, 0x30, 0xf5, 0x31, 0xaf, 0x31, 0x02, 0x31, 0xa9, 0x12, 0x1d, 0x80, 0x30, 0xe7,
+0x10, 0xe0, 0x54, 0x0f, 0x90, 0xf9, 0x67, 0xf0, 0xd3, 0x94, 0x00, 0x40, 0x15, 0xc2, 0x95, 0x80,
+0x11, 0x90, 0xfa, 0xba, 0xe0, 0x54, 0x0f, 0x90, 0xf9, 0x65, 0xf0, 0xd3, 0x94, 0x00, 0x40, 0x02,
+0xc2, 0x94, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x12, 0x1e, 0x4c, 0xbf, 0x01, 0x04, 0xd2, 0x93, 0x80,
+0x02, 0xc2, 0x93, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0x12, 0x1d, 0x80, 0x54, 0x03, 0x14, 0x60, 0x0a,
+0x14, 0x60, 0x0f, 0x14, 0x60, 0x08, 0x24, 0x03, 0x70, 0x2b, 0xd2, 0x91, 0x80, 0x27, 0xc2, 0x91,
+0x80, 0x23, 0x12, 0x1e, 0x56, 0x12, 0x0f, 0xc0, 0x60, 0x04, 0xd2, 0x91, 0x80, 0x17, 0x90, 0xff,
+0xa4, 0xe0, 0x44, 0x10, 0x12, 0x0f, 0xc0, 0xff, 0xbf, 0xa0, 0x04, 0xc2, 0x91, 0x80, 0x02, 0xd2,
+0x91, 0x12, 0x1e, 0x56, 0xf0, 0x90, 0xfa, 0xba, 0xe0, 0x54, 0x0c, 0xff, 0x13, 0x13, 0x54, 0x3f,
+0x14, 0x60, 0x0a, 0x14, 0x60, 0x0f, 0x14, 0x60, 0x08, 0x24, 0x03, 0x70, 0x2b, 0xd2, 0x92, 0x80,
+0x27, 0xc2, 0x92, 0x80, 0x23, 0x12, 0x1e, 0x5d, 0x12, 0x0f, 0xe0, 0x60, 0x04, 0xd2, 0x92, 0x80,
+0x17, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x10, 0x12, 0x0f, 0xe0, 0xff, 0xbf, 0xa0, 0x04, 0xc2, 0x92,
+0x80, 0x02, 0xd2, 0x92, 0x12, 0x1e, 0x5d, 0xf0, 0xe4, 0xff, 0x02, 0x31, 0xa9, 0xe5, 0x35, 0x30,
+0xe7, 0x07, 0xe4, 0xfd, 0x7f, 0x05, 0x02, 0x2f, 0xb4, 0x7f, 0x05, 0x02, 0x31, 0xa9, 0x12, 0x32,
+0x7a, 0x22, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xb6, 0x90, 0xfa, 0xb7, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0,
+0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x90, 0xfa, 0xb7, 0xe4, 0x75, 0xf0, 0x03, 0x12, 0x1b,
+0x1c, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x22, 0xaa, 0x4e, 0xa9, 0x4f, 0x7b, 0xff, 0x90, 0xfa,
+0xb7, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0x90, 0xfa, 0xbc, 0xe0, 0xf5, 0x4a, 0x12, 0x29, 0x60, 0x75,
+0x30, 0x00, 0x8f, 0x31, 0x22, 0x12, 0x23, 0x61, 0x7e, 0x00, 0x8e, 0x30, 0x8f, 0x31, 0xef, 0x22,
+0xf0, 0x7f, 0x01, 0x12, 0x12, 0x61, 0x90, 0xff, 0xa6, 0xe0, 0x90, 0xfa, 0xbb, 0xf0, 0x54, 0xa0,
+0x22, 0x12, 0x26, 0x98, 0x8f, 0x4c, 0x7e, 0x00, 0xc3, 0xef, 0x95, 0x3c, 0xee, 0x95, 0x3b, 0x22,
+0xf0, 0x7f, 0x01, 0x12, 0x12, 0x61, 0x90, 0xff, 0xb6, 0xe0, 0x90, 0xfa, 0xbb, 0xf0, 0x54, 0xa0,
+0x22, 0x75, 0x39, 0x00, 0x75, 0x3a, 0x01, 0x02, 0x2c, 0xd8, 0x90, 0xfa, 0xb9, 0xe0, 0xff, 0x02,
+0x32, 0x3f, 0x8e, 0x39, 0x8f, 0x3a, 0x02, 0x2c, 0xd8, 0x12, 0x23, 0x61, 0x7e, 0x00, 0x8e, 0x30,
+0x8f, 0x31, 0xef, 0x22, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x90, 0xfa, 0xb4, 0xe0, 0x22, 0xef, 0x90,
+0xf8, 0x04, 0xf0, 0x22, 0xc0, 0xa8, 0xc2, 0xaf, 0xee, 0x60, 0x0a, 0xc0, 0x05, 0x7d, 0x7f, 0xdd,
+0xfe, 0xde, 0xfa, 0xd0, 0x05, 0xef, 0xc3, 0x94, 0x15, 0x50, 0x03, 0xd0, 0xa8, 0x22, 0x13, 0x70,
+0x03, 0xd0, 0xa8, 0x22, 0xff, 0xd5, 0x07, 0xfd, 0xd0, 0xa8, 0x22, 0xc0, 0x00, 0xc0, 0x01, 0xc0,
+0x02, 0xc0, 0x04, 0xc0, 0x05, 0xe5, 0x3e, 0x24, 0x08, 0xf8, 0x86, 0x05, 0x53, 0x05, 0x7f, 0x7c,
+0xff, 0x12, 0x10, 0xc0, 0x7f, 0x00, 0x7e, 0x00, 0xe5, 0x43, 0x60, 0x46, 0xfc, 0x90, 0xf9, 0x1d,
+0xe0, 0x54, 0x7f, 0x6d, 0x70, 0x0f, 0xc0, 0x83, 0xc0, 0x82, 0xa3, 0xe0, 0xfe, 0xa3, 0xe0, 0xff,
+0xa3, 0x15, 0x43, 0x80, 0x07, 0xa3, 0xa3, 0xa3, 0xdc, 0xe6, 0x80, 0x26, 0xdc, 0x06, 0xd0, 0x82,
+0xd0, 0x83, 0x80, 0x1e, 0xe0, 0xf8, 0xa3, 0xe0, 0xf9, 0xa3, 0xe0, 0xfa, 0xd0, 0x82, 0xd0, 0x83,
+0xe8, 0xf0, 0xa3, 0xe9, 0xf0, 0xa3, 0xea, 0xf0, 0xa3, 0xc0, 0x83, 0xc0, 0x82, 0xa3, 0xa3, 0xa3,
+0x80, 0xda, 0x12, 0x11, 0x57, 0xd0, 0x05, 0xd0, 0x04, 0xd0, 0x02, 0xd0, 0x01, 0xd0, 0x00, 0x22,
+0x85, 0xa8, 0x44, 0x75, 0xa8, 0x88, 0xec, 0x70, 0x02, 0x7c, 0x3f, 0x8c, 0x3d, 0x22, 0xe5, 0x3e,
+0x24, 0x08, 0xf8, 0x76, 0x00, 0x12, 0x11, 0xae, 0x80, 0xfb, 0xc0, 0x00, 0xc0, 0x01, 0xc0, 0x02,
+0xc0, 0x04, 0xc0, 0x06, 0x7c, 0xff, 0x12, 0x10, 0xc0, 0xe5, 0x43, 0x60, 0x42, 0xfe, 0x90, 0xf9,
+0x1d, 0xe0, 0x54, 0x7f, 0x6f, 0x70, 0x0b, 0xc0, 0x83, 0xc0, 0x82, 0xa3, 0xa3, 0xa3, 0x15, 0x43,
+0x80, 0x07, 0xa3, 0xa3, 0xa3, 0xde, 0xea, 0x80, 0x26, 0xde, 0x06, 0xd0, 0x82, 0xd0, 0x83, 0x80,
+0xd8, 0xe0, 0xf8, 0xa3, 0xe0, 0xf9, 0xa3, 0xe0, 0xfa, 0xd0, 0x82, 0xd0, 0x83, 0xe8, 0xf0, 0xa3,
+0xe9, 0xf0, 0xa3, 0xea, 0xf0, 0xa3, 0xc0, 0x83, 0xc0, 0x82, 0xa3, 0xa3, 0xa3, 0x80, 0xda, 0x78,
+0x08, 0x08, 0x79, 0x18, 0x09, 0x7c, 0x01, 0xe6, 0x54, 0x7f, 0x6f, 0x70, 0x06, 0x76, 0x00, 0x77,
+0x00, 0x80, 0x06, 0x08, 0x09, 0x0c, 0xbc, 0x08, 0xee, 0x12, 0x11, 0x57, 0xd0, 0x06, 0xd0, 0x04,
+0xd0, 0x02, 0xd0, 0x01, 0xd0, 0x00, 0x22, 0x75, 0x3d, 0x00, 0x85, 0x44, 0xa8, 0x22, 0xc0, 0xf0,
+0xc0, 0x82, 0xc0, 0x83, 0xc3, 0xe5, 0x43, 0x24, 0xe8, 0x50, 0x05, 0x12, 0x11, 0xae, 0x80, 0xf4,
+0xef, 0x60, 0x31, 0x90, 0x31, 0x11, 0xe4, 0x93, 0xc3, 0x9f, 0x40, 0x2f, 0xc0, 0x04, 0x7c, 0xff,
+0x12, 0x10, 0xc0, 0xd0, 0x04, 0x43, 0x07, 0x80, 0xe5, 0x43, 0x75, 0xf0, 0x03, 0xa4, 0x24, 0x1d,
+0xf5, 0x82, 0xe4, 0x34, 0xf9, 0xf5, 0x83, 0xef, 0xf0, 0xec, 0xa3, 0xf0, 0xed, 0xa3, 0xf0, 0x05,
+0x43, 0x12, 0x11, 0x57, 0xd0, 0x83, 0xd0, 0x82, 0xd0, 0xf0, 0x22, 0x02, 0x11, 0xdc, 0xc0, 0x04,
+0x7c, 0x20, 0xd2, 0x8c, 0xd2, 0x8d, 0xd5, 0x04, 0xfd, 0xd0, 0x04, 0x22, 0x75, 0xa8, 0x00, 0x75,
+0x88, 0x00, 0x75, 0xb8, 0x00, 0x75, 0xf0, 0x00, 0x75, 0xd0, 0x00, 0xe4, 0xf8, 0x90, 0xf8, 0x04,
+0xf0, 0x90, 0x00, 0x00, 0xf6, 0x08, 0xb8, 0x00, 0xfb, 0x02, 0x00, 0x00, 0xc2, 0xaf, 0xe4, 0x90,
+0xff, 0x48, 0xf0, 0x90, 0xff, 0x50, 0xf0, 0x90, 0xff, 0x08, 0xf0, 0x90, 0xff, 0x10, 0xf0, 0x90,
+0xff, 0x80, 0xf0, 0xa3, 0xa3, 0xf0, 0xd2, 0xb1, 0xc2, 0xb0, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x10,
+0x24, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x10, 0x24, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x10, 0x24, 0xd2,
+0xb0, 0xd2, 0xb1, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x10, 0x24, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x10,
+0x24, 0x7e, 0xff, 0x7f, 0xff, 0x12, 0x10, 0x24, 0x80, 0xcc, 0xc3, 0xee, 0x94, 0x02, 0x50, 0x04,
+0x7e, 0x03, 0x7f, 0xe8, 0xef, 0xf4, 0xff, 0xee, 0xf4, 0xfe, 0x0f, 0xbf, 0x00, 0x01, 0x0e, 0x8f,
+0x42, 0x8e, 0x41, 0x22, 0xc3, 0xef, 0x94, 0xbc, 0xee, 0x94, 0x02, 0x50, 0x04, 0x7e, 0x07, 0x7f,
+0xd0, 0xef, 0xf4, 0xff, 0xee, 0xf4, 0xfe, 0x0f, 0xbf, 0x00, 0x01, 0x0e, 0x8f, 0x40, 0x8e, 0x3f,
+0x22, 0xef, 0x70, 0x01, 0x22, 0xc0, 0x00, 0xc0, 0xa8, 0xc2, 0xaf, 0xe5, 0x3e, 0x24, 0x18, 0xf8,
+0xa6, 0x07, 0xe5, 0x3e, 0x24, 0x08, 0xf8, 0xc6, 0x54, 0x7f, 0xf6, 0xd0, 0xa8, 0xe6, 0x30, 0xe7,
+0x03, 0xd0, 0x00, 0x22, 0x12, 0x11, 0xae, 0x80, 0xf4, 0xc0, 0x00, 0x7f, 0x01, 0xef, 0x24, 0x08,
+0xf8, 0xe6, 0x60, 0x09, 0x0f, 0xbf, 0x08, 0xf5, 0x12, 0x11, 0xae, 0x80, 0xee, 0xd0, 0x00, 0x22,
+0xc0, 0xf0, 0xc0, 0x82, 0xc0, 0x83, 0xc0, 0x00, 0xc0, 0x06, 0xc0, 0x04, 0xed, 0x24, 0x10, 0xf8,
+0x76, 0x9a, 0xed, 0x75, 0xf0, 0x21, 0xa4, 0x24, 0x05, 0xf5, 0x82, 0xe4, 0x34, 0xf8, 0xf5, 0x83,
+0xc0, 0x82, 0xc0, 0x83, 0xa3, 0xa3, 0xe4, 0x78, 0x0d, 0xf0, 0xa3, 0xd8, 0xfc, 0xef, 0x54, 0x7f,
+0x75, 0xf0, 0x02, 0xa4, 0x24, 0xf3, 0xf5, 0x82, 0xe5, 0xf0, 0x34, 0x30, 0xf5, 0x83, 0xe4, 0x93,
+0xfe, 0x74, 0x01, 0x93, 0xfc, 0xd0, 0x83, 0xd0, 0x82, 0xec, 0xf0, 0xa3, 0xee, 0xf0, 0xed, 0x24,
+0x08, 0xf8, 0xef, 0x44, 0x80, 0xf6, 0xd0, 0x04, 0xd0, 0x06, 0xd0, 0x00, 0xd0, 0x83, 0xd0, 0x82,
+0xd0, 0xf0, 0x22, 0x75, 0x3e, 0x00, 0x75, 0x43, 0x00, 0x7a, 0x08, 0x79, 0x18, 0x78, 0x08, 0x76,
+0x00, 0x77, 0x00, 0x08, 0x09, 0xda, 0xf8, 0x90, 0xf8, 0x04, 0xe0, 0xfc, 0x90, 0x31, 0x11, 0xe4,
+0x93, 0xc3, 0x9c, 0x50, 0x05, 0xe4, 0x90, 0xf8, 0x04, 0xf0, 0x78, 0x08, 0x74, 0x80, 0x44, 0x7f,
+0xf6, 0x74, 0x01, 0x44, 0x10, 0xf5, 0x89, 0x75, 0xb8, 0x00, 0xd2, 0xab, 0xd2, 0xa9, 0x22, 0x75,
+0x81, 0x8b, 0xd2, 0x8e, 0xd2, 0x8c, 0xd2, 0xaf, 0xe5, 0x43, 0x60, 0x36, 0xff, 0x90, 0xf9, 0x1d,
+0xe0, 0x54, 0x80, 0x60, 0x28, 0x78, 0x08, 0x79, 0x08, 0xe0, 0x54, 0x7f, 0xfa, 0x7b, 0x00, 0xe6,
+0x54, 0x7f, 0xb5, 0x02, 0x02, 0x7b, 0xff, 0x08, 0xd9, 0xf5, 0xeb, 0x70, 0x10, 0xea, 0xf0, 0xc0,
+0x07, 0x12, 0x12, 0x89, 0xad, 0x07, 0xaf, 0x02, 0x12, 0x12, 0xa0, 0xd0, 0x07, 0xa3, 0xa3, 0xa3,
+0xdf, 0xce, 0x12, 0x11, 0xae, 0x80, 0xc1, 0x8f, 0x24, 0x12, 0x2a, 0xc7, 0x12, 0x22, 0xb5, 0xa3,
+0xa3, 0xe0, 0xa3, 0x30, 0xe7, 0x28, 0x78, 0x7e, 0x12, 0x22, 0x99, 0xe0, 0x44, 0x01, 0xf0, 0x12,
+0x22, 0xfa, 0x12, 0x22, 0x9d, 0xe0, 0x20, 0xe0, 0xf6, 0x12, 0x23, 0x50, 0x74, 0x02, 0xf0, 0x12,
+0x22, 0xda, 0xe0, 0xa3, 0x30, 0xe5, 0x07, 0x12, 0x23, 0x50, 0xe0, 0x44, 0x01, 0xf0, 0x78, 0x80,
+0xe6, 0xfe, 0x08, 0xe6, 0xff, 0x8e, 0x83, 0x24, 0x08, 0x12, 0x22, 0xa1, 0xe0, 0xfd, 0x12, 0x23,
+0x39, 0x8a, 0x83, 0x24, 0x0a, 0x12, 0x22, 0xa1, 0xed, 0xf0, 0x12, 0x23, 0x06, 0x24, 0x07, 0x12,
+0x22, 0xa1, 0xe0, 0xff, 0x12, 0x23, 0x5a, 0x24, 0x09, 0x12, 0x22, 0xa1, 0xef, 0xf0, 0x90, 0xf9,
+0x16, 0xe0, 0x30, 0xe4, 0x20, 0x08, 0x12, 0x22, 0xb7, 0xc0, 0x83, 0xc0, 0x82, 0xa3, 0xe0, 0x25,
+0xe0, 0xff, 0x05, 0x82, 0xd5, 0x82, 0x02, 0x15, 0x83, 0x15, 0x82, 0xe0, 0x33, 0xd0, 0x82, 0xd0,
+0x83, 0xf0, 0xa3, 0xef, 0xf0, 0x12, 0x22, 0xb5, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0xec, 0xff, 0x12,
+0x23, 0x39, 0x8a, 0x83, 0x24, 0x08, 0x12, 0x22, 0xa1, 0xef, 0xf0, 0xed, 0x12, 0x23, 0x5a, 0x24,
+0x07, 0x12, 0x22, 0xa1, 0xed, 0xf0, 0x12, 0x22, 0xa9, 0xe0, 0x30, 0xe6, 0x0a, 0x12, 0x23, 0x41,
+0x24, 0x09, 0x12, 0x22, 0xa1, 0xe4, 0xf0, 0x12, 0x22, 0xa9, 0xe0, 0xff, 0x30, 0xe7, 0x1b, 0x12,
+0x23, 0x1e, 0x24, 0x09, 0x12, 0x22, 0xa1, 0xe0, 0x60, 0x09, 0x12, 0x22, 0xa9, 0xef, 0x44, 0x02,
+0xf0, 0x80, 0x07, 0x12, 0x22, 0xa9, 0xef, 0x54, 0xfd, 0xf0, 0x78, 0x7e, 0x12, 0x22, 0xb7, 0xa3,
+0xa3, 0xe0, 0xff, 0x53, 0x07, 0xc7, 0x08, 0xe6, 0xfc, 0x08, 0xe6, 0xfd, 0x12, 0x22, 0xe0, 0xa3,
+0xe0, 0x30, 0xe3, 0x12, 0x8d, 0x82, 0x8c, 0x83, 0xe5, 0x82, 0x24, 0x05, 0x12, 0x22, 0xa1, 0xe0,
+0x90, 0x32, 0x51, 0x93, 0x42, 0x07, 0x53, 0x07, 0xfb, 0x12, 0x23, 0x1e, 0x24, 0x06, 0x12, 0x22,
+0xa1, 0xe0, 0x60, 0x03, 0x43, 0x07, 0x04, 0x53, 0x07, 0xfc, 0x78, 0x80, 0x12, 0x23, 0x29, 0x24,
+0x04, 0x12, 0x22, 0xa1, 0xe0, 0x42, 0x07, 0x43, 0x07, 0x80, 0x12, 0x23, 0x39, 0xf5, 0x82, 0x8a,
+0x83, 0xa3, 0xa3, 0xef, 0xf0, 0x12, 0x23, 0x5a, 0x24, 0x04, 0x12, 0x22, 0xa1, 0xe0, 0xff, 0x8d,
+0x82, 0x8c, 0x83, 0xa3, 0xa3, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0x30, 0xe1, 0x05, 0x53, 0x07, 0xdf,
+0x80, 0x03, 0x43, 0x07, 0x20, 0xec, 0x30, 0xe4, 0x05, 0x53, 0x07, 0xef, 0x80, 0x03, 0x43, 0x07,
+0x10, 0x12, 0x22, 0xa9, 0xe0, 0xfe, 0x54, 0x03, 0x60, 0x73, 0x53, 0x07, 0xdf, 0xee, 0x30, 0xe1,
+0x69, 0x12, 0x23, 0x1e, 0x24, 0x09, 0x12, 0x22, 0xa1, 0xe0, 0x12, 0x1b, 0xfc, 0x15, 0x2c, 0x00,
+0x15, 0x60, 0x01, 0x15, 0x65, 0x03, 0x15, 0x60, 0x05, 0x15, 0x65, 0x07, 0x15, 0x60, 0x09, 0x15,
+0x65, 0x0b, 0x15, 0x60, 0x0d, 0x15, 0x65, 0x0f, 0x00, 0x00, 0x15, 0x6d, 0xe5, 0x24, 0x64, 0x03,
+0x70, 0x21, 0x90, 0xf9, 0x16, 0xe0, 0x30, 0xe2, 0x0d, 0x30, 0xb4, 0x05, 0x43, 0x07, 0x02, 0x80,
+0x2c, 0x53, 0x07, 0xfd, 0x80, 0x27, 0x30, 0x95, 0x05, 0x43, 0x07, 0x02, 0x80, 0x1f, 0x53, 0x07,
+0xfd, 0x80, 0x1a, 0x30, 0x93, 0x05, 0x43, 0x07, 0x02, 0x80, 0x12, 0x53, 0x07, 0xfd, 0x80, 0x0d,
+0x43, 0x07, 0x02, 0x80, 0x08, 0x53, 0x07, 0xfd, 0x80, 0x03, 0x53, 0x07, 0xfd, 0x12, 0x23, 0x27,
+0x24, 0x04, 0x12, 0x22, 0xa1, 0xef, 0xf0, 0x8d, 0x82, 0x8c, 0x83, 0xa3, 0xa3, 0xa3, 0xe0, 0xff,
+0x12, 0x22, 0xa9, 0xe0, 0xfe, 0x54, 0x03, 0x70, 0x03, 0x02, 0x16, 0x60, 0xee, 0x20, 0xe1, 0x03,
+0x02, 0x16, 0x5d, 0x08, 0x12, 0x23, 0x20, 0x24, 0x09, 0x12, 0x22, 0xa1, 0xe0, 0x12, 0x1b, 0xfc,
+0x15, 0xbf, 0x00, 0x15, 0xf5, 0x01, 0x15, 0xf5, 0x03, 0x16, 0x29, 0x05, 0x16, 0x29, 0x07, 0x16,
+0x0f, 0x09, 0x16, 0x0f, 0x0b, 0x16, 0x43, 0x0d, 0x16, 0x43, 0x0f, 0x00, 0x00, 0x16, 0x60, 0xe5,
+0x24, 0x64, 0x03, 0x70, 0x23, 0x90, 0xf9, 0x16, 0xe0, 0x30, 0xe2, 0x0f, 0x30, 0xb1, 0x06, 0x53,
+0x07, 0x7f, 0x02, 0x16, 0x60, 0x43, 0x07, 0x80, 0x02, 0x16, 0x60, 0x30, 0x94, 0x05, 0x53, 0x07,
+0x7f, 0x80, 0x7d, 0x43, 0x07, 0x80, 0x80, 0x78, 0x30, 0x92, 0x05, 0x53, 0x07, 0x7f, 0x80, 0x70,
+0x43, 0x07, 0x80, 0x80, 0x6b, 0xe5, 0x24, 0xb4, 0x03, 0x09, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xef,
+0xf0, 0x80, 0x07, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xdf, 0xf0, 0x53, 0x07, 0x7f, 0x80, 0x51, 0xe5,
+0x24, 0xb4, 0x03, 0x09, 0x90, 0xff, 0x9e, 0xe0, 0x44, 0x10, 0xf0, 0x80, 0x07, 0x90, 0xff, 0x9e,
+0xe0, 0x44, 0x20, 0xf0, 0x53, 0x07, 0x7f, 0x80, 0x37, 0xe5, 0x24, 0xb4, 0x03, 0x09, 0x90, 0xff,
+0x9e, 0xe0, 0x54, 0xef, 0xf0, 0x80, 0x07, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xdf, 0xf0, 0x43, 0x07,
+0x80, 0x80, 0x1d, 0xe5, 0x24, 0xb4, 0x03, 0x09, 0x90, 0xff, 0x9e, 0xe0, 0x44, 0x10, 0xf0, 0x80,
+0x07, 0x90, 0xff, 0x9e, 0xe0, 0x44, 0x20, 0xf0, 0x43, 0x07, 0x80, 0x80, 0x03, 0x53, 0x07, 0x7f,
+0x12, 0x22, 0xda, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0x30, 0xe0, 0x05, 0x43, 0x07, 0x20, 0x80, 0x03,
+0x53, 0x07, 0xdf, 0xec, 0x30, 0xe3, 0x05, 0x43, 0x07, 0x40, 0x80, 0x03, 0x53, 0x07, 0xbf, 0xec,
+0x30, 0xe0, 0x05, 0x43, 0x07, 0x10, 0x80, 0x03, 0x53, 0x07, 0xef, 0xed, 0x30, 0xe4, 0x05, 0x43,
+0x07, 0x08, 0x80, 0x03, 0x53, 0x07, 0xf7, 0xed, 0x30, 0xe5, 0x05, 0x43, 0x07, 0x04, 0x80, 0x03,
+0x53, 0x07, 0xfb, 0xed, 0x30, 0xe6, 0x05, 0x43, 0x07, 0x01, 0x80, 0x03, 0x53, 0x07, 0xfe, 0xed,
+0x30, 0xe7, 0x05, 0x43, 0x07, 0x02, 0x80, 0x03, 0x53, 0x07, 0xfd, 0x78, 0x7e, 0x12, 0x22, 0xdc,
+0xa3, 0xef, 0xf0, 0x12, 0x32, 0x84, 0x7f, 0x00, 0x22, 0x90, 0xff, 0xfa, 0x74, 0x08, 0xf0, 0xa3,
+0x74, 0x16, 0xf0, 0x90, 0xff, 0xf9, 0x74, 0x02, 0xf0, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xcf, 0xe4,
+0xfd, 0x12, 0x23, 0x61, 0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x03, 0x12, 0x1b, 0x1c, 0x12, 0x19,
+0x92, 0xe5, 0x23, 0x30, 0xe7, 0x02, 0xd2, 0x02, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x24, 0x90, 0xfa,
+0xcf, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x90, 0xfa, 0xcf,
+0xe4, 0xf0, 0xa3, 0x74, 0x0b, 0xf0, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23, 0x75, 0x2d, 0x00, 0xf5,
+0x2e, 0x7d, 0x01, 0x12, 0x26, 0x98, 0xe5, 0x23, 0x24, 0x80, 0x90, 0xff, 0xf8, 0xf0, 0xe5, 0x23,
+0x64, 0x07, 0x60, 0x1e, 0xe5, 0x23, 0x64, 0x06, 0x60, 0x18, 0xe5, 0x23, 0x64, 0x14, 0x60, 0x12,
+0xe5, 0x23, 0x64, 0x41, 0x60, 0x0c, 0xe5, 0x23, 0x64, 0x1a, 0x70, 0x46, 0xe5, 0x24, 0x64, 0x02,
+0x70, 0x40, 0xe5, 0x23, 0xb4, 0x07, 0x16, 0xd2, 0x94, 0xd2, 0x95, 0xd2, 0x92, 0xd2, 0x93, 0x90,
+0xf9, 0x16, 0xe0, 0x44, 0x02, 0xf0, 0xa3, 0xe0, 0x44, 0x02, 0xf0, 0x80, 0x1e, 0xe5, 0x23, 0xb4,
+0x41, 0x12, 0x90, 0xf9, 0x16, 0xe0, 0x44, 0x06, 0xf0, 0xa3, 0xe0, 0x44, 0x06, 0xf0, 0xd2, 0xb1,
+0xd2, 0xb4, 0x80, 0x07, 0x90, 0xf9, 0x16, 0xe0, 0x44, 0x01, 0xf0, 0x90, 0xf9, 0x17, 0xe0, 0x44,
+0x01, 0xf0, 0xe5, 0x23, 0x64, 0x42, 0x60, 0x0c, 0xe5, 0x23, 0x64, 0x43, 0x60, 0x06, 0xe5, 0x23,
+0x64, 0x44, 0x70, 0x2e, 0x90, 0xf9, 0x16, 0xe0, 0xff, 0xe5, 0x23, 0xb4, 0x44, 0x04, 0x7e, 0x40,
+0x80, 0x02, 0x7e, 0x00, 0xee, 0x24, 0x80, 0x4f, 0x90, 0xf9, 0x16, 0xf0, 0xa3, 0xe0, 0xff, 0xe5,
+0x23, 0xb4, 0x44, 0x04, 0x7e, 0x40, 0x80, 0x02, 0x7e, 0x00, 0xee, 0x24, 0x80, 0x4f, 0x90, 0xf9,
+0x17, 0xf0, 0x90, 0xfa, 0xcf, 0xe4, 0xf0, 0xa3, 0x74, 0x0d, 0xf0, 0x12, 0x19, 0x92, 0x90, 0xff,
+0xf5, 0xe5, 0x23, 0xf0, 0xe4, 0xf5, 0x35, 0xf5, 0x33, 0xf5, 0x34, 0xf5, 0x32, 0x12, 0x1e, 0x34,
+0x12, 0x1c, 0xe0, 0x12, 0x1e, 0x3b, 0x90, 0xf9, 0x6a, 0x12, 0x1b, 0xf3, 0x90, 0xf9, 0x6f, 0x12,
+0x1b, 0xf3, 0x90, 0xff, 0xff, 0xe4, 0xf0, 0x90, 0xff, 0x83, 0xe0, 0xe4, 0xf0, 0x90, 0xff, 0x81,
0x74, 0x80, 0xf0, 0xa3, 0x74, 0x84, 0xf0, 0x90, 0xff, 0x80, 0xf0, 0xe4, 0xf5, 0x23, 0xe5, 0x23,
-0x12, 0x1c, 0xa7, 0xf5, 0x83, 0xe4, 0xf0, 0xe5, 0x23, 0x12, 0x1c, 0xb5, 0xf5, 0x83, 0xe4, 0xf0,
-0x05, 0x23, 0xe5, 0x23, 0xb4, 0x07, 0xe7, 0x78, 0x7a, 0x76, 0xfe, 0x08, 0x76, 0xf0, 0x90, 0x31,
-0x4d, 0xe4, 0x93, 0xff, 0x78, 0x78, 0xf6, 0xfd, 0xad, 0x07, 0x90, 0x31, 0x5a, 0xe4, 0x93, 0xff,
-0x08, 0xf6, 0xff, 0xed, 0x54, 0x0f, 0xfd, 0x12, 0x1c, 0x97, 0x74, 0x84, 0xf0, 0xed, 0x75, 0xf0,
+0x12, 0x1d, 0x57, 0xf5, 0x83, 0xe4, 0xf0, 0xe5, 0x23, 0x12, 0x1d, 0x65, 0xf5, 0x83, 0xe4, 0xf0,
+0x05, 0x23, 0xe5, 0x23, 0xb4, 0x07, 0xe7, 0x78, 0x7a, 0x76, 0xfe, 0x08, 0x76, 0xf0, 0x90, 0x32,
+0x0a, 0xe4, 0x93, 0xff, 0x78, 0x78, 0xf6, 0xfd, 0xad, 0x07, 0x90, 0x32, 0x17, 0xe4, 0x93, 0xff,
+0x08, 0xf6, 0xff, 0xed, 0x54, 0x0f, 0xfd, 0x12, 0x1d, 0x47, 0x74, 0x84, 0xf0, 0xed, 0x75, 0xf0,
0x08, 0xa4, 0x24, 0x47, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0xef, 0xf0, 0xc3, 0x74, 0xf0,
-0x9f, 0x78, 0x7b, 0xf6, 0x74, 0xfe, 0x94, 0x00, 0x18, 0x12, 0x1c, 0x28, 0xce, 0xc3, 0x13, 0xce,
-0x13, 0xd8, 0xf9, 0xff, 0xed, 0x12, 0x1c, 0xf8, 0xef, 0xf0, 0xed, 0x12, 0x1d, 0x1e, 0xe4, 0xf5,
-0x23, 0xe5, 0x23, 0x90, 0x31, 0x47, 0x93, 0xff, 0x78, 0x78, 0xf6, 0xfd, 0xe5, 0x23, 0x25, 0xe0,
-0x24, 0x4e, 0xf5, 0x82, 0xe4, 0x34, 0x31, 0xf5, 0x83, 0xe4, 0x93, 0x08, 0xf6, 0xed, 0x30, 0xe7,
-0x53, 0x18, 0xe6, 0x54, 0x0f, 0xf9, 0x12, 0x1c, 0x97, 0x12, 0x1d, 0x06, 0x24, 0x47, 0xf5, 0x82,
-0xe4, 0x34, 0xff, 0x12, 0x1c, 0x18, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8, 0xf9, 0xff, 0xe9, 0x12,
-0x1c, 0xf8, 0xef, 0xf0, 0x12, 0x1c, 0x1f, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8, 0xf9, 0x12, 0x1d,
-0x0b, 0x24, 0x45, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0xef, 0xf0, 0xe9, 0x12, 0x1d, 0x1e,
+0x9f, 0x78, 0x7b, 0xf6, 0x74, 0xfe, 0x94, 0x00, 0x18, 0x12, 0x1c, 0xd8, 0xce, 0xc3, 0x13, 0xce,
+0x13, 0xd8, 0xf9, 0xff, 0xed, 0x12, 0x1d, 0xa8, 0xef, 0xf0, 0xed, 0x12, 0x1d, 0xce, 0xe4, 0xf5,
+0x23, 0xe5, 0x23, 0x90, 0x32, 0x04, 0x93, 0xff, 0x78, 0x78, 0xf6, 0xfd, 0xe5, 0x23, 0x25, 0xe0,
+0x24, 0x0b, 0xf5, 0x82, 0xe4, 0x34, 0x32, 0xf5, 0x83, 0xe4, 0x93, 0x08, 0xf6, 0xed, 0x30, 0xe7,
+0x53, 0x18, 0xe6, 0x54, 0x0f, 0xf9, 0x12, 0x1d, 0x47, 0x12, 0x1d, 0xb6, 0x24, 0x47, 0xf5, 0x82,
+0xe4, 0x34, 0xff, 0x12, 0x1c, 0xc8, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8, 0xf9, 0xff, 0xe9, 0x12,
+0x1d, 0xa8, 0xef, 0xf0, 0x12, 0x1c, 0xcf, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8, 0xf9, 0x12, 0x1d,
+0xbb, 0x24, 0x45, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0xef, 0xf0, 0xe9, 0x12, 0x1d, 0xce,
0xe9, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x46, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0x74, 0x80,
-0xf0, 0x02, 0x18, 0xb7, 0x78, 0x78, 0xe6, 0x54, 0x0f, 0xf9, 0x12, 0x1c, 0xea, 0x12, 0x1d, 0x06,
-0x24, 0x07, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0x12, 0x1c, 0x18, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8,
-0xf9, 0x12, 0x1d, 0x0b, 0x24, 0x01, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0xef, 0xf0, 0x12,
-0x1c, 0x1f, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8, 0xf9, 0x12, 0x1d, 0x0b, 0x24, 0x05, 0xf5, 0x82,
+0xf0, 0x02, 0x19, 0x67, 0x78, 0x78, 0xe6, 0x54, 0x0f, 0xf9, 0x12, 0x1d, 0x9a, 0x12, 0x1d, 0xb6,
+0x24, 0x07, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0x12, 0x1c, 0xc8, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8,
+0xf9, 0x12, 0x1d, 0xbb, 0x24, 0x01, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0xef, 0xf0, 0x12,
+0x1c, 0xcf, 0xce, 0xc3, 0x13, 0xce, 0x13, 0xd8, 0xf9, 0x12, 0x1d, 0xbb, 0x24, 0x05, 0xf5, 0x82,
0xe4, 0x34, 0xff, 0xf5, 0x83, 0xef, 0xf0, 0xe9, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x02, 0xf5, 0x82,
0xe4, 0x34, 0xff, 0xf5, 0x83, 0xe4, 0xf0, 0xe9, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x06, 0xf5, 0x82,
0xe4, 0x34, 0xff, 0xf5, 0x83, 0xe4, 0xf0, 0x05, 0x23, 0xe5, 0x23, 0x64, 0x04, 0x60, 0x03, 0x02,
-0x17, 0xe1, 0x90, 0x31, 0x4c, 0xe4, 0x93, 0xff, 0x78, 0x78, 0xf6, 0x12, 0x1c, 0xe8, 0xe4, 0xf0,
-0x90, 0x31, 0x4b, 0x93, 0xff, 0xf6, 0x12, 0x1c, 0x95, 0xe4, 0xf0, 0x90, 0xff, 0xfd, 0x74, 0x05,
-0xf0, 0x22, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23, 0x90, 0xfa, 0xcc, 0xe4, 0x75, 0xf0, 0x01, 0x12,
-0x1a, 0x82, 0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01, 0x02, 0x25, 0xd7, 0xe7, 0x09, 0xf6, 0x08,
+0x18, 0x91, 0x90, 0x32, 0x09, 0xe4, 0x93, 0xff, 0x78, 0x78, 0xf6, 0x12, 0x1d, 0x98, 0xe4, 0xf0,
+0x90, 0x32, 0x08, 0x93, 0xff, 0xf6, 0x12, 0x1d, 0x45, 0xe4, 0xf0, 0x90, 0xff, 0xfd, 0x74, 0x05,
+0xf0, 0x22, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23, 0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x01, 0x12,
+0x1b, 0x32, 0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01, 0x02, 0x26, 0x98, 0xe7, 0x09, 0xf6, 0x08,
0xdf, 0xfa, 0x80, 0x46, 0xe7, 0x09, 0xf2, 0x08, 0xdf, 0xfa, 0x80, 0x3e, 0x88, 0x82, 0x8c, 0x83,
0xe7, 0x09, 0xf0, 0xa3, 0xdf, 0xfa, 0x80, 0x32, 0xe3, 0x09, 0xf6, 0x08, 0xdf, 0xfa, 0x80, 0x78,
0xe3, 0x09, 0xf2, 0x08, 0xdf, 0xfa, 0x80, 0x70, 0x88, 0x82, 0x8c, 0x83, 0xe3, 0x09, 0xf0, 0xa3,
@@ -445,7 +456,7 @@ static unsigned char IMAGE_ARRAY_NAME[] =
0x82, 0x8a, 0x83, 0xe4, 0x93, 0xa3, 0xf2, 0x08, 0xdf, 0xf9, 0x80, 0xcc, 0x88, 0xf0, 0xef, 0x60,
0x01, 0x0e, 0x4e, 0x60, 0xc3, 0x88, 0xf0, 0xed, 0x24, 0x02, 0xb4, 0x04, 0x00, 0x50, 0xb9, 0xf5,
0x82, 0xeb, 0x24, 0x02, 0xb4, 0x04, 0x00, 0x50, 0xaf, 0x23, 0x23, 0x45, 0x82, 0x23, 0x90, 0x19,
-0x4c, 0x73, 0xbb, 0x01, 0x06, 0x89, 0x82, 0x8a, 0x83, 0xe0, 0x22, 0x50, 0x02, 0xe7, 0x22, 0xbb,
+0xfc, 0x73, 0xbb, 0x01, 0x06, 0x89, 0x82, 0x8a, 0x83, 0xe0, 0x22, 0x50, 0x02, 0xe7, 0x22, 0xbb,
0xfe, 0x02, 0xe3, 0x22, 0x89, 0x82, 0x8a, 0x83, 0xe4, 0x93, 0x22, 0xbb, 0x01, 0x0c, 0xe5, 0x82,
0x29, 0xf5, 0x82, 0xe5, 0x83, 0x3a, 0xf5, 0x83, 0xe0, 0x22, 0x50, 0x06, 0xe9, 0x25, 0x82, 0xf8,
0xe6, 0x22, 0xbb, 0xfe, 0x06, 0xe9, 0x25, 0x82, 0xf8, 0xe2, 0x22, 0xe5, 0x82, 0x29, 0xf5, 0x82,
@@ -469,364 +480,365 @@ static unsigned char IMAGE_ARRAY_NAME[] =
0xe0, 0xf9, 0x22, 0xeb, 0xf0, 0xa3, 0xea, 0xf0, 0xa3, 0xe9, 0xf0, 0x22, 0xd0, 0x83, 0xd0, 0x82,
0xf8, 0xe4, 0x93, 0x70, 0x12, 0x74, 0x01, 0x93, 0x70, 0x0d, 0xa3, 0xa3, 0x93, 0xf8, 0x74, 0x01,
0x93, 0xf5, 0x82, 0x88, 0x83, 0xe4, 0x73, 0x74, 0x02, 0x93, 0x68, 0x60, 0xef, 0xa3, 0xa3, 0xa3,
-0x80, 0xdf, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0xe5, 0x4c, 0x12, 0x1a, 0x38, 0x74, 0x01, 0x25,
+0x80, 0xdf, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0xe5, 0x4c, 0x12, 0x1a, 0xe8, 0x74, 0x01, 0x25,
0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0xab, 0x36, 0xfa, 0xa9, 0x38, 0x74, 0x11, 0x12,
-0x1a, 0x38, 0x74, 0x01, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0x90, 0xff, 0x06,
-0xe0, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x12, 0x1a, 0x38, 0x74, 0x01, 0x25, 0x38, 0xf5, 0x38,
-0xe4, 0x35, 0x37, 0xf5, 0x37, 0xab, 0x36, 0xfa, 0xa9, 0x38, 0xe4, 0x12, 0x1a, 0x38, 0x04, 0x25,
+0x1a, 0xe8, 0x74, 0x01, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0x90, 0xff, 0x06,
+0xe0, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x12, 0x1a, 0xe8, 0x74, 0x01, 0x25, 0x38, 0xf5, 0x38,
+0xe4, 0x35, 0x37, 0xf5, 0x37, 0xab, 0x36, 0xfa, 0xa9, 0x38, 0xe4, 0x12, 0x1a, 0xe8, 0x04, 0x25,
0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0xab, 0x36, 0xfa, 0xa9, 0x38, 0xe4, 0x12, 0x1a,
-0x38, 0x04, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0x90, 0xff, 0x04, 0xe0, 0xab,
-0x36, 0xaa, 0x37, 0xa9, 0x38, 0x12, 0x1a, 0x38, 0x74, 0x01, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35,
-0x37, 0xf5, 0x37, 0x90, 0xff, 0x05, 0xe0, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x12, 0x1a, 0x38,
+0xe8, 0x04, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0x90, 0xff, 0x04, 0xe0, 0xab,
+0x36, 0xaa, 0x37, 0xa9, 0x38, 0x12, 0x1a, 0xe8, 0x74, 0x01, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35,
+0x37, 0xf5, 0x37, 0x90, 0xff, 0x05, 0xe0, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x12, 0x1a, 0xe8,
0x74, 0x01, 0x25, 0x38, 0xf5, 0x38, 0xe4, 0x35, 0x37, 0xf5, 0x37, 0x22, 0xf5, 0x83, 0xe0, 0x54,
0x08, 0xab, 0x36, 0xaa, 0x37, 0xa9, 0x38, 0x22, 0xf5, 0x83, 0xef, 0xf0, 0xfd, 0x7c, 0x00, 0xc3,
0x78, 0x7b, 0xe6, 0x9d, 0xf6, 0x18, 0xe6, 0x9c, 0xf6, 0xe6, 0xfe, 0x08, 0xe6, 0x78, 0x03, 0x22,
-0x75, 0x36, 0x01, 0x75, 0x37, 0xf9, 0x75, 0x38, 0x6f, 0x22, 0xe0, 0x44, 0x04, 0xf0, 0x74, 0x12,
-0x2f, 0xf5, 0x82, 0xe4, 0x34, 0xf9, 0xf5, 0x83, 0xe0, 0x22, 0x90, 0xfa, 0xb9, 0xe0, 0xff, 0x7e,
-0x00, 0xc3, 0x90, 0xfa, 0xbd, 0xe0, 0x9f, 0xf0, 0x90, 0xfa, 0xbc, 0xe0, 0x9e, 0xf0, 0x90, 0xfa,
-0xb4, 0xee, 0x8f, 0xf0, 0x12, 0x1a, 0x6c, 0xef, 0x25, 0x4f, 0xf5, 0x4f, 0xee, 0x35, 0x4e, 0xf5,
-0x4e, 0x22, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xb1, 0x90, 0xfa, 0xb4, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0,
+0x75, 0x36, 0x01, 0x75, 0x37, 0xf9, 0x75, 0x38, 0x72, 0x22, 0xe0, 0x44, 0x04, 0xf0, 0x74, 0x13,
+0x2f, 0xf5, 0x82, 0xe4, 0x34, 0xf9, 0xf5, 0x83, 0xe0, 0x22, 0x90, 0xfa, 0xbc, 0xe0, 0xff, 0x7e,
+0x00, 0xc3, 0x90, 0xfa, 0xc0, 0xe0, 0x9f, 0xf0, 0x90, 0xfa, 0xbf, 0xe0, 0x9e, 0xf0, 0x90, 0xfa,
+0xb7, 0xee, 0x8f, 0xf0, 0x12, 0x1b, 0x1c, 0xef, 0x25, 0x4f, 0xf5, 0x4f, 0xee, 0x35, 0x4e, 0xf5,
+0x4e, 0x22, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xb4, 0x90, 0xfa, 0xb7, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0,
0xf5, 0x2e, 0x22, 0x78, 0x7c, 0xe6, 0xfe, 0x08, 0xe6, 0x8e, 0x83, 0x24, 0x04, 0xf5, 0x82, 0xe4,
0x35, 0x83, 0xf5, 0x83, 0x22, 0x54, 0x0f, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x40, 0xf5, 0x82, 0xe4,
0x34, 0xff, 0xf5, 0x83, 0x22, 0xe5, 0x4d, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x48, 0xf5, 0x82, 0xe4,
0x34, 0xff, 0x22, 0xe5, 0x4d, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x08, 0xf5, 0x82, 0xe4, 0x34, 0xff,
-0x22, 0x90, 0xfa, 0xb6, 0xe0, 0xff, 0x24, 0xfc, 0x22, 0x90, 0xff, 0x00, 0xe0, 0x54, 0x1f, 0x22,
-0x90, 0xfa, 0xbb, 0xe0, 0x90, 0xfa, 0xb7, 0xf0, 0x22, 0x75, 0x33, 0x00, 0x8f, 0x34, 0x90, 0xf9,
-0x6c, 0x12, 0x1b, 0x3a, 0x90, 0x00, 0x02, 0x22, 0x54, 0x0f, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x00,
+0x22, 0x90, 0xfa, 0xb9, 0xe0, 0xff, 0x24, 0xfc, 0x22, 0x90, 0xff, 0x00, 0xe0, 0x54, 0x1f, 0x22,
+0x90, 0xfa, 0xbe, 0xe0, 0x90, 0xfa, 0xba, 0xf0, 0x22, 0x75, 0x33, 0x00, 0x8f, 0x34, 0x90, 0xf9,
+0x6f, 0x12, 0x1b, 0xea, 0x90, 0x00, 0x02, 0x22, 0x54, 0x0f, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x00,
0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0x22, 0x75, 0xf0, 0x08, 0xa4, 0x24, 0x41, 0xf5, 0x82,
0xe4, 0x34, 0xff, 0xf5, 0x83, 0x22, 0x74, 0x80, 0xf0, 0x08, 0xe6, 0xff, 0xe9, 0x75, 0xf0, 0x08,
-0xa4, 0x22, 0x74, 0xaf, 0x25, 0x22, 0xf5, 0x82, 0xe4, 0x34, 0xfa, 0xf5, 0x83, 0x22, 0x75, 0xf0,
+0xa4, 0x22, 0x74, 0xb2, 0x25, 0x22, 0xf5, 0x82, 0xe4, 0x34, 0xfa, 0xf5, 0x83, 0x22, 0x75, 0xf0,
0x08, 0xa4, 0x24, 0x42, 0xf5, 0x82, 0xe4, 0x34, 0xff, 0xf5, 0x83, 0x74, 0x80, 0xf0, 0x22, 0x90,
0xff, 0x82, 0xe0, 0x44, 0x08, 0xf0, 0x22, 0x90, 0xff, 0xfe, 0xe0, 0x44, 0x03, 0xf0, 0x90, 0xff,
0xfc, 0xe0, 0x54, 0xfd, 0xf0, 0x22, 0x78, 0x67, 0xe6, 0x54, 0xfd, 0xf6, 0x90, 0xff, 0xfd, 0x74,
-0x65, 0xf0, 0x22, 0x12, 0x1b, 0x1c, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0x4e, 0x22, 0x7b, 0x01, 0x7a,
-0xfa, 0x79, 0xb4, 0x22, 0x90, 0xff, 0x80, 0xe0, 0x44, 0x08, 0xf0, 0x22, 0x90, 0xff, 0x83, 0xe0,
-0x54, 0x7f, 0xf0, 0x22, 0xe0, 0xff, 0x90, 0xf9, 0x67, 0x02, 0x1b, 0x3a, 0x90, 0xff, 0xa4, 0xe0,
+0x65, 0xf0, 0x22, 0x12, 0x1b, 0xcc, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0x4e, 0x22, 0x7b, 0x01, 0x7a,
+0xfa, 0x79, 0xb7, 0x22, 0x90, 0xff, 0x80, 0xe0, 0x44, 0x08, 0xf0, 0x22, 0x90, 0xff, 0x83, 0xe0,
+0x54, 0x7f, 0xf0, 0x22, 0xe0, 0xff, 0x90, 0xf9, 0x6a, 0x02, 0x1b, 0xea, 0x90, 0xff, 0xa4, 0xe0,
0x44, 0x02, 0xf0, 0x22, 0x75, 0x39, 0x01, 0x75, 0x3a, 0x09, 0x22, 0x7b, 0x01, 0x7a, 0xf9, 0x79,
-0x6f, 0x22, 0xd3, 0xe5, 0x3c, 0x94, 0x08, 0xe5, 0x3b, 0x94, 0x01, 0x22, 0x90, 0xfa, 0xbb, 0xe0,
-0xff, 0x90, 0xfa, 0xb7, 0xf0, 0x22, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xef, 0x22, 0x90, 0xff, 0xb4,
-0xe0, 0x54, 0xef, 0x22, 0x12, 0x10, 0x03, 0x78, 0x88, 0xef, 0xf6, 0x12, 0x2a, 0x06, 0x12, 0x22,
-0x4a, 0x8e, 0x83, 0x24, 0x09, 0x12, 0x21, 0xf3, 0xe0, 0xfd, 0x12, 0x22, 0x2d, 0x90, 0x00, 0x0a,
-0x12, 0x22, 0x52, 0x24, 0x0a, 0x12, 0x21, 0xf3, 0xe0, 0x90, 0x00, 0x0b, 0x12, 0x1a, 0x4a, 0x12,
-0x22, 0x4a, 0xf5, 0x82, 0x8e, 0x83, 0xa3, 0xa3, 0xa3, 0xe0, 0xf5, 0x53, 0x12, 0x22, 0x56, 0x24,
-0x04, 0x12, 0x21, 0xf3, 0xe0, 0xf5, 0x54, 0x8f, 0x82, 0x8e, 0x83, 0xa3, 0xa3, 0xe0, 0xf5, 0x55,
+0x72, 0x22, 0xd3, 0xe5, 0x3c, 0x94, 0x08, 0xe5, 0x3b, 0x94, 0x01, 0x22, 0x90, 0xfa, 0xbe, 0xe0,
+0xff, 0x90, 0xfa, 0xba, 0xf0, 0x22, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xef, 0x22, 0x90, 0xff, 0xb4,
+0xe0, 0x54, 0xef, 0x22, 0x12, 0x10, 0x4b, 0x78, 0x88, 0xef, 0xf6, 0x12, 0x2a, 0xc7, 0x12, 0x22,
+0xfa, 0x8e, 0x83, 0x24, 0x09, 0x12, 0x22, 0xa1, 0xe0, 0xfd, 0x12, 0x22, 0xe8, 0x90, 0x00, 0x0a,
+0x12, 0x23, 0x02, 0x24, 0x0a, 0x12, 0x22, 0xa1, 0xe0, 0x90, 0x00, 0x0b, 0x12, 0x1a, 0xfa, 0x12,
+0x22, 0xfa, 0xf5, 0x82, 0x8e, 0x83, 0xa3, 0xa3, 0xa3, 0xe0, 0xf5, 0x53, 0x12, 0x23, 0x06, 0x24,
+0x04, 0x12, 0x22, 0xa1, 0xe0, 0xf5, 0x54, 0x8f, 0x82, 0x8e, 0x83, 0xa3, 0xa3, 0xe0, 0xf5, 0x55,
0xe5, 0x53, 0xc4, 0x13, 0x13, 0x13, 0x54, 0x01, 0x78, 0x88, 0xf6, 0xd3, 0x94, 0x00, 0x40, 0x06,
-0xe5, 0x54, 0x30, 0xe1, 0x01, 0x06, 0x78, 0x88, 0xe6, 0x12, 0x22, 0x2c, 0x90, 0x00, 0x0c, 0xef,
-0x12, 0x1a, 0x4a, 0x78, 0x80, 0x12, 0x22, 0x09, 0xa3, 0xa3, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0x53,
-0x07, 0x0c, 0x53, 0x06, 0xe6, 0xe5, 0x53, 0x30, 0xe5, 0x03, 0x43, 0x07, 0x01, 0xe5, 0x54, 0x20,
-0xe5, 0x0e, 0xe5, 0x53, 0x54, 0x7f, 0x70, 0x08, 0xe5, 0x53, 0x20, 0xe7, 0x03, 0x43, 0x07, 0x02,
-0xe5, 0x53, 0x30, 0xe3, 0x03, 0x43, 0x07, 0x10, 0xe5, 0x53, 0x30, 0xe2, 0x03, 0x43, 0x07, 0x20,
-0xe5, 0x53, 0x54, 0x03, 0x60, 0x03, 0x43, 0x07, 0x40, 0xe5, 0x53, 0x30, 0xe1, 0x03, 0x43, 0x07,
-0x80, 0xe5, 0x53, 0x30, 0xe4, 0x03, 0x43, 0x06, 0x01, 0xe5, 0x53, 0x30, 0xe6, 0x03, 0x43, 0x06,
-0x08, 0xe5, 0x54, 0x20, 0xe4, 0x0e, 0xe5, 0x53, 0x54, 0x7f, 0x70, 0x08, 0xe5, 0x53, 0x20, 0xe7,
-0x03, 0x43, 0x06, 0x10, 0x53, 0x07, 0xfb, 0x53, 0x06, 0x79, 0x90, 0x00, 0x05, 0xee, 0x8f, 0xf0,
-0x12, 0x1a, 0xef, 0xe5, 0x55, 0x30, 0xe3, 0x12, 0x54, 0x30, 0xff, 0xc4, 0x54, 0x0f, 0x12, 0x22,
-0x2c, 0x90, 0x00, 0x08, 0xef, 0x12, 0x1a, 0x4a, 0x80, 0x0a, 0x12, 0x22, 0x2d, 0x90, 0x00, 0x08,
-0xe4, 0x12, 0x1a, 0x4a, 0xe5, 0x55, 0x54, 0x03, 0x12, 0x22, 0x2c, 0x90, 0x00, 0x07, 0xef, 0x12,
-0x1a, 0x4a, 0xe5, 0x55, 0x54, 0x04, 0xff, 0xc3, 0x13, 0x90, 0x00, 0x09, 0x12, 0x1a, 0x4a, 0x90,
-0x00, 0x07, 0x12, 0x1a, 0x0b, 0x70, 0x13, 0x12, 0x22, 0x2d, 0xe9, 0x24, 0x09, 0xf9, 0xe4, 0x3a,
-0xfa, 0x12, 0x19, 0xf2, 0xff, 0xc3, 0x13, 0x12, 0x1a, 0x38, 0x12, 0x22, 0x78, 0x24, 0x08, 0x12,
-0x21, 0xf3, 0xe0, 0xfe, 0x8d, 0x82, 0x8c, 0x83, 0xe5, 0x82, 0x24, 0x07, 0x12, 0x21, 0xf3, 0xe0,
-0xfd, 0xee, 0xed, 0x12, 0x22, 0x2c, 0x90, 0x00, 0x03, 0xee, 0x8f, 0xf0, 0x12, 0x1a, 0xef, 0x12,
-0x31, 0xc7, 0x7d, 0x0a, 0xe4, 0xff, 0x12, 0x2f, 0x18, 0x02, 0x10, 0x86, 0x90, 0xfa, 0xe3, 0xe0,
-0xb4, 0x03, 0x06, 0x7e, 0x00, 0x7f, 0x40, 0x80, 0x04, 0x7e, 0x00, 0x7f, 0x08, 0x90, 0xfa, 0xd7,
-0xee, 0xf0, 0xa3, 0xef, 0xf0, 0x90, 0x00, 0x05, 0x12, 0x1a, 0x0b, 0xff, 0x7e, 0x00, 0x90, 0xfa,
-0xd3, 0xee, 0xf0, 0xa3, 0xef, 0xf0, 0x70, 0x03, 0x7f, 0x08, 0x22, 0x90, 0x00, 0x08, 0x12, 0x1a,
-0x98, 0xff, 0x90, 0xfa, 0xd5, 0xe5, 0xf0, 0xf0, 0xa3, 0xef, 0xf0, 0xae, 0x02, 0xaf, 0x01, 0x8e,
-0x50, 0x8f, 0x51, 0x74, 0x0a, 0x25, 0x51, 0xf5, 0x51, 0xe4, 0x35, 0x50, 0xf5, 0x50, 0x90, 0xfa,
-0xd8, 0xe0, 0xff, 0x14, 0xfe, 0x90, 0xfa, 0xd6, 0xe0, 0x5e, 0xfe, 0xc3, 0xef, 0x9e, 0xff, 0x90,
-0xfa, 0xda, 0xf0, 0xc3, 0x90, 0xfa, 0xd4, 0xe0, 0x9f, 0x90, 0xfa, 0xd3, 0xe0, 0x94, 0x00, 0x50,
-0x06, 0xa3, 0xe0, 0x90, 0xfa, 0xda, 0xf0, 0x12, 0x1f, 0xfb, 0x60, 0x03, 0xe0, 0xff, 0x22, 0x12,
-0x2d, 0x5a, 0x90, 0xfa, 0xd3, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0x4e, 0x60, 0x2b, 0x90, 0xfa, 0xd7,
-0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0xd3, 0xef, 0x9d, 0xee, 0x9c, 0x40, 0x07, 0xe0, 0x90, 0xfa, 0xda,
-0xf0, 0x80, 0x08, 0x90, 0xfa, 0xd4, 0xe0, 0x90, 0xfa, 0xda, 0xf0, 0x12, 0x1f, 0xfb, 0x60, 0x03,
-0xe0, 0xff, 0x22, 0x12, 0x2d, 0x5a, 0x80, 0xca, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x52, 0xe4, 0xf5,
-0x2d, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x7f, 0x00, 0x22, 0xaa, 0x50, 0xa9, 0x51, 0x7b,
-0x01, 0x90, 0xfa, 0xd5, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0x90, 0xfa, 0xda, 0xe0, 0xf5, 0x4a, 0x12,
-0x28, 0x9f, 0x90, 0xfa, 0xd9, 0xef, 0xf0, 0x22, 0xef, 0x24, 0xae, 0x60, 0x52, 0x24, 0xfe, 0x60,
-0x2e, 0x24, 0xfe, 0x70, 0x03, 0x02, 0x20, 0xbb, 0x24, 0x06, 0x60, 0x03, 0x02, 0x21, 0x03, 0x78,
-0x71, 0xe6, 0x54, 0xfb, 0xf6, 0x90, 0xff, 0xa5, 0xe0, 0xf5, 0x22, 0x44, 0x0f, 0xf0, 0x74, 0x33,
-0x90, 0xfa, 0x91, 0xf0, 0xe5, 0x22, 0xa3, 0xf0, 0x90, 0xfa, 0xaf, 0x74, 0x01, 0xf0, 0x22, 0x78,
-0x72, 0xe6, 0x54, 0xfb, 0xf6, 0x90, 0xff, 0xb5, 0xe0, 0xf5, 0x22, 0x44, 0x0f, 0xf0, 0x74, 0x43,
-0x90, 0xfa, 0x93, 0xf0, 0xe5, 0x22, 0xa3, 0xf0, 0x90, 0xfa, 0xb0, 0x74, 0x01, 0xf0, 0x22, 0x90,
-0xfa, 0x9d, 0xe0, 0xa3, 0x20, 0xe5, 0x03, 0x02, 0x21, 0x03, 0x90, 0xff, 0xa6, 0xe0, 0x90, 0xfa,
-0xca, 0xf0, 0xa3, 0xf0, 0x90, 0xfa, 0xca, 0xe0, 0xff, 0x54, 0x0f, 0xfe, 0x60, 0x10, 0x90, 0xff,
-0xa6, 0x12, 0x22, 0x5d, 0x90, 0xff, 0xa6, 0xe0, 0x90, 0xfa, 0xca, 0xf0, 0x80, 0xe6, 0x90, 0xfa,
-0xcb, 0xe0, 0xff, 0x74, 0x34, 0xfe, 0x12, 0x2c, 0xb4, 0xef, 0x70, 0x57, 0x90, 0xfa, 0xcb, 0xe0,
-0xff, 0x74, 0x34, 0x90, 0xfa, 0x95, 0xf0, 0xef, 0xa3, 0xf0, 0x22, 0x90, 0xfa, 0xa7, 0xe0, 0xa3,
-0x30, 0xe5, 0x40, 0x90, 0xff, 0xb6, 0xe0, 0x90, 0xfa, 0xca, 0xf0, 0xa3, 0xf0, 0x90, 0xfa, 0xca,
-0xe0, 0xff, 0x54, 0x0f, 0xfe, 0x60, 0x10, 0x90, 0xff, 0xb6, 0x12, 0x22, 0x5d, 0x90, 0xff, 0xb6,
-0xe0, 0x90, 0xfa, 0xca, 0xf0, 0x80, 0xe6, 0x90, 0xfa, 0xcb, 0xe0, 0xff, 0x74, 0x44, 0xfe, 0x12,
-0x2c, 0xb4, 0xef, 0x70, 0x0e, 0x90, 0xfa, 0xcb, 0xe0, 0xff, 0x74, 0x44, 0x90, 0xfa, 0x97, 0xf0,
-0xef, 0xa3, 0xf0, 0x22, 0xc0, 0xe0, 0xc0, 0xf0, 0xc0, 0x83, 0xc0, 0x82, 0xc0, 0xd0, 0x75, 0xd0,
-0x00, 0xc0, 0x00, 0xc0, 0x01, 0xc0, 0x02, 0xc0, 0x03, 0xc0, 0x04, 0xc0, 0x05, 0xc0, 0x06, 0xc0,
-0x07, 0x90, 0xff, 0x92, 0xe0, 0xff, 0x90, 0xfa, 0xc9, 0xf0, 0x90, 0xff, 0x92, 0xe4, 0xf0, 0xef,
-0x12, 0x1b, 0x4c, 0x21, 0xbb, 0x26, 0x21, 0xbb, 0x2e, 0x21, 0x5e, 0x30, 0x21, 0x5e, 0x32, 0x21,
-0x6c, 0x38, 0x21, 0x7e, 0x3a, 0x21, 0xb0, 0x3e, 0x21, 0x9b, 0x44, 0x21, 0x90, 0x46, 0x21, 0xa6,
-0x50, 0x21, 0xa6, 0x52, 0x21, 0xa6, 0x54, 0x21, 0xa6, 0x56, 0x00, 0x00, 0x21, 0xc0, 0x90, 0xfa,
-0xc9, 0xe0, 0xfd, 0x7c, 0x00, 0x7f, 0x01, 0x12, 0x11, 0x16, 0x80, 0x62, 0x7c, 0x00, 0x7d, 0x01,
-0x7f, 0x03, 0x12, 0x11, 0x16, 0x90, 0xff, 0xfe, 0xe0, 0x44, 0x20, 0xf0, 0x80, 0x50, 0x7c, 0x00,
-0x7d, 0x01, 0x7f, 0x02, 0x12, 0x11, 0x16, 0x90, 0xff, 0xfe, 0xe0, 0x44, 0x40, 0xf0, 0x80, 0x3e,
-0x7c, 0x00, 0x7d, 0x01, 0x7f, 0x05, 0x12, 0x11, 0x16, 0x80, 0x33, 0x7c, 0x00, 0x7d, 0x01, 0x7f,
-0x06, 0x12, 0x11, 0x16, 0x80, 0x28, 0x90, 0xfa, 0xc9, 0xe0, 0xff, 0x12, 0x20, 0x18, 0x80, 0x1e,
-0x7c, 0x00, 0x7d, 0x01, 0x7f, 0x04, 0x12, 0x11, 0x16, 0x80, 0x13, 0x12, 0x27, 0x8d, 0x80, 0x0e,
-0x90, 0xfa, 0xc9, 0xe0, 0x24, 0x00, 0xff, 0xe4, 0x34, 0xff, 0xfe, 0x12, 0x2c, 0xb4, 0xd0, 0x07,
-0xd0, 0x06, 0xd0, 0x05, 0xd0, 0x04, 0xd0, 0x03, 0xd0, 0x02, 0xd0, 0x01, 0xd0, 0x00, 0xd0, 0xd0,
-0xd0, 0x82, 0xd0, 0x83, 0xd0, 0xf0, 0xd0, 0xe0, 0x32, 0x78, 0x7c, 0xe6, 0xfe, 0x08, 0xe6, 0x24,
-0x04, 0x8e, 0x83, 0xf5, 0x82, 0xe4, 0x35, 0x83, 0xf5, 0x83, 0x22, 0x74, 0x12, 0x25, 0x24, 0xf5,
-0x82, 0xe4, 0x34, 0xf9, 0xf5, 0x83, 0x22, 0x78, 0x7c, 0xe6, 0xfe, 0x08, 0xe6, 0xf5, 0x82, 0x8e,
-0x83, 0x22, 0x78, 0x80, 0xe6, 0xfe, 0x08, 0xe6, 0xaa, 0x06, 0xf8, 0xac, 0x02, 0x7d, 0x01, 0x7b,
-0xff, 0x7a, 0x31, 0x79, 0x99, 0x7e, 0x00, 0x7f, 0x0a, 0x02, 0x19, 0xcc, 0xff, 0x90, 0xf9, 0x6c,
-0x02, 0x1b, 0x3a, 0x90, 0xf9, 0x67, 0x12, 0x1b, 0x3a, 0x90, 0x00, 0x04, 0x02, 0x1a, 0x0b, 0xe6,
-0xfc, 0x08, 0xe6, 0xf5, 0x82, 0x8c, 0x83, 0xa3, 0xa3, 0x22, 0x78, 0x7e, 0xe6, 0xfe, 0x08, 0xe6,
-0xff, 0x22, 0xed, 0x12, 0x1a, 0x4a, 0x8f, 0x82, 0x8e, 0x83, 0xe5, 0x82, 0x22, 0xef, 0xf0, 0x90,
-0xfa, 0xcb, 0xe0, 0x54, 0x0f, 0x4e, 0xfe, 0xf0, 0xef, 0x54, 0xf0, 0x4e, 0xf0, 0x22, 0x08, 0xe6,
-0xfc, 0x08, 0xe6, 0x8c, 0x83, 0x24, 0x09, 0x22, 0x78, 0x7e, 0xe6, 0xfc, 0x08, 0xe6, 0xfd, 0x8c,
-0x83, 0x22, 0xa6, 0x07, 0xe6, 0x24, 0x6e, 0xf8, 0xe6, 0x22, 0x78, 0x7e, 0xe6, 0xfa, 0x08, 0xe6,
-0xfb, 0x22, 0x26, 0xf6, 0x18, 0xee, 0x36, 0xf6, 0x22, 0x8b, 0x82, 0x8a, 0x83, 0xe5, 0x82, 0x22,
-0x8b, 0x25, 0x8a, 0x26, 0x89, 0x27, 0x8d, 0x28, 0x90, 0xfa, 0xcf, 0xe4, 0xf0, 0xa3, 0x74, 0x02,
-0xf0, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xce, 0x90, 0xfa, 0xcf, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0, 0xf5,
-0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x90, 0xfa, 0xce, 0xe0, 0x65, 0x28, 0x60, 0x46, 0xa3, 0xe0,
-0xff, 0xa3, 0xe0, 0xa3, 0xcf, 0xf0, 0xa3, 0xef, 0xf0, 0x12, 0x23, 0x2f, 0x90, 0xfa, 0xce, 0xe0,
-0xff, 0x90, 0xfa, 0xd1, 0xe4, 0x8f, 0xf0, 0x12, 0x1a, 0x6c, 0x12, 0x23, 0x2f, 0x90, 0xfa, 0xd1,
-0xe0, 0xff, 0xa3, 0xe0, 0x90, 0xfa, 0xcf, 0xcf, 0xf0, 0xa3, 0xef, 0xf0, 0x90, 0xfa, 0xce, 0xe0,
-0xa3, 0x75, 0xf0, 0x00, 0x12, 0x1a, 0x6c, 0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x04, 0x12, 0x1a,
-0x6c, 0x02, 0x22, 0xb1, 0x90, 0xfa, 0xd0, 0xe0, 0x24, 0x01, 0xff, 0x90, 0xfa, 0xcf, 0xe0, 0x34,
-0x00, 0xab, 0x25, 0xaa, 0x26, 0xa9, 0x27, 0x8f, 0xf0, 0x12, 0x1a, 0xd0, 0x7f, 0x00, 0x22, 0x7b,
-0x01, 0x7a, 0xfa, 0x79, 0xce, 0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x01, 0x12, 0x1a, 0x6c, 0x85,
-0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01, 0x02, 0x25, 0xd7, 0x8f, 0x62, 0x12, 0x2a, 0x06, 0x12, 0x22,
-0x4a, 0x8e, 0x83, 0x24, 0x0b, 0x12, 0x21, 0xf3, 0xe0, 0x54, 0xfb, 0xf0, 0x44, 0x02, 0xf0, 0x08,
-0x12, 0x22, 0x3f, 0xe0, 0xa3, 0x30, 0xe5, 0x0c, 0x12, 0x22, 0x56, 0x24, 0x0b, 0x12, 0x21, 0xf3,
-0xe0, 0x44, 0x01, 0xf0, 0x78, 0x7c, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0xf5, 0x82, 0x8e, 0x83, 0xe0,
-0x54, 0xb8, 0xfd, 0xf0, 0xe5, 0x62, 0x24, 0xfe, 0x44, 0x20, 0xfc, 0x4d, 0xf0, 0xe5, 0x82, 0x24,
-0x04, 0x12, 0x21, 0xf3, 0xe0, 0x54, 0xb8, 0xf0, 0x4c, 0xf0, 0x8f, 0x82, 0x8e, 0x83, 0xa3, 0x74,
-0x03, 0xf0, 0x18, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0x8e, 0x83, 0x24, 0x05, 0x12, 0x21, 0xf3, 0xc0,
-0x83, 0xc0, 0x82, 0xe0, 0xfd, 0x74, 0x96, 0x25, 0x62, 0xf5, 0x82, 0xe4, 0x34, 0xfa, 0xf5, 0x83,
-0xe0, 0x54, 0xfc, 0x44, 0x03, 0xfc, 0xed, 0x4c, 0xd0, 0x82, 0xd0, 0x83, 0xf0, 0x8f, 0x82, 0x8e,
-0x83, 0xe0, 0x44, 0x80, 0xf0, 0xe5, 0x82, 0x24, 0x04, 0x12, 0x21, 0xf3, 0xe0, 0x44, 0x80, 0xf0,
-0x12, 0x31, 0xc7, 0x74, 0x6e, 0x25, 0x62, 0xf8, 0x74, 0x04, 0x46, 0xf6, 0x7f, 0x00, 0x22, 0x12,
-0x10, 0x03, 0x7f, 0x02, 0x12, 0x12, 0x19, 0x78, 0x67, 0xe6, 0x44, 0x02, 0xf6, 0xd2, 0xb0, 0xd2,
-0xb1, 0x90, 0xf9, 0x15, 0xe0, 0x30, 0xe7, 0x07, 0x90, 0xff, 0x9e, 0xe4, 0xf0, 0x80, 0x36, 0xd2,
-0xb3, 0x90, 0xff, 0xa4, 0xe0, 0x90, 0xfa, 0x7b, 0xf0, 0x90, 0xff, 0xb4, 0xe0, 0x90, 0xfa, 0x7c,
-0xf0, 0x90, 0xff, 0xa2, 0xe0, 0x90, 0xfa, 0x79, 0xf0, 0x90, 0xff, 0xb2, 0xe0, 0x90, 0xfa, 0x7a,
-0xf0, 0x90, 0xff, 0xa4, 0x74, 0x30, 0xf0, 0x90, 0xff, 0xb4, 0xf0, 0x90, 0xff, 0xa2, 0x74, 0x40,
-0xf0, 0x90, 0xff, 0xb2, 0xf0, 0x90, 0xfa, 0xe4, 0xe5, 0xa8, 0xf0, 0x75, 0xa8, 0x81, 0x90, 0xff,
-0x92, 0xe0, 0x60, 0x04, 0xe4, 0xf0, 0x80, 0xf6, 0x90, 0xff, 0xfd, 0x74, 0x3a, 0xf0, 0x43, 0x87,
-0x01, 0x00, 0x00, 0x00, 0x90, 0xfa, 0x7b, 0xe0, 0x90, 0xff, 0xa4, 0xf0, 0x90, 0xfa, 0x7c, 0xe0,
-0x90, 0xff, 0xb4, 0xf0, 0x90, 0xfa, 0x79, 0xe0, 0x90, 0xff, 0xa2, 0xf0, 0x90, 0xfa, 0x7a, 0xe0,
-0x90, 0xff, 0xb2, 0xf0, 0x90, 0xf9, 0x17, 0xe0, 0x60, 0x02, 0xc2, 0xb3, 0x90, 0xfa, 0xe4, 0xe0,
-0xf5, 0xa8, 0x02, 0x10, 0x86, 0x8b, 0x5c, 0x8a, 0x5d, 0x89, 0x5e, 0x12, 0x2d, 0x3c, 0x90, 0xfa,
-0xc0, 0x12, 0x1b, 0x43, 0xaa, 0x5d, 0xa9, 0x5e, 0x90, 0xfa, 0xc3, 0x12, 0x1b, 0x43, 0x90, 0xfa,
-0xc4, 0xe4, 0x75, 0xf0, 0x0a, 0x12, 0x1a, 0x6c, 0x90, 0xfa, 0xc3, 0x12, 0x1b, 0x3a, 0xe9, 0x24,
-0x01, 0xf9, 0xe4, 0x3a, 0xfa, 0x90, 0xfa, 0xc6, 0x12, 0x1b, 0x43, 0xab, 0x5c, 0xaa, 0x5d, 0xa9,
-0x5e, 0x12, 0x2d, 0x48, 0xe0, 0xff, 0xc3, 0x13, 0xf0, 0xe4, 0x78, 0x82, 0xf6, 0x90, 0xfa, 0xbe,
-0xe0, 0xff, 0x78, 0x82, 0xe6, 0xc3, 0x9f, 0x50, 0x4a, 0x90, 0xfa, 0xc0, 0x12, 0x2d, 0x1d, 0xff,
-0x78, 0x83, 0xf6, 0x90, 0xfa, 0xc3, 0x12, 0x2d, 0x1d, 0xfe, 0xf4, 0x5f, 0xff, 0x78, 0x83, 0xf6,
-0x12, 0x2d, 0x1a, 0x5e, 0x4f, 0xff, 0x78, 0x83, 0xf6, 0x12, 0x2d, 0x23, 0x75, 0xf0, 0x02, 0x12,
-0x1a, 0x6c, 0x90, 0xfa, 0xc4, 0xe4, 0x75, 0xf0, 0x02, 0x12, 0x1a, 0x6c, 0xab, 0x5c, 0xaa, 0x5d,
-0xa9, 0x5e, 0x90, 0x00, 0x04, 0x12, 0x1a, 0x0b, 0x30, 0xe4, 0x03, 0x12, 0x2d, 0x32, 0x78, 0x82,
-0x06, 0x80, 0xaa, 0xe4, 0x90, 0xfa, 0xbf, 0xf0, 0x22, 0x8b, 0x56, 0x8a, 0x57, 0x89, 0x58, 0x90,
-0xfa, 0xbf, 0x74, 0x06, 0xf0, 0xe4, 0x90, 0xfa, 0xbe, 0xf0, 0x12, 0x19, 0xf2, 0x24, 0x6e, 0x60,
-0x26, 0x14, 0x70, 0x70, 0x12, 0x2d, 0x09, 0x60, 0x09, 0x24, 0x30, 0x70, 0x12, 0x12, 0x24, 0x95,
-0x80, 0x62, 0x12, 0x2d, 0x53, 0x12, 0x1f, 0x2c, 0x90, 0xfa, 0xbf, 0xef, 0xf0, 0x80, 0x55, 0x90,
-0xfa, 0xbf, 0x74, 0x81, 0xf0, 0x80, 0x4d, 0x12, 0x2d, 0x09, 0x60, 0x09, 0x24, 0x30, 0x70, 0x3e,
-0x12, 0x2c, 0x5f, 0x80, 0x3f, 0xe5, 0x58, 0x24, 0x03, 0xf9, 0xe4, 0x35, 0x57, 0xfa, 0x7b, 0x01,
-0xc0, 0x03, 0xc0, 0x02, 0xc0, 0x01, 0x12, 0x2d, 0x53, 0x90, 0x00, 0x05, 0x12, 0x1a, 0x0b, 0xfd,
-0x90, 0x00, 0x08, 0x12, 0x1a, 0x98, 0xf5, 0x2e, 0x85, 0xf0, 0x2d, 0xd0, 0x01, 0xd0, 0x02, 0xd0,
-0x03, 0x12, 0x25, 0xd7, 0x90, 0xfa, 0xbe, 0xef, 0xf0, 0xe4, 0xa3, 0xf0, 0x80, 0x06, 0x90, 0xfa,
-0xbf, 0x74, 0x81, 0xf0, 0x90, 0xfa, 0xbf, 0xe0, 0x12, 0x2d, 0x53, 0x90, 0x00, 0x02, 0x12, 0x1a,
-0x4a, 0x90, 0xfa, 0xbe, 0xe0, 0xff, 0x22, 0x8b, 0x29, 0x8a, 0x2a, 0x89, 0x2b, 0x8d, 0x2c, 0xe5,
-0x2c, 0x70, 0x03, 0xaf, 0x2c, 0x22, 0x12, 0x2d, 0x82, 0x70, 0x16, 0x12, 0x2d, 0xa1, 0xe5, 0x2d,
-0x90, 0xff, 0xf1, 0xf0, 0x12, 0x31, 0x1b, 0x50, 0xf2, 0x12, 0x26, 0x64, 0x40, 0x0b, 0x7f, 0x00,
-0x22, 0x12, 0x2d, 0xa1, 0x12, 0x26, 0x64, 0x50, 0xf8, 0x90, 0xff, 0xf3, 0x74, 0xa1, 0xf0, 0xe5,
-0x2c, 0xb4, 0x01, 0x07, 0x90, 0xff, 0xf0, 0xe0, 0x44, 0x02, 0xf0, 0x90, 0xff, 0xf1, 0xe4, 0xf0,
-0xf5, 0x2f, 0xe5, 0x2c, 0x14, 0xff, 0xe5, 0x2f, 0xc3, 0x9f, 0x50, 0x2a, 0x12, 0x31, 0x04, 0x40,
-0x03, 0xaf, 0x2f, 0x22, 0xc3, 0xe5, 0x2c, 0x95, 0x2f, 0xff, 0xbf, 0x02, 0x07, 0x90, 0xff, 0xf0,
-0xe0, 0x44, 0x02, 0xf0, 0x12, 0x2d, 0x94, 0x05, 0x2f, 0x74, 0x01, 0x25, 0x2b, 0xf5, 0x2b, 0xe4,
-0x35, 0x2a, 0xf5, 0x2a, 0x80, 0xcc, 0x12, 0x31, 0x04, 0x40, 0x03, 0x7f, 0x18, 0x22, 0x12, 0x2d,
-0x94, 0xaf, 0x2c, 0x22, 0x90, 0xff, 0xf1, 0xe5, 0x2e, 0xf0, 0x02, 0x31, 0x1b, 0x12, 0x10, 0x03,
-0x78, 0x84, 0x12, 0x22, 0x82, 0x30, 0xe1, 0x08, 0x7f, 0x13, 0x12, 0x30, 0xec, 0x02, 0x26, 0xfb,
-0x78, 0x84, 0xe6, 0xf9, 0x24, 0x12, 0x12, 0x21, 0xff, 0xe0, 0xff, 0x30, 0xe7, 0x40, 0x54, 0x03,
-0x60, 0x1e, 0xe9, 0xb4, 0x03, 0x0d, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xfe, 0xf0, 0xe0, 0x44, 0x04,
-0xf0, 0x80, 0x46, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xfd, 0xf0, 0xe0, 0x44, 0x08, 0xf0, 0x80, 0x39,
-0xe9, 0xb4, 0x03, 0x0d, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xfb, 0xf0, 0xe0, 0x44, 0x01, 0xf0, 0x80,
-0x28, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xf7, 0xf0, 0xe0, 0x44, 0x02, 0xf0, 0x80, 0x1b, 0xef, 0x54,
-0x03, 0x60, 0x14, 0xe9, 0xb4, 0x03, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xdf, 0xf0, 0x80, 0x07,
-0x90, 0xff, 0xb4, 0xe0, 0x54, 0xdf, 0xf0, 0xc2, 0xb3, 0x90, 0xf9, 0x17, 0xe0, 0x04, 0xf0, 0xaf,
-0x01, 0x12, 0x22, 0x33, 0xfd, 0x12, 0x2f, 0x49, 0x12, 0x30, 0xec, 0x02, 0x10, 0x86, 0x75, 0xa8,
-0x40, 0x78, 0x7f, 0xe4, 0xf6, 0xd8, 0xfd, 0x75, 0x81, 0x8b, 0x02, 0x27, 0x48, 0x02, 0x30, 0xcf,
-0xe4, 0x93, 0xa3, 0xf8, 0xe4, 0x93, 0xa3, 0x40, 0x03, 0xf6, 0x80, 0x01, 0xf2, 0x08, 0xdf, 0xf4,
-0x80, 0x29, 0xe4, 0x93, 0xa3, 0xf8, 0x54, 0x07, 0x24, 0x0c, 0xc8, 0xc3, 0x33, 0xc4, 0x54, 0x0f,
-0x44, 0x20, 0xc8, 0x83, 0x40, 0x04, 0xf4, 0x56, 0x80, 0x01, 0x46, 0xf6, 0xdf, 0xe4, 0x80, 0x0b,
-0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x90, 0x2b, 0x4c, 0xe4, 0x7e, 0x01, 0x93, 0x60,
-0xbc, 0xa3, 0xff, 0x54, 0x3f, 0x30, 0xe5, 0x09, 0x54, 0x1f, 0xfe, 0xe4, 0x93, 0xa3, 0x60, 0x01,
-0x0e, 0xcf, 0x54, 0xc0, 0x25, 0xe0, 0x60, 0xa8, 0x40, 0xb8, 0xe4, 0x93, 0xa3, 0xfa, 0xe4, 0x93,
-0xa3, 0xf8, 0xe4, 0x93, 0xa3, 0xc8, 0xc5, 0x82, 0xc8, 0xca, 0xc5, 0x83, 0xca, 0xf0, 0xa3, 0xc8,
-0xc5, 0x82, 0xc8, 0xca, 0xc5, 0x83, 0xca, 0xdf, 0xe9, 0xde, 0xe7, 0x80, 0xbe, 0xe4, 0xf5, 0x22,
-0x12, 0x1d, 0x12, 0xe0, 0xb4, 0x04, 0x0d, 0xe5, 0x22, 0x24, 0x03, 0xff, 0x12, 0x2f, 0x77, 0x12,
-0x1d, 0x12, 0xe4, 0xf0, 0x05, 0x22, 0xe5, 0x22, 0xc3, 0x94, 0x02, 0x40, 0xe3, 0xe4, 0xf5, 0x22,
-0x75, 0xf0, 0x02, 0xe5, 0x22, 0x90, 0xfa, 0x91, 0x12, 0x1d, 0x53, 0x60, 0x2c, 0x12, 0x2c, 0xb4,
-0xef, 0x60, 0x52, 0x75, 0xf0, 0x02, 0xe5, 0x22, 0x90, 0xfa, 0x91, 0x12, 0x1b, 0x1c, 0xe4, 0xf0,
-0xa3, 0xf0, 0x75, 0xf0, 0x0a, 0xe5, 0x22, 0x90, 0xfa, 0x9d, 0x12, 0x1b, 0x1c, 0xe0, 0xa3, 0x30,
-0xe6, 0x33, 0x12, 0x1d, 0x12, 0x74, 0x04, 0xf0, 0x22, 0x75, 0xf0, 0x02, 0xe5, 0x22, 0x90, 0xfa,
-0x95, 0x12, 0x1d, 0x53, 0x60, 0x16, 0x12, 0x2c, 0xb4, 0xef, 0x60, 0x19, 0x75, 0xf0, 0x02, 0xe5,
-0x22, 0x90, 0xfa, 0x95, 0x12, 0x1b, 0x1c, 0xe4, 0xf0, 0xa3, 0xf0, 0x22, 0x05, 0x22, 0xe5, 0x22,
-0xc3, 0x94, 0x02, 0x40, 0x9b, 0x22, 0xe4, 0xff, 0x90, 0xff, 0x83, 0xe0, 0x54, 0x0f, 0xfe, 0xef,
-0xc3, 0x9e, 0x50, 0x17, 0x74, 0xf0, 0x2f, 0xf5, 0x82, 0xe4, 0x34, 0xfe, 0xf5, 0x83, 0xe0, 0x12,
-0x1c, 0x11, 0x12, 0x1a, 0x38, 0x0f, 0x12, 0x1c, 0x00, 0x80, 0xdd, 0xef, 0xfd, 0xc3, 0xe5, 0x3a,
-0x9d, 0xf5, 0x3a, 0xe5, 0x39, 0x94, 0x00, 0xf5, 0x39, 0xd3, 0xe5, 0x3a, 0x94, 0x00, 0xe5, 0x39,
-0x94, 0x00, 0x40, 0x06, 0xe4, 0x90, 0xff, 0x83, 0xf0, 0x22, 0x12, 0x1d, 0x2f, 0x12, 0x1d, 0x84,
-0x12, 0x1d, 0x76, 0x12, 0x19, 0xf2, 0x24, 0x6e, 0x60, 0x1e, 0x14, 0x60, 0x1b, 0x24, 0x8e, 0x70,
-0x2d, 0x90, 0x00, 0x01, 0x12, 0x1a, 0x0b, 0xff, 0x24, 0xfc, 0x60, 0x03, 0x04, 0x70, 0x1f, 0xef,
-0xfd, 0x7c, 0x00, 0x7f, 0x0d, 0x02, 0x11, 0x16, 0x12, 0x1d, 0x8b, 0x12, 0x25, 0x39, 0x12, 0x1c,
-0xd9, 0x12, 0x1a, 0x0b, 0x60, 0x03, 0x02, 0x31, 0xbd, 0xe4, 0xff, 0x12, 0x31, 0xb1, 0x22, 0x8b,
-0x45, 0x8a, 0x46, 0x89, 0x47, 0x8c, 0x48, 0x8d, 0x49, 0xd2, 0x00, 0x12, 0x2d, 0x82, 0x70, 0x16,
-0x12, 0x2d, 0xa1, 0xe5, 0x48, 0x90, 0xff, 0xf1, 0xf0, 0x12, 0x31, 0x1b, 0x50, 0xf2, 0x12, 0x29,
-0x14, 0x40, 0x0b, 0x7f, 0x18, 0x22, 0x12, 0x2d, 0xa1, 0x12, 0x29, 0x14, 0x50, 0xf8, 0xe4, 0xf5,
-0x4b, 0xe5, 0x4a, 0x14, 0xff, 0xe5, 0x4b, 0xc3, 0x9f, 0x50, 0x17, 0x12, 0x29, 0x04, 0x40, 0x03,
-0x7f, 0x18, 0x22, 0x05, 0x4b, 0x74, 0x01, 0x25, 0x47, 0xf5, 0x47, 0xe4, 0x35, 0x46, 0xf5, 0x46,
-0x80, 0xdf, 0x90, 0xff, 0xf0, 0xe0, 0x44, 0x01, 0xf0, 0x12, 0x29, 0x04, 0x40, 0x03, 0x7f, 0x18,
-0x22, 0x7f, 0x00, 0x22, 0xab, 0x45, 0xaa, 0x46, 0xa9, 0x47, 0x12, 0x19, 0xf2, 0x90, 0xff, 0xf1,
-0xf0, 0x02, 0x31, 0x1b, 0x90, 0xff, 0xf1, 0xe5, 0x49, 0xf0, 0x02, 0x31, 0x1b, 0x7b, 0x01, 0x7a,
-0xfa, 0x79, 0xcc, 0xe4, 0xfd, 0x12, 0x22, 0xa0, 0x90, 0xfa, 0xcc, 0xe4, 0x75, 0xf0, 0x09, 0x12,
-0x1a, 0x6c, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23, 0x90, 0xfa, 0xcc, 0xe4, 0x75, 0xf0, 0x01, 0x12,
-0x1a, 0x82, 0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x90, 0xff, 0xf7, 0xe5,
-0x23, 0x12, 0x29, 0x78, 0x90, 0xff, 0xf6, 0xe5, 0x23, 0xf0, 0x90, 0xfa, 0xcc, 0xe4, 0xf0, 0xa3,
-0x74, 0x06, 0x12, 0x29, 0x78, 0xe5, 0x23, 0x30, 0xe0, 0x07, 0x90, 0xff, 0xfc, 0x74, 0x94, 0xf0,
-0x22, 0x90, 0xff, 0xfc, 0x74, 0x90, 0xf0, 0x22, 0xf0, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23, 0x90,
-0xfa, 0xcc, 0xe4, 0x75, 0xf0, 0x01, 0x12, 0x1a, 0x82, 0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01,
-0x02, 0x25, 0xd7, 0x90, 0xff, 0x93, 0x74, 0x2a, 0xf0, 0x90, 0xff, 0xff, 0xe0, 0x60, 0x06, 0x90,
-0xff, 0xfc, 0x74, 0x10, 0xf0, 0x90, 0xff, 0x91, 0xe0, 0x44, 0x90, 0xf0, 0xe4, 0x90, 0xf9, 0x15,
-0xf0, 0xa3, 0xf0, 0x12, 0x2a, 0x78, 0x12, 0x16, 0x42, 0x12, 0x2f, 0xcd, 0x7e, 0x07, 0x7f, 0xd0,
-0x12, 0x11, 0xe2, 0x7e, 0x0f, 0x7f, 0xa0, 0x12, 0x11, 0xfc, 0xe4, 0x78, 0x77, 0xf6, 0x78, 0x77,
-0xe6, 0xff, 0xc3, 0x94, 0x06, 0x50, 0x0b, 0x74, 0x6e, 0x2f, 0xf8, 0xe4, 0xf6, 0x78, 0x77, 0x06,
-0x80, 0xec, 0x7f, 0x03, 0x12, 0x2e, 0xb3, 0x90, 0xf9, 0x15, 0xe0, 0x20, 0xe4, 0x05, 0x7f, 0x04,
-0x12, 0x2e, 0xb3, 0x90, 0xff, 0x9b, 0xe4, 0xf0, 0x90, 0xff, 0x9a, 0xf0, 0x90, 0xff, 0xe8, 0xe0,
-0x54, 0x1f, 0xf0, 0xd2, 0xa8, 0x22, 0x15, 0x65, 0xa8, 0x65, 0xa6, 0x07, 0x30, 0x08, 0x05, 0x12,
-0x11, 0x66, 0x80, 0xf8, 0xd2, 0x08, 0xa8, 0x65, 0xe6, 0xff, 0xb4, 0x03, 0x0f, 0x78, 0x7c, 0x76,
-0xff, 0x08, 0x76, 0xe0, 0x08, 0x76, 0xff, 0x08, 0x76, 0xa0, 0x80, 0x0d, 0x78, 0x7c, 0x76, 0xff,
-0x08, 0x76, 0xe2, 0x08, 0x76, 0xff, 0x08, 0x76, 0xb0, 0x78, 0x80, 0x76, 0xfa, 0x08, 0x76, 0x9b,
-0xef, 0x24, 0xfd, 0x75, 0xf0, 0x0a, 0xa4, 0xae, 0xf0, 0x12, 0x22, 0x92, 0x7b, 0x01, 0x7a, 0xff,
-0x79, 0x48, 0x78, 0x68, 0x12, 0x1b, 0x31, 0xa8, 0x65, 0xe6, 0x24, 0xfd, 0x75, 0xf0, 0x08, 0xa4,
-0xff, 0xae, 0xf0, 0x78, 0x6a, 0x12, 0x22, 0x92, 0x79, 0x08, 0x78, 0x6b, 0x12, 0x1b, 0x31, 0x78,
-0x6d, 0xef, 0x12, 0x22, 0x92, 0x05, 0x65, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0x54, 0xab, 0xf0, 0xe0,
-0x44, 0x20, 0xf0, 0x90, 0xfa, 0xe3, 0x74, 0x02, 0xf0, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xcc, 0xe4,
-0xf5, 0x2d, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x25, 0xd7, 0x7e, 0x00, 0x90, 0xfa, 0xe1, 0xee, 0xf0,
-0xa3, 0xef, 0xf0, 0x64, 0x01, 0x70, 0x10, 0x90, 0xfa, 0xcc, 0xe0, 0xb4, 0x52, 0x09, 0x90, 0xf9,
-0x15, 0xe0, 0x54, 0xef, 0xf0, 0x80, 0x29, 0x90, 0xfa, 0xe1, 0xe0, 0x70, 0x04, 0xa3, 0xe0, 0x64,
-0x01, 0x70, 0x10, 0x90, 0xfa, 0xcc, 0xe0, 0xb4, 0x10, 0x09, 0x90, 0xf9, 0x15, 0xe0, 0x44, 0x10,
-0xf0, 0x80, 0x0d, 0x90, 0xfa, 0xe3, 0x74, 0x03, 0xf0, 0x90, 0xf9, 0x15, 0xe0, 0x54, 0xef, 0xf0,
-0x90, 0xff, 0xf0, 0xe0, 0x44, 0x20, 0xf0, 0x22, 0x12, 0x10, 0x03, 0x78, 0x8a, 0xef, 0xf6, 0x12,
-0x2a, 0x06, 0x12, 0x22, 0x33, 0x30, 0xe0, 0x25, 0x12, 0x22, 0x07, 0xe0, 0x54, 0x7f, 0xf0, 0x78,
-0x6b, 0x12, 0x1b, 0x28, 0x90, 0x00, 0x02, 0x12, 0x1a, 0x0b, 0x30, 0xe7, 0x09, 0x90, 0x00, 0x02,
-0xe4, 0x12, 0x1a, 0x4a, 0x80, 0xe9, 0x12, 0x22, 0x07, 0xe0, 0x44, 0x80, 0xf0, 0x12, 0x22, 0x33,
-0x30, 0xe1, 0x1e, 0x12, 0x21, 0xe9, 0xe0, 0x54, 0x7f, 0xf0, 0x12, 0x31, 0x5c, 0x78, 0x68, 0x12,
-0x1b, 0x28, 0x90, 0x00, 0x02, 0x74, 0x80, 0x12, 0x1a, 0x4a, 0x12, 0x21, 0xe9, 0xe0, 0x44, 0x80,
-0xf0, 0x12, 0x31, 0xc7, 0xe4, 0xff, 0x12, 0x30, 0xec, 0x02, 0x10, 0x86, 0x03, 0x68, 0x01, 0xff,
-0x48, 0x03, 0x6b, 0x01, 0xff, 0x08, 0x02, 0x66, 0x00, 0x00, 0x44, 0xfa, 0x95, 0x00, 0x00, 0x00,
-0x00, 0x44, 0xfa, 0x91, 0x00, 0x00, 0x00, 0x00, 0x42, 0xfa, 0xaf, 0x00, 0x00, 0x42, 0xfa, 0x7b,
-0x00, 0x00, 0x42, 0xfa, 0x79, 0x00, 0x00, 0x42, 0xf9, 0x6a, 0xff, 0xff, 0x42, 0xfa, 0x77, 0x00,
-0x00, 0x43, 0xf9, 0x18, 0x0a, 0x32, 0x02, 0x41, 0xf9, 0x65, 0x20, 0x41, 0xf9, 0x66, 0x20, 0x41,
-0xf9, 0x63, 0x00, 0x41, 0xf9, 0x64, 0x00, 0x44, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0xf9,
-0x15, 0x00, 0x00, 0x41, 0xf9, 0x17, 0x00, 0x01, 0x20, 0x00, 0x41, 0xf8, 0x04, 0x00, 0x00, 0x12,
-0x10, 0x03, 0x78, 0x85, 0xef, 0xf6, 0x12, 0x30, 0x93, 0x12, 0x30, 0xec, 0x78, 0x85, 0xe6, 0xff,
-0x24, 0x12, 0x12, 0x21, 0xff, 0xe0, 0xfe, 0x30, 0xe7, 0x16, 0xef, 0xb4, 0x03, 0x09, 0x90, 0xff,
-0x9e, 0xe0, 0x54, 0xfa, 0xf0, 0x80, 0x22, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xf5, 0xf0, 0x80, 0x19,
-0xee, 0x54, 0x03, 0x60, 0x14, 0xef, 0xb4, 0x03, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x20, 0xf0,
-0x80, 0x07, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x20, 0xf0, 0x90, 0xf9, 0x17, 0xe0, 0x14, 0xf0, 0xe0,
-0x70, 0x02, 0xd2, 0xb3, 0x02, 0x10, 0x86, 0x12, 0x1d, 0x6c, 0xe5, 0x3a, 0x64, 0x09, 0x70, 0x04,
-0xe5, 0x39, 0x64, 0x01, 0x60, 0x48, 0xc3, 0xe5, 0x3a, 0x94, 0x08, 0xe5, 0x39, 0x94, 0x00, 0x40,
-0x11, 0x7f, 0x08, 0xef, 0xe5, 0x3a, 0x94, 0x08, 0xf5, 0x3a, 0xe5, 0x39, 0x94, 0x00, 0xf5, 0x39,
-0x80, 0x05, 0xaf, 0x3a, 0x12, 0x1d, 0x84, 0xe4, 0xfe, 0xee, 0xc3, 0x9f, 0x50, 0x19, 0x12, 0x1c,
-0x11, 0x12, 0x19, 0xf2, 0xfd, 0x74, 0xf8, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0xfe, 0xf5, 0x83, 0xed,
-0xf0, 0x0e, 0x12, 0x1c, 0x00, 0x80, 0xe2, 0xef, 0x54, 0x7f, 0x90, 0xff, 0x81, 0xf0, 0x22, 0x8b,
-0x59, 0x8a, 0x5a, 0x89, 0x5b, 0x12, 0x2d, 0x48, 0x70, 0x05, 0xa3, 0x74, 0x08, 0xf0, 0x22, 0xab,
-0x59, 0xaa, 0x5a, 0xa9, 0x5b, 0x12, 0x2d, 0x3c, 0x90, 0xfa, 0xc6, 0x12, 0x1b, 0x43, 0xe5, 0x5b,
-0x24, 0x03, 0xf9, 0xe4, 0x35, 0x5a, 0xfa, 0x90, 0xfa, 0xc0, 0x12, 0x1b, 0x43, 0xe4, 0x90, 0xfa,
-0xbf, 0xf0, 0x78, 0x8b, 0xf6, 0x90, 0xfa, 0xbe, 0xe0, 0xff, 0x78, 0x8b, 0xe6, 0xc3, 0x9f, 0x50,
-0x12, 0x12, 0x2d, 0x1a, 0xff, 0x12, 0x2d, 0x23, 0x12, 0x2d, 0x36, 0x78, 0x8b, 0x06, 0x12, 0x2d,
-0x32, 0x80, 0xe2, 0x22, 0xad, 0x07, 0xac, 0x06, 0x90, 0x31, 0x4d, 0xe4, 0x93, 0xff, 0x78, 0x74,
-0xf6, 0x54, 0x0f, 0x12, 0x1c, 0xf8, 0xe0, 0x08, 0x76, 0x00, 0x08, 0xf6, 0x18, 0x12, 0x1c, 0x29,
-0xc3, 0x33, 0xce, 0x33, 0xce, 0xd8, 0xf9, 0xff, 0x78, 0x75, 0xee, 0xf6, 0x08, 0xef, 0xf6, 0xee,
-0x44, 0xf8, 0x18, 0xf6, 0xef, 0x08, 0xf6, 0x90, 0xff, 0x7a, 0xe0, 0x20, 0xe7, 0x03, 0x7f, 0x00,
-0x22, 0x78, 0x75, 0xe6, 0xfe, 0x08, 0xe6, 0xf5, 0x82, 0x8e, 0x83, 0xec, 0xf0, 0xa3, 0xed, 0xf0,
-0x90, 0xff, 0x7a, 0x74, 0x02, 0xf0, 0x7f, 0x01, 0x22, 0xab, 0x56, 0xaa, 0x57, 0xa9, 0x58, 0x90,
-0x00, 0x03, 0x12, 0x1a, 0x0b, 0x54, 0xf0, 0x24, 0xa0, 0x22, 0x90, 0xfa, 0xc6, 0x12, 0x1b, 0x3a,
-0x02, 0x19, 0xf2, 0x90, 0xfa, 0xc0, 0x12, 0x1b, 0x3a, 0xef, 0x12, 0x1a, 0x38, 0x90, 0xfa, 0xc7,
-0xe4, 0x22, 0x90, 0xfa, 0xc1, 0xe4, 0x75, 0xf0, 0x01, 0x02, 0x1a, 0x6c, 0x90, 0x00, 0x08, 0x12,
-0x1a, 0x98, 0xaa, 0xf0, 0xf9, 0x7b, 0x01, 0x22, 0x90, 0x00, 0x05, 0x12, 0x1a, 0x0b, 0x90, 0xfa,
-0xbe, 0xf0, 0x22, 0xab, 0x56, 0xaa, 0x57, 0xa9, 0x58, 0x22, 0x90, 0xfa, 0xda, 0xe0, 0xff, 0x7e,
-0x00, 0xc3, 0x90, 0xfa, 0xd4, 0xe0, 0x9f, 0xf0, 0x90, 0xfa, 0xd3, 0xe0, 0x9e, 0xf0, 0x90, 0xfa,
-0xd5, 0xee, 0x8f, 0xf0, 0x12, 0x1a, 0x6c, 0xef, 0x25, 0x51, 0xf5, 0x51, 0xee, 0x35, 0x50, 0xf5,
-0x50, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0x54, 0xfe, 0xf0, 0xe0, 0x54, 0xfd, 0xf0, 0x90, 0xfa, 0xe3,
-0xe0, 0x64, 0x03, 0x22, 0x90, 0xff, 0xf2, 0xe0, 0xab, 0x29, 0xaa, 0x2a, 0xa9, 0x2b, 0x02, 0x1a,
-0x38, 0x90, 0xff, 0xf3, 0x74, 0xa0, 0xf0, 0x22, 0x8f, 0x64, 0xed, 0x70, 0x0f, 0xe5, 0x64, 0xb4,
-0x03, 0x05, 0x7f, 0x01, 0x02, 0x31, 0x32, 0x7f, 0x02, 0x02, 0x31, 0x32, 0xaf, 0x64, 0x12, 0x2a,
-0x06, 0x74, 0x6e, 0x25, 0x64, 0xf8, 0xe6, 0x30, 0xe2, 0x0b, 0xd2, 0x09, 0x12, 0x1c, 0x83, 0xe0,
-0x54, 0x7f, 0xf0, 0x80, 0x02, 0xc2, 0x09, 0xe5, 0x64, 0xb4, 0x03, 0x07, 0x7f, 0x81, 0x12, 0x31,
-0x32, 0x80, 0x05, 0x7f, 0x82, 0x12, 0x31, 0x32, 0x30, 0x09, 0x07, 0x12, 0x1c, 0x83, 0xe0, 0x44,
-0x80, 0xf0, 0x12, 0x31, 0xc7, 0x22, 0x12, 0x10, 0x03, 0x90, 0xff, 0xfd, 0xe0, 0x44, 0x60, 0xf0,
-0xd2, 0x01, 0x90, 0xff, 0xfc, 0xe0, 0x44, 0x02, 0xf0, 0x90, 0xff, 0x00, 0xe0, 0x30, 0xe7, 0x13,
-0x90, 0xff, 0x83, 0xe0, 0x44, 0x80, 0xf0, 0x43, 0x35, 0x80, 0x90, 0xff, 0xfc, 0xe0, 0x44, 0x01,
-0xf0, 0x80, 0x0d, 0x12, 0x1d, 0x2f, 0x53, 0x35, 0x7f, 0x90, 0xff, 0xfc, 0xe0, 0x54, 0xfe, 0xf0,
-0x90, 0xff, 0x81, 0xe0, 0x44, 0x80, 0xf0, 0x12, 0x02, 0xb0, 0x12, 0x1d, 0x37, 0x02, 0x10, 0x86,
-0x12, 0x10, 0x03, 0x78, 0x89, 0xef, 0xf6, 0xd2, 0x00, 0x12, 0x2a, 0x06, 0x90, 0xf9, 0x67, 0x12,
-0x1b, 0x3a, 0xe9, 0x24, 0x03, 0xf9, 0xe4, 0x3a, 0xfa, 0xc0, 0x02, 0x78, 0x80, 0xe6, 0xfe, 0x08,
-0xe6, 0xaa, 0x06, 0xf8, 0xac, 0x02, 0x7d, 0x01, 0xd0, 0x02, 0x12, 0x22, 0x25, 0x12, 0x31, 0xc7,
-0x78, 0x89, 0xe6, 0xff, 0x12, 0x13, 0x3f, 0x12, 0x30, 0xec, 0x02, 0x10, 0x86, 0x8f, 0x63, 0x12,
-0x2a, 0x06, 0x12, 0x22, 0x07, 0xe0, 0x54, 0x3f, 0xf0, 0xe5, 0x82, 0x24, 0x04, 0x12, 0x21, 0xf3,
-0xe0, 0x54, 0x3f, 0xf0, 0x08, 0xe6, 0xfe, 0x08, 0xe6, 0x8e, 0x83, 0x24, 0x0b, 0x12, 0x21, 0xf3,
-0xe0, 0x54, 0xf8, 0xf0, 0x12, 0x31, 0xc7, 0x74, 0x6e, 0x25, 0x63, 0xf8, 0x74, 0xfb, 0x56, 0xf6,
-0x7f, 0x00, 0x22, 0x8f, 0x23, 0xc2, 0x08, 0x12, 0x2a, 0x06, 0x12, 0x22, 0x12, 0x78, 0x7e, 0x12,
-0x21, 0xeb, 0xe0, 0x44, 0x01, 0xf0, 0x12, 0x22, 0x4a, 0x12, 0x21, 0xef, 0xe0, 0x20, 0xe0, 0xf6,
-0xef, 0x24, 0x0b, 0xf5, 0x82, 0xe4, 0x3e, 0xf5, 0x83, 0xe0, 0x54, 0xf8, 0xf0, 0x12, 0x31, 0xc7,
-0xaf, 0x23, 0x12, 0x13, 0x3f, 0x22, 0x12, 0x10, 0x03, 0x12, 0x2a, 0x06, 0x12, 0x22, 0x4a, 0x24,
-0x06, 0x12, 0x21, 0xf1, 0xe0, 0xfd, 0x12, 0x22, 0x2d, 0x90, 0x00, 0x03, 0x12, 0x22, 0x52, 0x24,
-0x05, 0x12, 0x21, 0xf3, 0xe0, 0x90, 0x00, 0x04, 0x12, 0x1a, 0x4a, 0x12, 0x31, 0xc7, 0x7d, 0x02,
-0xe4, 0xff, 0x12, 0x2f, 0x18, 0x02, 0x10, 0x86, 0xae, 0x05, 0x12, 0x1c, 0xde, 0xef, 0x12, 0x1a,
-0x4a, 0x0e, 0x0e, 0x0e, 0xee, 0xd3, 0x95, 0x3c, 0xe4, 0x95, 0x3b, 0x40, 0x02, 0xae, 0x3c, 0xee,
-0xd3, 0x94, 0x08, 0x74, 0x80, 0x94, 0x81, 0x40, 0x0a, 0x7e, 0x03, 0x90, 0x00, 0x02, 0x74, 0x02,
-0x12, 0x1a, 0x4a, 0xaf, 0x06, 0x12, 0x31, 0xb1, 0x22, 0xae, 0x07, 0xed, 0x54, 0x03, 0x64, 0x01,
-0x60, 0x03, 0x7f, 0x10, 0x22, 0xed, 0x54, 0x7c, 0xc3, 0x94, 0x04, 0x50, 0x03, 0x7f, 0x0b, 0x22,
-0x74, 0x6e, 0x2e, 0xf8, 0x74, 0x02, 0x46, 0xf6, 0x74, 0x96, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0xfa,
-0xf5, 0x83, 0xed, 0xf0, 0x7f, 0x00, 0x22, 0xbf, 0x03, 0x06, 0x7c, 0xff, 0x7d, 0xe0, 0x80, 0x04,
-0x7c, 0xff, 0x7d, 0xe2, 0x8d, 0x82, 0x8c, 0x83, 0xe0, 0x44, 0x80, 0xf0, 0xe5, 0x82, 0x24, 0x04,
-0x12, 0x21, 0xf3, 0xe0, 0x44, 0x80, 0xf0, 0x74, 0x6e, 0x2f, 0xf8, 0x74, 0x04, 0x46, 0xf6, 0x7f,
-0x00, 0x22, 0x12, 0x10, 0x03, 0xe5, 0x3a, 0x64, 0x09, 0x70, 0x04, 0xe5, 0x39, 0x64, 0x01, 0x60,
-0x16, 0x90, 0xff, 0x83, 0xe0, 0x54, 0x0f, 0xff, 0xc3, 0xe5, 0x3a, 0x9f, 0xe5, 0x39, 0x94, 0x00,
-0x40, 0x05, 0x12, 0x28, 0x16, 0x80, 0x03, 0x12, 0x31, 0xbd, 0x02, 0x10, 0x86, 0x90, 0xff, 0xfc,
-0xe0, 0x20, 0xe7, 0x1f, 0xc2, 0xaf, 0x7d, 0xff, 0xac, 0x05, 0x1d, 0xec, 0x60, 0x15, 0x7e, 0x04,
-0x7f, 0x00, 0xef, 0x1f, 0xaa, 0x06, 0x70, 0x01, 0x1e, 0x4a, 0x60, 0xec, 0x90, 0xff, 0x92, 0xe4,
-0xf0, 0x80, 0xef, 0x22, 0x12, 0x10, 0x03, 0x78, 0x66, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0x30, 0xe0,
-0x12, 0x30, 0xe1, 0x0f, 0x90, 0xff, 0xfc, 0xe0, 0x44, 0x20, 0xf0, 0x7f, 0x04, 0x12, 0x12, 0x19,
-0x12, 0x1d, 0x46, 0x02, 0x10, 0x86, 0x8e, 0x5f, 0x8f, 0x60, 0xe5, 0x60, 0x15, 0x60, 0xae, 0x5f,
-0x70, 0x02, 0x15, 0x5f, 0xd3, 0x94, 0x00, 0xee, 0x94, 0x00, 0x40, 0x09, 0x7e, 0x07, 0x7f, 0xd0,
-0x12, 0x0f, 0xdc, 0x80, 0xe5, 0x22, 0x11, 0x94, 0x2d, 0xf6, 0x23, 0xef, 0x31, 0xa3, 0x2f, 0xf4,
-0x2f, 0xa2, 0x30, 0xb2, 0x2e, 0xe6, 0x26, 0x6d, 0x2b, 0xaf, 0x30, 0x55, 0x30, 0x74, 0x1d, 0xb4,
-0x2e, 0x40, 0x2a, 0xe8, 0x0e, 0x12, 0x10, 0x03, 0x78, 0x86, 0x12, 0x22, 0x82, 0x20, 0xe1, 0x07,
-0x7f, 0x12, 0x12, 0x30, 0xec, 0x80, 0x0a, 0x78, 0x86, 0xe6, 0xff, 0x12, 0x23, 0x49, 0x12, 0x30,
-0xec, 0x02, 0x10, 0x86, 0x12, 0x10, 0x03, 0x78, 0x87, 0x12, 0x22, 0x82, 0x20, 0xe2, 0x07, 0x7f,
-0x11, 0x12, 0x30, 0xec, 0x80, 0x0a, 0x78, 0x87, 0xe6, 0xff, 0x12, 0x2e, 0x7d, 0x12, 0x30, 0xec,
-0x02, 0x10, 0x86, 0x8f, 0x61, 0x12, 0x2e, 0x7d, 0xaf, 0x61, 0x12, 0x2a, 0x06, 0x12, 0x22, 0x12,
-0x12, 0x31, 0xc7, 0x74, 0x6e, 0x25, 0x61, 0xf8, 0x74, 0xfd, 0x56, 0xf6, 0xaf, 0x61, 0x12, 0x13,
-0x3f, 0x22, 0x12, 0x10, 0x03, 0xe5, 0x3a, 0x64, 0x09, 0x70, 0x04, 0xe5, 0x39, 0x64, 0x01, 0x60,
-0x05, 0x12, 0x2c, 0x07, 0x80, 0x06, 0x12, 0x1d, 0x64, 0x12, 0x1d, 0x6c, 0x02, 0x10, 0x86, 0x12,
-0x29, 0x93, 0x12, 0x12, 0xbb, 0x90, 0xf8, 0x04, 0xe0, 0xff, 0x60, 0x05, 0x7d, 0x01, 0x12, 0x12,
-0x58, 0x12, 0x29, 0x1d, 0x12, 0x12, 0xf7, 0x12, 0x11, 0x74, 0x80, 0xe3, 0x12, 0x1c, 0xde, 0xef,
-0x12, 0x1a, 0x4a, 0xe4, 0xf5, 0x33, 0xf5, 0x34, 0xef, 0x60, 0x03, 0x02, 0x31, 0xbd, 0xe4, 0xff,
-0x12, 0x31, 0xb1, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0xff, 0x54, 0xa0, 0x60, 0xf7, 0xef, 0x30, 0xe5,
-0x08, 0x90, 0xff, 0xf0, 0x44, 0x20, 0xf0, 0xc3, 0x22, 0xd3, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0xff,
-0x54, 0x28, 0x60, 0xf7, 0xef, 0x30, 0xe5, 0x08, 0x90, 0xff, 0xf0, 0x44, 0x20, 0xf0, 0xc3, 0x22,
-0xd3, 0x22, 0xef, 0x30, 0xe7, 0x08, 0x12, 0x1c, 0x95, 0xe0, 0x54, 0xdf, 0xf0, 0x22, 0xef, 0x12,
-0x1c, 0xe8, 0xe0, 0x54, 0xdf, 0xf0, 0x22, 0x81, 0x01, 0x82, 0x02, 0x83, 0x03, 0x87, 0x40, 0x00,
-0x40, 0x00, 0x40, 0x00, 0x40, 0x00, 0x40, 0x00, 0x40, 0x00, 0x08, 0x00, 0x78, 0x7e, 0x12, 0x22,
-0x09, 0xa3, 0xa3, 0xe0, 0xff, 0x30, 0xe7, 0x06, 0x54, 0x7f, 0xf0, 0x44, 0x80, 0xf0, 0x22, 0x85,
-0x3b, 0x39, 0x85, 0x3c, 0x3a, 0x90, 0xff, 0x82, 0xe0, 0x54, 0xf7, 0xf0, 0xa3, 0xe0, 0x54, 0x7f,
-0xf0, 0x22, 0xe4, 0xfe, 0xee, 0x90, 0x31, 0x47, 0x93, 0xb5, 0x07, 0x02, 0xd3, 0x22, 0x0e, 0xbe,
-0x07, 0xf2, 0xc3, 0x22, 0x00, 0x08, 0x18, 0x28, 0x38, 0x01, 0x81, 0x10, 0x0a, 0x02, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x12, 0x10, 0x03, 0x7f, 0x02, 0x12, 0x10, 0x92, 0x12, 0x1d, 0x46, 0x02, 0x10,
-0x86, 0x75, 0x39, 0x00, 0x8f, 0x3a, 0x12, 0x1c, 0x30, 0x12, 0x2c, 0x07, 0x22, 0x12, 0x1d, 0x6c,
-0x12, 0x1d, 0x2f, 0x12, 0x1d, 0x64, 0x22, 0xc2, 0x08, 0x22,
+0xe5, 0x54, 0x30, 0xe1, 0x01, 0x06, 0x78, 0x88, 0xe6, 0x12, 0x22, 0xe7, 0x90, 0x00, 0x0c, 0xef,
+0x12, 0x1a, 0xfa, 0x12, 0x22, 0xb5, 0xa3, 0xa3, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0x53, 0x07, 0x0c,
+0x53, 0x06, 0xe6, 0xe5, 0x53, 0x30, 0xe5, 0x03, 0x43, 0x07, 0x01, 0xe5, 0x54, 0x20, 0xe5, 0x0e,
+0xe5, 0x53, 0x54, 0x7f, 0x70, 0x08, 0xe5, 0x53, 0x20, 0xe7, 0x03, 0x43, 0x07, 0x02, 0xe5, 0x53,
+0x30, 0xe3, 0x03, 0x43, 0x07, 0x10, 0xe5, 0x53, 0x30, 0xe2, 0x03, 0x43, 0x07, 0x20, 0xe5, 0x53,
+0x54, 0x03, 0x60, 0x03, 0x43, 0x07, 0x40, 0xe5, 0x53, 0x30, 0xe1, 0x03, 0x43, 0x07, 0x80, 0xe5,
+0x53, 0x30, 0xe4, 0x03, 0x43, 0x06, 0x01, 0xe5, 0x53, 0x30, 0xe6, 0x03, 0x43, 0x06, 0x08, 0xe5,
+0x54, 0x20, 0xe4, 0x0e, 0xe5, 0x53, 0x54, 0x7f, 0x70, 0x08, 0xe5, 0x53, 0x20, 0xe7, 0x03, 0x43,
+0x06, 0x10, 0x53, 0x07, 0xfb, 0x53, 0x06, 0x79, 0x90, 0x00, 0x05, 0xee, 0x8f, 0xf0, 0x12, 0x1b,
+0x9f, 0xe5, 0x55, 0x30, 0xe3, 0x12, 0x54, 0x30, 0xff, 0xc4, 0x54, 0x0f, 0x12, 0x22, 0xe7, 0x90,
+0x00, 0x08, 0xef, 0x12, 0x1a, 0xfa, 0x80, 0x0a, 0x12, 0x22, 0xe8, 0x90, 0x00, 0x08, 0xe4, 0x12,
+0x1a, 0xfa, 0xe5, 0x55, 0x54, 0x03, 0x12, 0x22, 0xe7, 0x90, 0x00, 0x07, 0xef, 0x12, 0x1a, 0xfa,
+0xe5, 0x55, 0x54, 0x04, 0xff, 0xc3, 0x13, 0x90, 0x00, 0x09, 0x12, 0x1a, 0xfa, 0x90, 0x00, 0x07,
+0x12, 0x1a, 0xbb, 0x70, 0x13, 0x12, 0x22, 0xe8, 0xe9, 0x24, 0x09, 0xf9, 0xe4, 0x3a, 0xfa, 0x12,
+0x1a, 0xa2, 0xff, 0xc3, 0x13, 0x12, 0x1a, 0xe8, 0x12, 0x23, 0x27, 0x24, 0x08, 0x12, 0x22, 0xa1,
+0xe0, 0xfe, 0x8d, 0x82, 0x8c, 0x83, 0xe5, 0x82, 0x24, 0x07, 0x12, 0x22, 0xa1, 0xe0, 0xfd, 0xee,
+0xed, 0x12, 0x22, 0xe7, 0x90, 0x00, 0x03, 0xee, 0x8f, 0xf0, 0x12, 0x1b, 0x9f, 0x12, 0x32, 0x84,
+0x7d, 0x0a, 0xe4, 0xff, 0x12, 0x2f, 0xb4, 0x02, 0x10, 0xce, 0x90, 0xfa, 0xe6, 0xe0, 0xb4, 0x03,
+0x06, 0x7e, 0x00, 0x7f, 0x40, 0x80, 0x04, 0x7e, 0x00, 0x7f, 0x08, 0x90, 0xfa, 0xda, 0xee, 0xf0,
+0xa3, 0xef, 0xf0, 0x90, 0x00, 0x05, 0x12, 0x1a, 0xbb, 0xff, 0x7e, 0x00, 0x90, 0xfa, 0xd6, 0xee,
+0xf0, 0xa3, 0xef, 0xf0, 0x70, 0x03, 0x7f, 0x08, 0x22, 0x90, 0x00, 0x08, 0x12, 0x1b, 0x48, 0xff,
+0x90, 0xfa, 0xd8, 0xe5, 0xf0, 0xf0, 0xa3, 0xef, 0xf0, 0xae, 0x02, 0xaf, 0x01, 0x8e, 0x50, 0x8f,
+0x51, 0x74, 0x0a, 0x25, 0x51, 0xf5, 0x51, 0xe4, 0x35, 0x50, 0xf5, 0x50, 0x90, 0xfa, 0xdb, 0xe0,
+0xff, 0x14, 0xfe, 0x90, 0xfa, 0xd9, 0xe0, 0x5e, 0xfe, 0xc3, 0xef, 0x9e, 0xff, 0x90, 0xfa, 0xdd,
+0xf0, 0xc3, 0x90, 0xfa, 0xd7, 0xe0, 0x9f, 0x90, 0xfa, 0xd6, 0xe0, 0x94, 0x00, 0x50, 0x06, 0xa3,
+0xe0, 0x90, 0xfa, 0xdd, 0xf0, 0x12, 0x20, 0xa9, 0x60, 0x03, 0xe0, 0xff, 0x22, 0x12, 0x2e, 0x2b,
+0x90, 0xfa, 0xd6, 0xe0, 0xfe, 0xa3, 0xe0, 0xff, 0x4e, 0x60, 0x2b, 0x90, 0xfa, 0xda, 0xe0, 0xfc,
+0xa3, 0xe0, 0xfd, 0xd3, 0xef, 0x9d, 0xee, 0x9c, 0x40, 0x07, 0xe0, 0x90, 0xfa, 0xdd, 0xf0, 0x80,
+0x08, 0x90, 0xfa, 0xd7, 0xe0, 0x90, 0xfa, 0xdd, 0xf0, 0x12, 0x20, 0xa9, 0x60, 0x03, 0xe0, 0xff,
+0x22, 0x12, 0x2e, 0x2b, 0x80, 0xca, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x52, 0xe4, 0xf5, 0x2d, 0xf5,
+0x2e, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x7f, 0x00, 0x22, 0xaa, 0x50, 0xa9, 0x51, 0x7b, 0x01, 0x90,
+0xfa, 0xd8, 0xe0, 0xfc, 0xa3, 0xe0, 0xfd, 0x90, 0xfa, 0xdd, 0xe0, 0xf5, 0x4a, 0x12, 0x29, 0x60,
+0x90, 0xfa, 0xdc, 0xef, 0xf0, 0x22, 0xef, 0x24, 0xae, 0x60, 0x52, 0x24, 0xfe, 0x60, 0x2e, 0x24,
+0xfe, 0x70, 0x03, 0x02, 0x21, 0x69, 0x24, 0x06, 0x60, 0x03, 0x02, 0x21, 0xb1, 0x78, 0x71, 0xe6,
+0x54, 0xfb, 0xf6, 0x90, 0xff, 0xa5, 0xe0, 0xf5, 0x22, 0x44, 0x0f, 0xf0, 0x74, 0x33, 0x90, 0xfa,
+0x94, 0xf0, 0xe5, 0x22, 0xa3, 0xf0, 0x90, 0xfa, 0xb2, 0x74, 0x01, 0xf0, 0x22, 0x78, 0x72, 0xe6,
+0x54, 0xfb, 0xf6, 0x90, 0xff, 0xb5, 0xe0, 0xf5, 0x22, 0x44, 0x0f, 0xf0, 0x74, 0x43, 0x90, 0xfa,
+0x96, 0xf0, 0xe5, 0x22, 0xa3, 0xf0, 0x90, 0xfa, 0xb3, 0x74, 0x01, 0xf0, 0x22, 0x90, 0xfa, 0xa0,
+0xe0, 0xa3, 0x20, 0xe5, 0x03, 0x02, 0x21, 0xb1, 0x90, 0xff, 0xa6, 0xe0, 0x90, 0xfa, 0xcd, 0xf0,
+0xa3, 0xf0, 0x90, 0xfa, 0xcd, 0xe0, 0xff, 0x54, 0x0f, 0xfe, 0x60, 0x10, 0x90, 0xff, 0xa6, 0x12,
+0x23, 0x0d, 0x90, 0xff, 0xa6, 0xe0, 0x90, 0xfa, 0xcd, 0xf0, 0x80, 0xe6, 0x90, 0xfa, 0xce, 0xe0,
+0xff, 0x74, 0x34, 0xfe, 0x12, 0x2d, 0x85, 0xef, 0x70, 0x57, 0x90, 0xfa, 0xce, 0xe0, 0xff, 0x74,
+0x34, 0x90, 0xfa, 0x98, 0xf0, 0xef, 0xa3, 0xf0, 0x22, 0x90, 0xfa, 0xaa, 0xe0, 0xa3, 0x30, 0xe5,
+0x40, 0x90, 0xff, 0xb6, 0xe0, 0x90, 0xfa, 0xcd, 0xf0, 0xa3, 0xf0, 0x90, 0xfa, 0xcd, 0xe0, 0xff,
+0x54, 0x0f, 0xfe, 0x60, 0x10, 0x90, 0xff, 0xb6, 0x12, 0x23, 0x0d, 0x90, 0xff, 0xb6, 0xe0, 0x90,
+0xfa, 0xcd, 0xf0, 0x80, 0xe6, 0x90, 0xfa, 0xce, 0xe0, 0xff, 0x74, 0x44, 0xfe, 0x12, 0x2d, 0x85,
+0xef, 0x70, 0x0e, 0x90, 0xfa, 0xce, 0xe0, 0xff, 0x74, 0x44, 0x90, 0xfa, 0x9a, 0xf0, 0xef, 0xa3,
+0xf0, 0x22, 0xc0, 0xe0, 0xc0, 0xf0, 0xc0, 0x83, 0xc0, 0x82, 0xc0, 0xd0, 0x75, 0xd0, 0x00, 0xc0,
+0x00, 0xc0, 0x01, 0xc0, 0x02, 0xc0, 0x03, 0xc0, 0x04, 0xc0, 0x05, 0xc0, 0x06, 0xc0, 0x07, 0x90,
+0xff, 0x92, 0xe0, 0xff, 0x90, 0xfa, 0xcc, 0xf0, 0x90, 0xff, 0x92, 0xe4, 0xf0, 0xef, 0x12, 0x1b,
+0xfc, 0x22, 0x69, 0x26, 0x22, 0x69, 0x2e, 0x22, 0x0c, 0x30, 0x22, 0x0c, 0x32, 0x22, 0x1a, 0x38,
+0x22, 0x2c, 0x3a, 0x22, 0x5e, 0x3e, 0x22, 0x49, 0x44, 0x22, 0x3e, 0x46, 0x22, 0x54, 0x50, 0x22,
+0x54, 0x52, 0x22, 0x54, 0x54, 0x22, 0x54, 0x56, 0x00, 0x00, 0x22, 0x6e, 0x90, 0xfa, 0xcc, 0xe0,
+0xfd, 0x7c, 0x00, 0x7f, 0x01, 0x12, 0x11, 0x5e, 0x80, 0x62, 0x7c, 0x00, 0x7d, 0x01, 0x7f, 0x03,
+0x12, 0x11, 0x5e, 0x90, 0xff, 0xfe, 0xe0, 0x44, 0x20, 0xf0, 0x80, 0x50, 0x7c, 0x00, 0x7d, 0x01,
+0x7f, 0x02, 0x12, 0x11, 0x5e, 0x90, 0xff, 0xfe, 0xe0, 0x44, 0x40, 0xf0, 0x80, 0x3e, 0x7c, 0x00,
+0x7d, 0x01, 0x7f, 0x05, 0x12, 0x11, 0x5e, 0x80, 0x33, 0x7c, 0x00, 0x7d, 0x01, 0x7f, 0x06, 0x12,
+0x11, 0x5e, 0x80, 0x28, 0x90, 0xfa, 0xcc, 0xe0, 0xff, 0x12, 0x20, 0xc6, 0x80, 0x1e, 0x7c, 0x00,
+0x7d, 0x01, 0x7f, 0x04, 0x12, 0x11, 0x5e, 0x80, 0x13, 0x12, 0x28, 0x4e, 0x80, 0x0e, 0x90, 0xfa,
+0xcc, 0xe0, 0x24, 0x00, 0xff, 0xe4, 0x34, 0xff, 0xfe, 0x12, 0x2d, 0x85, 0xd0, 0x07, 0xd0, 0x06,
+0xd0, 0x05, 0xd0, 0x04, 0xd0, 0x03, 0xd0, 0x02, 0xd0, 0x01, 0xd0, 0x00, 0xd0, 0xd0, 0xd0, 0x82,
+0xd0, 0x83, 0xd0, 0xf0, 0xd0, 0xe0, 0x32, 0x78, 0x7c, 0xe6, 0xfe, 0x08, 0xe6, 0x24, 0x04, 0x8e,
+0x83, 0xf5, 0x82, 0xe4, 0x35, 0x83, 0xf5, 0x83, 0x22, 0x74, 0x13, 0x25, 0x24, 0xf5, 0x82, 0xe4,
+0x34, 0xf9, 0xf5, 0x83, 0x22, 0x78, 0x80, 0xe6, 0xfe, 0x08, 0xe6, 0xf5, 0x82, 0x8e, 0x83, 0x22,
+0x78, 0x80, 0xe6, 0xfe, 0x08, 0xe6, 0xaa, 0x06, 0xf8, 0xac, 0x02, 0x7d, 0x01, 0x7b, 0xff, 0x7a,
+0x32, 0x79, 0x56, 0x7e, 0x00, 0x7f, 0x0a, 0x02, 0x1a, 0x7c, 0x78, 0x80, 0xe6, 0xfc, 0x08, 0xe6,
+0xf5, 0x82, 0x8c, 0x83, 0xa3, 0xa3, 0x22, 0xff, 0x90, 0xf9, 0x6f, 0x02, 0x1b, 0xea, 0x90, 0xf9,
+0x6a, 0x12, 0x1b, 0xea, 0x90, 0x00, 0x04, 0x02, 0x1a, 0xbb, 0x78, 0x7e, 0xe6, 0xfe, 0x08, 0xe6,
+0xff, 0x22, 0xed, 0x12, 0x1a, 0xfa, 0x8f, 0x82, 0x8e, 0x83, 0xe5, 0x82, 0x22, 0xef, 0xf0, 0x90,
+0xfa, 0xce, 0xe0, 0x54, 0x0f, 0x4e, 0xfe, 0xf0, 0xef, 0x54, 0xf0, 0x4e, 0xf0, 0x22, 0x78, 0x80,
+0xe6, 0xfc, 0x08, 0xe6, 0x8c, 0x83, 0x22, 0x78, 0x7e, 0xe6, 0xfc, 0x08, 0xe6, 0xfd, 0x8c, 0x83,
+0x22, 0xa6, 0x07, 0xe6, 0x24, 0x6e, 0xf8, 0xe6, 0x22, 0x78, 0x7e, 0xe6, 0xfa, 0x08, 0xe6, 0xfb,
+0x22, 0x08, 0xe6, 0xfe, 0x08, 0xe6, 0x8e, 0x83, 0x22, 0x26, 0xf6, 0x18, 0xee, 0x36, 0xf6, 0x22,
+0xef, 0x24, 0x0b, 0xf5, 0x82, 0xe4, 0x3e, 0xf5, 0x83, 0x22, 0x8b, 0x82, 0x8a, 0x83, 0xe5, 0x82,
+0x22, 0x8b, 0x25, 0x8a, 0x26, 0x89, 0x27, 0x8d, 0x28, 0x90, 0xfa, 0xd2, 0xe4, 0xf0, 0xa3, 0x74,
+0x02, 0xf0, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xd1, 0x90, 0xfa, 0xd2, 0xe0, 0xf5, 0x2d, 0xa3, 0xe0,
+0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x90, 0xfa, 0xd1, 0xe0, 0x65, 0x28, 0x60, 0x46, 0xa3,
+0xe0, 0xff, 0xa3, 0xe0, 0xa3, 0xcf, 0xf0, 0xa3, 0xef, 0xf0, 0x12, 0x23, 0xf0, 0x90, 0xfa, 0xd1,
+0xe0, 0xff, 0x90, 0xfa, 0xd4, 0xe4, 0x8f, 0xf0, 0x12, 0x1b, 0x1c, 0x12, 0x23, 0xf0, 0x90, 0xfa,
+0xd4, 0xe0, 0xff, 0xa3, 0xe0, 0x90, 0xfa, 0xd2, 0xcf, 0xf0, 0xa3, 0xef, 0xf0, 0x90, 0xfa, 0xd1,
+0xe0, 0xa3, 0x75, 0xf0, 0x00, 0x12, 0x1b, 0x1c, 0x90, 0xfa, 0xd2, 0xe4, 0x75, 0xf0, 0x04, 0x12,
+0x1b, 0x1c, 0x02, 0x23, 0x72, 0x90, 0xfa, 0xd3, 0xe0, 0x24, 0x01, 0xff, 0x90, 0xfa, 0xd2, 0xe0,
+0x34, 0x00, 0xab, 0x25, 0xaa, 0x26, 0xa9, 0x27, 0x8f, 0xf0, 0x12, 0x1b, 0x80, 0x7f, 0x00, 0x22,
+0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xd1, 0x90, 0xfa, 0xd2, 0xe4, 0x75, 0xf0, 0x01, 0x12, 0x1b, 0x1c,
+0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01, 0x02, 0x26, 0x98, 0x8f, 0x62, 0x12, 0x2a, 0xc7, 0x12,
+0x22, 0xfa, 0x8e, 0x83, 0x24, 0x0b, 0x12, 0x22, 0xa1, 0xe0, 0x54, 0xfb, 0xf0, 0x44, 0x02, 0xf0,
+0x08, 0x12, 0x22, 0xdc, 0xe0, 0xa3, 0x30, 0xe5, 0x0c, 0x12, 0x23, 0x06, 0x24, 0x0b, 0x12, 0x22,
+0xa1, 0xe0, 0x44, 0x01, 0xf0, 0x78, 0x7c, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0xf5, 0x82, 0x8e, 0x83,
+0xe0, 0x54, 0xb8, 0xfd, 0xf0, 0xe5, 0x62, 0x24, 0xfe, 0x44, 0x20, 0xfc, 0x4d, 0xf0, 0xe5, 0x82,
+0x24, 0x04, 0x12, 0x22, 0xa1, 0xe0, 0x54, 0xb8, 0xf0, 0x4c, 0xf0, 0x8f, 0x82, 0x8e, 0x83, 0xa3,
+0x74, 0x03, 0xf0, 0x18, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0x8e, 0x83, 0x24, 0x05, 0x12, 0x22, 0xa1,
+0xc0, 0x83, 0xc0, 0x82, 0xe0, 0xfd, 0x74, 0x99, 0x25, 0x62, 0xf5, 0x82, 0xe4, 0x34, 0xfa, 0xf5,
+0x83, 0xe0, 0x54, 0xfc, 0x44, 0x03, 0xfc, 0xed, 0x4c, 0xd0, 0x82, 0xd0, 0x83, 0xf0, 0x8f, 0x82,
+0x8e, 0x83, 0xe0, 0x44, 0x80, 0xf0, 0xe5, 0x82, 0x24, 0x04, 0x12, 0x22, 0xa1, 0xe0, 0x44, 0x80,
+0xf0, 0x12, 0x32, 0x84, 0x74, 0x6e, 0x25, 0x62, 0xf8, 0x74, 0x04, 0x46, 0xf6, 0x7f, 0x00, 0x22,
+0x12, 0x10, 0x4b, 0x7f, 0x02, 0x12, 0x12, 0x61, 0x78, 0x67, 0xe6, 0x44, 0x02, 0xf6, 0xd2, 0xb0,
+0xd2, 0xb1, 0x90, 0xf9, 0x16, 0xe0, 0x30, 0xe7, 0x07, 0x90, 0xff, 0x9e, 0xe4, 0xf0, 0x80, 0x36,
+0xd2, 0xb3, 0x90, 0xff, 0xa4, 0xe0, 0x90, 0xfa, 0x7e, 0xf0, 0x90, 0xff, 0xb4, 0xe0, 0x90, 0xfa,
+0x7f, 0xf0, 0x90, 0xff, 0xa2, 0xe0, 0x90, 0xfa, 0x7c, 0xf0, 0x90, 0xff, 0xb2, 0xe0, 0x90, 0xfa,
+0x7d, 0xf0, 0x90, 0xff, 0xa4, 0x74, 0x30, 0xf0, 0x90, 0xff, 0xb4, 0xf0, 0x90, 0xff, 0xa2, 0x74,
+0x40, 0xf0, 0x90, 0xff, 0xb2, 0xf0, 0x90, 0xfa, 0xe7, 0xe5, 0xa8, 0xf0, 0x75, 0xa8, 0x81, 0x90,
+0xff, 0x92, 0xe0, 0x60, 0x04, 0xe4, 0xf0, 0x80, 0xf6, 0x90, 0xff, 0xfd, 0x74, 0x3a, 0xf0, 0x43,
+0x87, 0x01, 0x00, 0x00, 0x00, 0x90, 0xfa, 0x7e, 0xe0, 0x90, 0xff, 0xa4, 0xf0, 0x90, 0xfa, 0x7f,
+0xe0, 0x90, 0xff, 0xb4, 0xf0, 0x90, 0xfa, 0x7c, 0xe0, 0x90, 0xff, 0xa2, 0xf0, 0x90, 0xfa, 0x7d,
+0xe0, 0x90, 0xff, 0xb2, 0xf0, 0x90, 0xf9, 0x18, 0xe0, 0x60, 0x02, 0xc2, 0xb3, 0x90, 0xfa, 0xe7,
+0xe0, 0xf5, 0xa8, 0x02, 0x10, 0xce, 0x8b, 0x5c, 0x8a, 0x5d, 0x89, 0x5e, 0x12, 0x2e, 0x0d, 0x90,
+0xfa, 0xc3, 0x12, 0x1b, 0xf3, 0xaa, 0x5d, 0xa9, 0x5e, 0x90, 0xfa, 0xc6, 0x12, 0x1b, 0xf3, 0x90,
+0xfa, 0xc7, 0xe4, 0x75, 0xf0, 0x0a, 0x12, 0x1b, 0x1c, 0x90, 0xfa, 0xc6, 0x12, 0x1b, 0xea, 0xe9,
+0x24, 0x01, 0xf9, 0xe4, 0x3a, 0xfa, 0x90, 0xfa, 0xc9, 0x12, 0x1b, 0xf3, 0xab, 0x5c, 0xaa, 0x5d,
+0xa9, 0x5e, 0x12, 0x2e, 0x19, 0xe0, 0xff, 0xc3, 0x13, 0xf0, 0xe4, 0x78, 0x82, 0xf6, 0x90, 0xfa,
+0xc1, 0xe0, 0xff, 0x78, 0x82, 0xe6, 0xc3, 0x9f, 0x50, 0x4a, 0x90, 0xfa, 0xc3, 0x12, 0x2d, 0xee,
+0xff, 0x78, 0x83, 0xf6, 0x90, 0xfa, 0xc6, 0x12, 0x2d, 0xee, 0xfe, 0xf4, 0x5f, 0xff, 0x78, 0x83,
+0xf6, 0x12, 0x2d, 0xeb, 0x5e, 0x4f, 0xff, 0x78, 0x83, 0xf6, 0x12, 0x2d, 0xf4, 0x75, 0xf0, 0x02,
+0x12, 0x1b, 0x1c, 0x90, 0xfa, 0xc7, 0xe4, 0x75, 0xf0, 0x02, 0x12, 0x1b, 0x1c, 0xab, 0x5c, 0xaa,
+0x5d, 0xa9, 0x5e, 0x90, 0x00, 0x04, 0x12, 0x1a, 0xbb, 0x30, 0xe4, 0x03, 0x12, 0x2e, 0x03, 0x78,
+0x82, 0x06, 0x80, 0xaa, 0xe4, 0x90, 0xfa, 0xc2, 0xf0, 0x22, 0x8b, 0x56, 0x8a, 0x57, 0x89, 0x58,
+0x90, 0xfa, 0xc2, 0x74, 0x06, 0xf0, 0xe4, 0x90, 0xfa, 0xc1, 0xf0, 0x12, 0x1a, 0xa2, 0x24, 0x6e,
+0x60, 0x26, 0x14, 0x70, 0x70, 0x12, 0x2d, 0xda, 0x60, 0x09, 0x24, 0x30, 0x70, 0x12, 0x12, 0x25,
+0x56, 0x80, 0x62, 0x12, 0x2e, 0x24, 0x12, 0x1f, 0xda, 0x90, 0xfa, 0xc2, 0xef, 0xf0, 0x80, 0x55,
+0x90, 0xfa, 0xc2, 0x74, 0x81, 0xf0, 0x80, 0x4d, 0x12, 0x2d, 0xda, 0x60, 0x09, 0x24, 0x30, 0x70,
+0x3e, 0x12, 0x2d, 0x30, 0x80, 0x3f, 0xe5, 0x58, 0x24, 0x03, 0xf9, 0xe4, 0x35, 0x57, 0xfa, 0x7b,
+0x01, 0xc0, 0x03, 0xc0, 0x02, 0xc0, 0x01, 0x12, 0x2e, 0x24, 0x90, 0x00, 0x05, 0x12, 0x1a, 0xbb,
+0xfd, 0x90, 0x00, 0x08, 0x12, 0x1b, 0x48, 0xf5, 0x2e, 0x85, 0xf0, 0x2d, 0xd0, 0x01, 0xd0, 0x02,
+0xd0, 0x03, 0x12, 0x26, 0x98, 0x90, 0xfa, 0xc1, 0xef, 0xf0, 0xe4, 0xa3, 0xf0, 0x80, 0x06, 0x90,
+0xfa, 0xc2, 0x74, 0x81, 0xf0, 0x90, 0xfa, 0xc2, 0xe0, 0x12, 0x2e, 0x24, 0x90, 0x00, 0x02, 0x12,
+0x1a, 0xfa, 0x90, 0xfa, 0xc1, 0xe0, 0xff, 0x22, 0x8b, 0x29, 0x8a, 0x2a, 0x89, 0x2b, 0x8d, 0x2c,
+0xe5, 0x2c, 0x70, 0x03, 0xaf, 0x2c, 0x22, 0x12, 0x2e, 0x53, 0x70, 0x16, 0x12, 0x2e, 0x72, 0xe5,
+0x2d, 0x90, 0xff, 0xf1, 0xf0, 0x12, 0x31, 0xd8, 0x50, 0xf2, 0x12, 0x27, 0x25, 0x40, 0x0b, 0x7f,
+0x00, 0x22, 0x12, 0x2e, 0x72, 0x12, 0x27, 0x25, 0x50, 0xf8, 0x90, 0xff, 0xf3, 0x74, 0xa1, 0xf0,
+0xe5, 0x2c, 0xb4, 0x01, 0x07, 0x90, 0xff, 0xf0, 0xe0, 0x44, 0x02, 0xf0, 0x90, 0xff, 0xf1, 0xe4,
+0xf0, 0xf5, 0x2f, 0xe5, 0x2c, 0x14, 0xff, 0xe5, 0x2f, 0xc3, 0x9f, 0x50, 0x2a, 0x12, 0x31, 0xc1,
+0x40, 0x03, 0xaf, 0x2f, 0x22, 0xc3, 0xe5, 0x2c, 0x95, 0x2f, 0xff, 0xbf, 0x02, 0x07, 0x90, 0xff,
+0xf0, 0xe0, 0x44, 0x02, 0xf0, 0x12, 0x2e, 0x65, 0x05, 0x2f, 0x74, 0x01, 0x25, 0x2b, 0xf5, 0x2b,
+0xe4, 0x35, 0x2a, 0xf5, 0x2a, 0x80, 0xcc, 0x12, 0x31, 0xc1, 0x40, 0x03, 0x7f, 0x18, 0x22, 0x12,
+0x2e, 0x65, 0xaf, 0x2c, 0x22, 0x90, 0xff, 0xf1, 0xe5, 0x2e, 0xf0, 0x02, 0x31, 0xd8, 0x12, 0x10,
+0x4b, 0x78, 0x84, 0x12, 0x23, 0x31, 0x30, 0xe1, 0x08, 0x7f, 0x13, 0x12, 0x31, 0xa9, 0x02, 0x27,
+0xbc, 0x78, 0x84, 0xe6, 0xf9, 0x24, 0x13, 0x12, 0x22, 0xad, 0xe0, 0xff, 0x30, 0xe7, 0x40, 0x54,
+0x03, 0x60, 0x1e, 0xe9, 0xb4, 0x03, 0x0d, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xfe, 0xf0, 0xe0, 0x44,
+0x04, 0xf0, 0x80, 0x46, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xfd, 0xf0, 0xe0, 0x44, 0x08, 0xf0, 0x80,
+0x39, 0xe9, 0xb4, 0x03, 0x0d, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xfb, 0xf0, 0xe0, 0x44, 0x01, 0xf0,
+0x80, 0x28, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xf7, 0xf0, 0xe0, 0x44, 0x02, 0xf0, 0x80, 0x1b, 0xef,
+0x54, 0x03, 0x60, 0x14, 0xe9, 0xb4, 0x03, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x54, 0xdf, 0xf0, 0x80,
+0x07, 0x90, 0xff, 0xb4, 0xe0, 0x54, 0xdf, 0xf0, 0xc2, 0xb3, 0x90, 0xf9, 0x18, 0xe0, 0x04, 0xf0,
+0xaf, 0x01, 0x12, 0x22, 0xee, 0xfd, 0x12, 0x2f, 0xe5, 0x12, 0x31, 0xa9, 0x02, 0x10, 0xce, 0x75,
+0xa8, 0x40, 0x78, 0x7f, 0xe4, 0xf6, 0xd8, 0xfd, 0x75, 0x81, 0x8b, 0x02, 0x28, 0x09, 0x02, 0x31,
+0x8c, 0xe4, 0x93, 0xa3, 0xf8, 0xe4, 0x93, 0xa3, 0x40, 0x03, 0xf6, 0x80, 0x01, 0xf2, 0x08, 0xdf,
+0xf4, 0x80, 0x29, 0xe4, 0x93, 0xa3, 0xf8, 0x54, 0x07, 0x24, 0x0c, 0xc8, 0xc3, 0x33, 0xc4, 0x54,
+0x0f, 0x44, 0x20, 0xc8, 0x83, 0x40, 0x04, 0xf4, 0x56, 0x80, 0x01, 0x46, 0xf6, 0xdf, 0xe4, 0x80,
+0x0b, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x90, 0x2b, 0xa9, 0xe4, 0x7e, 0x01, 0x93,
+0x60, 0xbc, 0xa3, 0xff, 0x54, 0x3f, 0x30, 0xe5, 0x09, 0x54, 0x1f, 0xfe, 0xe4, 0x93, 0xa3, 0x60,
+0x01, 0x0e, 0xcf, 0x54, 0xc0, 0x25, 0xe0, 0x60, 0xa8, 0x40, 0xb8, 0xe4, 0x93, 0xa3, 0xfa, 0xe4,
+0x93, 0xa3, 0xf8, 0xe4, 0x93, 0xa3, 0xc8, 0xc5, 0x82, 0xc8, 0xca, 0xc5, 0x83, 0xca, 0xf0, 0xa3,
+0xc8, 0xc5, 0x82, 0xc8, 0xca, 0xc5, 0x83, 0xca, 0xdf, 0xe9, 0xde, 0xe7, 0x80, 0xbe, 0xe4, 0xf5,
+0x22, 0x12, 0x1d, 0xc2, 0xe0, 0xb4, 0x04, 0x0d, 0xe5, 0x22, 0x24, 0x03, 0xff, 0x12, 0x30, 0x13,
+0x12, 0x1d, 0xc2, 0xe4, 0xf0, 0x05, 0x22, 0xe5, 0x22, 0xc3, 0x94, 0x02, 0x40, 0xe3, 0xe4, 0xf5,
+0x22, 0x75, 0xf0, 0x02, 0xe5, 0x22, 0x90, 0xfa, 0x94, 0x12, 0x1e, 0x03, 0x60, 0x2c, 0x12, 0x2d,
+0x85, 0xef, 0x60, 0x52, 0x75, 0xf0, 0x02, 0xe5, 0x22, 0x90, 0xfa, 0x94, 0x12, 0x1b, 0xcc, 0xe4,
+0xf0, 0xa3, 0xf0, 0x75, 0xf0, 0x0a, 0xe5, 0x22, 0x90, 0xfa, 0xa0, 0x12, 0x1b, 0xcc, 0xe0, 0xa3,
+0x30, 0xe6, 0x33, 0x12, 0x1d, 0xc2, 0x74, 0x04, 0xf0, 0x22, 0x75, 0xf0, 0x02, 0xe5, 0x22, 0x90,
+0xfa, 0x98, 0x12, 0x1e, 0x03, 0x60, 0x16, 0x12, 0x2d, 0x85, 0xef, 0x60, 0x19, 0x75, 0xf0, 0x02,
+0xe5, 0x22, 0x90, 0xfa, 0x98, 0x12, 0x1b, 0xcc, 0xe4, 0xf0, 0xa3, 0xf0, 0x22, 0x05, 0x22, 0xe5,
+0x22, 0xc3, 0x94, 0x02, 0x40, 0x9b, 0x22, 0xe4, 0xff, 0x90, 0xff, 0x83, 0xe0, 0x54, 0x0f, 0xfe,
+0xef, 0xc3, 0x9e, 0x50, 0x17, 0x74, 0xf0, 0x2f, 0xf5, 0x82, 0xe4, 0x34, 0xfe, 0xf5, 0x83, 0xe0,
+0x12, 0x1c, 0xc1, 0x12, 0x1a, 0xe8, 0x0f, 0x12, 0x1c, 0xb0, 0x80, 0xdd, 0xef, 0xfd, 0xc3, 0xe5,
+0x3a, 0x9d, 0xf5, 0x3a, 0xe5, 0x39, 0x94, 0x00, 0xf5, 0x39, 0xd3, 0xe5, 0x3a, 0x94, 0x00, 0xe5,
+0x39, 0x94, 0x00, 0x40, 0x06, 0xe4, 0x90, 0xff, 0x83, 0xf0, 0x22, 0x12, 0x1d, 0xdf, 0x12, 0x1e,
+0x34, 0x12, 0x1e, 0x26, 0x12, 0x1a, 0xa2, 0x24, 0x6e, 0x60, 0x1e, 0x14, 0x60, 0x1b, 0x24, 0x8e,
+0x70, 0x2d, 0x90, 0x00, 0x01, 0x12, 0x1a, 0xbb, 0xff, 0x24, 0xfc, 0x60, 0x03, 0x04, 0x70, 0x1f,
+0xef, 0xfd, 0x7c, 0x00, 0x7f, 0x0d, 0x02, 0x11, 0x5e, 0x12, 0x1e, 0x3b, 0x12, 0x25, 0xfa, 0x12,
+0x1d, 0x89, 0x12, 0x1a, 0xbb, 0x60, 0x03, 0x02, 0x32, 0x7a, 0xe4, 0xff, 0x12, 0x32, 0x6e, 0x22,
+0x8b, 0x45, 0x8a, 0x46, 0x89, 0x47, 0x8c, 0x48, 0x8d, 0x49, 0xd2, 0x00, 0x12, 0x2e, 0x53, 0x70,
+0x16, 0x12, 0x2e, 0x72, 0xe5, 0x48, 0x90, 0xff, 0xf1, 0xf0, 0x12, 0x31, 0xd8, 0x50, 0xf2, 0x12,
+0x29, 0xd5, 0x40, 0x0b, 0x7f, 0x18, 0x22, 0x12, 0x2e, 0x72, 0x12, 0x29, 0xd5, 0x50, 0xf8, 0xe4,
+0xf5, 0x4b, 0xe5, 0x4a, 0x14, 0xff, 0xe5, 0x4b, 0xc3, 0x9f, 0x50, 0x17, 0x12, 0x29, 0xc5, 0x40,
+0x03, 0x7f, 0x18, 0x22, 0x05, 0x4b, 0x74, 0x01, 0x25, 0x47, 0xf5, 0x47, 0xe4, 0x35, 0x46, 0xf5,
+0x46, 0x80, 0xdf, 0x90, 0xff, 0xf0, 0xe0, 0x44, 0x01, 0xf0, 0x12, 0x29, 0xc5, 0x40, 0x03, 0x7f,
+0x18, 0x22, 0x7f, 0x00, 0x22, 0xab, 0x45, 0xaa, 0x46, 0xa9, 0x47, 0x12, 0x1a, 0xa2, 0x90, 0xff,
+0xf1, 0xf0, 0x02, 0x31, 0xd8, 0x90, 0xff, 0xf1, 0xe5, 0x49, 0xf0, 0x02, 0x31, 0xd8, 0x7b, 0x01,
+0x7a, 0xfa, 0x79, 0xcf, 0xe4, 0xfd, 0x12, 0x23, 0x61, 0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x09,
+0x12, 0x1b, 0x1c, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23, 0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x01,
+0x12, 0x1b, 0x32, 0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x90, 0xff, 0xf7,
+0xe5, 0x23, 0x12, 0x2a, 0x39, 0x90, 0xff, 0xf6, 0xe5, 0x23, 0xf0, 0x90, 0xfa, 0xcf, 0xe4, 0xf0,
+0xa3, 0x74, 0x06, 0x12, 0x2a, 0x39, 0xe5, 0x23, 0x30, 0xe0, 0x07, 0x90, 0xff, 0xfc, 0x74, 0x94,
+0xf0, 0x22, 0x90, 0xff, 0xfc, 0x74, 0x90, 0xf0, 0x22, 0xf0, 0x7b, 0x00, 0x7a, 0x00, 0x79, 0x23,
+0x90, 0xfa, 0xcf, 0xe4, 0x75, 0xf0, 0x01, 0x12, 0x1b, 0x32, 0x85, 0xf0, 0x2e, 0xf5, 0x2d, 0x7d,
+0x01, 0x02, 0x26, 0x98, 0x90, 0xff, 0x93, 0x74, 0x81, 0xf0, 0x90, 0xff, 0xff, 0xe0, 0x60, 0x06,
+0x90, 0xff, 0xfc, 0x74, 0x10, 0xf0, 0x90, 0xff, 0x91, 0xe0, 0x44, 0x90, 0xf0, 0xe4, 0x90, 0xf9,
+0x16, 0xf0, 0xa3, 0xf0, 0x12, 0x2b, 0x39, 0x12, 0x16, 0xc9, 0x12, 0x30, 0x69, 0x7e, 0x07, 0x7f,
+0xd0, 0x12, 0x12, 0x2a, 0x7e, 0x0f, 0x7f, 0xa0, 0x12, 0x12, 0x44, 0xe4, 0x78, 0x77, 0xf6, 0x78,
+0x77, 0xe6, 0xff, 0xc3, 0x94, 0x06, 0x50, 0x0b, 0x74, 0x6e, 0x2f, 0xf8, 0xe4, 0xf6, 0x78, 0x77,
+0x06, 0x80, 0xec, 0x7f, 0x03, 0x12, 0x30, 0xb2, 0x90, 0xf9, 0x16, 0xe0, 0x20, 0xe4, 0x05, 0x7f,
+0x04, 0x12, 0x30, 0xb2, 0x90, 0xff, 0x9b, 0xe4, 0xf0, 0x90, 0xff, 0x9a, 0xf0, 0x90, 0xff, 0xe8,
+0xe0, 0x54, 0x1f, 0xf0, 0xd2, 0xa8, 0x22, 0x15, 0x65, 0xa8, 0x65, 0xa6, 0x07, 0x30, 0x08, 0x05,
+0x12, 0x11, 0xae, 0x80, 0xf8, 0xd2, 0x08, 0xa8, 0x65, 0xe6, 0xff, 0xb4, 0x03, 0x0f, 0x78, 0x7c,
+0x76, 0xff, 0x08, 0x76, 0xe0, 0x08, 0x76, 0xff, 0x08, 0x76, 0xa0, 0x80, 0x0d, 0x78, 0x7c, 0x76,
+0xff, 0x08, 0x76, 0xe2, 0x08, 0x76, 0xff, 0x08, 0x76, 0xb0, 0x78, 0x80, 0x76, 0xfa, 0x08, 0x76,
+0x9e, 0xef, 0x24, 0xfd, 0x75, 0xf0, 0x0a, 0xa4, 0xae, 0xf0, 0x12, 0x23, 0x49, 0x7b, 0x01, 0x7a,
+0xff, 0x79, 0x48, 0x78, 0x68, 0x12, 0x1b, 0xe1, 0xa8, 0x65, 0xe6, 0x24, 0xfd, 0x75, 0xf0, 0x08,
+0xa4, 0xff, 0xae, 0xf0, 0x78, 0x6a, 0x12, 0x23, 0x49, 0x79, 0x08, 0x78, 0x6b, 0x12, 0x1b, 0xe1,
+0x78, 0x6d, 0xef, 0x12, 0x23, 0x49, 0x05, 0x65, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0x54, 0xab, 0xf0,
+0xe0, 0x44, 0x20, 0xf0, 0x90, 0xfa, 0xe6, 0x74, 0x02, 0xf0, 0x7b, 0x01, 0x7a, 0xfa, 0x79, 0xcf,
+0xe4, 0xf5, 0x2d, 0xf5, 0x2e, 0x7d, 0x01, 0x12, 0x26, 0x98, 0x7e, 0x00, 0x90, 0xfa, 0xe4, 0xee,
+0xf0, 0xa3, 0xef, 0xf0, 0x64, 0x01, 0x70, 0x10, 0x90, 0xfa, 0xcf, 0xe0, 0xb4, 0x52, 0x09, 0x90,
+0xf9, 0x16, 0xe0, 0x54, 0xef, 0xf0, 0x80, 0x29, 0x90, 0xfa, 0xe4, 0xe0, 0x70, 0x04, 0xa3, 0xe0,
+0x64, 0x01, 0x70, 0x10, 0x90, 0xfa, 0xcf, 0xe0, 0xb4, 0x10, 0x09, 0x90, 0xf9, 0x16, 0xe0, 0x44,
+0x10, 0xf0, 0x80, 0x0d, 0x90, 0xfa, 0xe6, 0x74, 0x03, 0xf0, 0x90, 0xf9, 0x16, 0xe0, 0x54, 0xef,
+0xf0, 0x90, 0xff, 0xf0, 0xe0, 0x44, 0x20, 0xf0, 0x22, 0x03, 0x68, 0x01, 0xff, 0x48, 0x03, 0x6b,
+0x01, 0xff, 0x08, 0x02, 0x66, 0x00, 0x00, 0x44, 0xfa, 0x98, 0x00, 0x00, 0x00, 0x00, 0x44, 0xfa,
+0x94, 0x00, 0x00, 0x00, 0x00, 0x42, 0xfa, 0xb2, 0x00, 0x00, 0x42, 0xfa, 0x7e, 0x00, 0x00, 0x42,
+0xfa, 0x7c, 0x00, 0x00, 0x42, 0xf9, 0x6d, 0xff, 0xff, 0x42, 0xfa, 0x7a, 0x00, 0x00, 0x41, 0xf9,
+0x66, 0xff, 0x41, 0xf9, 0x1c, 0x19, 0x41, 0xf9, 0x15, 0x00, 0x43, 0xf9, 0x19, 0x0a, 0x32, 0x02,
+0x41, 0xf9, 0x68, 0x20, 0x41, 0xf9, 0x69, 0x20, 0x41, 0xf9, 0x65, 0x00, 0x41, 0xf9, 0x67, 0x00,
+0x44, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0xf9, 0x16, 0x00, 0x00, 0x41, 0xf9, 0x18, 0x00,
+0x01, 0x20, 0x00, 0x41, 0xf8, 0x04, 0x00, 0x00, 0x12, 0x10, 0x4b, 0x78, 0x8a, 0xef, 0xf6, 0x12,
+0x2a, 0xc7, 0x12, 0x22, 0xee, 0x30, 0xe0, 0x29, 0x78, 0x7c, 0x12, 0x22, 0xb7, 0xe0, 0x54, 0x7f,
+0xf0, 0x78, 0x6b, 0x12, 0x1b, 0xd8, 0x90, 0x00, 0x02, 0x12, 0x1a, 0xbb, 0x30, 0xe7, 0x09, 0x90,
+0x00, 0x02, 0xe4, 0x12, 0x1a, 0xfa, 0x80, 0xe9, 0x78, 0x7c, 0x12, 0x22, 0xb7, 0xe0, 0x44, 0x80,
+0xf0, 0x12, 0x22, 0xee, 0x30, 0xe1, 0x1e, 0x12, 0x22, 0x97, 0xe0, 0x54, 0x7f, 0xf0, 0x12, 0x32,
+0x19, 0x78, 0x68, 0x12, 0x1b, 0xd8, 0x90, 0x00, 0x02, 0x74, 0x80, 0x12, 0x1a, 0xfa, 0x12, 0x22,
+0x97, 0xe0, 0x44, 0x80, 0xf0, 0x12, 0x32, 0x84, 0xe4, 0xff, 0x12, 0x31, 0xa9, 0x02, 0x10, 0xce,
+0x12, 0x10, 0x4b, 0x78, 0x85, 0xef, 0xf6, 0x12, 0x31, 0x50, 0x12, 0x31, 0xa9, 0x78, 0x85, 0xe6,
+0xff, 0x24, 0x13, 0x12, 0x22, 0xad, 0xe0, 0xfe, 0x30, 0xe7, 0x16, 0xef, 0xb4, 0x03, 0x09, 0x90,
+0xff, 0x9e, 0xe0, 0x54, 0xfa, 0xf0, 0x80, 0x22, 0x90, 0xff, 0x9e, 0xe0, 0x54, 0xf5, 0xf0, 0x80,
+0x19, 0xee, 0x54, 0x03, 0x60, 0x14, 0xef, 0xb4, 0x03, 0x09, 0x90, 0xff, 0xa4, 0xe0, 0x44, 0x20,
+0xf0, 0x80, 0x07, 0x90, 0xff, 0xb4, 0xe0, 0x44, 0x20, 0xf0, 0x90, 0xf9, 0x18, 0xe0, 0x14, 0xf0,
+0xe0, 0x70, 0x02, 0xd2, 0xb3, 0x02, 0x10, 0xce, 0x12, 0x1e, 0x1c, 0xe5, 0x3a, 0x64, 0x09, 0x70,
+0x04, 0xe5, 0x39, 0x64, 0x01, 0x60, 0x48, 0xc3, 0xe5, 0x3a, 0x94, 0x08, 0xe5, 0x39, 0x94, 0x00,
+0x40, 0x11, 0x7f, 0x08, 0xef, 0xe5, 0x3a, 0x94, 0x08, 0xf5, 0x3a, 0xe5, 0x39, 0x94, 0x00, 0xf5,
+0x39, 0x80, 0x05, 0xaf, 0x3a, 0x12, 0x1e, 0x34, 0xe4, 0xfe, 0xee, 0xc3, 0x9f, 0x50, 0x19, 0x12,
+0x1c, 0xc1, 0x12, 0x1a, 0xa2, 0xfd, 0x74, 0xf8, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0xfe, 0xf5, 0x83,
+0xed, 0xf0, 0x0e, 0x12, 0x1c, 0xb0, 0x80, 0xe2, 0xef, 0x54, 0x7f, 0x90, 0xff, 0x81, 0xf0, 0x22,
+0x8b, 0x59, 0x8a, 0x5a, 0x89, 0x5b, 0x12, 0x2e, 0x19, 0x70, 0x05, 0xa3, 0x74, 0x08, 0xf0, 0x22,
+0xab, 0x59, 0xaa, 0x5a, 0xa9, 0x5b, 0x12, 0x2e, 0x0d, 0x90, 0xfa, 0xc9, 0x12, 0x1b, 0xf3, 0xe5,
+0x5b, 0x24, 0x03, 0xf9, 0xe4, 0x35, 0x5a, 0xfa, 0x90, 0xfa, 0xc3, 0x12, 0x1b, 0xf3, 0xe4, 0x90,
+0xfa, 0xc2, 0xf0, 0x78, 0x8b, 0xf6, 0x90, 0xfa, 0xc1, 0xe0, 0xff, 0x78, 0x8b, 0xe6, 0xc3, 0x9f,
+0x50, 0x12, 0x12, 0x2d, 0xeb, 0xff, 0x12, 0x2d, 0xf4, 0x12, 0x2e, 0x07, 0x78, 0x8b, 0x06, 0x12,
+0x2e, 0x03, 0x80, 0xe2, 0x22, 0xad, 0x07, 0xac, 0x06, 0x90, 0x32, 0x0a, 0xe4, 0x93, 0xff, 0x78,
+0x74, 0xf6, 0x54, 0x0f, 0x12, 0x1d, 0xa8, 0xe0, 0x08, 0x76, 0x00, 0x08, 0xf6, 0x18, 0x12, 0x1c,
+0xd9, 0xc3, 0x33, 0xce, 0x33, 0xce, 0xd8, 0xf9, 0xff, 0x78, 0x75, 0xee, 0xf6, 0x08, 0xef, 0xf6,
+0xee, 0x44, 0xf8, 0x18, 0xf6, 0xef, 0x08, 0xf6, 0x90, 0xff, 0x7a, 0xe0, 0x20, 0xe7, 0x03, 0x7f,
+0x00, 0x22, 0x78, 0x75, 0xe6, 0xfe, 0x08, 0xe6, 0xf5, 0x82, 0x8e, 0x83, 0xec, 0xf0, 0xa3, 0xed,
+0xf0, 0x90, 0xff, 0x7a, 0x74, 0x02, 0xf0, 0x7f, 0x01, 0x22, 0xab, 0x56, 0xaa, 0x57, 0xa9, 0x58,
+0x90, 0x00, 0x03, 0x12, 0x1a, 0xbb, 0x54, 0xf0, 0x24, 0xa0, 0x22, 0x90, 0xfa, 0xc9, 0x12, 0x1b,
+0xea, 0x02, 0x1a, 0xa2, 0x90, 0xfa, 0xc3, 0x12, 0x1b, 0xea, 0xef, 0x12, 0x1a, 0xe8, 0x90, 0xfa,
+0xca, 0xe4, 0x22, 0x90, 0xfa, 0xc4, 0xe4, 0x75, 0xf0, 0x01, 0x02, 0x1b, 0x1c, 0x90, 0x00, 0x08,
+0x12, 0x1b, 0x48, 0xaa, 0xf0, 0xf9, 0x7b, 0x01, 0x22, 0x90, 0x00, 0x05, 0x12, 0x1a, 0xbb, 0x90,
+0xfa, 0xc1, 0xf0, 0x22, 0xab, 0x56, 0xaa, 0x57, 0xa9, 0x58, 0x22, 0x90, 0xfa, 0xdd, 0xe0, 0xff,
+0x7e, 0x00, 0xc3, 0x90, 0xfa, 0xd7, 0xe0, 0x9f, 0xf0, 0x90, 0xfa, 0xd6, 0xe0, 0x9e, 0xf0, 0x90,
+0xfa, 0xd8, 0xee, 0x8f, 0xf0, 0x12, 0x1b, 0x1c, 0xef, 0x25, 0x51, 0xf5, 0x51, 0xee, 0x35, 0x50,
+0xf5, 0x50, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0x54, 0xfe, 0xf0, 0xe0, 0x54, 0xfd, 0xf0, 0x90, 0xfa,
+0xe6, 0xe0, 0x64, 0x03, 0x22, 0x90, 0xff, 0xf2, 0xe0, 0xab, 0x29, 0xaa, 0x2a, 0xa9, 0x2b, 0x02,
+0x1a, 0xe8, 0x90, 0xff, 0xf3, 0x74, 0xa0, 0xf0, 0x22, 0x8f, 0x64, 0xed, 0x70, 0x0f, 0xe5, 0x64,
+0xb4, 0x03, 0x05, 0x7f, 0x01, 0x02, 0x31, 0xef, 0x7f, 0x02, 0x02, 0x31, 0xef, 0xaf, 0x64, 0x12,
+0x2a, 0xc7, 0x74, 0x6e, 0x25, 0x64, 0xf8, 0xe6, 0x30, 0xe2, 0x0b, 0xd2, 0x09, 0x12, 0x1d, 0x33,
+0xe0, 0x54, 0x7f, 0xf0, 0x80, 0x02, 0xc2, 0x09, 0xe5, 0x64, 0xb4, 0x03, 0x07, 0x7f, 0x81, 0x12,
+0x31, 0xef, 0x80, 0x05, 0x7f, 0x82, 0x12, 0x31, 0xef, 0x30, 0x09, 0x07, 0x12, 0x1d, 0x33, 0xe0,
+0x44, 0x80, 0xf0, 0x12, 0x32, 0x84, 0x22, 0x12, 0x10, 0x4b, 0x90, 0xff, 0xfd, 0xe0, 0x44, 0x60,
+0xf0, 0xd2, 0x01, 0x90, 0xff, 0xfc, 0xe0, 0x44, 0x02, 0xf0, 0x90, 0xff, 0x00, 0xe0, 0x30, 0xe7,
+0x13, 0x90, 0xff, 0x83, 0xe0, 0x44, 0x80, 0xf0, 0x43, 0x35, 0x80, 0x90, 0xff, 0xfc, 0xe0, 0x44,
+0x01, 0xf0, 0x80, 0x0d, 0x12, 0x1d, 0xdf, 0x53, 0x35, 0x7f, 0x90, 0xff, 0xfc, 0xe0, 0x54, 0xfe,
+0xf0, 0x90, 0xff, 0x81, 0xe0, 0x44, 0x80, 0xf0, 0x12, 0x02, 0xde, 0x12, 0x1d, 0xe7, 0x02, 0x10,
+0xce, 0x12, 0x10, 0x4b, 0x78, 0x89, 0xef, 0xf6, 0xd2, 0x00, 0x12, 0x2a, 0xc7, 0x90, 0xf9, 0x6a,
+0x12, 0x1b, 0xea, 0xe9, 0x24, 0x03, 0xf9, 0xe4, 0x3a, 0xfa, 0xc0, 0x02, 0x78, 0x80, 0xe6, 0xfe,
+0x08, 0xe6, 0xaa, 0x06, 0xf8, 0xac, 0x02, 0x7d, 0x01, 0xd0, 0x02, 0x12, 0x22, 0xd3, 0x12, 0x32,
+0x84, 0x78, 0x89, 0xe6, 0xff, 0x12, 0x13, 0x87, 0x12, 0x31, 0xa9, 0x02, 0x10, 0xce, 0x8f, 0x63,
+0x12, 0x2a, 0xc7, 0x78, 0x7c, 0x12, 0x22, 0xb7, 0xe0, 0x54, 0x3f, 0xf0, 0xe5, 0x82, 0x24, 0x04,
+0x12, 0x22, 0xa1, 0xe0, 0x54, 0x3f, 0xf0, 0x12, 0x23, 0x41, 0x24, 0x0b, 0x12, 0x22, 0xa1, 0xe0,
+0x54, 0xf8, 0xf0, 0x12, 0x32, 0x84, 0x74, 0x6e, 0x25, 0x63, 0xf8, 0x74, 0xfb, 0x56, 0xf6, 0x7f,
+0x00, 0x22, 0x12, 0x10, 0x4b, 0x12, 0x2a, 0xc7, 0x12, 0x22, 0xfa, 0x24, 0x06, 0x12, 0x22, 0x9f,
+0xe0, 0xfd, 0x12, 0x22, 0xe8, 0x90, 0x00, 0x03, 0x12, 0x23, 0x02, 0x24, 0x05, 0x12, 0x22, 0xa1,
+0xe0, 0x90, 0x00, 0x04, 0x12, 0x1a, 0xfa, 0x12, 0x32, 0x84, 0x7d, 0x02, 0xe4, 0xff, 0x12, 0x2f,
+0xb4, 0x02, 0x10, 0xce, 0xae, 0x05, 0x12, 0x1d, 0x8e, 0xef, 0x12, 0x1a, 0xfa, 0x0e, 0x0e, 0x0e,
+0xee, 0xd3, 0x95, 0x3c, 0xe4, 0x95, 0x3b, 0x40, 0x02, 0xae, 0x3c, 0xee, 0xd3, 0x94, 0x08, 0x74,
+0x80, 0x94, 0x81, 0x40, 0x0a, 0x7e, 0x03, 0x90, 0x00, 0x02, 0x74, 0x02, 0x12, 0x1a, 0xfa, 0xaf,
+0x06, 0x12, 0x32, 0x6e, 0x22, 0xae, 0x07, 0xed, 0x54, 0x03, 0x64, 0x01, 0x60, 0x03, 0x7f, 0x10,
+0x22, 0xed, 0x54, 0x7c, 0xc3, 0x94, 0x04, 0x50, 0x03, 0x7f, 0x0b, 0x22, 0x74, 0x6e, 0x2e, 0xf8,
+0x74, 0x02, 0x46, 0xf6, 0x74, 0x99, 0x2e, 0xf5, 0x82, 0xe4, 0x34, 0xfa, 0xf5, 0x83, 0xed, 0xf0,
+0x7f, 0x00, 0x22, 0xbf, 0x03, 0x06, 0x7c, 0xff, 0x7d, 0xe0, 0x80, 0x04, 0x7c, 0xff, 0x7d, 0xe2,
+0x8d, 0x82, 0x8c, 0x83, 0xe0, 0x44, 0x80, 0xf0, 0xe5, 0x82, 0x24, 0x04, 0x12, 0x22, 0xa1, 0xe0,
+0x44, 0x80, 0xf0, 0x74, 0x6e, 0x2f, 0xf8, 0x74, 0x04, 0x46, 0xf6, 0x7f, 0x00, 0x22, 0x12, 0x10,
+0x4b, 0xe5, 0x3a, 0x64, 0x09, 0x70, 0x04, 0xe5, 0x39, 0x64, 0x01, 0x60, 0x16, 0x90, 0xff, 0x83,
+0xe0, 0x54, 0x0f, 0xff, 0xc3, 0xe5, 0x3a, 0x9f, 0xe5, 0x39, 0x94, 0x00, 0x40, 0x05, 0x12, 0x28,
+0xd7, 0x80, 0x03, 0x12, 0x32, 0x7a, 0x02, 0x10, 0xce, 0x90, 0xff, 0xfc, 0xe0, 0x20, 0xe7, 0x1f,
+0xc2, 0xaf, 0x7d, 0xff, 0xac, 0x05, 0x1d, 0xec, 0x60, 0x15, 0x7e, 0x04, 0x7f, 0x00, 0xef, 0x1f,
+0xaa, 0x06, 0x70, 0x01, 0x1e, 0x4a, 0x60, 0xec, 0x90, 0xff, 0x92, 0xe4, 0xf0, 0x80, 0xef, 0x22,
+0x12, 0x10, 0x4b, 0x78, 0x66, 0xe6, 0xfe, 0x08, 0xe6, 0xff, 0x30, 0xe0, 0x12, 0x30, 0xe1, 0x0f,
+0x90, 0xff, 0xfc, 0xe0, 0x44, 0x20, 0xf0, 0x7f, 0x04, 0x12, 0x12, 0x61, 0x12, 0x1d, 0xf6, 0x02,
+0x10, 0xce, 0x8f, 0x23, 0xc2, 0x08, 0x12, 0x2a, 0xc7, 0x12, 0x22, 0xc0, 0x78, 0x7e, 0x12, 0x23,
+0x42, 0x24, 0x0b, 0x12, 0x22, 0xa1, 0xe0, 0x54, 0xf8, 0xf0, 0x12, 0x32, 0x84, 0xaf, 0x23, 0x12,
+0x13, 0x87, 0x22, 0x8e, 0x5f, 0x8f, 0x60, 0xe5, 0x60, 0x15, 0x60, 0xae, 0x5f, 0x70, 0x02, 0x15,
+0x5f, 0xd3, 0x94, 0x00, 0xee, 0x94, 0x00, 0x40, 0x09, 0x7e, 0x07, 0x7f, 0xd0, 0x12, 0x10, 0x24,
+0x80, 0xe5, 0x22, 0x11, 0xdc, 0x2e, 0xc7, 0x24, 0xb0, 0x32, 0x60, 0x30, 0x90, 0x30, 0x3e, 0x31,
+0x6f, 0x2f, 0x82, 0x27, 0x2e, 0x2c, 0x80, 0x31, 0x12, 0x31, 0x31, 0x1e, 0x64, 0x2f, 0x11, 0x2c,
+0x18, 0x0e, 0x12, 0x10, 0x4b, 0x78, 0x86, 0x12, 0x23, 0x31, 0x20, 0xe1, 0x07, 0x7f, 0x12, 0x12,
+0x31, 0xa9, 0x80, 0x0a, 0x78, 0x86, 0xe6, 0xff, 0x12, 0x24, 0x0a, 0x12, 0x31, 0xa9, 0x02, 0x10,
+0xce, 0x12, 0x10, 0x4b, 0x78, 0x87, 0x12, 0x23, 0x31, 0x20, 0xe2, 0x07, 0x7f, 0x11, 0x12, 0x31,
+0xa9, 0x80, 0x0a, 0x78, 0x87, 0xe6, 0xff, 0x12, 0x2f, 0x4e, 0x12, 0x31, 0xa9, 0x02, 0x10, 0xce,
+0x8f, 0x61, 0x12, 0x2f, 0x4e, 0xaf, 0x61, 0x12, 0x2a, 0xc7, 0x12, 0x22, 0xc0, 0x12, 0x32, 0x84,
+0x74, 0x6e, 0x25, 0x61, 0xf8, 0x74, 0xfd, 0x56, 0xf6, 0xaf, 0x61, 0x12, 0x13, 0x87, 0x22, 0x12,
+0x10, 0x4b, 0xe5, 0x3a, 0x64, 0x09, 0x70, 0x04, 0xe5, 0x39, 0x64, 0x01, 0x60, 0x05, 0x12, 0x2c,
+0xd8, 0x80, 0x06, 0x12, 0x1e, 0x14, 0x12, 0x1e, 0x1c, 0x02, 0x10, 0xce, 0x12, 0x2a, 0x54, 0x12,
+0x13, 0x03, 0x90, 0xf8, 0x04, 0xe0, 0xff, 0x60, 0x05, 0x7d, 0x01, 0x12, 0x12, 0xa0, 0x12, 0x29,
+0xde, 0x12, 0x13, 0x3f, 0x12, 0x11, 0xbc, 0x80, 0xe3, 0x12, 0x1d, 0x8e, 0xef, 0x12, 0x1a, 0xfa,
+0xe4, 0xf5, 0x33, 0xf5, 0x34, 0xef, 0x60, 0x03, 0x02, 0x32, 0x7a, 0xe4, 0xff, 0x12, 0x32, 0x6e,
+0x22, 0x90, 0xff, 0xf0, 0xe0, 0xff, 0x54, 0xa0, 0x60, 0xf7, 0xef, 0x30, 0xe5, 0x08, 0x90, 0xff,
+0xf0, 0x44, 0x20, 0xf0, 0xc3, 0x22, 0xd3, 0x22, 0x90, 0xff, 0xf0, 0xe0, 0xff, 0x54, 0x28, 0x60,
+0xf7, 0xef, 0x30, 0xe5, 0x08, 0x90, 0xff, 0xf0, 0x44, 0x20, 0xf0, 0xc3, 0x22, 0xd3, 0x22, 0xef,
+0x30, 0xe7, 0x08, 0x12, 0x1d, 0x45, 0xe0, 0x54, 0xdf, 0xf0, 0x22, 0xef, 0x12, 0x1d, 0x98, 0xe0,
+0x54, 0xdf, 0xf0, 0x22, 0x81, 0x01, 0x82, 0x02, 0x83, 0x03, 0x87, 0x40, 0x00, 0x40, 0x00, 0x40,
+0x00, 0x40, 0x00, 0x40, 0x00, 0x40, 0x00, 0x08, 0x00, 0x78, 0x7e, 0x12, 0x22, 0xb7, 0xa3, 0xa3,
+0xe0, 0xff, 0x30, 0xe7, 0x06, 0x54, 0x7f, 0xf0, 0x44, 0x80, 0xf0, 0x22, 0x85, 0x3b, 0x39, 0x85,
+0x3c, 0x3a, 0x90, 0xff, 0x82, 0xe0, 0x54, 0xf7, 0xf0, 0xa3, 0xe0, 0x54, 0x7f, 0xf0, 0x22, 0xe4,
+0xfe, 0xee, 0x90, 0x32, 0x04, 0x93, 0xb5, 0x07, 0x02, 0xd3, 0x22, 0x0e, 0xbe, 0x07, 0xf2, 0xc3,
+0x22, 0x00, 0x08, 0x18, 0x28, 0x38, 0x01, 0x81, 0x90, 0x0a, 0x02, 0x00, 0x00, 0x11, 0x13, 0x00,
+0x12, 0x10, 0x4b, 0x7f, 0x02, 0x12, 0x10, 0xda, 0x12, 0x1d, 0xf6, 0x02, 0x10, 0xce, 0x75, 0x39,
+0x00, 0x8f, 0x3a, 0x12, 0x1c, 0xe0, 0x12, 0x2c, 0xd8, 0x22, 0x12, 0x1e, 0x1c, 0x12, 0x1d, 0xdf,
+0x12, 0x1e, 0x14, 0x22, 0xc2, 0x08, 0x22,
};
#undef IMAGE_VERSION_NAME
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 544098d2b775..b8670905bc3a 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -48,7 +48,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.7"
+#define DRIVER_VERSION "v0.7mode043006"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com> and David Iacovelli"
#define DRIVER_DESC "Edgeport USB Serial Driver"
@@ -173,8 +173,12 @@ static struct usb_device_id edgeport_2port_id_table [] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_221C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
-// The 4-port shows up as two 2-port devices
+ /* The 4, 8 and 16 port devices show up as multiple 2 port devices */
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ }
};
@@ -209,6 +213,10 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_22C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_21C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_4S) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
+ { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ }
};
@@ -231,6 +239,7 @@ static int TIStayInBootMode = 0;
static int low_latency = EDGE_LOW_LATENCY;
static int closing_wait = EDGE_CLOSING_WAIT;
static int ignore_cpu_rev = 0;
+static int default_uart_mode = 0; /* RS232 */
static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length);
@@ -241,6 +250,10 @@ static int restart_read(struct edgeport_port *edge_port);
static void edge_set_termios (struct usb_serial_port *port, struct ktermios *old_termios);
static void edge_send(struct usb_serial_port *port);
+/* sysfs attributes */
+static int edge_create_sysfs_attrs(struct usb_serial_port *port);
+static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
+
/* circular buffer */
static struct edge_buf *edge_buf_alloc(unsigned int size);
static void edge_buf_free(struct edge_buf *eb);
@@ -1706,13 +1719,14 @@ static void edge_interrupt_callback (struct urb *urb)
int length = urb->actual_length;
int port_number;
int function;
- int status;
+ int retval;
__u8 lsr;
__u8 msr;
+ int status = urb->status;
dbg("%s", __FUNCTION__);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -1720,10 +1734,12 @@ static void edge_interrupt_callback (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
- dev_err(&urb->dev->dev, "%s - nonzero urb status received: %d\n", __FUNCTION__, urb->status);
+ dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
+ "%d\n", __FUNCTION__, status);
goto exit;
}
@@ -1781,10 +1797,10 @@ static void edge_interrupt_callback (struct urb *urb)
}
exit:
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb (urb, GFP_ATOMIC);
+ if (retval)
dev_err (&urb->dev->dev, "%s - usb_submit_urb failed with result %d\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
static void edge_bulk_in_callback (struct urb *urb)
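A hedged aside on the pattern applied throughout these callback hunks: the completion status is snapshotted from urb->status into a local variable at entry, and a separate variable holds the usb_submit_urb() return value, so the two can no longer be confused. A minimal sketch of that shape follows; the function name and message text are placeholders, not part of the patch:

#include <linux/kernel.h>
#include <linux/usb.h>

/* Illustrative only; the real handlers in this file carry the driver-specific work. */
static void example_bulk_callback(struct urb *urb)
{
	int status = urb->status;	/* snapshot the completion status once, at entry */
	int retval;

	switch (status) {
	case 0:				/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		return;			/* urb was killed, do not resubmit */
	default:
		break;			/* log if desired, then resubmit */
	}

	/* ... consume urb->transfer_buffer here ... */

	/* the submit result is kept apart from the completion status */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		printk(KERN_ERR "example: resubmit failed with %d\n", retval);
}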
@@ -1792,12 +1808,13 @@ static void edge_bulk_in_callback (struct urb *urb)
struct edgeport_port *edge_port = (struct edgeport_port *)urb->context;
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
- int status = 0;
+ int retval = 0;
int port_number;
+ int status = urb->status;
dbg("%s", __FUNCTION__);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -1805,17 +1822,18 @@ static void edge_bulk_in_callback (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
dev_err (&urb->dev->dev,"%s - nonzero read bulk status received: %d\n",
- __FUNCTION__, urb->status );
+ __FUNCTION__, status);
}
- if (urb->status == -EPIPE)
+ if (status == -EPIPE)
goto exit;
- if (urb->status) {
+ if (status) {
dev_err(&urb->dev->dev,"%s - stopping read!\n", __FUNCTION__);
return;
}
@@ -1849,14 +1867,14 @@ exit:
spin_lock(&edge_port->ep_lock);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) {
urb->dev = edge_port->port->serial->dev;
- status = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
} else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) {
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
}
spin_unlock(&edge_port->ep_lock);
- if (status)
+ if (retval)
dev_err (&urb->dev->dev, "%s - usb_submit_urb failed with result %d\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned char *data, int length)
@@ -1883,12 +1901,13 @@ static void edge_bulk_out_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+ int status = urb->status;
dbg ("%s - port %d", __FUNCTION__, port->number);
edge_port->ep_write_urb_in_use = 0;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -1896,11 +1915,12 @@ static void edge_bulk_out_callback (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
- dev_err (&urb->dev->dev,"%s - nonzero write bulk status received: %d\n",
- __FUNCTION__, urb->status);
+ dev_err(&urb->dev->dev, "%s - nonzero write bulk status "
+ "received: %d\n", __FUNCTION__, status);
}
/* send any buffered data */
@@ -2351,7 +2371,7 @@ static int restart_read(struct edgeport_port *edge_port)
urb->complete = edge_bulk_in_callback;
urb->context = edge_port;
urb->dev = edge_port->port->serial->dev;
- status = usb_submit_urb(urb, GFP_KERNEL);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
edge_port->shadow_mcr |= MCR_RTS;
@@ -2524,14 +2544,6 @@ static void edge_set_termios (struct usb_serial_port *port, struct ktermios *old
}
cflag = tty->termios->c_cflag;
- /* check that they really want us to change something */
- if (old_termios) {
- if (cflag == old_termios->c_cflag &&
- tty->termios->c_iflag == old_termios->c_iflag) {
- dbg ("%s - nothing to change", __FUNCTION__);
- return;
- }
- }
dbg("%s - clfag %08x iflag %08x", __FUNCTION__,
tty->termios->c_cflag, tty->termios->c_iflag);
@@ -2758,7 +2770,7 @@ static int edge_startup (struct usb_serial *serial)
edge_port->port = serial->port[i];
edge_port->edge_serial = edge_serial;
usb_set_serial_port_data(serial->port[i], edge_port);
- edge_port->bUartMode = 0; /* Default is RS232 */
+ edge_port->bUartMode = default_uart_mode;
}
return 0;
@@ -2782,19 +2794,60 @@ static void edge_shutdown (struct usb_serial *serial)
dbg ("%s", __FUNCTION__);
- for (i=0; i < serial->num_ports; ++i) {
+ for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
- if (edge_port) {
- edge_buf_free(edge_port->ep_out_buf);
- kfree(edge_port);
- }
+ edge_remove_sysfs_attrs(edge_port->port);
+ edge_buf_free(edge_port->ep_out_buf);
+ kfree(edge_port);
usb_set_serial_port_data(serial->port[i], NULL);
}
- kfree (usb_get_serial_data(serial));
+ kfree(usb_get_serial_data(serial));
usb_set_serial_data(serial, NULL);
}
+/* Sysfs Attributes */
+
+static ssize_t show_uart_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_serial_port *port = to_usb_serial_port(dev);
+ struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+
+ return sprintf(buf, "%d\n", edge_port->bUartMode);
+}
+
+static ssize_t store_uart_mode(struct device *dev,
+ struct device_attribute *attr, const char *valbuf, size_t count)
+{
+ struct usb_serial_port *port = to_usb_serial_port(dev);
+ struct edgeport_port *edge_port = usb_get_serial_port_data(port);
+ unsigned int v = simple_strtoul(valbuf, NULL, 0);
+
+ dbg("%s: setting uart_mode = %d", __FUNCTION__, v);
+
+ if (v < 256)
+ edge_port->bUartMode = v;
+ else
+ dev_err(dev, "%s - uart_mode %d is invalid\n", __FUNCTION__, v);
+
+ return count;
+}
+
+static DEVICE_ATTR(uart_mode, S_IWUSR | S_IRUGO, show_uart_mode, store_uart_mode);
+
+static int edge_create_sysfs_attrs(struct usb_serial_port *port)
+{
+ return device_create_file(&port->dev, &dev_attr_uart_mode);
+}
+
+static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
+{
+ device_remove_file(&port->dev, &dev_attr_uart_mode);
+ return 0;
+}
+
+
/* Circular Buffer */
/*
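The two attribute handlers above expose the per-port UART mode through sysfs once .port_probe wires up edge_create_sysfs_attrs() (see the driver-structure hunks further down). A hypothetical userspace sketch follows; the sysfs path is an assumption for illustration, since the real location depends on where the port enumerates (typically a ttyUSBn entry under /sys/bus/usb-serial/devices/):

#include <stdio.h>

int main(void)
{
	/* example path only; adjust to the port that actually enumerated */
	const char *path = "/sys/bus/usb-serial/devices/ttyUSB0/uart_mode";
	FILE *f;
	int mode;

	f = fopen(path, "r");
	if (!f) {
		perror("uart_mode");
		return 1;
	}
	if (fscanf(f, "%d", &mode) == 1)
		printf("current uart_mode: %d\n", mode);
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror("uart_mode");
		return 1;
	}
	fprintf(f, "0\n");	/* 0 selects RS232, per the driver's parameter description */
	fclose(f);
	return 0;
}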
@@ -2991,6 +3044,7 @@ static struct usb_serial_driver edgeport_1port_device = {
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.shutdown = edge_shutdown,
+ .port_probe = edge_create_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
@@ -3022,6 +3076,7 @@ static struct usb_serial_driver edgeport_2port_device = {
.unthrottle = edge_unthrottle,
.attach = edge_startup,
.shutdown = edge_shutdown,
+ .port_probe = edge_create_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
@@ -3085,3 +3140,6 @@ MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs");
module_param(ignore_cpu_rev, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_cpu_rev, "Ignore the cpu revision when connecting to a device");
+module_param(default_uart_mode, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_uart_mode, "Default uart_mode, 0=RS232, ...");
+
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index e57fa117e486..8e1a491e52a9 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -131,7 +131,7 @@
#define ION_DEVICE_ID_TI_EDGEPORT_2I 0x0207 // Edgeport/2i RS422/RS485
#define ION_DEVICE_ID_TI_EDGEPORT_421 0x020C // Edgeport/421 4 hub 2 RS232 + Parallel (lucent on a different hub port)
#define ION_DEVICE_ID_TI_EDGEPORT_21 0x020D // Edgeport/21 2 RS232 + Parallel (lucent on a different hub port)
-#define ION_DEVICE_ID_TI_EDGEPORT_8 0x020F // Edgeport/8 (single-CPU)
+#define ION_DEVICE_ID_TI_EDGEPORT_416 0x0212 // Edgeport/416
#define ION_DEVICE_ID_TI_EDGEPORT_1 0x0215 // Edgeport/1 RS232
#define ION_DEVICE_ID_TI_EDGEPORT_42 0x0217 // Edgeport/42 4 hub 2 RS232
#define ION_DEVICE_ID_TI_EDGEPORT_22I 0x021A // Edgeport/22I is an Edgeport/4 with ports 1&2 RS422 and ports 3&4 RS232
@@ -143,12 +143,14 @@
#define ION_DEVICE_ID_TI_EDGEPORT_21C 0x021E // Edgeport/21c is a TI based Edgeport/2 with lucent chip
// Generation 3 devices -- 3410 based edgport/1 (256 byte I2C)
-#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1 0x240 // Edgeport/1 RS232
-#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I 0x241 // Edgeport/1i- RS422 model
+#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1 0x0240 // Edgeport/1 RS232
+#define ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I 0x0241 // Edgeport/1i- RS422 model
// Ti based software switchable RS232/RS422/RS485 devices
-#define ION_DEVICE_ID_TI_EDGEPORT_4S 0x242 // Edgeport/4s - software switchable model
-#define ION_DEVICE_ID_IT_EDGEPORT_8S 0x243 // Edgeport/8s - software switchable model
+#define ION_DEVICE_ID_TI_EDGEPORT_4S 0x0242 // Edgeport/4s - software switchable model
+#define ION_DEVICE_ID_TI_EDGEPORT_8S 0x0243 // Edgeport/8s - software switchable model
+#define ION_DEVICE_ID_TI_EDGEPORT_8 0x0244 // Edgeport/8 (single-CPU)
+#define ION_DEVICE_ID_TI_EDGEPORT_416B 0x0247 // Edgeport/416
/************************************************************************
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 4df0ec74e0b1..0455c1552ae9 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -732,11 +732,13 @@ static void ipaq_read_bulk_callback(struct urb *urb)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -870,11 +872,13 @@ static void ipaq_write_bulk_callback(struct urb *urb)
struct ipaq_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
-
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 1bc586064c77..1b94daa61584 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -167,11 +167,13 @@ static void ipw_read_bulk_callback(struct urb *urb)
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -369,13 +371,15 @@ static void ipw_close(struct usb_serial_port *port, struct file * filp)
static void ipw_write_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
+ int status = urb->status;
dbg("%s", __FUNCTION__);
port->write_urb_busy = 0;
- if (urb->status)
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status)
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
usb_serial_port_softint(port);
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 9d847f69291c..5ab6a0c5ac52 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -21,6 +21,10 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
+ * 2007_Jun_21 Alan Cox <alan@redhat.com>
+ * Minimal cleanups for some of the driver problems and tty layer abuse.
+ * Still needs fixing to allow multiple dongles.
+ *
* 2002_Mar_07 greg kh
* moved some needed structures and #define values from the
* net/irda/irda-usb.h file into our file, as we don't want to depend on
@@ -109,6 +113,7 @@ static void ir_write_bulk_callback (struct urb *urb);
static void ir_read_bulk_callback (struct urb *urb);
static void ir_set_termios (struct usb_serial_port *port, struct ktermios *old_termios);
+/* Note that this lot means you can only have one per system */
static u8 ir_baud = 0;
static u8 ir_xbof = 0;
static u8 ir_add_bof = 0;
@@ -392,12 +397,14 @@ static int ir_write (struct usb_serial_port *port, const unsigned char *buf, int
static void ir_write_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
port->write_urb_busy = 0;
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -417,6 +424,7 @@ static void ir_read_bulk_callback (struct urb *urb)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -425,8 +433,7 @@ static void ir_read_bulk_callback (struct urb *urb)
return;
}
- switch (urb->status) {
-
+ switch (status) {
case 0: /* Successful */
/*
@@ -444,22 +451,12 @@ static void ir_read_bulk_callback (struct urb *urb)
urb->actual_length,
data);
- /*
- * Bypass flip-buffers, and feed the ldisc directly
- * due to our potentially large buffer size. Since we
- * used to set low_latency, this is exactly what the
- * tty layer did anyway :)
- */
tty = port->tty;
- /*
- * FIXME: must not do this in IRQ context
- */
- tty->ldisc.receive_buf(
- tty,
- data+1,
- NULL,
- urb->actual_length-1);
+ if (tty_buffer_request_room(tty, urb->actual_length - 1)) {
+ tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
+ tty_flip_buffer_push(tty);
+ }
/*
* No break here.
@@ -490,7 +487,7 @@ static void ir_read_bulk_callback (struct urb *urb)
default:
dbg("%s - nonzero read bulk status received: %d",
__FUNCTION__,
- urb->status);
+ status);
break ;
}
@@ -501,8 +498,9 @@ static void ir_read_bulk_callback (struct urb *urb)
static void ir_set_termios (struct usb_serial_port *port, struct ktermios *old_termios)
{
unsigned char *transfer_buffer;
- unsigned int cflag;
int result;
+ speed_t baud;
+ int ir_baud;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -511,77 +509,59 @@ static void ir_set_termios (struct usb_serial_port *port, struct ktermios *old_t
return;
}
- cflag = port->tty->termios->c_cflag;
- /* check that they really want us to change something */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(port->tty->termios->c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("%s - nothing to change...", __FUNCTION__);
- return;
- }
+ baud = tty_get_baud_rate(port->tty);
+
+ /*
+ * FIXME, we should compare the baud request against the
+ * capability stated in the IR header that we got in the
+ * startup function.
+ */
+
+ switch (baud) {
+ case 2400: ir_baud = SPEED_2400; break;
+ case 9600: ir_baud = SPEED_9600; break;
+ case 19200: ir_baud = SPEED_19200; break;
+ case 38400: ir_baud = SPEED_38400; break;
+ case 57600: ir_baud = SPEED_57600; break;
+ case 115200: ir_baud = SPEED_115200; break;
+ case 576000: ir_baud = SPEED_576000; break;
+ case 1152000: ir_baud = SPEED_1152000; break;
+ case 4000000: ir_baud = SPEED_4000000; break;
+ break;
+ default:
+ ir_baud = SPEED_9600;
+ baud = 9600;
+ /* And once the new tty stuff is all done we need to
+ call back to correct the baud bits */
}
- /* All we can change is the baud rate */
- if (cflag & CBAUD) {
-
- dbg ("%s - asking for baud %d",
- __FUNCTION__,
- tty_get_baud_rate(port->tty));
-
- /*
- * FIXME, we should compare the baud request against the
- * capability stated in the IR header that we got in the
- * startup function.
- */
- switch (cflag & CBAUD) {
- case B2400: ir_baud = SPEED_2400; break;
- default:
- case B9600: ir_baud = SPEED_9600; break;
- case B19200: ir_baud = SPEED_19200; break;
- case B38400: ir_baud = SPEED_38400; break;
- case B57600: ir_baud = SPEED_57600; break;
- case B115200: ir_baud = SPEED_115200; break;
- case B576000: ir_baud = SPEED_576000; break;
- case B1152000: ir_baud = SPEED_1152000; break;
-#ifdef B4000000
- case B4000000: ir_baud = SPEED_4000000; break;
-#endif
- }
+ if (xbof == -1)
+ ir_xbof = ir_xbof_change(ir_add_bof);
+ else
+ ir_xbof = ir_xbof_change(xbof) ;
- if (xbof == -1) {
- ir_xbof = ir_xbof_change(ir_add_bof);
- } else {
- ir_xbof = ir_xbof_change(xbof) ;
- }
+ /* FIXME need to check to see if our write urb is busy right
+ * now, or use a urb pool.
+ *
+ * send the baud change out on an "empty" data packet
+ */
+ transfer_buffer = port->write_urb->transfer_buffer;
+ *transfer_buffer = ir_xbof | ir_baud;
- /* Notify the tty driver that the termios have changed. */
- port->tty->ldisc.set_termios(port->tty, NULL);
-
- /* FIXME need to check to see if our write urb is busy right
- * now, or use a urb pool.
- *
- * send the baud change out on an "empty" data packet
- */
- transfer_buffer = port->write_urb->transfer_buffer;
- *transfer_buffer = ir_xbof | ir_baud;
-
- usb_fill_bulk_urb (
- port->write_urb,
- port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer,
- 1,
- ir_write_bulk_callback,
- port);
-
- port->write_urb->transfer_flags = URB_ZERO_PACKET;
-
- result = usb_submit_urb (port->write_urb, GFP_KERNEL);
- if (result)
- dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __FUNCTION__, result);
- }
- return;
+ usb_fill_bulk_urb (
+ port->write_urb,
+ port->serial->dev,
+ usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress),
+ port->write_urb->transfer_buffer,
+ 1,
+ ir_write_bulk_callback,
+ port);
+
+ port->write_urb->transfer_flags = URB_ZERO_PACKET;
+
+ result = usb_submit_urb (port->write_urb, GFP_KERNEL);
+ if (result)
+ dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __FUNCTION__, result);
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index e6966f12ed5a..f2a6fce5de1e 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -115,12 +115,13 @@ static int debug;
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.1.4"
+#define DRIVER_VERSION "v1.1.5"
#define DRIVER_AUTHOR "Hugh Blemings <hugh@misc.nu"
#define DRIVER_DESC "Keyspan USB to Serial Converter Driver"
#define INSTAT_BUFLEN 32
#define GLOCONT_BUFLEN 64
+#define INDAT49W_BUFLEN 512
/* Per device and per port private data */
struct keyspan_serial_private {
@@ -129,9 +130,15 @@ struct keyspan_serial_private {
struct urb *instat_urb;
char instat_buf[INSTAT_BUFLEN];
+ /* added to support 49wg, where data from all 4 ports comes in on 1 EP */
+ /* and high speed is supported */
+ struct urb *indat_urb;
+ char indat_buf[INDAT49W_BUFLEN];
+
/* XXX this one probably will need a lock */
struct urb *glocont_urb;
char glocont_buf[GLOCONT_BUFLEN];
+ char ctrl_buf[8]; // for EP0 control message
};
struct keyspan_port_private {
@@ -179,12 +186,13 @@ struct keyspan_port_private {
/* Include Keyspan message headers. All current Keyspan Adapters
- make use of one of four message formats which are referred
- to as USA-26, USA-28 and USA-49, USA-90 by Keyspan and within this driver. */
+ make use of one of five message formats which are referred
+ to as USA-26, USA-28, USA-49, USA-90 and USA-67 by Keyspan and within this driver. */
#include "keyspan_usa26msg.h"
#include "keyspan_usa28msg.h"
#include "keyspan_usa49msg.h"
#include "keyspan_usa90msg.h"
+#include "keyspan_usa67msg.h"
/* Functions used by new usb-serial code. */
@@ -419,14 +427,15 @@ static void usa26_indat_callback(struct urb *urb)
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
dbg ("%s", __FUNCTION__);
endpoint = usb_pipeendpoint(urb->pipe);
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero status: %x on endpoint %d.",
- __FUNCTION__, urb->status, endpoint);
+ __FUNCTION__, status, endpoint);
return;
}
@@ -511,11 +520,12 @@ static void usa26_instat_callback(struct urb *urb)
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state, err;
+ int status = urb->status;
serial = (struct usb_serial *) urb->context;
- if (urb->status) {
- dbg("%s - nonzero status: %x", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero status: %x", __FUNCTION__, status);
return;
}
if (urb->actual_length != 9) {
@@ -579,6 +589,7 @@ static void usa28_indat_callback(struct urb *urb)
struct tty_struct *tty;
unsigned char *data;
struct keyspan_port_private *p_priv;
+ int status = urb->status;
dbg ("%s", __FUNCTION__);
@@ -590,9 +601,9 @@ static void usa28_indat_callback(struct urb *urb)
return;
do {
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero status: %x on endpoint %d.",
- __FUNCTION__, urb->status, usb_pipeendpoint(urb->pipe));
+ __FUNCTION__, status, usb_pipeendpoint(urb->pipe));
return;
}
@@ -648,11 +659,12 @@ static void usa28_instat_callback(struct urb *urb)
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
+ int status = urb->status;
serial = (struct usb_serial *) urb->context;
- if (urb->status) {
- dbg("%s - nonzero status: %x", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero status: %x", __FUNCTION__, status);
return;
}
@@ -739,13 +751,14 @@ static void usa49_instat_callback(struct urb *urb)
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state;
+ int status = urb->status;
dbg ("%s", __FUNCTION__);
serial = (struct usb_serial *) urb->context;
- if (urb->status) {
- dbg("%s - nonzero status: %x", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero status: %x", __FUNCTION__, status);
return;
}
@@ -805,14 +818,15 @@ static void usa49_indat_callback(struct urb *urb)
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
dbg ("%s", __FUNCTION__);
endpoint = usb_pipeendpoint(urb->pipe);
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero status: %x on endpoint %d.", __FUNCTION__,
- urb->status, endpoint);
+ status, endpoint);
return;
}
@@ -850,13 +864,90 @@ static void usa49_indat_callback(struct urb *urb)
}
}
+static void usa49wg_indat_callback(struct urb *urb)
+{
+ int i, len, x, err;
+ struct usb_serial *serial;
+ struct usb_serial_port *port;
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
+
+ dbg ("%s", __FUNCTION__);
+
+ serial = urb->context;
+
+ if (status) {
+ dbg("%s - nonzero status: %x", __FUNCTION__, status);
+ return;
+ }
+
+ /* inbound data is in the form P#, len, status, data */
+ i = 0;
+ len = 0;
+
+ if (urb->actual_length) {
+ while (i < urb->actual_length) {
+
+ /* Check port number from message*/
+ if (data[i] >= serial->num_ports) {
+ dbg ("%s - Unexpected port number %d",
+ __FUNCTION__, data[i]);
+ return;
+ }
+ port = serial->port[data[i++]];
+ tty = port->tty;
+ len = data[i++];
+
+ /* 0x80 bit is error flag */
+ if ((data[i] & 0x80) == 0) {
+ /* no error on any byte */
+ i++;
+ for (x = 1; x < len ; ++x)
+ if (port->open_count)
+ tty_insert_flip_char(tty,
+ data[i++], 0);
+ else
+ i++;
+ } else {
+ /*
+ * some bytes had errors, every byte has status
+ */
+ for (x = 0; x + 1 < len; x += 2) {
+ int stat = data[i], flag = 0;
+ if (stat & RXERROR_OVERRUN)
+ flag |= TTY_OVERRUN;
+ if (stat & RXERROR_FRAMING)
+ flag |= TTY_FRAME;
+ if (stat & RXERROR_PARITY)
+ flag |= TTY_PARITY;
+ /* XXX should handle break (0x10) */
+ if (port->open_count)
+ tty_insert_flip_char(tty,
+ data[i+1], flag);
+ i += 2;
+ }
+ }
+ if (port->open_count)
+ tty_flip_buffer_push(tty);
+ }
+ }
+
+ /* Resubmit urb so we continue receiving */
+ urb->dev = serial->dev;
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __FUNCTION__, err);
+}
+
/* not used, usa-49 doesn't have per-port control endpoints */
-static void usa49_outcont_callback(struct urb *urb)
+static void usa49_outcont_callback(struct urb *urb)
{
dbg ("%s", __FUNCTION__);
}
-static void usa90_indat_callback(struct urb *urb)
+static void usa90_indat_callback(struct urb *urb)
{
int i, err;
int endpoint;
@@ -864,15 +955,15 @@ static void usa90_indat_callback(struct urb *urb)
struct keyspan_port_private *p_priv;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
dbg ("%s", __FUNCTION__);
endpoint = usb_pipeendpoint(urb->pipe);
-
- if (urb->status) {
+ if (status) {
dbg("%s - nonzero status: %x on endpoint %d.",
- __FUNCTION__, urb->status, endpoint);
+ __FUNCTION__, status, endpoint);
return;
}
@@ -938,11 +1029,12 @@ static void usa90_instat_callback(struct urb *urb)
struct usb_serial_port *port;
struct keyspan_port_private *p_priv;
int old_dcd_state, err;
+ int status = urb->status;
serial = (struct usb_serial *) urb->context;
- if (urb->status) {
- dbg("%s - nonzero status: %x", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero status: %x", __FUNCTION__, status);
return;
}
if (urb->actual_length < 14) {
@@ -995,6 +1087,88 @@ static void usa90_outcont_callback(struct urb *urb)
}
}
+/* Status messages from the 28xg */
+static void usa67_instat_callback(struct urb *urb)
+{
+ int err;
+ unsigned char *data = urb->transfer_buffer;
+ struct keyspan_usa67_portStatusMessage *msg;
+ struct usb_serial *serial;
+ struct usb_serial_port *port;
+ struct keyspan_port_private *p_priv;
+ int old_dcd_state;
+ int status = urb->status;
+
+ dbg ("%s", __FUNCTION__);
+
+ serial = urb->context;
+
+ if (status) {
+ dbg("%s - nonzero status: %x", __FUNCTION__, status);
+ return;
+ }
+
+ if (urb->actual_length != sizeof(struct keyspan_usa67_portStatusMessage)) {
+ dbg("%s - bad length %d", __FUNCTION__, urb->actual_length);
+ return;
+ }
+
+
+ /* Now do something useful with the data */
+ msg = (struct keyspan_usa67_portStatusMessage *)data;
+
+ /* Check port number from message and retrieve private data */
+ if (msg->port >= serial->num_ports) {
+ dbg ("%s - Unexpected port number %d", __FUNCTION__, msg->port);
+ return;
+ }
+
+ port = serial->port[msg->port];
+ p_priv = usb_get_serial_port_data(port);
+
+ /* Update handshaking pin state information */
+ old_dcd_state = p_priv->dcd_state;
+ p_priv->cts_state = ((msg->hskia_cts) ? 1 : 0);
+ p_priv->dcd_state = ((msg->gpia_dcd) ? 1 : 0);
+
+ if (port->tty && !C_CLOCAL(port->tty)
+ && old_dcd_state != p_priv->dcd_state) {
+ if (old_dcd_state)
+ tty_hangup(port->tty);
+ /* else */
+ /* wake_up_interruptible(&p_priv->open_wait); */
+ }
+
+ /* Resubmit urb so we continue receiving */
+ urb->dev = serial->dev;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __FUNCTION__, err);
+}
+
+static void usa67_glocont_callback(struct urb *urb)
+{
+ struct usb_serial *serial;
+ struct usb_serial_port *port;
+ struct keyspan_port_private *p_priv;
+ int i;
+
+ dbg ("%s", __FUNCTION__);
+
+ serial = urb->context;
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ p_priv = usb_get_serial_port_data(port);
+
+ if (p_priv->resend_cont) {
+ dbg ("%s - sending setup", __FUNCTION__);
+ keyspan_usa67_send_setup(serial, port,
+ p_priv->resend_cont - 1);
+ break;
+ }
+ }
+}
+
static int keyspan_write_room (struct usb_serial_port *port)
{
struct keyspan_port_private *p_priv;
@@ -1311,6 +1485,11 @@ static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint,
return NULL;
}
+ if (endpoint == 0) {
+ /* control EP filled in when used */
+ return urb;
+ }
+
ep_desc = find_ep(serial, endpoint);
if (!ep_desc) {
/* leak the urb, something's wrong and the callers don't care */
@@ -1380,6 +1559,14 @@ static struct callbacks {
.outdat_callback = usa2x_outdat_callback,
.inack_callback = usa28_inack_callback,
.outcont_callback = usa90_outcont_callback,
+ }, {
+ /* msg_usa67 callbacks */
+ .instat_callback = usa67_instat_callback,
+ .glocont_callback = usa67_glocont_callback,
+ .indat_callback = usa26_indat_callback,
+ .outdat_callback = usa2x_outdat_callback,
+ .inack_callback = usa26_inack_callback,
+ .outcont_callback = usa26_outcont_callback,
}
};
@@ -1410,6 +1597,11 @@ static void keyspan_setup_urbs(struct usb_serial *serial)
serial, s_priv->instat_buf, INSTAT_BUFLEN,
cback->instat_callback);
+ s_priv->indat_urb = keyspan_setup_urb
+ (serial, d_details->indat_endpoint, USB_DIR_IN,
+ serial, s_priv->indat_buf, INDAT49W_BUFLEN,
+ usa49wg_indat_callback);
+
s_priv->glocont_urb = keyspan_setup_urb
(serial, d_details->glocont_endpoint, USB_DIR_OUT,
serial, s_priv->glocont_buf, GLOCONT_BUFLEN,
@@ -1685,8 +1877,8 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
}
/* Save reset port val for resend.
- Don't overwrite resend for close condition. */
- if (p_priv->resend_cont != 3)
+ Don't overwrite resend for open/close condition. */
+ if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
/* dbg ("%s - already writing", __FUNCTION__); */
@@ -1836,8 +2028,8 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
}
/* Save reset port val for resend.
- Don't overwrite resend for close condition. */
- if (p_priv->resend_cont != 3)
+ Don't overwrite resend for open/close condition. */
+ if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
if (this_urb->status == -EINPROGRESS) {
dbg ("%s already writing", __FUNCTION__);
@@ -1940,11 +2132,11 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port)
{
- struct keyspan_usa49_portControlMessage msg;
+ struct keyspan_usa49_portControlMessage msg;
+ struct usb_ctrlrequest *dr = NULL;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
const struct keyspan_device_details *d_details;
- int glocont_urb;
struct urb *this_urb;
int err, device_port;
@@ -1954,10 +2146,9 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
p_priv = usb_get_serial_port_data(port);
d_details = s_priv->device_details;
- glocont_urb = d_details->glocont_endpoint;
this_urb = s_priv->glocont_urb;
- /* Work out which port within the device is being setup */
+ /* Work out which port within the device is being setup */
device_port = port->number - port->serial->minor;
dbg("%s - endpoint %d port %d (%d)",__FUNCTION__, usb_pipeendpoint(this_urb->pipe), port->number, device_port);
@@ -1969,9 +2160,10 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
}
/* Save reset port val for resend.
- Don't overwrite resend for close condition. */
- if (p_priv->resend_cont != 3)
+ Don't overwrite resend for open/close condition. */
+ if ((reset_port + 1) > p_priv->resend_cont)
p_priv->resend_cont = reset_port + 1;
+
if (this_urb->status == -EINPROGRESS) {
/* dbg ("%s - already writing", __FUNCTION__); */
mdelay(5);
@@ -2083,20 +2275,39 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
msg.dtr = p_priv->dtr_state;
p_priv->resend_cont = 0;
- memcpy (this_urb->transfer_buffer, &msg, sizeof(msg));
+
+ /* if the device is a 49wg, we send the control message on USB control EP 0 */
+
+ if (d_details->product_id == keyspan_usa49wg_product_id) {
+ dr = (void *)(s_priv->ctrl_buf);
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT;
+ dr->bRequest = 0xB0; /* 49wg control message */
+ dr->wValue = 0;
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(sizeof(msg));
+
+ memcpy (s_priv->glocont_buf, &msg, sizeof(msg));
+
+ usb_fill_control_urb(this_urb, serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ (unsigned char *)dr, s_priv->glocont_buf, sizeof(msg),
+ usa49_glocont_callback, serial);
+
+ } else {
+ memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
- /* send the data out the device on control endpoint */
- this_urb->transfer_buffer_length = sizeof(msg);
+ /* send the data out the device on control endpoint */
+ this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
+ this_urb->dev = serial->dev;
+ }
if ((err = usb_submit_urb(this_urb, GFP_ATOMIC)) != 0) {
dbg("%s - usb_submit_urb(setup) failed (%d)", __FUNCTION__, err);
}
#if 0
else {
dbg("%s - usb_submit_urb(%d) OK %d bytes (end %d)", __FUNCTION__,
- outcont_urb, this_urb->transfer_buffer_length,
- usb_pipeendpoint(this_urb->pipe));
+ outcont_urb, this_urb->transfer_buffer_length,
+ usb_pipeendpoint(this_urb->pipe));
}
#endif
@@ -2241,6 +2452,154 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
return (0);
}
+static int keyspan_usa67_send_setup(struct usb_serial *serial,
+ struct usb_serial_port *port,
+ int reset_port)
+{
+ struct keyspan_usa67_portControlMessage msg;
+ struct keyspan_serial_private *s_priv;
+ struct keyspan_port_private *p_priv;
+ const struct keyspan_device_details *d_details;
+ struct urb *this_urb;
+ int err, device_port;
+
+ dbg ("%s", __FUNCTION__);
+
+ s_priv = usb_get_serial_data(serial);
+ p_priv = usb_get_serial_port_data(port);
+ d_details = s_priv->device_details;
+
+ this_urb = s_priv->glocont_urb;
+
+ /* Work out which port within the device is being setup */
+ device_port = port->number - port->serial->minor;
+
+ /* Make sure we have an urb then send the message */
+ if (this_urb == NULL) {
+ dbg("%s - oops no urb for port %d.", __FUNCTION__,
+ port->number);
+ return -1;
+ }
+
+ /* Save reset port val for resend.
+ Don't overwrite resend for open/close condition. */
+ if ((reset_port + 1) > p_priv->resend_cont)
+ p_priv->resend_cont = reset_port + 1;
+ if (this_urb->status == -EINPROGRESS) {
+ /* dbg ("%s - already writing", __FUNCTION__); */
+ mdelay(5);
+ return(-1);
+ }
+
+ memset(&msg, 0, sizeof(struct keyspan_usa67_portControlMessage));
+
+ msg.port = device_port;
+
+ /* Only set baud rate if it's changed */
+ if (p_priv->old_baud != p_priv->baud) {
+ p_priv->old_baud = p_priv->baud;
+ msg.setClocking = 0xff;
+ if (d_details->calculate_baud_rate
+ (p_priv->baud, d_details->baudclk, &msg.baudHi,
+ &msg.baudLo, &msg.prescaler, device_port) == KEYSPAN_INVALID_BAUD_RATE ) {
+ dbg("%s - Invalid baud rate %d requested, using 9600.", __FUNCTION__,
+ p_priv->baud);
+ msg.baudLo = 0;
+ msg.baudHi = 125; /* Values for 9600 baud */
+ msg.prescaler = 10;
+ }
+ msg.setPrescaler = 0xff;
+ }
+
+ msg.lcr = (p_priv->cflag & CSTOPB) ? STOPBITS_678_2 : STOPBITS_5678_1;
+ switch (p_priv->cflag & CSIZE) {
+ case CS5:
+ msg.lcr |= USA_DATABITS_5;
+ break;
+ case CS6:
+ msg.lcr |= USA_DATABITS_6;
+ break;
+ case CS7:
+ msg.lcr |= USA_DATABITS_7;
+ break;
+ case CS8:
+ msg.lcr |= USA_DATABITS_8;
+ break;
+ }
+ if (p_priv->cflag & PARENB) {
+ /* note USA_PARITY_NONE == 0 */
+ msg.lcr |= (p_priv->cflag & PARODD)?
+ USA_PARITY_ODD: USA_PARITY_EVEN;
+ }
+ msg.setLcr = 0xff;
+
+ msg.ctsFlowControl = (p_priv->flow_control == flow_cts);
+ msg.xonFlowControl = 0;
+ msg.setFlowControl = 0xff;
+ msg.forwardingLength = 16;
+ msg.xonChar = 17;
+ msg.xoffChar = 19;
+
+ if (reset_port == 1) {
+ /* Opening port */
+ msg._txOn = 1;
+ msg._txOff = 0;
+ msg.txFlush = 0;
+ msg.txBreak = 0;
+ msg.rxOn = 1;
+ msg.rxOff = 0;
+ msg.rxFlush = 1;
+ msg.rxForward = 0;
+ msg.returnStatus = 0;
+ msg.resetDataToggle = 0xff;
+ } else if (reset_port == 2) {
+ /* Closing port */
+ msg._txOn = 0;
+ msg._txOff = 1;
+ msg.txFlush = 0;
+ msg.txBreak = 0;
+ msg.rxOn = 0;
+ msg.rxOff = 1;
+ msg.rxFlush = 1;
+ msg.rxForward = 0;
+ msg.returnStatus = 0;
+ msg.resetDataToggle = 0;
+ } else {
+ /* Sending intermediate configs */
+ msg._txOn = (! p_priv->break_on);
+ msg._txOff = 0;
+ msg.txFlush = 0;
+ msg.txBreak = (p_priv->break_on);
+ msg.rxOn = 0;
+ msg.rxOff = 0;
+ msg.rxFlush = 0;
+ msg.rxForward = 0;
+ msg.returnStatus = 0;
+ msg.resetDataToggle = 0x0;
+ }
+
+ /* Do handshaking outputs */
+ msg.setTxTriState_setRts = 0xff;
+ msg.txTriState_rts = p_priv->rts_state;
+
+ msg.setHskoa_setDtr = 0xff;
+ msg.hskoa_dtr = p_priv->dtr_state;
+
+ p_priv->resend_cont = 0;
+
+ memcpy(this_urb->transfer_buffer, &msg, sizeof(msg));
+
+ /* send the data out the device on control endpoint */
+ this_urb->transfer_buffer_length = sizeof(msg);
+ this_urb->dev = serial->dev;
+
+ err = usb_submit_urb(this_urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - usb_submit_urb(setup) failed (%d)", __FUNCTION__,
+ err);
+ return (0);
+}
+
static void keyspan_send_setup(struct usb_serial_port *port, int reset_port)
{
struct usb_serial *serial = port->serial;
@@ -2265,6 +2624,9 @@ static void keyspan_send_setup(struct usb_serial_port *port, int reset_port)
case msg_usa90:
keyspan_usa90_send_setup(serial, port, reset_port);
break;
+ case msg_usa67:
+ keyspan_usa67_send_setup(serial, port, reset_port);
+ break;
}
}
@@ -2313,9 +2675,19 @@ static int keyspan_startup (struct usb_serial *serial)
keyspan_setup_urbs(serial);
- s_priv->instat_urb->dev = serial->dev;
- if ((err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL)) != 0) {
- dbg("%s - submit instat urb failed %d", __FUNCTION__, err);
+ if (s_priv->instat_urb != NULL) {
+ s_priv->instat_urb->dev = serial->dev;
+ err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL);
+ if (err != 0)
+ dbg("%s - submit instat urb failed %d", __FUNCTION__,
+ err);
+ }
+ if (s_priv->indat_urb != NULL) {
+ s_priv->indat_urb->dev = serial->dev;
+ err = usb_submit_urb(s_priv->indat_urb, GFP_KERNEL);
+ if (err != 0)
+ dbg("%s - submit indat urb failed %d", __FUNCTION__,
+ err);
}
return (0);
@@ -2335,6 +2707,7 @@ static void keyspan_shutdown (struct usb_serial *serial)
/* Stop reading/writing urbs */
stop_urb(s_priv->instat_urb);
stop_urb(s_priv->glocont_urb);
+ stop_urb(s_priv->indat_urb);
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
@@ -2348,6 +2721,7 @@ static void keyspan_shutdown (struct usb_serial *serial)
/* Now free them */
usb_free_urb(s_priv->instat_urb);
+ usb_free_urb(s_priv->indat_urb);
usb_free_urb(s_priv->glocont_urb);
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index c6830cbdc6df..8a0d17401529 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -99,6 +99,10 @@ static int keyspan_usa90_send_setup (struct usb_serial *serial,
struct usb_serial_port *port,
int reset_port);
+static int keyspan_usa67_send_setup (struct usb_serial *serial,
+ struct usb_serial_port *port,
+ int reset_port);
+
/* Struct used for firmware - increased size of data section
to allow Keyspan's 'C' firmware struct to be used unmodified */
struct ezusb_hex_record {
@@ -229,15 +233,17 @@ struct ezusb_hex_record {
#define keyspan_usa28_product_id 0x010f
#define keyspan_usa28x_product_id 0x0110
#define keyspan_usa28xa_product_id 0x0115
+#define keyspan_usa28xb_product_id 0x0110
+#define keyspan_usa28xg_product_id 0x0135
#define keyspan_usa49w_product_id 0x010a
#define keyspan_usa49wlc_product_id 0x012a
-
+#define keyspan_usa49wg_product_id 0x0131
struct keyspan_device_details {
/* product ID value */
int product_id;
- enum {msg_usa26, msg_usa28, msg_usa49, msg_usa90} msg_format;
+ enum {msg_usa26, msg_usa28, msg_usa49, msg_usa90, msg_usa67} msg_format;
/* Number of physical ports */
int num_ports;
@@ -264,6 +270,9 @@ struct keyspan_device_details {
/* Endpoint used for input status */
int instat_endpoint;
+ /* Endpoint used for input data (49WG only) */
+ int indat_endpoint;
+
/* Endpoint used for global control functions */
int glocont_endpoint;
@@ -287,6 +296,7 @@ static const struct keyspan_device_details usa18x_device_details = {
.inack_endpoints = {0x85},
.outcont_endpoints = {0x05},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA18X_BAUDCLK,
@@ -303,6 +313,7 @@ static const struct keyspan_device_details usa19_device_details = {
.inack_endpoints = {0x83},
.outcont_endpoints = {0x03},
.instat_endpoint = 0x84,
+ .indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa19_calc_baud,
.baudclk = KEYSPAN_USA19_BAUDCLK,
@@ -319,6 +330,7 @@ static const struct keyspan_device_details usa19qi_device_details = {
.inack_endpoints = {0x83},
.outcont_endpoints = {0x03},
.instat_endpoint = 0x84,
+ .indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa28_calc_baud,
.baudclk = KEYSPAN_USA19_BAUDCLK,
@@ -335,6 +347,7 @@ static const struct keyspan_device_details mpr_device_details = {
.inack_endpoints = {0x83},
.outcont_endpoints = {0x03},
.instat_endpoint = 0x84,
+ .indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa28_calc_baud,
.baudclk = KEYSPAN_USA19_BAUDCLK,
@@ -351,6 +364,7 @@ static const struct keyspan_device_details usa19qw_device_details = {
.inack_endpoints = {0x85},
.outcont_endpoints = {0x05},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
@@ -367,6 +381,7 @@ static const struct keyspan_device_details usa19w_device_details = {
.inack_endpoints = {0x85},
.outcont_endpoints = {0x05},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
@@ -383,6 +398,7 @@ static const struct keyspan_device_details usa19hs_device_details = {
.inack_endpoints = {-1},
.outcont_endpoints = {0x02},
.instat_endpoint = 0x82,
+ .indat_endpoint = -1,
.glocont_endpoint = -1,
.calculate_baud_rate = keyspan_usa19hs_calc_baud,
.baudclk = KEYSPAN_USA19HS_BAUDCLK,
@@ -399,6 +415,7 @@ static const struct keyspan_device_details usa28_device_details = {
.inack_endpoints = {0x85, 0x86},
.outcont_endpoints = {0x05, 0x06},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa28_calc_baud,
.baudclk = KEYSPAN_USA28_BAUDCLK,
@@ -415,6 +432,7 @@ static const struct keyspan_device_details usa28x_device_details = {
.inack_endpoints = {0x85, 0x86},
.outcont_endpoints = {0x05, 0x06},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA28X_BAUDCLK,
@@ -431,11 +449,28 @@ static const struct keyspan_device_details usa28xa_device_details = {
.inack_endpoints = {0x85, 0x86},
.outcont_endpoints = {0x05, 0x06},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA28X_BAUDCLK,
};
+static const struct keyspan_device_details usa28xg_device_details = {
+ .product_id = keyspan_usa28xg_product_id,
+ .msg_format = msg_usa67,
+ .num_ports = 2,
+ .indat_endp_flip = 0,
+ .outdat_endp_flip = 0,
+ .indat_endpoints = {0x84, 0x88},
+ .outdat_endpoints = {0x02, 0x06},
+ .inack_endpoints = {-1, -1},
+ .outcont_endpoints = {-1, -1},
+ .instat_endpoint = 0x81,
+ .indat_endpoint = -1,
+ .glocont_endpoint = 0x01,
+ .calculate_baud_rate = keyspan_usa19w_calc_baud,
+ .baudclk = KEYSPAN_USA28X_BAUDCLK,
+};
/* We don't need a separate entry for the usa28xb as it appears as a 28x anyway */
static const struct keyspan_device_details usa49w_device_details = {
@@ -449,6 +484,7 @@ static const struct keyspan_device_details usa49w_device_details = {
.inack_endpoints = {-1, -1, -1, -1},
.outcont_endpoints = {-1, -1, -1, -1},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA49W_BAUDCLK,
@@ -465,11 +501,29 @@ static const struct keyspan_device_details usa49wlc_device_details = {
.inack_endpoints = {-1, -1, -1, -1},
.outcont_endpoints = {-1, -1, -1, -1},
.instat_endpoint = 0x87,
+ .indat_endpoint = -1,
.glocont_endpoint = 0x07,
.calculate_baud_rate = keyspan_usa19w_calc_baud,
.baudclk = KEYSPAN_USA19W_BAUDCLK,
};
+static const struct keyspan_device_details usa49wg_device_details = {
+ .product_id = keyspan_usa49wg_product_id,
+ .msg_format = msg_usa49,
+ .num_ports = 4,
+ .indat_endp_flip = 0,
+ .outdat_endp_flip = 0,
+ .indat_endpoints = {-1, -1, -1, -1}, /* single 'global' data in EP */
+ .outdat_endpoints = {0x01, 0x02, 0x04, 0x06},
+ .inack_endpoints = {-1, -1, -1, -1},
+ .outcont_endpoints = {-1, -1, -1, -1},
+ .instat_endpoint = 0x81,
+ .indat_endpoint = 0x88,
+ .glocont_endpoint = 0x00, /* uses control EP */
+ .calculate_baud_rate = keyspan_usa19w_calc_baud,
+ .baudclk = KEYSPAN_USA19W_BAUDCLK,
+};
+
static const struct keyspan_device_details *keyspan_devices[] = {
&usa18x_device_details,
&usa19_device_details,
@@ -481,9 +535,11 @@ static const struct keyspan_device_details *keyspan_devices[] = {
&usa28_device_details,
&usa28x_device_details,
&usa28xa_device_details,
+ &usa28xg_device_details,
/* 28xb not required as it renumerates as a 28x */
&usa49w_device_details,
&usa49wlc_device_details,
+ &usa49wg_device_details,
NULL,
};
@@ -510,8 +566,11 @@ static struct usb_device_id keyspan_ids_combined[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
+ { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_product_id) },
+ { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xg_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
+ { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
{ } /* Terminating entry */
};
@@ -557,12 +616,15 @@ static struct usb_device_id keyspan_2port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
+ { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_product_id) },
+ { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xg_product_id) },
{ } /* Terminating entry */
};
static struct usb_device_id keyspan_4port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
+ { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
{ } /* Terminating entry */
};
@@ -573,7 +635,6 @@ static struct usb_serial_driver keyspan_pre_device = {
.name = "keyspan_no_firm",
},
.description = "Keyspan - (without firmware)",
- .usb_driver = &keyspan_driver,
.id_table = keyspan_pre_ids,
.num_interrupt_in = NUM_DONT_CARE,
.num_bulk_in = NUM_DONT_CARE,
@@ -588,7 +649,6 @@ static struct usb_serial_driver keyspan_1port_device = {
.name = "keyspan_1",
},
.description = "Keyspan 1 port adapter",
- .usb_driver = &keyspan_driver,
.id_table = keyspan_1port_ids,
.num_interrupt_in = NUM_DONT_CARE,
.num_bulk_in = NUM_DONT_CARE,
@@ -616,7 +676,6 @@ static struct usb_serial_driver keyspan_2port_device = {
.name = "keyspan_2",
},
.description = "Keyspan 2 port adapter",
- .usb_driver = &keyspan_driver,
.id_table = keyspan_2port_ids,
.num_interrupt_in = NUM_DONT_CARE,
.num_bulk_in = NUM_DONT_CARE,
@@ -644,11 +703,10 @@ static struct usb_serial_driver keyspan_4port_device = {
.name = "keyspan_4",
},
.description = "Keyspan 4 port adapter",
- .usb_driver = &keyspan_driver,
.id_table = keyspan_4port_ids,
.num_interrupt_in = NUM_DONT_CARE,
- .num_bulk_in = 5,
- .num_bulk_out = 5,
+ .num_bulk_in = NUM_DONT_CARE,
+ .num_bulk_out = NUM_DONT_CARE,
.num_ports = 4,
.open = keyspan_open,
.close = keyspan_close,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index dd0b66a6ed5d..be9ac20a8f10 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -218,11 +218,12 @@ static void keyspan_pda_rx_interrupt (struct urb *urb)
struct tty_struct *tty = port->tty;
unsigned char *data = urb->transfer_buffer;
int i;
- int status;
+ int retval;
+ int status = urb->status;
struct keyspan_pda_private *priv;
priv = usb_get_serial_port_data(port);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -230,10 +231,12 @@ static void keyspan_pda_rx_interrupt (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero urb status received: %d",
+ __FUNCTION__, status);
goto exit;
}
@@ -268,10 +271,10 @@ static void keyspan_pda_rx_interrupt (struct urb *urb)
}
exit:
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb (urb, GFP_ATOMIC);
+ if (retval)
err ("%s - usb_submit_urb failed with result %d",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h
new file mode 100644
index 000000000000..20fa3e2f7187
--- /dev/null
+++ b/drivers/usb/serial/keyspan_usa67msg.h
@@ -0,0 +1,254 @@
+/*
+ usa67msg.h
+
+ Copyright (c) 1998-2007 InnoSys Incorporated. All Rights Reserved
+ This file is available under a BSD-style copyright
+
+ Keyspan USB Async Firmware to run on Anchor FX1
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions of source code must retain this licence text
+ without modification, this list of conditions, and the following
+ disclaimer. The following copyright notice must appear immediately at
+ the beginning of all source files:
+
+ Copyright (c) 1998-2007 InnoSys Incorporated. All Rights Reserved
+
+ This file is available under a BSD-style copyright
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. The name of InnoSys Incorporated may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY INNOSYS CORP. ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGE.
+
+ Fourth revision: This message format supports the USA28XG
+
+ Buffer formats for RX/TX data messages are not defined by
+ a structure, but are described here:
+
+ USB OUT (host -> USAxx, transmit) messages contain a
+ REQUEST_ACK indicator (set to 0xff to request an ACK at the
+ completion of transmit; 0x00 otherwise), followed by data:
+
+ RQSTACK DAT DAT DAT ...
+
+ with a total data length of up to 63.
+
+ USB IN (USAxx -> host, receive) messages begin with a status
+ byte in which the 0x80 bit is either:
+
+ (a) 0x80 bit clear
+ indicates that the bytes following it are all data
+ bytes:
+
+ STAT DATA DATA DATA DATA DATA ...
+
+ for a total of up to 63 DATA bytes,
+
+ or:
+
+ (b) 0x80 bit set
+ indicates that the bytes following it alternate data and
+ status bytes:
+
+ STAT DATA STAT DATA STAT DATA STAT DATA ...
+
+ for a total of up to 32 DATA bytes.
+
+ The valid bits in the STAT bytes are:
+
+ OVERRUN 0x02
+ PARITY 0x04
+ FRAMING 0x08
+ BREAK 0x10
+
+ Notes:
+
+ (1) The OVERRUN bit can appear in either (a) or (b) format
+ messages, but the PARITY/FRAMING/BREAK bits
+ only appear in (b) format messages.
+ (2) For the host to determine the exact point at which the
+ overrun occurred (to identify the point in the data
+ stream at which the data was lost), it needs to count
+ 128 characters, starting at the first character of the
+ message in which OVERRUN was reported; the lost character(s)
+ would have been received between the 128th and 129th
+ characters.
+ (3) An RX data message in which the first byte has 0x80 clear
+ serves as a "break off" indicator.
+
+ revision history:
+
+ 1999feb10 add reportHskiaChanges to allow us to ignore them
+ 1999feb10 add txAckThreshold for fast+loose throughput enhancement
+ 1999mar30 beef up support for RX error reporting
+ 1999apr14 add resetDataToggle to control message
+ 2000jan04 merge with usa17msg.h
+ 2000jun01 add extended BSD-style copyright text
+ 2001jul05 change message format to improve OVERRUN case
+ 2002jun05 update copyright date, improve comments
+ 2006feb06 modify for FX1 chip
+
+*/
+
+#ifndef __USA67MSG__
+#define __USA67MSG__
+
+
+// all things called "ControlMessage" are sent on the 'control' endpoint
+
+typedef struct keyspan_usa67_portControlMessage
+{
+ u8 port; // 0 or 1 (selects port)
+ /*
+ there are three types of "commands" sent in the control message:
+
+ 1. configuration changes which must be requested by setting
+ the corresponding "set" flag (and should only be requested
+ when necessary, to reduce overhead on the device):
+ */
+ u8 setClocking, // host requests baud rate be set
+ baudLo, // host does baud divisor calculation
+ baudHi, // baudHi is only used for first port (gives lower rates)
+ externalClock_txClocking,
+ // 0=internal, other=external
+
+ setLcr, // host requests lcr be set
+ lcr, // use PARITY, STOPBITS, DATABITS below
+
+ setFlowControl, // host requests flow control be set
+ ctsFlowControl, // 1=use CTS flow control, 0=don't
+ xonFlowControl, // 1=use XON/XOFF flow control, 0=don't
+ xonChar, // specified in current character format
+ xoffChar, // specified in current character format
+
+ setTxTriState_setRts,
+ // host requests TX tri-state be set
+ txTriState_rts, // 1=active (normal), 0=tristate (off)
+
+ setHskoa_setDtr,
+ // host requests HSKOA output be set
+ hskoa_dtr, // 1=on, 0=off
+
+ setPrescaler, // host requests prescaler be set (default: 13)
+ prescaler; // specified as N/8; values 8-ff are valid
+ // must be set any time internal baud rate is set;
+ // must not be set when external clocking is used
+
+ /*
+ 3. configuration data which is simply used as is (no overhead,
+ but must be specified correctly in every host message).
+ */
+ u8 forwardingLength, // forward when this number of chars available
+ reportHskiaChanges_dsrFlowControl,
+ // 1=normal; 0=ignore external clock
+ // 1=use DSR flow control, 0=don't
+ txAckThreshold, // 0=not allowed, 1=normal, 2-255 deliver ACK faster
+ loopbackMode; // 0=no loopback, 1=loopback enabled
+
+ /*
+ 4. commands which are flags only; these are processed in order
+ (so that, e.g., if both _txOn and _txOff flags are set, the
+ port ends in a TX_OFF state); any non-zero value is respected
+ */
+ u8 _txOn, // enable transmitting (and continue if there's data)
+ _txOff, // stop transmitting
+ txFlush, // toss outbound data
+ txBreak, // turn on break (cleared by _txOn)
+ rxOn, // turn on receiver
+ rxOff, // turn off receiver
+ rxFlush, // toss inbound data
+ rxForward, // forward all inbound data, NOW (as if fwdLen==1)
+ returnStatus, // return current status (even if it hasn't changed)
+ resetDataToggle;// reset data toggle state to DATA0
+
+} keyspan_usa67_portControlMessage;
+
+// defines for bits in lcr
+#define USA_DATABITS_5 0x00
+#define USA_DATABITS_6 0x01
+#define USA_DATABITS_7 0x02
+#define USA_DATABITS_8 0x03
+#define STOPBITS_5678_1 0x00 // 1 stop bit for all byte sizes
+#define STOPBITS_5_1p5 0x04 // 1.5 stop bits for 5-bit byte
+#define STOPBITS_678_2 0x04 // 2 stop bits for 6/7/8-bit byte
+#define USA_PARITY_NONE 0x00
+#define USA_PARITY_ODD 0x08
+#define USA_PARITY_EVEN 0x18
+#define PARITY_1 0x28
+#define PARITY_0 0x38
+
+// all things called "StatusMessage" are sent on the status endpoint
+
+typedef struct keyspan_usa67_portStatusMessage // one for each port
+{
+ u8 port, // 0=first, 1=second, other=see below
+ hskia_cts, // reports HSKIA pin
+ gpia_dcd, // reports GPIA pin
+ _txOff, // port has been disabled (by host)
+ _txXoff, // port is in XOFF state (either host or RX XOFF)
+ txAck, // indicates a TX message acknowledgement
+ rxEnabled, // as configured by rxOn/rxOff 1=on, 0=off
+ controlResponse;// 1=a control message has been processed
+} keyspan_usa67_portStatusMessage;
+
+// bits in RX data message when STAT byte is included
+#define RXERROR_OVERRUN 0x02
+#define RXERROR_PARITY 0x04
+#define RXERROR_FRAMING 0x08
+#define RXERROR_BREAK 0x10
+
+typedef struct keyspan_usa67_globalControlMessage
+{
+ u8 port, // 3
+ sendGlobalStatus, // 2=request for two status responses
+ resetStatusToggle, // 1=reset global status toggle
+ resetStatusCount; // a cycling value
+} keyspan_usa67_globalControlMessage;
+
+typedef struct keyspan_usa67_globalStatusMessage
+{
+ u8 port, // 3
+ sendGlobalStatus, // from request, decremented
+ resetStatusCount; // as in request
+} keyspan_usa67_globalStatusMessage;
+
+typedef struct keyspan_usa67_globalDebugMessage
+{
+ u8 port, // 2
+ a,
+ b,
+ c,
+ d;
+} keyspan_usa67_globalDebugMessage;
+
+// ie: the maximum length of an FX1 endpoint buffer
+#define MAX_DATA_LEN 64
+
+// update status approx. 60 times a second (16.6666 ms)
+#define STATUS_UPDATE_INTERVAL 16
+
+// status rationing tuning value (each port gets checked each n ms)
+#define STATUS_RATION 10
+
+#endif
+
+
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 7b085f334ceb..5a4127e62c4a 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -567,12 +567,13 @@ exit:
static void klsi_105_write_bulk_callback ( struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
-
- if (urb->status) {
+
+ if (status) {
dbg("%s - nonzero write bulk status received: %d", __FUNCTION__,
- urb->status);
+ status);
return;
}
@@ -631,16 +632,17 @@ static void klsi_105_read_bulk_callback (struct urb *urb)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
int rc;
+ int status = urb->status;
- dbg("%s - port %d", __FUNCTION__, port->number);
+ dbg("%s - port %d", __FUNCTION__, port->number);
/* The urb might have been killed. */
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__,
- urb->status);
- return;
- }
-
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d", __FUNCTION__,
+ status);
+ return;
+ }
+
/* The data received is again preceded by a length double-byte in LSB-
* first order (see klsi_105_write() )
*/
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 0683b51f0932..02a86dbc0e97 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -358,24 +358,26 @@ static void kobil_close (struct usb_serial_port *port, struct file *filp)
}
-static void kobil_read_int_callback( struct urb *purb)
+static void kobil_read_int_callback(struct urb *urb)
{
int result;
- struct usb_serial_port *port = (struct usb_serial_port *) purb->context;
+ struct usb_serial_port *port = urb->context;
struct tty_struct *tty;
- unsigned char *data = purb->transfer_buffer;
+ unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
// char *dbg_data;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (purb->status) {
- dbg("%s - port %d Read int status not zero: %d", __FUNCTION__, port->number, purb->status);
+ if (status) {
+ dbg("%s - port %d Read int status not zero: %d",
+ __FUNCTION__, port->number, status);
return;
}
-
- tty = port->tty;
- if (purb->actual_length) {
-
+
+ tty = port->tty;
+ if (urb->actual_length) {
+
// BEGIN DEBUG
/*
dbg_data = kzalloc((3 * purb->actual_length + 10) * sizeof(char), GFP_KERNEL);
@@ -390,15 +392,15 @@ static void kobil_read_int_callback( struct urb *purb)
*/
// END DEBUG
- tty_buffer_request_room(tty, purb->actual_length);
- tty_insert_flip_string(tty, data, purb->actual_length);
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
// someone sets the dev to 0 if the close method has been called
port->interrupt_in_urb->dev = port->serial->dev;
- result = usb_submit_urb( port->interrupt_in_urb, GFP_ATOMIC );
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dbg("%s - port %d Send read URB returns: %i", __FUNCTION__, port->number, result);
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 3db1adc25f84..2a3fabcf5186 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -81,7 +81,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "z2.0" /* Linux in-kernel version */
+#define DRIVER_VERSION "z2.1" /* Linux in-kernel version */
#define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>"
#define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver"
@@ -110,6 +110,10 @@ static int mct_u232_tiocmget (struct usb_serial_port *port,
static int mct_u232_tiocmset (struct usb_serial_port *port,
struct file *file, unsigned int set,
unsigned int clear);
+static void mct_u232_throttle (struct usb_serial_port *port);
+static void mct_u232_unthrottle (struct usb_serial_port *port);
+
+
/*
* All of the device info needed for the MCT USB-RS232 converter.
*/
@@ -145,6 +149,8 @@ static struct usb_serial_driver mct_u232_device = {
.num_ports = 1,
.open = mct_u232_open,
.close = mct_u232_close,
+ .throttle = mct_u232_throttle,
+ .unthrottle = mct_u232_unthrottle,
.read_int_callback = mct_u232_read_int_callback,
.ioctl = mct_u232_ioctl,
.set_termios = mct_u232_set_termios,
@@ -162,8 +168,11 @@ struct mct_u232_private {
unsigned char last_lcr; /* Line Control Register */
unsigned char last_lsr; /* Line Status Register */
unsigned char last_msr; /* Modem Status Register */
+ unsigned int rx_flags; /* Throttling flags */
};
+#define THROTTLED 0x01
+
/*
* Handle vendor specific USB requests
*/
@@ -216,11 +225,13 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, int value)
}
}
-static int mct_u232_set_baud_rate(struct usb_serial *serial, int value)
+static int mct_u232_set_baud_rate(struct usb_serial *serial, struct usb_serial_port *port,
+ int value)
{
__le32 divisor;
int rc;
unsigned char zero_byte = 0;
+ unsigned char cts_enable_byte = 0;
divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value));
@@ -238,10 +249,17 @@ static int mct_u232_set_baud_rate(struct usb_serial *serial, int value)
'baud rate change' message. The actual functionality of the
request codes in these messages is not fully understood but these
particular codes are never seen in any operation besides a baud
- rate change. Both of these messages send a single byte of data
- whose value is always zero. The second of these two extra messages
- is required in order for data to be properly written to an RS-232
- device which does not assert the 'CTS' signal. */
+ rate change. Both of these messages send a single byte of data.
+ In the first message, the value of this byte is always zero.
+
+ The second message has been determined experimentally to control
+ whether data will be transmitted to a device which is not asserting
+ the 'CTS' signal. If the second message's data byte is zero, data
+ will be transmitted even if 'CTS' is not asserted (i.e. no hardware
+ flow control). If the second message's data byte is nonzero (a value
+ of 1 is used by this driver), data will not be transmitted to a device
+ which is not asserting 'CTS'.
+ */
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_UNKNOWN1_REQUEST,
@@ -252,14 +270,19 @@ static int mct_u232_set_baud_rate(struct usb_serial *serial, int value)
err("Sending USB device request code %d failed (error = %d)",
MCT_U232_SET_UNKNOWN1_REQUEST, rc);
+ if (port && C_CRTSCTS(port->tty)) {
+ cts_enable_byte = 1;
+ }
+
+ dbg("set_baud_rate: send second control message, data = %02X", cts_enable_byte);
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- MCT_U232_SET_UNKNOWN2_REQUEST,
+ MCT_U232_SET_CTS_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &zero_byte, MCT_U232_SET_UNKNOWN2_SIZE,
+ 0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE,
WDR_TIMEOUT);
if (rc < 0)
- err("Sending USB device request code %d failed (error = %d)",
- MCT_U232_SET_UNKNOWN2_REQUEST, rc);
+ err("Sending USB device request code %d failed (error = %d)",
+ MCT_U232_SET_CTS_REQUEST, rc);
return rc;
} /* mct_u232_set_baud_rate */
@@ -458,8 +481,25 @@ error:
static void mct_u232_close (struct usb_serial_port *port, struct file *filp)
{
+ unsigned int c_cflag;
+ unsigned long flags;
+ unsigned int control_state;
+ struct mct_u232_private *priv = usb_get_serial_port_data(port);
dbg("%s port %d", __FUNCTION__, port->number);
+ if (port->tty) {
+ c_cflag = port->tty->termios->c_cflag;
+ if (c_cflag & HUPCL) {
+ /* drop DTR and RTS */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+ control_state = priv->control_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ mct_u232_set_modem_ctrl(port->serial, control_state);
+ }
+ }
+
+
if (port->serial->dev) {
/* shutdown our urbs */
usb_kill_urb(port->write_urb);
@@ -476,10 +516,11 @@ static void mct_u232_read_int_callback (struct urb *urb)
struct usb_serial *serial = port->serial;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- int status;
+ int retval;
+ int status = urb->status;
unsigned long flags;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -487,10 +528,12 @@ static void mct_u232_read_int_callback (struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down with status: %d",
+ __FUNCTION__, status);
return;
default:
- dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+ dbg("%s - nonzero urb status received: %d",
+ __FUNCTION__, status);
goto exit;
}
@@ -554,10 +597,10 @@ static void mct_u232_read_int_callback (struct urb *urb)
#endif
spin_unlock_irqrestore(&priv->lock, flags);
exit:
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb (urb, GFP_ATOMIC);
+ if (retval)
err ("%s - usb_submit_urb failed with result %d",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
} /* mct_u232_read_int_callback */
static void mct_u232_set_termios (struct usb_serial_port *port,
@@ -565,11 +608,10 @@ static void mct_u232_set_termios (struct usb_serial_port *port,
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
- unsigned int iflag = port->tty->termios->c_iflag;
unsigned int cflag = port->tty->termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
unsigned long flags;
- unsigned int control_state, new_state;
+ unsigned int control_state;
unsigned char last_lcr;
/* get a local copy of the current port settings */
@@ -585,18 +627,14 @@ static void mct_u232_set_termios (struct usb_serial_port *port,
* Premature optimization is the root of all evil.
*/
- /* reassert DTR and (maybe) RTS on transition from B0 */
+ /* reassert DTR and RTS on transition from B0 */
if ((old_cflag & CBAUD) == B0) {
dbg("%s: baud was B0", __FUNCTION__);
- control_state |= TIOCM_DTR;
- /* don't set RTS if using hardware flow control */
- if (!(old_cflag & CRTSCTS)) {
- control_state |= TIOCM_RTS;
- }
+ control_state |= TIOCM_DTR | TIOCM_RTS;
mct_u232_set_modem_ctrl(serial, control_state);
}
- mct_u232_set_baud_rate(serial, cflag & CBAUD);
+ mct_u232_set_baud_rate(serial, port, cflag & CBAUD);
if ((cflag & CBAUD) == B0 ) {
dbg("%s: baud is B0", __FUNCTION__);
@@ -638,21 +676,6 @@ static void mct_u232_set_termios (struct usb_serial_port *port,
mct_u232_set_line_ctrl(serial, last_lcr);
- /*
- * Set flow control: well, I do not really now how to handle DTR/RTS.
- * Just do what we have seen with SniffUSB on Win98.
- */
- /* Drop DTR/RTS if no flow control otherwise assert */
- new_state = control_state;
- if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS))
- new_state |= TIOCM_DTR | TIOCM_RTS;
- else
- new_state &= ~(TIOCM_DTR | TIOCM_RTS);
- if (new_state != control_state) {
- mct_u232_set_modem_ctrl(serial, new_state);
- control_state = new_state;
- }
-
/* save off the modified port settings */
spin_lock_irqsave(&priv->lock, flags);
priv->control_state = control_state;
@@ -747,6 +770,50 @@ static int mct_u232_ioctl (struct usb_serial_port *port, struct file * file,
return 0;
} /* mct_u232_ioctl */
+static void mct_u232_throttle (struct usb_serial_port *port)
+{
+ struct mct_u232_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int control_state;
+ struct tty_struct *tty;
+
+ tty = port->tty;
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->rx_flags |= THROTTLED;
+ if (C_CRTSCTS(tty)) {
+ priv->control_state &= ~TIOCM_RTS;
+ control_state = priv->control_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ (void) mct_u232_set_modem_ctrl(port->serial, control_state);
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+}
+
+
+static void mct_u232_unthrottle (struct usb_serial_port *port)
+{
+ struct mct_u232_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int control_state;
+ struct tty_struct *tty;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ tty = port->tty;
+ spin_lock_irqsave(&priv->lock, flags);
+ if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) {
+ priv->rx_flags &= ~THROTTLED;
+ priv->control_state |= TIOCM_RTS;
+ control_state = priv->control_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ (void) mct_u232_set_modem_ctrl(port->serial, control_state);
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+}
static int __init mct_u232_init (void)
{
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 73dd0d984cd3..a61bac8f224a 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -63,14 +63,15 @@
#define MCT_U232_SET_UNKNOWN1_REQUEST 11 /* Unknown functionality */
#define MCT_U232_SET_UNKNOWN1_SIZE 1
-/* This USB device request code is not well understood. It is transmitted by
- the MCT-supplied Windows driver whenever the baud rate changes.
+/* This USB device request code appears to control whether CTS is required
+ during transmission.
- Without this USB device request, the USB/RS-232 adapter will not write to
- RS-232 devices which do not assert the 'CTS' signal.
+ Sending a zero byte allows data transmission to a device which is not
+ asserting CTS. Sending a '1' byte will cause transmission to be deferred
+ until the device asserts CTS.
*/
-#define MCT_U232_SET_UNKNOWN2_REQUEST 12 /* Unknown functionality */
-#define MCT_U232_SET_UNKNOWN2_SIZE 1
+#define MCT_U232_SET_CTS_REQUEST 12
+#define MCT_U232_SET_CTS_SIZE 1
/*
* Baud rate (divisor)
@@ -439,7 +440,7 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, int value);
* which says "U232-P9" ;-)
*
* The circuit board inside the adaptor contains a Philips PDIUSBD12
- * USB endpoint chip and a Phillips P87C52UBAA microcontroller with
+ * USB endpoint chip and a Philips P87C52UBAA microcontroller with
* embedded UART. Exhaustive documentation for these is available at:
*
* http://www.semiconductors.philips.com/pip/p87c52ubaa
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index b563e2ad8728..01e811becec4 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -9,9 +9,9 @@
* the Free Software Foundation, version 2 of the License.
*
* Developed by:
- * VijayaKumar.G.N. <vijaykumar@aspirecom.net>
- * AjayKumar <ajay@aspirecom.net>
- * Gurudeva.N. <gurudev@aspirecom.net>
+ * Vijaya Kumar <vijaykumar.gn@gmail.com>
+ * Ajay Kumar <naanuajay@yahoo.com>
+ * Gurudeva <ngurudeva@yahoo.com>
*
* Cleaned up from the original by:
* Greg Kroah-Hartman <gregkh@suse.de>
@@ -103,18 +103,14 @@ static void mos7720_interrupt_callback(struct urb *urb)
{
int result;
int length;
+ int status = urb->status;
__u8 *data;
__u8 sp1;
__u8 sp2;
dbg("%s"," : Entering\n");
- if (!urb) {
- dbg("%s","Invalid Pointer !!!!:\n");
- return;
- }
-
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -123,11 +119,11 @@ static void mos7720_interrupt_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __FUNCTION__,
- urb->status);
+ status);
return;
default:
dbg("%s - nonzero urb status received: %d", __FUNCTION__,
- urb->status);
+ status);
goto exit;
}
@@ -198,14 +194,15 @@ exit:
*/
static void mos7720_bulk_in_callback(struct urb *urb)
{
- int status;
+ int retval;
unsigned char *data ;
struct usb_serial_port *port;
struct moschip_port *mos7720_port;
struct tty_struct *tty;
+ int status = urb->status;
- if (urb->status) {
- dbg("nonzero read bulk status received: %d",urb->status);
+ if (status) {
+ dbg("nonzero read bulk status received: %d", status);
return;
}
@@ -236,10 +233,10 @@ static void mos7720_bulk_in_callback(struct urb *urb)
if (port->read_urb->status != -EINPROGRESS) {
port->read_urb->dev = port->serial->dev;
- status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- if (status)
- dbg("usb_submit_urb(read bulk) failed, status = %d",
- status);
+ retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (retval)
+ dbg("usb_submit_urb(read bulk) failed, retval = %d",
+ retval);
}
}
@@ -252,9 +249,10 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
{
struct moschip_port *mos7720_port;
struct tty_struct *tty;
+ int status = urb->status;
- if (urb->status) {
- dbg("nonzero write bulk status received:%d", urb->status);
+ if (status) {
+ dbg("nonzero write bulk status received:%d", status);
return;
}
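A minimal sketch of the pattern these hunks apply across the whole series: urb->status is copied into a local once, before anything that might resubmit or otherwise reuse the URB, and only the local is consulted afterwards. The callback name is invented for illustration.

static void example_callback(struct urb *urb)
{
	int status = urb->status;	/* snapshot before any resubmission */

	if (status) {
		dbg("nonzero urb status received: %d", status);
		return;
	}
	/* ... consume urb->transfer_buffer, possibly resubmit urb ... */
}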
@@ -1235,16 +1233,6 @@ static void mos7720_set_termios(struct usb_serial_port *port,
return;
}
- /* check that they really want us to change something */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(tty->termios->c_iflag) ==
- RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("Nothing to change");
- return;
- }
- }
-
dbg("%s - clfag %08x iflag %08x", __FUNCTION__,
tty->termios->c_cflag,
RELEVANT_IFLAG(tty->termios->c_iflag));
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 36620c651079..f76480f1455d 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -434,15 +434,11 @@ static void mos7840_control_callback(struct urb *urb)
struct moschip_port *mos7840_port;
__u8 regval = 0x0;
int result = 0;
-
- if (!urb) {
- dbg("%s", "Invalid Pointer !!!!:\n");
- return;
- }
+ int status = urb->status;
mos7840_port = (struct moschip_port *)urb->context;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -451,11 +447,11 @@ static void mos7840_control_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __FUNCTION__,
- urb->status);
+ status);
return;
default:
dbg("%s - nonzero urb status received: %d", __FUNCTION__,
- urb->status);
+ status);
goto exit;
}
@@ -521,14 +517,11 @@ static void mos7840_interrupt_callback(struct urb *urb)
__u8 sp[5], st;
int i, rv = 0;
__u16 wval, wreg = 0;
+ int status = urb->status;
dbg("%s", " : Entering\n");
- if (!urb) {
- dbg("%s", "Invalid Pointer !!!!:\n");
- return;
- }
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -537,11 +530,11 @@ static void mos7840_interrupt_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __FUNCTION__,
- urb->status);
+ status);
return;
default:
dbg("%s - nonzero urb status received: %d", __FUNCTION__,
- urb->status);
+ status);
goto exit;
}
@@ -666,20 +659,16 @@ static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
static void mos7840_bulk_in_callback(struct urb *urb)
{
- int status;
+ int retval;
unsigned char *data;
struct usb_serial *serial;
struct usb_serial_port *port;
struct moschip_port *mos7840_port;
struct tty_struct *tty;
+ int status = urb->status;
- if (!urb) {
- dbg("%s", "Invalid Pointer !!!!:\n");
- return;
- }
-
- if (urb->status) {
- dbg("nonzero read bulk status received: %d", urb->status);
+ if (status) {
+ dbg("nonzero read bulk status received: %d", status);
return;
}
@@ -729,11 +718,11 @@ static void mos7840_bulk_in_callback(struct urb *urb)
mos7840_port->read_urb->dev = serial->dev;
- status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
+ retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
- if (status) {
- dbg(" usb_submit_urb(read bulk) failed, status = %d",
- status);
+ if (retval) {
+ dbg(" usb_submit_urb(read bulk) failed, retval = %d",
+ retval);
}
}
@@ -747,13 +736,9 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
{
struct moschip_port *mos7840_port;
struct tty_struct *tty;
+ int status = urb->status;
int i;
- if (!urb) {
- dbg("%s", "Invalid Pointer !!!!:\n");
- return;
- }
-
mos7840_port = (struct moschip_port *)urb->context;
spin_lock(&mos7840_port->pool_lock);
for (i = 0; i < NUM_URBS; i++) {
@@ -764,8 +749,8 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
}
spin_unlock(&mos7840_port->pool_lock);
- if (urb->status) {
- dbg("nonzero write bulk status received:%d\n", urb->status);
+ if (status) {
+ dbg("nonzero write bulk status received:%d\n", status);
return;
}
@@ -2185,16 +2170,6 @@ static void mos7840_set_termios(struct usb_serial_port *port,
return;
}
- /* check that they really want us to change something */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(tty->termios->c_iflag) ==
- RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("%s\n", "Nothing to change");
- return;
- }
- }
-
dbg("%s - clfag %08x iflag %08x", __FUNCTION__,
tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag));
@@ -2254,30 +2229,6 @@ static int mos7840_get_lsr_info(struct moschip_port *mos7840_port,
}
/*****************************************************************************
- * mos7840_get_bytes_avail - get number of bytes available
- *
- * Purpose: Let user call ioctl to get the count of number of bytes available.
- *****************************************************************************/
-
-static int mos7840_get_bytes_avail(struct moschip_port *mos7840_port,
- unsigned int __user *value)
-{
- unsigned int result = 0;
- struct tty_struct *tty = mos7840_port->port->tty;
-
- if (!tty)
- return -ENOIOCTLCMD;
-
- result = tty->read_cnt;
-
- dbg("%s(%d) = %d", __FUNCTION__, mos7840_port->port->number, result);
- if (copy_to_user(value, &result, sizeof(int)))
- return -EFAULT;
-
- return -ENOIOCTLCMD;
-}
-
-/*****************************************************************************
* mos7840_set_modem_info
* function to set modem info
*****************************************************************************/
@@ -2425,8 +2376,6 @@ static int mos7840_ioctl(struct usb_serial_port *port, struct file *file,
struct async_icount cprev;
struct serial_icounter_struct icount;
int mosret = 0;
- int retval;
- struct tty_ldisc *ld;
if (mos7840_port_paranoia_check(port, __FUNCTION__)) {
dbg("%s", "Invalid port \n");
@@ -2445,42 +2394,6 @@ static int mos7840_ioctl(struct usb_serial_port *port, struct file *file,
switch (cmd) {
/* return number of bytes available */
- case TIOCINQ:
- dbg("%s (%d) TIOCINQ", __FUNCTION__, port->number);
- return mos7840_get_bytes_avail(mos7840_port, argp);
-
- case TIOCOUTQ:
- dbg("%s (%d) TIOCOUTQ", __FUNCTION__, port->number);
- return put_user(tty->driver->chars_in_buffer ?
- tty->driver->chars_in_buffer(tty) : 0,
- (int __user *)arg);
-
- case TCFLSH:
- retval = tty_check_change(tty);
- if (retval)
- return retval;
-
- ld = tty_ldisc_ref(tty);
- switch (arg) {
- case TCIFLUSH:
- if (ld && ld->flush_buffer)
- ld->flush_buffer(tty);
- break;
- case TCIOFLUSH:
- if (ld && ld->flush_buffer)
- ld->flush_buffer(tty);
- /* fall through */
- case TCOFLUSH:
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
- break;
- default:
- tty_ldisc_deref(ld);
- return -EINVAL;
- }
- tty_ldisc_deref(ld);
- return 0;
-
case TIOCSERGETLSR:
dbg("%s (%d) TIOCSERGETLSR", __FUNCTION__, port->number);
return mos7840_get_lsr_info(mos7840_port, argp);
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 90701111d746..7f337c9aeb5f 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -37,9 +37,10 @@ static void navman_read_int_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
+ int status = urb->status;
int result;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -48,11 +49,11 @@ static void navman_read_int_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
goto exit;
}
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 00afc1712c39..ee94d9616d82 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,10 +1,9 @@
/*
* USB ZyXEL omni.net LCD PLUS driver
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
@@ -201,14 +200,15 @@ static void omninet_read_bulk_callback (struct urb *urb)
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
unsigned char *data = urb->transfer_buffer;
struct omninet_header *header = (struct omninet_header *) &data[0];
-
+ int status = urb->status;
int i;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -312,12 +312,14 @@ static void omninet_write_bulk_callback (struct urb *urb)
{
/* struct omninet_header *header = (struct omninet_header *) urb->transfer_buffer; */
struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
+ int status = urb->status;
dbg("%s - port %0x\n", __FUNCTION__, port->number);
port->write_urb_busy = 0;
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5d3999e3ff61..84c12b5f1271 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -38,6 +38,7 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
+#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
@@ -240,6 +241,7 @@ struct option_port_private {
/* Output endpoints and buffer for this port */
struct urb *out_urbs[N_OUT_URB];
char out_buffer[N_OUT_URB][OUT_BUFLEN];
+ unsigned long out_busy; /* Bit vector of URBs in use */
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
@@ -370,7 +372,7 @@ static int option_write(struct usb_serial_port *port,
todo = OUT_BUFLEN;
this_urb = portdata->out_urbs[i];
- if (this_urb->status == -EINPROGRESS) {
+ if (test_and_set_bit(i, &portdata->out_busy)) {
if (time_before(jiffies,
portdata->tx_start_time[i] + 10 * HZ))
continue;
@@ -394,6 +396,7 @@ static int option_write(struct usb_serial_port *port,
dbg("usb_submit_urb %p (write bulk) failed "
"(%d, has %d)", this_urb,
err, this_urb->status);
+ clear_bit(i, &portdata->out_busy);
continue;
}
portdata->tx_start_time[i] = jiffies;
@@ -413,15 +416,16 @@ static void option_indat_callback(struct urb *urb)
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
dbg("%s: %p", __FUNCTION__, urb);
endpoint = usb_pipeendpoint(urb->pipe);
port = (struct usb_serial_port *) urb->context;
- if (urb->status) {
+ if (status) {
dbg("%s: nonzero status: %d on endpoint %02x.",
- __FUNCTION__, urb->status, endpoint);
+ __FUNCTION__, status, endpoint);
} else {
tty = port->tty;
if (urb->actual_length) {
@@ -433,7 +437,7 @@ static void option_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- if (port->open_count && urb->status != -ESHUTDOWN) {
+ if (port->open_count && status != -ESHUTDOWN) {
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
printk(KERN_ERR "%s: resubmit read urb failed. "
@@ -446,17 +450,29 @@ static void option_indat_callback(struct urb *urb)
static void option_outdat_callback(struct urb *urb)
{
struct usb_serial_port *port;
+ struct option_port_private *portdata;
+ int i;
dbg("%s", __FUNCTION__);
port = (struct usb_serial_port *) urb->context;
usb_serial_port_softint(port);
+
+ portdata = usb_get_serial_port_data(port);
+ for (i = 0; i < N_OUT_URB; ++i) {
+ if (portdata->out_urbs[i] == urb) {
+ smp_mb__before_clear_bit();
+ clear_bit(i, &portdata->out_busy);
+ break;
+ }
+ }
}
static void option_instat_callback(struct urb *urb)
{
int err;
+ int status = urb->status;
struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
struct option_port_private *portdata = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
@@ -464,7 +480,7 @@ static void option_instat_callback(struct urb *urb)
dbg("%s", __FUNCTION__);
dbg("%s: urb %p port %p has data %p", __FUNCTION__,urb,port,portdata);
- if (urb->status == 0) {
+ if (status == 0) {
struct usb_ctrlrequest *req_pkt =
(struct usb_ctrlrequest *)urb->transfer_buffer;
@@ -495,10 +511,10 @@ static void option_instat_callback(struct urb *urb)
req_pkt->bRequestType,req_pkt->bRequest);
}
} else
- dbg("%s: error %d", __FUNCTION__, urb->status);
+ dbg("%s: error %d", __FUNCTION__, status);
/* Resubmit urb so we continue receiving IRQ data */
- if (urb->status != -ESHUTDOWN) {
+ if (status != -ESHUTDOWN) {
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
@@ -518,7 +534,7 @@ static int option_write_room(struct usb_serial_port *port)
for (i=0; i < N_OUT_URB; i++) {
this_urb = portdata->out_urbs[i];
- if (this_urb && this_urb->status != -EINPROGRESS)
+ if (this_urb && !test_bit(i, &portdata->out_busy))
data_len += OUT_BUFLEN;
}
@@ -537,7 +553,7 @@ static int option_chars_in_buffer(struct usb_serial_port *port)
for (i=0; i < N_OUT_URB; i++) {
this_urb = portdata->out_urbs[i];
- if (this_urb && this_urb->status == -EINPROGRESS)
+ if (this_urb && test_bit(i, &portdata->out_busy))
data_len += this_urb->transfer_buffer_length;
}
dbg("%s: %d", __FUNCTION__, data_len);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
new file mode 100644
index 000000000000..d7db71eca520
--- /dev/null
+++ b/drivers/usb/serial/oti6858.c
@@ -0,0 +1,1342 @@
+/*
+ * Ours Technology Inc. OTi-6858 USB to serial adapter driver.
+ *
+ * Copyleft (C) 2007 Kees Lemmens (adapted for kernel 2.6.20)
+ * Copyright (C) 2006 Tomasz Michal Lukaszewski (FIXME: add e-mail)
+ * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2003 IBM Corp.
+ *
+ * Many thanks to the authors of the pl2303 driver: all functions in this
+ * file are heavily based on pl2303 code; the buffering code is a 1-to-1 copy.
+ *
+ * Warning! You use this driver at your own risk! The only official
+ * description of this device I have is the datasheet from the manufacturer,
+ * and it contains almost none of the information needed to write a driver.
+ * Almost all knowledge used while writing this driver was gathered by:
+ * - analyzing traffic between device and the M$ Windows 2000 driver,
+ * - trying different bit combinations and checking pin states
+ * with a voltmeter,
+ * - receiving malformed frames and producing buffer overflows
+ * to learn how errors are reported,
+ * So, THIS CODE CAN DESTROY THE OTi-6858 AND ANY OTHER DEVICES THAT ARE
+ * CONNECTED TO IT!
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * See Documentation/usb/usb-serial.txt for more information on using this driver
+ *
+ * TODO:
+ * - implement correct flushing for ioctls and oti6858_close()
+ * - check how errors (rx overflow, parity error, framing error) are reported
+ * - implement oti6858_break_ctl()
+ * - implement more ioctls
+ * - test/implement flow control
+ * - allow setting custom baud rates
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <asm/uaccess.h>
+#include "oti6858.h"
+
+#define OTI6858_DESCRIPTION \
+ "Ours Technology Inc. OTi-6858 USB to serial adapter driver"
+#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
+#define OTI6858_VERSION "0.1"
+
+static struct usb_device_id id_table [] = {
+ { USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver oti6858_driver = {
+ .name = "oti6858",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static int debug;
+
+
+/* buffering code, copied from pl2303 driver */
+#define PL2303_BUF_SIZE 1024
+#define PL2303_TMP_BUF_SIZE 1024
+
+struct pl2303_buf {
+ unsigned int buf_size;
+ char *buf_buf;
+ char *buf_get;
+ char *buf_put;
+};
+
+/* requests */
+#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
+#define OTI6858_REQ_T_GET_STATUS 0x01
+
+#define OTI6858_REQ_SET_LINE (USB_DIR_OUT | USB_TYPE_VENDOR | 0x00)
+#define OTI6858_REQ_T_SET_LINE 0x00
+
+#define OTI6858_REQ_CHECK_TXBUFF (USB_DIR_IN | USB_TYPE_VENDOR | 0x01)
+#define OTI6858_REQ_T_CHECK_TXBUFF 0x00
+
+/* format of the control packet */
+struct oti6858_control_pkt {
+ u16 divisor; /* baud rate = 96000000 / (16 * divisor), LE */
+#define OTI6858_MAX_BAUD_RATE 3000000
+ u8 frame_fmt;
+#define FMT_STOP_BITS_MASK 0xc0
+#define FMT_STOP_BITS_1 0x00
+#define FMT_STOP_BITS_2 0x40 /* 1.5 stop bits if FMT_DATA_BITS_5 */
+#define FMT_PARITY_MASK 0x38
+#define FMT_PARITY_NONE 0x00
+#define FMT_PARITY_ODD 0x08
+#define FMT_PARITY_EVEN 0x18
+#define FMT_PARITY_MARK 0x28
+#define FMT_PARITY_SPACE 0x38
+#define FMT_DATA_BITS_MASK 0x03
+#define FMT_DATA_BITS_5 0x00
+#define FMT_DATA_BITS_6 0x01
+#define FMT_DATA_BITS_7 0x02
+#define FMT_DATA_BITS_8 0x03
+ u8 something; /* always equals 0x43 */
+ u8 control; /* settings of flow control lines */
+#define CONTROL_MASK 0x0c
+#define CONTROL_DTR_HIGH 0x08
+#define CONTROL_RTS_HIGH 0x04
+ u8 tx_status;
+#define TX_BUFFER_EMPTIED 0x09
+ u8 pin_state;
+#define PIN_MASK 0x3f
+#define PIN_RTS 0x20 /* output pin */
+#define PIN_CTS 0x10 /* input pin, active low */
+#define PIN_DSR 0x08 /* input pin, active low */
+#define PIN_DTR 0x04 /* output pin */
+#define PIN_RI 0x02 /* input pin, active low */
+#define PIN_DCD 0x01 /* input pin, active low */
+ u8 rx_bytes_avail; /* number of bytes in rx buffer */
+};
+
+#define OTI6858_CTRL_PKT_SIZE sizeof(struct oti6858_control_pkt)
+#define OTI6858_CTRL_EQUALS_PENDING(a, priv) \
+ ( ((a)->divisor == (priv)->pending_setup.divisor) \
+ && ((a)->control == (priv)->pending_setup.control) \
+ && ((a)->frame_fmt == (priv)->pending_setup.frame_fmt) )
+
+/* function prototypes */
+static int oti6858_open(struct usb_serial_port *port, struct file *filp);
+static void oti6858_close(struct usb_serial_port *port, struct file *filp);
+static void oti6858_set_termios(struct usb_serial_port *port,
+ struct ktermios *old);
+static int oti6858_ioctl(struct usb_serial_port *port, struct file *file,
+ unsigned int cmd, unsigned long arg);
+static void oti6858_read_int_callback(struct urb *urb);
+static void oti6858_read_bulk_callback(struct urb *urb);
+static void oti6858_write_bulk_callback(struct urb *urb);
+static int oti6858_write(struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+static int oti6858_write_room(struct usb_serial_port *port);
+static void oti6858_break_ctl(struct usb_serial_port *port, int break_state);
+static int oti6858_chars_in_buffer(struct usb_serial_port *port);
+static int oti6858_tiocmget(struct usb_serial_port *port, struct file *file);
+static int oti6858_tiocmset(struct usb_serial_port *port, struct file *file,
+ unsigned int set, unsigned int clear);
+static int oti6858_startup(struct usb_serial *serial);
+static void oti6858_shutdown(struct usb_serial *serial);
+
+/* functions operating on buffers */
+static struct pl2303_buf *pl2303_buf_alloc(unsigned int size);
+static void pl2303_buf_free(struct pl2303_buf *pb);
+static void pl2303_buf_clear(struct pl2303_buf *pb);
+static unsigned int pl2303_buf_data_avail(struct pl2303_buf *pb);
+static unsigned int pl2303_buf_space_avail(struct pl2303_buf *pb);
+static unsigned int pl2303_buf_put(struct pl2303_buf *pb, const char *buf,
+ unsigned int count);
+static unsigned int pl2303_buf_get(struct pl2303_buf *pb, char *buf,
+ unsigned int count);
+
+
+/* device info */
+static struct usb_serial_driver oti6858_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "oti6858",
+ },
+ .id_table = id_table,
+ .num_interrupt_in = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_ports = 1,
+ .open = oti6858_open,
+ .close = oti6858_close,
+ .write = oti6858_write,
+ .ioctl = oti6858_ioctl,
+ .break_ctl = oti6858_break_ctl,
+ .set_termios = oti6858_set_termios,
+ .tiocmget = oti6858_tiocmget,
+ .tiocmset = oti6858_tiocmset,
+ .read_bulk_callback = oti6858_read_bulk_callback,
+ .read_int_callback = oti6858_read_int_callback,
+ .write_bulk_callback = oti6858_write_bulk_callback,
+ .write_room = oti6858_write_room,
+ .chars_in_buffer = oti6858_chars_in_buffer,
+ .attach = oti6858_startup,
+ .shutdown = oti6858_shutdown,
+};
+
+struct oti6858_private {
+ spinlock_t lock;
+
+ struct pl2303_buf *buf;
+ struct oti6858_control_pkt status;
+
+ struct {
+ u8 read_urb_in_use;
+ u8 write_urb_in_use;
+ u8 termios_initialized;
+ } flags;
+ struct delayed_work delayed_write_work;
+
+ struct {
+ u16 divisor;
+ u8 frame_fmt;
+ u8 control;
+ } pending_setup;
+ u8 transient;
+ u8 setup_done;
+ struct delayed_work delayed_setup_work;
+
+ wait_queue_head_t intr_wait;
+ struct usb_serial_port *port; /* USB port with which associated */
+};
+
+#undef dbg
+/* #define dbg(format, arg...) printk(KERN_INFO "%s: " format "\n", __FILE__, ## arg) */
+#define dbg(format, arg...) printk(KERN_INFO "" format "\n", ## arg)
+
+static void setup_line(struct work_struct *work)
+{
+ struct oti6858_private *priv = container_of(work, struct oti6858_private, delayed_setup_work.work);
+ struct usb_serial_port *port = priv->port;
+ struct oti6858_control_pkt *new_setup;
+ unsigned long flags;
+ int result;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ if ((new_setup = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL)) == NULL) {
+ dev_err(&port->dev, "%s(): out of memory!\n", __FUNCTION__);
+ /* we will try again */
+ schedule_delayed_work(&priv->delayed_setup_work, msecs_to_jiffies(2));
+ return;
+ }
+
+ result = usb_control_msg(port->serial->dev,
+ usb_rcvctrlpipe(port->serial->dev, 0),
+ OTI6858_REQ_T_GET_STATUS,
+ OTI6858_REQ_GET_STATUS,
+ 0, 0,
+ new_setup, OTI6858_CTRL_PKT_SIZE,
+ 100);
+
+ if (result != OTI6858_CTRL_PKT_SIZE) {
+ dev_err(&port->dev, "%s(): error reading status", __FUNCTION__);
+ kfree(new_setup);
+ /* we will try again */
+ schedule_delayed_work(&priv->delayed_setup_work, msecs_to_jiffies(2));
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!OTI6858_CTRL_EQUALS_PENDING(new_setup, priv)) {
+ new_setup->divisor = priv->pending_setup.divisor;
+ new_setup->control = priv->pending_setup.control;
+ new_setup->frame_fmt = priv->pending_setup.frame_fmt;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ result = usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ OTI6858_REQ_T_SET_LINE,
+ OTI6858_REQ_SET_LINE,
+ 0, 0,
+ new_setup, OTI6858_CTRL_PKT_SIZE,
+ 100);
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ result = 0;
+ }
+ kfree(new_setup);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (result != OTI6858_CTRL_PKT_SIZE)
+ priv->transient = 0;
+ priv->setup_done = 1;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ dbg("%s(): submitting interrupt urb", __FUNCTION__);
+ port->interrupt_in_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed"
+ " with error %d\n", __FUNCTION__, result);
+ }
+}
+
+void send_data(struct work_struct *work)
+{
+ struct oti6858_private *priv = container_of(work, struct oti6858_private, delayed_write_work.work);
+ struct usb_serial_port *port = priv->port;
+ int count = 0, result;
+ unsigned long flags;
+ unsigned char allow;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->flags.write_urb_in_use) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ schedule_delayed_work(&priv->delayed_write_work, msecs_to_jiffies(2));
+ return;
+ }
+ priv->flags.write_urb_in_use = 1;
+
+ count = pl2303_buf_data_avail(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (count > port->bulk_out_size)
+ count = port->bulk_out_size;
+
+ if (count != 0) {
+ result = usb_control_msg(port->serial->dev,
+ usb_rcvctrlpipe(port->serial->dev, 0),
+ OTI6858_REQ_T_CHECK_TXBUFF,
+ OTI6858_REQ_CHECK_TXBUFF,
+ count, 0, &allow, 1, 100);
+ if (result != 1 || allow != 0)
+ count = 0;
+ }
+
+ if (count == 0) {
+ priv->flags.write_urb_in_use = 0;
+
+ dbg("%s(): submitting interrupt urb", __FUNCTION__);
+ port->interrupt_in_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed"
+ " with error %d\n", __FUNCTION__, result);
+ }
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pl2303_buf_get(priv->buf, port->write_urb->transfer_buffer, count);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ port->write_urb->transfer_buffer_length = count;
+ port->write_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ if (result != 0) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed"
+ " with error %d\n", __FUNCTION__, result);
+ priv->flags.write_urb_in_use = 0;
+ }
+
+ usb_serial_port_softint(port);
+}
+
+static int oti6858_startup(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+ struct oti6858_private *priv;
+ int i;
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
+ if (!priv)
+ break;
+ priv->buf = pl2303_buf_alloc(PL2303_BUF_SIZE);
+ if (priv->buf == NULL) {
+ kfree(priv);
+ break;
+ }
+
+ spin_lock_init(&priv->lock);
+ init_waitqueue_head(&priv->intr_wait);
+// INIT_WORK(&priv->setup_work, setup_line, serial->port[i]);
+// INIT_WORK(&priv->write_work, send_data, serial->port[i]);
+ priv->port = port;
+ INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
+ INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
+
+ usb_set_serial_port_data(serial->port[i], priv);
+ }
+ if (i == serial->num_ports)
+ return 0;
+
+ for (--i; i >= 0; --i) {
+ priv = usb_get_serial_port_data(serial->port[i]);
+ pl2303_buf_free(priv->buf);
+ kfree(priv);
+ usb_set_serial_port_data(serial->port[i], NULL);
+ }
+ return -ENOMEM;
+}
+
+static int oti6858_write(struct usb_serial_port *port,
+ const unsigned char *buf, int count)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ dbg("%s(port = %d, count = %d)", __FUNCTION__, port->number, count);
+
+ if (!count)
+ return count;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ count = pl2303_buf_put(priv->buf, buf, count);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return count;
+}
+
+static int oti6858_write_room(struct usb_serial_port *port)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ int room = 0;
+ unsigned long flags;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ room = pl2303_buf_space_avail(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return room;
+}
+
+static int oti6858_chars_in_buffer(struct usb_serial_port *port)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ int chars = 0;
+ unsigned long flags;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ chars = pl2303_buf_data_avail(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return chars;
+}
+
+static void oti6858_set_termios(struct usb_serial_port *port,
+ struct ktermios *old_termios)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int cflag;
+ u8 frame_fmt, control;
+ u16 divisor;
+ int br;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ if ((!port->tty) || (!port->tty->termios)) {
+ dbg("%s(): no tty structures", __FUNCTION__);
+ return;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!priv->flags.termios_initialized) {
+ *(port->tty->termios) = tty_std_termios;
+ port->tty->termios->c_cflag = B38400 | CS8 | CREAD | HUPCL | CLOCAL;
+ priv->flags.termios_initialized = 1;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ cflag = port->tty->termios->c_cflag;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ divisor = priv->pending_setup.divisor;
+ frame_fmt = priv->pending_setup.frame_fmt;
+ control = priv->pending_setup.control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ frame_fmt &= ~FMT_DATA_BITS_MASK;
+ switch (cflag & CSIZE) {
+ case CS5:
+ frame_fmt |= FMT_DATA_BITS_5;
+ break;
+ case CS6:
+ frame_fmt |= FMT_DATA_BITS_6;
+ break;
+ case CS7:
+ frame_fmt |= FMT_DATA_BITS_7;
+ break;
+ default:
+ case CS8:
+ frame_fmt |= FMT_DATA_BITS_8;
+ break;
+ }
+
+ /* manufacturer claims that this device can work with baud rates
+ * up to 3 Mbps; I've tested it only at 115200 bps, so I can't
+ * guarantee that any other baud rate will work (especially
+ * the higher ones)
+ */
+ br = tty_get_baud_rate(port->tty);
+ if (br == 0) {
+ divisor = 0;
+ } else if (br <= OTI6858_MAX_BAUD_RATE) {
+ int real_br;
+
+ divisor = (96000000 + 8 * br) / (16 * br);
+ real_br = 96000000 / (16 * divisor);
+ if ((((real_br - br) * 100 + br - 1) / br) > 2) {
+ dbg("%s(): baud rate %d is invalid", __FUNCTION__, br);
+ return;
+ }
+ divisor = cpu_to_le16(divisor);
+ } else {
+ dbg("%s(): baud rate %d is too high", __FUNCTION__, br);
+ return;
+ }
+
+ frame_fmt &= ~FMT_STOP_BITS_MASK;
+ if ((cflag & CSTOPB) != 0) {
+ frame_fmt |= FMT_STOP_BITS_2;
+ } else {
+ frame_fmt |= FMT_STOP_BITS_1;
+ }
+
+ frame_fmt &= ~FMT_PARITY_MASK;
+ if ((cflag & PARENB) != 0) {
+ if ((cflag & PARODD) != 0) {
+ frame_fmt |= FMT_PARITY_ODD;
+ } else {
+ frame_fmt |= FMT_PARITY_EVEN;
+ }
+ } else {
+ frame_fmt |= FMT_PARITY_NONE;
+ }
+
+ control &= ~CONTROL_MASK;
+ if ((cflag & CRTSCTS) != 0)
+ control |= (CONTROL_DTR_HIGH | CONTROL_RTS_HIGH);
+
+ /* change control lines if we are switching to or from B0 */
+ /* FIXME:
+ spin_lock_irqsave(&priv->lock, flags);
+ control = priv->line_control;
+ if ((cflag & CBAUD) == B0)
+ priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+ else
+ priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+ if (control != priv->line_control) {
+ control = priv->line_control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ set_control_lines(serial->dev, control);
+ } else {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ */
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (divisor != priv->pending_setup.divisor
+ || control != priv->pending_setup.control
+ || frame_fmt != priv->pending_setup.frame_fmt) {
+ priv->pending_setup.divisor = divisor;
+ priv->pending_setup.control = control;
+ priv->pending_setup.frame_fmt = frame_fmt;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
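A worked illustration (not from the driver) of the divisor arithmetic above, assuming the 96 MHz base clock documented in struct oti6858_control_pkt. For a requested rate of 115200 bps:

	divisor = (96000000 + 8*115200) / (16*115200) = 52
	real_br = 96000000 / (16*52)                  = 115384 bps

an error of roughly 0.16%, comfortably inside the 2% tolerance enforced by the check in oti6858_set_termios().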
+
+static int oti6858_open(struct usb_serial_port *port, struct file *filp)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ struct ktermios tmp_termios;
+ struct usb_serial *serial = port->serial;
+ struct oti6858_control_pkt *buf;
+ unsigned long flags;
+ int result;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ usb_clear_halt(serial->dev, port->write_urb->pipe);
+ usb_clear_halt(serial->dev, port->read_urb->pipe);
+
+ if (port->open_count != 1)
+ return 0;
+
+ if ((buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL)) == NULL) {
+ dev_err(&port->dev, "%s(): out of memory!\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ OTI6858_REQ_T_GET_STATUS,
+ OTI6858_REQ_GET_STATUS,
+ 0, 0,
+ buf, OTI6858_CTRL_PKT_SIZE,
+ 100);
+ if (result != OTI6858_CTRL_PKT_SIZE) {
+ /* assume default (after power-on reset) values */
+ buf->divisor = cpu_to_le16(0x009c); /* 38400 bps */
+ buf->frame_fmt = 0x03; /* 8N1 */
+ buf->something = 0x43;
+ buf->control = 0x4c; /* DTR, RTS */
+ buf->tx_status = 0x00;
+ buf->pin_state = 0x5b; /* RTS, CTS, DSR, DTR, RI, DCD */
+ buf->rx_bytes_avail = 0x00;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ memcpy(&priv->status, buf, OTI6858_CTRL_PKT_SIZE);
+ priv->pending_setup.divisor = buf->divisor;
+ priv->pending_setup.frame_fmt = buf->frame_fmt;
+ priv->pending_setup.control = buf->control;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ kfree(buf);
+
+ dbg("%s(): submitting interrupt urb", __FUNCTION__);
+ port->interrupt_in_urb->dev = serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (result != 0) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed"
+ " with error %d\n", __FUNCTION__, result);
+ oti6858_close(port, NULL);
+ return -EPROTO;
+ }
+
+ /* setup termios */
+ if (port->tty)
+ oti6858_set_termios(port, &tmp_termios);
+
+ return 0;
+}
+
+static void oti6858_close(struct usb_serial_port *port, struct file *filp)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ long timeout;
+ wait_queue_t wait;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ /* wait for data to drain from the buffer */
+ spin_lock_irqsave(&priv->lock, flags);
+ timeout = 30 * HZ; /* PL2303_CLOSING_WAIT */
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&port->tty->write_wait, &wait);
+ dbg("%s(): entering wait loop", __FUNCTION__);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (pl2303_buf_data_avail(priv->buf) == 0
+ || timeout == 0 || signal_pending(current)
+ || !usb_get_intfdata(port->serial->interface)) /* disconnect */
+ break;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ timeout = schedule_timeout(timeout);
+ spin_lock_irqsave(&priv->lock, flags);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->tty->write_wait, &wait);
+ dbg("%s(): after wait loop", __FUNCTION__);
+
+ /* clear out any remaining data in the buffer */
+ pl2303_buf_clear(priv->buf);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* wait for characters to drain from the device */
+ /* (this is long enough for the entire 256 byte */
+ /* pl2303 hardware buffer to drain with no flow */
+ /* control for data rates of 1200 bps or more, */
+ /* for lower rates we should really know how much */
+ /* data is in the buffer to compute a delay */
+ /* that is not unnecessarily long) */
+ /* FIXME
+ bps = tty_get_baud_rate(port->tty);
+ if (bps > 1200)
+ timeout = max((HZ*2560)/bps,HZ/10);
+ else
+ */
+ timeout = 2*HZ;
+ schedule_timeout_interruptible(timeout);
+ dbg("%s(): after schedule_timeout_interruptible()", __FUNCTION__);
+
+ /* cancel scheduled setup */
+ cancel_delayed_work(&priv->delayed_setup_work);
+ cancel_delayed_work(&priv->delayed_write_work);
+ flush_scheduled_work();
+
+ /* shutdown our urbs */
+ dbg("%s(): shutting down urbs", __FUNCTION__);
+ usb_kill_urb(port->write_urb);
+ usb_kill_urb(port->read_urb);
+ usb_kill_urb(port->interrupt_in_urb);
+
+ /*
+ if (port->tty && (port->tty->termios->c_cflag) & HUPCL) {
+ // drop DTR and RTS
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->pending_setup.control &= ~CONTROL_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ */
+}
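A quick sanity check (illustrative) of the fixed 2*HZ fallback above: draining a full 256-byte hardware buffer at 1200 bps with 10 bits per character takes 256*10/1200, roughly 2.1 s, which is also what the commented-out formula max((HZ*2560)/bps, HZ/10) yields at that rate; lower rates would indeed need the smarter, buffer-aware delay the comment asks for.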
+
+static int oti6858_tiocmset(struct usb_serial_port *port, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ u8 control;
+
+ dbg("%s(port = %d, set = 0x%08x, clear = 0x%08x)",
+ __FUNCTION__, port->number, set, clear);
+
+ if (!usb_get_intfdata(port->serial->interface))
+ return -ENODEV;
+
+ /* FIXME: check if this is correct (active high/low) */
+ spin_lock_irqsave(&priv->lock, flags);
+ control = priv->pending_setup.control;
+ if ((set & TIOCM_RTS) != 0)
+ control |= CONTROL_RTS_HIGH;
+ if ((set & TIOCM_DTR) != 0)
+ control |= CONTROL_DTR_HIGH;
+ if ((clear & TIOCM_RTS) != 0)
+ control &= ~CONTROL_RTS_HIGH;
+ if ((clear & TIOCM_DTR) != 0)
+ control &= ~CONTROL_DTR_HIGH;
+
+ if (control != priv->pending_setup.control) {
+ priv->pending_setup.control = control;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int oti6858_tiocmget(struct usb_serial_port *port, struct file *file)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned pin_state;
+ unsigned result = 0;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ if (!usb_get_intfdata(port->serial->interface))
+ return -ENODEV;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pin_state = priv->status.pin_state & PIN_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* FIXME: check if this is correct (active high/low) */
+ if ((pin_state & PIN_RTS) != 0)
+ result |= TIOCM_RTS;
+ if ((pin_state & PIN_CTS) != 0)
+ result |= TIOCM_CTS;
+ if ((pin_state & PIN_DSR) != 0)
+ result |= TIOCM_DSR;
+ if ((pin_state & PIN_DTR) != 0)
+ result |= TIOCM_DTR;
+ if ((pin_state & PIN_RI) != 0)
+ result |= TIOCM_RI;
+ if ((pin_state & PIN_DCD) != 0)
+ result |= TIOCM_CD;
+
+ dbg("%s() = 0x%08x", __FUNCTION__, result);
+
+ return result;
+}
+
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int prev, status;
+ unsigned int changed;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ prev = priv->status.pin_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (1) {
+ wait_event_interruptible(priv->intr_wait, priv->status.pin_state != prev);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->status.pin_state & PIN_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ changed = prev ^ status;
+ /* FIXME: check if this is correct (active high/low) */
+ if ( ((arg & TIOCM_RNG) && (changed & PIN_RI)) ||
+ ((arg & TIOCM_DSR) && (changed & PIN_DSR)) ||
+ ((arg & TIOCM_CD) && (changed & PIN_DCD)) ||
+ ((arg & TIOCM_CTS) && (changed & PIN_CTS))) {
+ return 0;
+ }
+ prev = status;
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+
+static int oti6858_ioctl(struct usb_serial_port *port, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *user_arg = (void __user *) arg;
+ unsigned int x;
+
+ dbg("%s(port = %d, cmd = 0x%04x, arg = 0x%08lx)",
+ __FUNCTION__, port->number, cmd, arg);
+
+ switch (cmd) {
+ case TCGETS:
+ if (copy_to_user(user_arg, port->tty->termios,
+ sizeof(struct ktermios))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ case TCSETS:
+ case TCSETSW: /* FIXME: this is not the same! */
+ case TCSETSF: /* FIXME: this is not the same! */
+ if (copy_from_user(port->tty->termios, user_arg,
+ sizeof(struct ktermios))) {
+ return -EFAULT;
+ }
+ oti6858_set_termios(port, NULL);
+ return 0;
+
+ case TCFLSH:
+ /* FIXME */
+ return 0;
+
+ case TIOCMBIS:
+ if (copy_from_user(&x, user_arg, sizeof(x)))
+ return -EFAULT;
+ return oti6858_tiocmset(port, NULL, x, 0);
+
+ case TIOCMBIC:
+ if (copy_from_user(&x, user_arg, sizeof(x)))
+ return -EFAULT;
+ return oti6858_tiocmset(port, NULL, 0, x);
+
+ case TIOCGSERIAL:
+ if (copy_to_user(user_arg, port->tty->termios,
+ sizeof(struct ktermios))) {
+ return -EFAULT;
+ }
+ return 0;
+
+ case TIOCSSERIAL:
+ if (copy_from_user(port->tty->termios, user_arg,
+ sizeof(struct ktermios))) {
+ return -EFAULT;
+ }
+ oti6858_set_termios(port, NULL);
+ return 0;
+
+ case TIOCMIWAIT:
+ dbg("%s(): TIOCMIWAIT", __FUNCTION__);
+ return wait_modem_info(port, arg);
+
+ default:
+ dbg("%s(): 0x%04x not supported", __FUNCTION__, cmd);
+ break;
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static void oti6858_break_ctl(struct usb_serial_port *port, int break_state)
+{
+ int state;
+
+ dbg("%s(port = %d)", __FUNCTION__, port->number);
+
+ state = (break_state == 0) ? 0 : 1;
+ dbg("%s(): turning break %s", __FUNCTION__, state ? "on" : "off");
+
+ /* FIXME */
+/*
+ result = usb_control_msg (serial->dev, usb_sndctrlpipe (serial->dev, 0),
+ BREAK_REQUEST, BREAK_REQUEST_TYPE, state,
+ 0, NULL, 0, 100);
+ if (result != 0)
+ dbg("%s(): error sending break", __FUNCTION__);
+ */
+}
+
+static void oti6858_shutdown(struct usb_serial *serial)
+{
+ struct oti6858_private *priv;
+ int i;
+
+ dbg("%s()", __FUNCTION__);
+
+ for (i = 0; i < serial->num_ports; ++i) {
+ priv = usb_get_serial_port_data(serial->port[i]);
+ if (priv) {
+ pl2303_buf_free(priv->buf);
+ kfree(priv);
+ usb_set_serial_port_data(serial->port[i], NULL);
+ }
+ }
+}
+
+static void oti6858_read_int_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ int transient = 0, can_recv = 0, resubmit = 1;
+ int status = urb->status;
+
+ dbg("%s(port = %d, status = %d)",
+ __FUNCTION__, port->number, status);
+
+ switch (status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s(): urb shutting down with status: %d",
+ __FUNCTION__, status);
+ return;
+ default:
+ dbg("%s(): nonzero urb status received: %d",
+ __FUNCTION__, status);
+ break;
+ }
+
+ if (status == 0 && urb->actual_length == OTI6858_CTRL_PKT_SIZE) {
+ struct oti6858_control_pkt *xs = urb->transfer_buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!priv->transient) {
+ if (!OTI6858_CTRL_EQUALS_PENDING(xs, priv)) {
+ if (xs->rx_bytes_avail == 0) {
+ priv->transient = 4;
+ priv->setup_done = 0;
+ resubmit = 0;
+ dbg("%s(): scheduling setup_line()",
+ __FUNCTION__);
+ schedule_delayed_work(&priv->delayed_setup_work, 0);
+ }
+ }
+ } else {
+ if (OTI6858_CTRL_EQUALS_PENDING(xs, priv)) {
+ priv->transient = 0;
+ } else if (!priv->setup_done) {
+ resubmit = 0;
+ } else if (--priv->transient == 0) {
+ if (xs->rx_bytes_avail == 0) {
+ priv->transient = 4;
+ priv->setup_done = 0;
+ resubmit = 0;
+ dbg("%s(): scheduling setup_line()",
+ __FUNCTION__);
+ schedule_delayed_work(&priv->delayed_setup_work, 0);
+ }
+ }
+ }
+
+ if (!priv->transient) {
+ if (xs->pin_state != priv->status.pin_state)
+ wake_up_interruptible(&priv->intr_wait);
+ memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
+ }
+
+ if (!priv->transient && xs->rx_bytes_avail != 0) {
+ can_recv = xs->rx_bytes_avail;
+ priv->flags.read_urb_in_use = 1;
+ }
+
+ transient = priv->transient;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ if (can_recv) {
+ int result;
+
+ port->read_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
+ if (result != 0) {
+ priv->flags.read_urb_in_use = 0;
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
+ " error %d\n", __FUNCTION__, result);
+ } else {
+ resubmit = 0;
+ }
+ } else if (!transient) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->flags.write_urb_in_use == 0
+ && pl2303_buf_data_avail(priv->buf) != 0) {
+ schedule_delayed_work(&priv->delayed_write_work,0);
+ resubmit = 0;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ if (resubmit) {
+ int result;
+
+// dbg("%s(): submitting interrupt urb", __FUNCTION__);
+ urb->dev = port->serial->dev;
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result != 0) {
+ dev_err(&urb->dev->dev,
+ "%s(): usb_submit_urb() failed with"
+ " error %d\n", __FUNCTION__, result);
+ }
+ }
+}
+
+static void oti6858_read_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ struct tty_struct *tty;
+ unsigned char *data = urb->transfer_buffer;
+ unsigned long flags;
+ int i, result;
+ int status = urb->status;
+ char tty_flag;
+
+ dbg("%s(port = %d, status = %d)",
+ __FUNCTION__, port->number, status);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->flags.read_urb_in_use = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (status != 0) {
+ if (!port->open_count) {
+ dbg("%s(): port is closed, exiting", __FUNCTION__);
+ return;
+ }
+ /*
+ if (status == -EPROTO) {
+ // PL2303 mysteriously fails with -EPROTO reschedule the read
+ dbg("%s - caught -EPROTO, resubmitting the urb", __FUNCTION__);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ return;
+ }
+ */
+ dbg("%s(): unable to handle the error, exiting", __FUNCTION__);
+ return;
+ }
+
+ // get tty_flag from status
+ tty_flag = TTY_NORMAL;
+
+/* FIXME: probably, errors will be signalled using interrupt pipe! */
+/*
+ // break takes precedence over parity,
+ // which takes precedence over framing errors
+ if (status & UART_BREAK_ERROR )
+ tty_flag = TTY_BREAK;
+ else if (status & UART_PARITY_ERROR)
+ tty_flag = TTY_PARITY;
+ else if (status & UART_FRAME_ERROR)
+ tty_flag = TTY_FRAME;
+ dbg("%s - tty_flag = %d", __FUNCTION__, tty_flag);
+*/
+
+ tty = port->tty;
+ if (tty != NULL && urb->actual_length > 0) {
+ tty_buffer_request_room(tty, urb->actual_length);
+ for (i = 0; i < urb->actual_length; ++i)
+ tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_flip_buffer_push(tty);
+ }
+
+ /* schedule the interrupt urb if we are still open */
+ if (port->open_count != 0) {
+ port->interrupt_in_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
+ " error %d\n", __FUNCTION__, result);
+ }
+ }
+}
+
+static void oti6858_write_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
+ struct oti6858_private *priv = usb_get_serial_port_data(port);
+ int status = urb->status;
+ int result;
+
+ dbg("%s(port = %d, status = %d)",
+ __FUNCTION__, port->number, status);
+
+ switch (status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s(): urb shutting down with status: %d",
+ __FUNCTION__, status);
+ priv->flags.write_urb_in_use = 0;
+ return;
+ default:
+ /* error in the urb, so we have to resubmit it */
+ dbg("%s(): nonzero write bulk status received: %d",
+ __FUNCTION__, status);
+ dbg("%s(): overflow in write", __FUNCTION__);
+
+ port->write_urb->transfer_buffer_length = 1;
+ port->write_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
+ if (result) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
+ " error %d\n", __FUNCTION__, result);
+ } else {
+ return;
+ }
+ }
+
+ priv->flags.write_urb_in_use = 0;
+
+ /* schedule the interrupt urb if we are still open */
+ port->interrupt_in_urb->dev = port->serial->dev;
+ dbg("%s(): submitting interrupt urb", __FUNCTION__);
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0) {
+ dev_err(&port->dev, "%s(): failed submitting int urb,"
+ " error %d\n", __FUNCTION__, result);
+ }
+}
+
+
+/*
+ * pl2303_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+static struct pl2303_buf *pl2303_buf_alloc(unsigned int size)
+{
+ struct pl2303_buf *pb;
+
+ if (size == 0)
+ return NULL;
+
+ pb = (struct pl2303_buf *)kmalloc(sizeof(struct pl2303_buf), GFP_KERNEL);
+ if (pb == NULL)
+ return NULL;
+
+ pb->buf_buf = kmalloc(size, GFP_KERNEL);
+ if (pb->buf_buf == NULL) {
+ kfree(pb);
+ return NULL;
+ }
+
+ pb->buf_size = size;
+ pb->buf_get = pb->buf_put = pb->buf_buf;
+
+ return pb;
+}
+
+/*
+ * pl2303_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+static void pl2303_buf_free(struct pl2303_buf *pb)
+{
+ if (pb) {
+ kfree(pb->buf_buf);
+ kfree(pb);
+ }
+}
+
+/*
+ * pl2303_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+static void pl2303_buf_clear(struct pl2303_buf *pb)
+{
+ if (pb != NULL) {
+ /* equivalent to a get of all data available */
+ pb->buf_get = pb->buf_put;
+ }
+}
+
+/*
+ * pl2303_buf_data_avail
+ *
+ * Return the number of bytes of data available in the circular
+ * buffer.
+ */
+static unsigned int pl2303_buf_data_avail(struct pl2303_buf *pb)
+{
+ if (pb == NULL)
+ return 0;
+ return ((pb->buf_size + pb->buf_put - pb->buf_get) % pb->buf_size);
+}
+
+/*
+ * pl2303_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+static unsigned int pl2303_buf_space_avail(struct pl2303_buf *pb)
+{
+ if (pb == NULL)
+ return 0;
+ return ((pb->buf_size + pb->buf_get - pb->buf_put - 1) % pb->buf_size);
+}
+
+/*
+ * pl2303_buf_put
+ *
+ * Copy data from a user buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned int pl2303_buf_put(struct pl2303_buf *pb, const char *buf,
+ unsigned int count)
+{
+ unsigned int len;
+
+ if (pb == NULL)
+ return 0;
+
+ len = pl2303_buf_space_avail(pb);
+ if (count > len)
+ count = len;
+
+ if (count == 0)
+ return 0;
+
+ len = pb->buf_buf + pb->buf_size - pb->buf_put;
+ if (count > len) {
+ memcpy(pb->buf_put, buf, len);
+ memcpy(pb->buf_buf, buf+len, count - len);
+ pb->buf_put = pb->buf_buf + count - len;
+ } else {
+ memcpy(pb->buf_put, buf, count);
+ if (count < len)
+ pb->buf_put += count;
+ else /* count == len */
+ pb->buf_put = pb->buf_buf;
+ }
+
+ return count;
+}
+
+/*
+ * pl2303_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned int pl2303_buf_get(struct pl2303_buf *pb, char *buf,
+ unsigned int count)
+{
+ unsigned int len;
+
+ if (pb == NULL)
+ return 0;
+
+ len = pl2303_buf_data_avail(pb);
+ if (count > len)
+ count = len;
+
+ if (count == 0)
+ return 0;
+
+ len = pb->buf_buf + pb->buf_size - pb->buf_get;
+ if (count > len) {
+ memcpy(buf, pb->buf_get, len);
+ memcpy(buf+len, pb->buf_buf, count - len);
+ pb->buf_get = pb->buf_buf + count - len;
+ } else {
+ memcpy(buf, pb->buf_get, count);
+ if (count < len)
+ pb->buf_get += count;
+ else /* count == len */
+ pb->buf_get = pb->buf_buf;
+ }
+
+ return count;
+}
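A note on the circular-buffer arithmetic above (illustrative, not from the driver): one slot is always kept free so a full buffer can be distinguished from an empty one, giving the invariant

	pl2303_buf_data_avail(pb) + pl2303_buf_space_avail(pb) == pb->buf_size - 1

so the PL2303_BUF_SIZE of 1024 used here holds at most 1023 bytes: a fresh buffer reports 0 bytes of data and 1023 of space, and after a pl2303_buf_put() of 100 bytes it reports 100 and 923.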
+
+/* module description and (de)initialization */
+
+static int __init oti6858_init(void)
+{
+ int retval;
+
+ if ((retval = usb_serial_register(&oti6858_device)) == 0) {
+ if ((retval = usb_register(&oti6858_driver)) != 0)
+ usb_serial_deregister(&oti6858_device);
+ else
+ return 0;
+ }
+
+ return retval;
+}
+
+static void __exit oti6858_exit(void)
+{
+ usb_deregister(&oti6858_driver);
+ usb_serial_deregister(&oti6858_device);
+}
+
+module_init(oti6858_init);
+module_exit(oti6858_exit);
+
+MODULE_DESCRIPTION(OTI6858_DESCRIPTION);
+MODULE_AUTHOR(OTI6858_AUTHOR);
+MODULE_VERSION(OTI6858_VERSION);
+MODULE_LICENSE("GPL");
+
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "enable debug output");
+
diff --git a/drivers/usb/serial/oti6858.h b/drivers/usb/serial/oti6858.h
new file mode 100644
index 000000000000..704ac3a532b3
--- /dev/null
+++ b/drivers/usb/serial/oti6858.h
@@ -0,0 +1,15 @@
+/*
+ * Ours Technology Inc. OTi-6858 USB to serial adapter driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __LINUX_USB_SERIAL_OTI6858_H
+#define __LINUX_USB_SERIAL_OTI6858_H
+
+#define OTI6858_VENDOR_ID 0x0ea0
+#define OTI6858_PRODUCT_ID 0x6858
+
+#endif
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 83dfae93a45d..f9f85f56f0db 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1,14 +1,14 @@
/*
* Prolific PL2303 USB to serial adaptor driver
*
- * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2003 IBM Corp.
*
* Original driver for 2.2.x by anonymous
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
@@ -484,15 +484,6 @@ static void pl2303_set_termios(struct usb_serial_port *port,
spin_unlock_irqrestore(&priv->lock, flags);
cflag = port->tty->termios->c_cflag;
- /* check that they really want us to change something */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(port->tty->termios->c_iflag) ==
- RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("%s - nothing to change...", __FUNCTION__);
- return;
- }
- }
buf = kzalloc(7, GFP_KERNEL);
if (!buf) {
@@ -517,29 +508,7 @@ static void pl2303_set_termios(struct usb_serial_port *port,
dbg("%s - data bits = %d", __FUNCTION__, buf[6]);
}
- baud = 0;
- switch (cflag & CBAUD) {
- case B0: baud = 0; break;
- case B75: baud = 75; break;
- case B150: baud = 150; break;
- case B300: baud = 300; break;
- case B600: baud = 600; break;
- case B1200: baud = 1200; break;
- case B1800: baud = 1800; break;
- case B2400: baud = 2400; break;
- case B4800: baud = 4800; break;
- case B9600: baud = 9600; break;
- case B19200: baud = 19200; break;
- case B38400: baud = 38400; break;
- case B57600: baud = 57600; break;
- case B115200: baud = 115200; break;
- case B230400: baud = 230400; break;
- case B460800: baud = 460800; break;
- default:
- dev_err(&port->dev, "pl2303 driver does not support"
- " the baudrate requested (fix it)\n");
- break;
- }
+ baud = tty_get_baud_rate(port->tty);
dbg("%s - baud = %d", __FUNCTION__, baud);
if (baud) {
buf[0] = baud & 0xff;
@@ -617,6 +586,13 @@ static void pl2303_set_termios(struct usb_serial_port *port,
VENDOR_WRITE_REQUEST_TYPE,
0x0, index, NULL, 0, 100);
dbg("0x40:0x1:0x0:0x%x %d", index, i);
+ } else {
+ i = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ VENDOR_WRITE_REQUEST,
+ VENDOR_WRITE_REQUEST_TYPE,
+ 0x0, 0x0, NULL, 0, 100);
+ dbg ("0x40:0x1:0x0:0x0 %d", i);
}
kfree(buf);
@@ -954,11 +930,12 @@ static void pl2303_read_int_callback(struct urb *urb)
struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned int actual_length = urb->actual_length;
- int status;
+ int status = urb->status;
+ int retval;
dbg("%s (%d)", __FUNCTION__, port->number);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -967,11 +944,11 @@ static void pl2303_read_int_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __FUNCTION__,
- urb->status);
+ status);
return;
default:
dbg("%s - nonzero urb status received: %d", __FUNCTION__,
- urb->status);
+ status);
goto exit;
}
@@ -981,11 +958,11 @@ static void pl2303_read_int_callback(struct urb *urb)
pl2303_update_line_status(port, data, actual_length);
exit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
- __FUNCTION__, status);
+ __FUNCTION__, retval);
}
static void pl2303_read_bulk_callback(struct urb *urb)
@@ -997,23 +974,23 @@ static void pl2303_read_bulk_callback(struct urb *urb)
unsigned long flags;
int i;
int result;
- u8 status;
+ int status = urb->status;
+ u8 line_status;
char tty_flag;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - urb->status = %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - urb status = %d", __FUNCTION__, status);
if (!port->open_count) {
dbg("%s - port is closed, exiting.", __FUNCTION__);
return;
}
- if (urb->status == -EPROTO) {
+ if (status == -EPROTO) {
/* PL2303 mysteriously fails with -EPROTO reschedule
* the read */
dbg("%s - caught -EPROTO, resubmitting the urb",
__FUNCTION__);
- urb->status = 0;
urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
@@ -1033,18 +1010,18 @@ static void pl2303_read_bulk_callback(struct urb *urb)
tty_flag = TTY_NORMAL;
spin_lock_irqsave(&priv->lock, flags);
- status = priv->line_status;
+ line_status = priv->line_status;
priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
spin_unlock_irqrestore(&priv->lock, flags);
wake_up_interruptible(&priv->delta_msr_wait);
/* break takes precedence over parity, */
/* which takes precedence over framing errors */
- if (status & UART_BREAK_ERROR )
+ if (line_status & UART_BREAK_ERROR )
tty_flag = TTY_BREAK;
- else if (status & UART_PARITY_ERROR)
+ else if (line_status & UART_PARITY_ERROR)
tty_flag = TTY_PARITY;
- else if (status & UART_FRAME_ERROR)
+ else if (line_status & UART_FRAME_ERROR)
tty_flag = TTY_FRAME;
dbg("%s - tty_flag = %d", __FUNCTION__, tty_flag);
@@ -1052,7 +1029,7 @@ static void pl2303_read_bulk_callback(struct urb *urb)
if (tty && urb->actual_length) {
tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
- if (status & UART_OVERRUN_ERROR)
+ if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
for (i = 0; i < urb->actual_length; ++i)
tty_insert_flip_char(tty, data[i], tty_flag);
@@ -1076,10 +1053,11 @@ static void pl2303_write_bulk_callback(struct urb *urb)
struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
struct pl2303_private *priv = usb_get_serial_port_data(port);
int result;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -1088,14 +1066,14 @@ static void pl2303_write_bulk_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d", __FUNCTION__,
- urb->status);
+ status);
priv->write_urb_in_use = 0;
return;
default:
/* error in the urb, so we have to resubmit it */
dbg("%s - Overflow in write", __FUNCTION__);
dbg("%s - nonzero write bulk status received: %d", __FUNCTION__,
- urb->status);
+ status);
port->write_urb->transfer_buffer_length = 1;
port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 5a03a3fc9386..86899d55d8d8 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -211,11 +211,13 @@ static void safe_read_bulk_callback (struct urb *urb)
unsigned char length = urb->actual_length;
int i;
int result;
+ int status = urb->status;
dbg ("%s", __FUNCTION__);
- if (urb->status) {
- dbg ("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1829c6e8f0..0794ccdebfd4 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1,7 +1,7 @@
/*
USB Driver for Sierra Wireless
- Copyright (C) 2006 Kevin Lloyd <linux@sierrawireless.com>
+ Copyright (C) 2006, 2007 Kevin Lloyd <linux@sierrawireless.com>
IMPORTANT DISCLAIMER: This driver is not commercially supported by
Sierra Wireless. Use at your own risk.
@@ -12,10 +12,9 @@
Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
-
*/
-#define DRIVER_VERSION "v.1.0.6"
+#define DRIVER_VERSION "v.1.2.5b"
#define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
@@ -28,23 +27,98 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#define SWIMS_USB_REQUEST_SetMode 0x0B
+#define SWIMS_USB_REQUEST_TYPE_SetMode 0x40
+#define SWIMS_USB_INDEX_SetMode 0x0000
+#define SWIMS_SET_MODE_Modem 0x0001
+
+/* per port private data */
+#define N_IN_URB 4
+#define N_OUT_URB 4
+#define IN_BUFLEN 4096
+
+static int debug;
+
+enum devicetype {
+ DEVICE_3_PORT = 0,
+ DEVICE_1_PORT = 1,
+ DEVICE_INSTALLER = 2,
+};
+
+int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
+{
+ int result;
+ dev_dbg(&udev->dev, "%s", "SET POWER STATE");
+ result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ 0x00, /* __u8 request */
+ 0x40, /* __u8 request type */
+ swiState, /* __u16 value */
+ 0, /* __u16 index */
+ NULL, /* void *data */
+ 0, /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+ return result;
+}
+
+int sierra_set_ms_mode(struct usb_device *udev, __u16 eSocMode)
+{
+ int result;
+ dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH");
+ result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_SetMode, /* __u8 request */
+ SWIMS_USB_REQUEST_TYPE_SetMode, /* __u8 request type */
+ eSocMode, /* __u16 value */
+ SWIMS_USB_INDEX_SetMode, /* __u16 index */
+ NULL, /* void *data */
+ 0, /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+ return result;
+}
+
+int sierra_probe(struct usb_interface *iface, const struct usb_device_id *id)
+{
+ int result;
+ struct usb_device *udev;
+
+ udev = usb_get_dev(interface_to_usbdev(iface));
+
+ /* Check if in installer mode */
+ if (id->driver_info == DEVICE_INSTALLER) {
+ dev_dbg(&udev->dev, "%s", "FOUND DEVICE(SW)\n");
+ result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
+ /* We do not want to bind to the device when in installer mode */
+ return -EIO;
+ }
+
+ return usb_serial_probe(iface, id);
+}
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
+ { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
- { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless AirCard 595U */
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
+ { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
+
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
- { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */
+ { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
{ USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
+ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
+ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+ { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
+ { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
+ { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
+ { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
- { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
- { USB_DEVICE(0x0F3D, 0x0112) }, /* AirPrime/Sierra PC 5220 */
+ { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */
+ { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */
+
+ { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -58,43 +132,43 @@ static struct usb_device_id id_table_1port [] = {
static struct usb_device_id id_table_3port [] = {
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
{ USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
+ { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
- { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless AirCard 595U */
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
+ { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
+
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
- { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */
+ { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
{ USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
+ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
+ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+ { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
+ { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
+ { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880E */
+ { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881E */
{ }
};
static struct usb_driver sierra_driver = {
.name = "sierra",
- .probe = usb_serial_probe,
+ .probe = sierra_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
.no_dynamic_id = 1,
};
-static int debug;
-
-/* per port private data */
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 128
-
struct sierra_port_private {
+ spinlock_t lock; /* lock the structure */
+ int outstanding_urbs; /* number of out urbs in flight */
+
/* Input endpoints and buffer for this port */
struct urb *in_urbs[N_IN_URB];
char in_buffer[N_IN_URB][IN_BUFLEN];
- /* Output endpoints and buffer for this port */
- struct urb *out_urbs[N_OUT_URB];
- char out_buffer[N_OUT_URB][OUT_BUFLEN];
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
@@ -103,8 +177,6 @@ struct sierra_port_private {
int dsr_state;
int dcd_state;
int ri_state;
-
- unsigned long tx_start_time[N_OUT_URB];
};
static int sierra_send_setup(struct usb_serial_port *port)
@@ -197,61 +269,98 @@ static int sierra_ioctl(struct usb_serial_port *port, struct file *file,
return -ENOIOCTLCMD;
}
+static void sierra_outdat_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ int status = urb->status;
+ unsigned long flags;
+
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ /* free up the transfer buffer, as usb_free_urb() does not do this */
+ kfree(urb->transfer_buffer);
+
+ if (status)
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
+
+ spin_lock_irqsave(&portdata->lock, flags);
+ --portdata->outstanding_urbs;
+ spin_unlock_irqrestore(&portdata->lock, flags);
+
+ usb_serial_port_softint(port);
+}
+
/* Write */
static int sierra_write(struct usb_serial_port *port,
const unsigned char *buf, int count)
{
- struct sierra_port_private *portdata;
- int i;
- int left, todo;
- struct urb *this_urb = NULL; /* spurious */
- int err;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ unsigned long flags;
+ unsigned char *buffer;
+ struct urb *urb;
+ int status;
portdata = usb_get_serial_port_data(port);
dbg("%s: write (%d chars)", __FUNCTION__, count);
- i = 0;
- left = count;
- for (i=0; left > 0 && i < N_OUT_URB; i++) {
- todo = left;
- if (todo > OUT_BUFLEN)
- todo = OUT_BUFLEN;
-
- this_urb = portdata->out_urbs[i];
- if (this_urb->status == -EINPROGRESS) {
- if (time_before(jiffies,
- portdata->tx_start_time[i] + 10 * HZ))
- continue;
- usb_unlink_urb(this_urb);
- continue;
- }
- if (this_urb->status != 0)
- dbg("usb_write %p failed (err=%d)",
- this_urb, this_urb->status);
-
- dbg("%s: endpoint %d buf %d", __FUNCTION__,
- usb_pipeendpoint(this_urb->pipe), i);
-
- /* send the data */
- memcpy (this_urb->transfer_buffer, buf, todo);
- this_urb->transfer_buffer_length = todo;
-
- this_urb->dev = port->serial->dev;
- err = usb_submit_urb(this_urb, GFP_ATOMIC);
- if (err) {
- dbg("usb_submit_urb %p (write bulk) failed "
- "(%d, has %d)", this_urb,
- err, this_urb->status);
- continue;
- }
- portdata->tx_start_time[i] = jiffies;
- buf += todo;
- left -= todo;
+ spin_lock_irqsave(&portdata->lock, flags);
+ if (portdata->outstanding_urbs > N_OUT_URB) {
+ spin_unlock_irqrestore(&portdata->lock, flags);
+ dbg("%s - write limit hit\n", __FUNCTION__);
+ return 0;
+ }
+ portdata->outstanding_urbs++;
+ spin_unlock_irqrestore(&portdata->lock, flags);
+
+ buffer = kmalloc(count, GFP_ATOMIC);
+ if (!buffer) {
+ dev_err(&port->dev, "out of memory\n");
+ count = -ENOMEM;
+ goto error_no_buffer;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ dev_err(&port->dev, "no more free urbs\n");
+ count = -ENOMEM;
+ goto error_no_urb;
+ }
+
+ memcpy(buffer, buf, count);
+
+ usb_serial_debug_data(debug, &port->dev, __FUNCTION__, count, buffer);
+
+ usb_fill_bulk_urb(urb, serial->dev,
+ usb_sndbulkpipe(serial->dev,
+ port->bulk_out_endpointAddress),
+ buffer, count, sierra_outdat_callback, port);
+
+ /* send it down the pipe */
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status) {
+ dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
+ "with status = %d\n", __FUNCTION__, status);
+ count = status;
+ goto error;
}
- count -= left;
- dbg("%s: wrote (did %d)", __FUNCTION__, count);
+ /* we are done with this urb, so let the host driver
+ * really free it when it is finished with it */
+ usb_free_urb(urb);
+
+ return count;
+error:
+ usb_free_urb(urb);
+error_no_urb:
+ kfree(buffer);
+error_no_buffer:
+ spin_lock_irqsave(&portdata->lock, flags);
+ --portdata->outstanding_urbs;
+ spin_unlock_irqrestore(&portdata->lock, flags);
return count;
}
@@ -262,15 +371,16 @@ static void sierra_indat_callback(struct urb *urb)
struct usb_serial_port *port;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
dbg("%s: %p", __FUNCTION__, urb);
endpoint = usb_pipeendpoint(urb->pipe);
port = (struct usb_serial_port *) urb->context;
- if (urb->status) {
+ if (status) {
dbg("%s: nonzero status: %d on endpoint %02x.",
- __FUNCTION__, urb->status, endpoint);
+ __FUNCTION__, status, endpoint);
} else {
tty = port->tty;
if (urb->actual_length) {
@@ -282,30 +392,20 @@ static void sierra_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- if (port->open_count && urb->status != -ESHUTDOWN) {
+ if (port->open_count && status != -ESHUTDOWN) {
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
- printk(KERN_ERR "%s: resubmit read urb failed. "
- "(%d)", __FUNCTION__, err);
+ dev_err(&port->dev, "resubmit read urb failed."
+ "(%d)", err);
}
}
return;
}
-static void sierra_outdat_callback(struct urb *urb)
-{
- struct usb_serial_port *port;
-
- dbg("%s", __FUNCTION__);
-
- port = (struct usb_serial_port *) urb->context;
-
- usb_serial_port_softint(port);
-}
-
static void sierra_instat_callback(struct urb *urb)
{
int err;
+ int status = urb->status;
struct usb_serial_port *port = (struct usb_serial_port *) urb->context;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
@@ -313,7 +413,7 @@ static void sierra_instat_callback(struct urb *urb)
dbg("%s", __FUNCTION__);
dbg("%s: urb %p port %p has data %p", __FUNCTION__,urb,port,portdata);
- if (urb->status == 0) {
+ if (status == 0) {
struct usb_ctrlrequest *req_pkt =
(struct usb_ctrlrequest *)urb->transfer_buffer;
@@ -344,10 +444,10 @@ static void sierra_instat_callback(struct urb *urb)
req_pkt->bRequestType,req_pkt->bRequest);
}
} else
- dbg("%s: error %d", __FUNCTION__, urb->status);
+ dbg("%s: error %d", __FUNCTION__, status);
/* Resubmit urb so we continue receiving IRQ data */
- if (urb->status != -ESHUTDOWN) {
+ if (status != -ESHUTDOWN) {
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
@@ -358,49 +458,44 @@ static void sierra_instat_callback(struct urb *urb)
static int sierra_write_room(struct usb_serial_port *port)
{
- struct sierra_port_private *portdata;
- int i;
- int data_len = 0;
- struct urb *this_urb;
+ struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ unsigned long flags;
- portdata = usb_get_serial_port_data(port);
+ dbg("%s - port %d", __FUNCTION__, port->number);
- for (i=0; i < N_OUT_URB; i++) {
- this_urb = portdata->out_urbs[i];
- if (this_urb && this_urb->status != -EINPROGRESS)
- data_len += OUT_BUFLEN;
+ /* try to give a good number back based on if we have any free urbs at
+ * this point in time */
+ spin_lock_irqsave(&portdata->lock, flags);
+ if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) {
+ spin_unlock_irqrestore(&portdata->lock, flags);
+ dbg("%s - write limit hit\n", __FUNCTION__);
+ return 0;
}
+ spin_unlock_irqrestore(&portdata->lock, flags);
- dbg("%s: %d", __FUNCTION__, data_len);
- return data_len;
+ return 2048;
}
static int sierra_chars_in_buffer(struct usb_serial_port *port)
{
- struct sierra_port_private *portdata;
- int i;
- int data_len = 0;
- struct urb *this_urb;
-
- portdata = usb_get_serial_port_data(port);
-
- for (i=0; i < N_OUT_URB; i++) {
- this_urb = portdata->out_urbs[i];
- if (this_urb && this_urb->status == -EINPROGRESS)
- data_len += this_urb->transfer_buffer_length;
- }
- dbg("%s: %d", __FUNCTION__, data_len);
- return data_len;
+ dbg("%s - port %d", __FUNCTION__, port->number);
+
+ /*
+ * We can't really account for how much data we
+ * have sent out, but hasn't made it through to the
+ * device as we can't see the backend here, so just
+ * tell the tty layer that everything is flushed.
+ */
+ return 0;
}
static int sierra_open(struct usb_serial_port *port, struct file *filp)
{
struct sierra_port_private *portdata;
struct usb_serial *serial = port->serial;
- int i, err;
+ int i;
struct urb *urb;
int result;
- __u16 set_mode_dzero = 0x0000;
portdata = usb_get_serial_port_data(port);
@@ -413,7 +508,7 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
/* Reset low level data toggle and start reading from endpoints */
for (i = 0; i < N_IN_URB; i++) {
urb = portdata->in_urbs[i];
- if (! urb)
+ if (!urb)
continue;
if (urb->dev != serial->dev) {
dbg("%s: dev %p != %p", __FUNCTION__,
@@ -427,35 +522,25 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
*/
usb_clear_halt(urb->dev, urb->pipe);
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- dbg("%s: submit urb %d failed (%d) %d",
- __FUNCTION__, i, err,
- urb->transfer_buffer_length);
+ result = usb_submit_urb(urb, GFP_KERNEL);
+ if (result) {
+ dev_err(&port->dev, "submit urb %d failed (%d) %d",
+ i, result, urb->transfer_buffer_length);
}
}
- /* Reset low level data toggle on out endpoints */
- for (i = 0; i < N_OUT_URB; i++) {
- urb = portdata->out_urbs[i];
- if (! urb)
- continue;
- urb->dev = serial->dev;
- /* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
- usb_pipeout(urb->pipe), 0); */
- }
-
port->tty->low_latency = 1;
- /* set mode to D0 */
- result = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- 0x00, 0x40, set_mode_dzero, 0, NULL,
- 0, USB_CTRL_SET_TIMEOUT);
-
sierra_send_setup(port);
- return (0);
+ /* start up the interrupt endpoint if we have one */
+ if (port->interrupt_in_urb) {
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (result)
+ dev_err(&port->dev, "submit irq_in urb failed %d",
+ result);
+ }
+ return 0;
}
static void sierra_close(struct usb_serial_port *port, struct file *filp)
@@ -475,74 +560,27 @@ static void sierra_close(struct usb_serial_port *port, struct file *filp)
/* Stop reading/writing urbs */
for (i = 0; i < N_IN_URB; i++)
- usb_unlink_urb(portdata->in_urbs[i]);
- for (i = 0; i < N_OUT_URB; i++)
- usb_unlink_urb(portdata->out_urbs[i]);
- }
- port->tty = NULL;
-}
-
-/* Helper functions used by sierra_setup_urbs */
-static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint,
- int dir, void *ctx, char *buf, int len,
- usb_complete_t callback)
-{
- struct urb *urb;
-
- if (endpoint == -1)
- return NULL; /* endpoint not needed */
-
- urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
- if (urb == NULL) {
- dbg("%s: alloc for endpoint %d failed.", __FUNCTION__, endpoint);
- return NULL;
+ usb_kill_urb(portdata->in_urbs[i]);
}
- /* Fill URB using supplied data. */
- usb_fill_bulk_urb(urb, serial->dev,
- usb_sndbulkpipe(serial->dev, endpoint) | dir,
- buf, len, callback, ctx);
-
- return urb;
-}
-
-/* Setup urbs */
-static void sierra_setup_urbs(struct usb_serial *serial)
-{
- int i,j;
- struct usb_serial_port *port;
- struct sierra_port_private *portdata;
-
- dbg("%s", __FUNCTION__);
-
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
-
- /* Do indat endpoints first */
- for (j = 0; j < N_IN_URB; ++j) {
- portdata->in_urbs[j] = sierra_setup_urb (serial,
- port->bulk_in_endpointAddress, USB_DIR_IN, port,
- portdata->in_buffer[j], IN_BUFLEN, sierra_indat_callback);
- }
+ usb_kill_urb(port->interrupt_in_urb);
- /* outdat endpoints */
- for (j = 0; j < N_OUT_URB; ++j) {
- portdata->out_urbs[j] = sierra_setup_urb (serial,
- port->bulk_out_endpointAddress, USB_DIR_OUT, port,
- portdata->out_buffer[j], OUT_BUFLEN, sierra_outdat_callback);
- }
- }
+ port->tty = NULL;
}
static int sierra_startup(struct usb_serial *serial)
{
- int i, err;
struct usb_serial_port *port;
struct sierra_port_private *portdata;
+ struct urb *urb;
+ int i;
+ int j;
dbg("%s", __FUNCTION__);
+ /* Set device mode to D0 */
+ sierra_set_power_state(serial->dev, 0x0000);
+
/* Now setup per port private data */
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
@@ -550,22 +588,31 @@ static int sierra_startup(struct usb_serial *serial)
if (!portdata) {
dbg("%s: kmalloc for sierra_port_private (%d) failed!.",
__FUNCTION__, i);
- return (1);
+ return -ENOMEM;
}
+ spin_lock_init(&portdata->lock);
usb_set_serial_port_data(port, portdata);
- if (! port->interrupt_in_urb)
- continue;
- err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (err)
- dbg("%s: submit irq_in urb failed %d",
- __FUNCTION__, err);
+ /* initialize the in urbs */
+ for (j = 0; j < N_IN_URB; ++j) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (urb == NULL) {
+ dbg("%s: alloc for in port failed.",
+ __FUNCTION__);
+ continue;
+ }
+ /* Fill URB using supplied data. */
+ usb_fill_bulk_urb(urb, serial->dev,
+ usb_rcvbulkpipe(serial->dev,
+ port->bulk_in_endpointAddress),
+ portdata->in_buffer[j], IN_BUFLEN,
+ sierra_indat_callback, port);
+ portdata->in_urbs[j] = urb;
+ }
}
- sierra_setup_urbs(serial);
-
- return (0);
+ return 0;
}
static void sierra_shutdown(struct usb_serial *serial)
@@ -576,22 +623,6 @@ static void sierra_shutdown(struct usb_serial *serial)
dbg("%s", __FUNCTION__);
- /* Stop reading/writing urbs */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- if (!port)
- continue;
- portdata = usb_get_serial_port_data(port);
- if (!portdata)
- continue;
-
- for (j = 0; j < N_IN_URB; j++)
- usb_unlink_urb(portdata->in_urbs[j]);
- for (j = 0; j < N_OUT_URB; j++)
- usb_unlink_urb(portdata->out_urbs[j]);
- }
-
- /* Now free them */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
if (!port)
@@ -601,25 +632,12 @@ static void sierra_shutdown(struct usb_serial *serial)
continue;
for (j = 0; j < N_IN_URB; j++) {
- if (portdata->in_urbs[j]) {
- usb_free_urb(portdata->in_urbs[j]);
- portdata->in_urbs[j] = NULL;
- }
+ usb_kill_urb(portdata->in_urbs[j]);
+ usb_free_urb(portdata->in_urbs[j]);
+ portdata->in_urbs[j] = NULL;
}
- for (j = 0; j < N_OUT_URB; j++) {
- if (portdata->out_urbs[j]) {
- usb_free_urb(portdata->out_urbs[j]);
- portdata->out_urbs[j] = NULL;
- }
- }
- }
-
- /* Now free per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- if (!port)
- continue;
- kfree(usb_get_serial_port_data(port));
+ kfree(portdata);
+ usb_set_serial_port_data(port, NULL);
}
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3d505fd0645b..f98626ae75fe 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1112,22 +1112,24 @@ static void ti_interrupt_callback(struct urb *urb)
int length = urb->actual_length;
int port_number;
int function;
- int status;
+ int status = urb->status;
+ int retval;
__u8 msr;
dbg("%s", __FUNCTION__);
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- dbg("%s - urb shutting down, %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down, %d", __FUNCTION__, status);
tdev->td_urb_error = 1;
return;
default:
- dev_err(dev, "%s - nonzero urb status, %d\n", __FUNCTION__, urb->status);
+ dev_err(dev, "%s - nonzero urb status, %d\n",
+ __FUNCTION__, status);
tdev->td_urb_error = 1;
goto exit;
}
@@ -1175,9 +1177,10 @@ static void ti_interrupt_callback(struct urb *urb)
}
exit:
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status)
- dev_err(dev, "%s - resubmit interrupt urb failed, %d\n", __FUNCTION__, status);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ dev_err(dev, "%s - resubmit interrupt urb failed, %d\n",
+ __FUNCTION__, retval);
}
@@ -1186,30 +1189,32 @@ static void ti_bulk_in_callback(struct urb *urb)
struct ti_port *tport = (struct ti_port *)urb->context;
struct usb_serial_port *port = tport->tp_port;
struct device *dev = &urb->dev->dev;
- int status = 0;
+ int status = urb->status;
+ int retval = 0;
dbg("%s", __FUNCTION__);
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- dbg("%s - urb shutting down, %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down, %d", __FUNCTION__, status);
tport->tp_tdev->td_urb_error = 1;
wake_up_interruptible(&tport->tp_write_wait);
return;
default:
- dev_err(dev, "%s - nonzero urb status, %d\n", __FUNCTION__, urb->status );
+ dev_err(dev, "%s - nonzero urb status, %d\n",
+ __FUNCTION__, status );
tport->tp_tdev->td_urb_error = 1;
wake_up_interruptible(&tport->tp_write_wait);
}
- if (urb->status == -EPIPE)
+ if (status == -EPIPE)
goto exit;
- if (urb->status) {
+ if (status) {
dev_err(dev, "%s - stopping read!\n", __FUNCTION__);
return;
}
@@ -1234,13 +1239,14 @@ exit:
spin_lock(&tport->tp_lock);
if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) {
urb->dev = port->serial->dev;
- status = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
} else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING) {
tport->tp_read_urb_state = TI_READ_URB_STOPPED;
}
spin_unlock(&tport->tp_lock);
- if (status)
- dev_err(dev, "%s - resubmit read urb failed, %d\n", __FUNCTION__, status);
+ if (retval)
+ dev_err(dev, "%s - resubmit read urb failed, %d\n",
+ __FUNCTION__, retval);
}
@@ -1249,23 +1255,25 @@ static void ti_bulk_out_callback(struct urb *urb)
struct ti_port *tport = (struct ti_port *)urb->context;
struct usb_serial_port *port = tport->tp_port;
struct device *dev = &urb->dev->dev;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
tport->tp_write_urb_in_use = 0;
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
- dbg("%s - urb shutting down, %d", __FUNCTION__, urb->status);
+ dbg("%s - urb shutting down, %d", __FUNCTION__, status);
tport->tp_tdev->td_urb_error = 1;
wake_up_interruptible(&tport->tp_write_wait);
return;
default:
- dev_err(dev, "%s - nonzero urb status, %d\n", __FUNCTION__, urb->status);
+ dev_err(dev, "%s - nonzero urb status, %d\n",
+ __FUNCTION__, status);
tport->tp_tdev->td_urb_error = 1;
wake_up_interruptible(&tport->tp_write_wait);
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 87f378806db6..a3665659d13b 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -46,6 +46,8 @@ static struct usb_driver usb_serial_driver = {
.name = "usbserial",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
+ .suspend = usb_serial_suspend,
+ .resume = usb_serial_resume,
.no_dynamic_id = 1,
};
@@ -120,11 +122,9 @@ static void return_serial(struct usb_serial *serial)
if (serial == NULL)
return;
- spin_lock(&table_lock);
for (i = 0; i < serial->num_ports; ++i) {
serial_table[serial->minor + i] = NULL;
}
- spin_unlock(&table_lock);
}
static void destroy_serial(struct kref *kref)
@@ -172,7 +172,9 @@ static void destroy_serial(struct kref *kref)
void usb_serial_put(struct usb_serial *serial)
{
+ spin_lock(&table_lock);
kref_put(&serial->kref, destroy_serial);
+ spin_unlock(&table_lock);
}
/*****************************************************************************
@@ -1069,6 +1071,35 @@ void usb_serial_disconnect(struct usb_interface *interface)
dev_info(dev, "device disconnected\n");
}
+int usb_serial_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usb_serial *serial = usb_get_intfdata(intf);
+ struct usb_serial_port *port;
+ int i, r = 0;
+
+ if (serial) {
+ for (i = 0; i < serial->num_ports; ++i) {
+ port = serial->port[i];
+ if (port)
+ kill_traffic(port);
+ }
+ }
+
+ if (serial && serial->type->suspend)
+ serial->type->suspend(serial, message);
+
+ return r;
+}
+EXPORT_SYMBOL(usb_serial_suspend);
+
+int usb_serial_resume(struct usb_interface *intf)
+{
+ struct usb_serial *serial = usb_get_intfdata(intf);
+
+ return serial->type->resume(serial);
+}
+EXPORT_SYMBOL(usb_serial_resume);
+
static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index ffbe601cde2a..7d84a7647e81 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -5,9 +5,9 @@
* Copyright (C) 1999 - 2004
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
@@ -273,7 +273,8 @@ struct visor_private {
int bytes_in;
int bytes_out;
int outstanding_urbs;
- int throttled;
+ unsigned char throttled;
+ unsigned char actually_throttled;
};
/* number of outstanding urbs to prevent userspace DoS from happening */
@@ -484,16 +485,17 @@ static void visor_write_bulk_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct visor_private *priv = usb_get_serial_port_data(port);
+ int status = urb->status;
unsigned long flags;
/* free up the transfer buffer, as usb_free_urb() does not do this */
kfree (urb->transfer_buffer);
dbg("%s - port %d", __FUNCTION__, port->number);
-
- if (urb->status)
+
+ if (status)
dbg("%s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
spin_lock_irqsave(&priv->lock, flags);
--priv->outstanding_urbs;
@@ -508,15 +510,16 @@ static void visor_read_bulk_callback (struct urb *urb)
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct visor_private *priv = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
+ int status = urb->status;
struct tty_struct *tty;
- unsigned long flags;
- int throttled;
int result;
+ int available_room;
dbg("%s - port %d", __FUNCTION__, port->number);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -524,17 +527,20 @@ static void visor_read_bulk_callback (struct urb *urb)
tty = port->tty;
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
+ available_room = tty_buffer_request_room(tty, urb->actual_length);
+ if (available_room) {
+ tty_insert_flip_string(tty, data, available_room);
+ tty_flip_buffer_push(tty);
+ }
+ spin_lock(&priv->lock);
+ priv->bytes_in += available_room;
+
+ } else {
+ spin_lock(&priv->lock);
}
- spin_lock_irqsave(&priv->lock, flags);
- priv->bytes_in += urb->actual_length;
- throttled = priv->throttled;
- spin_unlock_irqrestore(&priv->lock, flags);
/* Continue trying to always read if we should */
- if (!throttled) {
+ if (!priv->throttled) {
usb_fill_bulk_urb (port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
@@ -544,16 +550,19 @@ static void visor_read_bulk_callback (struct urb *urb)
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __FUNCTION__, result);
+ } else {
+ priv->actually_throttled = 1;
}
- return;
+ spin_unlock(&priv->lock);
}
static void visor_read_int_callback (struct urb *urb)
{
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
+ int status = urb->status;
int result;
- switch (urb->status) {
+ switch (status) {
case 0:
/* success */
break;
@@ -562,11 +571,11 @@ static void visor_read_int_callback (struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
- __FUNCTION__, urb->status);
+ __FUNCTION__, status);
goto exit;
}
@@ -608,6 +617,7 @@ static void visor_unthrottle (struct usb_serial_port *port)
dbg("%s - port %d", __FUNCTION__, port->number);
spin_lock_irqsave(&priv->lock, flags);
priv->throttled = 0;
+ priv->actually_throttled = 0;
spin_unlock_irqrestore(&priv->lock, flags);
port->read_urb->dev = port->serial->dev;
@@ -938,14 +948,6 @@ static void visor_set_termios (struct usb_serial_port *port, struct ktermios *ol
}
cflag = port->tty->termios->c_cflag;
- /* check that they really want us to change something */
- if (old_termios) {
- if ((cflag == old_termios->c_cflag) &&
- (RELEVANT_IFLAG(port->tty->termios->c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) {
- dbg("%s - nothing to change...", __FUNCTION__);
- return;
- }
- }
/* get the byte size */
switch (cflag & CSIZE) {
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 27c5f8f9a2d5..cc8b44c08712 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -74,6 +74,7 @@
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/termbits.h>
#include <linux/usb.h>
@@ -203,7 +204,7 @@ static struct usb_serial_driver whiteheat_device = {
struct whiteheat_command_private {
- spinlock_t lock;
+ struct mutex mutex;
__u8 port_running;
__u8 command_finished;
wait_queue_head_t wait_command; /* for handling sleeping while waiting for a command to finish */
@@ -232,6 +233,7 @@ struct whiteheat_private {
struct usb_serial_port *port;
struct list_head tx_urbs_free;
struct list_head tx_urbs_submitted;
+ struct mutex deathwarrant;
};
@@ -425,6 +427,7 @@ static int whiteheat_attach (struct usb_serial *serial)
}
spin_lock_init(&info->lock);
+ mutex_init(&info->deathwarrant);
info->flags = 0;
info->mcr = 0;
INIT_WORK(&info->rx_work, rx_data_softint);
@@ -495,7 +498,7 @@ static int whiteheat_attach (struct usb_serial *serial)
goto no_command_private;
}
- spin_lock_init(&command_info->lock);
+ mutex_init(&command_info->mutex);
command_info->port_running = 0;
init_waitqueue_head(&command_info->wait_command);
usb_set_serial_port_data(command_port, command_info);
@@ -654,7 +657,6 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
struct urb *urb;
struct list_head *tmp;
struct list_head *tmp2;
- unsigned long flags;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -683,24 +685,32 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
firm_close(port);
+printk(KERN_ERR"Before processing rx_urbs_submitted.\n");
/* shutdown our bulk reads and writes */
- spin_lock_irqsave(&info->lock, flags);
+ mutex_lock(&info->deathwarrant);
+ spin_lock_irq(&info->lock);
list_for_each_safe(tmp, tmp2, &info->rx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
+ list_del(tmp);
+ spin_unlock_irq(&info->lock);
usb_kill_urb(urb);
- list_move(tmp, &info->rx_urbs_free);
+ spin_lock_irq(&info->lock);
+ list_add(tmp, &info->rx_urbs_free);
}
list_for_each_safe(tmp, tmp2, &info->rx_urb_q)
list_move(tmp, &info->rx_urbs_free);
-
list_for_each_safe(tmp, tmp2, &info->tx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
+ list_del(tmp);
+ spin_unlock_irq(&info->lock);
usb_kill_urb(urb);
- list_move(tmp, &info->tx_urbs_free);
+ spin_lock_irq(&info->lock);
+ list_add(tmp, &info->tx_urbs_free);
}
- spin_unlock_irqrestore(&info->lock, flags);
+ spin_unlock_irq(&info->lock);
+ mutex_unlock(&info->deathwarrant);
stop_command_port(port->serial);
@@ -872,7 +882,7 @@ static int whiteheat_ioctl (struct usb_serial_port *port, struct file * file, un
}
-static void whiteheat_set_termios (struct usb_serial_port *port, struct ktermios *old_termios)
+static void whiteheat_set_termios(struct usb_serial_port *port, struct ktermios *old_termios)
{
dbg("%s -port %d", __FUNCTION__, port->number);
@@ -881,15 +891,6 @@ static void whiteheat_set_termios (struct usb_serial_port *port, struct ktermios
goto exit;
}
- /* check that they really want us to change something */
- if (old_termios) {
- if ((port->tty->termios->c_cflag == old_termios->c_cflag) &&
- (port->tty->termios->c_iflag == old_termios->c_iflag)) {
- dbg("%s - nothing to change...", __FUNCTION__);
- goto exit;
- }
- }
-
firm_setup_port(port);
exit:
@@ -920,7 +921,7 @@ static int whiteheat_chars_in_buffer(struct usb_serial_port *port)
spin_unlock_irqrestore(&info->lock, flags);
dbg ("%s - returns %d", __FUNCTION__, chars);
- return (chars);
+ return chars;
}
@@ -962,54 +963,57 @@ static void whiteheat_unthrottle (struct usb_serial_port *port)
/*****************************************************************************
* Connect Tech's White Heat callback routines
*****************************************************************************/
-static void command_port_write_callback (struct urb *urb)
+static void command_port_write_callback(struct urb *urb)
{
+ int status = urb->status;
+
dbg("%s", __FUNCTION__);
- if (urb->status) {
- dbg ("nonzero urb status: %d", urb->status);
+ if (status) {
+ dbg("nonzero urb status: %d", status);
return;
}
}
-static void command_port_read_callback (struct urb *urb)
+static void command_port_read_callback(struct urb *urb)
{
struct usb_serial_port *command_port = (struct usb_serial_port *)urb->context;
struct whiteheat_command_private *command_info;
+ int status = urb->status;
unsigned char *data = urb->transfer_buffer;
int result;
- unsigned long flags;
dbg("%s", __FUNCTION__);
- if (urb->status) {
- dbg("%s - nonzero urb status: %d", __FUNCTION__, urb->status);
- return;
- }
-
- usb_serial_debug_data(debug, &command_port->dev, __FUNCTION__, urb->actual_length, data);
-
command_info = usb_get_serial_port_data(command_port);
if (!command_info) {
dbg ("%s - command_info is NULL, exiting.", __FUNCTION__);
return;
}
- spin_lock_irqsave(&command_info->lock, flags);
+ if (status) {
+ dbg("%s - nonzero urb status: %d", __FUNCTION__, status);
+ if (status != -ENOENT)
+ command_info->command_finished = WHITEHEAT_CMD_FAILURE;
+ wake_up(&command_info->wait_command);
+ return;
+ }
+
+ usb_serial_debug_data(debug, &command_port->dev, __FUNCTION__, urb->actual_length, data);
if (data[0] == WHITEHEAT_CMD_COMPLETE) {
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
- wake_up_interruptible(&command_info->wait_command);
+ wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_CMD_FAILURE) {
command_info->command_finished = WHITEHEAT_CMD_FAILURE;
- wake_up_interruptible(&command_info->wait_command);
+ wake_up(&command_info->wait_command);
} else if (data[0] == WHITEHEAT_EVENT) {
/* These are unsolicited reports from the firmware, hence no waiting command to wakeup */
dbg("%s - event received", __FUNCTION__);
} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
memcpy(command_info->result_buffer, &data[1], urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
- wake_up_interruptible(&command_info->wait_command);
+ wake_up(&command_info->wait_command);
} else {
dbg("%s - bad reply from firmware", __FUNCTION__);
}
@@ -1017,7 +1021,6 @@ static void command_port_read_callback (struct urb *urb)
/* Continue trying to always read */
command_port->read_urb->dev = command_port->serial->dev;
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
- spin_unlock_irqrestore(&command_info->lock, flags);
if (result)
dbg("%s - failed resubmitting read urb, error %d", __FUNCTION__, result);
}
@@ -1029,6 +1032,7 @@ static void whiteheat_read_callback(struct urb *urb)
struct whiteheat_urb_wrap *wrap;
unsigned char *data = urb->transfer_buffer;
struct whiteheat_private *info = usb_get_serial_port_data(port);
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -1042,8 +1046,9 @@ static void whiteheat_read_callback(struct urb *urb)
list_del(&wrap->list);
spin_unlock(&info->lock);
- if (urb->status) {
- dbg("%s - nonzero read bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero read bulk status received: %d",
+ __FUNCTION__, status);
spin_lock(&info->lock);
list_add(&wrap->list, &info->rx_urbs_free);
spin_unlock(&info->lock);
@@ -1070,6 +1075,7 @@ static void whiteheat_write_callback(struct urb *urb)
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct whiteheat_private *info = usb_get_serial_port_data(port);
struct whiteheat_urb_wrap *wrap;
+ int status = urb->status;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -1083,8 +1089,9 @@ static void whiteheat_write_callback(struct urb *urb)
list_move(&wrap->list, &info->tx_urbs_free);
spin_unlock(&info->lock);
- if (urb->status) {
- dbg("%s - nonzero write bulk status received: %d", __FUNCTION__, urb->status);
+ if (status) {
+ dbg("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, status);
return;
}
@@ -1095,20 +1102,20 @@ static void whiteheat_write_callback(struct urb *urb)
/*****************************************************************************
* Connect Tech's White Heat firmware interface
*****************************************************************************/
-static int firm_send_command (struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize)
+static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
struct whiteheat_private *info;
__u8 *transfer_buffer;
int retval = 0;
- unsigned long flags;
+ int t;
dbg("%s - command %d", __FUNCTION__, command);
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
- spin_lock_irqsave(&command_info->lock, flags);
+ mutex_lock(&command_info->mutex);
command_info->command_finished = false;
transfer_buffer = (__u8 *)command_port->write_urb->transfer_buffer;
@@ -1116,18 +1123,17 @@ static int firm_send_command (struct usb_serial_port *port, __u8 command, __u8 *
memcpy (&transfer_buffer[1], data, datasize);
command_port->write_urb->transfer_buffer_length = datasize + 1;
command_port->write_urb->dev = port->serial->dev;
- retval = usb_submit_urb (command_port->write_urb, GFP_KERNEL);
+ retval = usb_submit_urb (command_port->write_urb, GFP_NOIO);
if (retval) {
dbg("%s - submit urb failed", __FUNCTION__);
goto exit;
}
- spin_unlock_irqrestore(&command_info->lock, flags);
/* wait for the command to complete */
- wait_event_interruptible_timeout(command_info->wait_command,
+ t = wait_event_timeout(command_info->wait_command,
(bool)command_info->command_finished, COMMAND_TIMEOUT);
-
- spin_lock_irqsave(&command_info->lock, flags);
+ if (!t)
+ usb_kill_urb(command_port->write_urb);
if (command_info->command_finished == false) {
dbg("%s - command timed out.", __FUNCTION__);
@@ -1152,7 +1158,7 @@ static int firm_send_command (struct usb_serial_port *port, __u8 command, __u8 *
}
exit:
- spin_unlock_irqrestore(&command_info->lock, flags);
+ mutex_unlock(&command_info->mutex);
return retval;
}
@@ -1305,12 +1311,11 @@ static int start_command_port(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
- unsigned long flags;
int retval = 0;
command_port = serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
- spin_lock_irqsave(&command_info->lock, flags);
+ mutex_lock(&command_info->mutex);
if (!command_info->port_running) {
/* Work around HCD bugs */
usb_clear_halt(serial->dev, command_port->read_urb->pipe);
@@ -1325,7 +1330,7 @@ static int start_command_port(struct usb_serial *serial)
command_info->port_running++;
exit:
- spin_unlock_irqrestore(&command_info->lock, flags);
+ mutex_unlock(&command_info->mutex);
return retval;
}
@@ -1334,15 +1339,14 @@ static void stop_command_port(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
struct whiteheat_command_private *command_info;
- unsigned long flags;
command_port = serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
- spin_lock_irqsave(&command_info->lock, flags);
+ mutex_lock(&command_info->mutex);
command_info->port_running--;
if (!command_info->port_running)
usb_kill_urb(command_port->read_urb);
- spin_unlock_irqrestore(&command_info->lock, flags);
+ mutex_unlock(&command_info->mutex);
}
@@ -1363,17 +1367,23 @@ static int start_port_read(struct usb_serial_port *port)
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
urb->dev = port->serial->dev;
+ spin_unlock_irqrestore(&info->lock, flags);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
+ spin_lock_irqsave(&info->lock, flags);
list_add(tmp, &info->rx_urbs_free);
list_for_each_safe(tmp, tmp2, &info->rx_urbs_submitted) {
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
+ list_del(tmp);
+ spin_unlock_irqrestore(&info->lock, flags);
usb_kill_urb(urb);
- list_move(tmp, &info->rx_urbs_free);
+ spin_lock_irqsave(&info->lock, flags);
+ list_add(tmp, &info->rx_urbs_free);
}
break;
}
+ spin_lock_irqsave(&info->lock, flags);
list_add(tmp, &info->rx_urbs_submitted);
}
diff --git a/drivers/usb/storage/dpcm.c b/drivers/usb/storage/dpcm.c
index 1628cb258562..9a410b5a6e5b 100644
--- a/drivers/usb/storage/dpcm.c
+++ b/drivers/usb/storage/dpcm.c
@@ -46,43 +46,43 @@
*/
int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- int ret;
+ int ret;
- if(srb == NULL)
- return USB_STOR_TRANSPORT_ERROR;
+ if (srb == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
- US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun);
+ US_DEBUGP("dpcm_transport: LUN=%d\n", srb->device->lun);
- switch(srb->device->lun) {
- case 0:
+ switch (srb->device->lun) {
+ case 0:
- /*
- * LUN 0 corresponds to the CompactFlash card reader.
- */
- ret = usb_stor_CB_transport(srb, us);
- break;
+ /*
+ * LUN 0 corresponds to the CompactFlash card reader.
+ */
+ ret = usb_stor_CB_transport(srb, us);
+ break;
#ifdef CONFIG_USB_STORAGE_SDDR09
- case 1:
+ case 1:
- /*
- * LUN 1 corresponds to the SmartMedia card reader.
- */
+ /*
+ * LUN 1 corresponds to the SmartMedia card reader.
+ */
- /*
- * Set the LUN to 0 (just in case).
- */
- srb->device->lun = 0; us->srb->device->lun = 0;
- ret = sddr09_transport(srb, us);
- srb->device->lun = 1; us->srb->device->lun = 1;
- break;
+ /*
+ * Set the LUN to 0 (just in case).
+ */
+ srb->device->lun = 0; us->srb->device->lun = 0;
+ ret = sddr09_transport(srb, us);
+ srb->device->lun = 1; us->srb->device->lun = 1;
+ break;
#endif
- default:
- US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun);
- ret = USB_STOR_TRANSPORT_ERROR;
- break;
- }
- return ret;
+ default:
+ US_DEBUGP("dpcm_transport: Invalid LUN %d\n", srb->device->lun);
+ ret = USB_STOR_TRANSPORT_ERROR;
+ break;
+ }
+ return ret;
}
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index d35369392fed..dfd42fe9e5f0 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -57,9 +57,10 @@ static void usb_onetouch_irq(struct urb *urb)
struct usb_onetouch *onetouch = urb->context;
signed char *data = onetouch->data;
struct input_dev *dev = onetouch->dev;
- int status;
+ int status = urb->status;
+ int retval;
- switch (urb->status) {
+ switch (status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
@@ -75,11 +76,11 @@ static void usb_onetouch_irq(struct urb *urb)
input_sync(dev);
resubmit:
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status)
- err ("can't resubmit intr, %s-%s/input0, status %d",
+ retval = usb_submit_urb (urb, GFP_ATOMIC);
+ if (retval)
+ err ("can't resubmit intr, %s-%s/input0, retval %d",
onetouch->udev->bus->bus_name,
- onetouch->udev->devpath, status);
+ onetouch->udev->devpath, retval);
}
static int usb_onetouch_open(struct input_dev *dev)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e227f64d5641..47e56079925d 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -285,10 +285,15 @@ static int device_reset(struct scsi_cmnd *srb)
US_DEBUGP("%s called\n", __FUNCTION__);
- /* lock the device pointers and do the reset */
- mutex_lock(&(us->dev_mutex));
- result = us->transport_reset(us);
- mutex_unlock(&us->dev_mutex);
+ result = usb_autopm_get_interface(us->pusb_intf);
+ if (result == 0) {
+
+ /* lock the device pointers and do the reset */
+ mutex_lock(&(us->dev_mutex));
+ result = us->transport_reset(us);
+ mutex_unlock(&us->dev_mutex);
+ usb_autopm_put_interface(us->pusb_intf);
+ }
return result < 0 ? FAILED : SUCCESS;
}
@@ -321,10 +326,14 @@ void usb_stor_report_device_reset(struct us_data *us)
/* Report a driver-initiated bus reset to the SCSI layer.
* Calling this for a SCSI-initiated reset is unnecessary but harmless.
- * The caller must own the SCSI host lock. */
+ * The caller must not own the SCSI host lock. */
void usb_stor_report_bus_reset(struct us_data *us)
{
- scsi_report_bus_reset(us_to_host(us), 0);
+ struct Scsi_Host *host = us_to_host(us);
+
+ scsi_lock(host);
+ scsi_report_bus_reset(host, 0);
+ scsi_unlock(host);
}
/***********************************************************************
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 54979c239c63..a624e72f81dc 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -50,10 +50,10 @@
/* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
*/
UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
- "ATMEL",
- "SND1 Storage",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_RESIDUE),
+ "ATMEL",
+ "SND1 Storage",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE),
/* modified by Tobias Lorenz <tobias.lorenz@gmx.net> */
UNUSUAL_DEV( 0x03ee, 0x6901, 0x0000, 0x0200,
@@ -69,18 +69,18 @@ UNUSUAL_DEV( 0x03ee, 0x6906, 0x0003, 0x0003,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-UNUSUAL_DEV( 0x03f0, 0x0107, 0x0200, 0x0200,
+UNUSUAL_DEV( 0x03f0, 0x0107, 0x0200, 0x0200,
"HP",
"CD-Writer+",
- US_SC_8070, US_PR_CB, NULL, 0),
+ US_SC_8070, US_PR_CB, NULL, 0),
#ifdef CONFIG_USB_STORAGE_USBAT
-UNUSUAL_DEV( 0x03f0, 0x0207, 0x0001, 0x0001,
+UNUSUAL_DEV( 0x03f0, 0x0207, 0x0001, 0x0001,
"HP",
"CD-Writer+ 8200e",
US_SC_8070, US_PR_USBAT, init_usbat_cd, 0),
-UNUSUAL_DEV( 0x03f0, 0x0307, 0x0001, 0x0001,
+UNUSUAL_DEV( 0x03f0, 0x0307, 0x0001, 0x0001,
"HP",
"CD-Writer+ CD-4e",
US_SC_8070, US_PR_USBAT, init_usbat_cd, 0),
@@ -115,10 +115,10 @@ UNUSUAL_DEV( 0x0411, 0x001c, 0x0113, 0x0113,
/* Submitted by Ernestas Vaiciukevicius <ernisv@gmail.com> */
UNUSUAL_DEV( 0x0419, 0x0100, 0x0100, 0x0100,
- "Samsung Info. Systems America, Inc.",
- "MP3 Player",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_RESIDUE ),
+ "Samsung Info. Systems America, Inc.",
+ "MP3 Player",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
/* Reported by Orgad Shaneh <orgads@gmail.com> */
UNUSUAL_DEV( 0x0419, 0xaace, 0x0100, 0x0100,
@@ -256,10 +256,10 @@ UNUSUAL_DEV( 0x0457, 0x0150, 0x0100, 0x0100,
* the revision to my model only
*/
UNUSUAL_DEV( 0x0457, 0x0151, 0x0100, 0x0100,
- "USB 2.0",
- "Flash Disk",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_NOT_LOCKABLE ),
+ "USB 2.0",
+ "Flash Disk",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_NOT_LOCKABLE ),
#ifdef CONFIG_USB_STORAGE_KARMA
UNUSUAL_DEV( 0x045a, 0x5210, 0x0101, 0x0101,
@@ -313,6 +313,13 @@ UNUSUAL_DEV( 0x04b0, 0x0301, 0x0010, 0x0010,
US_SC_DEVICE, US_PR_DEVICE,NULL,
US_FL_NOT_LOCKABLE ),
+/* Reported by Stefan de Konink <skinkie@xs4all.nl> */
+UNUSUAL_DEV( 0x04b0, 0x0401, 0x0200, 0x0200,
+ "NIKON",
+ "NIKON DSC D100",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY),
+
/* Reported by Andreas Bockhold <andreas@bockionline.de> */
UNUSUAL_DEV( 0x04b0, 0x0405, 0x0100, 0x0100,
"NIKON",
@@ -408,19 +415,19 @@ UNUSUAL_DEV( 0x04da, 0x2373, 0x0000, 0x9999,
/* Most of the following entries were developed with the help of
* Shuttle/SCM directly.
*/
-UNUSUAL_DEV( 0x04e6, 0x0001, 0x0200, 0x0200,
+UNUSUAL_DEV( 0x04e6, 0x0001, 0x0200, 0x0200,
"Matshita",
"LS-120",
US_SC_8020, US_PR_CB, NULL, 0),
-UNUSUAL_DEV( 0x04e6, 0x0002, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x04e6, 0x0002, 0x0100, 0x0100,
"Shuttle",
"eUSCSI Bridge",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG ),
+ US_FL_SCM_MULT_TARG ),
#ifdef CONFIG_USB_STORAGE_SDDR09
-UNUSUAL_DEV( 0x04e6, 0x0003, 0x0000, 0x9999,
+UNUSUAL_DEV( 0x04e6, 0x0003, 0x0000, 0x9999,
"Sandisk",
"ImageMate SDDR09",
US_SC_SCSI, US_PR_EUSB_SDDR09, usb_stor_sddr09_init,
@@ -431,52 +438,52 @@ UNUSUAL_DEV( 0x04e6, 0x0005, 0x0100, 0x0208,
"SCM Microsystems",
"eUSB SmartMedia / CompactFlash Adapter",
US_SC_SCSI, US_PR_DPCM_USB, usb_stor_sddr09_dpcm_init,
- 0),
+ 0),
#endif
/* Reported by Markus Demleitner <msdemlei@cl.uni-heidelberg.de> */
-UNUSUAL_DEV( 0x04e6, 0x0006, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x04e6, 0x0006, 0x0100, 0x0100,
"SCM Microsystems Inc.",
"eUSB MMC Adapter",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN),
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN),
/* Reported by Daniel Nouri <dpunktnpunkt@web.de> */
-UNUSUAL_DEV( 0x04e6, 0x0006, 0x0205, 0x0205,
+UNUSUAL_DEV( 0x04e6, 0x0006, 0x0205, 0x0205,
"Shuttle",
"eUSB MMC Adapter",
- US_SC_SCSI, US_PR_DEVICE, NULL,
- US_FL_SINGLE_LUN),
+ US_SC_SCSI, US_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN),
-UNUSUAL_DEV( 0x04e6, 0x0007, 0x0100, 0x0200,
+UNUSUAL_DEV( 0x04e6, 0x0007, 0x0100, 0x0200,
"Sony",
"Hifd",
- US_SC_SCSI, US_PR_CB, NULL,
- US_FL_SINGLE_LUN),
+ US_SC_SCSI, US_PR_CB, NULL,
+ US_FL_SINGLE_LUN),
-UNUSUAL_DEV( 0x04e6, 0x0009, 0x0200, 0x0200,
+UNUSUAL_DEV( 0x04e6, 0x0009, 0x0200, 0x0200,
"Shuttle",
"eUSB ATA/ATAPI Adapter",
US_SC_8020, US_PR_CB, NULL, 0),
-UNUSUAL_DEV( 0x04e6, 0x000a, 0x0200, 0x0200,
+UNUSUAL_DEV( 0x04e6, 0x000a, 0x0200, 0x0200,
"Shuttle",
"eUSB CompactFlash Adapter",
US_SC_8020, US_PR_CB, NULL, 0),
-UNUSUAL_DEV( 0x04e6, 0x000B, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x04e6, 0x000B, 0x0100, 0x0100,
"Shuttle",
"eUSCSI Bridge",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-UNUSUAL_DEV( 0x04e6, 0x000C, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x04e6, 0x000C, 0x0100, 0x0100,
"Shuttle",
"eUSCSI Bridge",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG ),
+ US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
-UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
+UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
"Shuttle",
"CD-RW Device",
US_SC_8020, US_PR_CB, NULL, 0),
@@ -556,9 +563,9 @@ UNUSUAL_DEV( 0x052b, 0x1911, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450,
+UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450,
"Sony",
- "DSC-S30/S70/S75/505V/F505/F707/F717/P8",
+ "DSC-S30/S70/S75/505V/F505/F707/F717/P8",
US_SC_SCSI, US_PR_DEVICE, NULL,
US_FL_SINGLE_LUN | US_FL_NOT_LOCKABLE | US_FL_NO_WP_DETECT ),
@@ -572,7 +579,7 @@ UNUSUAL_DEV( 0x054c, 0x0010, 0x0500, 0x0610,
/* Reported by wim@geeks.nl */
-UNUSUAL_DEV( 0x054c, 0x0025, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x054c, 0x0025, 0x0100, 0x0100,
"Sony",
"Memorystick NW-MS7",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -593,21 +600,21 @@ UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x2000,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_SINGLE_LUN ),
-UNUSUAL_DEV( 0x054c, 0x002d, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x054c, 0x002d, 0x0100, 0x0100,
"Sony",
"Memorystick MSAC-US1",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_SINGLE_LUN ),
/* Submitted by Klaus Mueller <k.mueller@intershop.de> */
-UNUSUAL_DEV( 0x054c, 0x002e, 0x0106, 0x0310,
+UNUSUAL_DEV( 0x054c, 0x002e, 0x0106, 0x0310,
"Sony",
"Handycam",
US_SC_SCSI, US_PR_DEVICE, NULL,
US_FL_SINGLE_LUN ),
/* Submitted by Rajesh Kumble Nayak <nayak@obs-nice.fr> */
-UNUSUAL_DEV( 0x054c, 0x002e, 0x0500, 0x0500,
+UNUSUAL_DEV( 0x054c, 0x002e, 0x0500, 0x0500,
"Sony",
"Handycam HC-85",
US_SC_UFI, US_PR_DEVICE, NULL,
@@ -648,26 +655,26 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
/* Submitted by Frank Engel <frankie@cse.unsw.edu.au> */
UNUSUAL_DEV( 0x054c, 0x0099, 0x0000, 0x9999,
- "Sony",
- "PEG Mass Storage",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
+ "Sony",
+ "PEG Mass Storage",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY ),
/* floppy reports multiple luns */
UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
- "SAMSUNG",
- "SFD-321U [FW 0C]",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_SINGLE_LUN ),
+ "SAMSUNG",
+ "SFD-321U [FW 0C]",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_SINGLE_LUN ),
-UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
+UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
"Y-E Data",
"Flashbuster-U",
US_SC_DEVICE, US_PR_CB, NULL,
US_FL_SINGLE_LUN),
-UNUSUAL_DEV( 0x057b, 0x0000, 0x0300, 0x9999,
+UNUSUAL_DEV( 0x057b, 0x0000, 0x0300, 0x9999,
"Y-E Data",
"Flashbuster-U",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -677,7 +684,7 @@ UNUSUAL_DEV( 0x057b, 0x0000, 0x0300, 0x9999,
* This entry is needed only because the device reports
* bInterfaceClass = 0xff (vendor-specific)
*/
-UNUSUAL_DEV( 0x057b, 0x0022, 0x0000, 0x9999,
+UNUSUAL_DEV( 0x057b, 0x0022, 0x0000, 0x9999,
"Y-E Data",
"Silicon Media R/W",
US_SC_DEVICE, US_PR_DEVICE, NULL, 0),
@@ -825,13 +832,13 @@ UNUSUAL_DEV( 0x0636, 0x0003, 0x0000, 0x9999,
US_SC_SCSI, US_PR_BULK, NULL,
US_FL_FIX_INQUIRY ),
-UNUSUAL_DEV( 0x0644, 0x0000, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x0644, 0x0000, 0x0100, 0x0100,
"TEAC",
"Floppy Drive",
- US_SC_UFI, US_PR_CB, NULL, 0 ),
+ US_SC_UFI, US_PR_CB, NULL, 0 ),
#ifdef CONFIG_USB_STORAGE_SDDR09
-UNUSUAL_DEV( 0x066b, 0x0105, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x066b, 0x0105, 0x0100, 0x0100,
"Olympus",
"Camedia MAUSB-2",
US_SC_SCSI, US_PR_EUSB_SDDR09, usb_stor_sddr09_init,
@@ -867,14 +874,14 @@ UNUSUAL_DEV( 0x0686, 0x4011, 0x0001, 0x0001,
/* Reported by Miguel A. Fosas <amn3s1a@ono.com> */
UNUSUAL_DEV( 0x0686, 0x4017, 0x0001, 0x0001,
- "Minolta",
- "DIMAGE E223",
- US_SC_SCSI, US_PR_DEVICE, NULL, 0 ),
+ "Minolta",
+ "DIMAGE E223",
+ US_SC_SCSI, US_PR_DEVICE, NULL, 0 ),
UNUSUAL_DEV( 0x0693, 0x0005, 0x0100, 0x0100,
"Hagiwara",
"Flashgate",
- US_SC_SCSI, US_PR_BULK, NULL, 0 ),
+ US_SC_SCSI, US_PR_BULK, NULL, 0 ),
/* Reported by David Hamilton <niftimusmaximus@lycos.com> */
UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001,
@@ -918,7 +925,7 @@ UNUSUAL_DEV( 0x0781, 0x0100, 0x0100, 0x0100,
US_FL_SINGLE_LUN ),
#ifdef CONFIG_USB_STORAGE_SDDR09
-UNUSUAL_DEV( 0x0781, 0x0200, 0x0000, 0x9999,
+UNUSUAL_DEV( 0x0781, 0x0200, 0x0000, 0x9999,
"Sandisk",
"ImageMate SDDR-09",
US_SC_SCSI, US_PR_EUSB_SDDR09, usb_stor_sddr09_init,
@@ -939,17 +946,17 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
-UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
+UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
"Microtech",
"USB-SCSI-DB25",
US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
+UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
"Microtech",
"USB-SCSI-HD50",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
- US_FL_SCM_MULT_TARG ),
+ US_FL_SCM_MULT_TARG ),
#ifdef CONFIG_USB_STORAGE_DPCM
UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100,
@@ -1053,10 +1060,10 @@ UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff,
* as "DualSlot CompactFlash(TM) & MStick Drive USB"
*/
UNUSUAL_DEV( 0x07c4, 0xa10b, 0x0000, 0xffff,
- "DataFab Systems Inc.",
- "USB CF+MS",
- US_SC_SCSI, US_PR_DATAFAB, NULL,
- 0 ),
+ "DataFab Systems Inc.",
+ "USB CF+MS",
+ US_SC_SCSI, US_PR_DATAFAB, NULL,
+ 0 ),
#endif
@@ -1119,10 +1126,10 @@ UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
* US_FL_IGNORE_RESIDUE Needed
*/
UNUSUAL_DEV( 0x08ca, 0x3103, 0x0100, 0x0100,
- "AIPTEK",
- "Aiptek USB Keychain MP3 Player",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_RESIDUE),
+ "AIPTEK",
+ "Aiptek USB Keychain MP3 Player",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE),
/* Entry needed for flags. Moreover, all devices with this ID use
* bulk-only transport, but _some_ falsely report Control/Bulk instead.
@@ -1166,26 +1173,26 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
* Submitted by James Courtier-Dutton <James@superbug.demon.co.uk>
*/
UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
- "Pentax",
- "Optio 2/3/400",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
+ "Pentax",
+ "Optio 2/3/400",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY ),
/* Submitted by Per Winkvist <per.winkvist@uk.com> */
UNUSUAL_DEV( 0x0a17, 0x006, 0x0000, 0xffff,
- "Pentax",
- "Optio S/S4",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_INQUIRY ),
+ "Pentax",
+ "Optio S/S4",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY ),
/* These are virtual windows driver CDs, which the zd1211rw driver
* automatically converts into WLAN devices. */
UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
- "ZyXEL",
- "G-220F USB-WLAN Install",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_DEVICE ),
+ "ZyXEL",
+ "G-220F USB-WLAN Install",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE ),
UNUSUAL_DEV( 0x0ace, 0x20ff, 0x0101, 0x0101,
"SiteCom",
@@ -1211,17 +1218,17 @@ UNUSUAL_DEV( 0x0bf6, 0xa001, 0x0100, 0x0110,
#ifdef CONFIG_USB_STORAGE_DATAFAB
UNUSUAL_DEV( 0x0c0b, 0xa109, 0x0000, 0xffff,
- "Acomdata",
- "CF",
- US_SC_SCSI, US_PR_DATAFAB, NULL,
- US_FL_SINGLE_LUN ),
+ "Acomdata",
+ "CF",
+ US_SC_SCSI, US_PR_DATAFAB, NULL,
+ US_FL_SINGLE_LUN ),
#endif
#ifdef CONFIG_USB_STORAGE_SDDR55
UNUSUAL_DEV( 0x0c0b, 0xa109, 0x0000, 0xffff,
- "Acomdata",
- "SM",
- US_SC_SCSI, US_PR_SDDR55, NULL,
- US_FL_SINGLE_LUN ),
+ "Acomdata",
+ "SM",
+ US_SC_SCSI, US_PR_SDDR55, NULL,
+ US_FL_SINGLE_LUN ),
#endif
/* Submitted by: Nick Sillik <n.sillik@temple.edu>
@@ -1384,6 +1391,17 @@ UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x0110,
US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
0 ),
+/* Reported by Kevin Lloyd <linux@sierrawireless.com>
+ * Entry is needed for the initializer function override,
+ * which instructs the device to load as a modem
+ * device.
+ */
+UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
+ "Sierra Wireless",
+ "USB MMC Storage",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_DEVICE),
+
/* Reported by Jaco Kroon <jaco@kroon.co.za>
* The usb-storage module found on the Digitech GNX4 (and supposedly other
* devices) misbehaves and causes a bunch of invalid I/O errors.
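
The quirk entries in this table all share one shape; as a rough, hypothetical illustration (made-up IDs and strings, not a real device), a new entry would look like:

UNUSUAL_DEV(  0x1234, 0x5678, 0x0000, 0x9999,
		"Example Vendor",
		"Example Flash Disk",
		US_SC_DEVICE, US_PR_DEVICE, NULL,
		US_FL_FIX_CAPACITY ),

The arguments are, in order: USB vendor ID, product ID, lowest and highest bcdDevice revision the quirk applies to, vendor and product strings, command set (US_SC_*), transport (US_PR_*), an optional initializer routine, and the quirk flags.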
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 8e898e3d861e..28842d208bb0 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -191,16 +191,13 @@ static int storage_suspend(struct usb_interface *iface, pm_message_t message)
{
struct us_data *us = usb_get_intfdata(iface);
+ US_DEBUGP("%s\n", __FUNCTION__);
+
/* Wait until no command is running */
mutex_lock(&us->dev_mutex);
- US_DEBUGP("%s\n", __FUNCTION__);
if (us->suspend_resume_hook)
(us->suspend_resume_hook)(us, US_SUSPEND);
- iface->dev.power.power_state.event = message.event;
-
- /* When runtime PM is working, we'll set a flag to indicate
- * whether we should autoresume when a SCSI request arrives. */
mutex_unlock(&us->dev_mutex);
return 0;
@@ -210,14 +207,25 @@ static int storage_resume(struct usb_interface *iface)
{
struct us_data *us = usb_get_intfdata(iface);
- mutex_lock(&us->dev_mutex);
-
US_DEBUGP("%s\n", __FUNCTION__);
+
if (us->suspend_resume_hook)
(us->suspend_resume_hook)(us, US_RESUME);
- iface->dev.power.power_state.event = PM_EVENT_ON;
- mutex_unlock(&us->dev_mutex);
+ return 0;
+}
+
+static int storage_reset_resume(struct usb_interface *iface)
+{
+ struct us_data *us = usb_get_intfdata(iface);
+
+ US_DEBUGP("%s\n", __FUNCTION__);
+
+ /* Report the reset to the SCSI core */
+ usb_stor_report_bus_reset(us);
+
+ /* FIXME: Notify the subdrivers that they need to reinitialize
+ * the device */
return 0;
}
@@ -228,7 +236,7 @@ static int storage_resume(struct usb_interface *iface)
* a USB port reset, whether from this driver or a different one.
*/
-static void storage_pre_reset(struct usb_interface *iface)
+static int storage_pre_reset(struct usb_interface *iface)
{
struct us_data *us = usb_get_intfdata(iface);
@@ -236,22 +244,23 @@ static void storage_pre_reset(struct usb_interface *iface)
/* Make sure no command runs during the reset */
mutex_lock(&us->dev_mutex);
+ return 0;
}
-static void storage_post_reset(struct usb_interface *iface)
+static int storage_post_reset(struct usb_interface *iface)
{
struct us_data *us = usb_get_intfdata(iface);
US_DEBUGP("%s\n", __FUNCTION__);
/* Report the reset to the SCSI core */
- scsi_lock(us_to_host(us));
usb_stor_report_bus_reset(us);
- scsi_unlock(us_to_host(us));
/* FIXME: Notify the subdrivers that they need to reinitialize
* the device */
+
mutex_unlock(&us->dev_mutex);
+ return 0;
}
/*
@@ -300,8 +309,7 @@ static int usb_stor_control_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
struct Scsi_Host *host = us_to_host(us);
-
- current->flags |= PF_NOFREEZE;
+ int autopm_rc;
for(;;) {
US_DEBUGP("*** thread sleeping.\n");
@@ -310,6 +318,9 @@ static int usb_stor_control_thread(void * __us)
US_DEBUGP("*** thread awakened.\n");
+ /* Autoresume the device */
+ autopm_rc = usb_autopm_get_interface(us->pusb_intf);
+
/* lock the device pointers */
mutex_lock(&(us->dev_mutex));
@@ -368,6 +379,12 @@ static int usb_stor_control_thread(void * __us)
us->srb->result = SAM_STAT_GOOD;
}
+ /* Did the autoresume fail? */
+ else if (autopm_rc < 0) {
+ US_DEBUGP("Could not wake device\n");
+ us->srb->result = DID_ERROR << 16;
+ }
+
/* we've got a command, let's do it! */
else {
US_DEBUG(usb_stor_show_command(us->srb));
@@ -410,25 +427,21 @@ SkipForAbort:
/* unlock the device pointers */
mutex_unlock(&us->dev_mutex);
- } /* for (;;) */
- scsi_host_put(host);
+ /* Start an autosuspend */
+ if (autopm_rc == 0)
+ usb_autopm_put_interface(us->pusb_intf);
+ } /* for (;;) */
- /* notify the exit routine that we're actually exiting now
- *
- * complete()/wait_for_completion() is similar to up()/down(),
- * except that complete() is safe in the case where the structure
- * is getting deleted in a parallel mode of execution (i.e. just
- * after the down() -- that's necessary for the thread-shutdown
- * case.
- *
- * complete_and_exit() goes even further than this -- it is safe in
- * the case that the thread of the caller is going away (not just
- * the structure) -- this is necessary for the module-remove case.
- * This is important in preemption kernels, which transfer the flow
- * of execution immediately upon a complete().
- */
- complete_and_exit(&threads_gone, 0);
+ /* Wait until we are told to stop */
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
}
/***********************************************************************
@@ -796,19 +809,13 @@ static int usb_stor_acquire_resources(struct us_data *us)
}
/* Start up our control thread */
- th = kthread_create(usb_stor_control_thread, us, "usb-storage");
+ th = kthread_run(usb_stor_control_thread, us, "usb-storage");
if (IS_ERR(th)) {
printk(KERN_WARNING USB_STORAGE
"Unable to start control thread\n");
return PTR_ERR(th);
}
-
- /* Take a reference to the host for the control thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(us_to_host(us));
- atomic_inc(&total_threads);
- wake_up_process(th);
+ us->ctl_thread = th;
return 0;
}
@@ -825,6 +832,8 @@ static void usb_stor_release_resources(struct us_data *us)
US_DEBUGP("-- sending exit command to thread\n");
set_bit(US_FLIDX_DISCONNECTING, &us->flags);
up(&us->sema);
+ if (us->ctl_thread)
+ kthread_stop(us->ctl_thread);
/* Call the destructor routine, if it exists */
if (us->extra_destructor) {
@@ -909,6 +918,7 @@ static int usb_stor_scan_thread(void * __us)
printk(KERN_DEBUG
"usb-storage: device found at %d\n", us->pusb_dev->devnum);
+ set_freezable();
/* Wait for the timeout to expire or for a disconnect */
if (delay_use > 0) {
printk(KERN_DEBUG "usb-storage: waiting for device "
@@ -938,6 +948,7 @@ retry:
}
scsi_host_put(us_to_host(us));
+ usb_autopm_put_interface(us->pusb_intf);
complete_and_exit(&threads_gone, 0);
}
@@ -1027,6 +1038,7 @@ static int storage_probe(struct usb_interface *intf,
* start it up. */
scsi_host_get(us_to_host(us));
atomic_inc(&total_threads);
+ usb_autopm_get_interface(intf); /* dropped in the scanning thread */
wake_up_process(th);
return 0;
@@ -1059,10 +1071,12 @@ static struct usb_driver usb_storage_driver = {
#ifdef CONFIG_PM
.suspend = storage_suspend,
.resume = storage_resume,
+ .reset_resume = storage_reset_resume,
#endif
.pre_reset = storage_pre_reset,
.post_reset = storage_post_reset,
.id_table = storage_usb_ids,
+ .supports_autosuspend = 1,
};
static int __init usb_stor_init(void)
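
The control-thread changes above wrap every queued command in a runtime-PM reference so an autosuspended device is woken before I/O and released afterwards. A minimal sketch of that pairing, with a hypothetical I/O helper and an already-bound interface pointer:

	int rc;

	rc = usb_autopm_get_interface(intf);	/* wake the device if it is autosuspended */
	if (rc == 0) {
		do_io(intf);			/* hypothetical I/O; device is awake here */
		usb_autopm_put_interface(intf);	/* drop the PM reference, allow autosuspend */
	} else {
		/* resume failed: fail the request rather than touch the hardware */
	}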
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 6dac1ffdde86..6445665b1577 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -144,6 +144,7 @@ struct us_data {
unsigned char *sensebuf; /* sense data buffer */
dma_addr_t cr_dma; /* buffer DMA addresses */
dma_addr_t iobuf_dma;
+ struct task_struct *ctl_thread; /* the control thread */
/* mutual exclusion and synchronization structures */
struct semaphore sema; /* to sleep thread on */
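
The new ctl_thread field lets the driver stop the control thread with kthread_stop() instead of the old completion dance. The create/stop pairing, sketched with hypothetical names (the thread function must poll kthread_should_stop(), as the loop added in usb.c does):

	struct task_struct *t;

	t = kthread_run(my_thread_fn, priv, "my-thread");	/* create and wake the thread */
	if (IS_ERR(t))
		return PTR_ERR(t);
	priv->ctl_thread = t;

	/* ... later, at teardown ... */
	kthread_stop(priv->ctl_thread);		/* blocks until the thread has exited */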
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 8432bf171d2e..8de11deb5d14 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -34,9 +34,6 @@ static struct usb_device_id skel_table [] = {
};
MODULE_DEVICE_TABLE(usb, skel_table);
-/* to prevent a race between open and disconnect */
-static DEFINE_MUTEX(skel_open_lock);
-
/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE 192
@@ -54,16 +51,21 @@ struct usb_skel {
struct usb_device *udev; /* the usb device for this device */
struct usb_interface *interface; /* the interface for this device */
struct semaphore limit_sem; /* limiting the number of writes in progress */
+ struct usb_anchor submitted; /* in case we need to retract our submissions */
unsigned char *bulk_in_buffer; /* the buffer to receive data */
size_t bulk_in_size; /* the size of the receive buffer */
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
+ int errors; /* the last request tanked */
+ int open_count; /* count the number of openers */
+ spinlock_t err_lock; /* lock for errors */
struct kref kref;
struct mutex io_mutex; /* synchronize I/O with disconnect */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
static struct usb_driver skel_driver;
+static void skel_draw_down(struct usb_skel *dev);
static void skel_delete(struct kref *kref)
{
@@ -83,10 +85,8 @@ static int skel_open(struct inode *inode, struct file *file)
subminor = iminor(inode);
- mutex_lock(&skel_open_lock);
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
- mutex_unlock(&skel_open_lock);
err ("%s - error, can't find device for minor %d",
__FUNCTION__, subminor);
retval = -ENODEV;
@@ -95,22 +95,33 @@ static int skel_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
- mutex_unlock(&skel_open_lock);
retval = -ENODEV;
goto exit;
}
/* increment our usage count for the device */
kref_get(&dev->kref);
- /* now we can drop the lock */
- mutex_unlock(&skel_open_lock);
- /* prevent the device from being autosuspended */
- retval = usb_autopm_get_interface(interface);
- if (retval) {
+ /* lock the device to allow correctly handling errors
+ * in resumption */
+ mutex_lock(&dev->io_mutex);
+
+ if (!dev->open_count++) {
+ retval = usb_autopm_get_interface(interface);
+ if (retval) {
+ dev->open_count--;
+ mutex_unlock(&dev->io_mutex);
+ kref_put(&dev->kref, skel_delete);
+ goto exit;
+ }
+ } /* else { //uncomment this block if you want exclusive open
+ retval = -EBUSY;
+ dev->open_count--;
+ mutex_unlock(&dev->io_mutex);
kref_put(&dev->kref, skel_delete);
goto exit;
- }
+ } */
+ /* prevent the device from being autosuspended */
/* save our object in the file's private structure */
file->private_data = dev;
@@ -129,7 +140,7 @@ static int skel_release(struct inode *inode, struct file *file)
/* allow the device to be autosuspended */
mutex_lock(&dev->io_mutex);
- if (dev->interface)
+ if (!--dev->open_count && dev->interface)
usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->io_mutex);
@@ -138,6 +149,30 @@ static int skel_release(struct inode *inode, struct file *file)
return 0;
}
+static int skel_flush(struct file *file, fl_owner_t id)
+{
+ struct usb_skel *dev;
+ int res;
+
+ dev = (struct usb_skel *)file->private_data;
+ if (dev == NULL)
+ return -ENODEV;
+
+ /* wait for io to stop */
+ mutex_lock(&dev->io_mutex);
+ skel_draw_down(dev);
+
+ /* read out errors, leave subsequent opens a clean slate */
+ spin_lock_irq(&dev->err_lock);
+ res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
+ dev->errors = 0;
+ spin_unlock_irq(&dev->err_lock);
+
+ mutex_unlock(&dev->io_mutex);
+
+ return res;
+}
+
static ssize_t skel_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
{
struct usb_skel *dev;
@@ -179,12 +214,16 @@ static void skel_write_bulk_callback(struct urb *urb)
dev = (struct usb_skel *)urb->context;
/* sync/async unlink faults aren't errors */
- if (urb->status &&
- !(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -ESHUTDOWN)) {
- err("%s - nonzero write bulk status received: %d",
- __FUNCTION__, urb->status);
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ err("%s - nonzero write bulk status received: %d",
+ __FUNCTION__, urb->status);
+
+ spin_lock(&dev->err_lock);
+ dev->errors = urb->status;
+ spin_unlock(&dev->err_lock);
}
/* free up our allocated buffer */
@@ -213,6 +252,17 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
goto exit;
}
+ spin_lock_irq(&dev->err_lock);
+ if ((retval = dev->errors) < 0) {
+ /* any error is reported once */
+ dev->errors = 0;
+ /* to preserve notifications about reset */
+ retval = (retval == -EPIPE) ? retval : -EIO;
+ }
+ spin_unlock_irq(&dev->err_lock);
+ if (retval < 0)
+ goto error;
+
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
@@ -244,13 +294,14 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
buf, writesize, skel_write_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->submitted);
/* send the data out the bulk port */
retval = usb_submit_urb(urb, GFP_KERNEL);
mutex_unlock(&dev->io_mutex);
if (retval) {
err("%s - failed submitting write urb, error %d", __FUNCTION__, retval);
- goto error;
+ goto error_unanchor;
}
/* release our reference to this urb, the USB core will eventually free it entirely */
@@ -259,6 +310,8 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, size_t cou
return writesize;
+error_unanchor:
+ usb_unanchor_urb(urb);
error:
if (urb) {
usb_buffer_free(dev->udev, writesize, buf, urb->transfer_dma);
@@ -276,6 +329,7 @@ static const struct file_operations skel_fops = {
.write = skel_write,
.open = skel_open,
.release = skel_release,
+ .flush = skel_flush,
};
/*
@@ -306,6 +360,8 @@ static int skel_probe(struct usb_interface *interface, const struct usb_device_i
kref_init(&dev->kref);
sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
mutex_init(&dev->io_mutex);
+ spin_lock_init(&dev->err_lock);
+ init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
@@ -368,22 +424,18 @@ static void skel_disconnect(struct usb_interface *interface)
struct usb_skel *dev;
int minor = interface->minor;
- /* prevent skel_open() from racing skel_disconnect() */
- mutex_lock(&skel_open_lock);
-
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
/* give back our minor */
usb_deregister_dev(interface, &skel_class);
- mutex_unlock(&skel_open_lock);
/* prevent more I/O from starting */
mutex_lock(&dev->io_mutex);
dev->interface = NULL;
mutex_unlock(&dev->io_mutex);
-
+ usb_kill_anchored_urbs(&dev->submitted);
/* decrement our usage count */
kref_put(&dev->kref, skel_delete);
@@ -391,10 +443,59 @@ static void skel_disconnect(struct usb_interface *interface)
info("USB Skeleton #%d now disconnected", minor);
}
+static void skel_draw_down(struct usb_skel *dev)
+{
+ int time;
+
+ time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
+ if (!time)
+ usb_kill_anchored_urbs(&dev->submitted);
+}
+
+static int skel_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usb_skel *dev = usb_get_intfdata(intf);
+
+ if (!dev)
+ return 0;
+ skel_draw_down(dev);
+ return 0;
+}
+
+static int skel_resume (struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int skel_pre_reset(struct usb_interface *intf)
+{
+ struct usb_skel *dev = usb_get_intfdata(intf);
+
+ mutex_lock(&dev->io_mutex);
+ skel_draw_down(dev);
+
+ return 0;
+}
+
+static int skel_post_reset(struct usb_interface *intf)
+{
+ struct usb_skel *dev = usb_get_intfdata(intf);
+
+ /* we are sure no URBs are active - no locking needed */
+ dev->errors = -EPIPE;
+ mutex_unlock(&dev->io_mutex);
+
+ return 0;
+}
+
static struct usb_driver skel_driver = {
.name = "skeleton",
.probe = skel_probe,
.disconnect = skel_disconnect,
+ .suspend = skel_suspend,
+ .resume = skel_resume,
+ .pre_reset = skel_pre_reset,
+ .post_reset = skel_post_reset,
.id_table = skel_table,
.supports_autosuspend = 1,
};
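
The skeleton now anchors every write URB so in-flight I/O can be drained on flush, suspend, pre-reset and disconnect. The anchor pattern, as a sketch against a hypothetical driver structure:

	init_usb_anchor(&dev->submitted);		/* once, at probe time */

	usb_anchor_urb(urb, &dev->submitted);		/* before submission */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval)
		usb_unanchor_urb(urb);			/* submit failed: take it back off the anchor */

	/* quiesce: wait up to one second, then cancel whatever is still pending */
	if (!usb_wait_anchor_empty_timeout(&dev->submitted, 1000))
		usb_kill_anchored_urbs(&dev->submitted);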
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index 0dda73da8628..7f907fb23b8a 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -60,7 +60,7 @@ static u_long videomemory;
static u_long videomemorysize;
static struct fb_info fb_info;
-static u32 mc68x328fb_pseudo_palette[17];
+static u32 mc68x328fb_pseudo_palette[16];
static struct fb_var_screeninfo mc68x328fb_default __initdata = {
.red = { 0, 8, 0 },
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 403dac787ebf..564cc9b51822 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -12,6 +12,13 @@ config VGASTATE
tristate
default n
+config VIDEO_OUTPUT_CONTROL
+ tristate "Lowlevel video output switch controls"
+ default m
+ help
+ This framework adds support for low-level control of the video
+ output switch.
+
config FB
tristate "Support for frame buffer devices"
---help---
@@ -812,7 +819,7 @@ config FB_PVR2
config FB_EPSON1355
bool "Epson 1355 framebuffer support"
- depends on (FB = y) && (SUPERH || ARCH_CEIVA)
+ depends on (FB = y) && ARCH_CEIVA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -849,6 +856,16 @@ config FB_INTSRAM
Say Y if you want to map Frame Buffer in internal SRAM. Say N if you want
to let frame buffer in external SDRAM.
+config FB_ATMEL_STN
+ bool "Use a STN display with AT91/AT32 LCD Controller"
+ depends on FB_ATMEL && MACH_AT91SAM9261EK
+ default n
+ help
+ Say Y if you want to connect an STN LCD display to the AT91/AT32 LCD
+ Controller. Say N if you want to connect a TFT.
+
+ If unsure, say N.
+
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
depends on FB && PCI
@@ -1790,19 +1807,20 @@ config FB_IBM_GXT4500
adaptor, found on some IBM System P (pSeries) machines.
config FB_PS3
- bool "PS3 GPU framebuffer driver"
- depends on (FB = y) && PS3_PS3AV
+ tristate "PS3 GPU framebuffer driver"
+ depends on FB && PS3_PS3AV
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
---help---
Include support for the virtual frame buffer in the PS3 platform.
config FB_PS3_DEFAULT_SIZE_M
int "PS3 default frame buffer size (in MiB)"
depends on FB_PS3
- default 18
+ default 9
---help---
This is the default size (in MiB) of the virtual frame buffer in
the PS3.
@@ -1820,6 +1838,10 @@ config FB_XILINX
framebuffer. ML300 carries a 640*480 LCD display on the board,
ML403 uses a standard DB15 VGA connector.
+if ARCH_OMAP
+ source "drivers/video/omap/Kconfig"
+endif
+
config FB_VIRTUAL
tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index bd8b05229500..518933d4905f 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
obj-$(CONFIG_FB_PS3) += ps3fb.o
obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
+obj-$(CONFIG_FB_OMAP) += omap/
# Platform or fallback drivers go here
obj-$(CONFIG_FB_VESA) += vesafb.o
@@ -122,3 +123,6 @@ obj-$(CONFIG_FB_OF) += offb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
+
+#video output switch sysfs driver
+obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 6c9dc2e69c82..a7a1c891bfa2 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,13 +447,12 @@ static int clcdfb_probe(struct amba_device *dev, void *id)
goto out;
}
- fb = kmalloc(sizeof(struct clcd_fb), GFP_KERNEL);
+ fb = kzalloc(sizeof(struct clcd_fb), GFP_KERNEL);
if (!fb) {
printk(KERN_INFO "CLCD: could not allocate new clcd_fb struct\n");
ret = -ENOMEM;
goto free_region;
}
- memset(fb, 0, sizeof(struct clcd_fb));
fb->dev = dev;
fb->board = board;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index e1d5bd0c98c4..235b618b4117 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -79,6 +79,29 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.accel = FB_ACCEL_NONE,
};
+static unsigned long compute_hozval(unsigned long xres, unsigned long lcdcon2)
+{
+ unsigned long value;
+
+ if (!(cpu_is_at91sam9261() || cpu_is_at32ap7000()))
+ return xres;
+
+ value = xres;
+ if ((lcdcon2 & ATMEL_LCDC_DISTYPE) != ATMEL_LCDC_DISTYPE_TFT) {
+ /* STN display */
+ if ((lcdcon2 & ATMEL_LCDC_DISTYPE) == ATMEL_LCDC_DISTYPE_STNCOLOR) {
+ value *= 3;
+ }
+ if ( (lcdcon2 & ATMEL_LCDC_IFWIDTH) == ATMEL_LCDC_IFWIDTH_4
+ || ( (lcdcon2 & ATMEL_LCDC_IFWIDTH) == ATMEL_LCDC_IFWIDTH_8
+ && (lcdcon2 & ATMEL_LCDC_SCANMOD) == ATMEL_LCDC_SCANMOD_DUAL ))
+ value = DIV_ROUND_UP(value, 4);
+ else
+ value = DIV_ROUND_UP(value, 8);
+ }
+
+ return value;
+}
static void atmel_lcdfb_update_dma(struct fb_info *info,
struct fb_var_screeninfo *var)
@@ -181,6 +204,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
var->xoffset = var->yoffset = 0;
switch (var->bits_per_pixel) {
+ case 1:
case 2:
case 4:
case 8:
@@ -195,8 +219,11 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
var->blue.offset = 10;
var->red.length = var->green.length = var->blue.length = 5;
break;
- case 24:
case 32:
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ /* fall through */
+ case 24:
var->red.offset = 0;
var->green.offset = 8;
var->blue.offset = 16;
@@ -228,8 +255,10 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
static int atmel_lcdfb_set_par(struct fb_info *info)
{
struct atmel_lcdfb_info *sinfo = info->par;
+ unsigned long hozval_linesz;
unsigned long value;
unsigned long clk_value_khz;
+ unsigned long bits_per_line;
dev_dbg(info->device, "%s:\n", __func__);
dev_dbg(info->device, " * resolution: %ux%u (%ux%u virtual)\n",
@@ -241,12 +270,15 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
lcdc_writel(sinfo, ATMEL_LCDC_DMACON, 0);
- if (info->var.bits_per_pixel <= 8)
+ if (info->var.bits_per_pixel == 1)
+ info->fix.visual = FB_VISUAL_MONO01;
+ else if (info->var.bits_per_pixel <= 8)
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
else
info->fix.visual = FB_VISUAL_TRUECOLOR;
- info->fix.line_length = info->var.xres_virtual * (info->var.bits_per_pixel / 8);
+ bits_per_line = info->var.xres_virtual * info->var.bits_per_pixel;
+ info->fix.line_length = DIV_ROUND_UP(bits_per_line, 8);
/* Re-initialize the DMA engine... */
dev_dbg(info->device, " * update DMA engine\n");
@@ -262,18 +294,21 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
/* Set pixel clock */
clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
- value = clk_value_khz / PICOS2KHZ(info->var.pixclock);
-
- if (clk_value_khz % PICOS2KHZ(info->var.pixclock))
- value++;
+ value = DIV_ROUND_UP(clk_value_khz, PICOS2KHZ(info->var.pixclock));
value = (value / 2) - 1;
+ dev_dbg(info->device, " * programming CLKVAL = 0x%08lx\n", value);
if (value <= 0) {
dev_notice(info->device, "Bypassing pixel clock divider\n");
lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS);
- } else
+ } else {
lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, value << ATMEL_LCDC_CLKVAL_OFFSET);
+ info->var.pixclock = KHZ2PICOS(clk_value_khz / (2 * (value + 1)));
+ dev_dbg(info->device, " updated pixclk: %lu KHz\n",
+ PICOS2KHZ(info->var.pixclock));
+ }
+
/* Initialize control register 2 */
value = sinfo->default_lcdcon2;
@@ -311,9 +346,14 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
dev_dbg(info->device, " * LCDTIM2 = %08lx\n", value);
lcdc_writel(sinfo, ATMEL_LCDC_TIM2, value);
+ /* Horizontal value (aka line size) */
+ hozval_linesz = compute_hozval(info->var.xres,
+ lcdc_readl(sinfo, ATMEL_LCDC_LCDCON2));
+
/* Display size */
- value = (info->var.xres - 1) << ATMEL_LCDC_HOZVAL_OFFSET;
+ value = (hozval_linesz - 1) << ATMEL_LCDC_HOZVAL_OFFSET;
value |= info->var.yres - 1;
+ dev_dbg(info->device, " * LCDFRMCFG = %08lx\n", value);
lcdc_writel(sinfo, ATMEL_LCDC_LCDFRMCFG, value);
/* FIFO Threshold: Use formula from data sheet */
@@ -421,6 +461,15 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
ret = 0;
}
break;
+
+ case FB_VISUAL_MONO01:
+ if (regno < 2) {
+ val = (regno == 0) ? 0x00 : 0x1F;
+ lcdc_writel(sinfo, ATMEL_LCDC_LUT(regno), val);
+ ret = 0;
+ }
+ break;
+
}
return ret;
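
Computing line_length in bits and rounding up is what makes the new 1 bpp mode come out right; a couple of worked numbers (chosen purely for illustration):

	/*
	 * 1 bpp, xres_virtual = 800:
	 *   bits_per_line = 800 * 1 = 800
	 *   line_length   = DIV_ROUND_UP(800, 8) = 100 bytes
	 *
	 * compute_hozval() for a color STN panel on an 8-bit single-scan
	 * interface, xres = 320:
	 *   value  = 320 * 3 = 960            (three sub-pixels per pixel)
	 *   hozval = DIV_ROUND_UP(960, 8) = 120
	 */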
diff --git a/drivers/video/aty/ati_ids.h b/drivers/video/aty/ati_ids.h
index 90e7df22f508..685a754991c6 100644
--- a/drivers/video/aty/ati_ids.h
+++ b/drivers/video/aty/ati_ids.h
@@ -204,6 +204,7 @@
#define PCI_CHIP_RV280_5961 0x5961
#define PCI_CHIP_RV280_5962 0x5962
#define PCI_CHIP_RV280_5964 0x5964
+#define PCI_CHIP_RS485_5975 0x5975
#define PCI_CHIP_RV280_5C61 0x5C61
#define PCI_CHIP_RV280_5C63 0x5C63
#define PCI_CHIP_R423_5D57 0x5D57
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 7fea4d8ae8e2..cfcbe37d2d70 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1733,7 +1733,7 @@ static int aty128_bl_get_level_brightness(struct aty128fb_par *par,
static int aty128_bl_update_status(struct backlight_device *bd)
{
- struct aty128fb_par *par = class_get_devdata(&bd->class_dev);
+ struct aty128fb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
int level;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 2fbff6317433..bc6f0096aa04 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -541,7 +541,7 @@ static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
-static u32 pseudo_palette[17];
+static u32 pseudo_palette[16];
#ifdef CONFIG_FB_ATY_GX
static char *aty_gx_ram[8] __devinitdata = {
@@ -2141,7 +2141,7 @@ static int aty_bl_get_level_brightness(struct atyfb_par *par, int level)
static int aty_bl_update_status(struct backlight_device *bd)
{
- struct atyfb_par *par = class_get_devdata(&bd->class_dev);
+ struct atyfb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
int level;
@@ -2913,10 +2913,6 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
int node, len, i, j, ret;
u32 mem, chip_id;
- /* Do not attach when we have a serial console. */
- if (!con_is_present())
- return -ENXIO;
-
/*
* Map memory-mapped registers.
*/
@@ -2937,12 +2933,11 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
/* nothing */ ;
j = i + 4;
- par->mmap_map = kmalloc(j * sizeof(*par->mmap_map), GFP_ATOMIC);
+ par->mmap_map = kcalloc(j, sizeof(*par->mmap_map), GFP_ATOMIC);
if (!par->mmap_map) {
PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n");
return -ENOMEM;
}
- memset(par->mmap_map, 0, j * sizeof(*par->mmap_map));
for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) {
struct resource *rp = &pdev->resource[i];
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 0be25fa5540c..1a056adb61c8 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -47,7 +47,7 @@ static int radeon_bl_get_level_brightness(struct radeon_bl_privdata *pdata,
static int radeon_bl_update_status(struct backlight_device *bd)
{
- struct radeon_bl_privdata *pdata = class_get_devdata(&bd->class_dev);
+ struct radeon_bl_privdata *pdata = bl_get_data(bd);
struct radeonfb_info *rinfo = pdata->rinfo;
u32 lvds_gen_cntl, tmpPixclksCntl;
int level;
@@ -206,7 +206,7 @@ void radeonfb_bl_exit(struct radeonfb_info *rinfo)
if (bd) {
struct radeon_bl_privdata *pdata;
- pdata = class_get_devdata(&bd->class_dev);
+ pdata = bl_get_data(bd);
backlight_device_unregister(bd);
kfree(pdata);
rinfo->info->bl_dev = NULL;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 2ce050193018..47ca62fe7c3e 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -153,6 +153,8 @@ static struct pci_device_id radeonfb_pci_table[] = {
/* Mobility 9200 (M9+) */
CHIP_DEF(PCI_CHIP_RV280_5C61, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
CHIP_DEF(PCI_CHIP_RV280_5C63, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
+ /* Mobility Xpress 200 */
+ CHIP_DEF(PCI_CHIP_RS485_5975, R300, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
/* 9200 */
CHIP_DEF(PCI_CHIP_RV280_5960, RV280, CHIP_HAS_CRTC2),
CHIP_DEF(PCI_CHIP_RV280_5961, RV280, CHIP_HAS_CRTC2),
@@ -2102,7 +2104,9 @@ static ssize_t radeon_show_one_edid(char *buf, loff_t off, size_t count, const u
}
-static ssize_t radeon_show_edid1(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t radeon_show_edid1(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2113,7 +2117,9 @@ static ssize_t radeon_show_edid1(struct kobject *kobj, char *buf, loff_t off, si
}
-static ssize_t radeon_show_edid2(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t radeon_show_edid2(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2126,7 +2132,6 @@ static ssize_t radeon_show_edid2(struct kobject *kobj, char *buf, loff_t off, si
static struct bin_attribute edid1_attr = {
.attr = {
.name = "edid1",
- .owner = THIS_MODULE,
.mode = 0444,
},
.size = EDID_LENGTH,
@@ -2136,7 +2141,6 @@ static struct bin_attribute edid1_attr = {
static struct bin_attribute edid2_attr = {
.attr = {
.name = "edid2",
- .owner = THIS_MODULE,
.mode = 0444,
},
.size = EDID_LENGTH,
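
The EDID readers above follow the updated sysfs binary-attribute prototype, which now receives the struct bin_attribute and no longer carries an .owner field. A hedged sketch of a reader in that style (name, size and contents are hypothetical):

	static ssize_t my_blob_read(struct kobject *kobj, struct bin_attribute *attr,
				    char *buf, loff_t off, size_t count)
	{
		/* container_of(kobj, struct device, kobj) recovers the device;
		 * copy at most 'count' bytes starting at offset 'off' into 'buf' */
		return 0;
	}

	static struct bin_attribute my_blob_attr = {
		.attr = { .name = "my_blob", .mode = 0444 },
		.size = 128,
		.read = my_blob_read,
	};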
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7ebffcdfd1e3..7c922c7b460b 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -301,7 +301,7 @@ struct radeonfb_info {
void __iomem *bios_seg;
int fp_bios_start;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
struct { u8 red, green, blue, pad; }
palette[256];
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index dbf4ec3f6d57..03e57ef88378 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1589,11 +1589,10 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
return -EFAULT;
}
- fbi->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+ fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
if (!fbi->pseudo_palette) {
return -ENOMEM;
}
- memset(fbi->pseudo_palette, 0, sizeof(u32) * 16);
if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
print_err("Fail to allocate colormap (%d entries)",
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index fbef663fc057..2580f5fa2486 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -8,26 +8,32 @@ menuconfig BACKLIGHT_LCD_SUPPORT
Enable this to be able to choose the drivers for controlling the
backlight and the LCD panel on some platforms, for example on PDAs.
-config BACKLIGHT_CLASS_DEVICE
- tristate "Lowlevel Backlight controls"
+#
+# LCD
+#
+config LCD_CLASS_DEVICE
+ tristate "Lowlevel LCD controls"
depends on BACKLIGHT_LCD_SUPPORT
default m
help
- This framework adds support for low-level control of the LCD
- backlight. This includes support for brightness and power.
+ This framework adds support for low-level control of LCD.
+ Some framebuffer devices connect to platform-specific LCD modules
+ in order to have a platform-specific way to control the flat panel
+ (contrast and applying power to the LCD (not to the backlight!)).
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
-config LCD_CLASS_DEVICE
- tristate "Lowlevel LCD controls"
+#
+# Backlight
+#
+config BACKLIGHT_CLASS_DEVICE
+ tristate "Lowlevel Backlight controls"
depends on BACKLIGHT_LCD_SUPPORT
default m
help
- This framework adds support for low-level control of LCD.
- Some framebuffer devices connect to platform-specific LCD modules
- in order to have a platform-specific way to control the flat panel
- (contrast and applying power to the LCD (not to the backlight!)).
+ This framework adds support for low-level control of the LCD
+ backlight. This includes support for brightness and power.
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index c65e81ff3578..b26de8cf3112 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -69,18 +69,20 @@ static inline void backlight_unregister_fb(struct backlight_device *bd)
}
#endif /* CONFIG_FB */
-static ssize_t backlight_show_power(struct class_device *cdev, char *buf)
+static ssize_t backlight_show_power(struct device *dev,
+ struct device_attribute *attr,char *buf)
{
- struct backlight_device *bd = to_backlight_device(cdev);
+ struct backlight_device *bd = to_backlight_device(dev);
return sprintf(buf, "%d\n", bd->props.power);
}
-static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count)
+static ssize_t backlight_store_power(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
char *endp;
- struct backlight_device *bd = to_backlight_device(cdev);
+ struct backlight_device *bd = to_backlight_device(dev);
int power = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
@@ -101,18 +103,20 @@ static ssize_t backlight_store_power(struct class_device *cdev, const char *buf,
return rc;
}
-static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf)
+static ssize_t backlight_show_brightness(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct backlight_device *bd = to_backlight_device(cdev);
+ struct backlight_device *bd = to_backlight_device(dev);
return sprintf(buf, "%d\n", bd->props.brightness);
}
-static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count)
+static ssize_t backlight_store_brightness(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
char *endp;
- struct backlight_device *bd = to_backlight_device(cdev);
+ struct backlight_device *bd = to_backlight_device(dev);
int brightness = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
@@ -138,18 +142,19 @@ static ssize_t backlight_store_brightness(struct class_device *cdev, const char
return rc;
}
-static ssize_t backlight_show_max_brightness(struct class_device *cdev, char *buf)
+static ssize_t backlight_show_max_brightness(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct backlight_device *bd = to_backlight_device(cdev);
+ struct backlight_device *bd = to_backlight_device(dev);
return sprintf(buf, "%d\n", bd->props.max_brightness);
}
-static ssize_t backlight_show_actual_brightness(struct class_device *cdev,
- char *buf)
+static ssize_t backlight_show_actual_brightness(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int rc = -ENXIO;
- struct backlight_device *bd = to_backlight_device(cdev);
+ struct backlight_device *bd = to_backlight_device(dev);
mutex_lock(&bd->ops_lock);
if (bd->ops && bd->ops->get_brightness)
@@ -159,31 +164,22 @@ static ssize_t backlight_show_actual_brightness(struct class_device *cdev,
return rc;
}
-static void backlight_class_release(struct class_device *dev)
+struct class *backlight_class;
+
+static void bl_device_release(struct device *dev)
{
struct backlight_device *bd = to_backlight_device(dev);
kfree(bd);
}
-static struct class backlight_class = {
- .name = "backlight",
- .release = backlight_class_release,
-};
-
-#define DECLARE_ATTR(_name,_mode,_show,_store) \
-{ \
- .attr = { .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
- .show = _show, \
- .store = _store, \
-}
-
-static const struct class_device_attribute bl_class_device_attributes[] = {
- DECLARE_ATTR(power, 0644, backlight_show_power, backlight_store_power),
- DECLARE_ATTR(brightness, 0644, backlight_show_brightness,
+static struct device_attribute bl_device_attributes[] = {
+ __ATTR(bl_power, 0644, backlight_show_power, backlight_store_power),
+ __ATTR(brightness, 0644, backlight_show_brightness,
backlight_store_brightness),
- DECLARE_ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
+ __ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
NULL),
- DECLARE_ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
+ __ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
+ __ATTR_NULL,
};
/**
@@ -191,22 +187,20 @@ static const struct class_device_attribute bl_class_device_attributes[] = {
* backlight_device class.
* @name: the name of the new object(must be the same as the name of the
* respective framebuffer device).
- * @devdata: an optional pointer to be stored in the class_device. The
- * methods may retrieve it by using class_get_devdata(&bd->class_dev).
+ * @devdata: an optional pointer to be stored for private driver use. The
+ * methods may retrieve it by using bl_get_data(bd).
* @ops: the backlight operations structure.
*
- * Creates and registers new backlight class_device. Returns either an
+ * Creates and registers new backlight device. Returns either an
* ERR_PTR() or a pointer to the newly allocated device.
*/
struct backlight_device *backlight_device_register(const char *name,
- struct device *dev,
- void *devdata,
- struct backlight_ops *ops)
+ struct device *parent, void *devdata, struct backlight_ops *ops)
{
- int i, rc;
struct backlight_device *new_bd;
+ int rc;
- pr_debug("backlight_device_alloc: name=%s\n", name);
+ pr_debug("backlight_device_register: name=%s\n", name);
new_bd = kzalloc(sizeof(struct backlight_device), GFP_KERNEL);
if (!new_bd)
@@ -214,13 +208,14 @@ struct backlight_device *backlight_device_register(const char *name,
mutex_init(&new_bd->update_lock);
mutex_init(&new_bd->ops_lock);
- new_bd->ops = ops;
- new_bd->class_dev.class = &backlight_class;
- new_bd->class_dev.dev = dev;
- strlcpy(new_bd->class_dev.class_id, name, KOBJ_NAME_LEN);
- class_set_devdata(&new_bd->class_dev, devdata);
- rc = class_device_register(&new_bd->class_dev);
+ new_bd->dev.class = backlight_class;
+ new_bd->dev.parent = parent;
+ new_bd->dev.release = bl_device_release;
+ strlcpy(new_bd->dev.bus_id, name, BUS_ID_SIZE);
+ dev_set_drvdata(&new_bd->dev, devdata);
+
+ rc = device_register(&new_bd->dev);
if (rc) {
kfree(new_bd);
return ERR_PTR(rc);
@@ -228,23 +223,11 @@ struct backlight_device *backlight_device_register(const char *name,
rc = backlight_register_fb(new_bd);
if (rc) {
- class_device_unregister(&new_bd->class_dev);
+ device_unregister(&new_bd->dev);
return ERR_PTR(rc);
}
-
- for (i = 0; i < ARRAY_SIZE(bl_class_device_attributes); i++) {
- rc = class_device_create_file(&new_bd->class_dev,
- &bl_class_device_attributes[i]);
- if (rc) {
- while (--i >= 0)
- class_device_remove_file(&new_bd->class_dev,
- &bl_class_device_attributes[i]);
- class_device_unregister(&new_bd->class_dev);
- /* No need to kfree(new_bd) since release() method was called */
- return ERR_PTR(rc);
- }
- }
+ new_bd->ops = ops;
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
@@ -265,42 +248,40 @@ EXPORT_SYMBOL(backlight_device_register);
*/
void backlight_device_unregister(struct backlight_device *bd)
{
- int i;
-
if (!bd)
return;
- pr_debug("backlight_device_unregister: name=%s\n", bd->class_dev.class_id);
-
#ifdef CONFIG_PMAC_BACKLIGHT
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight == bd)
pmac_backlight = NULL;
mutex_unlock(&pmac_backlight_mutex);
#endif
-
- for (i = 0; i < ARRAY_SIZE(bl_class_device_attributes); i++)
- class_device_remove_file(&bd->class_dev,
- &bl_class_device_attributes[i]);
-
mutex_lock(&bd->ops_lock);
bd->ops = NULL;
mutex_unlock(&bd->ops_lock);
backlight_unregister_fb(bd);
-
- class_device_unregister(&bd->class_dev);
+ device_unregister(&bd->dev);
}
EXPORT_SYMBOL(backlight_device_unregister);
static void __exit backlight_class_exit(void)
{
- class_unregister(&backlight_class);
+ class_destroy(backlight_class);
}
static int __init backlight_class_init(void)
{
- return class_register(&backlight_class);
+ backlight_class = class_create(THIS_MODULE, "backlight");
+ if (IS_ERR(backlight_class)) {
+ printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(backlight_class));
+ return PTR_ERR(backlight_class);
+ }
+
+ backlight_class->dev_attrs = bl_device_attributes;
+ return 0;
}
/*
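
With the conversion to struct device, a backlight driver registers against a parent device and fetches its private data with bl_get_data() instead of class_get_devdata(). A minimal sketch under the new API ("mydisplay", my_priv and my_hw_set_brightness() are hypothetical):

	static int my_bl_update_status(struct backlight_device *bd)
	{
		struct my_priv *priv = bl_get_data(bd);	/* data passed at register time */

		my_hw_set_brightness(priv, bd->props.brightness);	/* hypothetical helper */
		return 0;
	}

	static struct backlight_ops my_bl_ops = {
		.update_status	= my_bl_update_status,
	};

	/* at probe time, against the owning platform/PCI device: */
	bd = backlight_device_register("mydisplay", &pdev->dev, priv, &my_bl_ops);
	if (IS_ERR(bd))
		return PTR_ERR(bd);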
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index e9bbc3455c94..b7904da51b23 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -174,7 +174,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
struct cr_panel *crp;
u8 dev_en;
- crp = kzalloc(sizeof(crp), GFP_KERNEL);
+ crp = kzalloc(sizeof(*crp), GFP_KERNEL);
if (crp == NULL)
return -ENOMEM;
@@ -202,7 +202,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
}
crp->cr_lcd_device = lcd_device_register("cr-lcd",
- &pdev->dev,
+ &pdev->dev, NULL,
&cr_lcd_ops);
if (IS_ERR(crp->cr_lcd_device)) {
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 6ef8f0a7a137..6f652c65fae1 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -61,10 +61,11 @@ static inline void lcd_unregister_fb(struct lcd_device *ld)
}
#endif /* CONFIG_FB */
-static ssize_t lcd_show_power(struct class_device *cdev, char *buf)
+static ssize_t lcd_show_power(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int rc;
- struct lcd_device *ld = to_lcd_device(cdev);
+ struct lcd_device *ld = to_lcd_device(dev);
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->get_power)
@@ -76,11 +77,12 @@ static ssize_t lcd_show_power(struct class_device *cdev, char *buf)
return rc;
}
-static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count)
+static ssize_t lcd_store_power(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
char *endp;
- struct lcd_device *ld = to_lcd_device(cdev);
+ struct lcd_device *ld = to_lcd_device(dev);
int power = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
@@ -100,10 +102,11 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_
return rc;
}
-static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
+static ssize_t lcd_show_contrast(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int rc = -ENXIO;
- struct lcd_device *ld = to_lcd_device(cdev);
+ struct lcd_device *ld = to_lcd_device(dev);
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->get_contrast)
@@ -113,11 +116,12 @@ static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf)
return rc;
}
-static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count)
+static ssize_t lcd_store_contrast(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
char *endp;
- struct lcd_device *ld = to_lcd_device(cdev);
+ struct lcd_device *ld = to_lcd_device(dev);
int contrast = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
@@ -137,53 +141,45 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si
return rc;
}
-static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf)
+static ssize_t lcd_show_max_contrast(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct lcd_device *ld = to_lcd_device(cdev);
+ struct lcd_device *ld = to_lcd_device(dev);
return sprintf(buf, "%d\n", ld->props.max_contrast);
}
-static void lcd_class_release(struct class_device *dev)
+struct class *lcd_class;
+
+static void lcd_device_release(struct device *dev)
{
struct lcd_device *ld = to_lcd_device(dev);
kfree(ld);
}
-static struct class lcd_class = {
- .name = "lcd",
- .release = lcd_class_release,
-};
-
-#define DECLARE_ATTR(_name,_mode,_show,_store) \
-{ \
- .attr = { .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
- .show = _show, \
- .store = _store, \
-}
-
-static const struct class_device_attribute lcd_class_device_attributes[] = {
- DECLARE_ATTR(power, 0644, lcd_show_power, lcd_store_power),
- DECLARE_ATTR(contrast, 0644, lcd_show_contrast, lcd_store_contrast),
- DECLARE_ATTR(max_contrast, 0444, lcd_show_max_contrast, NULL),
+static struct device_attribute lcd_device_attributes[] = {
+ __ATTR(lcd_power, 0644, lcd_show_power, lcd_store_power),
+ __ATTR(contrast, 0644, lcd_show_contrast, lcd_store_contrast),
+ __ATTR(max_contrast, 0444, lcd_show_max_contrast, NULL),
+ __ATTR_NULL,
};
/**
* lcd_device_register - register a new object of lcd_device class.
* @name: the name of the new object(must be the same as the name of the
* respective framebuffer device).
- * @devdata: an optional pointer to be stored in the class_device. The
- * methods may retrieve it by using class_get_devdata(ld->class_dev).
+ * @devdata: an optional pointer to be stored in the device. The
+ * methods may retrieve it by using lcd_get_data(ld).
* @ops: the lcd operations structure.
*
- * Creates and registers a new lcd class_device. Returns either an ERR_PTR()
+ * Creates and registers a new lcd device. Returns either an ERR_PTR()
* or a pointer to the newly allocated device.
*/
-struct lcd_device *lcd_device_register(const char *name, void *devdata,
- struct lcd_ops *ops)
+struct lcd_device *lcd_device_register(const char *name, struct device *parent,
+ void *devdata, struct lcd_ops *ops)
{
- int i, rc;
struct lcd_device *new_ld;
+ int rc;
pr_debug("lcd_device_register: name=%s\n", name);
@@ -193,12 +189,14 @@ struct lcd_device *lcd_device_register(const char *name, void *devdata,
mutex_init(&new_ld->ops_lock);
mutex_init(&new_ld->update_lock);
- new_ld->ops = ops;
- new_ld->class_dev.class = &lcd_class;
- strlcpy(new_ld->class_dev.class_id, name, KOBJ_NAME_LEN);
- class_set_devdata(&new_ld->class_dev, devdata);
- rc = class_device_register(&new_ld->class_dev);
+ new_ld->dev.class = lcd_class;
+ new_ld->dev.parent = parent;
+ new_ld->dev.release = lcd_device_release;
+ strlcpy(new_ld->dev.bus_id, name, BUS_ID_SIZE);
+ dev_set_drvdata(&new_ld->dev, devdata);
+
+ rc = device_register(&new_ld->dev);
if (rc) {
kfree(new_ld);
return ERR_PTR(rc);
@@ -206,22 +204,11 @@ struct lcd_device *lcd_device_register(const char *name, void *devdata,
rc = lcd_register_fb(new_ld);
if (rc) {
- class_device_unregister(&new_ld->class_dev);
+ device_unregister(&new_ld->dev);
return ERR_PTR(rc);
}
- for (i = 0; i < ARRAY_SIZE(lcd_class_device_attributes); i++) {
- rc = class_device_create_file(&new_ld->class_dev,
- &lcd_class_device_attributes[i]);
- if (rc) {
- while (--i >= 0)
- class_device_remove_file(&new_ld->class_dev,
- &lcd_class_device_attributes[i]);
- class_device_unregister(&new_ld->class_dev);
- /* No need to kfree(new_ld) since release() method was called */
- return ERR_PTR(rc);
- }
- }
+ new_ld->ops = ops;
return new_ld;
}
@@ -235,33 +222,34 @@ EXPORT_SYMBOL(lcd_device_register);
*/
void lcd_device_unregister(struct lcd_device *ld)
{
- int i;
-
if (!ld)
return;
- pr_debug("lcd_device_unregister: name=%s\n", ld->class_dev.class_id);
-
- for (i = 0; i < ARRAY_SIZE(lcd_class_device_attributes); i++)
- class_device_remove_file(&ld->class_dev,
- &lcd_class_device_attributes[i]);
-
mutex_lock(&ld->ops_lock);
ld->ops = NULL;
mutex_unlock(&ld->ops_lock);
lcd_unregister_fb(ld);
- class_device_unregister(&ld->class_dev);
+
+ device_unregister(&ld->dev);
}
EXPORT_SYMBOL(lcd_device_unregister);
static void __exit lcd_class_exit(void)
{
- class_unregister(&lcd_class);
+ class_destroy(lcd_class);
}
static int __init lcd_class_init(void)
{
- return class_register(&lcd_class);
+ lcd_class = class_create(THIS_MODULE, "lcd");
+ if (IS_ERR(lcd_class)) {
+ printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(lcd_class));
+ return PTR_ERR(lcd_class);
+ }
+
+ lcd_class->dev_attrs = lcd_device_attributes;
+ return 0;
}
/*
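
lcd_device_register() gains the same parent argument, which is why the cr_bllcd call earlier in this patch now passes &pdev->dev. Sketched usage under the new signature (names are hypothetical; the ops callbacks retrieve their data with lcd_get_data()):

	ld = lcd_device_register("mypanel", &pdev->dev, priv, &my_lcd_ops);
	if (IS_ERR(ld))
		return PTR_ERR(ld);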
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 50b78af0fa24..dea6579941b7 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -366,11 +366,10 @@ int __init clps711xfb_init(void)
if (fb_get_options("clps711xfb", NULL))
return -ENODEV;
- cfb = kmalloc(sizeof(*cfb), GFP_KERNEL);
+ cfb = kzalloc(sizeof(*cfb), GFP_KERNEL);
if (!cfb)
goto out;
- memset(cfb, 0, sizeof(*cfb));
strcpy(cfb->fix.id, "clps711x");
cfb->fbops = &clps7111fb_ops;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 63b85bf81a65..49643969f9f8 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -6,7 +6,7 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EMBEDDED || !X86
- depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !ARCH_VERSATILE && !SUPERH
+ depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !ARCH_VERSATILE && !SUPERH && !BFIN
default y
help
Saying Y here will allow you to use Linux in text mode through a
@@ -118,6 +118,22 @@ config FRAMEBUFFER_CONSOLE
help
Low-level framebuffer-based console driver.
+config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+ bool "Map the console to the primary display device"
+ depends on FRAMEBUFFER_CONSOLE
+ default n
+ ---help---
+ If this option is selected, the framebuffer console will
+ automatically select the primary display device (if the architecture
+ supports this feature). Otherwise, the framebuffer console will
+ always select the first framebuffer driver that is loaded. The latter
+ is the default behavior.
+
+ You can always override the automatic selection of the primary device
+ by using the fbcon=map: boot option.
+
+ If unsure, select n.
+
config FRAMEBUFFER_CONSOLE_ROTATION
bool "Framebuffer Console Rotation"
depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 73813c60d03a..decfdc8eb9cc 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -75,6 +75,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h> /* For counting font checksums */
+#include <asm/fb.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -125,6 +126,8 @@ static int first_fb_vc;
static int last_fb_vc = MAX_NR_CONSOLES - 1;
static int fbcon_is_default = 1;
static int fbcon_has_exited;
+static int primary_device = -1;
+static int map_override;
/* font data */
static char fontname[40];
@@ -152,6 +155,7 @@ static int fbcon_set_origin(struct vc_data *);
#define DEFAULT_CURSOR_BLINK_RATE (20)
static int vbl_cursor_cnt;
+static int fbcon_cursor_noblink;
#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
@@ -188,16 +192,14 @@ static __inline__ void ypan_down(struct vc_data *vc, int count);
static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- struct vc_data *vc);
-static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- int unit);
+ int unit);
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
static void fbcon_exit(void);
-static struct class_device *fbcon_class_device;
+static struct device *fbcon_device;
#ifdef CONFIG_MAC
/*
@@ -441,7 +443,8 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
struct fbcon_ops *ops = info->fbcon_par;
if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
- !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
+ !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
+ !fbcon_cursor_noblink) {
if (!info->queue.func)
INIT_WORK(&info->queue, fb_flashcursor);
@@ -495,13 +498,17 @@ static int __init fb_console_setup(char *this_opt)
if (!strncmp(options, "map:", 4)) {
options += 4;
- if (*options)
+ if (*options) {
for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) {
if (!options[j])
j = 0;
con2fb_map_boot[i] =
(options[j++]-'0') % FB_MAX;
}
+
+ map_override = 1;
+ }
+
return 1;
}
@@ -736,7 +743,9 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
if (!err) {
info->fbcon_par = ops;
- set_blitting_type(vc, info);
+
+ if (vc)
+ set_blitting_type(vc, info);
}
if (err) {
@@ -798,11 +807,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
ops->flags |= FBCON_FLAGS_INIT;
ops->graphics = 0;
-
- if (vc)
- fbcon_set_disp(info, &info->var, vc);
- else
- fbcon_preset_disp(info, &info->var, unit);
+ fbcon_set_disp(info, &info->var, unit);
if (show_logo) {
struct vc_data *fg_vc = vc_cons[fg_console].d;
@@ -1107,6 +1112,9 @@ static void fbcon_init(struct vc_data *vc, int init)
if (var_to_display(p, &info->var, info))
return;
+ if (!info->fbcon_par)
+ con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
+
/* If we are not the first console on this
fb, copy the font from that console */
t = &fb_display[fg_console];
@@ -1349,6 +1357,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
+ if (vc->vc_cursor_type & 0x10)
+ fbcon_del_cursor_timer(info);
+ else
+ fbcon_add_cursor_timer(info);
+
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
if (mode & CM_SOFTBACK) {
mode &= ~CM_SOFTBACK;
@@ -1368,36 +1381,29 @@ static int scrollback_phys_max = 0;
static int scrollback_max = 0;
static int scrollback_current = 0;
-/*
- * If no vc is existent yet, just set struct display
- */
-static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- int unit)
-{
- struct display *p = &fb_display[unit];
- struct display *t = &fb_display[fg_console];
-
- if (var_to_display(p, var, info))
- return;
-
- p->fontdata = t->fontdata;
- p->userfont = t->userfont;
- if (p->userfont)
- REFCOUNT(p->fontdata)++;
-}
-
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
- struct vc_data *vc)
+ int unit)
{
- struct display *p = &fb_display[vc->vc_num], *t;
- struct vc_data **default_mode = vc->vc_display_fg;
- struct vc_data *svc = *default_mode;
+ struct display *p, *t;
+ struct vc_data **default_mode, *vc;
+ struct vc_data *svc;
struct fbcon_ops *ops = info->fbcon_par;
int rows, cols, charcnt = 256;
+ p = &fb_display[unit];
+
if (var_to_display(p, var, info))
return;
+
+ vc = vc_cons[unit].d;
+
+ if (!vc)
+ return;
+
+ default_mode = vc->vc_display_fg;
+ svc = *default_mode;
t = &fb_display[svc->vc_num];
+
if (!vc->vc_font.data) {
vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
vc->vc_font.width = (*default_mode)->vc_font.width;
@@ -1704,6 +1710,56 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
}
}
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+ struct display *p, int line, int count, int ycount)
+{
+ int offset = ycount * vc->vc_cols;
+ unsigned short *d = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+ unsigned short *s = d + offset;
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+
+ do {
+ c = scr_readw(s);
+
+ if (c == scr_readw(d)) {
+ if (s > start) {
+ ops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s-start);
+ x += s - start + 1;
+ start = s + 1;
+ } else {
+ x++;
+ start++;
+ }
+ }
+
+ scr_writew(c, d);
+ console_conditional_schedule();
+ s++;
+ d++;
+ } while (s < le);
+ if (s > start)
+ ops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s-start);
+ console_conditional_schedule();
+ if (ycount > 0)
+ line++;
+ else {
+ line--;
+ /* NOTE: We subtract two lines from these pointers */
+ s -= vc->vc_size_row;
+ d -= vc->vc_size_row;
+ }
+ }
+}
+
static void fbcon_redraw(struct vc_data *vc, struct display *p,
int line, int count, int offset)
{
@@ -1789,7 +1845,6 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
- struct fbcon_ops *ops = info->fbcon_par;
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
@@ -1813,10 +1868,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
goto redraw_up;
switch (p->scrollmode) {
case SCROLL_MOVE:
- ops->bmove(vc, info, t + count, 0, t, 0,
- b - t - count, vc->vc_cols);
- ops->clear(vc, info, b - count, 0, count,
- vc->vc_cols);
+ fbcon_redraw_blit(vc, info, p, t, b - t - count,
+ count);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return 1;
break;
case SCROLL_WRAP_MOVE:
@@ -1899,9 +1959,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
goto redraw_down;
switch (p->scrollmode) {
case SCROLL_MOVE:
- ops->bmove(vc, info, t, 0, t + count, 0,
- b - t - count, vc->vc_cols);
- ops->clear(vc, info, t, 0, count, vc->vc_cols);
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+ -count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return 1;
break;
case SCROLL_WRAP_MOVE:
@@ -2937,9 +3003,48 @@ static int fbcon_mode_deleted(struct fb_info *info,
return found;
}
-static int fbcon_fb_unregistered(int idx)
+#ifdef CONFIG_VT_HW_CONSOLE_BINDING
+static int fbcon_unbind(void)
{
- int i;
+ int ret;
+
+ ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+ return ret;
+}
+#else
+static inline int fbcon_unbind(void)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
+static int fbcon_fb_unbind(int idx)
+{
+ int i, new_idx = -1, ret = 0;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] != idx &&
+ con2fb_map[i] != -1) {
+ new_idx = i;
+ break;
+ }
+ }
+
+ if (new_idx != -1) {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] == idx)
+ set_con2fb_map(i, new_idx, 0);
+ }
+ } else
+ ret = fbcon_unbind();
+
+ return ret;
+}
+
+static int fbcon_fb_unregistered(struct fb_info *info)
+{
+ int i, idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] == idx)
@@ -2967,12 +3072,48 @@ static int fbcon_fb_unregistered(int idx)
if (!num_registered_fb)
unregister_con_driver(&fb_con);
+
+ if (primary_device == idx)
+ primary_device = -1;
+
return 0;
}
-static int fbcon_fb_registered(int idx)
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
+static void fbcon_select_primary(struct fb_info *info)
{
- int ret = 0, i;
+ if (!map_override && primary_device == -1 &&
+ fb_is_primary_device(info)) {
+ int i;
+
+ printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n",
+ info->fix.id, info->node);
+ primary_device = info->node;
+
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map_boot[i] = primary_device;
+
+ if (con_is_bound(&fb_con)) {
+ printk(KERN_INFO "fbcon: Remapping primary device, "
+ "fb%i, to tty %i-%i\n", info->node,
+ first_fb_vc + 1, last_fb_vc + 1);
+ info_idx = primary_device;
+ }
+ }
+
+}
+#else
+static inline void fbcon_select_primary(struct fb_info *info)
+{
+ return;
+}
+#endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
+
+static int fbcon_fb_registered(struct fb_info *info)
+{
+ int ret = 0, i, idx = info->node;
+
+ fbcon_select_primary(info);
if (info_idx == -1) {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2986,8 +3127,7 @@ static int fbcon_fb_registered(int idx)
ret = fbcon_takeover(1);
} else {
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (con2fb_map_boot[i] == idx &&
- con2fb_map[i] == -1)
+ if (con2fb_map_boot[i] == idx)
set_con2fb_map(i, idx, 0);
}
}
@@ -3034,12 +3174,7 @@ static void fbcon_new_modelist(struct fb_info *info)
mode = fb_find_nearest_mode(fb_display[i].mode,
&info->modelist);
fb_videomode_to_var(&var, mode);
-
- if (vc)
- fbcon_set_disp(info, &var, vc);
- else
- fbcon_preset_disp(info, &var, i);
-
+ fbcon_set_disp(info, &var, vc->vc_num);
}
}
@@ -3114,11 +3249,14 @@ static int fbcon_event_notify(struct notifier_block *self,
mode = event->data;
ret = fbcon_mode_deleted(info, mode);
break;
+ case FB_EVENT_FB_UNBIND:
+ ret = fbcon_fb_unbind(info->node);
+ break;
case FB_EVENT_FB_REGISTERED:
- ret = fbcon_fb_registered(info->node);
+ ret = fbcon_fb_registered(info);
break;
case FB_EVENT_FB_UNREGISTERED:
- ret = fbcon_fb_unregistered(info->node);
+ ret = fbcon_fb_unregistered(info);
break;
case FB_EVENT_SET_CONSOLE_MAP:
con2fb = event->data;
@@ -3179,8 +3317,9 @@ static struct notifier_block fbcon_event_notifier = {
.notifier_call = fbcon_event_notify,
};
-static ssize_t store_rotate(struct class_device *class_device,
- const char *buf, size_t count)
+static ssize_t store_rotate(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct fb_info *info;
int rotate, idx;
@@ -3203,8 +3342,9 @@ err:
return count;
}
-static ssize_t store_rotate_all(struct class_device *class_device,
- const char *buf, size_t count)
+static ssize_t store_rotate_all(struct device *device,
+ struct device_attribute *attr,const char *buf,
+ size_t count)
{
struct fb_info *info;
int rotate, idx;
@@ -3227,7 +3367,8 @@ err:
return count;
}
-static ssize_t show_rotate(struct class_device *class_device, char *buf)
+static ssize_t show_rotate(struct device *device,
+ struct device_attribute *attr,char *buf)
{
struct fb_info *info;
int rotate = 0, idx;
@@ -3248,20 +3389,86 @@ err:
return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
}
-static struct class_device_attribute class_device_attrs[] = {
+static ssize_t show_cursor_blink(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info;
+ struct fbcon_ops *ops;
+ int idx, blink = -1;
+
+ if (fbcon_has_exited)
+ return 0;
+
+ acquire_console_sem();
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+
+ info = registered_fb[idx];
+ ops = info->fbcon_par;
+
+ if (!ops)
+ goto err;
+
+ blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
+err:
+ release_console_sem();
+ return snprintf(buf, PAGE_SIZE, "%d\n", blink);
+}
+
+static ssize_t store_cursor_blink(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info;
+ int blink, idx;
+ char **last = NULL;
+
+ if (fbcon_has_exited)
+ return count;
+
+ acquire_console_sem();
+ idx = con2fb_map[fg_console];
+
+ if (idx == -1 || registered_fb[idx] == NULL)
+ goto err;
+
+ info = registered_fb[idx];
+
+ if (!info->fbcon_par)
+ goto err;
+
+ blink = simple_strtoul(buf, last, 0);
+
+ if (blink) {
+ fbcon_cursor_noblink = 0;
+ fbcon_add_cursor_timer(info);
+ } else {
+ fbcon_cursor_noblink = 1;
+ fbcon_del_cursor_timer(info);
+ }
+
+err:
+ release_console_sem();
+ return count;
+}
+
+static struct device_attribute device_attrs[] = {
__ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
__ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
+ __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink,
+ store_cursor_blink),
};
-static int fbcon_init_class_device(void)
+static int fbcon_init_device(void)
{
int i, error = 0;
fbcon_has_sysfs = 1;
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
- error = class_device_create_file(fbcon_class_device,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+ error = device_create_file(fbcon_device, &device_attrs[i]);
if (error)
break;
@@ -3269,8 +3476,7 @@ static int fbcon_init_class_device(void)
if (error) {
while (--i >= 0)
- class_device_remove_file(fbcon_class_device,
- &class_device_attrs[i]);
+ device_remove_file(fbcon_device, &device_attrs[i]);
fbcon_has_sysfs = 0;
}
@@ -3356,16 +3562,15 @@ static int __init fb_console_init(void)
acquire_console_sem();
fb_register_client(&fbcon_event_notifier);
- fbcon_class_device =
- class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
+ fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), "fbcon");
- if (IS_ERR(fbcon_class_device)) {
- printk(KERN_WARNING "Unable to create class_device "
+ if (IS_ERR(fbcon_device)) {
+ printk(KERN_WARNING "Unable to create device "
"for fbcon; errno = %ld\n",
- PTR_ERR(fbcon_class_device));
- fbcon_class_device = NULL;
+ PTR_ERR(fbcon_device));
+ fbcon_device = NULL;
} else
- fbcon_init_class_device();
+ fbcon_init_device();
for (i = 0; i < MAX_NR_CONSOLES; i++)
con2fb_map[i] = -1;
@@ -3379,14 +3584,13 @@ module_init(fb_console_init);
#ifdef MODULE
-static void __exit fbcon_deinit_class_device(void)
+static void __exit fbcon_deinit_device(void)
{
int i;
if (fbcon_has_sysfs) {
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_remove_file(fbcon_class_device,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+ device_remove_file(fbcon_device, &device_attrs[i]);
fbcon_has_sysfs = 0;
}
@@ -3396,8 +3600,8 @@ static void __exit fb_console_exit(void)
{
acquire_console_sem();
fb_unregister_client(&fbcon_event_notifier);
- fbcon_deinit_class_device();
- class_device_destroy(fb_class, MKDEV(0, 0));
+ fbcon_deinit_device();
+ device_destroy(fb_class, MKDEV(0, 0));
fbcon_exit();
release_console_sem();
unregister_con_driver(&fb_con);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f46fe95f69fb..d18b73aafa0d 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -187,7 +187,11 @@ static void vgacon_scrollback_init(int pitch)
}
}
-static void vgacon_scrollback_startup(void)
+/*
+ * Called only during init so the call to alloc_bootmem is ok.
+ * Marked __init_refok to silence modpost.
+ */
+static void __init_refok vgacon_scrollback_startup(void)
{
vgacon_scrollback = alloc_bootmem(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE
* 1024);
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 8b762739b1e0..b0be7eac32d8 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -94,7 +94,7 @@ static inline int VAR_MATCH(struct fb_var_screeninfo *x, struct fb_var_screeninf
struct fb_info_control {
struct fb_info info;
struct fb_par_control par;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index 7a6eeda5ae9a..30ede6e8830f 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -1221,11 +1221,10 @@ cyberpro_alloc_fb_info(unsigned int id, char *name)
{
struct cfb_info *cfb;
- cfb = kmalloc(sizeof(struct cfb_info), GFP_KERNEL);
+ cfb = kzalloc(sizeof(struct cfb_info), GFP_KERNEL);
if (!cfb)
return NULL;
- memset(cfb, 0, sizeof(struct cfb_info));
cfb->id = id;
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index 94a66c2d2cf5..e23324d10be2 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -1068,15 +1068,18 @@ static int cyblafb_setcolreg(unsigned regno, unsigned red, unsigned green,
out8(0x3C9, green >> 10);
out8(0x3C9, blue >> 10);
- } else if (bpp == 16) // RGB 565
- ((u32 *) info->pseudo_palette)[regno] =
- (red & 0xF800) |
- ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
- else if (bpp == 32) // ARGB 8888
- ((u32 *) info->pseudo_palette)[regno] =
- ((transp & 0xFF00) << 16) |
- ((red & 0xFF00) << 8) |
- ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
+ } else if (regno < 16) {
+ if (bpp == 16) // RGB 565
+ ((u32 *) info->pseudo_palette)[regno] =
+ (red & 0xF800) |
+ ((green & 0xFC00) >> 5) |
+ ((blue & 0xF800) >> 11);
+ else if (bpp == 32) // ARGB 8888
+ ((u32 *) info->pseudo_palette)[regno] =
+ ((transp & 0xFF00) << 16) |
+ ((red & 0xFF00) << 8) |
+ ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
+ }
return 0;
}
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index ca2c54ce508e..33be46ccb54f 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -63,23 +63,12 @@
struct epson1355_par {
unsigned long reg_addr;
+ u32 pseudo_palette[16];
};
/* ------------------------------------------------------------------------- */
-#ifdef CONFIG_SUPERH
-
-static inline u8 epson1355_read_reg(int index)
-{
- return ctrl_inb(par.reg_addr + index);
-}
-
-static inline void epson1355_write_reg(u8 data, int index)
-{
- ctrl_outb(data, par.reg_addr + index);
-}
-
-#elif defined(CONFIG_ARM)
+#if defined(CONFIG_ARM)
# ifdef CONFIG_ARCH_CEIVA
# include <asm/arch/hardware.h>
@@ -289,7 +278,7 @@ static int epson1355fb_blank(int blank_mode, struct fb_info *info)
struct epson1355_par *par = info->par;
switch (blank_mode) {
- case FB_BLANK_UNBLANKING:
+ case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
lcd_enable(par, 1);
backlight_enable(1);
@@ -635,7 +624,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
goto bail;
}
- info = framebuffer_alloc(sizeof(struct epson1355_par) + sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(struct epson1355_par), &dev->dev);
if (!info) {
rc = -ENOMEM;
goto bail;
@@ -648,7 +637,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
rc = -ENOMEM;
goto bail;
}
- info->pseudo_palette = (void *)(default_par + 1);
+ info->pseudo_palette = default_par->pseudo_palette;
info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN);
if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 38c2e2558f5e..215ac579f901 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -33,17 +33,10 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/efi.h>
+#include <linux/fb.h>
-#if defined(__mc68000__) || defined(CONFIG_APUS)
-#include <asm/setup.h>
-#endif
+#include <asm/fb.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/fb.h>
/*
* Frame buffer device initialization and setup routines
@@ -411,10 +404,146 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
}
}
+static int fb_show_logo_line(struct fb_info *info, int rotate,
+ const struct linux_logo *logo, int y,
+ unsigned int n)
+{
+ u32 *palette = NULL, *saved_pseudo_palette = NULL;
+ unsigned char *logo_new = NULL, *logo_rotate = NULL;
+ struct fb_image image;
+
+ /* Return if the frame buffer is not mapped or suspended */
+ if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
+ info->flags & FBINFO_MODULE)
+ return 0;
+
+ image.depth = 8;
+ image.data = logo->data;
+
+ if (fb_logo.needs_cmapreset)
+ fb_set_logocmap(info, logo);
+
+ if (fb_logo.needs_truepalette ||
+ fb_logo.needs_directpalette) {
+ palette = kmalloc(256 * 4, GFP_KERNEL);
+ if (palette == NULL)
+ return 0;
+
+ if (fb_logo.needs_truepalette)
+ fb_set_logo_truepalette(info, logo, palette);
+ else
+ fb_set_logo_directpalette(info, logo, palette);
+
+ saved_pseudo_palette = info->pseudo_palette;
+ info->pseudo_palette = palette;
+ }
+
+ if (fb_logo.depth <= 4) {
+ logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
+ if (logo_new == NULL) {
+ kfree(palette);
+ if (saved_pseudo_palette)
+ info->pseudo_palette = saved_pseudo_palette;
+ return 0;
+ }
+ image.data = logo_new;
+ fb_set_logo(info, logo, logo_new, fb_logo.depth);
+ }
+
+ image.dx = 0;
+ image.dy = y;
+ image.width = logo->width;
+ image.height = logo->height;
+
+ if (rotate) {
+ logo_rotate = kmalloc(logo->width *
+ logo->height, GFP_KERNEL);
+ if (logo_rotate)
+ fb_rotate_logo(info, logo_rotate, &image, rotate);
+ }
+
+ fb_do_show_logo(info, &image, rotate, n);
+
+ kfree(palette);
+ if (saved_pseudo_palette != NULL)
+ info->pseudo_palette = saved_pseudo_palette;
+ kfree(logo_new);
+ kfree(logo_rotate);
+ return logo->height;
+}
+
+
+#ifdef CONFIG_FB_LOGO_EXTRA
+
+#define FB_LOGO_EX_NUM_MAX 10
+static struct logo_data_extra {
+ const struct linux_logo *logo;
+ unsigned int n;
+} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
+static unsigned int fb_logo_ex_num;
+
+void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
+{
+ if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
+ return;
+
+ fb_logo_ex[fb_logo_ex_num].logo = logo;
+ fb_logo_ex[fb_logo_ex_num].n = n;
+ fb_logo_ex_num++;
+}
+
+static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
+ unsigned int yres)
+{
+ unsigned int i;
+
+ /* FIXME: logo_ex supports only truecolor fb. */
+ if (info->fix.visual != FB_VISUAL_TRUECOLOR)
+ fb_logo_ex_num = 0;
+
+ for (i = 0; i < fb_logo_ex_num; i++) {
+ height += fb_logo_ex[i].logo->height;
+ if (height > yres) {
+ height -= fb_logo_ex[i].logo->height;
+ fb_logo_ex_num = i;
+ break;
+ }
+ }
+ return height;
+}
+
+static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+ unsigned int i;
+
+ for (i = 0; i < fb_logo_ex_num; i++)
+ y += fb_show_logo_line(info, rotate,
+ fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
+
+ return y;
+}
+
+#else /* !CONFIG_FB_LOGO_EXTRA */
+
+static inline int fb_prepare_extra_logos(struct fb_info *info,
+ unsigned int height,
+ unsigned int yres)
+{
+ return height;
+}
+
+static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+ return y;
+}
+
+#endif /* CONFIG_FB_LOGO_EXTRA */
+
+
int fb_prepare_logo(struct fb_info *info, int rotate)
{
int depth = fb_get_color_depth(&info->var, &info->fix);
- int yres;
+ unsigned int yres;
memset(&fb_logo, 0, sizeof(struct logo_data));
@@ -456,7 +585,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
if (!fb_logo.logo) {
return 0;
}
-
+
if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
yres = info->var.yres;
else
@@ -473,75 +602,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
fb_logo.depth = 4;
else
- fb_logo.depth = 1;
- return fb_logo.logo->height;
+ fb_logo.depth = 1;
+
+ return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
}
int fb_show_logo(struct fb_info *info, int rotate)
{
- u32 *palette = NULL, *saved_pseudo_palette = NULL;
- unsigned char *logo_new = NULL, *logo_rotate = NULL;
- struct fb_image image;
-
- /* Return if the frame buffer is not mapped or suspended */
- if (fb_logo.logo == NULL || info->state != FBINFO_STATE_RUNNING ||
- info->flags & FBINFO_MODULE)
- return 0;
-
- image.depth = 8;
- image.data = fb_logo.logo->data;
-
- if (fb_logo.needs_cmapreset)
- fb_set_logocmap(info, fb_logo.logo);
-
- if (fb_logo.needs_truepalette ||
- fb_logo.needs_directpalette) {
- palette = kmalloc(256 * 4, GFP_KERNEL);
- if (palette == NULL)
- return 0;
-
- if (fb_logo.needs_truepalette)
- fb_set_logo_truepalette(info, fb_logo.logo, palette);
- else
- fb_set_logo_directpalette(info, fb_logo.logo, palette);
-
- saved_pseudo_palette = info->pseudo_palette;
- info->pseudo_palette = palette;
- }
-
- if (fb_logo.depth <= 4) {
- logo_new = kmalloc(fb_logo.logo->width * fb_logo.logo->height,
- GFP_KERNEL);
- if (logo_new == NULL) {
- kfree(palette);
- if (saved_pseudo_palette)
- info->pseudo_palette = saved_pseudo_palette;
- return 0;
- }
- image.data = logo_new;
- fb_set_logo(info, fb_logo.logo, logo_new, fb_logo.depth);
- }
+ int y;
- image.dx = 0;
- image.dy = 0;
- image.width = fb_logo.logo->width;
- image.height = fb_logo.logo->height;
+ y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
+ num_online_cpus());
+ y = fb_show_extra_logos(info, y, rotate);
- if (rotate) {
- logo_rotate = kmalloc(fb_logo.logo->width *
- fb_logo.logo->height, GFP_KERNEL);
- if (logo_rotate)
- fb_rotate_logo(info, logo_rotate, &image, rotate);
- }
-
- fb_do_show_logo(info, &image, rotate, num_online_cpus());
-
- kfree(palette);
- if (saved_pseudo_palette != NULL)
- info->pseudo_palette = saved_pseudo_palette;
- kfree(logo_new);
- kfree(logo_rotate);
- return fb_logo.logo->height;
+ return y;
}
#else
int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
@@ -1155,17 +1229,15 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#endif
-static int
+static int
fb_mmap(struct file *file, struct vm_area_struct * vma)
{
int fbidx = iminor(file->f_path.dentry->d_inode);
struct fb_info *info = registered_fb[fbidx];
struct fb_ops *fb = info->fbops;
unsigned long off;
-#if !defined(__sparc__) || defined(__sparc_v9__)
unsigned long start;
u32 len;
-#endif
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
@@ -1180,12 +1252,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
return res;
}
-#if defined(__sparc__) && !defined(__sparc_v9__)
- /* Should never get here, all fb drivers should have their own
- mmap routines */
- return -EINVAL;
-#else
- /* !sparc32... */
lock_kernel();
/* frame buffer memory */
@@ -1209,50 +1275,11 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
vma->vm_pgoff = off >> PAGE_SHIFT;
/* This is an IO map - tell maydump to skip this VMA */
vma->vm_flags |= VM_IO | VM_RESERVED;
-#if defined(__mc68000__)
-#if defined(CONFIG_SUN3)
- pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
-#elif defined(CONFIG_MMU)
- if (CPU_IS_020_OR_030)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
- if (CPU_IS_040_OR_060) {
- pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
- /* Use no-cache mode, serialized */
- pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
- }
-#endif
-#elif defined(__powerpc__)
- vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-#elif defined(__alpha__)
- /* Caching is off in the I/O space quadrant by design. */
-#elif defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3)
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-#elif defined(__mips__) || defined(__sparc_v9__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#elif defined(__hppa__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-#elif defined(__avr32__)
- vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
- & ~_PAGE_CACHABLE)
- | (_PAGE_BUFFER | _PAGE_DIRTY));
-#elif defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#else
-#warning What do we have to do here??
-#endif
+ fb_pgprotect(file, vma, off);
if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
-#endif /* !sparc32 */
}
static int
@@ -1388,17 +1415,34 @@ register_framebuffer(struct fb_info *fb_info)
*
* Returns negative errno on error, or zero for success.
*
+ * This function will also notify the framebuffer console
+ * to release the driver.
+ *
+ * This is meant to be called within a driver's module_exit()
+ * function. If this is called outside module_exit(), ensure
+ * that the driver implements fb_open() and fb_release() to
+ * check that no processes are using the device.
*/
int
unregister_framebuffer(struct fb_info *fb_info)
{
struct fb_event event;
- int i;
+ int i, ret = 0;
i = fb_info->node;
- if (!registered_fb[i])
- return -EINVAL;
+ if (!registered_fb[i]) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
+
+ if (ret) {
+ ret = -EINVAL;
+ goto done;
+ }
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
@@ -1410,7 +1454,8 @@ unregister_framebuffer(struct fb_info *fb_info)
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
event.info = fb_info;
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
- return 0;
+done:
+ return ret;
}
/**
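A minimal sketch (driver and variable names hypothetical) of the calling pattern described in the unregister_framebuffer() comment above, with the call placed in the driver's module_exit() handler:

static void __exit examplefb_exit(void)
{
	struct fb_info *info = examplefb_info;	/* hypothetical per-driver pointer */

	if (info) {
		/* notifies fbcon (FB_EVENT_FB_UNBIND) before tearing the device down */
		unregister_framebuffer(info);
		framebuffer_release(info);
	}
}
module_exit(examplefb_exit);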
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 70ff55b14596..6c91c61cdb63 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -195,13 +195,15 @@ static int fm2fb_blank(int blank, struct fb_info *info)
static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
- if (regno > info->cmap.len)
- return 1;
- red >>= 8;
- green >>= 8;
- blue >>= 8;
+ if (regno < 16) {
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+
+ ((u32*)(info->pseudo_palette))[regno] = (red << 16) |
+ (green << 8) | blue;
+ }
- ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
return 0;
}
@@ -237,7 +239,7 @@ static int __devinit fm2fb_probe(struct zorro_dev *z,
if (!zorro_request_device(z,"fm2fb"))
return -ENXIO;
- info = framebuffer_alloc(256 * sizeof(u32), &z->dev);
+ info = framebuffer_alloc(16 * sizeof(u32), &z->dev);
if (!info) {
zorro_release_device(z);
return -ENOMEM;
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index bf0e60b5a3b6..b9b572b293d4 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -86,7 +86,7 @@ static int gbe_revision;
static int ypan, ywrap;
-static uint32_t pseudo_palette[256];
+static uint32_t pseudo_palette[16];
static char *mode_option __initdata = NULL;
@@ -854,8 +854,7 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
green >>= 8;
blue >>= 8;
- switch (info->var.bits_per_pixel) {
- case 8:
+ if (info->var.bits_per_pixel <= 8) {
/* wait for the color map FIFO to have a free entry */
for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
udelay(10);
@@ -864,23 +863,25 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
}
gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
- break;
- case 15:
- case 16:
- red >>= 3;
- green >>= 3;
- blue >>= 3;
- pseudo_palette[regno] =
- (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
- break;
- case 32:
- pseudo_palette[regno] =
- (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
- break;
+ } else if (regno < 16) {
+ switch (info->var.bits_per_pixel) {
+ case 15:
+ case 16:
+ red >>= 3;
+ green >>= 3;
+ blue >>= 3;
+ pseudo_palette[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ break;
+ case 32:
+ pseudo_palette[regno] =
+ (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ break;
+ }
}
return 0;
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 889e4ea5edc1..328ae6c673ec 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -266,7 +266,7 @@ struct i810fb_par {
struct i810fb_i2c_chan chan[3];
struct mutex open_lock;
unsigned int use_count;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
unsigned long mmio_start_phys;
u8 __iomem *mmio_start_virtual;
u8 *edid;
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index eb1a4812ad1d..b87ea21d3d78 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -379,10 +379,6 @@ int __init igafb_init(void)
if (fb_get_options("igafb", NULL))
return -ENODEV;
- /* Do not attach when we have a serial console. */
- if (!con_is_present())
- return -ENXIO;
-
pdev = pci_get_device(PCI_VENDOR_ID_INTERG,
PCI_DEVICE_ID_INTERG_1682, 0);
if (pdev == NULL) {
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 80b94c19a9fa..6148300fadd6 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -302,7 +302,7 @@ struct intelfb_info {
u32 ring_lockup;
/* palette */
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
/* chip info */
int pci_chipset;
diff --git a/drivers/video/kyro/STG4000InitDevice.c b/drivers/video/kyro/STG4000InitDevice.c
index ab5285a7f1d6..1d3f2080aa6f 100644
--- a/drivers/video/kyro/STG4000InitDevice.c
+++ b/drivers/video/kyro/STG4000InitDevice.c
@@ -247,7 +247,6 @@ int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev)
u32 ulCoreClock;
u32 tmp;
u32 ulChipSpeed;
- u8 rev;
STG_WRITE_REG(IntMask, 0xFFFF);
@@ -276,9 +275,9 @@ int SetCoreClockPLL(volatile STG4000REG __iomem *pSTGReg, struct pci_dev *pDev)
PMX2_SOFTRESET_ROM_RST);
pci_read_config_word(pDev, PCI_CONFIG_SUBSYS_ID, &sub);
- pci_read_config_byte(pDev, PCI_REVISION_ID, &rev);
- ulChipSpeed = InitSDRAMRegisters(pSTGReg, (u32)sub, (u32)rev);
+ ulChipSpeed = InitSDRAMRegisters(pSTGReg, (u32)sub,
+ (u32)pDev->revision);
if (ulChipSpeed == 0)
return -EINVAL;
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 9397bcef3018..9de1c114f809 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,6 +10,11 @@ menuconfig LOGO
if LOGO
+config FB_LOGO_EXTRA
+ bool
+ depends on FB=y
+ default y if SPU_BASE
+
config LOGO_LINUX_MONO
bool "Standard black and white Linux logo"
default y
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b985dfad6c63..a5fc4edf84e6 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_LOGO_SUPERH_VGA16) += logo_superh_vga16.o
obj-$(CONFIG_LOGO_SUPERH_CLUT224) += logo_superh_clut224.o
obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o
+obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
+
# How to generate logo's
# Use logo-cfiles to retrieve list of .c files to be built
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 80c03618eb53..2b0f799aa8da 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -34,8 +34,11 @@ extern const struct linux_logo logo_superh_vga16;
extern const struct linux_logo logo_superh_clut224;
extern const struct linux_logo logo_m32r_clut224;
-
-const struct linux_logo *fb_find_logo(int depth)
+/* logos are marked __initdata. Use __init_refok to tell
+ * modpost that it is intended that this function uses data
+ * marked __initdata.
+ */
+const struct linux_logo * __init_refok fb_find_logo(int depth)
{
const struct linux_logo *logo = NULL;
diff --git a/drivers/video/logo/logo_spe_clut224.ppm b/drivers/video/logo/logo_spe_clut224.ppm
new file mode 100644
index 000000000000..d36ad624a79c
--- /dev/null
+++ b/drivers/video/logo/logo_spe_clut224.ppm
@@ -0,0 +1,283 @@
+P3
+40 40
+255
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 6 6 6
+15 15 15 21 21 21 19 19 19 14 14 14 6 6 6 2 2 2
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 2 2 2 21 21 21 55 55 55
+56 56 56 54 54 54 53 53 53 60 60 60 56 56 56 25 25 25
+6 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 2 2 2 27 27 27 62 62 62 17 17 19
+2 2 6 2 2 6 2 2 6 2 2 6 16 16 18 57 57 57
+45 45 45 8 8 8 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 16 16 16 62 62 62 8 8 10 2 2 6
+2 2 6 2 2 6 2 2 6 12 12 14 67 67 67 16 16 17
+45 45 45 41 41 41 4 4 4 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 2 2 2 35 35 35 40 40 40 2 2 6 2 2 6
+2 2 6 2 2 6 2 2 6 15 15 17 70 70 70 27 27 27
+3 3 6 62 62 62 20 20 20 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 4 4 4 58 58 58 12 12 14 2 2 6 2 2 6
+2 2 6 2 2 6 2 2 6 4 4 7 4 4 7 2 2 6
+2 2 6 34 34 36 40 40 40 3 3 3 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 7 7 7 64 64 64 2 2 6 5 5 5 17 17 17
+3 3 6 2 2 6 2 2 6 15 15 15 21 21 21 7 7 10
+2 2 6 8 8 10 62 62 62 6 6 6 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 7 7 7 66 66 66 5 5 8 122 122 122 122 122 122
+9 9 11 3 3 6 104 96 81 179 179 179 122 122 122 13 13 13
+2 2 6 2 2 6 67 67 67 10 10 10 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 7 7 7 65 65 65 41 41 43 152 149 142 192 191 189
+48 48 49 23 23 24 228 210 210 86 86 86 192 191 189 59 59 61
+2 2 6 2 2 6 64 64 64 14 14 14 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 7 7 7 66 66 66 59 59 59 59 59 61 86 86 86
+99 84 50 78 66 28 152 149 142 5 5 8 122 122 122 104 96 81
+2 2 6 2 2 6 67 67 67 14 14 14 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 5 5 5 63 63 63 24 24 24 152 149 142 175 122 13
+238 184 12 220 170 13 226 181 52 112 86 32 194 165 151 46 46 47
+2 2 6 2 2 6 65 65 65 17 17 17 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 5 5 5 59 59 59 21 21 21 175 122 13 231 174 11
+240 192 13 237 183 61 240 192 13 240 192 13 234 179 16 81 64 9
+2 2 6 2 2 6 63 63 63 25 25 25 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 5 5 5 54 54 54 51 48 39 189 138 9 238 184 12
+240 192 13 240 192 13 240 192 13 215 161 11 207 152 19 81 64 9
+16 16 18 5 5 8 40 40 40 44 44 44 4 4 4 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 5 5 5 59 59 59 27 27 27 126 107 64 187 136 12
+220 170 13 201 147 20 189 138 9 198 154 46 199 182 125 70 70 70
+27 27 27 104 96 81 12 12 14 70 70 70 16 16 16 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 17 17 17 70 70 70 12 12 12 168 168 168 174 135 135
+175 122 13 175 122 13 178 151 83 192 191 189 233 233 233 179 179 179
+3 3 6 29 29 31 3 3 6 41 41 41 44 44 44 5 5 5
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+8 8 8 53 53 53 44 44 44 59 59 59 238 238 238 192 191 189
+192 191 189 192 191 189 221 205 205 240 240 240 253 253 253 253 253 253
+70 70 70 2 2 6 2 2 6 5 5 8 67 67 67 22 22 22
+2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 5 5
+38 38 38 56 56 56 7 7 9 221 205 205 253 253 253 233 233 233
+221 205 205 233 233 233 251 251 251 253 253 253 253 253 253 253 253 253
+192 191 189 2 2 6 2 2 6 2 2 6 25 25 25 64 64 64
+15 15 15 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 27 27 27
+66 66 66 7 7 9 86 86 86 252 252 252 253 253 253 253 253 253
+252 252 252 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+244 244 244 19 19 21 2 2 6 2 2 6 2 2 6 38 38 38
+54 54 54 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 14 14 14 62 62 62
+10 10 12 3 3 6 122 122 122 235 235 235 251 251 251 248 248 248
+235 235 235 248 248 248 252 252 252 246 246 246 233 233 233 237 228 228
+223 207 207 70 70 70 2 2 6 2 2 6 2 2 6 2 2 6
+46 46 47 38 38 38 4 4 4 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 2 2 2 33 33 33 44 44 44
+4 4 7 9 9 11 168 168 168 240 240 240 252 252 252 252 252 252
+246 246 246 253 253 253 253 253 253 251 251 251 245 241 241 233 233 233
+221 205 205 192 191 189 29 29 31 27 27 27 9 9 12 2 2 6
+3 3 6 65 65 65 15 15 15 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 6 6 6 59 59 59 19 19 21
+24 24 24 86 86 86 249 249 249 253 253 253 253 253 253 253 253 253
+253 253 253 228 210 210 241 230 230 253 253 253 253 253 253 253 253 253
+251 251 251 228 210 210 152 149 142 5 5 8 27 27 27 4 4 7
+2 2 6 46 46 47 34 34 34 2 2 2 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 16 16 16 67 67 67 19 19 21
+12 12 14 223 207 207 254 20 20 254 20 20 253 127 127 242 223 223
+254 20 20 253 127 127 254 48 48 242 223 223 254 86 86 254 20 20
+254 20 20 253 137 137 233 233 233 32 32 32 35 35 35 23 23 24
+2 2 6 15 15 15 60 60 60 6 6 6 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 4 4 4 38 38 38 48 48 49 22 22 22
+86 86 86 253 253 253 254 20 20 241 230 230 227 216 186 253 137 137
+253 137 137 253 253 253 253 137 137 253 137 137 254 48 48 253 253 253
+253 253 253 253 253 253 253 253 253 62 62 62 2 2 6 23 23 24
+2 2 6 2 2 6 62 62 62 17 17 17 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 14 14 14 70 70 70 14 14 14 16 16 18
+179 179 179 253 253 253 227 216 186 254 48 48 240 219 160 253 127 127
+254 20 20 253 137 137 254 86 86 231 203 141 254 20 20 254 20 20
+253 137 137 253 253 253 253 253 253 104 96 81 2 2 6 23 23 24
+2 2 6 2 2 6 46 46 47 27 27 27 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 4 4 4 39 39 39 42 42 43 19 19 21 13 13 13
+228 210 210 242 223 223 253 253 253 242 223 223 253 127 127 253 127 127
+253 127 127 253 127 127 253 137 137 253 253 253 254 48 48 253 253 253
+228 210 210 253 253 253 253 253 253 122 122 122 2 2 6 19 19 19
+2 2 6 2 2 6 39 39 39 38 38 38 3 3 3 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 8 8 8 60 60 60 3 3 6 33 33 33 38 38 38
+253 137 137 254 86 86 253 137 137 254 86 86 253 137 137 209 197 168
+253 127 127 253 253 253 253 253 253 253 253 253 253 127 127 254 86 86
+254 86 86 253 137 137 253 253 253 122 122 122 2 2 6 17 17 17
+2 2 6 2 2 6 34 34 36 42 42 43 3 3 3 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 13 13 13 59 59 59 2 2 6 9 9 12 56 56 56
+252 252 252 240 219 160 253 137 137 240 219 160 253 253 253 237 228 228
+254 86 86 253 253 253 253 253 253 253 253 253 253 253 253 242 223 223
+227 216 186 249 249 249 253 253 253 122 122 122 16 16 17 17 17 17
+12 12 14 3 3 6 39 39 39 38 38 38 3 3 3 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2
+5 5 5 22 22 22 104 96 81 187 136 12 207 152 19 51 48 39
+221 205 205 253 253 253 253 253 253 253 253 253 253 253 253 240 240 240
+250 247 243 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 250 247 243 240 219 160 99 84 50 5 5 8 2 2 6
+7 7 9 46 46 47 58 58 58 35 35 35 3 3 3 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 8 8 8 33 33 33
+58 58 58 86 86 86 170 136 53 239 182 13 246 190 14 220 170 13
+44 38 29 179 179 179 253 253 253 253 253 253 253 253 253 240 240 240
+253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+253 253 253 240 219 160 240 192 13 112 86 32 2 2 6 2 2 6
+3 3 6 41 33 20 220 170 13 53 53 53 4 4 4 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 2 2 2 32 32 32 150 116 44
+215 161 11 215 161 11 228 170 11 245 188 14 246 190 14 246 190 14
+187 136 12 9 9 11 122 122 122 251 251 251 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+248 248 248 211 196 135 239 182 13 175 122 13 6 5 6 2 2 6
+16 14 12 187 136 12 238 184 12 84 78 65 10 10 10 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 4 4 4 53 53 53 207 152 19
+242 185 13 245 188 14 246 190 14 246 190 14 246 190 14 246 190 14
+240 192 13 81 64 9 2 2 6 86 86 86 244 244 244 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+233 233 233 199 182 125 231 174 11 207 152 19 175 122 13 175 122 13
+201 147 20 239 182 13 244 187 14 150 116 44 35 35 35 6 6 6
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 5 5 5 53 53 53 201 147 20
+242 185 13 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
+246 190 14 220 170 13 13 11 10 2 2 6 152 149 142 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+235 235 235 199 182 125 228 170 11 234 177 12 226 168 11 226 168 11
+234 177 12 246 190 14 246 190 14 234 179 16 126 107 64 36 36 36
+6 6 6 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 3 3 3 48 48 49 189 142 35
+242 185 13 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
+246 190 14 246 190 14 140 112 39 36 36 36 192 191 189 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
+192 191 189 112 86 32 226 168 11 244 187 14 244 187 14 244 187 14
+245 188 14 246 190 14 246 190 14 246 190 14 242 185 13 150 116 44
+27 27 27 2 2 2 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 6 6 6 58 58 58 189 142 35
+239 182 13 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
+246 190 14 246 190 14 239 188 14 209 197 168 253 253 253 253 253 253
+253 253 253 253 253 253 253 253 253 253 253 253 252 252 252 168 168 168
+16 16 18 97 67 8 228 170 11 245 188 14 246 190 14 246 190 14
+246 190 14 246 190 14 246 190 14 246 190 14 244 187 14 198 154 46
+35 35 35 3 3 3 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 13 13 13 84 78 65 215 161 11
+244 187 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
+246 190 14 246 190 14 238 184 12 187 136 12 168 168 168 244 244 244
+253 253 253 252 252 252 240 240 240 179 179 179 67 67 67 2 2 6
+2 2 6 97 67 8 228 170 11 246 190 14 246 190 14 246 190 14
+246 190 14 246 190 14 245 188 14 234 177 12 189 142 35 86 77 61
+16 16 16 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 13 13 13 103 92 56 207 152 19
+228 170 11 234 177 12 239 182 13 242 186 14 245 188 14 246 190 14
+246 190 14 246 190 14 239 182 13 189 138 9 41 33 20 10 10 12
+30 30 31 23 23 24 5 5 8 2 2 6 2 2 6 2 2 6
+4 4 6 112 86 32 215 161 11 245 188 14 246 190 14 245 188 14
+239 182 13 228 170 11 189 142 35 104 96 81 48 48 49 17 17 17
+2 2 2 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 5 5 5 39 39 39 103 92 56
+141 109 44 175 122 13 187 136 12 189 138 9 207 152 19 228 170 11
+239 182 13 239 182 13 215 161 11 175 122 13 41 33 20 2 2 6
+15 15 17 20 20 22 20 20 22 20 20 22 20 20 22 8 8 10
+4 4 6 97 67 8 189 138 9 231 174 11 239 182 13 226 168 11
+189 138 9 126 107 64 59 59 59 21 21 21 5 5 5 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 5 5 5 17 17 17
+34 34 34 57 57 57 84 78 65 103 92 56 125 101 41 140 112 39
+175 122 13 175 122 13 175 122 13 97 67 8 72 67 58 84 78 65
+60 60 60 56 56 56 56 56 56 56 56 56 57 57 57 65 65 65
+86 86 86 95 73 34 175 122 13 187 136 12 187 136 12 175 122 13
+103 92 56 41 41 41 10 10 10 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 2 2 4 4 4 12 12 12 24 24 24 40 40 40 70 70 70
+86 77 61 95 73 34 88 72 41 72 67 58 36 36 36 10 10 10
+5 5 5 5 5 5 5 5 5 4 4 4 5 5 5 6 6 6
+22 22 22 61 61 59 88 72 41 112 86 32 112 86 32 84 78 65
+32 32 32 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 10 10 10
+21 21 21 33 33 33 31 31 31 16 16 16 2 2 2 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+2 2 2 12 12 12 30 30 31 40 40 40 32 32 32 16 16 16
+2 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+0 0 0 0 0 0 0 0 0 0 0 0
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index f7d647dda978..aa8c714d6245 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -170,7 +170,7 @@ static struct fb_fix_screeninfo macfb_fix = {
};
static struct fb_info fb_info;
-static u32 pseudo_palette[17];
+static u32 pseudo_palette[16];
static int inverse = 0;
static int vidtest = 0;
@@ -529,56 +529,63 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno >= fb_info->cmap.len)
return 1;
- switch (fb_info->var.bits_per_pixel) {
- case 1:
- /* We shouldn't get here */
- break;
- case 2:
- case 4:
- case 8:
- if (macfb_setpalette)
- macfb_setpalette(regno, red, green, blue, fb_info);
- else
- return 1;
- break;
- case 16:
- if (fb_info->var.red.offset == 10) {
- /* 1:5:5:5 */
- ((u32*) (fb_info->pseudo_palette))[regno] =
+ if (fb_info->var.bits_per_pixel <= 8) {
+ switch (fb_info->var.bits_per_pixel) {
+ case 1:
+ /* We shouldn't get here */
+ break;
+ case 2:
+ case 4:
+ case 8:
+ if (macfb_setpalette)
+ macfb_setpalette(regno, red, green, blue,
+ fb_info);
+ else
+ return 1;
+ break;
+ }
+ } else if (regno < 16) {
+ switch (fb_info->var.bits_per_pixel) {
+ case 16:
+ if (fb_info->var.red.offset == 10) {
+ /* 1:5:5:5 */
+ ((u32*) (fb_info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11) |
((transp != 0) << 15);
- } else {
- /* 0:5:6:5 */
- ((u32*) (fb_info->pseudo_palette))[regno] =
+ } else {
+ /* 0:5:6:5 */
+ ((u32*) (fb_info->pseudo_palette))[regno] =
((red & 0xf800) ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
+ }
+ break;
+ /* I'm pretty sure that one or the other of these
+ doesn't exist on 68k Macs */
+ case 24:
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ ((u32 *)(fb_info->pseudo_palette))[regno] =
+ (red << fb_info->var.red.offset) |
+ (green << fb_info->var.green.offset) |
+ (blue << fb_info->var.blue.offset);
+ break;
+ case 32:
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ ((u32 *)(fb_info->pseudo_palette))[regno] =
+ (red << fb_info->var.red.offset) |
+ (green << fb_info->var.green.offset) |
+ (blue << fb_info->var.blue.offset);
+ break;
}
- break;
- /* I'm pretty sure that one or the other of these
- doesn't exist on 68k Macs */
- case 24:
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- ((u32 *)(fb_info->pseudo_palette))[regno] =
- (red << fb_info->var.red.offset) |
- (green << fb_info->var.green.offset) |
- (blue << fb_info->var.blue.offset);
- break;
- case 32:
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- ((u32 *)(fb_info->pseudo_palette))[regno] =
- (red << fb_info->var.red.offset) |
- (green << fb_info->var.green.offset) |
- (blue << fb_info->var.blue.offset);
- break;
- }
- return 0;
+ }
+
+ return 0;
}
static struct fb_ops macfb_ops = {
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index ab2149531a04..083f60321ed8 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
*
*/
-int __devinit mac_find_mode(struct fb_var_screeninfo *var,
- struct fb_info *info, const char *mode_option,
- unsigned int default_bpp)
+int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
+ const char *mode_option, unsigned int default_bpp)
{
const struct fb_videomode *db = NULL;
unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index babeb81f467d..b86ba08aac9e 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
int *cmode);
extern int mac_map_monitor_sense(int sense);
-extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
- struct fb_info *info,
- const char *mode_option,
- unsigned int default_bpp);
+extern int mac_find_mode(struct fb_var_screeninfo *var,
+ struct fb_info *info,
+ const char *mode_option,
+ unsigned int default_bpp);
/*
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index c57aaadf410c..3660d2673bdc 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -91,7 +91,6 @@ static inline void matrox_cfb4_pal(u_int32_t* pal) {
for (i = 0; i < 16; i++) {
pal[i] = i * 0x11111111U;
}
- pal[i] = 0xFFFFFFFF;
}
static inline void matrox_cfb8_pal(u_int32_t* pal) {
@@ -100,7 +99,6 @@ static inline void matrox_cfb8_pal(u_int32_t* pal) {
for (i = 0; i < 16; i++) {
pal[i] = i * 0x01010101U;
}
- pal[i] = 0x0F0F0F0F;
}
static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area);
@@ -145,13 +143,10 @@ void matrox_cfbX_init(WPMINFO2) {
ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
}
break;
- case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5) {
+ case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5)
maccess = 0xC0000001;
- ACCESS_FBINFO(cmap[16]) = 0x7FFF7FFF;
- } else {
+ else
maccess = 0x40000001;
- ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
- }
mopmode = M_OPMODE_16BPP;
if (accel) {
ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
@@ -161,7 +156,6 @@ void matrox_cfbX_init(WPMINFO2) {
break;
case 24: maccess = 0x00000003;
mopmode = M_OPMODE_24BPP;
- ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
if (accel) {
ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
@@ -170,7 +164,6 @@ void matrox_cfbX_init(WPMINFO2) {
break;
case 32: maccess = 0x00000002;
mopmode = M_OPMODE_32BPP;
- ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
if (accel) {
ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index c8559a756b75..86ca7b179000 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -679,6 +679,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
mga_outb(M_DAC_VAL, blue);
break;
case 16:
+ if (regno >= 16)
+ break;
{
u_int16_t col =
(red << ACCESS_FBINFO(fbcon).var.red.offset) |
@@ -690,6 +692,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
break;
case 24:
case 32:
+ if (regno >= 16)
+ break;
ACCESS_FBINFO(cmap[regno]) =
(red << ACCESS_FBINFO(fbcon).var.red.offset) |
(green << ACCESS_FBINFO(fbcon).var.green.offset) |
@@ -1994,7 +1998,6 @@ static void matroxfb_unregister_device(struct matrox_fb_info* minfo) {
static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dummy) {
struct board* b;
- u_int8_t rev;
u_int16_t svid;
u_int16_t sid;
struct matrox_fb_info* minfo;
@@ -2005,11 +2008,10 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
#endif
DBG(__FUNCTION__)
- pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
svid = pdev->subsystem_vendor;
sid = pdev->subsystem_device;
for (b = dev_list; b->vendor; b++) {
- if ((b->vendor != pdev->vendor) || (b->device != pdev->device) || (b->rev < rev)) continue;
+ if ((b->vendor != pdev->vendor) || (b->device != pdev->device) || (b->rev < pdev->revision)) continue;
if (b->svid)
if ((b->svid != svid) || (b->sid != sid)) continue;
break;
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 9c25c2f7966b..d59577c8de86 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -518,7 +518,7 @@ struct matrox_fb_info {
dll:1;
} memory;
} values;
- u_int32_t cmap[17];
+ u_int32_t cmap[16];
};
#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon)
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 03ae55b168ff..4b3344e03695 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -163,11 +163,6 @@ static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004;
}
-static void matroxfb_dh_cfbX_init(struct matroxfb_dh_fb_info* m2info) {
- /* no acceleration for secondary head... */
- m2info->cmap[16] = 0xFFFFFFFF;
-}
-
static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
struct fb_var_screeninfo* var) {
unsigned int pos;
@@ -385,7 +380,6 @@ static int matroxfb_dh_set_par(struct fb_info* info) {
}
}
up_read(&ACCESS_FBINFO(altout).lock);
- matroxfb_dh_cfbX_init(m2info);
}
m2info->initialized = 1;
return 0;
diff --git a/drivers/video/matrox/matroxfb_crtc2.h b/drivers/video/matrox/matroxfb_crtc2.h
index 608e40bb20e9..1005582e843e 100644
--- a/drivers/video/matrox/matroxfb_crtc2.h
+++ b/drivers/video/matrox/matroxfb_crtc2.h
@@ -2,8 +2,6 @@
#define __MATROXFB_CRTC2_H__
#include <linux/ioctl.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include "matroxfb_base.h"
struct matroxfb_dh_fb_info {
@@ -30,7 +28,7 @@ struct matroxfb_dh_fb_info {
unsigned int interlaced:1;
- u_int32_t cmap[17];
+ u_int32_t cmap[16];
};
#endif /* __MATROXFB_CRTC2_H__ */
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 5d29a26b8cdf..de0d755f9019 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -273,8 +273,11 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
}
}
}
+
+ /* if h2/post/in/feed have not been assigned, return zero (error) */
if (besth2 < 2)
return 0;
+
dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant);
return fxtal * (*feed) / (*in) * ctl->den;
}
@@ -284,7 +287,7 @@ static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
unsigned int* in, unsigned int* feed, unsigned int* post,
unsigned int* htotal2) {
unsigned int fvco;
- unsigned int p;
+ unsigned int uninitialized_var(p);
fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
if (!fvco)
@@ -715,7 +718,9 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
m->regs[0x82] = 0x81;
for (x = 0; x < 8; x++) {
- unsigned int a, b, c, h2;
+ unsigned int c;
+ unsigned int uninitialized_var(a), uninitialized_var(b),
+ uninitialized_var(h2);
unsigned int h = ht + 2 + x;
if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 43f62d8ee41d..443e3c85a9a0 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -50,7 +50,7 @@ static int nvidia_bl_get_level_brightness(struct nvidia_par *par,
static int nvidia_bl_update_status(struct backlight_device *bd)
{
- struct nvidia_par *par = class_get_devdata(&bd->class_dev);
+ struct nvidia_par *par = bl_get_data(bd);
u32 tmp_pcrt, tmp_pmc, fpcontrol;
int level;
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index aff11bbf59a7..d1a10549f543 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -150,8 +150,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
if (((par->Chipset & 0xfff0) == 0x0290) ||
- ((par->Chipset & 0xfff0) == 0x0390) ||
- ((par->Chipset & 0xfff0) == 0x02E0)) {
+ ((par->Chipset & 0xfff0) == 0x0390)) {
MB = 1;
NB = 1;
} else {
@@ -161,7 +160,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
*MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
pll = NV_RD32(par->PMC, 0x4000);
- P = (pll >> 16) & 0x03;
+ P = (pll >> 16) & 0x07;
pll = NV_RD32(par->PMC, 0x4004);
M = pll & 0xFF;
N = (pll >> 8) & 0xFF;
@@ -892,11 +891,17 @@ void NVCalcStateExt(struct nvidia_par *par,
state->general = bpp == 16 ? 0x00101100 : 0x00100100;
state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
break;
+ case NV_ARCH_40:
+ if (!par->FlatPanel)
+ state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
+ 0xeffffeff;
+ /* fallthrough */
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
default:
- if ((par->Chipset & 0xfff0) == 0x0240) {
+ if ((par->Chipset & 0xfff0) == 0x0240 ||
+ (par->Chipset & 0xfff0) == 0x03d0) {
state->arbitration0 = 256;
state->arbitration1 = 0x0480;
} else if (((par->Chipset & 0xffff) == 0x01A0) ||
@@ -939,7 +944,7 @@ void NVCalcStateExt(struct nvidia_par *par,
void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
{
- int i;
+ int i, j;
NV_WR32(par->PMC, 0x0140, 0x00000000);
NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
@@ -951,7 +956,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
if (par->Architecture == NV_ARCH_04) {
- NV_WR32(par->PFB, 0x0200, state->config);
+ if (state)
+ NV_WR32(par->PFB, 0x0200, state->config);
} else if ((par->Architecture < NV_ARCH_40) ||
(par->Chipset & 0xfff0) == 0x0040) {
for (i = 0; i < 8; i++) {
@@ -964,8 +970,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
- ((par->Chipset & 0xfff0) == 0x02E0) ||
- ((par->Chipset & 0xfff0) == 0x0290))
+ ((par->Chipset & 0xfff0) == 0x0290) ||
+ ((par->Chipset & 0xfff0) == 0x0390) ||
+ ((par->Chipset & 0xfff0) == 0x03D0))
regions = 15;
for(i = 0; i < regions; i++) {
NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
@@ -1206,16 +1213,20 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
if (par->Architecture >= NV_ARCH_40) {
- u32 tmp;
-
NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
+ NV_WR32(par->PGRAPH, 0x0bc4,
+ NV_RD32(par->PGRAPH, 0x0bc4) |
+ 0x00008000);
- tmp = NV_RD32(par->REGS, 0x1540) & 0xff;
- for(i = 0; tmp && !(tmp & 1); tmp >>= 1, i++);
- NV_WR32(par->PGRAPH, 0x5000, i);
+ j = NV_RD32(par->REGS, 0x1540) & 0xff;
+
+ if (j) {
+ for (i = 0; !(j & 1); j >>= 1, i++);
+ NV_WR32(par->PGRAPH, 0x5000, i);
+ }
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09b0,
@@ -1250,6 +1261,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
case 0x0160:
case 0x01D0:
case 0x0240:
+ case 0x03D0:
NV_WR32(par->PMC, 0x1700,
NV_RD32(par->PFB, 0x020C));
NV_WR32(par->PMC, 0x1704, 0);
@@ -1269,7 +1281,6 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0x00000108);
break;
case 0x0220:
- case 0x0230:
NV_WR32(par->PGRAPH, 0x0860, 0);
NV_WR32(par->PGRAPH, 0x0864, 0);
NV_WR32(par->PRAMDAC, 0x0608,
@@ -1277,8 +1288,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0x00100000);
break;
case 0x0090:
- case 0x02E0:
case 0x0290:
+ case 0x0390:
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
@@ -1355,8 +1366,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
} else {
if (((par->Chipset & 0xfff0) == 0x0090) ||
((par->Chipset & 0xfff0) == 0x01D0) ||
- ((par->Chipset & 0xfff0) == 0x02E0) ||
- ((par->Chipset & 0xfff0) == 0x0290)) {
+ ((par->Chipset & 0xfff0) == 0x0290) ||
+ ((par->Chipset & 0xfff0) == 0x0390) ||
+ ((par->Chipset & 0xfff0) == 0x03D0)) {
for (i = 0; i < 60; i++) {
NV_WR32(par->PGRAPH,
0x0D00 + i*4,
@@ -1407,8 +1419,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
} else {
if ((par->Chipset & 0xfff0) == 0x0090 ||
(par->Chipset & 0xfff0) == 0x01D0 ||
- (par->Chipset & 0xfff0) == 0x02E0 ||
- (par->Chipset & 0xfff0) == 0x0290) {
+ (par->Chipset & 0xfff0) == 0x0290 ||
+ (par->Chipset & 0xfff0) == 0x0390) {
NV_WR32(par->PGRAPH, 0x0DF0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0DF4,
@@ -1495,6 +1507,12 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
+
+ if (!state) {
+ par->CurrentState = NULL;
+ return;
+ }
+
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
NV_WR32(par->PCRTC0, 0x0860, state->head);
@@ -1566,6 +1584,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
VGA_WR08(par->PCIO, 0x03D5, state->interlace);
if (!par->FlatPanel) {
+ if (par->Architecture >= NV_ARCH_40)
+ NV_WR32(par->PRAMDAC0, 0x0580, state->control);
+
NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
if (par->twoHeads)
@@ -1631,6 +1652,9 @@ void NVUnloadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) {
state->scale = NV_RD32(par->PRAMDAC, 0x0848);
state->config = NV_RD32(par->PFB, 0x0200);
+ if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
+ state->control = NV_RD32(par->PRAMDAC0, 0x0580);
+
if (par->Architecture >= NV_ARCH_10) {
if (par->twoHeads) {
state->head = NV_RD32(par->PCRTC0, 0x0860);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 707e2c8a13ed..82579d3a9970 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -166,11 +166,13 @@ u8 NVReadDacData(struct nvidia_par *par)
static int NVIsConnected(struct nvidia_par *par, int output)
{
volatile u32 __iomem *PRAMDAC = par->PRAMDAC0;
- u32 reg52C, reg608;
+ u32 reg52C, reg608, dac0_reg608 = 0;
int present;
- if (output)
- PRAMDAC += 0x800;
+ if (output) {
+ dac0_reg608 = NV_RD32(PRAMDAC, 0x0608);
+ PRAMDAC += 0x800;
+ }
reg52C = NV_RD32(PRAMDAC, 0x052C);
reg608 = NV_RD32(PRAMDAC, 0x0608);
@@ -194,8 +196,8 @@ static int NVIsConnected(struct nvidia_par *par, int output)
else
printk("nvidiafb: CRTC%i analog not found\n", output);
- NV_WR32(par->PRAMDAC0, 0x0608, NV_RD32(par->PRAMDAC0, 0x0608) &
- 0x0000EFFF);
+ if (output)
+ NV_WR32(par->PRAMDAC0, 0x0608, dac0_reg608);
NV_WR32(PRAMDAC, 0x052C, reg52C);
NV_WR32(PRAMDAC, 0x0608, reg608);
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h
index 38f7cc0a2331..2fdf77ec39fc 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/nvidia/nv_type.h
@@ -86,6 +86,7 @@ typedef struct _riva_hw_state {
u32 timingV;
u32 displayV;
u32 crtcSync;
+ u32 control;
} RIVA_HW_STATE;
struct riva_regs {
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 41f63658572f..a7fe214f0f77 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -674,6 +674,7 @@ static int nvidiafb_set_par(struct fb_info *info)
info->fbops->fb_sync = nvidiafb_sync;
info->pixmap.scan_align = 4;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
+ info->flags |= FBINFO_READS_FAST;
NVResetGraphics(info);
} else {
info->fbops->fb_imageblit = cfb_imageblit;
@@ -682,6 +683,7 @@ static int nvidiafb_set_par(struct fb_info *info)
info->fbops->fb_sync = NULL;
info->pixmap.scan_align = 1;
info->flags |= FBINFO_HWACCEL_DISABLED;
+ info->flags &= ~FBINFO_READS_FAST;
}
par->cursor_reset = 1;
@@ -1193,7 +1195,8 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
printk(KERN_INFO PFX "Device ID: %x \n", id);
- if ((id & 0xfff0) == 0x00f0) {
+ if ((id & 0xfff0) == 0x00f0 ||
+ (id & 0xfff0) == 0x02e0) {
/* pci-e */
id = NV_RD32(par->REGS, 0x1800);
@@ -1238,18 +1241,16 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
case 0x0040: /* GeForce 6800 */
case 0x00C0: /* GeForce 6800 */
case 0x0120: /* GeForce 6800 */
- case 0x0130:
case 0x0140: /* GeForce 6600 */
case 0x0160: /* GeForce 6200 */
case 0x01D0: /* GeForce 7200, 7300, 7400 */
- case 0x02E0: /* GeForce 7300 GT */
case 0x0090: /* GeForce 7800 */
case 0x0210: /* GeForce 6800 */
case 0x0220: /* GeForce 6200 */
- case 0x0230:
case 0x0240: /* GeForce 6100 */
case 0x0290: /* GeForce 7900 */
case 0x0390: /* GeForce 7600 */
+ case 0x03D0:
arch = NV_ARCH_40;
break;
case 0x0020: /* TNT, TNT2 */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 885b42836cbb..452433d46973 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -271,7 +271,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
return;
}
- size = sizeof(struct fb_info) + sizeof(u32) * 17;
+ size = sizeof(struct fb_info) + sizeof(u32) * 16;
info = kmalloc(size, GFP_ATOMIC);
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
new file mode 100644
index 000000000000..7f4d25b8a184
--- /dev/null
+++ b/drivers/video/omap/Kconfig
@@ -0,0 +1,58 @@
+config FB_OMAP
+ tristate "OMAP frame buffer support (EXPERIMENTAL)"
+ depends on FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame buffer driver for OMAP based boards.
+
+config FB_OMAP_BOOTLOADER_INIT
+ bool "Check bootloader initializaion"
+ depends on FB_OMAP
+ help
+ Say Y here if you want to enable checking whether the bootloader has
+ already initialized the display controller. In this case the
+ driver will skip the initialization.
+
+config FB_OMAP_CONSISTENT_DMA_SIZE
+ int "Consistent DMA memory size (MB)"
+ depends on FB_OMAP
+ range 1 14
+ default 2
+ help
+ Increase the DMA consistent memory size according to your video
+ memory needs, for example if you want to use multiple planes.
+ The size must be 2MB aligned.
+ If unsure, say 1.
+
+config FB_OMAP_DMA_TUNE
+ bool "Set DMA SDRAM access priority high"
+ depends on FB_OMAP && ARCH_OMAP1
+ help
+ On systems in which video memory is in system memory
+ (SDRAM) this will speed up graphics DMA operations.
+ If you have such a system and want to use rotation
+ answer yes. Answer no if you have a dedicated video
+ memory, or don't use any of the accelerated features.
+
+config FB_OMAP_LCDC_EXTERNAL
+ bool "External LCD controller support"
+ depends on FB_OMAP
+ help
+ Say Y here if you want to have support for boards with an
+ external LCD controller connected to the SoSSI/RFBI interface.
+
+config FB_OMAP_LCDC_HWA742
+ bool "Epson HWA742 LCD controller support"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here if you want to have support for the external
+ Epson HWA742 LCD controller.
+
+config FB_OMAP_LCDC_BLIZZARD
+ bool "Epson Blizzard LCD controller support"
+ depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
+ help
+ Say Y here if you want to have support for the external
+ Epson Blizzard LCD controller.
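+
+# Illustrative example only (not part of the driver): a board using the
+# internal OMAP LCD controller would typically end up with a fragment
+# like the following in its .config:
+#	CONFIG_FB_OMAP=y
+#	CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2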
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
new file mode 100644
index 000000000000..99da8b6d2c36
--- /dev/null
+++ b/drivers/video/omap/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the new OMAP framebuffer device driver
+#
+
+obj-$(CONFIG_FB_OMAP) += omapfb.o
+
+objs-yy := omapfb_main.o
+
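+# Note on the objs-y$(CONFIG_FOO) lines below: when CONFIG_FOO=y the
+# variable name expands to objs-yy, which is gathered into omapfb-objs
+# at the end of this file; any other value lands in an unused list.
+# Lines carrying two config variables need both options to be =y.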
+objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o
+objs-y$(CONFIG_ARCH_OMAP2) += dispc.o
+
+objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
+objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
+
+objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
+objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
+
+objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
+objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
+objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
+objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
+objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
+objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
+objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
+objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
+objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
+
+omapfb-objs := $(objs-yy)
+
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
new file mode 100644
index 000000000000..e682940a97a4
--- /dev/null
+++ b/drivers/video/omap/blizzard.c
@@ -0,0 +1,1568 @@
+/*
+ * Epson Blizzard LCD controller driver
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Authors: Juha Yrjola <juha.yrjola@nokia.com>
+ * Imre Deak <imre.deak@nokia.com>
+ * YUV support: Jussi Laako <jussi.laako@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/blizzard.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME "blizzard"
+
+#define BLIZZARD_REV_CODE 0x00
+#define BLIZZARD_CONFIG 0x02
+#define BLIZZARD_PLL_DIV 0x04
+#define BLIZZARD_PLL_LOCK_RANGE 0x06
+#define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08
+#define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a
+#define BLIZZARD_PLL_MODE 0x0c
+#define BLIZZARD_CLK_SRC 0x0e
+#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
+#define BLIZZARD_MEM_BANK0_STATUS 0x14
+#define BLIZZARD_HDISP 0x2a
+#define BLIZZARD_HNDP 0x2c
+#define BLIZZARD_VDISP0 0x2e
+#define BLIZZARD_VDISP1 0x30
+#define BLIZZARD_VNDP 0x32
+#define BLIZZARD_HSW 0x34
+#define BLIZZARD_VSW 0x38
+#define BLIZZARD_DISPLAY_MODE 0x68
+#define BLIZZARD_INPUT_WIN_X_START_0 0x6c
+#define BLIZZARD_DATA_SOURCE_SELECT 0x8e
+#define BLIZZARD_DISP_MEM_DATA_PORT 0x90
+#define BLIZZARD_DISP_MEM_READ_ADDR0 0x92
+#define BLIZZARD_POWER_SAVE 0xE6
+#define BLIZZARD_NDISP_CTRL_STATUS 0xE8
+
+/* Data source select */
+/* For S1D13745 */
+#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00
+#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01
+#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04
+#define BLIZZARD_SRC_DISABLE_OVERLAY 0x05
+/* For S1D13744 */
+#define BLIZZARD_SRC_WRITE_LCD 0x00
+#define BLIZZARD_SRC_BLT_LCD 0x06
+
+#define BLIZZARD_COLOR_RGB565 0x01
+#define BLIZZARD_COLOR_YUV420 0x09
+
+#define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */
+#define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */
+
+#define BLIZZARD_AUTO_UPDATE_TIME (HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE 24
+#define IRQ_REQ_POOL_SIZE 4
+
+#define REQ_FROM_IRQ_POOL 0x01
+
+#define REQ_COMPLETE 0
+#define REQ_PENDING 1
+
+struct blizzard_reg_list {
+ int start;
+ int end;
+};
+
+/* These need to be saved / restored separately from the rest. */
+static struct blizzard_reg_list blizzard_pll_regs[] = {
+ {
+ .start = 0x04, /* Don't save PLL ctrl (0x0C) */
+ .end = 0x0a,
+ },
+ {
+ .start = 0x0e, /* Clock configuration */
+ .end = 0x0e,
+ },
+};
+
+static struct blizzard_reg_list blizzard_gen_regs[] = {
+ {
+ .start = 0x18, /* SDRAM control */
+ .end = 0x20,
+ },
+ {
+ .start = 0x28, /* LCD Panel configuration */
+ .end = 0x5a, /* HSSI interface, TV configuration */
+ },
+};
+
+static u8 blizzard_reg_cache[0x5a / 2];
+
+struct update_param {
+ int plane;
+ int x, y, width, height;
+ int out_x, out_y;
+ int out_width, out_height;
+ int color_mode;
+ int bpp;
+ int flags;
+};
+
+struct blizzard_request {
+ struct list_head entry;
+ unsigned int flags;
+
+ int (*handler)(struct blizzard_request *req);
+ void (*complete)(void *data);
+ void *complete_data;
+
+ union {
+ struct update_param update;
+ struct completion *sync;
+ } par;
+};
+
+struct plane_info {
+ unsigned long offset;
+ int pos_x, pos_y;
+ int width, height;
+ int out_width, out_height;
+ int scr_width;
+ int color_mode;
+ int bpp;
+};
+
+struct blizzard_struct {
+ enum omapfb_update_mode update_mode;
+ enum omapfb_update_mode update_mode_before_suspend;
+
+ struct timer_list auto_update_timer;
+ int stop_auto_update;
+ struct omapfb_update_window auto_update_window;
+ int enabled_planes;
+ int vid_nonstd_color;
+ int vid_scaled;
+ int last_color_mode;
+ int zoom_on;
+ int screen_width;
+ int screen_height;
+ unsigned te_connected:1;
+ unsigned vsync_only:1;
+
+ struct plane_info plane[OMAPFB_PLANE_NUM];
+
+ struct blizzard_request req_pool[REQ_POOL_SIZE];
+ struct list_head pending_req_list;
+ struct list_head free_req_list;
+ struct semaphore req_sema;
+ spinlock_t req_lock;
+
+ unsigned long sys_ck_rate;
+ struct extif_timings reg_timings, lut_timings;
+
+ u32 max_transmit_size;
+ u32 extif_clk_period;
+ int extif_clk_div;
+ unsigned long pix_tx_time;
+ unsigned long line_upd_time;
+
+ struct omapfb_device *fbdev;
+ struct lcd_ctrl_extif *extif;
+ struct lcd_ctrl *int_ctrl;
+
+ void (*power_up)(struct device *dev);
+ void (*power_down)(struct device *dev);
+
+ int version;
+} blizzard;
+
+struct lcd_ctrl blizzard_ctrl;
+
+static u8 blizzard_read_reg(u8 reg)
+{
+ u8 data;
+
+ blizzard.extif->set_bits_per_cycle(8);
+ blizzard.extif->write_command(&reg, 1);
+ blizzard.extif->read_data(&data, 1);
+
+ return data;
+}
+
+static void blizzard_write_reg(u8 reg, u8 val)
+{
+ blizzard.extif->set_bits_per_cycle(8);
+ blizzard.extif->write_command(&reg, 1);
+ blizzard.extif->write_data(&val, 1);
+}
+
+static void blizzard_restart_sdram(void)
+{
+ unsigned long tmo;
+
+ blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
+ udelay(50);
+ blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1);
+ tmo = jiffies + msecs_to_jiffies(200);
+ while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) {
+ if (time_after(jiffies, tmo)) {
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: SDRAM not ready\n");
+ break;
+ }
+ msleep(1);
+ }
+}
+
+static void blizzard_stop_sdram(void)
+{
+ blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
+}
+
+/* Wait until the last window has been completely written into the
+ * controller's SDRAM and we can start transferring the next window.
+ */
+static void blizzard_wait_line_buffer(void)
+{
+ unsigned long tmo = jiffies + msecs_to_jiffies(30);
+
+ while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) {
+ if (time_after(jiffies, tmo)) {
+ if (printk_ratelimit())
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: line buffer not ready\n");
+ break;
+ }
+ }
+}
+
+/* Wait until the YYC color space converter is idle. */
+static void blizzard_wait_yyc(void)
+{
+ unsigned long tmo = jiffies + msecs_to_jiffies(30);
+
+ while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 4)) {
+ if (time_after(jiffies, tmo)) {
+ if (printk_ratelimit())
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: YYC not ready\n");
+ break;
+ }
+ }
+}
+
+static void disable_overlay(void)
+{
+ blizzard_write_reg(BLIZZARD_DATA_SOURCE_SELECT,
+ BLIZZARD_SRC_DISABLE_OVERLAY);
+}
+
+static void set_window_regs(int x_start, int y_start, int x_end, int y_end,
+ int x_out_start, int y_out_start,
+ int x_out_end, int y_out_end, int color_mode,
+ int zoom_off, int flags)
+{
+ u8 tmp[18];
+ u8 cmd;
+
+ x_end--;
+ y_end--;
+ tmp[0] = x_start;
+ tmp[1] = x_start >> 8;
+ tmp[2] = y_start;
+ tmp[3] = y_start >> 8;
+ tmp[4] = x_end;
+ tmp[5] = x_end >> 8;
+ tmp[6] = y_end;
+ tmp[7] = y_end >> 8;
+
+ x_out_end--;
+ y_out_end--;
+ tmp[8] = x_out_start;
+ tmp[9] = x_out_start >> 8;
+ tmp[10] = y_out_start;
+ tmp[11] = y_out_start >> 8;
+ tmp[12] = x_out_end;
+ tmp[13] = x_out_end >> 8;
+ tmp[14] = y_out_end;
+ tmp[15] = y_out_end >> 8;
+
+ tmp[16] = color_mode;
+ if (zoom_off && blizzard.version == BLIZZARD_VERSION_S1D13745)
+ tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
+ else if (flags & OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY)
+ tmp[17] = BLIZZARD_SRC_WRITE_OVERLAY_ENABLE;
+ else
+ tmp[17] = blizzard.version == BLIZZARD_VERSION_S1D13744 ?
+ BLIZZARD_SRC_WRITE_LCD :
+ BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
+
+ blizzard.extif->set_bits_per_cycle(8);
+ cmd = BLIZZARD_INPUT_WIN_X_START_0;
+ blizzard.extif->write_command(&cmd, 1);
+ blizzard.extif->write_data(tmp, 18);
+}
+
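+/* Decide how the transfer is synchronized with the display refresh.
+ * If only vsync was requested, or a line can be sent faster than the
+ * panel scans one out, or the whole window transfer completes before
+ * the scan reaches the window's last output line, plain vsync is
+ * enough; otherwise arm a tear sync trigger on the line just after
+ * the window's top row (y + 1).
+ */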
+static void enable_tearsync(int y, int width, int height, int screen_height,
+ int out_height, int force_vsync)
+{
+ u8 b;
+
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+ b |= 1 << 3;
+ blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+
+ if (likely(blizzard.vsync_only || force_vsync)) {
+ blizzard.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if (width * blizzard.pix_tx_time < blizzard.line_upd_time) {
+ blizzard.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if ((width * blizzard.pix_tx_time / 1000) * height <
+ (y + out_height) * (blizzard.line_upd_time / 1000)) {
+ blizzard.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ blizzard.extif->enable_tearsync(1, y + 1);
+}
+
+static void disable_tearsync(void)
+{
+ u8 b;
+
+ blizzard.extif->enable_tearsync(0, 0);
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+ b &= ~(1 << 3);
+ blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+}
+
+static inline void set_extif_timings(const struct extif_timings *t);
+
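+/* Allocate a request from the shared pool.  Process context callers
+ * first take req_sema, which admits at most REQ_POOL_SIZE -
+ * IRQ_REQ_POOL_SIZE holders, so a few slots always remain free for
+ * interrupt context, which cannot sleep here.
+ */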
+static inline struct blizzard_request *alloc_req(void)
+{
+ unsigned long flags;
+ struct blizzard_request *req;
+ int req_flags = 0;
+
+ if (!in_interrupt())
+ down(&blizzard.req_sema);
+ else
+ req_flags = REQ_FROM_IRQ_POOL;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+ BUG_ON(list_empty(&blizzard.free_req_list));
+ req = list_entry(blizzard.free_req_list.next,
+ struct blizzard_request, entry);
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+ INIT_LIST_HEAD(&req->entry);
+ req->flags = req_flags;
+
+ return req;
+}
+
+static inline void free_req(struct blizzard_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+
+ list_del(&req->entry);
+ list_add(&req->entry, &blizzard.free_req_list);
+ if (!(req->flags & REQ_FROM_IRQ_POOL))
+ up(&blizzard.req_sema);
+
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+}
+
+static void process_pending_requests(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+
+ while (!list_empty(&blizzard.pending_req_list)) {
+ struct blizzard_request *req;
+ void (*complete)(void *);
+ void *complete_data;
+
+ req = list_entry(blizzard.pending_req_list.next,
+ struct blizzard_request, entry);
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+ if (req->handler(req) == REQ_PENDING)
+ return;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+}
+
+static void submit_req_list(struct list_head *head)
+{
+ unsigned long flags;
+ int process = 1;
+
+ spin_lock_irqsave(&blizzard.req_lock, flags);
+ if (likely(!list_empty(&blizzard.pending_req_list)))
+ process = 0;
+ list_splice_init(head, blizzard.pending_req_list.prev);
+ spin_unlock_irqrestore(&blizzard.req_lock, flags);
+
+ if (process)
+ process_pending_requests();
+}
+
+static void request_complete(void *data)
+{
+ struct blizzard_request *req = (struct blizzard_request *)data;
+ void (*complete)(void *);
+ void *complete_data;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ process_pending_requests();
+}
+
+static int do_full_screen_update(struct blizzard_request *req)
+{
+ int i;
+ int flags;
+
+ for (i = 0; i < 3; i++) {
+ struct plane_info *p = &blizzard.plane[i];
+ if (!(blizzard.enabled_planes & (1 << i))) {
+ blizzard.int_ctrl->enable_plane(i, 0);
+ continue;
+ }
+ dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n",
+ p->width, p->height);
+ blizzard.int_ctrl->setup_plane(i,
+ OMAPFB_CHANNEL_OUT_LCD, p->offset,
+ p->scr_width, p->pos_x, p->pos_y,
+ p->width, p->height,
+ p->color_mode);
+ blizzard.int_ctrl->enable_plane(i, 1);
+ }
+
+ dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n",
+ blizzard.screen_width, blizzard.screen_height);
+ blizzard_wait_line_buffer();
+ flags = req->par.update.flags;
+ if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+ enable_tearsync(0, blizzard.screen_width,
+ blizzard.screen_height,
+ blizzard.screen_height,
+ blizzard.screen_height,
+ flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+ else
+ disable_tearsync();
+
+ set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height,
+ 0, 0, blizzard.screen_width, blizzard.screen_height,
+ BLIZZARD_COLOR_RGB565, blizzard.zoom_on, flags);
+ blizzard.zoom_on = 0;
+
+ blizzard.extif->set_bits_per_cycle(16);
+ /* set_window_regs has left the register index at the right
+ * place, so no need to set it here.
+ */
+ blizzard.extif->transfer_area(blizzard.screen_width,
+ blizzard.screen_height,
+ request_complete, req);
+ return REQ_PENDING;
+}
+
+/* Setup all planes with an overlapping area with the update window. */
+static int do_partial_update(struct blizzard_request *req, int plane,
+ int x, int y, int w, int h,
+ int x_out, int y_out, int w_out, int h_out,
+ int wnd_color_mode, int bpp)
+{
+ int i;
+ int gx1, gy1, gx2, gy2;
+ int gx1_out, gy1_out, gx2_out, gy2_out;
+ int color_mode;
+ int flags;
+ int zoom_off;
+
+ /* Global coordinates, relative to pixel 0,0 of the LCD */
+ gx1 = x + blizzard.plane[plane].pos_x;
+ gy1 = y + blizzard.plane[plane].pos_y;
+ gx2 = gx1 + w;
+ gy2 = gy1 + h;
+
+ flags = req->par.update.flags;
+ if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
+ gx1_out = gx1;
+ gy1_out = gy1;
+ gx2_out = gx1 + w * 2;
+ gy2_out = gy1 + h * 2;
+ } else {
+ gx1_out = x_out + blizzard.plane[plane].pos_x;
+ gy1_out = y_out + blizzard.plane[plane].pos_y;
+ gx2_out = gx1_out + w_out;
+ gy2_out = gy1_out + h_out;
+ }
+ zoom_off = blizzard.zoom_on && gx1 == 0 && gy1 == 0 &&
+ w == blizzard.screen_width && h == blizzard.screen_height;
+ blizzard.zoom_on = (!zoom_off && blizzard.zoom_on) ||
+ (w < w_out || h < h_out);
+
+ for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
+ struct plane_info *p = &blizzard.plane[i];
+ int px1, py1;
+ int px2, py2;
+ int pw, ph;
+ int pposx, pposy;
+ unsigned long offset;
+
+ if (!(blizzard.enabled_planes & (1 << i)) ||
+ (wnd_color_mode && i != plane)) {
+ blizzard.int_ctrl->enable_plane(i, 0);
+ continue;
+ }
+ /* Plane coordinates */
+ if (i == plane) {
+ /* Plane in which we are doing the update.
+ * Local coordinates are the one in the update
+ * request.
+ */
+ px1 = x;
+ py1 = y;
+ px2 = x + w;
+ py2 = y + h;
+ pposx = 0;
+ pposy = 0;
+ } else {
+ /* Check if this plane has an overlapping part */
+ px1 = gx1 - p->pos_x;
+ py1 = gy1 - p->pos_y;
+ px2 = gx2 - p->pos_x;
+ py2 = gy2 - p->pos_y;
+ if (px1 >= p->width || py1 >= p->height ||
+ px2 <= 0 || py2 <= 0) {
+ blizzard.int_ctrl->enable_plane(i, 0);
+ continue;
+ }
+ /* Calculate the coordinates for the overlapping
+ * part in the plane's local coordinates.
+ */
+ pposx = -px1;
+ pposy = -py1;
+ if (px1 < 0)
+ px1 = 0;
+ if (py1 < 0)
+ py1 = 0;
+ if (px2 > p->width)
+ px2 = p->width;
+ if (py2 > p->height)
+ py2 = p->height;
+ if (pposx < 0)
+ pposx = 0;
+ if (pposy < 0)
+ pposy = 0;
+ }
+ pw = px2 - px1;
+ ph = py2 - py1;
+ offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8;
+ if (wnd_color_mode)
+ /* Window embedded in the plane with a differing
+ * color mode / bpp. Calculate the number of DMA
+ * transfer elements in terms of the plane's bpp.
+ */
+ pw = (pw + 1) * bpp / p->bpp;
+#ifdef VERBOSE
+ dev_dbg(blizzard.fbdev->dev,
+ "plane %d offset %#08lx pposx %d pposy %d "
+ "px1 %d py1 %d pw %d ph %d\n",
+ i, offset, pposx, pposy, px1, py1, pw, ph);
+#endif
+ blizzard.int_ctrl->setup_plane(i,
+ OMAPFB_CHANNEL_OUT_LCD, offset,
+ p->scr_width,
+ pposx, pposy, pw, ph,
+ p->color_mode);
+
+ blizzard.int_ctrl->enable_plane(i, 1);
+ }
+
+ switch (wnd_color_mode) {
+ case OMAPFB_COLOR_YUV420:
+ color_mode = BLIZZARD_COLOR_YUV420;
+ /* Currently only the 16 bits/pixel cycle format is
+ * supported on the external interface. Adjust the number
+ * of transfer elements per line for 12bpp format.
+ */
+ w = (w + 1) * 3 / 4;
+ break;
+ default:
+ color_mode = BLIZZARD_COLOR_RGB565;
+ break;
+ }
+
+ blizzard_wait_line_buffer();
+ if (blizzard.last_color_mode == BLIZZARD_COLOR_YUV420)
+ blizzard_wait_yyc();
+ blizzard.last_color_mode = color_mode;
+ if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+ enable_tearsync(gy1, w, h,
+ blizzard.screen_height,
+ h_out,
+ flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+ else
+ disable_tearsync();
+
+ set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
+ color_mode, zoom_off, flags);
+
+ blizzard.extif->set_bits_per_cycle(16);
+ /* set_window_regs has left the register index at the right
+ * place, so no need to set it here.
+ */
+ blizzard.extif->transfer_area(w, h, request_complete, req);
+
+ return REQ_PENDING;
+}
+
+static int send_frame_handler(struct blizzard_request *req)
+{
+ struct update_param *par = &req->par.update;
+ int plane = par->plane;
+
+#ifdef VERBOSE
+ dev_dbg(blizzard.fbdev->dev,
+ "send_frame: x %d y %d w %d h %d "
+ "x_out %d y_out %d w_out %d h_out %d "
+ "color_mode %04x flags %04x planes %01x\n",
+ par->x, par->y, par->width, par->height,
+ par->out_x, par->out_y, par->out_width, par->out_height,
+ par->color_mode, par->flags, blizzard.enabled_planes);
+#endif
+ if (par->flags & OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY)
+ disable_overlay();
+
+ if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) ||
+ (blizzard.enabled_planes & blizzard.vid_scaled))
+ return do_full_screen_update(req);
+
+ return do_partial_update(req, plane, par->x, par->y,
+ par->width, par->height,
+ par->out_x, par->out_y,
+ par->out_width, par->out_height,
+ par->color_mode, par->bpp);
+}
+
+static void send_frame_complete(void *data)
+{
+}
+
+#define ADD_PREQ(_x, _y, _w, _h, _x_out, _y_out, _w_out, _h_out) do { \
+ req = alloc_req(); \
+ req->handler = send_frame_handler; \
+ req->complete = send_frame_complete; \
+ req->par.update.plane = plane_idx; \
+ req->par.update.x = _x; \
+ req->par.update.y = _y; \
+ req->par.update.width = _w; \
+ req->par.update.height = _h; \
+ req->par.update.out_x = _x_out; \
+ req->par.update.out_y = _y_out; \
+ req->par.update.out_width = _w_out; \
+ req->par.update.out_height = _h_out; \
+ req->par.update.bpp = bpp; \
+ req->par.update.color_mode = color_mode;\
+ req->par.update.flags = flags; \
+ list_add_tail(&req->entry, req_head); \
+} while(0)
+
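+/* Build one or two update requests for a window.  If the payload
+ * exceeds max_transmit_size the window is split: for example
+ * (hypothetical numbers), a 240x320 RGB565 window is 240*320*2 =
+ * 153600 bytes, so with a 65536 byte limit the first request carries
+ * 65536 / (240*2) = 136 lines and the second carries the remaining
+ * 184 lines with tear sync disabled.
+ */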
+static void create_req_list(int plane_idx,
+ struct omapfb_update_window *win,
+ struct list_head *req_head)
+{
+ struct blizzard_request *req;
+ int x = win->x;
+ int y = win->y;
+ int width = win->width;
+ int height = win->height;
+ int x_out = win->out_x;
+ int y_out = win->out_y;
+ int width_out = win->out_width;
+ int height_out = win->out_height;
+ int color_mode;
+ int bpp;
+ int flags;
+ unsigned int ystart = y;
+ unsigned int yspan = height;
+ unsigned int ystart_out = y_out;
+ unsigned int yspan_out = height_out;
+
+ flags = win->format & ~OMAPFB_FORMAT_MASK;
+ color_mode = win->format & OMAPFB_FORMAT_MASK;
+ switch (color_mode) {
+ case OMAPFB_COLOR_YUV420:
+ /* Embedded window with different color mode */
+ bpp = 12;
+ /* X, Y, height must be aligned at 2, width at 4 pixels */
+ x &= ~1;
+ y &= ~1;
+ height = yspan = height & ~1;
+ width = width & ~3;
+ break;
+ default:
+ /* Same as the plane color mode */
+ bpp = blizzard.plane[plane_idx].bpp;
+ break;
+ }
+ if (width * height * bpp / 8 > blizzard.max_transmit_size) {
+ yspan = blizzard.max_transmit_size / (width * bpp / 8);
+ yspan_out = yspan * height_out / height;
+ ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
+ width_out, yspan_out);
+ ystart += yspan;
+ ystart_out += yspan_out;
+ yspan = height - yspan;
+ yspan_out = height_out - yspan_out;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+
+ ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
+ width_out, yspan_out);
+}
+
+static void auto_update_complete(void *data)
+{
+ if (!blizzard.stop_auto_update)
+ mod_timer(&blizzard.auto_update_timer,
+ jiffies + BLIZZARD_AUTO_UPDATE_TIME);
+}
+
+static void blizzard_update_window_auto(unsigned long arg)
+{
+ LIST_HEAD(req_list);
+ struct blizzard_request *last;
+ struct omapfb_plane_struct *plane;
+
+ plane = blizzard.fbdev->fb_info[0]->par;
+ create_req_list(plane->idx,
+ &blizzard.auto_update_window, &req_list);
+ last = list_entry(req_list.prev, struct blizzard_request, entry);
+
+ last->complete = auto_update_complete;
+ last->complete_data = NULL;
+
+ submit_req_list(&req_list);
+}
+
+int blizzard_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*complete_callback)(void *arg),
+ void *complete_callback_data)
+{
+ LIST_HEAD(req_list);
+ struct blizzard_request *last;
+ struct omapfb_plane_struct *plane = fbi->par;
+
+ if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE))
+ return -EINVAL;
+ if (unlikely(!blizzard.te_connected &&
+ (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC)))
+ return -EINVAL;
+
+ create_req_list(plane->idx, win, &req_list);
+ last = list_entry(req_list.prev, struct blizzard_request, entry);
+
+ last->complete = complete_callback;
+ last->complete_data = (void *)complete_callback_data;
+
+ submit_req_list(&req_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(blizzard_update_window_async);
+
+static int update_full_screen(void)
+{
+ return blizzard_update_window_async(blizzard.fbdev->fb_info[0],
+ &blizzard.auto_update_window, NULL, NULL);
+}
+
+static int blizzard_setup_plane(int plane, int channel_out,
+ unsigned long offset, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ struct plane_info *p;
+
+#ifdef VERBOSE
+ dev_dbg(blizzard.fbdev->dev,
+ "plane %d ch_out %d offset %#08lx scr_width %d "
+ "pos_x %d pos_y %d width %d height %d color_mode %d\n",
+ plane, channel_out, offset, screen_width,
+ pos_x, pos_y, width, height, color_mode);
+#endif
+ if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+ return -EINVAL;
+ p = &blizzard.plane[plane];
+
+ switch (color_mode) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUY422:
+ p->bpp = 16;
+ blizzard.vid_nonstd_color &= ~(1 << plane);
+ break;
+ case OMAPFB_COLOR_YUV420:
+ p->bpp = 12;
+ blizzard.vid_nonstd_color |= 1 << plane;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ p->bpp = 16;
+ blizzard.vid_nonstd_color &= ~(1 << plane);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p->offset = offset;
+ p->pos_x = pos_x;
+ p->pos_y = pos_y;
+ p->width = width;
+ p->height = height;
+ p->scr_width = screen_width;
+ if (!p->out_width)
+ p->out_width = width;
+ if (!p->out_height)
+ p->out_height = height;
+
+ p->color_mode = color_mode;
+
+ return 0;
+}
+
+static int blizzard_set_scale(int plane, int orig_w, int orig_h,
+ int out_w, int out_h)
+{
+ struct plane_info *p = &blizzard.plane[plane];
+ int r;
+
+ dev_dbg(blizzard.fbdev->dev,
+ "plane %d orig_w %d orig_h %d out_w %d out_h %d\n",
+ plane, orig_w, orig_h, out_w, out_h);
+ if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+ return -ENODEV;
+
+ r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h);
+ if (r < 0)
+ return r;
+
+ p->width = orig_w;
+ p->height = orig_h;
+ p->out_width = out_w;
+ p->out_height = out_h;
+ if (orig_w == out_w && orig_h == out_h)
+ blizzard.vid_scaled &= ~(1 << plane);
+ else
+ blizzard.vid_scaled |= 1 << plane;
+
+ return 0;
+}
+
+static int blizzard_enable_plane(int plane, int enable)
+{
+ if (enable)
+ blizzard.enabled_planes |= 1 << plane;
+ else
+ blizzard.enabled_planes &= ~(1 << plane);
+
+ return 0;
+}
+
+static int sync_handler(struct blizzard_request *req)
+{
+ complete(req->par.sync);
+ return REQ_COMPLETE;
+}
+
+static void blizzard_sync(void)
+{
+ LIST_HEAD(req_list);
+ struct blizzard_request *req;
+ struct completion comp;
+
+ req = alloc_req();
+
+ req->handler = sync_handler;
+ req->complete = NULL;
+ init_completion(&comp);
+ req->par.sync = &comp;
+
+ list_add(&req->entry, &req_list);
+ submit_req_list(&req_list);
+
+ wait_for_completion(&comp);
+}
+
+static void blizzard_bind_client(struct omapfb_notifier_block *nb)
+{
+ if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) {
+ omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+ }
+}
+
+static int blizzard_set_update_mode(enum omapfb_update_mode mode)
+{
+ if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
+ mode != OMAPFB_AUTO_UPDATE &&
+ mode != OMAPFB_UPDATE_DISABLED))
+ return -EINVAL;
+
+ if (mode == blizzard.update_mode)
+ return 0;
+
+ dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
+ mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+ (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+ switch (blizzard.update_mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ blizzard.stop_auto_update = 1;
+ del_timer_sync(&blizzard.auto_update_timer);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ blizzard.update_mode = mode;
+ blizzard_sync();
+ blizzard.stop_auto_update = 0;
+
+ switch (mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ blizzard_update_window_auto(0);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ return 0;
+}
+
+static enum omapfb_update_mode blizzard_get_update_mode(void)
+{
+ return blizzard.update_mode;
+}
+
+static inline void set_extif_timings(const struct extif_timings *t)
+{
+ blizzard.extif->set_timings(t);
+}
+
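+/* Round a time given in picoseconds up to a whole external interface
+ * bus tick.  Example (hypothetical numbers): with extif_clk_period =
+ * 5400 ps and div = 2 the bus tick is 10800 ps, so 12200 ps rounds up
+ * to 21600 ps.
+ */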
+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+ int bus_tick = blizzard.extif_clk_period * div;
+ return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 12.2 ns (regs),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 12 ns (regs),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 2*SYSCLK (regs),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns */
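+ /* Worked example (hypothetical clock): at sysclk = 100 MHz,
+ * systim = 10000 ps, so the register read cycle 2 ns + 2*SYSCLK
+ * comes to 22000 ps before rounding to extif ticks.
+ */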
+
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(blizzard.fbdev->dev,
+ "Blizzard systim %lu ps extif_clk_period %u div %d\n",
+ systim, blizzard.extif_clk_period, div);
+
+ t = &blizzard.reg_timings;
+ memset(t, 0, sizeof(*t));
+
+ t->clk_div = div;
+
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return blizzard.extif->convert_timings(t);
+}
+
+static int calc_lut_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns */
+
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(blizzard.fbdev->dev,
+ "Blizzard systim %lu ps extif_clk_period %u div %d\n",
+ systim, blizzard.extif_clk_period, div);
+
+ t = &blizzard.lut_timings;
+ memset(t, 0, sizeof(*t));
+
+ t->clk_div = div;
+
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(blizzard.fbdev->dev,
+ "[lut]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(blizzard.fbdev->dev,
+ "[lut]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return blizzard.extif->convert_timings(t);
+}
+
+static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
+{
+ int max_clk_div;
+ int div;
+
+ blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div);
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_reg_timing(sysclk, div) == 0)
+ break;
+ }
+ if (div > max_clk_div) {
+ dev_dbg(blizzard.fbdev->dev, "reg timing failed\n");
+ goto err;
+ }
+ *extif_mem_div = div;
+
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_lut_timing(sysclk, div) == 0)
+ break;
+ }
+
+ if (div > max_clk_div)
+ goto err;
+
+ blizzard.extif_clk_div = div;
+
+ return 0;
+err:
+ dev_err(blizzard.fbdev->dev, "can't setup timings\n");
+ return -1;
+}
+
+static void calc_blizzard_clk_rates(unsigned long ext_clk,
+ unsigned long *sys_clk, unsigned long *pix_clk)
+{
+ int pix_clk_src;
+ int sys_div = 0, sys_mul = 0;
+ int pix_div;
+
+ pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC);
+ pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
+ if ((pix_clk_src & (0x3 << 1)) == 0) {
+ /* Source is the PLL */
+ sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1;
+ sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0);
+ sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1)
+ & 0x0f) << 11);
+ *sys_clk = ext_clk * sys_mul / sys_div;
+ } else /* else source is ext clk, or oscillator */
+ *sys_clk = ext_clk;
+
+ *pix_clk = *sys_clk / pix_div; /* HZ */
+ dev_dbg(blizzard.fbdev->dev,
+ "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
+ ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
+ dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
+ *sys_clk, *pix_clk);
+}
+
+static int setup_tearsync(unsigned long pix_clk, int extif_div)
+{
+ int hdisp, vdisp;
+ int hndp, vndp;
+ int hsw, vsw;
+ int hs, vs;
+ int hs_pol_inv, vs_pol_inv;
+ int use_hsvs, use_ndp;
+ u8 b;
+
+ hsw = blizzard_read_reg(BLIZZARD_HSW);
+ vsw = blizzard_read_reg(BLIZZARD_VSW);
+ hs_pol_inv = !(hsw & 0x80);
+ vs_pol_inv = !(vsw & 0x80);
+ hsw = hsw & 0x7f;
+ vsw = vsw & 0x3f;
+
+ hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8;
+ vdisp = blizzard_read_reg(BLIZZARD_VDISP0) +
+ ((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8);
+
+ hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f;
+ vndp = blizzard_read_reg(BLIZZARD_VNDP);
+
+ /* time to transfer one pixel (16bpp) in ps */
+ blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time;
+ if (blizzard.extif->get_max_tx_rate != NULL) {
+ /* The external interface might have a rate limitation;
+ * if so, don't assume a shorter per-pixel transfer time
+ * than that rate allows.
+ */
+ unsigned long min_tx_time;
+ unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate();
+
+ dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n",
+ max_tx_rate);
+ min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
+ if (blizzard.pix_tx_time < min_tx_time)
+ blizzard.pix_tx_time = min_tx_time;
+ }
+
+ /* time to update one line in ps */
+ blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
+ blizzard.line_upd_time *= 1000;
+ if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time)
+ /* transfer speed too low, we might have to use both
+ * HS and VS */
+ use_hsvs = 1;
+ else
+ /* decent transfer speed, we'll always use only VS */
+ use_hsvs = 0;
+
+ if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
+ /* HS or'ed with VS doesn't work, use the active high
+ * TE signal based on HNDP / VNDP */
+ use_ndp = 1;
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ hs = hndp;
+ vs = vndp;
+ } else {
+ /* Use HS or'ed with VS as a TE signal if both are needed
+ * or VNDP if only vsync is needed. */
+ use_ndp = 0;
+ hs = hsw;
+ vs = vsw;
+ if (!use_hsvs) {
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ }
+ }
+
+ hs = hs * 1000000 / (pix_clk / 1000); /* ps */
+ hs *= 1000;
+
+ vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
+ vs *= 1000;
+
+ if (vs <= hs)
+ return -EDOM;
+ /* set VS to 120% of HS to minimize VS detection time */
+ vs = hs * 12 / 10;
+ /* minimize HS too */
+ if (hs > 10000)
+ hs = 10000;
+
+ b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
+ b &= ~0x3;
+ b |= use_hsvs ? 1 : 0;
+ b |= (use_ndp && use_hsvs) ? 0 : 2;
+ blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
+
+ blizzard.vsync_only = !use_hsvs;
+
+ dev_dbg(blizzard.fbdev->dev,
+ "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
+ pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time);
+ dev_dbg(blizzard.fbdev->dev,
+ "hs %d ps vs %d ps mode %d vsync_only %d\n",
+ hs, vs, b & 0x3, !use_hsvs);
+
+ return blizzard.extif->setup_tearsync(1, hs, vs,
+ hs_pol_inv, vs_pol_inv,
+ extif_div);
+}
+
+static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
+{
+ blizzard.int_ctrl->get_caps(plane, caps);
+ caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
+ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE |
+ OMAPFB_CAPS_WINDOW_SCALE |
+ OMAPFB_CAPS_WINDOW_OVERLAY;
+ if (blizzard.te_connected)
+ caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
+ caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
+ (1 << OMAPFB_COLOR_YUV420);
+}
+
+static void _save_regs(struct blizzard_reg_list *list, int cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++, list++) {
+ int reg;
+ for (reg = list->start; reg <= list->end; reg += 2)
+ blizzard_reg_cache[reg / 2] = blizzard_read_reg(reg);
+ }
+}
+
+static void _restore_regs(struct blizzard_reg_list *list, int cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++, list++) {
+ int reg;
+ for (reg = list->start; reg <= list->end; reg += 2)
+ blizzard_write_reg(reg, blizzard_reg_cache[reg / 2]);
+ }
+}
+
+static void blizzard_save_all_regs(void)
+{
+ _save_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
+ _save_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
+}
+
+static void blizzard_restore_pll_regs(void)
+{
+ _restore_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
+}
+
+static void blizzard_restore_gen_regs(void)
+{
+ _restore_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
+}
+
+static void blizzard_suspend(void)
+{
+ u32 l;
+ unsigned long tmo;
+
+ if (blizzard.last_color_mode) {
+ update_full_screen();
+ blizzard_sync();
+ }
+ blizzard.update_mode_before_suspend = blizzard.update_mode;
+ /* the following will disable clocks as well */
+ blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
+
+ blizzard_save_all_regs();
+
+ blizzard_stop_sdram();
+
+ l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
+ /* Standby, Sleep. We assume we use an external clock. */
+ l |= 0x03;
+ blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
+
+ tmo = jiffies + msecs_to_jiffies(100);
+ while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) {
+ if (time_after(jiffies, tmo)) {
+ dev_err(blizzard.fbdev->dev,
+ "s1d1374x: sleep timeout, stopping PLL manually\n");
+ l = blizzard_read_reg(BLIZZARD_PLL_MODE);
+ l &= ~0x03;
+ /* Disable PLL, counter function */
+ l |= 0x2;
+ blizzard_write_reg(BLIZZARD_PLL_MODE, l);
+ break;
+ }
+ msleep(1);
+ }
+
+ if (blizzard.power_down != NULL)
+ blizzard.power_down(blizzard.fbdev->dev);
+}
+
+static void blizzard_resume(void)
+{
+ u32 l;
+
+ if (blizzard.power_up != NULL)
+ blizzard.power_up(blizzard.fbdev->dev);
+
+ l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
+ /* Standby, Sleep */
+ l &= ~0x03;
+ blizzard_write_reg(BLIZZARD_POWER_SAVE, l);
+
+ blizzard_restore_pll_regs();
+ l = blizzard_read_reg(BLIZZARD_PLL_MODE);
+ l &= ~0x03;
+ /* Enable PLL, counter function */
+ l |= 0x1;
+ blizzard_write_reg(BLIZZARD_PLL_MODE, l);
+
+ while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7)))
+ msleep(1);
+
+ blizzard_restart_sdram();
+
+ blizzard_restore_gen_regs();
+
+ /* Enable display */
+ blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01);
+
+ /* the following will enable clocks as necessary */
+ blizzard_set_update_mode(blizzard.update_mode_before_suspend);
+
+ /* Force a background update */
+ blizzard.zoom_on = 1;
+ update_full_screen();
+ blizzard_sync();
+}
+
+static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r = 0, i;
+ u8 rev, conf;
+ unsigned long ext_clk;
+ int extif_div;
+ unsigned long sys_clk, pix_clk;
+ struct omapfb_platform_data *omapfb_conf;
+ struct blizzard_platform_data *ctrl_conf;
+
+ BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
+
+ blizzard.fbdev = fbdev;
+ blizzard.extif = fbdev->ext_if;
+ blizzard.int_ctrl = fbdev->int_ctrl;
+
+ omapfb_conf = fbdev->dev->platform_data;
+ ctrl_conf = omapfb_conf->ctrl_platform_data;
+ if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+ dev_err(fbdev->dev, "s1d1374x: missing platform data\n");
+ r = -ENOENT;
+ goto err1;
+ }
+
+ blizzard.power_down = ctrl_conf->power_down;
+ blizzard.power_up = ctrl_conf->power_up;
+
+ spin_lock_init(&blizzard.req_lock);
+
+ if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0)
+ goto err1;
+
+ if ((r = blizzard.extif->init(fbdev)) < 0)
+ goto err2;
+
+ blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key;
+ blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key;
+ blizzard_ctrl.setup_mem = blizzard.int_ctrl->setup_mem;
+ blizzard_ctrl.mmap = blizzard.int_ctrl->mmap;
+
+ ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+ if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0)
+ goto err3;
+
+ set_extif_timings(&blizzard.reg_timings);
+
+ if (blizzard.power_up != NULL)
+ blizzard.power_up(fbdev->dev);
+
+ calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk);
+
+ if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0)
+ goto err3;
+ set_extif_timings(&blizzard.reg_timings);
+
+ if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) {
+ dev_err(fbdev->dev,
+ "controller not initialized by the bootloader\n");
+ r = -ENODEV;
+ goto err3;
+ }
+
+ if (ctrl_conf->te_connected) {
+ if ((r = setup_tearsync(pix_clk, extif_div)) < 0)
+ goto err3;
+ blizzard.te_connected = 1;
+ }
+
+ rev = blizzard_read_reg(BLIZZARD_REV_CODE);
+ conf = blizzard_read_reg(BLIZZARD_CONFIG);
+
+ switch (rev & 0xfc) {
+ case 0x9c:
+ blizzard.version = BLIZZARD_VERSION_S1D13744;
+ pr_info("omapfb: s1d13744 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+ break;
+ case 0xa4:
+ blizzard.version = BLIZZARD_VERSION_S1D13745;
+ pr_info("omapfb: s1d13745 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+ break;
+ default:
+ dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n",
+ rev);
+ r = -ENODEV;
+ goto err3;
+ }
+
+ blizzard.max_transmit_size = blizzard.extif->max_transmit_size;
+
+ blizzard.update_mode = OMAPFB_UPDATE_DISABLED;
+
+ blizzard.auto_update_window.x = 0;
+ blizzard.auto_update_window.y = 0;
+ blizzard.auto_update_window.width = fbdev->panel->x_res;
+ blizzard.auto_update_window.height = fbdev->panel->y_res;
+ blizzard.auto_update_window.out_x = 0;
+ blizzard.auto_update_window.out_y = 0;
+ blizzard.auto_update_window.out_width = fbdev->panel->x_res;
+ blizzard.auto_update_window.out_height = fbdev->panel->y_res;
+ blizzard.auto_update_window.format = 0;
+
+ blizzard.screen_width = fbdev->panel->x_res;
+ blizzard.screen_height = fbdev->panel->y_res;
+
+ init_timer(&blizzard.auto_update_timer);
+ blizzard.auto_update_timer.function = blizzard_update_window_auto;
+ blizzard.auto_update_timer.data = 0;
+
+ INIT_LIST_HEAD(&blizzard.free_req_list);
+ INIT_LIST_HEAD(&blizzard.pending_req_list);
+ for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++)
+ list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list);
+ BUG_ON(i <= IRQ_REQ_POOL_SIZE);
+ sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE);
+
+ return 0;
+err3:
+ if (blizzard.power_down != NULL)
+ blizzard.power_down(fbdev->dev);
+ blizzard.extif->cleanup();
+err2:
+ blizzard.int_ctrl->cleanup();
+err1:
+ return r;
+}
+
+static void blizzard_cleanup(void)
+{
+ blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ blizzard.extif->cleanup();
+ blizzard.int_ctrl->cleanup();
+ if (blizzard.power_down != NULL)
+ blizzard.power_down(blizzard.fbdev->dev);
+}
+
+struct lcd_ctrl blizzard_ctrl = {
+ .name = "blizzard",
+ .init = blizzard_init,
+ .cleanup = blizzard_cleanup,
+ .bind_client = blizzard_bind_client,
+ .get_caps = blizzard_get_caps,
+ .set_update_mode = blizzard_set_update_mode,
+ .get_update_mode = blizzard_get_update_mode,
+ .setup_plane = blizzard_setup_plane,
+ .set_scale = blizzard_set_scale,
+ .enable_plane = blizzard_enable_plane,
+ .update_window = blizzard_update_window_async,
+ .sync = blizzard_sync,
+ .suspend = blizzard_suspend,
+ .resume = blizzard_resume,
+};
+
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
new file mode 100644
index 000000000000..f4c23434de6f
--- /dev/null
+++ b/drivers/video/omap/dispc.c
@@ -0,0 +1,1502 @@
+/*
+ * OMAP2 display controller support
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/sram.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/board.h>
+
+#include "dispc.h"
+
+#define MODULE_NAME "dispc"
+
+#define DSS_BASE 0x48050000
+#define DSS_SYSCONFIG 0x0010
+
+#define DISPC_BASE 0x48050400
+
+/* DISPC common */
+#define DISPC_REVISION 0x0000
+#define DISPC_SYSCONFIG 0x0010
+#define DISPC_SYSSTATUS 0x0014
+#define DISPC_IRQSTATUS 0x0018
+#define DISPC_IRQENABLE 0x001C
+#define DISPC_CONTROL 0x0040
+#define DISPC_CONFIG 0x0044
+#define DISPC_CAPABLE 0x0048
+#define DISPC_DEFAULT_COLOR0 0x004C
+#define DISPC_DEFAULT_COLOR1 0x0050
+#define DISPC_TRANS_COLOR0 0x0054
+#define DISPC_TRANS_COLOR1 0x0058
+#define DISPC_LINE_STATUS 0x005C
+#define DISPC_LINE_NUMBER 0x0060
+#define DISPC_TIMING_H 0x0064
+#define DISPC_TIMING_V 0x0068
+#define DISPC_POL_FREQ 0x006C
+#define DISPC_DIVISOR 0x0070
+#define DISPC_SIZE_DIG 0x0078
+#define DISPC_SIZE_LCD 0x007C
+
+#define DISPC_DATA_CYCLE1 0x01D4
+#define DISPC_DATA_CYCLE2 0x01D8
+#define DISPC_DATA_CYCLE3 0x01DC
+
+/* DISPC GFX plane */
+#define DISPC_GFX_BA0 0x0080
+#define DISPC_GFX_BA1 0x0084
+#define DISPC_GFX_POSITION 0x0088
+#define DISPC_GFX_SIZE 0x008C
+#define DISPC_GFX_ATTRIBUTES 0x00A0
+#define DISPC_GFX_FIFO_THRESHOLD 0x00A4
+#define DISPC_GFX_FIFO_SIZE_STATUS 0x00A8
+#define DISPC_GFX_ROW_INC 0x00AC
+#define DISPC_GFX_PIXEL_INC 0x00B0
+#define DISPC_GFX_WINDOW_SKIP 0x00B4
+#define DISPC_GFX_TABLE_BA 0x00B8
+
+/* DISPC Video plane 1/2 */
+#define DISPC_VID1_BASE 0x00BC
+#define DISPC_VID2_BASE 0x014C
+
+/* Offsets into DISPC_VID1/2_BASE */
+#define DISPC_VID_BA0 0x0000
+#define DISPC_VID_BA1 0x0004
+#define DISPC_VID_POSITION 0x0008
+#define DISPC_VID_SIZE 0x000C
+#define DISPC_VID_ATTRIBUTES 0x0010
+#define DISPC_VID_FIFO_THRESHOLD 0x0014
+#define DISPC_VID_FIFO_SIZE_STATUS 0x0018
+#define DISPC_VID_ROW_INC 0x001C
+#define DISPC_VID_PIXEL_INC 0x0020
+#define DISPC_VID_FIR 0x0024
+#define DISPC_VID_PICTURE_SIZE 0x0028
+#define DISPC_VID_ACCU0 0x002C
+#define DISPC_VID_ACCU1 0x0030
+
+/* 8 elements in 8 byte increments */
+#define DISPC_VID_FIR_COEF_H0 0x0034
+/* 8 elements in 8 byte increments */
+#define DISPC_VID_FIR_COEF_HV0 0x0038
+/* 5 elements in 4 byte increments */
+#define DISPC_VID_CONV_COEF0 0x0074
+
+#define DISPC_IRQ_FRAMEMASK 0x0001
+#define DISPC_IRQ_VSYNC 0x0002
+#define DISPC_IRQ_EVSYNC_EVEN 0x0004
+#define DISPC_IRQ_EVSYNC_ODD 0x0008
+#define DISPC_IRQ_ACBIAS_COUNT_STAT 0x0010
+#define DISPC_IRQ_PROG_LINE_NUM 0x0020
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW 0x0040
+#define DISPC_IRQ_GFX_END_WIN 0x0080
+#define DISPC_IRQ_PAL_GAMMA_MASK 0x0100
+#define DISPC_IRQ_OCP_ERR 0x0200
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW 0x0400
+#define DISPC_IRQ_VID1_END_WIN 0x0800
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW 0x1000
+#define DISPC_IRQ_VID2_END_WIN 0x2000
+#define DISPC_IRQ_SYNC_LOST 0x4000
+
+#define DISPC_IRQ_MASK_ALL 0x7fff
+
+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+ DISPC_IRQ_SYNC_LOST)
+
+#define RFBI_CONTROL 0x48050040
+
+#define MAX_PALETTE_SIZE (256 * 16)
+
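+/*
+ * Register field helpers: FLD_MASK() builds a mask of 'len' bits at bit
+ * position 'pos', and MOD_REG_FLD() does a read-modify-write that only
+ * replaces the masked bits, e.g. MOD_REG_FLD(DISPC_CONTROL, 1, 1) sets
+ * bit 0 (the LCD output enable used by omap_dispc_enable_lcd_out()
+ * below) without disturbing the rest of the register. Callers pass
+ * 'val' already shifted into position.
+ */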
+#define FLD_MASK(pos, len) (((1 << (len)) - 1) << (pos))
+
+#define MOD_REG_FLD(reg, mask, val) \
+ dispc_write_reg((reg), (dispc_read_reg(reg) & ~(mask)) | (val));
+
+#define OMAP2_SRAM_START 0x40200000
+/* Maximum size, in reality this is smaller if SRAM is partially locked. */
+#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
+
+/* We support the SDRAM / SRAM types. See OMAPFB_PLANE_MEMTYPE_* in omapfb.h */
+#define DISPC_MEMTYPE_NUM 2
+
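+/*
+ * Framebuffer pages are tracked in a simple reservation bitmap, one bit
+ * per PAGE_SIZE page: RESMAP_SIZE() gives the map size in bytes (with
+ * enough slack for whole-word accesses), RESMAP_PTR() returns the word
+ * holding a page's bit and RESMAP_MASK() the bit within that word.
+ */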
+#define RESMAP_SIZE(_page_cnt) \
+ ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
+#define RESMAP_PTR(_res_map, _page_nr) \
+ (((_res_map)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
+#define RESMAP_MASK(_page_nr) \
+ (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
+
+struct resmap {
+ unsigned long start;
+ unsigned page_cnt;
+ unsigned long *map;
+};
+
+static struct {
+ u32 base;
+
+ struct omapfb_mem_desc mem_desc;
+ struct resmap *res_map[DISPC_MEMTYPE_NUM];
+ atomic_t map_count[OMAPFB_PLANE_NUM];
+
+ dma_addr_t palette_paddr;
+ void *palette_vaddr;
+
+ int ext_mode;
+
+ unsigned long enabled_irqs;
+ void (*irq_callback)(void *);
+ void *irq_callback_data;
+ struct completion frame_done;
+
+ int fir_hinc[OMAPFB_PLANE_NUM];
+ int fir_vinc[OMAPFB_PLANE_NUM];
+
+ struct clk *dss_ick, *dss1_fck;
+ struct clk *dss_54m_fck;
+
+ enum omapfb_update_mode update_mode;
+ struct omapfb_device *fbdev;
+
+ struct omapfb_color_key color_key;
+} dispc;
+
+static void enable_lcd_clocks(int enable);
+
+static inline void dispc_write_reg(int idx, u32 val)
+{
+ __raw_writel(val, dispc.base + idx);
+}
+
+static inline u32 dispc_read_reg(int idx)
+{
+ u32 l = __raw_readl(dispc.base + idx);
+ return l;
+}
+
+/* Select RFBI or bypass mode */
+static void enable_rfbi_mode(int enable)
+{
+ u32 l;
+
+ l = dispc_read_reg(DISPC_CONTROL);
+ /* Enable RFBI, GPIO0/1 */
+ l &= ~((1 << 11) | (1 << 15) | (1 << 16));
+ l |= enable ? (1 << 11) : 0;
+ /* RFBI En: GPIO0/1=10 RFBI Dis: GPIO0/1=11 */
+ l |= 1 << 15;
+ l |= enable ? 0 : (1 << 16);
+ dispc_write_reg(DISPC_CONTROL, l);
+
+ /* Set bypass mode in RFBI module */
+ l = __raw_readl(io_p2v(RFBI_CONTROL));
+ l |= enable ? 0 : (1 << 1);
+ __raw_writel(l, io_p2v(RFBI_CONTROL));
+}
+
+static void set_lcd_data_lines(int data_lines)
+{
+ u32 l;
+ int code = 0;
+
+ switch (data_lines) {
+ case 12:
+ code = 0;
+ break;
+ case 16:
+ code = 1;
+ break;
+ case 18:
+ code = 2;
+ break;
+ case 24:
+ code = 3;
+ break;
+ default:
+ BUG();
+ }
+
+ l = dispc_read_reg(DISPC_CONTROL);
+ l &= ~(0x03 << 8);
+ l |= code << 8;
+ dispc_write_reg(DISPC_CONTROL, l);
+}
+
+static void set_load_mode(int mode)
+{
+ BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY |
+ DISPC_LOAD_CLUT_ONCE_FRAME));
+ MOD_REG_FLD(DISPC_CONFIG, 0x03 << 1, mode << 1);
+}
+
+void omap_dispc_set_lcd_size(int x, int y)
+{
+ BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((y - 1) << 16) | (x - 1));
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_set_lcd_size);
+
+void omap_dispc_set_digit_size(int x, int y)
+{
+ BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_SIZE_DIG, FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((y - 1) << 16) | (x - 1));
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_set_digit_size);
+
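+/*
+ * Program a plane's DMA FIFO low/high thresholds as fractions of the
+ * FIFO size read back from the hardware: 3/4 and full when an external
+ * controller (RFBI mode) paces the transfer, 1/4 and 3/4 otherwise.
+ */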
+static void setup_plane_fifo(int plane, int ext_mode)
+{
+ const u32 ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
+ DISPC_VID1_BASE + DISPC_VID_FIFO_THRESHOLD,
+ DISPC_VID2_BASE + DISPC_VID_FIFO_THRESHOLD };
+ const u32 fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
+ DISPC_VID1_BASE + DISPC_VID_FIFO_SIZE_STATUS,
+ DISPC_VID2_BASE + DISPC_VID_FIFO_SIZE_STATUS };
+ int low, high;
+ u32 l;
+
+ BUG_ON(plane > 2);
+
+ l = dispc_read_reg(fsz_reg[plane]);
+ l &= FLD_MASK(0, 9);
+ if (ext_mode) {
+ low = l * 3 / 4;
+ high = l;
+ } else {
+ low = l / 4;
+ high = l * 3 / 4;
+ }
+ MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 9) | FLD_MASK(0, 9),
+ (high << 16) | low);
+}
+
+void omap_dispc_enable_lcd_out(int enable)
+{
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_CONTROL, 1, enable ? 1 : 0);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_lcd_out);
+
+void omap_dispc_enable_digit_out(int enable)
+{
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_CONTROL, 1 << 1, enable ? 1 << 1 : 0);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_digit_out);
+
+static inline int _setup_plane(int plane, int channel_out,
+ u32 paddr, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+ DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+ const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0,
+ DISPC_VID2_BASE + DISPC_VID_BA0 };
+ const u32 ps_reg[] = { DISPC_GFX_POSITION,
+ DISPC_VID1_BASE + DISPC_VID_POSITION,
+ DISPC_VID2_BASE + DISPC_VID_POSITION };
+ const u32 sz_reg[] = { DISPC_GFX_SIZE,
+ DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE,
+ DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE };
+ const u32 ri_reg[] = { DISPC_GFX_ROW_INC,
+ DISPC_VID1_BASE + DISPC_VID_ROW_INC,
+ DISPC_VID2_BASE + DISPC_VID_ROW_INC };
+ const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
+ DISPC_VID2_BASE + DISPC_VID_SIZE };
+
+ int chout_shift, burst_shift;
+ int chout_val;
+ int color_code;
+ int bpp;
+ int cconv_en;
+ int set_vsize;
+ u32 l;
+
+#ifdef VERBOSE
+ dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d"
+ " pos_x %d pos_y %d width %d height %d color_mode %d\n",
+ plane, channel_out, paddr, screen_width, pos_x, pos_y,
+ width, height, color_mode);
+#endif
+
+ set_vsize = 0;
+ switch (plane) {
+ case OMAPFB_PLANE_GFX:
+ burst_shift = 6;
+ chout_shift = 8;
+ break;
+ case OMAPFB_PLANE_VID1:
+ case OMAPFB_PLANE_VID2:
+ burst_shift = 14;
+ chout_shift = 16;
+ set_vsize = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (channel_out) {
+ case OMAPFB_CHANNEL_OUT_LCD:
+ chout_val = 0;
+ break;
+ case OMAPFB_CHANNEL_OUT_DIGIT:
+ chout_val = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cconv_en = 0;
+ switch (color_mode) {
+ case OMAPFB_COLOR_RGB565:
+ color_code = DISPC_RGB_16_BPP;
+ bpp = 16;
+ break;
+ case OMAPFB_COLOR_YUV422:
+ if (plane == 0)
+ return -EINVAL;
+ color_code = DISPC_UYVY_422;
+ cconv_en = 1;
+ bpp = 16;
+ break;
+ case OMAPFB_COLOR_YUY422:
+ if (plane == 0)
+ return -EINVAL;
+ color_code = DISPC_YUV2_422;
+ cconv_en = 1;
+ bpp = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ l = dispc_read_reg(at_reg[plane]);
+
+ l &= ~(0x0f << 1);
+ l |= color_code << 1;
+ l &= ~(1 << 9);
+ l |= cconv_en << 9;
+
+ l &= ~(0x03 << burst_shift);
+ l |= DISPC_BURST_8x32 << burst_shift;
+
+ l &= ~(1 << chout_shift);
+ l |= chout_val << chout_shift;
+
+ dispc_write_reg(at_reg[plane], l);
+
+ dispc_write_reg(ba_reg[plane], paddr);
+ MOD_REG_FLD(ps_reg[plane],
+ FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x);
+
+ MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((height - 1) << 16) | (width - 1));
+
+ if (set_vsize) {
+ /* Set video size if set_scale hasn't set it */
+ if (!dispc.fir_vinc[plane])
+ MOD_REG_FLD(vs_reg[plane],
+ FLD_MASK(16, 11), (height - 1) << 16);
+ if (!dispc.fir_hinc[plane])
+ MOD_REG_FLD(vs_reg[plane],
+ FLD_MASK(0, 11), width - 1);
+ }
+
+ dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
+
+ return height * screen_width * bpp / 8;
+}
+
+static int omap_dispc_setup_plane(int plane, int channel_out,
+ unsigned long offset,
+ int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ u32 paddr;
+ int r;
+
+ if ((unsigned)plane >= dispc.mem_desc.region_cnt)
+ return -EINVAL;
+ paddr = dispc.mem_desc.region[plane].paddr + offset;
+ enable_lcd_clocks(1);
+ r = _setup_plane(plane, channel_out, paddr,
+ screen_width,
+ pos_x, pos_y, width, height, color_mode);
+ enable_lcd_clocks(0);
+ return r;
+}
+
+static void write_firh_reg(int plane, int reg, u32 value)
+{
+ u32 base;
+
+ if (plane == 1)
+ base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0;
+ else
+ base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0;
+ dispc_write_reg(base + reg * 8, value);
+}
+
+static void write_firhv_reg(int plane, int reg, u32 value)
+{
+ u32 base;
+
+ if (plane == 1)
+ base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0;
+ else
+ base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0;
+ dispc_write_reg(base + reg * 8, value);
+}
+
+static void set_upsampling_coef_table(int plane)
+{
+ const u32 coef[][2] = {
+ { 0x00800000, 0x00800000 },
+ { 0x0D7CF800, 0x037B02FF },
+ { 0x1E70F5FF, 0x0C6F05FE },
+ { 0x335FF5FE, 0x205907FB },
+ { 0xF74949F7, 0x00404000 },
+ { 0xF55F33FB, 0x075920FE },
+ { 0xF5701EFE, 0x056F0CFF },
+ { 0xF87C0DFF, 0x027B0300 },
+ };
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ write_firh_reg(plane, i, coef[i][0]);
+ write_firhv_reg(plane, i, coef[i][1]);
+ }
+}
+
+static int omap_dispc_set_scale(int plane,
+ int orig_width, int orig_height,
+ int out_width, int out_height)
+{
+ const u32 at_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+ const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
+ DISPC_VID2_BASE + DISPC_VID_SIZE };
+ const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR,
+ DISPC_VID2_BASE + DISPC_VID_FIR };
+
+ u32 l;
+ int fir_hinc;
+ int fir_vinc;
+
+ if ((unsigned)plane >= OMAPFB_PLANE_NUM)
+ return -ENODEV;
+
+ if (plane == OMAPFB_PLANE_GFX &&
+ (out_width != orig_width || out_height != orig_height))
+ return -EINVAL;
+
+ enable_lcd_clocks(1);
+ if (orig_width < out_width) {
+ /*
+ * Upsampling.
+ * Currently you can only scale both dimensions in one way.
+ */
+ if (orig_height > out_height ||
+ orig_width * 8 < out_width ||
+ orig_height * 8 < out_height) {
+ enable_lcd_clocks(0);
+ return -EINVAL;
+ }
+ set_upsampling_coef_table(plane);
+ } else if (orig_width > out_width) {
+ /* Downsampling not yet supported
+ */
+
+ enable_lcd_clocks(0);
+ return -EINVAL;
+ }
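+	/*
+	 * FIR increments are in 1/1024 units of the source-to-output
+	 * ratio: 1024 would mean 1:1, smaller values upsample. A value
+	 * of 0 keeps resizing disabled for that direction (see the
+	 * attribute bits set below).
+	 */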
+ if (!orig_width || orig_width == out_width)
+ fir_hinc = 0;
+ else
+ fir_hinc = 1024 * orig_width / out_width;
+ if (!orig_height || orig_height == out_height)
+ fir_vinc = 0;
+ else
+ fir_vinc = 1024 * orig_height / out_height;
+ dispc.fir_hinc[plane] = fir_hinc;
+ dispc.fir_vinc[plane] = fir_vinc;
+
+ MOD_REG_FLD(fir_reg[plane],
+ FLD_MASK(16, 12) | FLD_MASK(0, 12),
+ ((fir_vinc & 4095) << 16) |
+ (fir_hinc & 4095));
+
+ dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
+ "orig_height %d fir_hinc %d fir_vinc %d\n",
+ out_width, out_height, orig_width, orig_height,
+ fir_hinc, fir_vinc);
+
+ MOD_REG_FLD(vs_reg[plane],
+ FLD_MASK(16, 11) | FLD_MASK(0, 11),
+ ((out_height - 1) << 16) | (out_width - 1));
+
+ l = dispc_read_reg(at_reg[plane]);
+ l &= ~(0x03 << 5);
+ l |= fir_hinc ? (1 << 5) : 0;
+ l |= fir_vinc ? (1 << 6) : 0;
+ dispc_write_reg(at_reg[plane], l);
+
+ enable_lcd_clocks(0);
+ return 0;
+}
+
+static int omap_dispc_enable_plane(int plane, int enable)
+{
+ const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+ DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+ DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
+ if ((unsigned int)plane >= dispc.mem_desc.region_cnt)
+ return -EINVAL;
+
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
+ enable_lcd_clocks(0);
+
+ return 0;
+}
+
+static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
+{
+ u32 df_reg, tr_reg;
+ int shift, val;
+
+ switch (ck->channel_out) {
+ case OMAPFB_CHANNEL_OUT_LCD:
+ df_reg = DISPC_DEFAULT_COLOR0;
+ tr_reg = DISPC_TRANS_COLOR0;
+ shift = 10;
+ break;
+ case OMAPFB_CHANNEL_OUT_DIGIT:
+ df_reg = DISPC_DEFAULT_COLOR1;
+ tr_reg = DISPC_TRANS_COLOR1;
+ shift = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (ck->key_type) {
+ case OMAPFB_COLOR_KEY_DISABLED:
+ val = 0;
+ break;
+ case OMAPFB_COLOR_KEY_GFX_DST:
+ val = 1;
+ break;
+ case OMAPFB_COLOR_KEY_VID_SRC:
+ val = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ enable_lcd_clocks(1);
+ MOD_REG_FLD(DISPC_CONFIG, FLD_MASK(shift, 2), val << shift);
+
+ if (val != 0)
+ dispc_write_reg(tr_reg, ck->trans_key);
+ dispc_write_reg(df_reg, ck->background);
+ enable_lcd_clocks(0);
+
+ dispc.color_key = *ck;
+
+ return 0;
+}
+
+static int omap_dispc_get_color_key(struct omapfb_color_key *ck)
+{
+ *ck = dispc.color_key;
+ return 0;
+}
+
+static void load_palette(void)
+{
+}
+
+static int omap_dispc_set_update_mode(enum omapfb_update_mode mode)
+{
+ int r = 0;
+
+ if (mode != dispc.update_mode) {
+ switch (mode) {
+ case OMAPFB_AUTO_UPDATE:
+ case OMAPFB_MANUAL_UPDATE:
+ enable_lcd_clocks(1);
+ omap_dispc_enable_lcd_out(1);
+ dispc.update_mode = mode;
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ init_completion(&dispc.frame_done);
+ omap_dispc_enable_lcd_out(0);
+ if (!wait_for_completion_timeout(&dispc.frame_done,
+ msecs_to_jiffies(500))) {
+ dev_err(dispc.fbdev->dev,
+ "timeout waiting for FRAME DONE\n");
+ }
+ dispc.update_mode = mode;
+ enable_lcd_clocks(0);
+ break;
+ default:
+ r = -EINVAL;
+ }
+ }
+
+ return r;
+}
+
+static void omap_dispc_get_caps(int plane, struct omapfb_caps *caps)
+{
+ caps->ctrl |= OMAPFB_CAPS_PLANE_RELOCATE_MEM;
+ if (plane > 0)
+ caps->ctrl |= OMAPFB_CAPS_PLANE_SCALE;
+ caps->plane_color |= (1 << OMAPFB_COLOR_RGB565) |
+ (1 << OMAPFB_COLOR_YUV422) |
+ (1 << OMAPFB_COLOR_YUY422);
+ if (plane == 0)
+ caps->plane_color |= (1 << OMAPFB_COLOR_CLUT_8BPP) |
+ (1 << OMAPFB_COLOR_CLUT_4BPP) |
+ (1 << OMAPFB_COLOR_CLUT_2BPP) |
+ (1 << OMAPFB_COLOR_CLUT_1BPP) |
+ (1 << OMAPFB_COLOR_RGB444);
+}
+
+static enum omapfb_update_mode omap_dispc_get_update_mode(void)
+{
+ return dispc.update_mode;
+}
+
+static void setup_color_conv_coef(void)
+{
+ u32 mask = FLD_MASK(16, 11) | FLD_MASK(0, 11);
+ int cf1_reg = DISPC_VID1_BASE + DISPC_VID_CONV_COEF0;
+ int cf2_reg = DISPC_VID2_BASE + DISPC_VID_CONV_COEF0;
+ int at1_reg = DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES;
+ int at2_reg = DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES;
+ const struct color_conv_coef {
+ int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
+ int full_range;
+ } ctbl_bt601_5 = {
+ 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
+ };
+ const struct color_conv_coef *ct;
+#define CVAL(x, y) ((((x) & 2047) << 16) | ((y) & 2047))
+
+ ct = &ctbl_bt601_5;
+
+ MOD_REG_FLD(cf1_reg, mask, CVAL(ct->rcr, ct->ry));
+ MOD_REG_FLD(cf1_reg + 4, mask, CVAL(ct->gy, ct->rcb));
+ MOD_REG_FLD(cf1_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
+ MOD_REG_FLD(cf1_reg + 12, mask, CVAL(ct->bcr, ct->by));
+ MOD_REG_FLD(cf1_reg + 16, mask, CVAL(0, ct->bcb));
+
+ MOD_REG_FLD(cf2_reg, mask, CVAL(ct->rcr, ct->ry));
+ MOD_REG_FLD(cf2_reg + 4, mask, CVAL(ct->gy, ct->rcb));
+ MOD_REG_FLD(cf2_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
+ MOD_REG_FLD(cf2_reg + 12, mask, CVAL(ct->bcr, ct->by));
+ MOD_REG_FLD(cf2_reg + 16, mask, CVAL(0, ct->bcb));
+#undef CVAL
+
+ MOD_REG_FLD(at1_reg, (1 << 11), ct->full_range);
+ MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
+}
+
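+/*
+ * Derive the LCD logic and pixel clock divisors from the dss1 functional
+ * clock: pck_div is the smallest divisor that does not exceed the
+ * requested pixel clock (at least 2 for TFT, 3 for STN panels), and if
+ * it would overflow its 8-bit field the logic clock is divided as well.
+ */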
+static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
+{
+ unsigned long fck, lck;
+
+ *lck_div = 1;
+ pck = max(1, pck);
+ fck = clk_get_rate(dispc.dss1_fck);
+ lck = fck;
+ *pck_div = (lck + pck - 1) / pck;
+ if (is_tft)
+ *pck_div = max(2, *pck_div);
+ else
+ *pck_div = max(3, *pck_div);
+ if (*pck_div > 255) {
+ *pck_div = 255;
+ lck = pck * *pck_div;
+ *lck_div = fck / lck;
+ BUG_ON(*lck_div < 1);
+ if (*lck_div > 255) {
+ *lck_div = 255;
+ dev_warn(dispc.fbdev->dev, "pixclock %d kHz too low.\n",
+ pck / 1000);
+ }
+ }
+}
+
+static void set_lcd_tft_mode(int enable)
+{
+ u32 mask;
+
+ mask = 1 << 3;
+ MOD_REG_FLD(DISPC_CONTROL, mask, enable ? mask : 0);
+}
+
+static void set_lcd_timings(void)
+{
+ u32 l;
+ int lck_div, pck_div;
+ struct lcd_panel *panel = dispc.fbdev->panel;
+ int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
+ unsigned long fck;
+
+ l = dispc_read_reg(DISPC_TIMING_H);
+ l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
+ l |= ( max(1, (min(64, panel->hsw))) - 1 ) << 0;
+ l |= ( max(1, (min(256, panel->hfp))) - 1 ) << 8;
+ l |= ( max(1, (min(256, panel->hbp))) - 1 ) << 20;
+ dispc_write_reg(DISPC_TIMING_H, l);
+
+ l = dispc_read_reg(DISPC_TIMING_V);
+ l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
+ l |= ( max(1, (min(64, panel->vsw))) - 1 ) << 0;
+ l |= ( max(0, (min(255, panel->vfp))) - 0 ) << 8;
+ l |= ( max(0, (min(255, panel->vbp))) - 0 ) << 20;
+ dispc_write_reg(DISPC_TIMING_V, l);
+
+ l = dispc_read_reg(DISPC_POL_FREQ);
+ l &= ~FLD_MASK(12, 6);
+ l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 12;
+ l |= panel->acb & 0xff;
+ dispc_write_reg(DISPC_POL_FREQ, l);
+
+ calc_ck_div(is_tft, panel->pixel_clock * 1000, &lck_div, &pck_div);
+
+ l = dispc_read_reg(DISPC_DIVISOR);
+ l &= ~(FLD_MASK(16, 8) | FLD_MASK(0, 8));
+ l |= (lck_div << 16) | (pck_div << 0);
+ dispc_write_reg(DISPC_DIVISOR, l);
+
+ /* update panel info with the exact clock */
+ fck = clk_get_rate(dispc.dss1_fck);
+ panel->pixel_clock = fck / lck_div / pck_div / 1000;
+}
+
+int omap_dispc_request_irq(void (*callback)(void *data), void *data)
+{
+ int r = 0;
+
+ BUG_ON(callback == NULL);
+
+ if (dispc.irq_callback)
+ r = -EBUSY;
+ else {
+ dispc.irq_callback = callback;
+ dispc.irq_callback_data = data;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(omap_dispc_request_irq);
+
+void omap_dispc_enable_irqs(int irq_mask)
+{
+ enable_lcd_clocks(1);
+ dispc.enabled_irqs = irq_mask;
+ irq_mask |= DISPC_IRQ_MASK_ERROR;
+ MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_enable_irqs);
+
+void omap_dispc_disable_irqs(int irq_mask)
+{
+ enable_lcd_clocks(1);
+ dispc.enabled_irqs &= ~irq_mask;
+ irq_mask &= ~DISPC_IRQ_MASK_ERROR;
+ MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_disable_irqs);
+
+void omap_dispc_free_irq(void)
+{
+ enable_lcd_clocks(1);
+ omap_dispc_disable_irqs(DISPC_IRQ_MASK_ALL);
+ dispc.irq_callback = NULL;
+ dispc.irq_callback_data = NULL;
+ enable_lcd_clocks(0);
+}
+EXPORT_SYMBOL(omap_dispc_free_irq);
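+/*
+ * Illustrative use of the callback interface above by an external
+ * client (my_frame_cb and my_data are hypothetical names):
+ *
+ *	if (omap_dispc_request_irq(my_frame_cb, my_data) == 0)
+ *		omap_dispc_enable_irqs(DISPC_IRQ_VSYNC);
+ *	...
+ *	omap_dispc_free_irq();
+ */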
+
+static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
+{
+ u32 stat = dispc_read_reg(DISPC_IRQSTATUS);
+
+ if (stat & DISPC_IRQ_FRAMEMASK)
+ complete(&dispc.frame_done);
+
+ if (stat & DISPC_IRQ_MASK_ERROR) {
+ if (printk_ratelimit()) {
+ dev_err(dispc.fbdev->dev, "irq error status %04x\n",
+ stat & 0x7fff);
+ }
+ }
+
+ if ((stat & dispc.enabled_irqs) && dispc.irq_callback)
+ dispc.irq_callback(dispc.irq_callback_data);
+
+ dispc_write_reg(DISPC_IRQSTATUS, stat);
+
+ return IRQ_HANDLED;
+}
+
+static int get_dss_clocks(void)
+{
+ if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
+ dev_err(dispc.fbdev->dev, "can't get dss_ick");
+ return PTR_ERR(dispc.dss_ick);
+ }
+
+ if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
+ dev_err(dispc.fbdev->dev, "can't get dss1_fck");
+ clk_put(dispc.dss_ick);
+ return PTR_ERR(dispc.dss1_fck);
+ }
+
+ if (IS_ERR((dispc.dss_54m_fck =
+ clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
+ dev_err(dispc.fbdev->dev, "can't get dss_54m_fck");
+ clk_put(dispc.dss_ick);
+ clk_put(dispc.dss1_fck);
+ return PTR_ERR(dispc.dss_54m_fck);
+ }
+
+ return 0;
+}
+
+static void put_dss_clocks(void)
+{
+ clk_put(dispc.dss_54m_fck);
+ clk_put(dispc.dss1_fck);
+ clk_put(dispc.dss_ick);
+}
+
+static void enable_lcd_clocks(int enable)
+{
+ if (enable)
+ clk_enable(dispc.dss1_fck);
+ else
+ clk_disable(dispc.dss1_fck);
+}
+
+static void enable_interface_clocks(int enable)
+{
+ if (enable)
+ clk_enable(dispc.dss_ick);
+ else
+ clk_disable(dispc.dss_ick);
+}
+
+static void enable_digit_clocks(int enable)
+{
+ if (enable)
+ clk_enable(dispc.dss_54m_fck);
+ else
+ clk_disable(dispc.dss_54m_fck);
+}
+
+static void omap_dispc_suspend(void)
+{
+ if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
+ init_completion(&dispc.frame_done);
+ omap_dispc_enable_lcd_out(0);
+ if (!wait_for_completion_timeout(&dispc.frame_done,
+ msecs_to_jiffies(500))) {
+ dev_err(dispc.fbdev->dev,
+ "timeout waiting for FRAME DONE\n");
+ }
+ enable_lcd_clocks(0);
+ }
+}
+
+static void omap_dispc_resume(void)
+{
+ if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
+ enable_lcd_clocks(1);
+ if (!dispc.ext_mode) {
+ set_lcd_timings();
+ load_palette();
+ }
+ omap_dispc_enable_lcd_out(1);
+ }
+}
+
+
+static int omap_dispc_update_window(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*complete_callback)(void *arg),
+ void *complete_callback_data)
+{
+ return dispc.update_mode == OMAPFB_UPDATE_DISABLED ? -ENODEV : 0;
+}
+
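+/*
+ * Map an already existing framebuffer region (e.g. one whose physical
+ * address was fixed by the bootloader) into kernel virtual space with
+ * write-combining, using a temporary vm_area_struct on init_mm to drive
+ * io_remap_pfn_range().
+ */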
+static int mmap_kern(struct omapfb_mem_region *region)
+{
+ struct vm_struct *kvma;
+ struct vm_area_struct vma;
+ pgprot_t pgprot;
+ unsigned long vaddr;
+
+ kvma = get_vm_area(region->size, VM_IOREMAP);
+ if (kvma == NULL) {
+ dev_err(dispc.fbdev->dev, "can't get kernel vm area\n");
+ return -ENOMEM;
+ }
+ vma.vm_mm = &init_mm;
+
+ vaddr = (unsigned long)kvma->addr;
+
+ pgprot = pgprot_writecombine(pgprot_kernel);
+ vma.vm_start = vaddr;
+ vma.vm_end = vaddr + region->size;
+ if (io_remap_pfn_range(&vma, vaddr, region->paddr >> PAGE_SHIFT,
+ region->size, pgprot) < 0) {
+ dev_err(dispc.fbdev->dev, "kernel mmap for FBMEM failed\n");
+ return -EAGAIN;
+ }
+ region->vaddr = (void *)vaddr;
+
+ return 0;
+}
+
+static void mmap_user_open(struct vm_area_struct *vma)
+{
+ int plane = (int)vma->vm_private_data;
+
+ atomic_inc(&dispc.map_count[plane]);
+}
+
+static void mmap_user_close(struct vm_area_struct *vma)
+{
+ int plane = (int)vma->vm_private_data;
+
+ atomic_dec(&dispc.map_count[plane]);
+}
+
+static struct vm_operations_struct mmap_user_ops = {
+ .open = mmap_user_open,
+ .close = mmap_user_close,
+};
+
+static int omap_dispc_mmap_user(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct omapfb_plane_struct *plane = info->par;
+ unsigned long off;
+ unsigned long start;
+ u32 len;
+
+ if (vma->vm_end - vma->vm_start == 0)
+ return 0;
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+ off = vma->vm_pgoff << PAGE_SHIFT;
+
+ start = info->fix.smem_start;
+ len = info->fix.smem_len;
+ if (off >= len)
+ return -EINVAL;
+ if ((vma->vm_end - vma->vm_start + off) > len)
+ return -EINVAL;
+ off += start;
+ vma->vm_pgoff = off >> PAGE_SHIFT;
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mmap_user_ops;
+ vma->vm_private_data = (void *)plane->idx;
+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ /* vm_ops.open won't be called for mmap itself. */
+ atomic_inc(&dispc.map_count[plane->idx]);
+ return 0;
+}
+
+static void unmap_kern(struct omapfb_mem_region *region)
+{
+ vunmap(region->vaddr);
+}
+
+static int alloc_palette_ram(void)
+{
+ dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
+ MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL);
+ if (dispc.palette_vaddr == NULL) {
+ dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void free_palette_ram(void)
+{
+ dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
+ dispc.palette_vaddr, dispc.palette_paddr);
+}
+
+static int alloc_fbmem(struct omapfb_mem_region *region)
+{
+ region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
+ region->size, &region->paddr, GFP_KERNEL);
+
+ if (region->vaddr == NULL) {
+ dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void free_fbmem(struct omapfb_mem_region *region)
+{
+ dma_free_writecombine(dispc.fbdev->dev, region->size,
+ region->vaddr, region->paddr);
+}
+
+static struct resmap *init_resmap(unsigned long start, size_t size)
+{
+ unsigned page_cnt;
+ struct resmap *res_map;
+
+ page_cnt = PAGE_ALIGN(size) / PAGE_SIZE;
+ res_map =
+ kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt), GFP_KERNEL);
+ if (res_map == NULL)
+ return NULL;
+ res_map->start = start;
+ res_map->page_cnt = page_cnt;
+ res_map->map = (unsigned long *)(res_map + 1);
+ return res_map;
+}
+
+static void cleanup_resmap(struct resmap *res_map)
+{
+ kfree(res_map);
+}
+
+static inline int resmap_mem_type(unsigned long start)
+{
+ if (start >= OMAP2_SRAM_START &&
+ start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
+ return OMAPFB_MEMTYPE_SRAM;
+ else
+ return OMAPFB_MEMTYPE_SDRAM;
+}
+
+static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr)
+{
+ return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0;
+}
+
+static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
+{
+ BUG_ON(resmap_page_reserved(res_map, page_nr));
+ *RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
+}
+
+static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
+{
+ BUG_ON(!resmap_page_reserved(res_map, page_nr));
+ *RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
+}
+
+static void resmap_reserve_region(unsigned long start, size_t size)
+{
+
+ struct resmap *res_map;
+ unsigned start_page;
+ unsigned end_page;
+ int mtype;
+ unsigned i;
+
+ mtype = resmap_mem_type(start);
+ res_map = dispc.res_map[mtype];
+ dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n",
+ mtype, start, size);
+ start_page = (start - res_map->start) / PAGE_SIZE;
+ end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
+ for (i = start_page; i < end_page; i++)
+ resmap_reserve_page(res_map, i);
+}
+
+static void resmap_free_region(unsigned long start, size_t size)
+{
+ struct resmap *res_map;
+ unsigned start_page;
+ unsigned end_page;
+ unsigned i;
+ int mtype;
+
+ mtype = resmap_mem_type(start);
+ res_map = dispc.res_map[mtype];
+ dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n",
+ mtype, start, size);
+ start_page = (start - res_map->start) / PAGE_SIZE;
+ end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
+ for (i = start_page; i < end_page; i++)
+ resmap_free_page(res_map, i);
+}
+
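+/*
+ * First-fit allocator over the reservation bitmap: scan for a run of
+ * 'size' free pages, mark the run reserved and return its start
+ * address, or 0 if no large enough hole exists.
+ */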
+static unsigned long resmap_alloc_region(int mtype, size_t size)
+{
+ unsigned i;
+ unsigned total;
+ unsigned start_page;
+ unsigned long start;
+ struct resmap *res_map = dispc.res_map[mtype];
+
+ BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);
+
+ size = PAGE_ALIGN(size) / PAGE_SIZE;
+ start_page = 0;
+ total = 0;
+ for (i = 0; i < res_map->page_cnt; i++) {
+ if (resmap_page_reserved(res_map, i)) {
+ start_page = i + 1;
+ total = 0;
+ } else if (++total == size)
+ break;
+ }
+ if (total < size)
+ return 0;
+
+ start = res_map->start + start_page * PAGE_SIZE;
+ resmap_reserve_region(start, size * PAGE_SIZE);
+
+ return start;
+}
+
+/* Note that this will only work for user mappings, we don't deal with
+ * kernel mappings here, so fbcon will keep using the old region.
+ */
+static int omap_dispc_setup_mem(int plane, size_t size, int mem_type,
+ unsigned long *paddr)
+{
+ struct omapfb_mem_region *rg;
+ unsigned long new_addr = 0;
+
+ if ((unsigned)plane >= dispc.mem_desc.region_cnt)
+ return -EINVAL;
+ if (mem_type >= DISPC_MEMTYPE_NUM)
+ return -EINVAL;
+ if (dispc.res_map[mem_type] == NULL)
+ return -ENOMEM;
+ rg = &dispc.mem_desc.region[plane];
+ if (size == rg->size && mem_type == rg->type)
+ return 0;
+ if (atomic_read(&dispc.map_count[plane]))
+ return -EBUSY;
+ if (rg->size != 0)
+ resmap_free_region(rg->paddr, rg->size);
+ if (size != 0) {
+ new_addr = resmap_alloc_region(mem_type, size);
+ if (!new_addr) {
+ /* Reallocate old region. */
+ resmap_reserve_region(rg->paddr, rg->size);
+ return -ENOMEM;
+ }
+ }
+ rg->paddr = new_addr;
+ rg->size = size;
+ rg->type = mem_type;
+
+ *paddr = new_addr;
+
+ return 0;
+}
+
+static int setup_fbmem(struct omapfb_mem_desc *req_md)
+{
+ struct omapfb_mem_region *rg;
+ int i;
+ int r;
+ unsigned long mem_start[DISPC_MEMTYPE_NUM];
+ unsigned long mem_end[DISPC_MEMTYPE_NUM];
+
+ if (!req_md->region_cnt) {
+ dev_err(dispc.fbdev->dev, "no memory regions defined\n");
+ return -ENOENT;
+ }
+
+ rg = &req_md->region[0];
+ memset(mem_start, 0xff, sizeof(mem_start));
+ memset(mem_end, 0, sizeof(mem_end));
+
+ for (i = 0; i < req_md->region_cnt; i++, rg++) {
+ int mtype;
+ if (rg->paddr) {
+ rg->alloc = 0;
+ if (rg->vaddr == NULL) {
+ rg->map = 1;
+ if ((r = mmap_kern(rg)) < 0)
+ return r;
+ }
+ } else {
+ if (rg->type != OMAPFB_MEMTYPE_SDRAM) {
+ dev_err(dispc.fbdev->dev,
+ "unsupported memory type\n");
+ return -EINVAL;
+ }
+ rg->alloc = rg->map = 1;
+ if ((r = alloc_fbmem(rg)) < 0)
+ return r;
+ }
+ mtype = rg->type;
+
+ if (rg->paddr < mem_start[mtype])
+ mem_start[mtype] = rg->paddr;
+ if (rg->paddr + rg->size > mem_end[mtype])
+ mem_end[mtype] = rg->paddr + rg->size;
+ }
+
+ for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+ unsigned long start;
+ size_t size;
+ if (mem_end[i] == 0)
+ continue;
+ start = mem_start[i];
+ size = mem_end[i] - start;
+ dispc.res_map[i] = init_resmap(start, size);
+ r = -ENOMEM;
+ if (dispc.res_map[i] == NULL)
+ goto fail;
+ /* Initial state is that everything is reserved. This
+ * includes possible holes as well, which will never be
+ * freed.
+ */
+ resmap_reserve_region(start, size);
+ }
+
+ dispc.mem_desc = *req_md;
+
+ return 0;
+fail:
+ for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+ if (dispc.res_map[i] != NULL)
+ cleanup_resmap(dispc.res_map[i]);
+ }
+ return r;
+}
+
+static void cleanup_fbmem(void)
+{
+ struct omapfb_mem_region *rg;
+ int i;
+
+ for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
+ if (dispc.res_map[i] != NULL)
+ cleanup_resmap(dispc.res_map[i]);
+ }
+ rg = &dispc.mem_desc.region[0];
+ for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
+ if (rg->alloc)
+ free_fbmem(rg);
+ else {
+ if (rg->map)
+ unmap_kern(rg);
+ }
+ }
+}
+
+static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r;
+ u32 l;
+ struct lcd_panel *panel = fbdev->panel;
+ int tmo = 10000;
+ int skip_init = 0;
+ int i;
+
+ memset(&dispc, 0, sizeof(dispc));
+
+ dispc.base = io_p2v(DISPC_BASE);
+ dispc.fbdev = fbdev;
+ dispc.ext_mode = ext_mode;
+
+ init_completion(&dispc.frame_done);
+
+ if ((r = get_dss_clocks()) < 0)
+ return r;
+
+ enable_interface_clocks(1);
+ enable_lcd_clocks(1);
+
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ l = dispc_read_reg(DISPC_CONTROL);
+ /* LCD enabled ? */
+ if (l & 1) {
+ pr_info("omapfb: skipping hardware initialization\n");
+ skip_init = 1;
+ }
+#endif
+
+ if (!skip_init) {
+ /* Reset monitoring works only w/ the 54M clk */
+ enable_digit_clocks(1);
+
+ /* Soft reset */
+ MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1);
+
+ while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) {
+ if (!--tmo) {
+ dev_err(dispc.fbdev->dev, "soft reset failed\n");
+ r = -ENODEV;
+ enable_digit_clocks(0);
+ goto fail1;
+ }
+ }
+
+ enable_digit_clocks(0);
+ }
+
+ /* Enable smart idle and autoidle */
+ l = dispc_read_reg(DISPC_SYSCONFIG);
+ l &= ~((3 << 12) | (3 << 3));
+ l |= (2 << 12) | (2 << 3) | (1 << 0);
+ dispc_write_reg(DISPC_SYSCONFIG, l);
+ omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);
+
+ /* Set functional clock autogating */
+ l = dispc_read_reg(DISPC_CONFIG);
+ l |= 1 << 9;
+ dispc_write_reg(DISPC_CONFIG, l);
+
+ l = dispc_read_reg(DISPC_IRQSTATUS);
+ dispc_write_reg(DISPC_IRQSTATUS, l);
+
+ /* Enable those that we handle always */
+ omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);
+
+ if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
+ 0, MODULE_NAME, fbdev)) < 0) {
+ dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n");
+ goto fail1;
+ }
+
+ /* L3 firewall setting: enable access to OCM RAM */
+ __raw_writel(0x402000b0, io_p2v(0x680050a0));
+
+ if ((r = alloc_palette_ram()) < 0)
+ goto fail2;
+
+ if ((r = setup_fbmem(req_vram)) < 0)
+ goto fail3;
+
+ if (!skip_init) {
+ for (i = 0; i < dispc.mem_desc.region_cnt; i++) {
+ memset(dispc.mem_desc.region[i].vaddr, 0,
+ dispc.mem_desc.region[i].size);
+ }
+
+ /* Set logic clock to fck, pixel clock to fck/2 for now */
+ MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16);
+ MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0);
+
+ setup_plane_fifo(0, ext_mode);
+ setup_plane_fifo(1, ext_mode);
+ setup_plane_fifo(2, ext_mode);
+
+ setup_color_conv_coef();
+
+ set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT);
+ set_load_mode(DISPC_LOAD_FRAME_ONLY);
+
+ if (!ext_mode) {
+ set_lcd_data_lines(panel->data_lines);
+ omap_dispc_set_lcd_size(panel->x_res, panel->y_res);
+ set_lcd_timings();
+ } else
+ set_lcd_data_lines(panel->bpp);
+ enable_rfbi_mode(ext_mode);
+ }
+
+ l = dispc_read_reg(DISPC_REVISION);
+ pr_info("omapfb: DISPC version %d.%d initialized\n",
+ l >> 4 & 0x0f, l & 0x0f);
+ enable_lcd_clocks(0);
+
+ return 0;
+fail3:
+ free_palette_ram();
+fail2:
+ free_irq(INT_24XX_DSS_IRQ, fbdev);
+fail1:
+ enable_lcd_clocks(0);
+ enable_interface_clocks(0);
+ put_dss_clocks();
+
+ return r;
+}
+
+static void omap_dispc_cleanup(void)
+{
+ int i;
+
+ omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ /* This will also disable clocks that are on */
+ for (i = 0; i < dispc.mem_desc.region_cnt; i++)
+ omap_dispc_enable_plane(i, 0);
+ cleanup_fbmem();
+ free_palette_ram();
+ free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
+ enable_interface_clocks(0);
+ put_dss_clocks();
+}
+
+const struct lcd_ctrl omap2_int_ctrl = {
+ .name = "internal",
+ .init = omap_dispc_init,
+ .cleanup = omap_dispc_cleanup,
+ .get_caps = omap_dispc_get_caps,
+ .set_update_mode = omap_dispc_set_update_mode,
+ .get_update_mode = omap_dispc_get_update_mode,
+ .update_window = omap_dispc_update_window,
+ .suspend = omap_dispc_suspend,
+ .resume = omap_dispc_resume,
+ .setup_plane = omap_dispc_setup_plane,
+ .setup_mem = omap_dispc_setup_mem,
+ .set_scale = omap_dispc_set_scale,
+ .enable_plane = omap_dispc_enable_plane,
+ .set_color_key = omap_dispc_set_color_key,
+ .get_color_key = omap_dispc_get_color_key,
+ .mmap = omap_dispc_mmap_user,
+};
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h
new file mode 100644
index 000000000000..eb1512b56ce8
--- /dev/null
+++ b/drivers/video/omap/dispc.h
@@ -0,0 +1,43 @@
+#ifndef _DISPC_H
+#define _DISPC_H
+
+#include <linux/interrupt.h>
+
+#define DISPC_PLANE_GFX 0
+#define DISPC_PLANE_VID1 1
+#define DISPC_PLANE_VID2 2
+
+#define DISPC_RGB_1_BPP 0x00
+#define DISPC_RGB_2_BPP 0x01
+#define DISPC_RGB_4_BPP 0x02
+#define DISPC_RGB_8_BPP 0x03
+#define DISPC_RGB_12_BPP 0x04
+#define DISPC_RGB_16_BPP 0x06
+#define DISPC_RGB_24_BPP 0x08
+#define DISPC_RGB_24_BPP_UNPACK_32 0x09
+#define DISPC_YUV2_422 0x0a
+#define DISPC_UYVY_422 0x0b
+
+#define DISPC_BURST_4x32 0
+#define DISPC_BURST_8x32 1
+#define DISPC_BURST_16x32 2
+
+#define DISPC_LOAD_CLUT_AND_FRAME 0x00
+#define DISPC_LOAD_CLUT_ONLY 0x01
+#define DISPC_LOAD_FRAME_ONLY 0x02
+#define DISPC_LOAD_CLUT_ONCE_FRAME 0x03
+
+#define DISPC_TFT_DATA_LINES_12 0
+#define DISPC_TFT_DATA_LINES_16 1
+#define DISPC_TFT_DATA_LINES_18 2
+#define DISPC_TFT_DATA_LINES_24 3
+
+extern void omap_dispc_set_lcd_size(int width, int height);
+
+extern void omap_dispc_enable_lcd_out(int enable);
+extern void omap_dispc_enable_digit_out(int enable);
+
+extern int omap_dispc_request_irq(void (*callback)(void *data), void *data);
+extern void omap_dispc_free_irq(void);
+
+#endif
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
new file mode 100644
index 000000000000..dc48e02f215c
--- /dev/null
+++ b/drivers/video/omap/hwa742.c
@@ -0,0 +1,1077 @@
+/*
+ * Epson HWA742 LCD controller driver
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Authors: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Imre Deak <imre.deak@nokia.com>
+ * YUV support: Jussi Laako <jussi.laako@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/hwa742.h>
+
+#define HWA742_REV_CODE_REG 0x0
+#define HWA742_CONFIG_REG 0x2
+#define HWA742_PLL_DIV_REG 0x4
+#define HWA742_PLL_0_REG 0x6
+#define HWA742_PLL_1_REG 0x8
+#define HWA742_PLL_2_REG 0xa
+#define HWA742_PLL_3_REG 0xc
+#define HWA742_PLL_4_REG 0xe
+#define HWA742_CLK_SRC_REG 0x12
+#define HWA742_PANEL_TYPE_REG 0x14
+#define HWA742_H_DISP_REG 0x16
+#define HWA742_H_NDP_REG 0x18
+#define HWA742_V_DISP_1_REG 0x1a
+#define HWA742_V_DISP_2_REG 0x1c
+#define HWA742_V_NDP_REG 0x1e
+#define HWA742_HS_W_REG 0x20
+#define HWA742_HP_S_REG 0x22
+#define HWA742_VS_W_REG 0x24
+#define HWA742_VP_S_REG 0x26
+#define HWA742_PCLK_POL_REG 0x28
+#define HWA742_INPUT_MODE_REG 0x2a
+#define HWA742_TRANSL_MODE_REG1 0x2e
+#define HWA742_DISP_MODE_REG 0x34
+#define HWA742_WINDOW_TYPE 0x36
+#define HWA742_WINDOW_X_START_0 0x38
+#define HWA742_WINDOW_X_START_1 0x3a
+#define HWA742_WINDOW_Y_START_0 0x3c
+#define HWA742_WINDOW_Y_START_1 0x3e
+#define HWA742_WINDOW_X_END_0 0x40
+#define HWA742_WINDOW_X_END_1 0x42
+#define HWA742_WINDOW_Y_END_0 0x44
+#define HWA742_WINDOW_Y_END_1 0x46
+#define HWA742_MEMORY_WRITE_LSB 0x48
+#define HWA742_MEMORY_WRITE_MSB 0x49
+#define HWA742_MEMORY_READ_0 0x4a
+#define HWA742_MEMORY_READ_1 0x4c
+#define HWA742_MEMORY_READ_2 0x4e
+#define HWA742_POWER_SAVE 0x56
+#define HWA742_NDP_CTRL 0x58
+
+#define HWA742_AUTO_UPDATE_TIME (HZ / 20)
+
+/* Reserve 4 request slots for requests in irq context */
+#define REQ_POOL_SIZE 24
+#define IRQ_REQ_POOL_SIZE 4
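+/*
+ * Process-context callers of alloc_req() take req_sema, which is
+ * expected to be initialized to REQ_POOL_SIZE - IRQ_REQ_POOL_SIZE, so
+ * the last IRQ_REQ_POOL_SIZE entries of the pool stay available to
+ * interrupt context, which cannot sleep on the semaphore.
+ */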
+
+#define REQ_FROM_IRQ_POOL 0x01
+
+#define REQ_COMPLETE 0
+#define REQ_PENDING 1
+
+struct update_param {
+ int x, y, width, height;
+ int color_mode;
+ int flags;
+};
+
+struct hwa742_request {
+ struct list_head entry;
+ unsigned int flags;
+
+ int (*handler)(struct hwa742_request *req);
+ void (*complete)(void *data);
+ void *complete_data;
+
+ union {
+ struct update_param update;
+ struct completion *sync;
+ } par;
+};
+
+static struct {
+ enum omapfb_update_mode update_mode;
+ enum omapfb_update_mode update_mode_before_suspend;
+
+ struct timer_list auto_update_timer;
+ int stop_auto_update;
+ struct omapfb_update_window auto_update_window;
+ unsigned te_connected:1;
+ unsigned vsync_only:1;
+
+ struct hwa742_request req_pool[REQ_POOL_SIZE];
+ struct list_head pending_req_list;
+ struct list_head free_req_list;
+ struct semaphore req_sema;
+ spinlock_t req_lock;
+
+ struct extif_timings reg_timings, lut_timings;
+
+ int prev_color_mode;
+ int prev_flags;
+ int window_type;
+
+ u32 max_transmit_size;
+ u32 extif_clk_period;
+ unsigned long pix_tx_time;
+ unsigned long line_upd_time;
+
+
+ struct omapfb_device *fbdev;
+ struct lcd_ctrl_extif *extif;
+ struct lcd_ctrl *int_ctrl;
+
+ void (*power_up)(struct device *dev);
+ void (*power_down)(struct device *dev);
+} hwa742;
+
+struct lcd_ctrl hwa742_ctrl;
+
+static u8 hwa742_read_reg(u8 reg)
+{
+ u8 data;
+
+ hwa742.extif->set_bits_per_cycle(8);
+ hwa742.extif->write_command(&reg, 1);
+ hwa742.extif->read_data(&data, 1);
+
+ return data;
+}
+
+static void hwa742_write_reg(u8 reg, u8 data)
+{
+ hwa742.extif->set_bits_per_cycle(8);
+ hwa742.extif->write_command(&reg, 1);
+ hwa742.extif->write_data(&data, 1);
+}
+
+static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
+{
+ u8 tmp[8];
+ u8 cmd;
+
+ x_end--;
+ y_end--;
+ tmp[0] = x_start;
+ tmp[1] = x_start >> 8;
+ tmp[2] = y_start;
+ tmp[3] = y_start >> 8;
+ tmp[4] = x_end;
+ tmp[5] = x_end >> 8;
+ tmp[6] = y_end;
+ tmp[7] = y_end >> 8;
+
+ hwa742.extif->set_bits_per_cycle(8);
+ cmd = HWA742_WINDOW_X_START_0;
+
+ hwa742.extif->write_command(&cmd, 1);
+
+ hwa742.extif->write_data(tmp, 8);
+}
+
+static void set_format_regs(int conv, int transl, int flags)
+{
+ if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
+ hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
+#ifdef VERBOSE
+ dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
+#endif
+ } else {
+ hwa742.window_type = (hwa742.window_type & 0xfc);
+#ifdef VERBOSE
+ dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
+#endif
+ }
+
+ hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
+ hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
+ hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
+}
+
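+/*
+ * Pick the tear-elimination mode for an update. As the comparisons
+ * below appear to implement: plain vsync-based sync is used when
+ * forced, when one line can be transferred faster than the panel
+ * refreshes a line, or when the whole window can be sent before the
+ * refresh passes its bottom edge; otherwise a line-triggered tearsync
+ * is armed one line below the window start.
+ */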
+static void enable_tearsync(int y, int width, int height, int screen_height,
+ int force_vsync)
+{
+ u8 b;
+
+ b = hwa742_read_reg(HWA742_NDP_CTRL);
+ b |= 1 << 2;
+ hwa742_write_reg(HWA742_NDP_CTRL, b);
+
+ if (likely(hwa742.vsync_only || force_vsync)) {
+ hwa742.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
+ hwa742.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ if ((width * hwa742.pix_tx_time / 1000) * height <
+ (y + height) * (hwa742.line_upd_time / 1000)) {
+ hwa742.extif->enable_tearsync(1, 0);
+ return;
+ }
+
+ hwa742.extif->enable_tearsync(1, y + 1);
+}
+
+static void disable_tearsync(void)
+{
+ u8 b;
+
+ hwa742.extif->enable_tearsync(0, 0);
+
+ b = hwa742_read_reg(HWA742_NDP_CTRL);
+ b &= ~(1 << 2);
+ hwa742_write_reg(HWA742_NDP_CTRL, b);
+}
+
+static inline struct hwa742_request *alloc_req(void)
+{
+ unsigned long flags;
+ struct hwa742_request *req;
+ int req_flags = 0;
+
+ if (!in_interrupt())
+ down(&hwa742.req_sema);
+ else
+ req_flags = REQ_FROM_IRQ_POOL;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+ BUG_ON(list_empty(&hwa742.free_req_list));
+ req = list_entry(hwa742.free_req_list.next,
+ struct hwa742_request, entry);
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+ INIT_LIST_HEAD(&req->entry);
+ req->flags = req_flags;
+
+ return req;
+}
+
+static inline void free_req(struct hwa742_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+
+ list_del(&req->entry);
+ list_add(&req->entry, &hwa742.free_req_list);
+ if (!(req->flags & REQ_FROM_IRQ_POOL))
+ up(&hwa742.req_sema);
+
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+}
+
+static void process_pending_requests(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+
+ while (!list_empty(&hwa742.pending_req_list)) {
+ struct hwa742_request *req;
+ void (*complete)(void *);
+ void *complete_data;
+
+ req = list_entry(hwa742.pending_req_list.next,
+ struct hwa742_request, entry);
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+ if (req->handler(req) == REQ_PENDING)
+ return;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+}
+
+static void submit_req_list(struct list_head *head)
+{
+ unsigned long flags;
+ int process = 1;
+
+ spin_lock_irqsave(&hwa742.req_lock, flags);
+ if (likely(!list_empty(&hwa742.pending_req_list)))
+ process = 0;
+ list_splice_init(head, hwa742.pending_req_list.prev);
+ spin_unlock_irqrestore(&hwa742.req_lock, flags);
+
+ if (process)
+ process_pending_requests();
+}
+
+static void request_complete(void *data)
+{
+ struct hwa742_request *req = (struct hwa742_request *)data;
+ void (*complete)(void *);
+ void *complete_data;
+
+ complete = req->complete;
+ complete_data = req->complete_data;
+
+ free_req(req);
+
+ if (complete)
+ complete(complete_data);
+
+ process_pending_requests();
+}
+
+static int send_frame_handler(struct hwa742_request *req)
+{
+ struct update_param *par = &req->par.update;
+ int x = par->x;
+ int y = par->y;
+ int w = par->width;
+ int h = par->height;
+ int bpp;
+ int conv, transl;
+ unsigned long offset;
+ int color_mode = par->color_mode;
+ int flags = par->flags;
+ int scr_width = hwa742.fbdev->panel->x_res;
+ int scr_height = hwa742.fbdev->panel->y_res;
+
+#ifdef VERBOSE
+ dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
+ "color_mode %d flags %d\n",
+ x, y, w, h, scr_width, color_mode, flags);
+#endif
+
+ switch (color_mode) {
+ case OMAPFB_COLOR_YUV422:
+ bpp = 16;
+ conv = 0x08;
+ transl = 0x25;
+ break;
+ case OMAPFB_COLOR_YUV420:
+ bpp = 12;
+ conv = 0x09;
+ transl = 0x25;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ bpp = 16;
+ conv = 0x01;
+ transl = 0x05;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (hwa742.prev_flags != flags ||
+ hwa742.prev_color_mode != color_mode) {
+ set_format_regs(conv, transl, flags);
+ hwa742.prev_color_mode = color_mode;
+ hwa742.prev_flags = flags;
+ }
+ flags = req->par.update.flags;
+ if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
+ enable_tearsync(y, scr_width, h, scr_height,
+ flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
+ else
+ disable_tearsync();
+
+ set_window_regs(x, y, x + w, y + h);
+
+ offset = (scr_width * y + x) * bpp / 8;
+
+ hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
+ OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
+ color_mode);
+
+ hwa742.extif->set_bits_per_cycle(16);
+
+ hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
+ hwa742.extif->transfer_area(w, h, request_complete, req);
+
+ return REQ_PENDING;
+}
+
+static void send_frame_complete(void *data)
+{
+ hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
+}
+
+#define ADD_PREQ(_x, _y, _w, _h) do { \
+ req = alloc_req(); \
+ req->handler = send_frame_handler; \
+ req->complete = send_frame_complete; \
+ req->par.update.x = _x; \
+ req->par.update.y = _y; \
+ req->par.update.width = _w; \
+ req->par.update.height = _h; \
+ req->par.update.color_mode = color_mode;\
+ req->par.update.flags = flags; \
+ list_add_tail(&req->entry, req_head); \
+} while(0)
+
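+/*
+ * Split an update window into controller requests: odd-aligned single
+ * pixel columns on the left/right edges go out as separate requests,
+ * and the even-width middle part is cut into horizontal bands whenever
+ * one transfer would exceed max_transmit_size. Only the first request
+ * queued may keep the TEARSYNC flag.
+ */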
+static void create_req_list(struct omapfb_update_window *win,
+ struct list_head *req_head)
+{
+ struct hwa742_request *req;
+ int x = win->x;
+ int y = win->y;
+ int width = win->width;
+ int height = win->height;
+ int color_mode;
+ int flags;
+
+ flags = win->format & ~OMAPFB_FORMAT_MASK;
+ color_mode = win->format & OMAPFB_FORMAT_MASK;
+
+ if (x & 1) {
+ ADD_PREQ(x, y, 1, height);
+ width--;
+ x++;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+ if (width & ~1) {
+ unsigned int xspan = width & ~1;
+ unsigned int ystart = y;
+ unsigned int yspan = height;
+
+ if (xspan * height * 2 > hwa742.max_transmit_size) {
+ yspan = hwa742.max_transmit_size / (xspan * 2);
+ ADD_PREQ(x, ystart, xspan, yspan);
+ ystart += yspan;
+ yspan = height - yspan;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+
+ ADD_PREQ(x, ystart, xspan, yspan);
+ x += xspan;
+ width -= xspan;
+ flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
+ }
+ if (width)
+ ADD_PREQ(x, y, 1, height);
+}
+
+static void auto_update_complete(void *data)
+{
+ if (!hwa742.stop_auto_update)
+ mod_timer(&hwa742.auto_update_timer,
+ jiffies + HWA742_AUTO_UPDATE_TIME);
+}
+
+static void hwa742_update_window_auto(unsigned long arg)
+{
+ LIST_HEAD(req_list);
+ struct hwa742_request *last;
+
+ create_req_list(&hwa742.auto_update_window, &req_list);
+ last = list_entry(req_list.prev, struct hwa742_request, entry);
+
+ last->complete = auto_update_complete;
+ last->complete_data = NULL;
+
+ submit_req_list(&req_list);
+}
+
+int hwa742_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*complete_callback)(void *arg),
+ void *complete_callback_data)
+{
+ LIST_HEAD(req_list);
+ struct hwa742_request *last;
+ int r = 0;
+
+ if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
+ dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
+ r = -EINVAL;
+ goto out;
+ }
+ if (unlikely(win->format &
+ ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
+ OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
+ dev_dbg(hwa742.fbdev->dev, "invalid window flag\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ create_req_list(win, &req_list);
+ last = list_entry(req_list.prev, struct hwa742_request, entry);
+
+ last->complete = complete_callback;
+ last->complete_data = (void *)complete_callback_data;
+
+ submit_req_list(&req_list);
+
+out:
+ return r;
+}
+EXPORT_SYMBOL(hwa742_update_window_async);
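+/*
+ * Illustrative call from a client (window geometry, my_done_cb and
+ * my_data are hypothetical): fill in an omapfb_update_window and
+ * request an asynchronous update with an optional completion callback:
+ *
+ *	struct omapfb_update_window win = {
+ *		.x = 0, .y = 0, .width = 240, .height = 320,
+ *		.format = OMAPFB_COLOR_RGB565 | OMAPFB_FORMAT_FLAG_TEARSYNC,
+ *	};
+ *	hwa742_update_window_async(fbi, &win, my_done_cb, my_data);
+ */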
+
+static int hwa742_setup_plane(int plane, int channel_out,
+ unsigned long offset, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ if (plane != OMAPFB_PLANE_GFX ||
+ channel_out != OMAPFB_CHANNEL_OUT_LCD)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hwa742_enable_plane(int plane, int enable)
+{
+ if (plane != 0)
+ return -EINVAL;
+
+ hwa742.int_ctrl->enable_plane(plane, enable);
+
+ return 0;
+}
+
+static int sync_handler(struct hwa742_request *req)
+{
+ complete(req->par.sync);
+ return REQ_COMPLETE;
+}
+
+static void hwa742_sync(void)
+{
+ LIST_HEAD(req_list);
+ struct hwa742_request *req;
+ struct completion comp;
+
+ req = alloc_req();
+
+ req->handler = sync_handler;
+ req->complete = NULL;
+ init_completion(&comp);
+ req->par.sync = &comp;
+
+ list_add(&req->entry, &req_list);
+ submit_req_list(&req_list);
+
+ wait_for_completion(&comp);
+}
+
+static void hwa742_bind_client(struct omapfb_notifier_block *nb)
+{
+ dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
+ if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
+ omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
+ }
+}
+
+static int hwa742_set_update_mode(enum omapfb_update_mode mode)
+{
+ if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
+ mode != OMAPFB_UPDATE_DISABLED)
+ return -EINVAL;
+
+ if (mode == hwa742.update_mode)
+ return 0;
+
+ dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
+ mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
+ (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
+
+ switch (hwa742.update_mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ hwa742.stop_auto_update = 1;
+ del_timer_sync(&hwa742.auto_update_timer);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ hwa742.update_mode = mode;
+ hwa742_sync();
+ hwa742.stop_auto_update = 0;
+
+ switch (mode) {
+ case OMAPFB_MANUAL_UPDATE:
+ omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
+ break;
+ case OMAPFB_AUTO_UPDATE:
+ hwa742_update_window_auto(0);
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ break;
+ }
+
+ return 0;
+}
+
+static enum omapfb_update_mode hwa742_get_update_mode(void)
+{
+ return hwa742.update_mode;
+}
+
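+/*
+ * Round a delay given in picoseconds up to a whole number of external
+ * interface clock ticks at the given clock divider.
+ */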
+static unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+ int bus_tick = hwa742.extif_clk_period * div;
+ return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 12.2 ns (regs),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 16 ns (regs),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 2*SYSCLK (regs),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns */
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps "
+ "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
+
+ t = &hwa742.reg_timings;
+ memset(t, 0, sizeof(*t));
+ t->clk_div = div;
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return hwa742.extif->convert_timings(t);
+}
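+
+/*
+ * Editorial note on units: sysclk is passed in Hz, so
+ *   systim = 1000000000 / (sysclk / 1000)
+ * is the SYSCLK period in picoseconds; a hypothetical 48 MHz SYSCLK, for
+ * instance, gives systim = 1000000000 / 48000 ~= 20833 ps. The read/write
+ * cycle times above are built from this period (2 * SYSCLK + 2 ns), while
+ * the on/off times use the fixed delays quoted in the comment, and every
+ * value is rounded up to a whole EXTIF tick by round_to_extif_ticks().
+ */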
+
+static int calc_lut_timing(unsigned long sysclk, int div)
+{
+ struct extif_timings *t;
+ unsigned long systim;
+
+ /* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
+ * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
+ * WEOffTime = WEOnTime + 1 ns,
+ * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
+ * CSOffTime = REOffTime + 1 ns
+ * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
+ * WriteCycle = 2*SYSCLK + 2 ns,
+ * CSPulseWidth = 10 ns
+ */
+ systim = 1000000000 / (sysclk / 1000);
+ dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
+ " extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);
+
+ t = &hwa742.lut_timings;
+ memset(t, 0, sizeof(*t));
+
+ t->clk_div = div;
+
+ t->cs_on_time = 0;
+ t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
+ t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
+ t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
+ 26000, div);
+ t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
+ t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
+ if (t->we_cycle_time < t->we_off_time)
+ t->we_cycle_time = t->we_off_time;
+ t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
+ if (t->re_cycle_time < t->re_off_time)
+ t->re_cycle_time = t->re_off_time;
+ t->cs_pulse_width = 0;
+
+ dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return hwa742.extif->convert_timings(t);
+}
+
+static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
+{
+ int max_clk_div;
+ int div;
+
+ hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_reg_timing(sysclk, div) == 0)
+ break;
+ }
+ if (div > max_clk_div)
+ goto err;
+
+ *extif_mem_div = div;
+
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_lut_timing(sysclk, div) == 0)
+ break;
+ }
+
+ if (div > max_clk_div)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(hwa742.fbdev->dev, "can't setup timings\n");
+ return -1;
+}
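+
+/*
+ * Editorial note: convert_timings() fails when a timing set cannot be
+ * represented with the given clock divider, so both loops above simply
+ * retry with progressively larger dividers (a slower EXTIF clock) until
+ * both the register and LUT timings fit, or the maximum divider is
+ * exceeded.
+ */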
+
+static void calc_hwa742_clk_rates(unsigned long ext_clk,
+ unsigned long *sys_clk, unsigned long *pix_clk)
+{
+ int pix_clk_src;
+ int sys_div = 0, sys_mul = 0;
+ int pix_div;
+
+ pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
+ pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
+ if ((pix_clk_src & (0x3 << 1)) == 0) {
+ /* Source is the PLL */
+ sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
+ sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
+ *sys_clk = ext_clk * sys_mul / sys_div;
+ } else /* source is ext clk or oscillator */
+ *sys_clk = ext_clk;
+
+ *pix_clk = *sys_clk / pix_div; /* HZ */
+ dev_dbg(hwa742.fbdev->dev,
+ "ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
+ ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
+ dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
+ *sys_clk, *pix_clk);
+}
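+
+/*
+ * Illustrative example (assumed register values, for clarity only): with
+ * ext_clk = 12 MHz and the PLL selected as clock source, a divider field
+ * of 2 (sys_div = 3) and a multiplier field of 11 (sys_mul = 12) yield
+ *   sys_clk = 12 MHz * 12 / 3 = 48 MHz,
+ * and a pixel divider field of 7 (pix_div = 8) then gives pix_clk = 6 MHz.
+ */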
+
+
+static int setup_tearsync(unsigned long pix_clk, int extif_div)
+{
+ int hdisp, vdisp;
+ int hndp, vndp;
+ int hsw, vsw;
+ int hs, vs;
+ int hs_pol_inv, vs_pol_inv;
+ int use_hsvs, use_ndp;
+ u8 b;
+
+ hsw = hwa742_read_reg(HWA742_HS_W_REG);
+ vsw = hwa742_read_reg(HWA742_VS_W_REG);
+ hs_pol_inv = !(hsw & 0x80);
+ vs_pol_inv = !(vsw & 0x80);
+ hsw = hsw & 0x7f;
+ vsw = vsw & 0x3f;
+
+ hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
+ vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
+ ((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);
+
+ hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
+ vndp = hwa742_read_reg(HWA742_V_NDP_REG);
+
+ /* time to transfer one pixel (16bpp) in ps */
+ hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
+ if (hwa742.extif->get_max_tx_rate != NULL) {
+ /*
+ * The external interface might have a rate limitation,
+ * if so, we have to maximize our transfer rate.
+ */
+ unsigned long min_tx_time;
+ unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();
+
+ dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
+ max_tx_rate);
+ min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
+ if (hwa742.pix_tx_time < min_tx_time)
+ hwa742.pix_tx_time = min_tx_time;
+ }
+
+ /* time to update one line in ps */
+ hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
+ hwa742.line_upd_time *= 1000;
+ if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
+ /*
+ * transfer speed too low, we might have to use both
+ * HS and VS
+ */
+ use_hsvs = 1;
+ else
+ /* decent transfer speed, we'll always use only VS */
+ use_hsvs = 0;
+
+ if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
+ /*
+ * HS or'ed with VS doesn't work, use the active high
+ * TE signal based on HNDP / VNDP
+ */
+ use_ndp = 1;
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ hs = hndp;
+ vs = vndp;
+ } else {
+ /*
+ * Use HS or'ed with VS as a TE signal if both are needed
+ * or VNDP if only vsync is needed.
+ */
+ use_ndp = 0;
+ hs = hsw;
+ vs = vsw;
+ if (!use_hsvs) {
+ hs_pol_inv = 0;
+ vs_pol_inv = 0;
+ }
+ }
+
+ hs = hs * 1000000 / (pix_clk / 1000); /* ps */
+ hs *= 1000;
+
+ vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000); /* ps */
+ vs *= 1000;
+
+ if (vs <= hs)
+ return -EDOM;
+ /* set VS to 120% of HS to minimize VS detection time */
+ vs = hs * 12 / 10;
+ /* minimize HS too */
+ hs = 10000;
+
+ b = hwa742_read_reg(HWA742_NDP_CTRL);
+ b &= ~0x3;
+ b |= use_hsvs ? 1 : 0;
+ b |= (use_ndp && use_hsvs) ? 0 : 2;
+ hwa742_write_reg(HWA742_NDP_CTRL, b);
+
+ hwa742.vsync_only = !use_hsvs;
+
+ dev_dbg(hwa742.fbdev->dev,
+ "pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
+ pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
+ dev_dbg(hwa742.fbdev->dev,
+ "hs %d ps vs %d ps mode %d vsync_only %d\n",
+ hs, vs, (b & 0x3), !use_hsvs);
+
+ return hwa742.extif->setup_tearsync(1, hs, vs,
+ hs_pol_inv, vs_pol_inv, extif_div);
+}
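+
+/*
+ * Illustrative numbers (assumed, for clarity only): at pix_clk = 6.25 MHz
+ * one pixel clock lasts 160000 ps, so a 4-clock HS width converts to
+ *   hs = 4 * 1000000 / 6250 * 1000 = 640000 ps
+ * before the final clamping above; VS is scaled the same way, but by the
+ * full line length (hdisp + hndp) in pixel clocks.
+ */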
+
+static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
+{
+ hwa742.int_ctrl->get_caps(plane, caps);
+ caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
+ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
+ if (hwa742.te_connected)
+ caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
+ caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
+ (1 << OMAPFB_COLOR_YUV420);
+}
+
+static void hwa742_suspend(void)
+{
+ hwa742.update_mode_before_suspend = hwa742.update_mode;
+ hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ /* Enable sleep mode */
+ hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
+ if (hwa742.power_down != NULL)
+ hwa742.power_down(hwa742.fbdev->dev);
+}
+
+static void hwa742_resume(void)
+{
+ if (hwa742.power_up != NULL)
+ hwa742.power_up(hwa742.fbdev->dev);
+ /* Disable sleep mode */
+ hwa742_write_reg(HWA742_POWER_SAVE, 0);
+ while (1) {
+ /* Loop until PLL output is stabilized */
+ if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(5));
+ }
+ hwa742_set_update_mode(hwa742.update_mode_before_suspend);
+}
+
+static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r = 0, i;
+ u8 rev, conf;
+ unsigned long ext_clk;
+ unsigned long sys_clk, pix_clk;
+ int extif_mem_div;
+ struct omapfb_platform_data *omapfb_conf;
+ struct hwa742_platform_data *ctrl_conf;
+
+ BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
+
+ hwa742.fbdev = fbdev;
+ hwa742.extif = fbdev->ext_if;
+ hwa742.int_ctrl = fbdev->int_ctrl;
+
+ omapfb_conf = fbdev->dev->platform_data;
+ ctrl_conf = omapfb_conf->ctrl_platform_data;
+
+ if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
+ dev_err(fbdev->dev, "HWA742: missing platform data\n");
+ r = -ENOENT;
+ goto err1;
+ }
+
+ hwa742.power_down = ctrl_conf->power_down;
+ hwa742.power_up = ctrl_conf->power_up;
+
+ spin_lock_init(&hwa742.req_lock);
+
+ if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
+ goto err1;
+
+ if ((r = hwa742.extif->init(fbdev)) < 0)
+ goto err2;
+
+ ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
+ if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
+ goto err3;
+ hwa742.extif->set_timings(&hwa742.reg_timings);
+ if (hwa742.power_up != NULL)
+ hwa742.power_up(fbdev->dev);
+
+ calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
+ if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
+ goto err4;
+ hwa742.extif->set_timings(&hwa742.reg_timings);
+
+ rev = hwa742_read_reg(HWA742_REV_CODE_REG);
+ if ((rev & 0xfc) != 0x80) {
+ dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
+ r = -ENODEV;
+ goto err4;
+ }
+
+
+ if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
+ dev_err(fbdev->dev,
+ "HWA742: controller not initialized by the bootloader\n");
+ r = -ENODEV;
+ goto err4;
+ }
+
+ if (ctrl_conf->te_connected) {
+ if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
+ dev_err(hwa742.fbdev->dev,
+ "HWA742: can't setup tearing synchronization\n");
+ goto err4;
+ }
+ hwa742.te_connected = 1;
+ }
+
+ hwa742.max_transmit_size = hwa742.extif->max_transmit_size;
+
+ hwa742.update_mode = OMAPFB_UPDATE_DISABLED;
+
+ hwa742.auto_update_window.x = 0;
+ hwa742.auto_update_window.y = 0;
+ hwa742.auto_update_window.width = fbdev->panel->x_res;
+ hwa742.auto_update_window.height = fbdev->panel->y_res;
+ hwa742.auto_update_window.format = 0;
+
+ init_timer(&hwa742.auto_update_timer);
+ hwa742.auto_update_timer.function = hwa742_update_window_auto;
+ hwa742.auto_update_timer.data = 0;
+
+ hwa742.prev_color_mode = -1;
+ hwa742.prev_flags = 0;
+
+ hwa742.fbdev = fbdev;
+
+ INIT_LIST_HEAD(&hwa742.free_req_list);
+ INIT_LIST_HEAD(&hwa742.pending_req_list);
+ for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
+ list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
+ BUG_ON(i <= IRQ_REQ_POOL_SIZE);
+ sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);
+
+ conf = hwa742_read_reg(HWA742_CONFIG_REG);
+ dev_info(fbdev->dev, "Epson HWA742 LCD controller rev %d "
+ "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
+
+ return 0;
+err4:
+ if (hwa742.power_down != NULL)
+ hwa742.power_down(fbdev->dev);
+err3:
+ hwa742.extif->cleanup();
+err2:
+ hwa742.int_ctrl->cleanup();
+err1:
+ return r;
+}
+
+static void hwa742_cleanup(void)
+{
+ hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
+ hwa742.extif->cleanup();
+ hwa742.int_ctrl->cleanup();
+ if (hwa742.power_down != NULL)
+ hwa742.power_down(hwa742.fbdev->dev);
+}
+
+struct lcd_ctrl hwa742_ctrl = {
+ .name = "hwa742",
+ .init = hwa742_init,
+ .cleanup = hwa742_cleanup,
+ .bind_client = hwa742_bind_client,
+ .get_caps = hwa742_get_caps,
+ .set_update_mode = hwa742_set_update_mode,
+ .get_update_mode = hwa742_get_update_mode,
+ .setup_plane = hwa742_setup_plane,
+ .enable_plane = hwa742_enable_plane,
+ .update_window = hwa742_update_window_async,
+ .sync = hwa742_sync,
+ .suspend = hwa742_suspend,
+ .resume = hwa742_resume,
+};
+
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
new file mode 100644
index 000000000000..51807b4e26d1
--- /dev/null
+++ b/drivers/video/omap/lcd_h3.c
@@ -0,0 +1,141 @@
+/*
+ * LCD panel support for the TI OMAP H3 board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/tps65010.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME "omapfb-lcd_h3"
+
+#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
+
+static int h3_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void h3_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int h3_panel_enable(struct lcd_panel *panel)
+{
+ int r = 0;
+
+ /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
+ r = tps65010_set_gpio_out_value(GPIO1, HIGH);
+ if (!r)
+ r = tps65010_set_gpio_out_value(GPIO2, HIGH);
+ if (r)
+ pr_err("Unable to turn on LCD panel\n");
+
+ return r;
+}
+
+static void h3_panel_disable(struct lcd_panel *panel)
+{
+ int r = 0;
+
+ /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
+ r = tps65010_set_gpio_out_value(GPIO1, LOW);
+ if (!r)
+ r = tps65010_set_gpio_out_value(GPIO2, LOW);
+ if (r)
+ pr_err("Unable to turn off LCD panel\n");
+}
+
+static unsigned long h3_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel h3_panel = {
+ .name = "h3",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .data_lines = 16,
+ .bpp = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 12000,
+ .hsw = 12,
+ .hfp = 14,
+ .hbp = 72 - 12,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 0,
+
+ .init = h3_panel_init,
+ .cleanup = h3_panel_cleanup,
+ .enable = h3_panel_enable,
+ .disable = h3_panel_disable,
+ .get_caps = h3_panel_get_caps,
+};
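+
+/*
+ * Editorial note: hsw/hfp/hbp and vsw/vfp/vbp are the sync width, front
+ * porch and back porch in pixel clocks and in lines respectively, and
+ * pixel_clock appears to be given in kHz for these panels. The nominal
+ * refresh rate is then roughly
+ *   pixel_clock / ((x_res + hsw + hfp + hbp) * (y_res + vsw + vfp + vbp))
+ * subject to how the LCD controller actually derives its pixel clock.
+ */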
+
+static int h3_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&h3_panel);
+ return 0;
+}
+
+static int h3_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int h3_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int h3_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver h3_panel_driver = {
+ .probe = h3_panel_probe,
+ .remove = h3_panel_remove,
+ .suspend = h3_panel_suspend,
+ .resume = h3_panel_resume,
+ .driver = {
+ .name = "lcd_h3",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int h3_panel_drv_init(void)
+{
+ return platform_driver_register(&h3_panel_driver);
+}
+
+static void h3_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&h3_panel_driver);
+}
+
+module_init(h3_panel_drv_init);
+module_exit(h3_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
new file mode 100644
index 000000000000..fd6f0eb16de1
--- /dev/null
+++ b/drivers/video/omap/lcd_h4.c
@@ -0,0 +1,117 @@
+/*
+ * LCD panel support for the TI OMAP H4 board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/omapfb.h>
+
+static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void h4_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int h4_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void h4_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel h4_panel = {
+ .name = "h4",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 6250,
+ .hsw = 15,
+ .hfp = 15,
+ .hbp = 60,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 1,
+
+ .init = h4_panel_init,
+ .cleanup = h4_panel_cleanup,
+ .enable = h4_panel_enable,
+ .disable = h4_panel_disable,
+ .get_caps = h4_panel_get_caps,
+};
+
+static int h4_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&h4_panel);
+ return 0;
+}
+
+static int h4_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int h4_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver h4_panel_driver = {
+ .probe = h4_panel_probe,
+ .remove = h4_panel_remove,
+ .suspend = h4_panel_suspend,
+ .resume = h4_panel_resume,
+ .driver = {
+ .name = "lcd_h4",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int h4_panel_drv_init(void)
+{
+ return platform_driver_register(&h4_panel_driver);
+}
+
+static void h4_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&h4_panel_driver);
+}
+
+module_init(h4_panel_drv_init);
+module_exit(h4_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
new file mode 100644
index 000000000000..551f385861d1
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -0,0 +1,124 @@
+/*
+ * LCD panel support for the TI OMAP1510 Innovator board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/fpga.h>
+#include <asm/arch/omapfb.h>
+
+static int innovator1510_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void innovator1510_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int innovator1510_panel_enable(struct lcd_panel *panel)
+{
+ fpga_write(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL);
+ return 0;
+}
+
+static void innovator1510_panel_disable(struct lcd_panel *panel)
+{
+ fpga_write(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL);
+}
+
+static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel innovator1510_panel = {
+ .name = "inn1510",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 12500,
+ .hsw = 40,
+ .hfp = 40,
+ .hbp = 72,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 12,
+
+ .init = innovator1510_panel_init,
+ .cleanup = innovator1510_panel_cleanup,
+ .enable = innovator1510_panel_enable,
+ .disable = innovator1510_panel_disable,
+ .get_caps = innovator1510_panel_get_caps,
+};
+
+static int innovator1510_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&innovator1510_panel);
+ return 0;
+}
+
+static int innovator1510_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int innovator1510_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int innovator1510_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver innovator1510_panel_driver = {
+ .probe = innovator1510_panel_probe,
+ .remove = innovator1510_panel_remove,
+ .suspend = innovator1510_panel_suspend,
+ .resume = innovator1510_panel_resume,
+ .driver = {
+ .name = "lcd_inn1510",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int innovator1510_panel_drv_init(void)
+{
+ return platform_driver_register(&innovator1510_panel_driver);
+}
+
+static void innovator1510_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&innovator1510_panel_driver);
+}
+
+module_init(innovator1510_panel_drv_init);
+module_exit(innovator1510_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
new file mode 100644
index 000000000000..95604ca43301
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -0,0 +1,150 @@
+/*
+ * LCD panel support for the TI OMAP1610 Innovator board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME "omapfb-lcd_inn1610"
+
+#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
+
+static int innovator1610_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ int r = 0;
+
+ if (omap_request_gpio(14)) {
+ pr_err("can't request GPIO 14\n");
+ r = -1;
+ goto exit;
+ }
+ if (omap_request_gpio(15)) {
+ pr_err("can't request GPIO 15\n");
+ omap_free_gpio(14);
+ r = -1;
+ goto exit;
+ }
+ /* configure GPIO(14, 15) as outputs */
+ omap_set_gpio_direction(14, 0);
+ omap_set_gpio_direction(15, 0);
+exit:
+ return r;
+}
+
+static void innovator1610_panel_cleanup(struct lcd_panel *panel)
+{
+ omap_free_gpio(15);
+ omap_free_gpio(14);
+}
+
+static int innovator1610_panel_enable(struct lcd_panel *panel)
+{
+ /* set GPIO14 and GPIO15 high */
+ omap_set_gpio_dataout(14, 1);
+ omap_set_gpio_dataout(15, 1);
+ return 0;
+}
+
+static void innovator1610_panel_disable(struct lcd_panel *panel)
+{
+ /* set GPIO14 and GPIO15 low */
+ omap_set_gpio_dataout(14, 0);
+ omap_set_gpio_dataout(15, 0);
+}
+
+static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel innovator1610_panel = {
+ .name = "inn1610",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 320,
+ .y_res = 240,
+ .pixel_clock = 12500,
+ .hsw = 40,
+ .hfp = 40,
+ .hbp = 72,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 12,
+
+ .init = innovator1610_panel_init,
+ .cleanup = innovator1610_panel_cleanup,
+ .enable = innovator1610_panel_enable,
+ .disable = innovator1610_panel_disable,
+ .get_caps = innovator1610_panel_get_caps,
+};
+
+static int innovator1610_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&innovator1610_panel);
+ return 0;
+}
+
+static int innovator1610_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int innovator1610_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int innovator1610_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver innovator1610_panel_driver = {
+ .probe = innovator1610_panel_probe,
+ .remove = innovator1610_panel_remove,
+ .suspend = innovator1610_panel_suspend,
+ .resume = innovator1610_panel_resume,
+ .driver = {
+ .name = "lcd_inn1610",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int innovator1610_panel_drv_init(void)
+{
+ return platform_driver_register(&innovator1610_panel_driver);
+}
+
+static void innovator1610_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&innovator1610_panel_driver);
+}
+
+module_init(innovator1610_panel_drv_init);
+module_exit(innovator1610_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
new file mode 100644
index 000000000000..a38038840fd6
--- /dev/null
+++ b/drivers/video/omap/lcd_osk.c
@@ -0,0 +1,144 @@
+/*
+ * LCD panel support for the TI OMAP OSK board
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ * Adapted for OSK by <dirk.behme@de.bosch.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/omapfb.h>
+
+static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void osk_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int osk_panel_enable(struct lcd_panel *panel)
+{
+ /* configure PWL pin */
+ omap_cfg_reg(PWL);
+
+ /* Enable PWL unit */
+ omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);
+
+ /* Set PWL level */
+ omap_writeb(0xFF, OMAP_PWL_ENABLE);
+
+ /* configure GPIO2 as output */
+ omap_set_gpio_direction(2, 0);
+
+ /* set GPIO2 high */
+ omap_set_gpio_dataout(2, 1);
+
+ return 0;
+}
+
+static void osk_panel_disable(struct lcd_panel *panel)
+{
+ /* Set PWL level to zero */
+ omap_writeb(0x00, OMAP_PWL_ENABLE);
+
+ /* Disable PWL unit */
+ omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);
+
+ /* set GPIO2 low */
+ omap_set_gpio_dataout(2, 0);
+}
+
+static unsigned long osk_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel osk_panel = {
+ .name = "osk",
+ .config = OMAP_LCDC_PANEL_TFT,
+
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 240,
+ .y_res = 320,
+ .pixel_clock = 12500,
+ .hsw = 40,
+ .hfp = 40,
+ .hbp = 72,
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 0,
+ .pcd = 12,
+
+ .init = osk_panel_init,
+ .cleanup = osk_panel_cleanup,
+ .enable = osk_panel_enable,
+ .disable = osk_panel_disable,
+ .get_caps = osk_panel_get_caps,
+};
+
+static int osk_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&osk_panel);
+ return 0;
+}
+
+static int osk_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int osk_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver osk_panel_driver = {
+ .probe = osk_panel_probe,
+ .remove = osk_panel_remove,
+ .suspend = osk_panel_suspend,
+ .resume = osk_panel_resume,
+ .driver = {
+ .name = "lcd_osk",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int osk_panel_drv_init(void)
+{
+ return platform_driver_register(&osk_panel_driver);
+}
+
+static void osk_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&osk_panel_driver);
+}
+
+module_init(osk_panel_drv_init);
+module_exit(osk_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
new file mode 100644
index 000000000000..52bdfdac42c9
--- /dev/null
+++ b/drivers/video/omap/lcd_palmte.c
@@ -0,0 +1,123 @@
+/*
+ * LCD panel support for the Palm Tungsten E
+ *
+ * Original version : Romain Goyet <r.goyet@gmail.com>
+ * Current version : Laurent Gonzalez <palmte.linux@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/fpga.h>
+#include <asm/arch/omapfb.h>
+
+static int palmte_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void palmte_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmte_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void palmte_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmte_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel palmte_panel = {
+ .name = "palmte",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+ OMAP_LCDC_HSVS_OPPOSITE,
+
+ .data_lines = 16,
+ .bpp = 8,
+ .pixel_clock = 12000,
+ .x_res = 320,
+ .y_res = 320,
+ .hsw = 4,
+ .hfp = 8,
+ .hbp = 28,
+ .vsw = 1,
+ .vfp = 8,
+ .vbp = 7,
+ .pcd = 0,
+
+ .init = palmte_panel_init,
+ .cleanup = palmte_panel_cleanup,
+ .enable = palmte_panel_enable,
+ .disable = palmte_panel_disable,
+ .get_caps = palmte_panel_get_caps,
+};
+
+static int palmte_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&palmte_panel);
+ return 0;
+}
+
+static int palmte_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int palmte_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver palmte_panel_driver = {
+ .probe = palmte_panel_probe,
+ .remove = palmte_panel_remove,
+ .suspend = palmte_panel_suspend,
+ .resume = palmte_panel_resume,
+ .driver = {
+ .name = "lcd_palmte",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int palmte_panel_drv_init(void)
+{
+ return platform_driver_register(&palmte_panel_driver);
+}
+
+static void palmte_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&palmte_panel_driver);
+}
+
+module_init(palmte_panel_drv_init);
+module_exit(palmte_panel_drv_cleanup);
+
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
new file mode 100644
index 000000000000..4bb349f54356
--- /dev/null
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -0,0 +1,127 @@
+/*
+ * LCD panel support for Palm Tungsten|T
+ * Current version : Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Modified from lcd_inn1510.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+GPIO11 - backlight
+GPIO12 - screen blanking
+GPIO13 - screen blanking
+*/
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+
+static int palmtt_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void palmtt_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmtt_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void palmtt_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel)
+{
+ return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+struct lcd_panel palmtt_panel = {
+ .name = "palmtt",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+ OMAP_LCDC_HSVS_OPPOSITE,
+ .bpp = 16,
+ .data_lines = 16,
+ .x_res = 320,
+ .y_res = 320,
+ .pixel_clock = 10000,
+ .hsw = 4,
+ .hfp = 8,
+ .hbp = 28,
+ .vsw = 1,
+ .vfp = 8,
+ .vbp = 7,
+ .pcd = 0,
+
+ .init = palmtt_panel_init,
+ .cleanup = palmtt_panel_cleanup,
+ .enable = palmtt_panel_enable,
+ .disable = palmtt_panel_disable,
+ .get_caps = palmtt_panel_get_caps,
+};
+
+static int palmtt_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&palmtt_panel);
+ return 0;
+}
+
+static int palmtt_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int palmtt_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver palmtt_panel_driver = {
+ .probe = palmtt_panel_probe,
+ .remove = palmtt_panel_remove,
+ .suspend = palmtt_panel_suspend,
+ .resume = palmtt_panel_resume,
+ .driver = {
+ .name = "lcd_palmtt",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int palmtt_panel_drv_init(void)
+{
+ return platform_driver_register(&palmtt_panel_driver);
+}
+
+static void palmtt_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&palmtt_panel_driver);
+}
+
+module_init(palmtt_panel_drv_init);
+module_exit(palmtt_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
new file mode 100644
index 000000000000..ea6170ddff35
--- /dev/null
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -0,0 +1,123 @@
+/*
+ * LCD panel support for the Palm Zire71
+ *
+ * Original version : Romain Goyet
+ * Current version : Laurent Gonzalez
+ * Modified for zire71 : Marek Vasut
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/arch/omapfb.h>
+
+static int palmz71_panel_init(struct lcd_panel *panel,
+ struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void palmz71_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static int palmz71_panel_enable(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+static void palmz71_panel_disable(struct lcd_panel *panel)
+{
+}
+
+static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
+{
+ return OMAPFB_CAPS_SET_BACKLIGHT;
+}
+
+struct lcd_panel palmz71_panel = {
+ .name = "palmz71",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
+ OMAP_LCDC_HSVS_OPPOSITE,
+ .data_lines = 16,
+ .bpp = 16,
+ .pixel_clock = 24000,
+ .x_res = 320,
+ .y_res = 320,
+ .hsw = 4,
+ .hfp = 8,
+ .hbp = 28,
+ .vsw = 1,
+ .vfp = 8,
+ .vbp = 7,
+ .pcd = 0,
+
+ .init = palmz71_panel_init,
+ .cleanup = palmz71_panel_cleanup,
+ .enable = palmz71_panel_enable,
+ .disable = palmz71_panel_disable,
+ .get_caps = palmz71_panel_get_caps,
+};
+
+static int palmz71_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&palmz71_panel);
+ return 0;
+}
+
+static int palmz71_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int palmz71_panel_suspend(struct platform_device *pdev,
+ pm_message_t mesg)
+{
+ return 0;
+}
+
+static int palmz71_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver palmz71_panel_driver = {
+ .probe = palmz71_panel_probe,
+ .remove = palmz71_panel_remove,
+ .suspend = palmz71_panel_suspend,
+ .resume = palmz71_panel_resume,
+ .driver = {
+ .name = "lcd_palmz71",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int palmz71_panel_drv_init(void)
+{
+ return platform_driver_register(&palmz71_panel_driver);
+}
+
+static void palmz71_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&palmz71_panel_driver);
+}
+
+module_init(palmz71_panel_drv_init);
+module_exit(palmz71_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
new file mode 100644
index 000000000000..c4f306a4e5c9
--- /dev/null
+++ b/drivers/video/omap/lcd_sx1.c
@@ -0,0 +1,334 @@
+/*
+ * LCD panel support for the Siemens SX1 mobile phone
+ *
+ * Current version : Vovan888@gmail.com, great help from FCA00000
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/omapfb.h>
+#include <asm/arch/mcbsp.h>
+#include <asm/arch/mux.h>
+
+/*
+ * OMAP310 GPIO registers
+ */
+#define GPIO_DATA_INPUT 0xfffce000
+#define GPIO_DATA_OUTPUT 0xfffce004
+#define GPIO_DIR_CONTROL 0xfffce008
+#define GPIO_INT_CONTROL 0xfffce00c
+#define GPIO_INT_MASK 0xfffce010
+#define GPIO_INT_STATUS 0xfffce014
+#define GPIO_PIN_CONTROL 0xfffce018
+
+
+#define A_LCD_SSC_RD 3
+#define A_LCD_SSC_SD 7
+#define _A_LCD_RESET 9
+#define _A_LCD_SSC_CS 12
+#define _A_LCD_SSC_A0 13
+
+#define DSP_REG 0xE1017024
+
+const unsigned char INIT_1[12] = {
+ 0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
+};
+
+const unsigned char INIT_2[127] = {
+ 0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
+ 0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
+ 0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
+ 0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
+ 0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
+ 0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
+ 0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
+ 0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
+ 0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
+ 0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
+ 0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
+ 0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
+ 0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
+ 0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
+ 0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
+ 0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
+};
+
+const unsigned char INIT_3[15] = {
+ 0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
+ 0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
+};
+
+static void epson_sendbyte(int flag, unsigned char byte)
+{
+ int i, shifter = 0x80;
+
+ if (!flag)
+ omap_set_gpio_dataout(_A_LCD_SSC_A0, 0);
+ mdelay(2);
+ omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
+
+ omap_set_gpio_dataout(A_LCD_SSC_SD, flag);
+
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
+ for (i = 0; i < 8; i++) {
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
+ omap_set_gpio_dataout(A_LCD_SSC_SD, shifter & byte);
+ OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
+ shifter >>= 1;
+ }
+ omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
+}
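+
+/*
+ * Editorial note: the routine above bit-bangs one byte to the Epson LCD
+ * controller, MSB first, by toggling the McBSP3 PCR0 register around the
+ * SD data line. The A0 line distinguishes a command byte (flag == 0, A0
+ * driven low) from a data byte, and is raised again once the byte is out.
+ */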
+
+static void init_system(void)
+{
+ omap_mcbsp_request(OMAP_MCBSP3);
+ omap_mcbsp_stop(OMAP_MCBSP3);
+}
+
+static void setup_GPIO(void)
+{
+ /* new wave */
+ omap_request_gpio(A_LCD_SSC_RD);
+ omap_request_gpio(A_LCD_SSC_SD);
+ omap_request_gpio(_A_LCD_RESET);
+ omap_request_gpio(_A_LCD_SSC_CS);
+ omap_request_gpio(_A_LCD_SSC_A0);
+
+ /* set all GPIOs to output */
+ omap_set_gpio_direction(A_LCD_SSC_RD, 0);
+ omap_set_gpio_direction(A_LCD_SSC_SD, 0);
+ omap_set_gpio_direction(_A_LCD_RESET, 0);
+ omap_set_gpio_direction(_A_LCD_SSC_CS, 0);
+ omap_set_gpio_direction(_A_LCD_SSC_A0, 0);
+
+ /* set GPIO data */
+ omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
+ omap_set_gpio_dataout(A_LCD_SSC_SD, 0);
+ omap_set_gpio_dataout(_A_LCD_RESET, 0);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
+}
+
+static void display_init(void)
+{
+ int i;
+
+ omap_cfg_reg(MCBSP3_CLKX);
+
+ mdelay(2);
+ setup_GPIO();
+ mdelay(2);
+
+ /* reset LCD */
+ omap_set_gpio_dataout(A_LCD_SSC_SD, 1);
+ epson_sendbyte(0, 0x25);
+
+ omap_set_gpio_dataout(_A_LCD_RESET, 0);
+ mdelay(10);
+ omap_set_gpio_dataout(_A_LCD_RESET, 1);
+
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ mdelay(2);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD, phase 1 */
+ epson_sendbyte(0, 0xCA);
+ for (i = 0; i < 10; i++)
+ epson_sendbyte(1, INIT_1[i]);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 2 */
+ epson_sendbyte(0, 0xCB);
+ for (i = 0; i < 125; i++)
+ epson_sendbyte(1, INIT_2[i]);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 2a */
+ epson_sendbyte(0, 0xCC);
+ for (i = 0; i < 14; i++)
+ epson_sendbyte(1, INIT_3[i]);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 3 */
+ epson_sendbyte(0, 0xBC);
+ epson_sendbyte(1, 0x08);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 4 */
+ epson_sendbyte(0, 0x07);
+ epson_sendbyte(1, 0x05);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 5 */
+ epson_sendbyte(0, 0x94);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 6 */
+ epson_sendbyte(0, 0xC6);
+ epson_sendbyte(1, 0x80);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ mdelay(100); /* used to be 1000 */
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 7 */
+ epson_sendbyte(0, 0x16);
+ epson_sendbyte(1, 0x02);
+ epson_sendbyte(1, 0x00);
+ epson_sendbyte(1, 0xB1);
+ epson_sendbyte(1, 0x00);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 8 */
+ epson_sendbyte(0, 0x76);
+ epson_sendbyte(1, 0x00);
+ epson_sendbyte(1, 0x00);
+ epson_sendbyte(1, 0xDB);
+ epson_sendbyte(1, 0x00);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ /* init LCD phase 9 */
+ epson_sendbyte(0, 0xAF);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+}
+
+static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
+{
+ return 0;
+}
+
+static void sx1_panel_cleanup(struct lcd_panel *panel)
+{
+}
+
+static void sx1_panel_disable(struct lcd_panel *panel)
+{
+ printk(KERN_INFO "SX1: LCD panel disable\n");
+ sx1_setmmipower(0);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+
+ epson_sendbyte(0, 0x25);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ epson_sendbyte(0, 0xAE);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+ mdelay(100);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);
+
+ epson_sendbyte(0, 0x95);
+ omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
+}
+
+static int sx1_panel_enable(struct lcd_panel *panel)
+{
+ printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
+ init_system();
+ display_init();
+
+ sx1_setmmipower(1);
+ sx1_setbacklight(0x18);
+ sx1_setkeylight(0x06);
+ return 0;
+}
+
+
+static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
+{
+ return 0;
+}
+
+struct lcd_panel sx1_panel = {
+ .name = "sx1",
+ .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
+ OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
+ OMAP_LCDC_INV_OUTPUT_EN,
+
+ .x_res = 176,
+ .y_res = 220,
+ .data_lines = 16,
+ .bpp = 16,
+ .hsw = 5,
+ .hfp = 5,
+ .hbp = 5,
+ .vsw = 2,
+ .vfp = 1,
+ .vbp = 1,
+ .pixel_clock = 1500,
+
+ .init = sx1_panel_init,
+ .cleanup = sx1_panel_cleanup,
+ .enable = sx1_panel_enable,
+ .disable = sx1_panel_disable,
+ .get_caps = sx1_panel_get_caps,
+};
+
+static int sx1_panel_probe(struct platform_device *pdev)
+{
+ omapfb_register_panel(&sx1_panel);
+ return 0;
+}
+
+static int sx1_panel_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ return 0;
+}
+
+static int sx1_panel_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver sx1_panel_driver = {
+ .probe = sx1_panel_probe,
+ .remove = sx1_panel_remove,
+ .suspend = sx1_panel_suspend,
+ .resume = sx1_panel_resume,
+ .driver = {
+ .name = "lcd_sx1",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int sx1_panel_drv_init(void)
+{
+ return platform_driver_register(&sx1_panel_driver);
+}
+
+static void sx1_panel_drv_cleanup(void)
+{
+ platform_driver_unregister(&sx1_panel_driver);
+}
+
+module_init(sx1_panel_drv_init);
+module_exit(sx1_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
new file mode 100644
index 000000000000..9085188d815e
--- /dev/null
+++ b/drivers/video/omap/lcdc.c
@@ -0,0 +1,893 @@
+/*
+ * OMAP1 internal LCD controller
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#include <asm/mach-types.h>
+
+#define MODULE_NAME "lcdc"
+
+#define OMAP_LCDC_BASE 0xfffec000
+#define OMAP_LCDC_SIZE 256
+#define OMAP_LCDC_IRQ INT_LCD_CTRL
+
+#define OMAP_LCDC_CONTROL (OMAP_LCDC_BASE + 0x00)
+#define OMAP_LCDC_TIMING0 (OMAP_LCDC_BASE + 0x04)
+#define OMAP_LCDC_TIMING1 (OMAP_LCDC_BASE + 0x08)
+#define OMAP_LCDC_TIMING2 (OMAP_LCDC_BASE + 0x0c)
+#define OMAP_LCDC_STATUS (OMAP_LCDC_BASE + 0x10)
+#define OMAP_LCDC_SUBPANEL (OMAP_LCDC_BASE + 0x14)
+#define OMAP_LCDC_LINE_INT (OMAP_LCDC_BASE + 0x18)
+#define OMAP_LCDC_DISPLAY_STATUS (OMAP_LCDC_BASE + 0x1c)
+
+#define OMAP_LCDC_STAT_DONE (1 << 0)
+#define OMAP_LCDC_STAT_VSYNC (1 << 1)
+#define OMAP_LCDC_STAT_SYNC_LOST (1 << 2)
+#define OMAP_LCDC_STAT_ABC (1 << 3)
+#define OMAP_LCDC_STAT_LINE_INT (1 << 4)
+#define OMAP_LCDC_STAT_FUF (1 << 5)
+#define OMAP_LCDC_STAT_LOADED_PALETTE (1 << 6)
+
+#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
+#define OMAP_LCDC_CTRL_LCD_TFT (1 << 7)
+#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL (1 << 10)
+
+#define OMAP_LCDC_IRQ_VSYNC (1 << 2)
+#define OMAP_LCDC_IRQ_DONE (1 << 3)
+#define OMAP_LCDC_IRQ_LOADED_PALETTE (1 << 4)
+#define OMAP_LCDC_IRQ_LINE_NIRQ (1 << 5)
+#define OMAP_LCDC_IRQ_LINE (1 << 6)
+#define OMAP_LCDC_IRQ_MASK (((1 << 5) - 1) << 2)
+
+#define MAX_PALETTE_SIZE PAGE_SIZE
+
+enum lcdc_load_mode {
+ OMAP_LCDC_LOAD_PALETTE,
+ OMAP_LCDC_LOAD_FRAME,
+ OMAP_LCDC_LOAD_PALETTE_AND_FRAME
+};
+
+static struct omap_lcd_controller {
+ enum omapfb_update_mode update_mode;
+ int ext_mode;
+
+ unsigned long frame_offset;
+ int screen_width;
+ int xres;
+ int yres;
+
+ enum omapfb_color_format color_mode;
+ int bpp;
+ void *palette_virt;
+ dma_addr_t palette_phys;
+ int palette_code;
+ int palette_size;
+
+ unsigned int irq_mask;
+ struct completion last_frame_complete;
+ struct completion palette_load_complete;
+ struct clk *lcd_ck;
+ struct omapfb_device *fbdev;
+
+ void (*dma_callback)(void *data);
+ void *dma_callback_data;
+
+ int fbmem_allocated;
+ dma_addr_t vram_phys;
+ void *vram_virt;
+ unsigned long vram_size;
+} lcdc;
+
+static inline void enable_irqs(int mask)
+{
+ lcdc.irq_mask |= mask;
+}
+
+static inline void disable_irqs(int mask)
+{
+ lcdc.irq_mask &= ~mask;
+}
+
+static void set_load_mode(enum lcdc_load_mode mode)
+{
+ u32 l;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l &= ~(3 << 20);
+ switch (mode) {
+ case OMAP_LCDC_LOAD_PALETTE:
+ l |= 1 << 20;
+ break;
+ case OMAP_LCDC_LOAD_FRAME:
+ l |= 2 << 20;
+ break;
+ case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
+ break;
+ default:
+ BUG();
+ }
+ omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void enable_controller(void)
+{
+ u32 l;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l |= OMAP_LCDC_CTRL_LCD_EN;
+ l &= ~OMAP_LCDC_IRQ_MASK;
+ l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE; /* enabled IRQs */
+ omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void disable_controller_async(void)
+{
+ u32 l;
+ u32 mask;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK;
+ /*
+ * Preserve the DONE mask, since we still want to get the
+ * final DONE irq. It will be disabled in the IRQ handler.
+ */
+ mask &= ~OMAP_LCDC_IRQ_DONE;
+ l &= ~mask;
+ omap_writel(l, OMAP_LCDC_CONTROL);
+}
+
+static void disable_controller(void)
+{
+ init_completion(&lcdc.last_frame_complete);
+ disable_controller_async();
+ if (!wait_for_completion_timeout(&lcdc.last_frame_complete,
+ msecs_to_jiffies(500)))
+ dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
+}
+
+static void reset_controller(u32 status)
+{
+ static unsigned long reset_count;
+ static unsigned long last_jiffies;
+
+ disable_controller_async();
+ reset_count++;
+ if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
+ dev_err(lcdc.fbdev->dev,
+ "resetting (status %#010x,reset count %lu)\n",
+ status, reset_count);
+ last_jiffies = jiffies;
+ }
+ if (reset_count < 100) {
+ enable_controller();
+ } else {
+ reset_count = 0;
+ dev_err(lcdc.fbdev->dev,
+ "too many reset attempts, giving up.\n");
+ }
+}
+
+/*
+ * Configure the LCD DMA according to the current mode specified by parameters
+ * in lcdc.fbdev and fbdev->var.
+ */
+static void setup_lcd_dma(void)
+{
+ static const int dma_elem_type[] = {
+ 0,
+ OMAP_DMA_DATA_TYPE_S8,
+ OMAP_DMA_DATA_TYPE_S16,
+ 0,
+ OMAP_DMA_DATA_TYPE_S32,
+ };
+ struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
+ struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
+ unsigned long src;
+ int esize, xelem, yelem;
+
+ src = lcdc.vram_phys + lcdc.frame_offset;
+
+ switch (var->rotate) {
+ case 0:
+ if (plane->info.mirror || (src & 3) ||
+ lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
+ (lcdc.xres & 1))
+ esize = 2;
+ else
+ esize = 4;
+ xelem = lcdc.xres * lcdc.bpp / 8 / esize;
+ yelem = lcdc.yres;
+ break;
+ case 90:
+ case 180:
+ case 270:
+ if (cpu_is_omap15xx()) {
+ BUG();
+ }
+ esize = 2;
+ xelem = lcdc.yres * lcdc.bpp / 16;
+ yelem = lcdc.xres;
+ break;
+ default:
+ BUG();
+ return;
+ }
+#ifdef VERBOSE
+ dev_dbg(lcdc.fbdev->dev,
+ "setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
+ src, esize, xelem, yelem);
+#endif
+ omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
+ if (!cpu_is_omap15xx()) {
+ int bpp = lcdc.bpp;
+
+ /*
+ * YUV support is only for external mode when we have the
+ * YUV window embedded in a 16bpp frame buffer.
+ */
+ if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
+ bpp = 16;
+ /* Set virtual xres elem size */
+ omap_set_lcd_dma_b1_vxres(
+ lcdc.screen_width * bpp / 8 / esize);
+ /* Setup transformations */
+ omap_set_lcd_dma_b1_rotation(var->rotate);
+ omap_set_lcd_dma_b1_mirror(plane->info.mirror);
+ }
+ omap_setup_lcd_dma();
+}
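+
+/*
+ * Illustrative example (assumed mode, editorial): for an unrotated,
+ * unmirrored 320x240 RGB565 frame starting on a 4-byte boundary, esize
+ * is 4, so xelem = 320 * 16 / 8 / 4 = 160 elements per line and
+ * yelem = 240 lines; on chips that support it, the virtual line length
+ * passed to omap_set_lcd_dma_b1_vxres() is screen_width * 16 / 8 / 4
+ * elements when the visible area is narrower than the allocated frame
+ * buffer.
+ */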
+
+static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
+{
+ u32 status;
+
+ status = omap_readl(OMAP_LCDC_STATUS);
+
+ if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
+ reset_controller(status);
+ else {
+ if (status & OMAP_LCDC_STAT_DONE) {
+ u32 l;
+
+ /*
+ * Disable IRQ_DONE. The status bit will be cleared
+ * only when the controller is reenabled and we don't
+ * want to get more interrupts.
+ */
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l &= ~OMAP_LCDC_IRQ_DONE;
+ omap_writel(l, OMAP_LCDC_CONTROL);
+ complete(&lcdc.last_frame_complete);
+ }
+ if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
+ disable_controller_async();
+ complete(&lcdc.palette_load_complete);
+ }
+ }
+
+ /*
+ * Clear these interrupt status bits.
+ * Sync_lost, FUF bits were cleared by disabling the LCD controller
+ * LOADED_PALETTE can be cleared this way only in palette only
+ * load mode. In other load modes it's cleared by disabling the
+ * controller.
+ */
+ status &= ~(OMAP_LCDC_STAT_VSYNC |
+ OMAP_LCDC_STAT_LOADED_PALETTE |
+ OMAP_LCDC_STAT_ABC |
+ OMAP_LCDC_STAT_LINE_INT);
+ omap_writel(status, OMAP_LCDC_STATUS);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Change to a new video mode. We defer this to a later time to avoid any
+ * flicker and not to mess up the current LCD DMA context. For this we disable
+ * the LCD controller, which will generate a DONE irq after the last frame has
+ * been transferred. Then it'll be safe to reconfigure both the LCD controller
+ * as well as the LCD DMA.
+ */
+static int omap_lcdc_setup_plane(int plane, int channel_out,
+ unsigned long offset, int screen_width,
+ int pos_x, int pos_y, int width, int height,
+ int color_mode)
+{
+ struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
+ struct lcd_panel *panel = lcdc.fbdev->panel;
+ int rot_x, rot_y;
+
+ if (var->rotate == 0) {
+ rot_x = panel->x_res;
+ rot_y = panel->y_res;
+ } else {
+ rot_x = panel->y_res;
+ rot_y = panel->x_res;
+ }
+ if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
+ width > rot_x || height > rot_y) {
+#ifdef VERBOSE
+ dev_dbg(lcdc.fbdev->dev,
+ "invalid plane params plane %d pos_x %d pos_y %d "
+ "w %d h %d\n", plane, pos_x, pos_y, width, height);
+#endif
+ return -EINVAL;
+ }
+
+ lcdc.frame_offset = offset;
+ lcdc.xres = width;
+ lcdc.yres = height;
+ lcdc.screen_width = screen_width;
+ lcdc.color_mode = color_mode;
+
+ switch (color_mode) {
+ case OMAPFB_COLOR_CLUT_8BPP:
+ lcdc.bpp = 8;
+ lcdc.palette_code = 0x3000;
+ lcdc.palette_size = 512;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ lcdc.bpp = 16;
+ lcdc.palette_code = 0x4000;
+ lcdc.palette_size = 32;
+ break;
+ case OMAPFB_COLOR_RGB444:
+ lcdc.bpp = 16;
+ lcdc.palette_code = 0x4000;
+ lcdc.palette_size = 32;
+ break;
+ case OMAPFB_COLOR_YUV420:
+ if (lcdc.ext_mode) {
+ lcdc.bpp = 12;
+ break;
+ }
+ /* fallthrough */
+ case OMAPFB_COLOR_YUV422:
+ if (lcdc.ext_mode) {
+ lcdc.bpp = 16;
+ break;
+ }
+ /* fallthrough */
+ default:
+ /* FIXME: other BPPs.
+ * bpp1: code 0, size 256
+ * bpp2: code 0x1000 size 256
+ * bpp4: code 0x2000 size 256
+ * bpp12: code 0x4000 size 32
+ */
+ dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
+ BUG();
+ return -1;
+ }
+
+ if (lcdc.ext_mode) {
+ setup_lcd_dma();
+ return 0;
+ }
+
+ if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+ disable_controller();
+ omap_stop_lcd_dma();
+ setup_lcd_dma();
+ enable_controller();
+ }
+
+ return 0;
+}
+
+static int omap_lcdc_enable_plane(int plane, int enable)
+{
+ dev_dbg(lcdc.fbdev->dev,
+ "plane %d enable %d update_mode %d ext_mode %d\n",
+ plane, enable, lcdc.update_mode, lcdc.ext_mode);
+ if (plane != OMAPFB_PLANE_GFX)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Configure the LCD DMA for a palette load operation and do the palette
+ * downloading synchronously. We don't use the frame+palette load mode of
+ * the controller, since the palette can always be downloaded separately.
+ */
+static void load_palette(void)
+{
+ u16 *palette;
+
+ palette = (u16 *)lcdc.palette_virt;
+
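+	/*
+	 * The upper nibble of the first palette entry encodes the pixel
+	 * depth (palette_code).
+	 */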
+ *(u16 *)palette &= 0x0fff;
+ *(u16 *)palette |= lcdc.palette_code;
+
+ omap_set_lcd_dma_b1(lcdc.palette_phys,
+ lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);
+
+ omap_set_lcd_dma_single_transfer(1);
+ omap_setup_lcd_dma();
+
+ init_completion(&lcdc.palette_load_complete);
+ enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
+ set_load_mode(OMAP_LCDC_LOAD_PALETTE);
+ enable_controller();
+ if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
+ msecs_to_jiffies(500)))
+ dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
+ /* The controller gets disabled in the irq handler */
+ disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
+ omap_stop_lcd_dma();
+
+ omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
+}
+
+/* Used only in internal controller mode */
+static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
+ u16 transp, int update_hw_pal)
+{
+ u16 *palette;
+
+ if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
+ return -EINVAL;
+
+ palette = (u16 *)lcdc.palette_virt;
+
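+	/*
+	 * Each entry packs 4 bits per component: red in bits 11..8, green
+	 * in bits 7..4, blue in bits 3..0.
+	 */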
+ palette[regno] &= ~0x0fff;
+	palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4) |
+ (blue >> 12);
+
+ if (update_hw_pal) {
+ disable_controller();
+ omap_stop_lcd_dma();
+ load_palette();
+ setup_lcd_dma();
+ set_load_mode(OMAP_LCDC_LOAD_FRAME);
+ enable_controller();
+ }
+
+ return 0;
+}
+
+static void calc_ck_div(int is_tft, int pck, int *pck_div)
+{
+ unsigned long lck;
+
+ pck = max(1, pck);
+ lck = clk_get_rate(lcdc.lcd_ck);
+ *pck_div = (lck + pck - 1) / pck;
+ if (is_tft)
+ *pck_div = max(2, *pck_div);
+ else
+ *pck_div = max(3, *pck_div);
+ if (*pck_div > 255) {
+ /* FIXME: try to adjust logic clock divider as well */
+ *pck_div = 255;
+ dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
+ pck / 1000);
+ }
+}
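+
+/*
+ * For illustration only (values assumed, not taken from any board file):
+ * with a 48 MHz LCD clock and a requested 6 MHz pixel clock the divider
+ * becomes (48000000 + 6000000 - 1) / 6000000 = 8, which is within the
+ * 2..255 range allowed for TFT panels and is used as-is.
+ */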
+
+static inline void setup_regs(void)
+{
+ u32 l;
+ struct lcd_panel *panel = lcdc.fbdev->panel;
+ int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
+ unsigned long lck;
+ int pcd;
+
+ l = omap_readl(OMAP_LCDC_CONTROL);
+ l &= ~OMAP_LCDC_CTRL_LCD_TFT;
+ l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
+#ifdef CONFIG_MACH_OMAP_PALMTE
+/* FIXME:if (machine_is_omap_palmte()) { */
+ /* PalmTE uses alternate TFT setting in 8BPP mode */
+ l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
+/* } */
+#endif
+ omap_writel(l, OMAP_LCDC_CONTROL);
+
+ l = omap_readl(OMAP_LCDC_TIMING2);
+ l &= ~(((1 << 6) - 1) << 20);
+ l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
+ omap_writel(l, OMAP_LCDC_TIMING2);
+
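+	/*
+	 * TIMING0: horizontal size, HSW, HFP and HBP, each programmed as
+	 * value - 1.
+	 */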
+ l = panel->x_res - 1;
+ l |= (panel->hsw - 1) << 10;
+ l |= (panel->hfp - 1) << 16;
+ l |= (panel->hbp - 1) << 24;
+ omap_writel(l, OMAP_LCDC_TIMING0);
+
+ l = panel->y_res - 1;
+ l |= (panel->vsw - 1) << 10;
+ l |= panel->vfp << 16;
+ l |= panel->vbp << 24;
+ omap_writel(l, OMAP_LCDC_TIMING1);
+
+ l = omap_readl(OMAP_LCDC_TIMING2);
+ l &= ~0xff;
+
+ lck = clk_get_rate(lcdc.lcd_ck);
+
+ if (!panel->pcd)
+ calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
+ else {
+ dev_warn(lcdc.fbdev->dev,
+ "Pixel clock divider value is obsolete.\n"
+ "Try to set pixel_clock to %lu and pcd to 0 "
+ "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
+ lck / panel->pcd / 1000, panel->name);
+
+ pcd = panel->pcd;
+ }
+ l |= pcd & 0xff;
+ l |= panel->acb << 8;
+ omap_writel(l, OMAP_LCDC_TIMING2);
+
+ /* update panel info with the exact clock */
+ panel->pixel_clock = lck / pcd / 1000;
+}
+
+/*
+ * Configure the LCD controller, download the color palette and start a looped
+ * DMA transfer of the frame image data. Called only in internal
+ * controller mode.
+ */
+static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
+{
+ int r = 0;
+
+ if (mode != lcdc.update_mode) {
+ switch (mode) {
+ case OMAPFB_AUTO_UPDATE:
+ setup_regs();
+ load_palette();
+
+ /* Setup and start LCD DMA */
+ setup_lcd_dma();
+
+ set_load_mode(OMAP_LCDC_LOAD_FRAME);
+ enable_irqs(OMAP_LCDC_IRQ_DONE);
+ /* This will start the actual DMA transfer */
+ enable_controller();
+ lcdc.update_mode = mode;
+ break;
+ case OMAPFB_UPDATE_DISABLED:
+ disable_controller();
+ omap_stop_lcd_dma();
+ lcdc.update_mode = mode;
+ break;
+ default:
+ r = -EINVAL;
+ }
+ }
+
+ return r;
+}
+
+static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
+{
+ return lcdc.update_mode;
+}
+
+/* PM code called only in internal controller mode */
+static void omap_lcdc_suspend(void)
+{
+ if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+ disable_controller();
+ omap_stop_lcd_dma();
+ }
+}
+
+static void omap_lcdc_resume(void)
+{
+ if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
+ setup_regs();
+ load_palette();
+ setup_lcd_dma();
+ set_load_mode(OMAP_LCDC_LOAD_FRAME);
+ enable_irqs(OMAP_LCDC_IRQ_DONE);
+ enable_controller();
+ }
+}
+
+static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
+{
+ return;
+}
+
+int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
+{
+ BUG_ON(callback == NULL);
+
+ if (lcdc.dma_callback)
+ return -EBUSY;
+ else {
+ lcdc.dma_callback = callback;
+ lcdc.dma_callback_data = data;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(omap_lcdc_set_dma_callback);
+
+void omap_lcdc_free_dma_callback(void)
+{
+ lcdc.dma_callback = NULL;
+}
+EXPORT_SYMBOL(omap_lcdc_free_dma_callback);
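+
+/*
+ * Minimal usage sketch for the callback API above (hypothetical caller, not
+ * part of this patch): an external controller driver can register a frame
+ * completion callback before starting a transfer and free it afterwards.
+ *
+ *	static void frame_done(void *data)
+ *	{
+ *		complete((struct completion *)data);
+ *	}
+ *
+ *	if (omap_lcdc_set_dma_callback(frame_done, &frame_completion) == 0) {
+ *		... start the transfer and wait for frame_completion ...
+ *		omap_lcdc_free_dma_callback();
+ *	}
+ */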
+
+static void lcdc_dma_handler(u16 status, void *data)
+{
+ if (lcdc.dma_callback)
+ lcdc.dma_callback(lcdc.dma_callback_data);
+}
+
+static int mmap_kern(void)
+{
+ struct vm_struct *kvma;
+ struct vm_area_struct vma;
+ pgprot_t pgprot;
+ unsigned long vaddr;
+
+ kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
+ if (kvma == NULL) {
+ dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
+ return -ENOMEM;
+ }
+ vma.vm_mm = &init_mm;
+
+ vaddr = (unsigned long)kvma->addr;
+ vma.vm_start = vaddr;
+ vma.vm_end = vaddr + lcdc.vram_size;
+
+ pgprot = pgprot_writecombine(pgprot_kernel);
+ if (io_remap_pfn_range(&vma, vaddr,
+ lcdc.vram_phys >> PAGE_SHIFT,
+ lcdc.vram_size, pgprot) < 0) {
+ dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
+ return -EAGAIN;
+ }
+
+ lcdc.vram_virt = (void *)vaddr;
+
+ return 0;
+}
+
+static void unmap_kern(void)
+{
+ vunmap(lcdc.vram_virt);
+}
+
+static int alloc_palette_ram(void)
+{
+ lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
+ MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
+ if (lcdc.palette_virt == NULL) {
+ dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
+ return -ENOMEM;
+ }
+ memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);
+
+ return 0;
+}
+
+static void free_palette_ram(void)
+{
+ dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
+ lcdc.palette_virt, lcdc.palette_phys);
+}
+
+static int alloc_fbmem(struct omapfb_mem_region *region)
+{
+ int bpp;
+ int frame_size;
+ struct lcd_panel *panel = lcdc.fbdev->panel;
+
+ bpp = panel->bpp;
+ if (bpp == 12)
+ bpp = 16;
+ frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
+ if (region->size > frame_size)
+ frame_size = region->size;
+ lcdc.vram_size = frame_size;
+ lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
+ lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
+ if (lcdc.vram_virt == NULL) {
+ dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
+ return -ENOMEM;
+ }
+ region->size = frame_size;
+ region->paddr = lcdc.vram_phys;
+ region->vaddr = lcdc.vram_virt;
+ region->alloc = 1;
+
+ memset(lcdc.vram_virt, 0, lcdc.vram_size);
+
+ return 0;
+}
+
+static void free_fbmem(void)
+{
+ dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
+ lcdc.vram_virt, lcdc.vram_phys);
+}
+
+static int setup_fbmem(struct omapfb_mem_desc *req_md)
+{
+ int r;
+
+ if (!req_md->region_cnt) {
+ dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
+ return -EINVAL;
+ }
+
+ if (req_md->region_cnt > 1) {
+ dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
+ req_md->region_cnt = 1;
+ }
+
+ if (req_md->region[0].paddr == 0) {
+ lcdc.fbmem_allocated = 1;
+ if ((r = alloc_fbmem(&req_md->region[0])) < 0)
+ return r;
+ return 0;
+ }
+
+ lcdc.vram_phys = req_md->region[0].paddr;
+ lcdc.vram_size = req_md->region[0].size;
+
+ if ((r = mmap_kern()) < 0)
+ return r;
+
+ dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
+ lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);
+
+ return 0;
+}
+
+static void cleanup_fbmem(void)
+{
+ if (lcdc.fbmem_allocated)
+ free_fbmem();
+ else
+ unmap_kern();
+}
+
+static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
+ struct omapfb_mem_desc *req_vram)
+{
+ int r;
+ u32 l;
+ int rate;
+ struct clk *tc_ck;
+
+ lcdc.irq_mask = 0;
+
+ lcdc.fbdev = fbdev;
+ lcdc.ext_mode = ext_mode;
+
+ l = 0;
+ omap_writel(l, OMAP_LCDC_CONTROL);
+
+ /* FIXME:
+	 * According to errata, some platforms have a clock rate limitation.
+ */
+ lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
+ if (IS_ERR(lcdc.lcd_ck)) {
+ dev_err(fbdev->dev, "unable to access LCD clock\n");
+ r = PTR_ERR(lcdc.lcd_ck);
+ goto fail0;
+ }
+
+ tc_ck = clk_get(NULL, "tc_ck");
+ if (IS_ERR(tc_ck)) {
+ dev_err(fbdev->dev, "unable to access TC clock\n");
+ r = PTR_ERR(tc_ck);
+ goto fail1;
+ }
+
+ rate = clk_get_rate(tc_ck);
+ clk_put(tc_ck);
+
+ if (machine_is_ams_delta())
+ rate /= 4;
+ if (machine_is_omap_h3())
+ rate /= 3;
+ r = clk_set_rate(lcdc.lcd_ck, rate);
+ if (r) {
+ dev_err(fbdev->dev, "failed to adjust LCD rate\n");
+ goto fail1;
+ }
+ clk_enable(lcdc.lcd_ck);
+
+ r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
+ if (r) {
+ dev_err(fbdev->dev, "unable to get IRQ\n");
+ goto fail2;
+ }
+
+ r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
+ if (r) {
+ dev_err(fbdev->dev, "unable to get LCD DMA\n");
+ goto fail3;
+ }
+
+ omap_set_lcd_dma_single_transfer(ext_mode);
+ omap_set_lcd_dma_ext_controller(ext_mode);
+
+ if (!ext_mode)
+ if ((r = alloc_palette_ram()) < 0)
+ goto fail4;
+
+ if ((r = setup_fbmem(req_vram)) < 0)
+ goto fail5;
+
+ pr_info("omapfb: LCDC initialized\n");
+
+ return 0;
+fail5:
+ if (!ext_mode)
+ free_palette_ram();
+fail4:
+ omap_free_lcd_dma();
+fail3:
+ free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
+fail2:
+ clk_disable(lcdc.lcd_ck);
+fail1:
+ clk_put(lcdc.lcd_ck);
+fail0:
+ return r;
+}
+
+static void omap_lcdc_cleanup(void)
+{
+ if (!lcdc.ext_mode)
+ free_palette_ram();
+ cleanup_fbmem();
+ omap_free_lcd_dma();
+ free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
+ clk_disable(lcdc.lcd_ck);
+ clk_put(lcdc.lcd_ck);
+}
+
+const struct lcd_ctrl omap1_int_ctrl = {
+ .name = "internal",
+ .init = omap_lcdc_init,
+ .cleanup = omap_lcdc_cleanup,
+ .get_caps = omap_lcdc_get_caps,
+ .set_update_mode = omap_lcdc_set_update_mode,
+ .get_update_mode = omap_lcdc_get_update_mode,
+ .update_window = NULL,
+ .suspend = omap_lcdc_suspend,
+ .resume = omap_lcdc_resume,
+ .setup_plane = omap_lcdc_setup_plane,
+ .enable_plane = omap_lcdc_enable_plane,
+ .setcolreg = omap_lcdc_setcolreg,
+};
diff --git a/drivers/video/omap/lcdc.h b/drivers/video/omap/lcdc.h
new file mode 100644
index 000000000000..adb731e5314a
--- /dev/null
+++ b/drivers/video/omap/lcdc.h
@@ -0,0 +1,7 @@
+#ifndef LCDC_H
+#define LCDC_H
+
+int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data);
+void omap_lcdc_free_dma_callback(void);
+
+#endif
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
new file mode 100644
index 000000000000..14d0f7a11145
--- /dev/null
+++ b/drivers/video/omap/omapfb_main.c
@@ -0,0 +1,1941 @@
+/*
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * Acknowledgements:
+ * Alex McMains <aam@ridgerun.com> - Original driver
+ * Juha Yrjola <juha.yrjola@nokia.com> - Original driver and improvements
+ * Dirk Behme <dirk.behme@de.bosch.com> - changes for 2.6 kernel API
+ * Texas Instruments - H3 support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#define MODULE_NAME "omapfb"
+
+static unsigned int def_accel;
+static unsigned long def_vram[OMAPFB_PLANE_NUM];
+static int def_vram_cnt;
+static unsigned long def_vxres;
+static unsigned long def_vyres;
+static unsigned int def_rotate;
+static unsigned int def_mirror;
+
+#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
+static int manual_update = 1;
+#else
+static int manual_update;
+#endif
+
+static struct platform_device *fbdev_pdev;
+static struct lcd_panel *fbdev_panel;
+static struct omapfb_device *omapfb_dev;
+
+struct caps_table_struct {
+ unsigned long flag;
+ const char *name;
+};
+
+static struct caps_table_struct ctrl_caps[] = {
+ { OMAPFB_CAPS_MANUAL_UPDATE, "manual update" },
+ { OMAPFB_CAPS_TEARSYNC, "tearing synchronization" },
+ { OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" },
+ { OMAPFB_CAPS_PLANE_SCALE, "scale plane" },
+ { OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE, "pixel double window" },
+ { OMAPFB_CAPS_WINDOW_SCALE, "scale window" },
+ { OMAPFB_CAPS_WINDOW_OVERLAY, "overlay window" },
+ { OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" },
+};
+
+static struct caps_table_struct color_caps[] = {
+ { 1 << OMAPFB_COLOR_RGB565, "RGB565", },
+ { 1 << OMAPFB_COLOR_YUV422, "YUV422", },
+ { 1 << OMAPFB_COLOR_YUV420, "YUV420", },
+ { 1 << OMAPFB_COLOR_CLUT_8BPP, "CLUT8", },
+ { 1 << OMAPFB_COLOR_CLUT_4BPP, "CLUT4", },
+ { 1 << OMAPFB_COLOR_CLUT_2BPP, "CLUT2", },
+ { 1 << OMAPFB_COLOR_CLUT_1BPP, "CLUT1", },
+ { 1 << OMAPFB_COLOR_RGB444, "RGB444", },
+ { 1 << OMAPFB_COLOR_YUY422, "YUY422", },
+};
+
+/*
+ * ---------------------------------------------------------------------------
+ * LCD panel
+ * ---------------------------------------------------------------------------
+ */
+extern struct lcd_ctrl omap1_int_ctrl;
+extern struct lcd_ctrl omap2_int_ctrl;
+extern struct lcd_ctrl hwa742_ctrl;
+extern struct lcd_ctrl blizzard_ctrl;
+
+static struct lcd_ctrl *ctrls[] = {
+#ifdef CONFIG_ARCH_OMAP1
+ &omap1_int_ctrl,
+#else
+ &omap2_int_ctrl,
+#endif
+
+#ifdef CONFIG_FB_OMAP_LCDC_HWA742
+ &hwa742_ctrl,
+#endif
+#ifdef CONFIG_FB_OMAP_LCDC_BLIZZARD
+ &blizzard_ctrl,
+#endif
+};
+
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+#ifdef CONFIG_ARCH_OMAP1
+extern struct lcd_ctrl_extif omap1_ext_if;
+#else
+extern struct lcd_ctrl_extif omap2_ext_if;
+#endif
+#endif
+
+static void omapfb_rqueue_lock(struct omapfb_device *fbdev)
+{
+ mutex_lock(&fbdev->rqueue_mutex);
+}
+
+static void omapfb_rqueue_unlock(struct omapfb_device *fbdev)
+{
+ mutex_unlock(&fbdev->rqueue_mutex);
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * LCD controller and LCD DMA
+ * ---------------------------------------------------------------------------
+ */
+/* Lookup table to map elem size to elem type. */
+static const int dma_elem_type[] = {
+ 0,
+ OMAP_DMA_DATA_TYPE_S8,
+ OMAP_DMA_DATA_TYPE_S16,
+ 0,
+ OMAP_DMA_DATA_TYPE_S32,
+};
+
+/*
+ * Allocate resources needed for LCD controller and LCD DMA operations. Video
+ * memory is allocated from system memory according to the virtual display
+ * size, except if a bigger memory size is specified explicitly as a kernel
+ * parameter.
+ */
+static int ctrl_init(struct omapfb_device *fbdev)
+{
+ int r;
+ int i;
+
+ /* kernel/module vram parameters override boot tags/board config */
+ if (def_vram_cnt) {
+ for (i = 0; i < def_vram_cnt; i++)
+ fbdev->mem_desc.region[i].size =
+ PAGE_ALIGN(def_vram[i]);
+ fbdev->mem_desc.region_cnt = i;
+ } else {
+ struct omapfb_platform_data *conf;
+
+ conf = fbdev->dev->platform_data;
+ fbdev->mem_desc = conf->mem_desc;
+ }
+
+ if (!fbdev->mem_desc.region_cnt) {
+ struct lcd_panel *panel = fbdev->panel;
+ int def_size;
+ int bpp = panel->bpp;
+
+ /* 12 bpp is packed in 16 bits */
+ if (bpp == 12)
+ bpp = 16;
+ def_size = def_vxres * def_vyres * bpp / 8;
+ fbdev->mem_desc.region_cnt = 1;
+ fbdev->mem_desc.region[0].size = PAGE_ALIGN(def_size);
+ }
+ r = fbdev->ctrl->init(fbdev, 0, &fbdev->mem_desc);
+ if (r < 0) {
+ dev_err(fbdev->dev, "controller initialization failed (%d)\n",
+ r);
+ return r;
+ }
+
+#ifdef DEBUG
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ dev_dbg(fbdev->dev, "region%d phys %08x virt %p size=%lu\n",
+ i,
+ fbdev->mem_desc.region[i].paddr,
+ fbdev->mem_desc.region[i].vaddr,
+ fbdev->mem_desc.region[i].size);
+ }
+#endif
+ return 0;
+}
+
+static void ctrl_cleanup(struct omapfb_device *fbdev)
+{
+ fbdev->ctrl->cleanup();
+}
+
+/* Must be called with fbdev->rqueue_mutex held. */
+static int ctrl_change_mode(struct fb_info *fbi)
+{
+ int r;
+ unsigned long offset;
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_var_screeninfo *var = &fbi->var;
+
+ offset = var->yoffset * fbi->fix.line_length +
+ var->xoffset * var->bits_per_pixel / 8;
+
+ if (fbdev->ctrl->sync)
+ fbdev->ctrl->sync();
+ r = fbdev->ctrl->setup_plane(plane->idx, plane->info.channel_out,
+ offset, var->xres_virtual,
+ plane->info.pos_x, plane->info.pos_y,
+ var->xres, var->yres, plane->color_mode);
+ if (fbdev->ctrl->set_scale != NULL)
+ r = fbdev->ctrl->set_scale(plane->idx,
+ var->xres, var->yres,
+ plane->info.out_width,
+ plane->info.out_height);
+
+ return r;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * fbdev framework callbacks and the ioctl interface
+ * ---------------------------------------------------------------------------
+ */
+/* Called each time the omapfb device is opened */
+static int omapfb_open(struct fb_info *info, int user)
+{
+ return 0;
+}
+
+static void omapfb_sync(struct fb_info *info);
+
+/* Called when the omapfb device is closed. We make sure that any pending
+ * gfx DMA operations have finished before we return. */
+static int omapfb_release(struct fb_info *info, int user)
+{
+ omapfb_sync(info);
+ return 0;
+}
+
+/* Store a single color palette entry into a pseudo palette or the hardware
+ * palette if one is available. For now we support only 16bpp and thus store
+ * the entry only to the pseudo palette.
+ */
+static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp, int update_hw_pal)
+{
+ struct omapfb_plane_struct *plane = info->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_var_screeninfo *var = &info->var;
+ int r = 0;
+
+ switch (plane->color_mode) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUV420:
+ case OMAPFB_COLOR_YUY422:
+ r = -EINVAL;
+ break;
+ case OMAPFB_COLOR_CLUT_8BPP:
+ case OMAPFB_COLOR_CLUT_4BPP:
+ case OMAPFB_COLOR_CLUT_2BPP:
+ case OMAPFB_COLOR_CLUT_1BPP:
+ if (fbdev->ctrl->setcolreg)
+ r = fbdev->ctrl->setcolreg(regno, red, green, blue,
+ transp, update_hw_pal);
+ /* Fallthrough */
+ case OMAPFB_COLOR_RGB565:
+ case OMAPFB_COLOR_RGB444:
+ if (r != 0)
+ break;
+
+ if (regno < 0) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (regno < 16) {
+ u16 pal;
+ pal = ((red >> (16 - var->red.length)) <<
+ var->red.offset) |
+ ((green >> (16 - var->green.length)) <<
+ var->green.offset) |
+ (blue >> (16 - var->blue.length));
+ ((u32 *)(info->pseudo_palette))[regno] = pal;
+ }
+ break;
+ default:
+ BUG();
+ }
+ return r;
+}
+
+static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ return _setcolreg(info, regno, red, green, blue, transp, 1);
+}
+
+static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ int count, index, r;
+ u16 *red, *green, *blue, *transp;
+ u16 trans = 0xffff;
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ index = cmap->start;
+
+ for (count = 0; count < cmap->len; count++) {
+ if (transp)
+ trans = *transp++;
+ r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
+ count == cmap->len - 1);
+ if (r != 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int omapfb_update_full_screen(struct fb_info *fbi);
+
+static int omapfb_blank(int blank, struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int do_update = 0;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ switch (blank) {
+ case VESA_NO_BLANKING:
+ if (fbdev->state == OMAPFB_SUSPENDED) {
+ if (fbdev->ctrl->resume)
+ fbdev->ctrl->resume();
+ fbdev->panel->enable(fbdev->panel);
+ fbdev->state = OMAPFB_ACTIVE;
+ if (fbdev->ctrl->get_update_mode() ==
+ OMAPFB_MANUAL_UPDATE)
+ do_update = 1;
+ }
+ break;
+ case VESA_POWERDOWN:
+ if (fbdev->state == OMAPFB_ACTIVE) {
+ fbdev->panel->disable(fbdev->panel);
+ if (fbdev->ctrl->suspend)
+ fbdev->ctrl->suspend();
+ fbdev->state = OMAPFB_SUSPENDED;
+ }
+ break;
+ default:
+ r = -EINVAL;
+ }
+ omapfb_rqueue_unlock(fbdev);
+
+ if (r == 0 && do_update)
+ r = omapfb_update_full_screen(fbi);
+
+ return r;
+}
+
+static void omapfb_sync(struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+
+ omapfb_rqueue_lock(fbdev);
+ if (fbdev->ctrl->sync)
+ fbdev->ctrl->sync();
+ omapfb_rqueue_unlock(fbdev);
+}
+
+/*
+ * Set fb_info.fix fields and also update fbdev.
+ * When calling this, fb_info.var must be set up already.
+ */
+static void set_fb_fix(struct fb_info *fbi)
+{
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_mem_region *rg;
+ int bpp;
+
+ rg = &plane->fbdev->mem_desc.region[plane->idx];
+ fbi->screen_base = (char __iomem *)rg->vaddr;
+ fix->smem_start = rg->paddr;
+ fix->smem_len = rg->size;
+
+ fix->type = FB_TYPE_PACKED_PIXELS;
+ bpp = var->bits_per_pixel;
+ if (var->nonstd)
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ else switch (var->bits_per_pixel) {
+ case 16:
+ case 12:
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ /* 12bpp is stored in 16 bits */
+ bpp = 16;
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ }
+ fix->accel = FB_ACCEL_OMAP1610;
+ fix->line_length = var->xres_virtual * bpp / 8;
+}
+
+static int set_color_mode(struct omapfb_plane_struct *plane,
+ struct fb_var_screeninfo *var)
+{
+ switch (var->nonstd) {
+ case 0:
+ break;
+ case OMAPFB_COLOR_YUV422:
+ var->bits_per_pixel = 16;
+ plane->color_mode = var->nonstd;
+ return 0;
+ case OMAPFB_COLOR_YUV420:
+ var->bits_per_pixel = 12;
+ plane->color_mode = var->nonstd;
+ return 0;
+ case OMAPFB_COLOR_YUY422:
+ var->bits_per_pixel = 16;
+ plane->color_mode = var->nonstd;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 1:
+ plane->color_mode = OMAPFB_COLOR_CLUT_1BPP;
+ return 0;
+ case 2:
+ plane->color_mode = OMAPFB_COLOR_CLUT_2BPP;
+ return 0;
+ case 4:
+ plane->color_mode = OMAPFB_COLOR_CLUT_4BPP;
+ return 0;
+ case 8:
+ plane->color_mode = OMAPFB_COLOR_CLUT_8BPP;
+ return 0;
+ case 12:
+ var->bits_per_pixel = 16;
+ plane->color_mode = OMAPFB_COLOR_RGB444;
+ return 0;
+ case 16:
+ plane->color_mode = OMAPFB_COLOR_RGB565;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Check the values in var against our capabilities and, in case of
+ * out-of-bound values, try to adjust them.
+ */
+static int set_fb_var(struct fb_info *fbi,
+ struct fb_var_screeninfo *var)
+{
+ int bpp;
+ unsigned long max_frame_size;
+ unsigned long line_size;
+ int xres_min, xres_max;
+ int yres_min, yres_max;
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct lcd_panel *panel = fbdev->panel;
+
+ if (set_color_mode(plane, var) < 0)
+ return -EINVAL;
+
+ bpp = var->bits_per_pixel;
+ if (plane->color_mode == OMAPFB_COLOR_RGB444)
+ bpp = 16;
+
+ switch (var->rotate) {
+ case 0:
+ case 180:
+ xres_min = OMAPFB_PLANE_XRES_MIN;
+ xres_max = panel->x_res;
+ yres_min = OMAPFB_PLANE_YRES_MIN;
+ yres_max = panel->y_res;
+ if (cpu_is_omap15xx()) {
+ var->xres = panel->x_res;
+ var->yres = panel->y_res;
+ }
+ break;
+ case 90:
+ case 270:
+ xres_min = OMAPFB_PLANE_YRES_MIN;
+ xres_max = panel->y_res;
+ yres_min = OMAPFB_PLANE_XRES_MIN;
+ yres_max = panel->x_res;
+ if (cpu_is_omap15xx()) {
+ var->xres = panel->y_res;
+ var->yres = panel->x_res;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (var->xres < xres_min)
+ var->xres = xres_min;
+ if (var->yres < yres_min)
+ var->yres = yres_min;
+ if (var->xres > xres_max)
+ var->xres = xres_max;
+ if (var->yres > yres_max)
+ var->yres = yres_max;
+
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
+ max_frame_size = fbdev->mem_desc.region[plane->idx].size;
+ line_size = var->xres_virtual * bpp / 8;
+ if (line_size * var->yres_virtual > max_frame_size) {
+ /* Try to keep yres_virtual first */
+ line_size = max_frame_size / var->yres_virtual;
+ var->xres_virtual = line_size * 8 / bpp;
+ if (var->xres_virtual < var->xres) {
+ /* Still doesn't fit. Shrink yres_virtual too */
+ var->xres_virtual = var->xres;
+ line_size = var->xres * bpp / 8;
+ var->yres_virtual = max_frame_size / line_size;
+ }
+ /* Recheck this, as the virtual size changed. */
+ if (var->xres_virtual < var->xres)
+ var->xres = var->xres_virtual;
+ if (var->yres_virtual < var->yres)
+ var->yres = var->yres_virtual;
+ if (var->xres < xres_min || var->yres < yres_min)
+ return -EINVAL;
+ }
+ if (var->xres + var->xoffset > var->xres_virtual)
+ var->xoffset = var->xres_virtual - var->xres;
+ if (var->yres + var->yoffset > var->yres_virtual)
+ var->yoffset = var->yres_virtual - var->yres;
+ line_size = var->xres * bpp / 8;
+
+ if (plane->color_mode == OMAPFB_COLOR_RGB444) {
+ var->red.offset = 8; var->red.length = 4;
+ var->red.msb_right = 0;
+ var->green.offset = 4; var->green.length = 4;
+ var->green.msb_right = 0;
+ var->blue.offset = 0; var->blue.length = 4;
+ var->blue.msb_right = 0;
+ } else {
+ var->red.offset = 11; var->red.length = 5;
+ var->red.msb_right = 0;
+ var->green.offset = 5; var->green.length = 6;
+ var->green.msb_right = 0;
+ var->blue.offset = 0; var->blue.length = 5;
+ var->blue.msb_right = 0;
+ }
+
+ var->height = -1;
+ var->width = -1;
+ var->grayscale = 0;
+
+	/* pixclock is in picoseconds, the other timings are in pixel clocks */
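+	/* e.g. pixel_clock == 6000 (6 MHz) gives pixclock == 166666 ps */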
+ var->pixclock = 10000000 / (panel->pixel_clock / 100);
+ var->left_margin = panel->hfp;
+ var->right_margin = panel->hbp;
+ var->upper_margin = panel->vfp;
+ var->lower_margin = panel->vbp;
+ var->hsync_len = panel->hsw;
+ var->vsync_len = panel->vsw;
+
+ /* TODO: get these from panel->config */
+ var->vmode = FB_VMODE_NONINTERLACED;
+ var->sync = 0;
+
+ return 0;
+}
+
+
+/* Set rotation (0, 90, 180 or 270 degrees) and switch to the new mode. */
+static void omapfb_rotate(struct fb_info *fbi, int rotate)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+
+ omapfb_rqueue_lock(fbdev);
+ if (cpu_is_omap15xx() && rotate != fbi->var.rotate) {
+ struct fb_var_screeninfo *new_var = &fbdev->new_var;
+
+ memcpy(new_var, &fbi->var, sizeof(*new_var));
+ new_var->rotate = rotate;
+ if (set_fb_var(fbi, new_var) == 0 &&
+ memcmp(new_var, &fbi->var, sizeof(*new_var))) {
+ memcpy(&fbi->var, new_var, sizeof(*new_var));
+ ctrl_change_mode(fbi);
+ }
+ }
+ omapfb_rqueue_unlock(fbdev);
+}
+
+/*
+ * Set new x,y offsets in the virtual display for the visible area and switch
+ * to the new mode.
+ */
+static int omapfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ if (var->xoffset != fbi->var.xoffset ||
+ var->yoffset != fbi->var.yoffset) {
+ struct fb_var_screeninfo *new_var = &fbdev->new_var;
+
+ memcpy(new_var, &fbi->var, sizeof(*new_var));
+ new_var->xoffset = var->xoffset;
+ new_var->yoffset = var->yoffset;
+ if (set_fb_var(fbi, new_var))
+ r = -EINVAL;
+ else {
+ memcpy(&fbi->var, new_var, sizeof(*new_var));
+ ctrl_change_mode(fbi);
+ }
+ }
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/* Mirror the display about the vertical axis and switch to the new mode. */
+static int omapfb_mirror(struct fb_info *fbi, int mirror)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ mirror = mirror ? 1 : 0;
+ if (cpu_is_omap15xx())
+ r = -EINVAL;
+ else if (mirror != plane->info.mirror) {
+ plane->info.mirror = mirror;
+ r = ctrl_change_mode(fbi);
+ }
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/*
+ * Check the values in var, try to adjust out-of-bound values where
+ * possible, or return an error.
+ */
+static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ if (fbdev->ctrl->sync != NULL)
+ fbdev->ctrl->sync();
+ r = set_fb_var(fbi, var);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/*
+ * Switch to a new mode. Its parameters have already been checked by
+ * omapfb_check_var.
+ */
+static int omapfb_set_par(struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ set_fb_fix(fbi);
+ r = ctrl_change_mode(fbi);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+int omapfb_update_window_async(struct fb_info *fbi,
+ struct omapfb_update_window *win,
+ void (*callback)(void *),
+ void *callback_data)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_var_screeninfo *var;
+
+ var = &fbi->var;
+ if (win->x >= var->xres || win->y >= var->yres ||
+ win->out_x > var->xres || win->out_y >= var->yres)
+ return -EINVAL;
+
+ if (!fbdev->ctrl->update_window ||
+ fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
+ return -ENODEV;
+
+ if (win->x + win->width >= var->xres)
+ win->width = var->xres - win->x;
+ if (win->y + win->height >= var->yres)
+ win->height = var->yres - win->y;
+ /* The out sizes should be cropped to the LCD size */
+ if (win->out_x + win->out_width > fbdev->panel->x_res)
+ win->out_width = fbdev->panel->x_res - win->out_x;
+ if (win->out_y + win->out_height > fbdev->panel->y_res)
+ win->out_height = fbdev->panel->y_res - win->out_y;
+ if (!win->width || !win->height || !win->out_width || !win->out_height)
+ return 0;
+
+ return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
+}
+EXPORT_SYMBOL(omapfb_update_window_async);
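+
+/*
+ * Minimal usage sketch (hypothetical caller, values assumed): in manual
+ * update mode a client could push a 100x100 region at offset (10, 10),
+ * unscaled, with:
+ *
+ *	struct omapfb_update_window win = {
+ *		.x = 10, .y = 10, .width = 100, .height = 100,
+ *		.out_x = 10, .out_y = 10, .out_width = 100, .out_height = 100,
+ *	};
+ *
+ *	r = omapfb_update_window_async(fbi, &win, NULL, NULL);
+ */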
+
+static int omapfb_update_win(struct fb_info *fbi,
+ struct omapfb_update_window *win)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ int ret;
+
+ omapfb_rqueue_lock(plane->fbdev);
+ ret = omapfb_update_window_async(fbi, win, NULL, 0);
+ omapfb_rqueue_unlock(plane->fbdev);
+
+ return ret;
+}
+
+static int omapfb_update_full_screen(struct fb_info *fbi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct omapfb_update_window win;
+ int r;
+
+ if (!fbdev->ctrl->update_window ||
+ fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
+ return -ENODEV;
+
+ win.x = 0;
+ win.y = 0;
+ win.width = fbi->var.xres;
+ win.height = fbi->var.yres;
+ win.out_x = 0;
+ win.out_y = 0;
+ win.out_width = fbi->var.xres;
+ win.out_height = fbi->var.yres;
+ win.format = 0;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->update_window(fbi, &win, NULL, 0);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct lcd_panel *panel = fbdev->panel;
+ struct omapfb_plane_info old_info;
+ int r = 0;
+
+ if (pi->pos_x + pi->out_width > panel->x_res ||
+ pi->pos_y + pi->out_height > panel->y_res)
+ return -EINVAL;
+
+ omapfb_rqueue_lock(fbdev);
+ if (pi->enabled && !fbdev->mem_desc.region[plane->idx].size) {
+ /*
+ * This plane's memory was freed, can't enable it
+ * until it's reallocated.
+ */
+ r = -EINVAL;
+ goto out;
+ }
+ old_info = plane->info;
+ plane->info = *pi;
+ if (pi->enabled) {
+ r = ctrl_change_mode(fbi);
+ if (r < 0) {
+ plane->info = old_info;
+ goto out;
+ }
+ }
+ r = fbdev->ctrl->enable_plane(plane->idx, pi->enabled);
+ if (r < 0) {
+ plane->info = old_info;
+ goto out;
+ }
+out:
+ omapfb_rqueue_unlock(fbdev);
+ return r;
+}
+
+static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+
+ *pi = plane->info;
+ return 0;
+}
+
+static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct omapfb_mem_region *rg = &fbdev->mem_desc.region[plane->idx];
+ size_t size;
+ int r = 0;
+
+ if (fbdev->ctrl->setup_mem == NULL)
+ return -ENODEV;
+ if (mi->type > OMAPFB_MEMTYPE_MAX)
+ return -EINVAL;
+
+ size = PAGE_ALIGN(mi->size);
+ omapfb_rqueue_lock(fbdev);
+ if (plane->info.enabled) {
+ r = -EBUSY;
+ goto out;
+ }
+ if (rg->size != size || rg->type != mi->type) {
+ struct fb_var_screeninfo *new_var = &fbdev->new_var;
+ unsigned long old_size = rg->size;
+ u8 old_type = rg->type;
+ unsigned long paddr;
+
+ rg->size = size;
+ rg->type = mi->type;
+ /*
+ * size == 0 is a special case, for which we
+ * don't check / adjust the screen parameters.
+ * This isn't a problem since the plane can't
+ * be reenabled unless its size is > 0.
+ */
+ if (old_size != size && size) {
+ if (size) {
+ memcpy(new_var, &fbi->var, sizeof(*new_var));
+ r = set_fb_var(fbi, new_var);
+ if (r < 0)
+ goto out;
+ }
+ }
+
+ if (fbdev->ctrl->sync)
+ fbdev->ctrl->sync();
+ r = fbdev->ctrl->setup_mem(plane->idx, size, mi->type, &paddr);
+ if (r < 0) {
+ /* Revert changes. */
+ rg->size = old_size;
+ rg->type = old_type;
+ goto out;
+ }
+ rg->paddr = paddr;
+
+ if (old_size != size) {
+ if (size) {
+ memcpy(&fbi->var, new_var, sizeof(fbi->var));
+ set_fb_fix(fbi);
+ } else {
+ /*
+ * Set these explicitly to indicate that the
+				 * plane memory is deallocated; the other
+ * screen parameters in var / fix are invalid.
+ */
+ fbi->fix.smem_start = 0;
+ fbi->fix.smem_len = 0;
+ }
+ }
+ }
+out:
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct omapfb_mem_region *rg;
+
+ rg = &fbdev->mem_desc.region[plane->idx];
+ memset(mi, 0, sizeof(*mi));
+ mi->size = rg->size;
+ mi->type = rg->type;
+
+ return 0;
+}
+
+static int omapfb_set_color_key(struct omapfb_device *fbdev,
+ struct omapfb_color_key *ck)
+{
+ int r;
+
+ if (!fbdev->ctrl->set_color_key)
+ return -ENODEV;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->set_color_key(ck);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_get_color_key(struct omapfb_device *fbdev,
+ struct omapfb_color_key *ck)
+{
+ int r;
+
+ if (!fbdev->ctrl->get_color_key)
+ return -ENODEV;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->get_color_key(ck);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static struct blocking_notifier_head omapfb_client_list[OMAPFB_PLANE_NUM];
+static int notifier_inited;
+
+static void omapfb_init_notifier(void)
+{
+ int i;
+
+ for (i = 0; i < OMAPFB_PLANE_NUM; i++)
+ BLOCKING_INIT_NOTIFIER_HEAD(&omapfb_client_list[i]);
+}
+
+int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
+ omapfb_notifier_callback_t callback,
+ void *callback_data)
+{
+ int r;
+
+	if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
+ return -EINVAL;
+
+ if (!notifier_inited) {
+ omapfb_init_notifier();
+ notifier_inited = 1;
+ }
+
+ omapfb_nb->nb.notifier_call = (int (*)(struct notifier_block *,
+ unsigned long, void *))callback;
+ omapfb_nb->data = callback_data;
+ r = blocking_notifier_chain_register(
+ &omapfb_client_list[omapfb_nb->plane_idx],
+ &omapfb_nb->nb);
+ if (r)
+ return r;
+ if (omapfb_dev != NULL &&
+ omapfb_dev->ctrl && omapfb_dev->ctrl->bind_client) {
+ omapfb_dev->ctrl->bind_client(omapfb_nb);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(omapfb_register_client);
+
+int omapfb_unregister_client(struct omapfb_notifier_block *omapfb_nb)
+{
+ return blocking_notifier_chain_unregister(
+ &omapfb_client_list[omapfb_nb->plane_idx], &omapfb_nb->nb);
+}
+EXPORT_SYMBOL(omapfb_unregister_client);
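+
+/*
+ * Minimal usage sketch (hypothetical client, not part of this patch): a
+ * client picks a plane, registers its notifier callback and unregisters
+ * it again on teardown.
+ *
+ *	my_nb.plane_idx = 0;
+ *	r = omapfb_register_client(&my_nb, my_callback, my_data);
+ *	...
+ *	omapfb_unregister_client(&my_nb);
+ */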
+
+void omapfb_notify_clients(struct omapfb_device *fbdev, unsigned long event)
+{
+ int i;
+
+ if (!notifier_inited)
+ /* no client registered yet */
+ return;
+
+ for (i = 0; i < OMAPFB_PLANE_NUM; i++)
+ blocking_notifier_call_chain(&omapfb_client_list[i], event,
+ fbdev->fb_info[i]);
+}
+EXPORT_SYMBOL(omapfb_notify_clients);
+
+static int omapfb_set_update_mode(struct omapfb_device *fbdev,
+ enum omapfb_update_mode mode)
+{
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->set_update_mode(mode);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static enum omapfb_update_mode omapfb_get_update_mode(struct omapfb_device *fbdev)
+{
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->get_update_mode();
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+static void omapfb_get_caps(struct omapfb_device *fbdev, int plane,
+ struct omapfb_caps *caps)
+{
+ memset(caps, 0, sizeof(*caps));
+ fbdev->ctrl->get_caps(plane, caps);
+ caps->ctrl |= fbdev->panel->get_caps(fbdev->panel);
+}
+
+/* For lcd testing */
+void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval)
+{
+ omapfb_rqueue_lock(fbdev);
+ *(u16 *)fbdev->mem_desc.region[0].vaddr = pixval;
+ if (fbdev->ctrl->get_update_mode() == OMAPFB_MANUAL_UPDATE) {
+ struct omapfb_update_window win;
+
+ memset(&win, 0, sizeof(win));
+ win.width = 2;
+ win.height = 2;
+ win.out_width = 2;
+ win.out_height = 2;
+ fbdev->ctrl->update_window(fbdev->fb_info[0], &win, NULL, 0);
+ }
+ omapfb_rqueue_unlock(fbdev);
+}
+EXPORT_SYMBOL(omapfb_write_first_pixel);
+
+/*
+ * Ioctl interface. Part of the kernel mode frame buffer API is duplicated
+ * here to be accessible by user mode code.
+ */
+static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct omapfb_plane_struct *plane = fbi->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ struct fb_ops *ops = fbi->fbops;
+ union {
+ struct omapfb_update_window update_window;
+ struct omapfb_plane_info plane_info;
+ struct omapfb_mem_info mem_info;
+ struct omapfb_color_key color_key;
+ enum omapfb_update_mode update_mode;
+ struct omapfb_caps caps;
+ unsigned int mirror;
+ int plane_out;
+ int enable_plane;
+ } p;
+ int r = 0;
+
+ BUG_ON(!ops);
+ switch (cmd) {
+ case OMAPFB_MIRROR:
+ if (get_user(p.mirror, (int __user *)arg))
+ r = -EFAULT;
+ else
+ omapfb_mirror(fbi, p.mirror);
+ break;
+ case OMAPFB_SYNC_GFX:
+ omapfb_sync(fbi);
+ break;
+ case OMAPFB_VSYNC:
+ break;
+ case OMAPFB_SET_UPDATE_MODE:
+ if (get_user(p.update_mode, (int __user *)arg))
+ r = -EFAULT;
+ else
+ r = omapfb_set_update_mode(fbdev, p.update_mode);
+ break;
+ case OMAPFB_GET_UPDATE_MODE:
+ p.update_mode = omapfb_get_update_mode(fbdev);
+ if (put_user(p.update_mode,
+ (enum omapfb_update_mode __user *)arg))
+ r = -EFAULT;
+ break;
+ case OMAPFB_UPDATE_WINDOW_OLD:
+ if (copy_from_user(&p.update_window, (void __user *)arg,
+ sizeof(struct omapfb_update_window_old)))
+ r = -EFAULT;
+ else {
+ struct omapfb_update_window *u = &p.update_window;
+ u->out_x = u->x;
+ u->out_y = u->y;
+ u->out_width = u->width;
+ u->out_height = u->height;
+ memset(u->reserved, 0, sizeof(u->reserved));
+ r = omapfb_update_win(fbi, u);
+ }
+ break;
+ case OMAPFB_UPDATE_WINDOW:
+ if (copy_from_user(&p.update_window, (void __user *)arg,
+ sizeof(p.update_window)))
+ r = -EFAULT;
+ else
+ r = omapfb_update_win(fbi, &p.update_window);
+ break;
+ case OMAPFB_SETUP_PLANE:
+ if (copy_from_user(&p.plane_info, (void __user *)arg,
+ sizeof(p.plane_info)))
+ r = -EFAULT;
+ else
+ r = omapfb_setup_plane(fbi, &p.plane_info);
+ break;
+ case OMAPFB_QUERY_PLANE:
+ if ((r = omapfb_query_plane(fbi, &p.plane_info)) < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.plane_info,
+ sizeof(p.plane_info)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_SETUP_MEM:
+ if (copy_from_user(&p.mem_info, (void __user *)arg,
+ sizeof(p.mem_info)))
+ r = -EFAULT;
+ else
+ r = omapfb_setup_mem(fbi, &p.mem_info);
+ break;
+ case OMAPFB_QUERY_MEM:
+ if ((r = omapfb_query_mem(fbi, &p.mem_info)) < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.mem_info,
+ sizeof(p.mem_info)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_SET_COLOR_KEY:
+ if (copy_from_user(&p.color_key, (void __user *)arg,
+ sizeof(p.color_key)))
+ r = -EFAULT;
+ else
+ r = omapfb_set_color_key(fbdev, &p.color_key);
+ break;
+ case OMAPFB_GET_COLOR_KEY:
+ if ((r = omapfb_get_color_key(fbdev, &p.color_key)) < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.color_key,
+ sizeof(p.color_key)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_GET_CAPS:
+ omapfb_get_caps(fbdev, plane->idx, &p.caps);
+ if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
+ r = -EFAULT;
+ break;
+ case OMAPFB_LCD_TEST:
+ {
+ int test_num;
+
+ if (get_user(test_num, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!fbdev->panel->run_test) {
+ r = -EINVAL;
+ break;
+ }
+ r = fbdev->panel->run_test(fbdev->panel, test_num);
+ break;
+ }
+ case OMAPFB_CTRL_TEST:
+ {
+ int test_num;
+
+ if (get_user(test_num, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!fbdev->ctrl->run_test) {
+ r = -EINVAL;
+ break;
+ }
+ r = fbdev->ctrl->run_test(test_num);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct omapfb_plane_struct *plane = info->par;
+ struct omapfb_device *fbdev = plane->fbdev;
+ int r;
+
+ omapfb_rqueue_lock(fbdev);
+ r = fbdev->ctrl->mmap(info, vma);
+ omapfb_rqueue_unlock(fbdev);
+
+ return r;
+}
+
+/*
+ * Callback table for the frame buffer framework. Some of these pointers
+ * will be changed according to the current setting of fb_info->accel_flags.
+ */
+static struct fb_ops omapfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = omapfb_open,
+ .fb_release = omapfb_release,
+ .fb_setcolreg = omapfb_setcolreg,
+ .fb_setcmap = omapfb_setcmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_blank = omapfb_blank,
+ .fb_ioctl = omapfb_ioctl,
+ .fb_check_var = omapfb_check_var,
+ .fb_set_par = omapfb_set_par,
+ .fb_rotate = omapfb_rotate,
+ .fb_pan_display = omapfb_pan_display,
+};
+
+/*
+ * ---------------------------------------------------------------------------
+ * Sysfs interface
+ * ---------------------------------------------------------------------------
+ */
+/* omapfbX sysfs entries */
+static ssize_t omapfb_show_caps_num(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int plane;
+ size_t size;
+ struct omapfb_caps caps;
+
+ plane = 0;
+ size = 0;
+ while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+ omapfb_get_caps(fbdev, plane, &caps);
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ "plane#%d %#010x %#010x %#010x\n",
+ plane, caps.ctrl, caps.plane_color, caps.wnd_color);
+ plane++;
+ }
+ return size;
+}
+
+static ssize_t omapfb_show_caps_text(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int i;
+ struct omapfb_caps caps;
+ int plane;
+ size_t size;
+
+ plane = 0;
+ size = 0;
+ while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
+ omapfb_get_caps(fbdev, plane, &caps);
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ "plane#%d:\n", plane);
+ for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
+ size < PAGE_SIZE; i++) {
+ if (ctrl_caps[i].flag & caps.ctrl)
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " %s\n", ctrl_caps[i].name);
+ }
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " plane colors:\n");
+ for (i = 0; i < ARRAY_SIZE(color_caps) &&
+ size < PAGE_SIZE; i++) {
+ if (color_caps[i].flag & caps.plane_color)
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " %s\n", color_caps[i].name);
+ }
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " window colors:\n");
+ for (i = 0; i < ARRAY_SIZE(color_caps) &&
+ size < PAGE_SIZE; i++) {
+ if (color_caps[i].flag & caps.wnd_color)
+ size += snprintf(&buf[size], PAGE_SIZE - size,
+ " %s\n", color_caps[i].name);
+ }
+
+ plane++;
+ }
+ return size;
+}
+
+static DEVICE_ATTR(caps_num, 0444, omapfb_show_caps_num, NULL);
+static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL);
+
+/* panel sysfs entries */
+static ssize_t omapfb_show_panel_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
+}
+
+static ssize_t omapfb_show_bklight_level(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int r;
+
+ if (fbdev->panel->get_bklight_level) {
+ r = snprintf(buf, PAGE_SIZE, "%d\n",
+ fbdev->panel->get_bklight_level(fbdev->panel));
+ } else
+ r = -ENODEV;
+ return r;
+}
+
+static ssize_t omapfb_store_bklight_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int r;
+
+ if (fbdev->panel->set_bklight_level) {
+ unsigned int level;
+
+ if (sscanf(buf, "%10d", &level) == 1) {
+ r = fbdev->panel->set_bklight_level(fbdev->panel,
+ level);
+ } else
+ r = -EINVAL;
+ } else
+ r = -ENODEV;
+ return r ? r : size;
+}
+
+static ssize_t omapfb_show_bklight_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+ int r;
+
+	if (fbdev->panel->get_bklight_max) {
+ r = snprintf(buf, PAGE_SIZE, "%d\n",
+ fbdev->panel->get_bklight_max(fbdev->panel));
+ } else
+ r = -ENODEV;
+ return r;
+}
+
+static struct device_attribute dev_attr_panel_name =
+ __ATTR(name, 0444, omapfb_show_panel_name, NULL);
+static DEVICE_ATTR(backlight_level, 0664,
+ omapfb_show_bklight_level, omapfb_store_bklight_level);
+static DEVICE_ATTR(backlight_max, 0444, omapfb_show_bklight_max, NULL);
+
+static struct attribute *panel_attrs[] = {
+ &dev_attr_panel_name.attr,
+ &dev_attr_backlight_level.attr,
+ &dev_attr_backlight_max.attr,
+ NULL,
+};
+
+static struct attribute_group panel_attr_grp = {
+ .name = "panel",
+ .attrs = panel_attrs,
+};
+
+/* ctrl sysfs entries */
+static ssize_t omapfb_show_ctrl_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
+}
+
+static struct device_attribute dev_attr_ctrl_name =
+ __ATTR(name, 0444, omapfb_show_ctrl_name, NULL);
+
+static struct attribute *ctrl_attrs[] = {
+ &dev_attr_ctrl_name.attr,
+ NULL,
+};
+
+static struct attribute_group ctrl_attr_grp = {
+ .name = "ctrl",
+ .attrs = ctrl_attrs,
+};
+
+static int omapfb_register_sysfs(struct omapfb_device *fbdev)
+{
+ int r;
+
+ if ((r = device_create_file(fbdev->dev, &dev_attr_caps_num)))
+ goto fail0;
+
+ if ((r = device_create_file(fbdev->dev, &dev_attr_caps_text)))
+ goto fail1;
+
+ if ((r = sysfs_create_group(&fbdev->dev->kobj, &panel_attr_grp)))
+ goto fail2;
+
+ if ((r = sysfs_create_group(&fbdev->dev->kobj, &ctrl_attr_grp)))
+ goto fail3;
+
+ return 0;
+fail3:
+ sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
+fail2:
+ device_remove_file(fbdev->dev, &dev_attr_caps_text);
+fail1:
+ device_remove_file(fbdev->dev, &dev_attr_caps_num);
+fail0:
+ dev_err(fbdev->dev, "unable to register sysfs interface\n");
+ return r;
+}
+
+static void omapfb_unregister_sysfs(struct omapfb_device *fbdev)
+{
+ sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp);
+ sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
+ device_remove_file(fbdev->dev, &dev_attr_caps_num);
+ device_remove_file(fbdev->dev, &dev_attr_caps_text);
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * LDM callbacks
+ * ---------------------------------------------------------------------------
+ */
+/* Initialize the system fb_info object and set the default video mode.
+ * The frame buffer memory has already been allocated by lcddma_init.
+ */
+static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
+{
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_fix_screeninfo *fix = &info->fix;
+ int r = 0;
+
+ info->fbops = &omapfb_ops;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+
+ info->pseudo_palette = fbdev->pseudo_palette;
+
+ var->accel_flags = def_accel ? FB_ACCELF_TEXT : 0;
+ var->xres = def_vxres;
+ var->yres = def_vyres;
+ var->xres_virtual = def_vxres;
+ var->yres_virtual = def_vyres;
+ var->rotate = def_rotate;
+ var->bits_per_pixel = fbdev->panel->bpp;
+
+ set_fb_var(info, var);
+ set_fb_fix(info);
+
+ r = fb_alloc_cmap(&info->cmap, 16, 0);
+ if (r != 0)
+ dev_err(fbdev->dev, "unable to allocate color map memory\n");
+
+ return r;
+}
+
+/* Release the fb_info object */
+static void fbinfo_cleanup(struct omapfb_device *fbdev, struct fb_info *fbi)
+{
+ fb_dealloc_cmap(&fbi->cmap);
+}
+
+static void planes_cleanup(struct omapfb_device *fbdev)
+{
+ int i;
+
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ if (fbdev->fb_info[i] == NULL)
+ break;
+ fbinfo_cleanup(fbdev, fbdev->fb_info[i]);
+ framebuffer_release(fbdev->fb_info[i]);
+ }
+}
+
+static int planes_init(struct omapfb_device *fbdev)
+{
+ struct fb_info *fbi;
+ int i;
+ int r;
+
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ struct omapfb_plane_struct *plane;
+ fbi = framebuffer_alloc(sizeof(struct omapfb_plane_struct),
+ fbdev->dev);
+ if (fbi == NULL) {
+ dev_err(fbdev->dev,
+ "unable to allocate memory for plane info\n");
+ planes_cleanup(fbdev);
+ return -ENOMEM;
+ }
+ plane = fbi->par;
+ plane->idx = i;
+ plane->fbdev = fbdev;
+ plane->info.mirror = def_mirror;
+ fbdev->fb_info[i] = fbi;
+
+ if ((r = fbinfo_init(fbdev, fbi)) < 0) {
+ framebuffer_release(fbi);
+ planes_cleanup(fbdev);
+ return r;
+ }
+ plane->info.out_width = fbi->var.xres;
+ plane->info.out_height = fbi->var.yres;
+ }
+ return 0;
+}
+
+/*
+ * Free driver resources. Can be called to roll back an aborted initialization
+ * sequence.
+ */
+static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
+{
+ int i;
+
+ switch (state) {
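+	/*
+	 * Cases intentionally fall through: each state also undoes the
+	 * init steps of all earlier states.
+	 */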
+ case OMAPFB_ACTIVE:
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
+ unregister_framebuffer(fbdev->fb_info[i]);
+ case 7:
+ omapfb_unregister_sysfs(fbdev);
+ case 6:
+ fbdev->panel->disable(fbdev->panel);
+ case 5:
+ omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
+ case 4:
+ planes_cleanup(fbdev);
+ case 3:
+ ctrl_cleanup(fbdev);
+ case 2:
+ fbdev->panel->cleanup(fbdev->panel);
+ case 1:
+ dev_set_drvdata(fbdev->dev, NULL);
+ kfree(fbdev);
+ case 0:
+ /* nothing to free */
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int omapfb_find_ctrl(struct omapfb_device *fbdev)
+{
+ struct omapfb_platform_data *conf;
+ char name[17];
+ int i;
+
+ conf = fbdev->dev->platform_data;
+
+ fbdev->ctrl = NULL;
+
+ strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+
+ if (strcmp(name, "internal") == 0) {
+ fbdev->ctrl = fbdev->int_ctrl;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctrls); i++) {
+ dev_dbg(fbdev->dev, "ctrl %s\n", ctrls[i]->name);
+ if (strcmp(ctrls[i]->name, name) == 0) {
+ fbdev->ctrl = ctrls[i];
+ break;
+ }
+ }
+
+ if (fbdev->ctrl == NULL) {
+ dev_dbg(fbdev->dev, "ctrl %s not supported\n", name);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void check_required_callbacks(struct omapfb_device *fbdev)
+{
+#define _C(x) (fbdev->ctrl->x != NULL)
+#define _P(x) (fbdev->panel->x != NULL)
+ BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
+ BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
+ _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
+ _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
+ _P(get_caps)));
+#undef _P
+#undef _C
+}
+
+/*
+ * Called by LDM binding to probe and attach a new device.
+ * Initialization sequence:
+ * 1. allocate system omapfb_device structure
+ * 2. select controller type according to platform configuration
+ * init LCD panel
+ * 3. init LCD controller and LCD DMA
+ * 4. init system fb_info structure for all planes
+ * 5. setup video mode for first plane and enable it
+ * 6. enable LCD panel
+ * 7. register sysfs attributes
+ * OMAPFB_ACTIVE: register system fb_info structure for all planes
+ */
+static int omapfb_do_probe(struct platform_device *pdev,
+ struct lcd_panel *panel)
+{
+ struct omapfb_device *fbdev = NULL;
+ int init_state;
+ unsigned long phz, hhz, vhz;
+ unsigned long vram;
+ int i;
+ int r = 0;
+
+ init_state = 0;
+
+ if (pdev->num_resources != 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ r = -ENODEV;
+ goto cleanup;
+ }
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ r = -ENOENT;
+ goto cleanup;
+ }
+
+ fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL);
+ if (fbdev == NULL) {
+ dev_err(&pdev->dev,
+ "unable to allocate memory for device info\n");
+ r = -ENOMEM;
+ goto cleanup;
+ }
+ init_state++;
+
+ fbdev->dev = &pdev->dev;
+ fbdev->panel = panel;
+ platform_set_drvdata(pdev, fbdev);
+
+ mutex_init(&fbdev->rqueue_mutex);
+
+#ifdef CONFIG_ARCH_OMAP1
+ fbdev->int_ctrl = &omap1_int_ctrl;
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+ fbdev->ext_if = &omap1_ext_if;
+#endif
+#else /* OMAP2 */
+ fbdev->int_ctrl = &omap2_int_ctrl;
+#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
+ fbdev->ext_if = &omap2_ext_if;
+#endif
+#endif
+ if (omapfb_find_ctrl(fbdev) < 0) {
+ dev_err(fbdev->dev,
+ "LCD controller not found, board not supported\n");
+ r = -ENODEV;
+ goto cleanup;
+ }
+
+ r = fbdev->panel->init(fbdev->panel, fbdev);
+ if (r)
+ goto cleanup;
+
+ pr_info("omapfb: configured for panel %s\n", fbdev->panel->name);
+
+ def_vxres = def_vxres ? : fbdev->panel->x_res;
+ def_vyres = def_vyres ? : fbdev->panel->y_res;
+
+ init_state++;
+
+ r = ctrl_init(fbdev);
+ if (r)
+ goto cleanup;
+ if (fbdev->ctrl->mmap != NULL)
+ omapfb_ops.fb_mmap = omapfb_mmap;
+ init_state++;
+
+ check_required_callbacks(fbdev);
+
+ r = planes_init(fbdev);
+ if (r)
+ goto cleanup;
+ init_state++;
+
+#ifdef CONFIG_FB_OMAP_DMA_TUNE
+ /* Set DMA priority for EMIFF access to highest */
+ if (cpu_class_is_omap1())
+ omap_set_dma_priority(0, OMAP_DMA_PORT_EMIFF, 15);
+#endif
+
+ r = ctrl_change_mode(fbdev->fb_info[0]);
+ if (r) {
+ dev_err(fbdev->dev, "mode setting failed\n");
+ goto cleanup;
+ }
+
+ /* GFX plane is enabled by default */
+ r = fbdev->ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
+ if (r)
+ goto cleanup;
+
+ omapfb_set_update_mode(fbdev, manual_update ?
+ OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE);
+ init_state++;
+
+ r = fbdev->panel->enable(fbdev->panel);
+ if (r)
+ goto cleanup;
+ init_state++;
+
+ r = omapfb_register_sysfs(fbdev);
+ if (r)
+ goto cleanup;
+ init_state++;
+
+ vram = 0;
+ for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
+ r = register_framebuffer(fbdev->fb_info[i]);
+ if (r != 0) {
+ dev_err(fbdev->dev,
+ "registering framebuffer %d failed\n", i);
+ goto cleanup;
+ }
+ vram += fbdev->mem_desc.region[i].size;
+ }
+
+ fbdev->state = OMAPFB_ACTIVE;
+
+ panel = fbdev->panel;
+ phz = panel->pixel_clock * 1000;
+ hhz = phz * 10 / (panel->hfp + panel->x_res + panel->hbp + panel->hsw);
+ vhz = hhz / (panel->vfp + panel->y_res + panel->vbp + panel->vsw);
+
+ omapfb_dev = fbdev;
+
+ pr_info("omapfb: Framebuffer initialized. Total vram %lu planes %d\n",
+ vram, fbdev->mem_desc.region_cnt);
+ pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
+ "vfreq %lu.%lu Hz\n",
+ phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);
+
+ return 0;
+
+cleanup:
+ omapfb_free_resources(fbdev, init_state);
+
+ return r;
+}
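+
+/*
+ * Worked example (illustrative figures only) for the refresh-rate maths at
+ * the end of omapfb_do_probe(): assume a 240x320 panel with
+ * pixel_clock = 5920 kHz, hfp = 12, hbp = 40, hsw = 4, vfp = 3, vbp = 7,
+ * vsw = 2:
+ *
+ *	phz = 5920 * 1000                    = 5920000 Hz
+ *	hhz = phz * 10 / (12 + 240 + 40 + 4) = 200000  (0.1 Hz units, 20 kHz)
+ *	vhz = hhz / (3 + 320 + 7 + 2)        = 602     (0.1 Hz units, 60.2 Hz)
+ */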
+
+static int omapfb_probe(struct platform_device *pdev)
+{
+ BUG_ON(fbdev_pdev != NULL);
+
+ /* Delay actual initialization until the LCD is registered */
+ fbdev_pdev = pdev;
+ if (fbdev_panel != NULL)
+ omapfb_do_probe(fbdev_pdev, fbdev_panel);
+ return 0;
+}
+
+void omapfb_register_panel(struct lcd_panel *panel)
+{
+ BUG_ON(fbdev_panel != NULL);
+
+ fbdev_panel = panel;
+ if (fbdev_pdev != NULL)
+ omapfb_do_probe(fbdev_pdev, fbdev_panel);
+}
+
+/* Called when the device is being detached from the driver */
+static int omapfb_remove(struct platform_device *pdev)
+{
+ struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+ enum omapfb_state saved_state = fbdev->state;
+
+ /* FIXME: wait till completion of pending events */
+
+ fbdev->state = OMAPFB_DISABLED;
+ omapfb_free_resources(fbdev, saved_state);
+
+ return 0;
+}
+
+/* PM suspend */
+static int omapfb_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+
+ omapfb_blank(VESA_POWERDOWN, fbdev->fb_info[0]);
+
+ return 0;
+}
+
+/* PM resume */
+static int omapfb_resume(struct platform_device *pdev)
+{
+ struct omapfb_device *fbdev = platform_get_drvdata(pdev);
+
+ omapfb_blank(VESA_NO_BLANKING, fbdev->fb_info[0]);
+ return 0;
+}
+
+static struct platform_driver omapfb_driver = {
+ .probe = omapfb_probe,
+ .remove = omapfb_remove,
+ .suspend = omapfb_suspend,
+ .resume = omapfb_resume,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+#ifndef MODULE
+
+/* Process kernel command line parameters */
+static int __init omapfb_setup(char *options)
+{
+ char *this_opt = NULL;
+ int r = 0;
+
+ pr_debug("omapfb: options %s\n", options);
+
+ if (!options || !*options)
+ return 0;
+
+ while (!r && (this_opt = strsep(&options, ",")) != NULL) {
+ if (!strncmp(this_opt, "accel", 5))
+ def_accel = 1;
+ else if (!strncmp(this_opt, "vram:", 5)) {
+ char *suffix;
+ unsigned long vram;
+ vram = (simple_strtoul(this_opt + 5, &suffix, 0));
+ switch (suffix[0]) {
+ case '\0':
+ break;
+ case 'm':
+ case 'M':
+ vram *= 1024;
+ /* Fall through */
+ case 'k':
+ case 'K':
+ vram *= 1024;
+ break;
+ default:
+ pr_debug("omapfb: invalid vram suffix %c\n",
+ suffix[0]);
+ r = -1;
+ }
+ def_vram[def_vram_cnt++] = vram;
+ }
+ else if (!strncmp(this_opt, "vxres:", 6))
+ def_vxres = simple_strtoul(this_opt + 6, NULL, 0);
+ else if (!strncmp(this_opt, "vyres:", 6))
+ def_vyres = simple_strtoul(this_opt + 6, NULL, 0);
+ else if (!strncmp(this_opt, "rotate:", 7))
+ def_rotate = (simple_strtoul(this_opt + 7, NULL, 0));
+ else if (!strncmp(this_opt, "mirror:", 7))
+ def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
+ else if (!strncmp(this_opt, "manual_update", 13))
+ manual_update = 1;
+ else {
+ pr_debug("omapfb: invalid option\n");
+ r = -1;
+ }
+ }
+
+ return r;
+}
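+
+/*
+ * Example (hypothetical values) of the boot options parsed above, in the
+ * usual video=<driver>:<options> form:
+ *
+ *	video=omapfb:accel,vram:2M,vxres:800,vyres:600,rotate:90,manual_update
+ *
+ * The vram option takes an optional K/M suffix, handled by the fall-through
+ * in the switch statement; the remaining options take plain numeric values.
+ */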
+
+#endif
+
+/* Register both the driver and the device */
+static int __init omapfb_init(void)
+{
+#ifndef MODULE
+ char *option;
+
+ if (fb_get_options("omapfb", &option))
+ return -ENODEV;
+ omapfb_setup(option);
+#endif
+ /* Register the driver with LDM */
+ if (platform_driver_register(&omapfb_driver)) {
+ pr_debug("failed to register omapfb driver\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit omapfb_cleanup(void)
+{
+ platform_driver_unregister(&omapfb_driver);
+}
+
+module_param_named(accel, def_accel, uint, 0664);
+module_param_array_named(vram, def_vram, ulong, &def_vram_cnt, 0664);
+module_param_named(vxres, def_vxres, long, 0664);
+module_param_named(vyres, def_vyres, long, 0664);
+module_param_named(rotate, def_rotate, uint, 0664);
+module_param_named(mirror, def_mirror, uint, 0664);
+module_param_named(manual_update, manual_update, bool, 0664);
+
+module_init(omapfb_init);
+module_exit(omapfb_cleanup);
+
+MODULE_DESCRIPTION("TI OMAP framebuffer driver");
+MODULE_AUTHOR("Imre Deak <imre.deak@nokia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
new file mode 100644
index 000000000000..2b4269813b22
--- /dev/null
+++ b/drivers/video/omap/rfbi.c
@@ -0,0 +1,588 @@
+/*
+ * OMAP2 Remote Frame Buffer Interface support
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/omapfb.h>
+
+#include "dispc.h"
+
+/* To work around an RFBI transfer rate limitation */
+#define OMAP_RFBI_RATE_LIMIT 1
+
+#define RFBI_BASE 0x48050800
+#define RFBI_REVISION 0x0000
+#define RFBI_SYSCONFIG 0x0010
+#define RFBI_SYSSTATUS 0x0014
+#define RFBI_CONTROL 0x0040
+#define RFBI_PIXEL_CNT 0x0044
+#define RFBI_LINE_NUMBER 0x0048
+#define RFBI_CMD 0x004c
+#define RFBI_PARAM 0x0050
+#define RFBI_DATA 0x0054
+#define RFBI_READ 0x0058
+#define RFBI_STATUS 0x005c
+#define RFBI_CONFIG0 0x0060
+#define RFBI_ONOFF_TIME0 0x0064
+#define RFBI_CYCLE_TIME0 0x0068
+#define RFBI_DATA_CYCLE1_0 0x006c
+#define RFBI_DATA_CYCLE2_0 0x0070
+#define RFBI_DATA_CYCLE3_0 0x0074
+#define RFBI_VSYNC_WIDTH 0x0090
+#define RFBI_HSYNC_WIDTH 0x0094
+
+#define DISPC_BASE 0x48050400
+#define DISPC_CONTROL 0x0040
+
+static struct {
+ u32 base;
+ void (*lcdc_callback)(void *data);
+ void *lcdc_callback_data;
+ unsigned long l4_khz;
+ int bits_per_cycle;
+ struct omapfb_device *fbdev;
+ struct clk *dss_ick;
+ struct clk *dss1_fck;
+ unsigned tearsync_pin_cnt;
+ unsigned tearsync_mode;
+} rfbi;
+
+static inline void rfbi_write_reg(int idx, u32 val)
+{
+ __raw_writel(val, rfbi.base + idx);
+}
+
+static inline u32 rfbi_read_reg(int idx)
+{
+ return __raw_readl(rfbi.base + idx);
+}
+
+static int rfbi_get_clocks(void)
+{
+ if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
+ dev_err(rfbi.fbdev->dev, "can't get dss_ick");
+ return PTR_ERR(rfbi.dss_ick);
+ }
+
+ if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
+ dev_err(rfbi.fbdev->dev, "can't get dss1_fck");
+ clk_put(rfbi.dss_ick);
+ return PTR_ERR(rfbi.dss1_fck);
+ }
+
+ return 0;
+}
+
+static void rfbi_put_clocks(void)
+{
+ clk_put(rfbi.dss1_fck);
+ clk_put(rfbi.dss_ick);
+}
+
+static void rfbi_enable_clocks(int enable)
+{
+ if (enable) {
+ clk_enable(rfbi.dss_ick);
+ clk_enable(rfbi.dss1_fck);
+ } else {
+ clk_disable(rfbi.dss1_fck);
+ clk_disable(rfbi.dss_ick);
+ }
+}
+
+
+#ifdef VERBOSE
+static void rfbi_print_timings(void)
+{
+ u32 l;
+ u32 time;
+
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ time = 1000000000 / rfbi.l4_khz;
+ if (l & (1 << 4))
+ time *= 2;
+
+ dev_dbg(rfbi.fbdev->dev, "Tick time %u ps\n", time);
+ l = rfbi_read_reg(RFBI_ONOFF_TIME0);
+ dev_dbg(rfbi.fbdev->dev,
+ "CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
+ "REONTIME %d, REOFFTIME %d\n",
+ l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
+ (l >> 20) & 0x0f, (l >> 24) & 0x3f);
+
+ l = rfbi_read_reg(RFBI_CYCLE_TIME0);
+ dev_dbg(rfbi.fbdev->dev,
+ "WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
+ "ACCESSTIME %d\n",
+ (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
+ (l >> 22) & 0x3f);
+}
+#else
+static void rfbi_print_timings(void) {}
+#endif
+
+static void rfbi_set_timings(const struct extif_timings *t)
+{
+ u32 l;
+
+ BUG_ON(!t->converted);
+
+ rfbi_enable_clocks(1);
+ rfbi_write_reg(RFBI_ONOFF_TIME0, t->tim[0]);
+ rfbi_write_reg(RFBI_CYCLE_TIME0, t->tim[1]);
+
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ l &= ~(1 << 4);
+ l |= (t->tim[2] ? 1 : 0) << 4;
+ rfbi_write_reg(RFBI_CONFIG0, l);
+
+ rfbi_print_timings();
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+ *clk_period = 1000000000 / rfbi.l4_khz;
+ *max_clk_div = 2;
+}
+
+static int ps_to_rfbi_ticks(int time, int div)
+{
+ unsigned long tick_ps;
+ int ret;
+
+ /* Calculate in picosecs to yield more exact results */
+ tick_ps = 1000000000 / (rfbi.l4_khz) * div;
+
+ ret = (time + tick_ps - 1) / tick_ps;
+
+ return ret;
+}
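+
+/*
+ * Worked example for ps_to_rfbi_ticks(), with illustrative numbers: for an
+ * L4 clock of 133000 kHz and div = 1, tick_ps = 1000000000 / 133000 = 7518.
+ * A 25000 ps interval is then rounded up to
+ * (25000 + 7518 - 1) / 7518 = 4 ticks.
+ */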
+
+#ifdef OMAP_RFBI_RATE_LIMIT
+static unsigned long rfbi_get_max_tx_rate(void)
+{
+ unsigned long l4_rate, dss1_rate;
+ int min_l4_ticks = 0;
+ int i;
+
+ /* According to TI this can't be calculated, so make the
+ * adjustments for a couple of known frequencies and warn for
+ * others.
+ */
+ static const struct {
+ unsigned long l4_clk; /* MHz */
+ unsigned long dss1_clk; /* MHz */
+ unsigned long min_l4_ticks;
+ } ftab[] = {
+ { 55, 132, 7, }, /* 7.86 MPix/s */
+ { 110, 110, 12, }, /* 9.16 MPix/s */
+ { 110, 132, 10, }, /* 11 Mpix/s */
+ { 120, 120, 10, }, /* 12 Mpix/s */
+ { 133, 133, 10, }, /* 13.3 Mpix/s */
+ };
+
+ l4_rate = rfbi.l4_khz / 1000;
+ dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000;
+
+ for (i = 0; i < ARRAY_SIZE(ftab); i++) {
+ /* Use a window instead of an exact match, to account
+ * for different DPLL multiplier / divider pairs.
+ */
+ if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
+ abs(ftab[i].dss1_clk - dss1_rate) < 3) {
+ min_l4_ticks = ftab[i].min_l4_ticks;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ftab)) {
+ /* Can't be sure, so return the maximum non-rate-limited
+ * value anyway. This should only cause a problem for
+ * tearing synchronisation.
+ */
+ dev_err(rfbi.fbdev->dev,
+ "can't determine maximum RFBI transfer rate\n");
+ return rfbi.l4_khz * 1000;
+ }
+ return rfbi.l4_khz * 1000 / min_l4_ticks;
+}
+#else
+static unsigned long rfbi_get_max_tx_rate(void)
+{
+ return rfbi.l4_khz * 1000;
+}
+#endif
+
+
+static int rfbi_convert_timings(struct extif_timings *t)
+{
+ u32 l;
+ int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
+ int actim, recyc, wecyc;
+ int div = t->clk_div;
+
+ if (div <= 0 || div > 2)
+ return -1;
+
+ /* Make sure that after conversion it still holds that:
+ * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
+ * csoff > cson, csoff >= max(weoff, reoff), actim > reon
+ */
+ weon = ps_to_rfbi_ticks(t->we_on_time, div);
+ weoff = ps_to_rfbi_ticks(t->we_off_time, div);
+ if (weoff <= weon)
+ weoff = weon + 1;
+ if (weon > 0x0f)
+ return -1;
+ if (weoff > 0x3f)
+ return -1;
+
+ reon = ps_to_rfbi_ticks(t->re_on_time, div);
+ reoff = ps_to_rfbi_ticks(t->re_off_time, div);
+ if (reoff <= reon)
+ reoff = reon + 1;
+ if (reon > 0x0f)
+ return -1;
+ if (reoff > 0x3f)
+ return -1;
+
+ cson = ps_to_rfbi_ticks(t->cs_on_time, div);
+ csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
+ if (csoff <= cson)
+ csoff = cson + 1;
+ if (csoff < max(weoff, reoff))
+ csoff = max(weoff, reoff);
+ if (cson > 0x0f)
+ return -1;
+ if (csoff > 0x3f)
+ return -1;
+
+ l = cson;
+ l |= csoff << 4;
+ l |= weon << 10;
+ l |= weoff << 14;
+ l |= reon << 20;
+ l |= reoff << 24;
+
+ t->tim[0] = l;
+
+ actim = ps_to_rfbi_ticks(t->access_time, div);
+ if (actim <= reon)
+ actim = reon + 1;
+ if (actim > 0x3f)
+ return -1;
+
+ wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
+ if (wecyc < weoff)
+ wecyc = weoff;
+ if (wecyc > 0x3f)
+ return -1;
+
+ recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
+ if (recyc < reoff)
+ recyc = reoff;
+ if (recyc > 0x3f)
+ return -1;
+
+ cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
+ if (cs_pulse > 0x3f)
+ return -1;
+
+ l = wecyc;
+ l |= recyc << 6;
+ l |= cs_pulse << 12;
+ l |= actim << 22;
+
+ t->tim[1] = l;
+
+ t->tim[2] = div - 1;
+
+ t->converted = 1;
+
+ return 0;
+}
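+
+/*
+ * Minimal usage sketch, assuming a caller that has already filled in the
+ * picosecond fields of struct extif_timings: the timings must be converted
+ * to register values before being applied, since rfbi_set_timings() above
+ * BUG()s on unconverted timings.
+ *
+ *	t.clk_div = 1;			// RFBI allows a divider of 1 or 2
+ *	if (rfbi_convert_timings(&t) < 0)
+ *		return -EINVAL;		// values don't fit the RFBI fields
+ *	rfbi_set_timings(&t);
+ */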
+
+static int rfbi_setup_tearsync(unsigned pin_cnt,
+ unsigned hs_pulse_time, unsigned vs_pulse_time,
+ int hs_pol_inv, int vs_pol_inv, int extif_div)
+{
+ int hs, vs;
+ int min;
+ u32 l;
+
+ if (pin_cnt != 1 && pin_cnt != 2)
+ return -EINVAL;
+
+ hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
+ vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
+ if (hs < 2)
+ return -EDOM;
+ if (pin_cnt == 2)
+ min = 2;
+ else
+ min = 4;
+ if (vs < min)
+ return -EDOM;
+ if (vs == hs)
+ return -EINVAL;
+ rfbi.tearsync_pin_cnt = pin_cnt;
+ dev_dbg(rfbi.fbdev->dev,
+ "setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n",
+ pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv);
+
+ rfbi_enable_clocks(1);
+ rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
+ rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
+
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ if (hs_pol_inv)
+ l &= ~(1 << 21);
+ else
+ l |= 1 << 21;
+ if (vs_pol_inv)
+ l &= ~(1 << 20);
+ else
+ l |= 1 << 20;
+ rfbi_write_reg(RFBI_CONFIG0, l);
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static int rfbi_enable_tearsync(int enable, unsigned line)
+{
+ u32 l;
+
+ dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n",
+ enable, line, rfbi.tearsync_mode);
+ if (line > (1 << 11) - 1)
+ return -EINVAL;
+
+ rfbi_enable_clocks(1);
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ l &= ~(0x3 << 2);
+ if (enable) {
+ rfbi.tearsync_mode = rfbi.tearsync_pin_cnt;
+ l |= rfbi.tearsync_mode << 2;
+ } else
+ rfbi.tearsync_mode = 0;
+ rfbi_write_reg(RFBI_CONFIG0, l);
+ rfbi_write_reg(RFBI_LINE_NUMBER, line);
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static void rfbi_write_command(const void *buf, unsigned int len)
+{
+ rfbi_enable_clocks(1);
+ if (rfbi.bits_per_cycle == 16) {
+ const u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2)
+ rfbi_write_reg(RFBI_CMD, *w++);
+ } else {
+ const u8 *b = buf;
+ BUG_ON(rfbi.bits_per_cycle != 8);
+ for (; len; len--)
+ rfbi_write_reg(RFBI_CMD, *b++);
+ }
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_read_data(void *buf, unsigned int len)
+{
+ rfbi_enable_clocks(1);
+ if (rfbi.bits_per_cycle == 16) {
+ u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2) {
+ rfbi_write_reg(RFBI_READ, 0);
+ *w++ = rfbi_read_reg(RFBI_READ);
+ }
+ } else {
+ u8 *b = buf;
+ BUG_ON(rfbi.bits_per_cycle != 8);
+ for (; len; len--) {
+ rfbi_write_reg(RFBI_READ, 0);
+ *b++ = rfbi_read_reg(RFBI_READ);
+ }
+ }
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_write_data(const void *buf, unsigned int len)
+{
+ rfbi_enable_clocks(1);
+ if (rfbi.bits_per_cycle == 16) {
+ const u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2)
+ rfbi_write_reg(RFBI_PARAM, *w++);
+ } else {
+ const u8 *b = buf;
+ BUG_ON(rfbi.bits_per_cycle != 8);
+ for (; len; len--)
+ rfbi_write_reg(RFBI_PARAM, *b++);
+ }
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_transfer_area(int width, int height,
+ void (callback)(void * data), void *data)
+{
+ u32 w;
+
+ BUG_ON(callback == NULL);
+
+ rfbi_enable_clocks(1);
+ omap_dispc_set_lcd_size(width, height);
+
+ rfbi.lcdc_callback = callback;
+ rfbi.lcdc_callback_data = data;
+
+ rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
+
+ w = rfbi_read_reg(RFBI_CONTROL);
+ w |= 1; /* enable */
+ if (!rfbi.tearsync_mode)
+ w |= 1 << 4; /* internal trigger, reset by HW */
+ rfbi_write_reg(RFBI_CONTROL, w);
+
+ omap_dispc_enable_lcd_out(1);
+}
+
+static inline void _stop_transfer(void)
+{
+ u32 w;
+
+ w = rfbi_read_reg(RFBI_CONTROL);
+ rfbi_write_reg(RFBI_CONTROL, w & ~(1 << 0));
+ rfbi_enable_clocks(0);
+}
+
+static void rfbi_dma_callback(void *data)
+{
+ _stop_transfer();
+ rfbi.lcdc_callback(rfbi.lcdc_callback_data);
+}
+
+static void rfbi_set_bits_per_cycle(int bpc)
+{
+ u32 l;
+
+ rfbi_enable_clocks(1);
+ l = rfbi_read_reg(RFBI_CONFIG0);
+ l &= ~(0x03 << 0);
+
+ switch (bpc) {
+ case 8:
+ break;
+ case 16:
+ l |= 3;
+ break;
+ default:
+ BUG();
+ }
+ rfbi_write_reg(RFBI_CONFIG0, l);
+ rfbi.bits_per_cycle = bpc;
+ rfbi_enable_clocks(0);
+}
+
+static int rfbi_init(struct omapfb_device *fbdev)
+{
+ u32 l;
+ int r;
+
+ rfbi.fbdev = fbdev;
+ rfbi.base = io_p2v(RFBI_BASE);
+
+ if ((r = rfbi_get_clocks()) < 0)
+ return r;
+ rfbi_enable_clocks(1);
+
+ rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000;
+
+ /* Reset */
+ rfbi_write_reg(RFBI_SYSCONFIG, 1 << 1);
+ while (!(rfbi_read_reg(RFBI_SYSSTATUS) & (1 << 0)));
+
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+ /* Enable autoidle and smart-idle */
+ l |= (1 << 0) | (2 << 3);
+ rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+ /* 16-bit interface, ITE trigger mode, 16-bit data */
+ l = (0x03 << 0) | (0x00 << 2) | (0x01 << 5) | (0x02 << 7);
+ l |= (0 << 9) | (1 << 20) | (1 << 21);
+ rfbi_write_reg(RFBI_CONFIG0, l);
+
+ rfbi_write_reg(RFBI_DATA_CYCLE1_0, 0x00000010);
+
+ l = rfbi_read_reg(RFBI_CONTROL);
+ /* Select CS0, clear bypass mode */
+ l = (0x01 << 2);
+ rfbi_write_reg(RFBI_CONTROL, l);
+
+ if ((r = omap_dispc_request_irq(rfbi_dma_callback, NULL)) < 0) {
+ dev_err(fbdev->dev, "can't get DISPC irq\n");
+ rfbi_enable_clocks(0);
+ return r;
+ }
+
+ l = rfbi_read_reg(RFBI_REVISION);
+ pr_info("omapfb: RFBI version %d.%d initialized\n",
+ (l >> 4) & 0x0f, l & 0x0f);
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+static void rfbi_cleanup(void)
+{
+ omap_dispc_free_irq();
+ rfbi_put_clocks();
+}
+
+const struct lcd_ctrl_extif omap2_ext_if = {
+ .init = rfbi_init,
+ .cleanup = rfbi_cleanup,
+ .get_clk_info = rfbi_get_clk_info,
+ .get_max_tx_rate = rfbi_get_max_tx_rate,
+ .set_bits_per_cycle = rfbi_set_bits_per_cycle,
+ .convert_timings = rfbi_convert_timings,
+ .set_timings = rfbi_set_timings,
+ .write_command = rfbi_write_command,
+ .read_data = rfbi_read_data,
+ .write_data = rfbi_write_data,
+ .transfer_area = rfbi_transfer_area,
+ .setup_tearsync = rfbi_setup_tearsync,
+ .enable_tearsync = rfbi_enable_tearsync,
+
+ .max_transmit_size = (u32) ~0,
+};
+
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
new file mode 100644
index 000000000000..81dbcf53cf0e
--- /dev/null
+++ b/drivers/video/omap/sossi.c
@@ -0,0 +1,686 @@
+/*
+ * OMAP1 Special OptimiSed Screen Interface support
+ *
+ * Copyright (C) 2004-2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <asm/arch/dma.h>
+#include <asm/arch/omapfb.h>
+
+#include "lcdc.h"
+
+#define MODULE_NAME "omapfb-sossi"
+
+#define OMAP_SOSSI_BASE 0xfffbac00
+#define SOSSI_ID_REG 0x00
+#define SOSSI_INIT1_REG 0x04
+#define SOSSI_INIT2_REG 0x08
+#define SOSSI_INIT3_REG 0x0c
+#define SOSSI_FIFO_REG 0x10
+#define SOSSI_REOTABLE_REG 0x14
+#define SOSSI_TEARING_REG 0x18
+#define SOSSI_INIT1B_REG 0x1c
+#define SOSSI_FIFOB_REG 0x20
+
+#define DMA_GSCR 0xfffedc04
+#define DMA_LCD_CCR 0xfffee3c2
+#define DMA_LCD_CTRL 0xfffee3c4
+#define DMA_LCD_LCH_CTRL 0xfffee3ea
+
+#define CONF_SOSSI_RESET_R (1 << 23)
+
+#define RD_ACCESS 0
+#define WR_ACCESS 1
+
+#define SOSSI_MAX_XMIT_BYTES (512 * 1024)
+
+static struct {
+ void __iomem *base;
+ struct clk *fck;
+ unsigned long fck_hz;
+ spinlock_t lock;
+ int bus_pick_count;
+ int bus_pick_width;
+ int tearsync_mode;
+ int tearsync_line;
+ void (*lcdc_callback)(void *data);
+ void *lcdc_callback_data;
+ int vsync_dma_pending;
+ /* timing for read and write access */
+ int clk_div;
+ u8 clk_tw0[2];
+ u8 clk_tw1[2];
+ /*
+ * if last_access is the same as current we don't have to change
+ * the timings
+ */
+ int last_access;
+
+ struct omapfb_device *fbdev;
+} sossi;
+
+static inline u32 sossi_read_reg(int reg)
+{
+ return readl(sossi.base + reg);
+}
+
+static inline u16 sossi_read_reg16(int reg)
+{
+ return readw(sossi.base + reg);
+}
+
+static inline u8 sossi_read_reg8(int reg)
+{
+ return readb(sossi.base + reg);
+}
+
+static inline void sossi_write_reg(int reg, u32 value)
+{
+ writel(value, sossi.base + reg);
+}
+
+static inline void sossi_write_reg16(int reg, u16 value)
+{
+ writew(value, sossi.base + reg);
+}
+
+static inline void sossi_write_reg8(int reg, u8 value)
+{
+ writeb(value, sossi.base + reg);
+}
+
+static void sossi_set_bits(int reg, u32 bits)
+{
+ sossi_write_reg(reg, sossi_read_reg(reg) | bits);
+}
+
+static void sossi_clear_bits(int reg, u32 bits)
+{
+ sossi_write_reg(reg, sossi_read_reg(reg) & ~bits);
+}
+
+#define HZ_TO_PS(x) (1000000000 / (x / 1000))
+
+static u32 ps_to_sossi_ticks(u32 ps, int div)
+{
+ u32 clk_period = HZ_TO_PS(sossi.fck_hz) * div;
+ return (clk_period + ps - 1) / clk_period;
+}
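+
+/*
+ * Worked example with illustrative numbers: with fck_hz = 96 MHz,
+ * HZ_TO_PS(96000000) = 1000000000 / 96000 = 10416 ps per tick. With
+ * div = 2 the tick is 20832 ps, so ps_to_sossi_ticks(50000, 2) rounds up
+ * to (20832 + 50000 - 1) / 20832 = 3 ticks.
+ */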
+
+static int calc_rd_timings(struct extif_timings *t)
+{
+ u32 tw0, tw1;
+ int reon, reoff, recyc, actim;
+ int div = t->clk_div;
+
+ /*
+ * Make sure that after conversion it still holds that:
+ * reoff > reon, recyc >= reoff, actim > reon
+ */
+ reon = ps_to_sossi_ticks(t->re_on_time, div);
+ /* reon will be exactly one sossi tick */
+ if (reon > 1)
+ return -1;
+
+ reoff = ps_to_sossi_ticks(t->re_off_time, div);
+
+ if (reoff <= reon)
+ reoff = reon + 1;
+
+ tw0 = reoff - reon;
+ if (tw0 > 0x10)
+ return -1;
+
+ recyc = ps_to_sossi_ticks(t->re_cycle_time, div);
+ if (recyc <= reoff)
+ recyc = reoff + 1;
+
+ tw1 = recyc - tw0;
+ /* values less than 3 result in the SOSSI block resetting itself */
+ if (tw1 < 3)
+ tw1 = 3;
+ if (tw1 > 0x40)
+ return -1;
+
+ actim = ps_to_sossi_ticks(t->access_time, div);
+ if (actim < reoff)
+ actim++;
+ /*
+ * access time (data hold time) will be exactly one sossi
+ * tick
+ */
+ if (actim - reoff > 1)
+ return -1;
+
+ t->tim[0] = tw0 - 1;
+ t->tim[1] = tw1 - 1;
+
+ return 0;
+}
+
+static int calc_wr_timings(struct extif_timings *t)
+{
+ u32 tw0, tw1;
+ int weon, weoff, wecyc;
+ int div = t->clk_div;
+
+ /*
+ * Make sure that after conversion it still holds that:
+ * weoff > weon, wecyc >= weoff
+ */
+ weon = ps_to_sossi_ticks(t->we_on_time, div);
+ /* weon will be exactly one sossi tick */
+ if (weon > 1)
+ return -1;
+
+ weoff = ps_to_sossi_ticks(t->we_off_time, div);
+ if (weoff <= weon)
+ weoff = weon + 1;
+ tw0 = weoff - weon;
+ if (tw0 > 0x10)
+ return -1;
+
+ wecyc = ps_to_sossi_ticks(t->we_cycle_time, div);
+ if (wecyc <= weoff)
+ wecyc = weoff + 1;
+
+ tw1 = wecyc - tw0;
+ /* values less than 3 result in the SOSSI block resetting itself */
+ if (tw1 < 3)
+ tw1 = 3;
+ if (tw1 > 0x40)
+ return -1;
+
+ t->tim[2] = tw0 - 1;
+ t->tim[3] = tw1 - 1;
+
+ return 0;
+}
+
+static void _set_timing(int div, int tw0, int tw1)
+{
+ u32 l;
+
+#ifdef VERBOSE
+ dev_dbg(sossi.fbdev->dev, "Using TW0 = %d, TW1 = %d, div = %d\n",
+ tw0 + 1, tw1 + 1, div);
+#endif
+
+ clk_set_rate(sossi.fck, sossi.fck_hz / div);
+ clk_enable(sossi.fck);
+ l = sossi_read_reg(SOSSI_INIT1_REG);
+ l &= ~((0x0f << 20) | (0x3f << 24));
+ l |= (tw0 << 20) | (tw1 << 24);
+ sossi_write_reg(SOSSI_INIT1_REG, l);
+ clk_disable(sossi.fck);
+}
+
+static void _set_bits_per_cycle(int bus_pick_count, int bus_pick_width)
+{
+ u32 l;
+
+ l = sossi_read_reg(SOSSI_INIT3_REG);
+ l &= ~0x3ff;
+ l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f);
+ sossi_write_reg(SOSSI_INIT3_REG, l);
+}
+
+static void _set_tearsync_mode(int mode, unsigned line)
+{
+ u32 l;
+
+ l = sossi_read_reg(SOSSI_TEARING_REG);
+ l &= ~(((1 << 11) - 1) << 15);
+ l |= line << 15;
+ l &= ~(0x3 << 26);
+ l |= mode << 26;
+ sossi_write_reg(SOSSI_TEARING_REG, l);
+ if (mode)
+ sossi_set_bits(SOSSI_INIT2_REG, 1 << 6); /* TE logic */
+ else
+ sossi_clear_bits(SOSSI_INIT2_REG, 1 << 6);
+}
+
+static inline void set_timing(int access)
+{
+ if (access != sossi.last_access) {
+ sossi.last_access = access;
+ _set_timing(sossi.clk_div,
+ sossi.clk_tw0[access], sossi.clk_tw1[access]);
+ }
+}
+
+static void sossi_start_transfer(void)
+{
+ /* WE */
+ sossi_clear_bits(SOSSI_INIT2_REG, 1 << 4);
+ /* CS active low */
+ sossi_clear_bits(SOSSI_INIT1_REG, 1 << 30);
+}
+
+static void sossi_stop_transfer(void)
+{
+ /* WE */
+ sossi_set_bits(SOSSI_INIT2_REG, 1 << 4);
+ /* CS active low */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 30);
+}
+
+static void wait_end_of_write(void)
+{
+ /* Before reading we must check if some writings are going on */
+ while (!(sossi_read_reg(SOSSI_INIT2_REG) & (1 << 3)));
+}
+
+static void send_data(const void *data, unsigned int len)
+{
+ while (len >= 4) {
+ sossi_write_reg(SOSSI_FIFO_REG, *(const u32 *) data);
+ len -= 4;
+ data += 4;
+ }
+ while (len >= 2) {
+ sossi_write_reg16(SOSSI_FIFO_REG, *(const u16 *) data);
+ len -= 2;
+ data += 2;
+ }
+ while (len) {
+ sossi_write_reg8(SOSSI_FIFO_REG, *(const u8 *) data);
+ len--;
+ data++;
+ }
+}
+
+static void set_cycles(unsigned int len)
+{
+ unsigned long nr_cycles = len / (sossi.bus_pick_width / 8);
+
+ BUG_ON((nr_cycles - 1) & ~0x3ffff);
+
+ sossi_clear_bits(SOSSI_INIT1_REG, 0x3ffff);
+ sossi_set_bits(SOSSI_INIT1_REG, (nr_cycles - 1) & 0x3ffff);
+}
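+
+/*
+ * Worked example with illustrative numbers: on a 16-bit bus
+ * (bus_pick_width = 16) a 240x320 frame of 16 bpp pixels is 153600 bytes,
+ * so set_cycles() programs 153600 / (16 / 8) = 76800 cycles, well within
+ * the 18-bit (0x3ffff) limit enforced by the BUG_ON() above.
+ */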
+
+static int sossi_convert_timings(struct extif_timings *t)
+{
+ int r = 0;
+ int div = t->clk_div;
+
+ t->converted = 0;
+
+ if (div <= 0 || div > 8)
+ return -1;
+
+ /* no CS on SOSSI, so ignore cson, csoff, cs_pulsewidth */
+ if ((r = calc_rd_timings(t)) < 0)
+ return r;
+
+ if ((r = calc_wr_timings(t)) < 0)
+ return r;
+
+ t->tim[4] = div;
+
+ t->converted = 1;
+
+ return 0;
+}
+
+static void sossi_set_timings(const struct extif_timings *t)
+{
+ BUG_ON(!t->converted);
+
+ sossi.clk_tw0[RD_ACCESS] = t->tim[0];
+ sossi.clk_tw1[RD_ACCESS] = t->tim[1];
+
+ sossi.clk_tw0[WR_ACCESS] = t->tim[2];
+ sossi.clk_tw1[WR_ACCESS] = t->tim[3];
+
+ sossi.clk_div = t->tim[4];
+}
+
+static void sossi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+ *clk_period = HZ_TO_PS(sossi.fck_hz);
+ *max_clk_div = 8;
+}
+
+static void sossi_set_bits_per_cycle(int bpc)
+{
+ int bus_pick_count, bus_pick_width;
+
+ /*
+ * We explicitly set the bus_pick_count as well, although
+ * with remapping/reordering disabled it will be calculated by HW
+ * as (32 / bus_pick_width).
+ */
+ switch (bpc) {
+ case 8:
+ bus_pick_count = 4;
+ bus_pick_width = 8;
+ break;
+ case 16:
+ bus_pick_count = 2;
+ bus_pick_width = 16;
+ break;
+ default:
+ BUG();
+ return;
+ }
+ sossi.bus_pick_width = bus_pick_width;
+ sossi.bus_pick_count = bus_pick_count;
+}
+
+static int sossi_setup_tearsync(unsigned pin_cnt,
+ unsigned hs_pulse_time, unsigned vs_pulse_time,
+ int hs_pol_inv, int vs_pol_inv, int div)
+{
+ int hs, vs;
+ u32 l;
+
+ if (pin_cnt != 1 || div < 1 || div > 8)
+ return -EINVAL;
+
+ hs = ps_to_sossi_ticks(hs_pulse_time, div);
+ vs = ps_to_sossi_ticks(vs_pulse_time, div);
+ if (vs < 8 || vs <= hs || vs >= (1 << 12))
+ return -EDOM;
+ vs /= 8;
+ vs--;
+ if (hs > 8)
+ hs = 8;
+ if (hs)
+ hs--;
+
+ dev_dbg(sossi.fbdev->dev,
+ "setup_tearsync: hs %d vs %d hs_inv %d vs_inv %d\n",
+ hs, vs, hs_pol_inv, vs_pol_inv);
+
+ clk_enable(sossi.fck);
+ l = sossi_read_reg(SOSSI_TEARING_REG);
+ l &= ~((1 << 15) - 1);
+ l |= vs << 3;
+ l |= hs;
+ if (hs_pol_inv)
+ l |= 1 << 29;
+ else
+ l &= ~(1 << 29);
+ if (vs_pol_inv)
+ l |= 1 << 28;
+ else
+ l &= ~(1 << 28);
+ sossi_write_reg(SOSSI_TEARING_REG, l);
+ clk_disable(sossi.fck);
+
+ return 0;
+}
+
+static int sossi_enable_tearsync(int enable, unsigned line)
+{
+ int mode;
+
+ dev_dbg(sossi.fbdev->dev, "tearsync %d line %d\n", enable, line);
+ if (line >= 1 << 11)
+ return -EINVAL;
+ if (enable) {
+ if (line)
+ mode = 2; /* HS or VS */
+ else
+ mode = 3; /* VS only */
+ } else
+ mode = 0;
+ sossi.tearsync_line = line;
+ sossi.tearsync_mode = mode;
+
+ return 0;
+}
+
+static void sossi_write_command(const void *data, unsigned int len)
+{
+ clk_enable(sossi.fck);
+ set_timing(WR_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ /* CMD#/DATA */
+ sossi_clear_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(len);
+ sossi_start_transfer();
+ send_data(data, len);
+ sossi_stop_transfer();
+ wait_end_of_write();
+ clk_disable(sossi.fck);
+}
+
+static void sossi_write_data(const void *data, unsigned int len)
+{
+ clk_enable(sossi.fck);
+ set_timing(WR_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ /* CMD#/DATA */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(len);
+ sossi_start_transfer();
+ send_data(data, len);
+ sossi_stop_transfer();
+ wait_end_of_write();
+ clk_disable(sossi.fck);
+}
+
+static void sossi_transfer_area(int width, int height,
+ void (callback)(void *data), void *data)
+{
+ BUG_ON(callback == NULL);
+
+ sossi.lcdc_callback = callback;
+ sossi.lcdc_callback_data = data;
+
+ clk_enable(sossi.fck);
+ set_timing(WR_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ _set_tearsync_mode(sossi.tearsync_mode, sossi.tearsync_line);
+ /* CMD#/DATA */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(width * height * sossi.bus_pick_width / 8);
+
+ sossi_start_transfer();
+ if (sossi.tearsync_mode) {
+ /*
+ * Wait for the sync signal and start the transfer only
+ * then. We don't seem to be able to use HW-synchronized DMA
+ * for this, since LCD DMA shows huge latencies, as if it
+ * ignored some of the DMA requests from SoSSI.
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&sossi.lock, flags);
+ sossi.vsync_dma_pending++;
+ spin_unlock_irqrestore(&sossi.lock, flags);
+ } else
+ /* Just start the transfer right away. */
+ omap_enable_lcd_dma();
+}
+
+static void sossi_dma_callback(void *data)
+{
+ omap_stop_lcd_dma();
+ sossi_stop_transfer();
+ clk_disable(sossi.fck);
+ sossi.lcdc_callback(sossi.lcdc_callback_data);
+}
+
+static void sossi_read_data(void *data, unsigned int len)
+{
+ clk_enable(sossi.fck);
+ set_timing(RD_ACCESS);
+ _set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
+ /* CMD#/DATA */
+ sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
+ set_cycles(len);
+ sossi_start_transfer();
+ while (len >= 4) {
+ *(u32 *) data = sossi_read_reg(SOSSI_FIFO_REG);
+ len -= 4;
+ data += 4;
+ }
+ while (len >= 2) {
+ *(u16 *) data = sossi_read_reg16(SOSSI_FIFO_REG);
+ len -= 2;
+ data += 2;
+ }
+ while (len) {
+ *(u8 *) data = sossi_read_reg8(SOSSI_FIFO_REG);
+ len--;
+ data++;
+ }
+ sossi_stop_transfer();
+ clk_disable(sossi.fck);
+}
+
+static irqreturn_t sossi_match_irq(int irq, void *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sossi.lock, flags);
+ if (sossi.vsync_dma_pending) {
+ sossi.vsync_dma_pending--;
+ omap_enable_lcd_dma();
+ }
+ spin_unlock_irqrestore(&sossi.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static int sossi_init(struct omapfb_device *fbdev)
+{
+ u32 l, k;
+ struct clk *fck;
+ struct clk *dpll1out_ck;
+ int r;
+
+ sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE);
+ sossi.fbdev = fbdev;
+ spin_lock_init(&sossi.lock);
+
+ dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out");
+ if (IS_ERR(dpll1out_ck)) {
+ dev_err(fbdev->dev, "can't get DPLL1OUT clock\n");
+ return PTR_ERR(dpll1out_ck);
+ }
+ /*
+ * We need the parent clock rate, which we might divide further
+ * depending on the timing requirements of the controller. See
+ * _set_timing().
+ */
+ sossi.fck_hz = clk_get_rate(dpll1out_ck);
+ clk_put(dpll1out_ck);
+
+ fck = clk_get(fbdev->dev, "ck_sossi");
+ if (IS_ERR(fck)) {
+ dev_err(fbdev->dev, "can't get SoSSI functional clock\n");
+ return PTR_ERR(fck);
+ }
+ sossi.fck = fck;
+
+ /* Reset and enable the SoSSI module */
+ l = omap_readl(MOD_CONF_CTRL_1);
+ l |= CONF_SOSSI_RESET_R;
+ omap_writel(l, MOD_CONF_CTRL_1);
+ l &= ~CONF_SOSSI_RESET_R;
+ omap_writel(l, MOD_CONF_CTRL_1);
+
+ clk_enable(sossi.fck);
+ l = omap_readl(ARM_IDLECT2);
+ l &= ~(1 << 8); /* DMACK_REQ */
+ omap_writel(l, ARM_IDLECT2);
+
+ l = sossi_read_reg(SOSSI_INIT2_REG);
+ /* Enable and reset the SoSSI block */
+ l |= (1 << 0) | (1 << 1);
+ sossi_write_reg(SOSSI_INIT2_REG, l);
+ /* Take SoSSI out of reset */
+ l &= ~(1 << 1);
+ sossi_write_reg(SOSSI_INIT2_REG, l);
+
+ sossi_write_reg(SOSSI_ID_REG, 0);
+ l = sossi_read_reg(SOSSI_ID_REG);
+ k = sossi_read_reg(SOSSI_ID_REG);
+
+ if (l != 0x55555555 || k != 0xaaaaaaaa) {
+ dev_err(fbdev->dev,
+ "invalid SoSSI sync pattern: %08x, %08x\n", l, k);
+ r = -ENODEV;
+ goto err;
+ }
+
+ if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) {
+ dev_err(fbdev->dev, "can't get LCDC IRQ\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ l = sossi_read_reg(SOSSI_ID_REG); /* Component code */
+ l = sossi_read_reg(SOSSI_ID_REG);
+ dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n",
+ l >> 16, l & 0xffff);
+
+ l = sossi_read_reg(SOSSI_INIT1_REG);
+ l |= (1 << 19); /* DMA_MODE */
+ l &= ~(1 << 31); /* REORDERING */
+ sossi_write_reg(SOSSI_INIT1_REG, l);
+
+ if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
+ IRQT_FALLING,
+ "sossi_match", sossi.fbdev->dev)) < 0) {
+ dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
+ goto err;
+ }
+
+ clk_disable(sossi.fck);
+ return 0;
+
+err:
+ clk_disable(sossi.fck);
+ clk_put(sossi.fck);
+ return r;
+}
+
+static void sossi_cleanup(void)
+{
+ omap_lcdc_free_dma_callback();
+ clk_put(sossi.fck);
+}
+
+struct lcd_ctrl_extif omap1_ext_if = {
+ .init = sossi_init,
+ .cleanup = sossi_cleanup,
+ .get_clk_info = sossi_get_clk_info,
+ .convert_timings = sossi_convert_timings,
+ .set_timings = sossi_set_timings,
+ .set_bits_per_cycle = sossi_set_bits_per_cycle,
+ .setup_tearsync = sossi_setup_tearsync,
+ .enable_tearsync = sossi_enable_tearsync,
+ .write_command = sossi_write_command,
+ .read_data = sossi_read_data,
+ .write_data = sossi_write_data,
+ .transfer_area = sossi_transfer_area,
+
+ .max_transmit_size = SOSSI_MAX_XMIT_BYTES,
+};
+
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index e64f8b5d0056..8503e733a172 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -52,7 +52,7 @@ struct fb_info_platinum {
struct {
__u8 red, green, blue;
} palette[256];
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
volatile struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 0a04483aa3e0..10c0cc6e93fc 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -24,7 +24,7 @@
* License. See the file COPYING in the main directory of this archive for
* more details.
*
- *
+ *
*/
#include <linux/module.h>
@@ -58,7 +58,7 @@
#endif
/*
- * Driver data
+ * Driver data
*/
static char *mode __devinitdata = NULL;
@@ -82,12 +82,12 @@ struct pm2fb_par
{
pm2type_t type; /* Board type */
unsigned char __iomem *v_regs;/* virtual address of p_regs */
- u32 memclock; /* memclock */
+ u32 memclock; /* memclock */
u32 video; /* video flags before blanking */
u32 mem_config; /* MemConfig reg at probe */
u32 mem_control; /* MemControl reg at probe */
u32 boot_address; /* BootAddress reg at probe */
- u32 palette[16];
+ u32 palette[16];
};
/*
@@ -95,12 +95,12 @@ struct pm2fb_par
* if we don't use modedb.
*/
static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
- .id = "",
+ .id = "",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 1,
.ypanstep = 1,
- .ywrapstep = 0,
+ .ywrapstep = 0,
.accel = FB_ACCEL_3DLABS_PERMEDIA2,
};
@@ -109,26 +109,26 @@ static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
*/
static struct fb_var_screeninfo pm2fb_var __devinitdata = {
/* "640x480, 8 bpp @ 60 Hz */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel =8,
- .red = {0, 8, 0},
- .blue = {0, 8, 0},
- .green = {0, 8, 0},
- .activate = FB_ACTIVATE_NOW,
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
+ .xres = 640,
+ .yres = 480,
+ .xres_virtual = 640,
+ .yres_virtual = 480,
+ .bits_per_pixel = 8,
+ .red = {0, 8, 0},
+ .blue = {0, 8, 0},
+ .green = {0, 8, 0},
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .accel_flags = 0,
+ .pixclock = 39721,
+ .left_margin = 40,
+ .right_margin = 24,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED
};
/*
@@ -166,7 +166,7 @@ static inline u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
index = PM2VR_RD_INDEXED_DATA;
break;
- }
+ }
mb();
return pm2_RD(p, index);
}
@@ -182,7 +182,7 @@ static inline void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
index = PM2VR_RD_INDEXED_DATA;
break;
- }
+ }
wmb();
pm2_WR(p, index, v);
wmb();
@@ -197,7 +197,7 @@ static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
}
#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
-#define WAIT_FIFO(p,a)
+#define WAIT_FIFO(p, a)
#else
static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
{
@@ -209,7 +209,7 @@ static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
/*
* partial products for the supported horizontal resolutions.
*/
-#define PACKPP(p0,p1,p2) (((p2) << 6) | ((p1) << 3) | (p0))
+#define PACKPP(p0, p1, p2) (((p2) << 6) | ((p1) << 3) | (p0))
static const struct {
u16 width;
u16 pp;
@@ -357,7 +357,7 @@ static void reset_card(struct pm2fb_par* p)
static void reset_config(struct pm2fb_par* p)
{
WAIT_FIFO(p, 52);
- pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG)&
+ pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) &
~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED));
pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L));
pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L));
@@ -367,7 +367,7 @@ static void reset_config(struct pm2fb_par* p)
pm2_WR(p, PM2R_RASTERIZER_MODE, 0);
pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB);
pm2_WR(p, PM2R_LB_READ_FORMAT, 0);
- pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
+ pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
pm2_WR(p, PM2R_LB_READ_MODE, 0);
pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0);
pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0);
@@ -535,7 +535,7 @@ static void set_video(struct pm2fb_par* p, u32 video) {
vsync = video;
DPRINTK("video = 0x%x\n", video);
-
+
/*
* The hardware cursor needs +vsync to recognise vert retrace.
* We may not be using the hardware cursor, but the X Glint
@@ -574,9 +574,9 @@ static void set_video(struct pm2fb_par* p, u32 video) {
*/
/**
- * pm2fb_check_var - Optional function. Validates a var passed in.
- * @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_check_var - Optional function. Validates a var passed in.
+ * @var: frame buffer variable screen structure
+ * @info: frame buffer structure that represents a single frame buffer
*
* Checks to see if the hardware supports the state requested by
* var passed in.
@@ -615,23 +615,23 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */
lpitch = var->xres * ((var->bits_per_pixel + 7)>>3);
-
+
if (var->xres < 320 || var->xres > 1600) {
DPRINTK("width not supported: %u\n", var->xres);
return -EINVAL;
}
-
+
if (var->yres < 200 || var->yres > 1200) {
DPRINTK("height not supported: %u\n", var->yres);
return -EINVAL;
}
-
+
if (lpitch * var->yres_virtual > info->fix.smem_len) {
DPRINTK("no memory for screen (%ux%ux%u)\n",
var->xres, var->yres_virtual, var->bits_per_pixel);
return -EINVAL;
}
-
+
if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock));
return -EINVAL;
@@ -672,17 +672,17 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
break;
}
var->height = var->width = -1;
-
+
var->accel_flags = 0; /* Can't mmap if this is on */
-
+
DPRINTK("Checking graphics mode at %dx%d depth %d\n",
var->xres, var->yres, var->bits_per_pixel);
return 0;
}
/**
- * pm2fb_set_par - Alters the hardware state.
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_set_par - Alters the hardware state.
+ * @info: frame buffer structure that represents a single frame buffer
*
* Using the fb_var_screeninfo in fb_info we set the resolution of the
* this particular framebuffer.
@@ -709,7 +709,7 @@ static int pm2fb_set_par(struct fb_info *info)
clear_palette(par);
if ( par->memclock )
set_memclock(par, par->memclock);
-
+
width = (info->var.xres_virtual + 7) & ~7;
height = info->var.yres_virtual;
depth = (info->var.bits_per_pixel + 7) & ~7;
@@ -722,7 +722,7 @@ static int pm2fb_set_par(struct fb_info *info)
DPRINTK("pixclock too high (%uKHz)\n", pixclock);
return -EINVAL;
}
-
+
hsstart = to3264(info->var.right_margin, depth, data64);
hsend = hsstart + to3264(info->var.hsync_len, depth, data64);
hbend = hsend + to3264(info->var.left_margin, depth, data64);
@@ -737,7 +737,7 @@ static int pm2fb_set_par(struct fb_info *info)
base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1);
if (data64)
video |= PM2F_DATA_64_ENABLE;
-
+
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) {
if (lowhsync) {
DPRINTK("ignoring +hsync, using -hsync.\n");
@@ -778,9 +778,9 @@ static int pm2fb_set_par(struct fb_info *info)
WAIT_FIFO(par, 1);
pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0);
}
-
+
set_aperture(par, depth);
-
+
mb();
WAIT_FIFO(par, 19);
pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL,
@@ -847,22 +847,22 @@ static int pm2fb_set_par(struct fb_info *info)
set_pixclock(par, pixclock);
DPRINTK("Setting graphics mode at %dx%d depth %d\n",
info->var.xres, info->var.yres, info->var.bits_per_pixel);
- return 0;
+ return 0;
}
/**
- * pm2fb_setcolreg - Sets a color register.
- * @regno: boolean, 0 copy local, 1 get_user() function
- * @red: frame buffer colormap structure
- * @green: The green value which can be up to 16 bits wide
+ * pm2fb_setcolreg - Sets a color register.
+ * @regno: index of the color register to set
+ * @red: The red value which can be up to 16 bits wide
+ * @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
- * @transp: If supported the alpha value which can be up to 16 bits wide.
- * @info: frame buffer info structure
- *
- * Set a single color register. The values supplied have a 16 bit
- * magnitude which needs to be scaled in this function for the hardware.
+ * @transp: If supported the alpha value which can be up to 16 bits wide.
+ * @info: frame buffer info structure
+ *
+ * Set a single color register. The values supplied have a 16 bit
+ * magnitude which needs to be scaled in this function for the hardware.
* Pretty much a direct lift from tdfxfb.c.
- *
+ *
* Returns negative errno on error, or zero on success.
*/
static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -906,7 +906,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (blue << blue.offset) | (transp << transp.offset)
* RAMDAC does not exist
*/
-#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
+#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF -(val)) >> 16)
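+/*
+ * Worked example: CNVT_TOHW() scales a 16-bit colour component down to
+ * "width" bits with rounding, e.g. CNVT_TOHW(0xffff, 8) =
+ * ((0xffff << 8) + 0x7fff - 0xffff) >> 16 = 0xff, and CNVT_TOHW(0, 8) = 0.
+ */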
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
case FB_VISUAL_PSEUDOCOLOR:
@@ -916,9 +916,9 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
transp = CNVT_TOHW(transp, info->var.transp.length);
break;
case FB_VISUAL_DIRECTCOLOR:
- /* example here assumes 8 bit DAC. Might be different
- * for your hardware */
- red = CNVT_TOHW(red, 8);
+ /* example here assumes 8 bit DAC. Might be different
+ * for your hardware */
+ red = CNVT_TOHW(red, 8);
green = CNVT_TOHW(green, 8);
blue = CNVT_TOHW(blue, 8);
/* hey, there is bug in transp handling... */
@@ -940,11 +940,11 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
switch (info->var.bits_per_pixel) {
case 8:
- break;
- case 16:
+ break;
+ case 16:
case 24:
- case 32:
- par->palette[regno] = v;
+ case 32:
+ par->palette[regno] = v;
break;
}
return 0;
@@ -956,15 +956,15 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
}
/**
- * pm2fb_pan_display - Pans the display.
- * @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_pan_display - Pans the display.
+ * @var: frame buffer variable screen structure
+ * @info: frame buffer structure that represents a single frame buffer
*
* Pan (or wrap, depending on the `vmode' field) the display using the
- * `xoffset' and `yoffset' fields of the `var' structure.
- * If the values don't fit, return -EINVAL.
+ * `xoffset' and `yoffset' fields of the `var' structure.
+ * If the values don't fit, return -EINVAL.
*
- * Returns negative errno on error, or zero on success.
+ * Returns negative errno on error, or zero on success.
*
*/
static int pm2fb_pan_display(struct fb_var_screeninfo *var,
@@ -980,24 +980,24 @@ static int pm2fb_pan_display(struct fb_var_screeninfo *var,
depth = (depth > 32) ? 32 : depth;
base = to3264(var->yoffset * xres + var->xoffset, depth, 1);
WAIT_FIFO(p, 1);
- pm2_WR(p, PM2R_SCREEN_BASE, base);
+ pm2_WR(p, PM2R_SCREEN_BASE, base);
return 0;
}
/**
- * pm2fb_blank - Blanks the display.
- * @blank_mode: the blank mode we want.
- * @info: frame buffer structure that represents a single frame buffer
+ * pm2fb_blank - Blanks the display.
+ * @blank_mode: the blank mode we want.
+ * @info: frame buffer structure that represents a single frame buffer
*
- * Blank the screen if blank_mode != 0, else unblank. Return 0 if
- * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
- * video mode which doesn't support it. Implements VESA suspend
- * and powerdown modes on hardware that supports disabling hsync/vsync:
- * blank_mode == 2: suspend vsync
- * blank_mode == 3: suspend hsync
- * blank_mode == 4: powerdown
+ * Blank the screen if blank_mode != 0, else unblank. Return 0 if
+ * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
+ * video mode which doesn't support it. Implements VESA suspend
+ * and powerdown modes on hardware that supports disabling hsync/vsync:
+ * blank_mode == 2: suspend vsync
+ * blank_mode == 3: suspend hsync
+ * blank_mode == 4: powerdown
*
- * Returns negative errno on error, or zero on success.
+ * Returns negative errno on error, or zero on success.
*
*/
static int pm2fb_blank(int blank_mode, struct fb_info *info)
@@ -1071,7 +1071,7 @@ static void pm2fb_block_op(struct fb_info* info, int copy,
pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (y << 16) | x);
pm2_WR(par, PM2R_RECTANGLE_SIZE, (h << 16) | w);
wmb();
- pm2_WR(par, PM2R_RENDER,PM2F_RENDER_RECTANGLE |
+ pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE |
(x<xsrc ? PM2F_INCREASE_X : 0) |
(y<ysrc ? PM2F_INCREASE_Y : 0) |
(copy ? 0 : PM2F_RENDER_FASTFILL));
@@ -1234,7 +1234,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
DPRINTK("Adjusting register base for big-endian.\n");
#endif
DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start);
-
+
/* Registers - request region and map it. */
if ( !request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len,
"pm2fb regbase") ) {
@@ -1317,17 +1317,17 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
}
info->fbops = &pm2fb_ops;
- info->fix = pm2fb_fix;
+ info->fix = pm2fb_fix;
info->pseudo_palette = default_par->palette;
info->flags = FBINFO_DEFAULT |
- FBINFO_HWACCEL_YPAN |
- FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT;
+ FBINFO_HWACCEL_YPAN |
+ FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT;
if (!mode)
mode = "640x480@60";
-
- err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
+
+ err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
if (!err || err == 4)
info->var = pm2fb_var;
@@ -1348,8 +1348,8 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
return 0;
err_exit_all:
- fb_dealloc_cmap(&info->cmap);
- err_exit_both:
+ fb_dealloc_cmap(&info->cmap);
+ err_exit_both:
iounmap(info->screen_base);
release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len);
err_exit_mmio:
@@ -1374,7 +1374,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
struct pm2fb_par *par = info->par;
unregister_framebuffer(info);
-
+
iounmap(info->screen_base);
release_mem_region(fix->smem_start, fix->smem_len);
iounmap(par->v_regs);
@@ -1402,9 +1402,9 @@ static struct pci_device_id pm2fb_id_table[] = {
static struct pci_driver pm2fb_driver = {
.name = "pm2fb",
- .id_table = pm2fb_id_table,
- .probe = pm2fb_probe,
- .remove = __devexit_p(pm2fb_remove),
+ .id_table = pm2fb_id_table,
+ .probe = pm2fb_probe,
+ .remove = __devexit_p(pm2fb_remove),
};
MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
@@ -1423,7 +1423,7 @@ static int __init pm2fb_setup(char *options)
if (!options || !*options)
return 0;
- while ((this_opt = strsep(&options, ",")) != NULL) {
+ while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
if(!strcmp(this_opt, "lowhsync")) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index b52e883f0a52..5b3f54c0918e 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -77,7 +77,7 @@ static struct fb_fix_screeninfo pm3fb_fix __devinitdata = {
.xpanstep = 1,
.ypanstep = 1,
.ywrapstep = 0,
- .accel = FB_ACCEL_NONE,
+ .accel = FB_ACCEL_3DLABS_PERMEDIA3,
};
/*
@@ -185,6 +185,238 @@ static inline int pm3fb_shift_bpp(unsigned bpp, int v)
return 0;
}
+/* acceleration */
+static int pm3fb_sync(struct fb_info *info)
+{
+ struct pm3_par *par = info->par;
+
+ PM3_WAIT(par, 2);
+ PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+ PM3_WRITE_REG(par, PM3Sync, 0);
+ mb();
+ do {
+ while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0);
+ rmb();
+ } while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag);
+
+ return 0;
+}
+
+static void pm3fb_init_engine(struct fb_info *info)
+{
+ struct pm3_par *par = info->par;
+ const u32 width = (info->var.xres_virtual + 7) & ~7;
+
+ PM3_WAIT(par, 50);
+ PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
+ PM3_WRITE_REG(par, PM3StatisticMode, 0x0);
+ PM3_WRITE_REG(par, PM3DeltaMode, 0x0);
+ PM3_WRITE_REG(par, PM3RasterizerMode, 0x0);
+ PM3_WRITE_REG(par, PM3ScissorMode, 0x0);
+ PM3_WRITE_REG(par, PM3LineStippleMode, 0x0);
+ PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0);
+ PM3_WRITE_REG(par, PM3GIDMode, 0x0);
+ PM3_WRITE_REG(par, PM3DepthMode, 0x0);
+ PM3_WRITE_REG(par, PM3StencilMode, 0x0);
+ PM3_WRITE_REG(par, PM3StencilData, 0x0);
+ PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0);
+ PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0);
+ PM3_WRITE_REG(par, PM3TextureReadMode, 0x0);
+ PM3_WRITE_REG(par, PM3LUTMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0);
+ PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0);
+ PM3_WRITE_REG(par, PM3FogMode, 0x0);
+ PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0);
+ PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0);
+ PM3_WRITE_REG(par, PM3AntialiasMode, 0x0);
+ PM3_WRITE_REG(par, PM3YUVMode, 0x0);
+ PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0);
+ PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0);
+ PM3_WRITE_REG(par, PM3DitherMode, 0x0);
+ PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0);
+ PM3_WRITE_REG(par, PM3RouterMode, 0x0);
+ PM3_WRITE_REG(par, PM3Window, 0x0);
+
+ PM3_WRITE_REG(par, PM3Config2D, 0x0);
+
+ PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff);
+
+ PM3_WRITE_REG(par, PM3XBias, 0x0);
+ PM3_WRITE_REG(par, PM3YBias, 0x0);
+ PM3_WRITE_REG(par, PM3DeltaControl, 0x0);
+
+ PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff);
+
+ PM3_WRITE_REG(par, PM3FBDestReadEnables,
+ PM3FBDestReadEnables_E(0xff) |
+ PM3FBDestReadEnables_R(0xff) |
+ PM3FBDestReadEnables_ReferenceAlpha(0xff));
+ PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0);
+ PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0);
+ PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0,
+ PM3FBDestReadBufferWidth_Width(width));
+
+ PM3_WRITE_REG(par, PM3FBDestReadMode,
+ PM3FBDestReadMode_ReadEnable |
+ PM3FBDestReadMode_Enable0);
+ PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0);
+ PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0);
+ PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth,
+ PM3FBSourceReadBufferWidth_Width(width));
+ PM3_WRITE_REG(par, PM3FBSourceReadMode,
+ PM3FBSourceReadMode_Blocking |
+ PM3FBSourceReadMode_ReadEnable);
+
+ PM3_WAIT(par, 2);
+ {
+ unsigned long rm = 1;
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ PM3_WRITE_REG(par, PM3PixelSize,
+ PM3PixelSize_GLOBAL_8BIT);
+ break;
+ case 16:
+ PM3_WRITE_REG(par, PM3PixelSize,
+ PM3PixelSize_GLOBAL_16BIT);
+ break;
+ case 32:
+ PM3_WRITE_REG(par, PM3PixelSize,
+ PM3PixelSize_GLOBAL_32BIT);
+ break;
+ default:
+ DPRINTK(1, "Unsupported depth %d\n",
+ info->var.bits_per_pixel);
+ break;
+ }
+ PM3_WRITE_REG(par, PM3RasterizerMode, rm);
+ }
+
+ PM3_WAIT(par, 20);
+ PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff);
+ PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff);
+ PM3_WRITE_REG(par, PM3FBWriteMode,
+ PM3FBWriteMode_WriteEnable |
+ PM3FBWriteMode_OpaqueSpan |
+ PM3FBWriteMode_Enable0);
+ PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0);
+ PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0);
+ PM3_WRITE_REG(par, PM3FBWriteBufferWidth0,
+ PM3FBWriteBufferWidth_Width(width));
+
+ PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0);
+ {
+ /* size in lines of FB */
+ unsigned long sofb = info->screen_size /
+ info->fix.line_length;
+ if (sofb > 4095)
+ PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095);
+ else
+ PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb);
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ PM3_WRITE_REG(par, PM3DitherMode,
+ (1 << 10) | (2 << 3));
+ break;
+ case 16:
+ PM3_WRITE_REG(par, PM3DitherMode,
+ (1 << 10) | (1 << 3));
+ break;
+ case 32:
+ PM3_WRITE_REG(par, PM3DitherMode,
+ (1 << 10) | (0 << 3));
+ break;
+ default:
+ DPRINTK(1, "Unsupported depth %d\n",
+ info->var.bits_per_pixel);
+ break;
+ }
+ }
+
+ PM3_WRITE_REG(par, PM3dXDom, 0x0);
+ PM3_WRITE_REG(par, PM3dXSub, 0x0);
+ PM3_WRITE_REG(par, PM3dY, (1 << 16));
+ PM3_WRITE_REG(par, PM3StartXDom, 0x0);
+ PM3_WRITE_REG(par, PM3StartXSub, 0x0);
+ PM3_WRITE_REG(par, PM3StartY, 0x0);
+ PM3_WRITE_REG(par, PM3Count, 0x0);
+
+/* Disable LocalBuffer. better safe than sorry */
+ PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0);
+ PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0);
+ PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0);
+ PM3_WRITE_REG(par, PM3LBWriteMode, 0x0);
+
+ pm3fb_sync(info);
+}
+
+static void pm3fb_fillrect (struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ struct pm3_par *par = info->par;
+ struct fb_fillrect modded;
+ int vxres, vyres;
+ u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
+ ((u32*)info->pseudo_palette)[region->color] : region->color;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+ if ((info->flags & FBINFO_HWACCEL_DISABLED) ||
+ region->rop != ROP_COPY) {
+ cfb_fillrect(info, region);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+ if (!modded.width || !modded.height ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ if (info->var.bits_per_pixel == 8)
+ color |= color << 8;
+ if (info->var.bits_per_pixel <= 16)
+ color |= color << 16;
+
+ PM3_WAIT(par, 4);
+
+ PM3_WRITE_REG(par, PM3Config2D,
+ PM3Config2D_UseConstantSource |
+ PM3Config2D_ForegroundROPEnable |
+ (PM3Config2D_ForegroundROP(0x3)) | /* 0x3 is GXcopy */
+ PM3Config2D_FBWriteEnable);
+
+ PM3_WRITE_REG(par, PM3ForegroundColor, color);
+
+ PM3_WRITE_REG(par, PM3RectanglePosition,
+ (PM3RectanglePosition_XOffset(modded.dx)) |
+ (PM3RectanglePosition_YOffset(modded.dy)));
+
+ PM3_WRITE_REG(par, PM3Render2D,
+ PM3Render2D_XPositive |
+ PM3Render2D_YPositive |
+ PM3Render2D_Operation_Normal |
+ PM3Render2D_SpanOperation |
+ (PM3Render2D_Width(modded.width)) |
+ (PM3Render2D_Height(modded.height)));
+}
+/* end of acceleration functions */
+
/* write the mode to registers */
static void pm3fb_write_mode(struct fb_info *info)
{
@@ -380,8 +612,6 @@ static void pm3fb_write_mode(struct fb_info *info)
/*
* hardware independent functions
*/
-int pm3fb_init(void);
-
static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
u32 lpitch;
@@ -528,6 +758,7 @@ static int pm3fb_set_par(struct fb_info *info)
pm3fb_clear_colormap(par, 0, 0, 0);
PM3_WRITE_DAC_REG(par, PM3RD_CursorMode,
PM3RD_CursorMode_CURSOR_DISABLE);
+ pm3fb_init_engine(info);
pm3fb_write_mode(info);
return 0;
}
@@ -675,10 +906,11 @@ static struct fb_ops pm3fb_ops = {
.fb_set_par = pm3fb_set_par,
.fb_setcolreg = pm3fb_setcolreg,
.fb_pan_display = pm3fb_pan_display,
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = pm3fb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_blank = pm3fb_blank,
+ .fb_sync = pm3fb_sync,
};
/* ------------------------------------------------------------------------- */
@@ -847,7 +1079,8 @@ static int __devinit pm3fb_probe(struct pci_dev *dev,
info->fix = pm3fb_fix;
info->pseudo_palette = par->palette;
- info->flags = FBINFO_DEFAULT;/* | FBINFO_HWACCEL_YPAN;*/
+ info->flags = FBINFO_DEFAULT |
+ FBINFO_HWACCEL_FILLRECT;/* | FBINFO_HWACCEL_YPAN;*/
/*
* This should give a reasonable default video mode. The following is
@@ -935,35 +1168,12 @@ static struct pci_driver pm3fb_driver = {
MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
-#ifndef MODULE
- /*
- * Setup
- */
-
-/*
- * Only necessary if your driver takes special options,
- * otherwise we fall back on the generic fb_setup().
- */
-static int __init pm3fb_setup(char *options)
+static int __init pm3fb_init(void)
{
- /* Parse user speficied options (`video=pm3fb:') */
- return 0;
-}
-#endif /* MODULE */
-
-int __init pm3fb_init(void)
-{
- /*
- * For kernel boot options (in 'video=pm3fb:<options>' format)
- */
#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("pm3fb", &option))
+ if (fb_get_options("pm3fb", NULL))
return -ENODEV;
- pm3fb_setup(option);
#endif
-
return pci_register_driver(&pm3fb_driver);
}
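
The pm3fb_fillrect() path added above first clips the requested rectangle against the virtual resolution and then widens the palette colour so one 32-bit register write covers several pixels at 8 and 16 bpp. A minimal stand-alone sketch of that logic follows; it is plain user-space C, and the helper names (clip_rect, widen_color) are illustrative only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

struct rect { unsigned dx, dy, width, height; };

/* Clip a fill rectangle against the virtual screen, as pm3fb_fillrect does. */
static int clip_rect(struct rect *r, unsigned vxres, unsigned vyres)
{
	if (!r->width || !r->height || r->dx >= vxres || r->dy >= vyres)
		return 0;			/* nothing left to draw */
	if (r->dx + r->width > vxres)
		r->width = vxres - r->dx;
	if (r->dy + r->height > vyres)
		r->height = vyres - r->dy;
	return 1;
}

/* Replicate a colour so a single 32-bit write covers several pixels. */
static uint32_t widen_color(uint32_t color, unsigned bpp)
{
	if (bpp == 8)
		color |= color << 8;
	if (bpp <= 16)
		color |= color << 16;
	return color;
}

int main(void)
{
	struct rect r = { 600, 400, 200, 200 };	/* overlaps the right/bottom edge */

	if (clip_rect(&r, 640, 480))
		printf("clipped to %ux%u, color 0x%08x\n",
		       r.width, r.height, widen_color(0x5a, 8));
	return 0;
}

With a 640x480 virtual screen this prints "clipped to 40x80, color 0x5a5a5a5a", matching what the driver would program into PM3Render2D and PM3ForegroundColor.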
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9cf92ba5d6e3..646ec823c168 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -27,7 +27,6 @@
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/console.h>
#include <linux/ioctl.h>
#include <linux/notifier.h>
@@ -46,6 +45,9 @@
#include <asm/ps3fb.h>
#include <asm/ps3.h>
+
+#define DEVICE_NAME "ps3fb"
+
#ifdef PS3FB_DEBUG
#define DPRINTK(fmt, args...) printk("%s: " fmt, __func__ , ##args)
#else
@@ -126,7 +128,6 @@ struct gpu_driver_info {
struct ps3fb_priv {
unsigned int irq_no;
- void *dev;
u64 context_handle, memory_handle;
void *xdr_ea;
@@ -171,7 +172,7 @@ static const struct ps3fb_res_table ps3fb_res[] = {
{ 0, 0, 0, 0 , 0} };
/* default resolution */
-#define GPU_RES_INDEX 0 /* 720 x 480 */
+#define GPU_RES_INDEX 0 /* 720 x 480 */
static const struct fb_videomode ps3fb_modedb[] = {
/* 60 Hz broadcast modes (modes "1" to "5") */
@@ -298,10 +299,9 @@ static const struct fb_videomode ps3fb_modedb[] = {
#define FB_OFF(i) (GPU_OFFSET - VP_OFF(i) % GPU_OFFSET)
static int ps3fb_mode;
-module_param(ps3fb_mode, bool, 0);
-
-static char *mode_option __initdata;
+module_param(ps3fb_mode, int, 0);
+static char *mode_option __devinitdata;
static int ps3fb_get_res_table(u32 xres, u32 yres)
{
@@ -681,15 +681,15 @@ int ps3fb_wait_for_vsync(u32 crtc)
EXPORT_SYMBOL_GPL(ps3fb_wait_for_vsync);
-void ps3fb_flip_ctl(int on)
+void ps3fb_flip_ctl(int on, void *data)
{
+ struct ps3fb_priv *priv = data;
if (on)
- atomic_dec_if_positive(&ps3fb.ext_flip);
+ atomic_dec_if_positive(&priv->ext_flip);
else
- atomic_inc(&ps3fb.ext_flip);
+ atomic_inc(&priv->ext_flip);
}
-EXPORT_SYMBOL_GPL(ps3fb_flip_ctl);
/*
* ioctl
@@ -812,6 +812,7 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
static int ps3fbd(void *arg)
{
+ set_freezable();
while (!kthread_should_stop()) {
try_to_freeze();
set_current_state(TASK_INTERRUPTIBLE);
@@ -851,37 +852,9 @@ static irqreturn_t ps3fb_vsync_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
-#ifndef MODULE
-static int __init ps3fb_setup(char *options)
-{
- char *this_opt;
- int mode = 0;
-
- if (!options || !*options)
- return 0; /* no options */
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
- if (!strncmp(this_opt, "mode:", 5))
- mode = simple_strtoul(this_opt + 5, NULL, 0);
- else
- mode_option = this_opt;
- }
- return mode;
-}
-#endif /* MODULE */
-
- /*
- * Initialisation
- */
-static void ps3fb_platform_release(struct device *device)
-{
- /* This is called when the reference count goes to zero. */
-}
-
-static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo, void *dev)
+static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo,
+ struct ps3_system_bus_device *dev)
{
int error;
@@ -897,7 +870,6 @@ static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo, void *dev)
return -EINVAL;
}
- ps3fb.dev = dev;
error = ps3_irq_plug_setup(PS3_BINDING_CPU_ANY, dinfo->irq.irq_outlet,
&ps3fb.irq_no);
if (error) {
@@ -907,7 +879,7 @@ static int ps3fb_vsync_settings(struct gpu_driver_info *dinfo, void *dev)
}
error = request_irq(ps3fb.irq_no, ps3fb_vsync_interrupt, IRQF_DISABLED,
- "ps3fb vsync", ps3fb.dev);
+ DEVICE_NAME, dev);
if (error) {
printk(KERN_ERR "%s: request_irq failed %d\n", __func__,
error);
@@ -966,16 +938,45 @@ static struct fb_ops ps3fb_ops = {
};
static struct fb_fix_screeninfo ps3fb_fix __initdata = {
- .id = "PS3 FB",
+ .id = DEVICE_NAME,
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE,
};
-static int __init ps3fb_probe(struct platform_device *dev)
+static int ps3fb_set_sync(void)
+{
+ int status;
+
+#ifdef HEAD_A
+ status = lv1_gpu_context_attribute(0x0,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+ 0, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
+ if (status) {
+ printk(KERN_ERR "%s: lv1_gpu_context_attribute DISPLAY_SYNC "
+ "failed: %d\n", __func__, status);
+ return -1;
+ }
+#endif
+#ifdef HEAD_B
+ status = lv1_gpu_context_attribute(0x0,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+ 1, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
+
+ if (status) {
+ printk(KERN_ERR "%s: lv1_gpu_context_attribute DISPLAY_MODE "
+ "failed: %d\n", __func__, status);
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
{
struct fb_info *info;
int retval = -ENOMEM;
+ u32 xres, yres;
u64 ddr_lpar = 0;
u64 lpar_dma_control = 0;
u64 lpar_driver_info = 0;
@@ -986,6 +987,30 @@ static int __init ps3fb_probe(struct platform_device *dev)
unsigned long offset;
struct task_struct *task;
+ status = ps3_open_hv_device(dev);
+ if (status) {
+ printk(KERN_ERR "%s: ps3_open_hv_device failed\n", __func__);
+ goto err;
+ }
+
+ if (!ps3fb_mode)
+ ps3fb_mode = ps3av_get_mode();
+ DPRINTK("ps3av_mode:%d\n", ps3fb_mode);
+
+ if (ps3fb_mode > 0 &&
+ !ps3av_video_mode2res(ps3fb_mode, &xres, &yres)) {
+ ps3fb.res_index = ps3fb_get_res_table(xres, yres);
+ DPRINTK("res_index:%d\n", ps3fb.res_index);
+ } else
+ ps3fb.res_index = GPU_RES_INDEX;
+
+ atomic_set(&ps3fb.f_count, -1); /* fbcon opens ps3fb */
+ atomic_set(&ps3fb.ext_flip, 0); /* for flip with vsync */
+ init_waitqueue_head(&ps3fb.wait_vsync);
+ ps3fb.num_frames = 1;
+
+ ps3fb_set_sync();
+
/* get gpu context handle */
status = lv1_gpu_memory_allocate(DDR_SIZE, 0, 0, 0, 0,
&ps3fb.memory_handle, &ddr_lpar);
@@ -1029,7 +1054,7 @@ static int __init ps3fb_probe(struct platform_device *dev)
* leakage into userspace
*/
memset(ps3fb.xdr_ea, 0, ps3fb_videomemory.size);
- info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
+ info = framebuffer_alloc(sizeof(u32) * 16, &dev->core);
if (!info)
goto err_free_irq;
@@ -1042,7 +1067,7 @@ static int __init ps3fb_probe(struct platform_device *dev)
info->fix.smem_len = ps3fb_videomemory.size - offset;
info->pseudo_palette = info->par;
info->par = NULL;
- info->flags = FBINFO_FLAG_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_READS_FAST;
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0)
@@ -1061,19 +1086,20 @@ static int __init ps3fb_probe(struct platform_device *dev)
if (retval < 0)
goto err_fb_dealloc;
- platform_set_drvdata(dev, info);
+ dev->core.driver_data = info;
printk(KERN_INFO
"fb%d: PS3 frame buffer device, using %ld KiB of video memory\n",
info->node, ps3fb_videomemory.size >> 10);
- task = kthread_run(ps3fbd, info, "ps3fbd");
+ task = kthread_run(ps3fbd, info, DEVICE_NAME);
if (IS_ERR(task)) {
retval = PTR_ERR(task);
goto err_unregister_framebuffer;
}
ps3fb.task = task;
+ ps3av_register_flip_ctl(ps3fb_flip_ctl, &ps3fb);
return 0;
@@ -1084,7 +1110,7 @@ err_fb_dealloc:
err_framebuffer_release:
framebuffer_release(info);
err_free_irq:
- free_irq(ps3fb.irq_no, ps3fb.dev);
+ free_irq(ps3fb.irq_no, dev);
ps3_irq_plug_destroy(ps3fb.irq_no);
err_iounmap_dinfo:
iounmap((u8 __iomem *)ps3fb.dinfo);
@@ -1096,26 +1122,30 @@ err:
return retval;
}
-static void ps3fb_shutdown(struct platform_device *dev)
+static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
{
- ps3fb_flip_ctl(0); /* flip off */
+ int status;
+ struct fb_info *info = dev->core.driver_data;
+
+ DPRINTK(" -> %s:%d\n", __func__, __LINE__);
+
+ ps3fb_flip_ctl(0, &ps3fb); /* flip off */
ps3fb.dinfo->irq.mask = 0;
- free_irq(ps3fb.irq_no, ps3fb.dev);
- ps3_irq_plug_destroy(ps3fb.irq_no);
- iounmap((u8 __iomem *)ps3fb.dinfo);
-}
-void ps3fb_cleanup(void)
-{
- int status;
+ if (info) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ ps3av_register_flip_ctl(NULL, NULL);
if (ps3fb.task) {
struct task_struct *task = ps3fb.task;
ps3fb.task = NULL;
kthread_stop(task);
}
if (ps3fb.irq_no) {
- free_irq(ps3fb.irq_no, ps3fb.dev);
+ free_irq(ps3fb.irq_no, dev);
ps3_irq_plug_destroy(ps3fb.irq_no);
}
iounmap((u8 __iomem *)ps3fb.dinfo);
@@ -1128,134 +1158,69 @@ void ps3fb_cleanup(void)
if (status)
DPRINTK("lv1_gpu_memory_free failed: %d\n", status);
- ps3av_dev_close();
-}
+ ps3_close_hv_device(dev);
+ DPRINTK(" <- %s:%d\n", __func__, __LINE__);
-EXPORT_SYMBOL_GPL(ps3fb_cleanup);
-
-static int ps3fb_remove(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
-
- if (info) {
- unregister_framebuffer(info);
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- }
- ps3fb_cleanup();
return 0;
}
-static struct platform_driver ps3fb_driver = {
- .probe = ps3fb_probe,
- .remove = ps3fb_remove,
- .shutdown = ps3fb_shutdown,
- .driver = { .name = "ps3fb" }
-};
-
-static struct platform_device ps3fb_device = {
- .name = "ps3fb",
- .id = 0,
- .dev = { .release = ps3fb_platform_release }
+static struct ps3_system_bus_driver ps3fb_driver = {
+ .match_id = PS3_MATCH_ID_GRAPHICS,
+ .core.name = DEVICE_NAME,
+ .core.owner = THIS_MODULE,
+ .probe = ps3fb_probe,
+ .remove = ps3fb_shutdown,
+ .shutdown = ps3fb_shutdown,
};
-int ps3fb_set_sync(void)
+static int __init ps3fb_setup(void)
{
- int status;
+ char *options;
-#ifdef HEAD_A
- status = lv1_gpu_context_attribute(0x0,
- L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
- 0, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
- if (status) {
- printk(KERN_ERR
- "%s: lv1_gpu_context_attribute DISPLAY_SYNC failed: %d\n",
- __func__, status);
- return -1;
- }
-#endif
-#ifdef HEAD_B
- status = lv1_gpu_context_attribute(0x0,
- L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
- 1, L1GPU_DISPLAY_SYNC_VSYNC, 0, 0);
-
- if (status) {
- printk(KERN_ERR
- "%s: lv1_gpu_context_attribute DISPLAY_MODE failed: %d\n",
- __func__, status);
- return -1;
- }
-#endif
+#ifdef MODULE
return 0;
-}
-
-EXPORT_SYMBOL_GPL(ps3fb_set_sync);
-
-static int __init ps3fb_init(void)
-{
- int error;
-#ifndef MODULE
- int mode;
- char *option = NULL;
-
- if (fb_get_options("ps3fb", &option))
- goto err;
#endif
- if (!ps3fb_videomemory.address)
- goto err;
-
- error = ps3av_dev_open();
- if (error) {
- printk(KERN_ERR "%s: ps3av_dev_open failed\n", __func__);
- goto err;
- }
+ if (fb_get_options(DEVICE_NAME, &options))
+ return -ENXIO;
- ps3fb_mode = ps3av_get_mode();
- DPRINTK("ps3av_mode:%d\n", ps3fb_mode);
-#ifndef MODULE
- mode = ps3fb_setup(option); /* check boot option */
- if (mode)
- ps3fb_mode = mode;
-#endif
- if (ps3fb_mode > 0) {
- u32 xres, yres;
- ps3av_video_mode2res(ps3fb_mode, &xres, &yres);
- ps3fb.res_index = ps3fb_get_res_table(xres, yres);
- DPRINTK("res_index:%d\n", ps3fb.res_index);
- } else
- ps3fb.res_index = GPU_RES_INDEX;
+ if (!options || !*options)
+ return 0;
- atomic_set(&ps3fb.f_count, -1); /* fbcon opens ps3fb */
- atomic_set(&ps3fb.ext_flip, 0); /* for flip with vsync */
- init_waitqueue_head(&ps3fb.wait_vsync);
- ps3fb.num_frames = 1;
+ while (1) {
+ char *this_opt = strsep(&options, ",");
- error = platform_driver_register(&ps3fb_driver);
- if (!error) {
- error = platform_device_register(&ps3fb_device);
- if (error)
- platform_driver_unregister(&ps3fb_driver);
+ if (!this_opt)
+ break;
+ if (!*this_opt)
+ continue;
+ if (!strncmp(this_opt, "mode:", 5))
+ ps3fb_mode = simple_strtoul(this_opt + 5, NULL, 0);
+ else
+ mode_option = this_opt;
}
+ return 0;
+}
- ps3fb_set_sync();
-
- return error;
+static int __init ps3fb_init(void)
+{
+ if (!ps3fb_videomemory.address || ps3fb_setup())
+ return -ENXIO;
-err:
- return -ENXIO;
+ return ps3_system_bus_driver_register(&ps3fb_driver);
}
-module_init(ps3fb_init);
-
-#ifdef MODULE
static void __exit ps3fb_exit(void)
{
- platform_device_unregister(&ps3fb_device);
- platform_driver_unregister(&ps3fb_driver);
+ DPRINTK(" -> %s:%d\n", __func__, __LINE__);
+ ps3_system_bus_driver_unregister(&ps3fb_driver);
+ DPRINTK(" <- %s:%d\n", __func__, __LINE__);
}
+module_init(ps3fb_init);
module_exit(ps3fb_exit);
MODULE_LICENSE("GPL");
-#endif /* MODULE */
+MODULE_DESCRIPTION("PS3 GPU Frame Buffer Driver");
+MODULE_AUTHOR("Sony Computer Entertainment Inc.");
+MODULE_ALIAS(PS3_MODULE_ALIAS_GRAPHICS);
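
The new ps3fb_setup() above walks the fb_get_options() string with strsep(), taking a "mode:<n>" token into ps3fb_mode and treating anything else as the mode_option string. A stand-alone model of that splitting, assuming the usual video=ps3fb:<options> syntax; the buffer contents are illustrative, and strsep()/strtoul() here stand in for the kernel's strsep()/simple_strtoul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "mode:3,720x480";	/* what video=ps3fb:mode:3,720x480 yields */
	char *options = buf, *this_opt, *mode_option = NULL;
	int mode = 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!*this_opt)
			continue;
		if (!strncmp(this_opt, "mode:", 5))
			mode = strtoul(this_opt + 5, NULL, 0);
		else
			mode_option = this_opt;
	}
	printf("mode=%d mode_option=%s\n", mode, mode_option ? mode_option : "(none)");
	return 0;
}

This prints "mode=3 mode_option=720x480", i.e. the numeric mode wins for ps3fb_mode while any remaining token is kept as a free-form mode string.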
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index df2909ae704c..f9300266044d 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -115,11 +115,11 @@ enum { VO_PAL, VO_NTSC, VO_VGA };
enum { PAL_ARGB1555, PAL_RGB565, PAL_ARGB4444, PAL_ARGB8888 };
struct pvr2_params { unsigned int val; char *name; };
-static struct pvr2_params cables[] __initdata = {
+static struct pvr2_params cables[] __devinitdata = {
{ CT_VGA, "VGA" }, { CT_RGB, "RGB" }, { CT_COMPOSITE, "COMPOSITE" },
};
-static struct pvr2_params outputs[] __initdata = {
+static struct pvr2_params outputs[] __devinitdata = {
{ VO_PAL, "PAL" }, { VO_NTSC, "NTSC" }, { VO_VGA, "VGA" },
};
@@ -147,16 +147,16 @@ static struct pvr2fb_par {
static struct fb_info *fb_info;
-static struct fb_fix_screeninfo pvr2_fix __initdata = {
+static struct fb_fix_screeninfo pvr2_fix __devinitdata = {
.id = "NEC PowerVR2",
- .type = FB_TYPE_PACKED_PIXELS,
- .visual = FB_VISUAL_TRUECOLOR,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
.ypanstep = 1,
.ywrapstep = 1,
- .accel = FB_ACCEL_NONE,
+ .accel = FB_ACCEL_NONE,
};
-static struct fb_var_screeninfo pvr2_var __initdata = {
+static struct fb_var_screeninfo pvr2_var __devinitdata = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
@@ -195,10 +195,6 @@ static unsigned int shdma = PVR2_CASCADE_CHAN;
static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
#endif
-/* Interface used by the world */
-
-int pvr2fb_setup(char*);
-
static int pvr2fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue,
unsigned int transp, struct fb_info *info);
static int pvr2fb_blank(int blank, struct fb_info *info);
@@ -227,12 +223,12 @@ static struct fb_ops pvr2fb_ops = {
#ifdef CONFIG_SH_DMA
.fb_write = pvr2fb_write,
#endif
- .fb_fillrect = cfb_fillrect,
+ .fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
-static struct fb_videomode pvr2_modedb[] __initdata = {
+static struct fb_videomode pvr2_modedb[] __devinitdata = {
/*
* Broadcast video modes (PAL and NTSC). I'm unfamiliar with
* PAL-M and PAL-N, but from what I've read both modes parallel PAL and
@@ -252,7 +248,7 @@ static struct fb_videomode pvr2_modedb[] __initdata = {
/* 640x480 @ 60hz (VGA) */
"vga_640x480", 60, 640, 480, VGA_CLK, 38, 33, 0, 18, 146, 26,
0, FB_VMODE_YWRAP
- },
+ },
};
#define NUM_TOTAL_MODES ARRAY_SIZE(pvr2_modedb)
@@ -262,7 +258,7 @@ static struct fb_videomode pvr2_modedb[] __initdata = {
#define DEFMODE_VGA 2
static int defmode = DEFMODE_NTSC;
-static char *mode_option __initdata = NULL;
+static char *mode_option __devinitdata = NULL;
static inline void pvr2fb_set_pal_type(unsigned int type)
{
@@ -293,7 +289,7 @@ static void set_color_bitfields(struct fb_var_screeninfo *var)
{
switch (var->bits_per_pixel) {
case 16: /* RGB 565 */
- pvr2fb_set_pal_type(PAL_RGB565);
+ pvr2fb_set_pal_type(PAL_RGB565);
var->red.offset = 11; var->red.length = 5;
var->green.offset = 5; var->green.length = 6;
var->blue.offset = 0; var->blue.length = 5;
@@ -306,7 +302,7 @@ static void set_color_bitfields(struct fb_var_screeninfo *var)
var->transp.offset = 0; var->transp.length = 0;
break;
case 32: /* ARGB 8888 */
- pvr2fb_set_pal_type(PAL_ARGB8888);
+ pvr2fb_set_pal_type(PAL_ARGB8888);
var->red.offset = 16; var->red.length = 8;
var->green.offset = 8; var->green.length = 8;
var->blue.offset = 0; var->blue.length = 8;
@@ -337,24 +333,25 @@ static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
((blue & 0xf800) >> 11);
pvr2fb_set_pal_entry(par, regno, tmp);
- ((u16*)(info->pseudo_palette))[regno] = tmp;
break;
case 24: /* RGB 888 */
red >>= 8; green >>= 8; blue >>= 8;
- ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
+ tmp = (red << 16) | (green << 8) | blue;
break;
case 32: /* ARGB 8888 */
red >>= 8; green >>= 8; blue >>= 8;
tmp = (transp << 24) | (red << 16) | (green << 8) | blue;
pvr2fb_set_pal_entry(par, regno, tmp);
- ((u32*)(info->pseudo_palette))[regno] = tmp;
break;
default:
pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
return 1;
}
+ if (regno < 16)
+ ((u32*)(info->pseudo_palette))[regno] = tmp;
+
return 0;
}
@@ -379,13 +376,13 @@ static int pvr2fb_set_par(struct fb_info *info)
var->vmode &= FB_VMODE_MASK;
if (var->vmode & FB_VMODE_INTERLACED && video_output != VO_VGA)
par->is_interlaced = 1;
- /*
+ /*
* XXX: Need to be more creative with this (i.e. allow doublescan for
* PAL/NTSC output).
*/
if (var->vmode & FB_VMODE_DOUBLE && video_output == VO_VGA)
par->is_doublescan = 1;
-
+
par->hsync_total = var->left_margin + var->xres + var->right_margin +
var->hsync_len;
par->vsync_total = var->upper_margin + var->yres + var->lower_margin +
@@ -408,7 +405,7 @@ static int pvr2fb_set_par(struct fb_info *info)
} else {
/* VGA mode */
/* XXX: What else needs to be checked? */
- /*
+ /*
* XXX: We have a little freedom in VGA modes, what ranges
* should be here (i.e. hsync/vsync totals, etc.)?
*/
@@ -419,8 +416,8 @@ static int pvr2fb_set_par(struct fb_info *info)
/* Calculate the remaining offsets */
par->diwstart_h = par->borderstart_h + var->left_margin;
par->diwstart_v = par->borderstart_v + var->upper_margin;
- par->borderstop_h = par->diwstart_h + var->xres +
- var->right_margin;
+ par->borderstop_h = par->diwstart_h + var->xres +
+ var->right_margin;
par->borderstop_v = par->diwstart_v + var->yres +
var->lower_margin;
@@ -465,12 +462,12 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
set_color_bitfields(var);
if (var->vmode & FB_VMODE_YWRAP) {
- if (var->xoffset || var->yoffset < 0 ||
+ if (var->xoffset || var->yoffset < 0 ||
var->yoffset >= var->yres_virtual) {
var->xoffset = var->yoffset = 0;
} else {
if (var->xoffset > var->xres_virtual - var->xres ||
- var->yoffset > var->yres_virtual - var->yres ||
+ var->yoffset > var->yres_virtual - var->yres ||
var->xoffset < 0 || var->yoffset < 0)
var->xoffset = var->yoffset = 0;
}
@@ -478,7 +475,7 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->xoffset = var->yoffset = 0;
}
- /*
+ /*
* XXX: Need to be more creative with this (i.e. allow doublescan for
* PAL/NTSC output).
*/
@@ -507,7 +504,7 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
var->vsync_len = par->borderstop_v +
(par->vsync_total - par->borderstop_v);
}
-
+
hsync_total = var->left_margin + var->xres + var->right_margin +
var->hsync_len;
vtotal = var->upper_margin + var->yres + var->lower_margin +
@@ -531,7 +528,7 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
}
}
-
+
/* Check memory sizes */
line_length = get_line_length(var->xres_virtual, var->bits_per_pixel);
if (line_length * var->yres_virtual > info->fix.smem_len)
@@ -552,7 +549,7 @@ static void pvr2_update_display(struct fb_info *info)
DISP_DIWADDRS);
}
-/*
+/*
* Initialize the video mode. Currently, the 16bpp and 24bpp modes aren't
* very stable. It's probably due to the fact that a lot of the 2D video
* registers are still undocumented.
@@ -592,18 +589,18 @@ static void pvr2_init_display(struct fb_info *info)
/* display window start position */
fb_writel(par->diwstart_h, DISP_DIWHSTRT);
fb_writel((par->diwstart_v << 16) | par->diwstart_v, DISP_DIWVSTRT);
-
+
/* misc. settings */
fb_writel((0x16 << 16) | par->is_lowres, DISP_DIWCONF);
/* clock doubler (for VGA), scan doubler, display enable */
- fb_writel(((video_output == VO_VGA) << 23) |
+ fb_writel(((video_output == VO_VGA) << 23) |
(par->is_doublescan << 1) | 1, DISP_DIWMODE);
/* bits per pixel */
fb_writel(fb_readl(DISP_DIWMODE) | (--bytesperpixel << 2), DISP_DIWMODE);
- /* video enable, color sync, interlace,
+ /* video enable, color sync, interlace,
* hsync and vsync polarity (currently unused) */
fb_writel(0x100 | ((par->is_interlaced /*|4*/) << 4), DISP_SYNCCONF);
}
@@ -657,7 +654,7 @@ static irqreturn_t pvr2fb_interrupt(int irq, void *dev_id)
static int pvr2_init_cable(void)
{
if (cable_type < 0) {
- fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000,
+ fb_writel((fb_readl(PCTRA) & 0xfff0ffff) | 0x000a0000,
PCTRA);
cable_type = (fb_readw(PDTRA) >> 8) & 3;
}
@@ -687,7 +684,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
-
+
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, (unsigned long)buf,
nr_pages, WRITE, 0, pages, NULL);
@@ -700,7 +697,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
}
dma_configure_channel(shdma, 0x12c1);
-
+
dst = (unsigned long)fb_info->screen_base + *ppos;
start = (unsigned long)page_address(pages[0]);
end = (unsigned long)page_address(pages[nr_pages]);
@@ -744,7 +741,7 @@ out_unmap:
kfree(pages);
return ret;
-}
+}
#endif /* CONFIG_SH_DMA */
/**
@@ -765,21 +762,21 @@ out_unmap:
* in for flexibility anyways. Who knows, maybe someone has tv-out on a
* PCI-based version of these things ;-)
*/
-static int __init pvr2fb_common_init(void)
+static int __devinit pvr2fb_common_init(void)
{
struct pvr2fb_par *par = currentpar;
unsigned long modememused, rev;
fb_info->screen_base = ioremap_nocache(pvr2_fix.smem_start,
pvr2_fix.smem_len);
-
+
if (!fb_info->screen_base) {
printk(KERN_ERR "pvr2fb: Failed to remap smem space\n");
goto out_err;
}
par->mmio_base = (unsigned long)ioremap_nocache(pvr2_fix.mmio_start,
- pvr2_fix.mmio_len);
+ pvr2_fix.mmio_len);
if (!par->mmio_base) {
printk(KERN_ERR "pvr2fb: Failed to remap mmio space\n");
goto out_err;
@@ -820,7 +817,7 @@ static int __init pvr2fb_common_init(void)
printk("fb%d: %s (rev %ld.%ld) frame buffer device, using %ldk/%ldk of video memory\n",
fb_info->node, fb_info->fix.id, (rev >> 4) & 0x0f, rev & 0x0f,
modememused >> 10, (unsigned long)(fb_info->fix.smem_len >> 10));
- printk("fb%d: Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
+ printk("fb%d: Mode %dx%d-%d pitch = %ld cable: %s video output: %s\n",
fb_info->node, fb_info->var.xres, fb_info->var.yres,
fb_info->var.bits_per_pixel,
get_line_length(fb_info->var.xres, fb_info->var.bits_per_pixel),
@@ -878,8 +875,8 @@ static int __init pvr2fb_dc_init(void)
video_output = VO_NTSC;
}
}
-
- /*
+
+ /*
* Nothing exciting about the DC PVR2 .. only a measly 8MiB.
*/
pvr2_fix.smem_start = 0xa5000000; /* RAM starts here */
@@ -903,7 +900,7 @@ static int __init pvr2fb_dc_init(void)
return pvr2fb_common_init();
}
-static void pvr2fb_dc_exit(void)
+static void __exit pvr2fb_dc_exit(void)
{
if (fb_info->screen_base) {
iounmap(fb_info->screen_base);
@@ -987,13 +984,13 @@ static int __init pvr2fb_pci_init(void)
return pci_register_driver(&pvr2fb_pci_driver);
}
-static void pvr2fb_pci_exit(void)
+static void __exit pvr2fb_pci_exit(void)
{
pci_unregister_driver(&pvr2fb_pci_driver);
}
#endif /* CONFIG_PCI */
-static int __init pvr2_get_param(const struct pvr2_params *p, const char *s,
+static int __devinit pvr2_get_param(const struct pvr2_params *p, const char *s,
int val, int size)
{
int i;
@@ -1021,7 +1018,7 @@ static int __init pvr2_get_param(const struct pvr2_params *p, const char *s,
*/
#ifndef MODULE
-int __init pvr2fb_setup(char *options)
+static int __init pvr2fb_setup(char *options)
{
char *this_opt;
char cable_arg[80];
@@ -1061,7 +1058,7 @@ static struct pvr2_board {
int (*init)(void);
void (*exit)(void);
char name[16];
-} board_list[] = {
+} board_driver[] = {
#ifdef CONFIG_SH_DREAMCAST
{ pvr2fb_dc_init, pvr2fb_dc_exit, "Sega DC PVR2" },
#endif
@@ -1071,7 +1068,7 @@ static struct pvr2_board {
{ 0, },
};
-int __init pvr2fb_init(void)
+static int __init pvr2fb_init(void)
{
int i, ret = -ENODEV;
int size;
@@ -1085,18 +1082,17 @@ int __init pvr2fb_init(void)
#endif
size = sizeof(struct fb_info) + sizeof(struct pvr2fb_par) + 16 * sizeof(u32);
- fb_info = kmalloc(size, GFP_KERNEL);
+ fb_info = kzalloc(size, GFP_KERNEL);
if (!fb_info) {
printk(KERN_ERR "Failed to allocate memory for fb_info\n");
return -ENOMEM;
}
- memset(fb_info, 0, size);
currentpar = (struct pvr2fb_par *)(fb_info + 1);
- for (i = 0; i < ARRAY_SIZE(board_list); i++) {
- struct pvr2_board *pvr_board = board_list + i;
+ for (i = 0; i < ARRAY_SIZE(board_driver); i++) {
+ struct pvr2_board *pvr_board = board_driver + i;
if (!pvr_board->init)
continue;
@@ -1118,13 +1114,13 @@ static void __exit pvr2fb_exit(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(board_list); i++) {
- struct pvr2_board *pvr_board = board_list + i;
+ for (i = 0; i < ARRAY_SIZE(board_driver); i++) {
+ struct pvr2_board *pvr_board = board_driver + i;
if (pvr_board->exit)
pvr_board->exit();
}
-
+
#ifdef CONFIG_SH_STORE_QUEUES
sq_unmap(pvr2fb_map);
#endif
@@ -1139,4 +1135,3 @@ module_exit(pvr2fb_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Framebuffer driver for NEC PowerVR 2 based graphics boards");
MODULE_LICENSE("GPL");
-
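
The pvr2fb_setcolreg() change above (and the matching sisfb and tridentfb hunks later in this series) only writes the software pseudo-palette for regno < 16, because the truecolor pseudo-palette consumed by fbcon holds just 16 entries; higher register numbers still reach the hardware palette but must not index the array. A minimal sketch of that guard, user-space C with illustrative names:

#include <stdint.h>
#include <stdio.h>

#define PSEUDO_PALETTE_SIZE 16		/* all that fbcon ever reads back */

/* Guarded pseudo-palette store: registers above 15 are silently ignored
 * instead of scribbling past the end of the 16-entry array. */
static void set_pseudo_palette(uint32_t *pal, unsigned regno, uint32_t value)
{
	if (regno < PSEUDO_PALETTE_SIZE)
		pal[regno] = value;
}

int main(void)
{
	uint32_t palette[PSEUDO_PALETTE_SIZE] = { 0 };

	set_pseudo_palette(palette, 5, 0x00ff00ff);	/* stored */
	set_pseudo_palette(palette, 200, 0xdeadbeef);	/* out of range, dropped */
	printf("entry 5 = 0x%08x\n", palette[5]);
	return 0;
}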
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 48536c3e58a4..4beac1df617b 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -95,7 +95,7 @@ static int __init q40fb_probe(struct platform_device *dev)
/* mapped in q40/config.c */
q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR;
- info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 0fe547842c64..5c47968e7f21 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -307,7 +307,7 @@ static int riva_bl_get_level_brightness(struct riva_par *par,
static int riva_bl_update_status(struct backlight_device *bd)
{
- struct riva_par *par = class_get_devdata(&bd->class_dev);
+ struct riva_par *par = bl_get_data(bd);
U032 tmp_pcrt, tmp_pmc;
int level;
@@ -2146,7 +2146,7 @@ static void __devexit rivafb_remove(struct pci_dev *pd)
* ------------------------------------------------------------------------- */
#ifndef MODULE
-static int __init rivafb_setup(char *options)
+static int __devinit rivafb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/riva/riva_hw.c
index 70bfd78eca81..13307703a9f0 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/riva/riva_hw.c
@@ -1223,6 +1223,8 @@ static int CalcVClock
}
}
}
+
+ /* non-zero: M/N/P/clock values assigned. zero: error (not set) */
return (DeltaOld != 0xFFFFFFFF);
}
/*
@@ -1240,7 +1242,10 @@ int CalcStateExt
int dotClock
)
{
- int pixelDepth, VClk, m, n, p;
+ int pixelDepth;
+ int uninitialized_var(VClk),uninitialized_var(m),
+ uninitialized_var(n), uninitialized_var(p);
+
/*
* Save mode parameters.
*/
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 3d7507ad55f6..b855f4a34afe 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2174,11 +2174,10 @@ static int __devinit savage_init_fb_info(struct fb_info *info,
#if defined(CONFIG_FB_SAVAGE_ACCEL)
/* FIFO size + padding for commands */
- info->pixmap.addr = kmalloc(8*1024, GFP_KERNEL);
+ info->pixmap.addr = kcalloc(8, 1024, GFP_KERNEL);
err = -ENOMEM;
if (info->pixmap.addr) {
- memset(info->pixmap.addr, 0, 8*1024);
info->pixmap.size = 8*1024;
info->pixmap.scan_align = 4;
info->pixmap.buf_align = 4;
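
Several hunks in this series (savagefb above, and valkyriefb, matrox_w1 and the w1 core below) replace a kmalloc()+memset() pair with a single kzalloc()/kcalloc() call that returns already-zeroed memory. The user-space analogue, shown only as a sketch of the idiom:

#include <stdlib.h>
#include <string.h>

struct pixmap { unsigned char buf[8 * 1024]; };

int main(void)
{
	/* Old style: allocate, then clear by hand. */
	struct pixmap *a = malloc(sizeof(*a));
	if (a)
		memset(a, 0, sizeof(*a));

	/* New style: one call that hands back zeroed memory. */
	struct pixmap *b = calloc(1, sizeof(*b));

	free(a);
	free(b);
	return 0;
}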
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index ebb6756aea08..4fb16240c04d 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -752,7 +752,7 @@ static int __init sgivwfb_probe(struct platform_device *dev)
struct fb_info *info;
char *monitor;
- info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
if (!info)
return -ENOMEM;
par = info->par;
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index d5e2d9c27847..d53bf6945f0c 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -479,7 +479,7 @@ struct sis_video_info {
struct fb_var_screeninfo default_var;
struct fb_fix_screeninfo sisfb_fix;
- u32 pseudo_palette[17];
+ u32 pseudo_palette[16];
struct sisfb_monitor {
u16 hmin;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a30e1e13d8be..e8ccace01252 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1405,12 +1405,18 @@ sisfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
}
break;
case 16:
+ if (regno >= 16)
+ break;
+
((u32 *)(info->pseudo_palette))[regno] =
(red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
case 32:
+ if (regno >= 16)
+ break;
+
red >>= 8;
green >>= 8;
blue >>= 8;
@@ -5789,7 +5795,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ivideo->warncount = 0;
ivideo->chip_id = pdev->device;
ivideo->chip_vendor = pdev->vendor;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &ivideo->revision_id);
+ ivideo->revision_id = pdev->revision;
ivideo->SiS_Pr.ChipRevision = ivideo->revision_id;
pci_read_config_word(pdev, PCI_COMMAND, &reg16);
ivideo->sisvga_enabled = reg16 & 0x01;
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 62fa5500361d..5eff28ce4f4d 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -1348,7 +1348,7 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
f_ddprintk("found device : %s\n", spec->name);
par->dev = pdev;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &par->revision);
+ par->revision = pdev->revision;
fix->mmio_start = pci_resource_start(pdev,0);
fix->mmio_len = 0x400000;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index f0fde6ea7c36..89facb73edfc 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1625,8 +1625,7 @@ tgafb_register(struct device *dev)
par->tga_regs_base = mem_base + TGA_REGS_OFFSET;
par->tga_type = tga_type;
if (tga_bus_pci)
- pci_read_config_byte(to_pci_dev(dev), PCI_REVISION_ID,
- &par->tga_chip_rev);
+ par->tga_chip_rev = (to_pci_dev(dev))->revision;
if (tga_bus_tc)
par->tga_chip_rev = TGA_READ_REG(par, TGA_START_REG) & 0xff;
@@ -1635,7 +1634,7 @@ tgafb_register(struct device *dev)
FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT;
info->fbops = &tgafb_ops;
info->screen_base = par->tga_fb_base;
- info->pseudo_palette = (void *)(par + 1);
+ info->pseudo_palette = par->palette;
/* This should give a reasonable default video mode. */
if (tga_bus_pci) {
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 55e8aa450bfa..c699864b6f4a 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -976,7 +976,7 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
- if (bpp==8) {
+ if (bpp == 8) {
t_outb(0xFF,0x3C6);
t_outb(regno,0x3C8);
@@ -984,19 +984,21 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
t_outb(green>>10,0x3C9);
t_outb(blue>>10,0x3C9);
- } else if (bpp == 16) { /* RGB 565 */
- u32 col;
-
- col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
- ((blue & 0xF800) >> 11);
- col |= col << 16;
- ((u32 *)(info->pseudo_palette))[regno] = col;
- } else if (bpp == 32) /* ARGB 8888 */
- ((u32*)info->pseudo_palette)[regno] =
- ((transp & 0xFF00) <<16) |
- ((red & 0xFF00) << 8) |
- ((green & 0xFF00)) |
- ((blue & 0xFF00)>>8);
+ } else if (regno < 16) {
+ if (bpp == 16) { /* RGB 565 */
+ u32 col;
+
+ col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
+ ((blue & 0xF800) >> 11);
+ col |= col << 16;
+ ((u32 *)(info->pseudo_palette))[regno] = col;
+ } else if (bpp == 32) /* ARGB 8888 */
+ ((u32*)info->pseudo_palette)[regno] =
+ ((transp & 0xFF00) <<16) |
+ ((red & 0xFF00) << 8) |
+ ((green & 0xFF00)) |
+ ((blue & 0xFF00)>>8);
+ }
// debug("exit\n");
return 0;
diff --git a/drivers/video/tx3912fb.c b/drivers/video/tx3912fb.c
index 07389ba01eff..e6f7c78da68b 100644
--- a/drivers/video/tx3912fb.c
+++ b/drivers/video/tx3912fb.c
@@ -291,7 +291,7 @@ int __init tx3912fb_init(void)
fb_info.fbops = &tx3912fb_ops;
fb_info.var = tx3912fb_var;
fb_info.fix = tx3912fb_fix;
- fb_info.pseudo_palette = pseudo_palette;
+ fb_info.pseudo_palette = cfb8;
fb_info.flags = FBINFO_DEFAULT;
/* Clear the framebuffer */
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index ad66f070acb8..7b0cef9ca8f9 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -356,10 +356,9 @@ int __init valkyriefb_init(void)
}
#endif /* ppc (!CONFIG_MAC) */
- p = kmalloc(sizeof(*p), GFP_ATOMIC);
+ p = kzalloc(sizeof(*p), GFP_ATOMIC);
if (p == 0)
return -ENOMEM;
- memset(p, 0, sizeof(*p));
/* Map in frame buffer and registers */
if (!request_mem_region(frame_buffer_phys, 0x100000, "valkyriefb")) {
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 30c0b948852b..4c3a63308df1 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -68,26 +68,26 @@ static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
/* CRT timing register sets */
-struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
-
-struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
-struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
-struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
-
-struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
-struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
-struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
-struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
-
-struct svga_timing_regs vt8623_timing_regs = {
+static struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
+
+static struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
+
+static struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
+static struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
+static struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
+static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
+
+static struct svga_timing_regs vt8623_timing_regs = {
vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
@@ -903,7 +903,7 @@ static void __exit vt8623fb_cleanup(void)
/* Driver Initialisation */
-int __init vt8623fb_init(void)
+static int __init vt8623fb_init(void)
{
#ifndef MODULE
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index ca75b3ad3a2e..6854fd6b9714 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -1,8 +1,6 @@
-menu "Dallas's 1-wire bus"
- depends on HAS_IOMEM
-
-config W1
+menuconfig W1
tristate "Dallas's 1-wire support"
+ depends on HAS_IOMEM
---help---
Dallas' 1-wire bus is useful to connect slow 1-pin devices
such as iButtons and thermal sensors.
@@ -12,8 +10,10 @@ config W1
This W1 support can also be built as a module. If so, the module
will be called wire.ko.
+if W1
+
config W1_CON
- depends on CONNECTOR && W1
+ depends on CONNECTOR
bool "Userspace communication over connector"
default y
---help---
@@ -27,4 +27,4 @@ config W1_CON
source drivers/w1/masters/Kconfig
source drivers/w1/slaves/Kconfig
-endmenu
+endif # W1
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 8f779338f744..8236d447adf5 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -3,11 +3,10 @@
#
menu "1-wire Bus Masters"
- depends on W1
config W1_MASTER_MATROX
tristate "Matrox G400 transport layer for 1-wire"
- depends on W1 && PCI
+ depends on PCI
help
Say Y here if you want to communicate with your 1-wire devices
using Matrox's G400 GPIO pins.
@@ -17,7 +16,7 @@ config W1_MASTER_MATROX
config W1_MASTER_DS2490
tristate "DS2490 USB <-> W1 transport layer for 1-wire"
- depends on W1 && USB
+ depends on USB
help
Say Y here if you want to have a driver for DS2490 based USB <-> W1 bridges,
for example DS9490*.
@@ -27,7 +26,7 @@ config W1_MASTER_DS2490
config W1_MASTER_DS2482
tristate "Maxim DS2482 I2C to 1-Wire bridge"
- depends on I2C && W1 && EXPERIMENTAL
+ depends on I2C && EXPERIMENTAL
help
If you say yes here you get support for the Maxim DS2482
I2C to 1-Wire bridge.
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 6f9d880ab2e9..d356da5709fc 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -164,7 +164,7 @@ static int __devinit matrox_w1_probe(struct pci_dev *pdev, const struct pci_devi
if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
return -ENODEV;
- dev = kmalloc(sizeof(struct matrox_device) +
+ dev = kzalloc(sizeof(struct matrox_device) +
sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
dev_err(&pdev->dev,
@@ -173,7 +173,6 @@ static int __devinit matrox_w1_probe(struct pci_dev *pdev, const struct pci_devi
return -ENOMEM;
}
- memset(dev, 0, sizeof(struct matrox_device) + sizeof(struct w1_bus_master));
dev->bus_master = (struct w1_bus_master *)(dev + 1);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index df95d6c2cefa..3df29a122f84 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -3,25 +3,21 @@
#
menu "1-wire Slaves"
- depends on W1
config W1_SLAVE_THERM
tristate "Thermal family implementation"
- depends on W1
help
Say Y here if you want to connect 1-wire thermal sensors to your
wire.
config W1_SLAVE_SMEM
tristate "Simple 64bit memory family implementation"
- depends on W1
help
Say Y here if you want to connect 1-wire
simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
config W1_SLAVE_DS2433
tristate "4kb EEPROM family support (DS2433)"
- depends on W1
help
Say Y here if you want to use a 1-wire
4kb EEPROM family device (DS2433).
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 8ea17a53eed8..858c16a544c2 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -91,8 +91,9 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
}
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
-static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+static ssize_t w1_f23_read_bin(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
@@ -199,8 +200,9 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
return 0;
}
-static ssize_t w1_f23_write_bin(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+static ssize_t w1_f23_write_bin(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len, idx;
@@ -252,7 +254,6 @@ static struct bin_attribute w1_f23_bin_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = W1_EEPROM_SIZE,
.read = w1_f23_read_bin,
@@ -265,10 +266,9 @@ static int w1_f23_add_slave(struct w1_slave *sl)
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *data;
- data = kmalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- memset(data, 0, sizeof(struct w1_f23_data));
sl->family_data = data;
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 88a37fbccc3f..ed6b0576208c 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -68,8 +68,9 @@ int w1_ds2760_write(struct device *dev, char *buf, int addr, size_t count)
return w1_ds2760_io(dev, buf, addr, count, 1);
}
-static ssize_t w1_ds2760_read_bin(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
return w1_ds2760_read(dev, buf, off, count);
@@ -121,8 +122,6 @@ static void release_bat_id(int id)
mutex_lock(&bat_idr_lock);
idr_remove(&bat_idr, id);
mutex_unlock(&bat_idr_lock);
-
- return;
}
static int w1_ds2760_add_slave(struct w1_slave *sl)
@@ -174,8 +173,6 @@ static void w1_ds2760_remove_slave(struct w1_slave *sl)
platform_device_unregister(pdev);
release_bat_id(id);
sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr);
-
- return;
}
static struct w1_family_ops w1_ds2760_fops = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 1a6937dc190b..4318935678c5 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -42,13 +42,13 @@ static u8 bad_roms[][9] = {
{}
};
-static ssize_t w1_therm_read_bin(struct kobject *, char *, loff_t, size_t);
+static ssize_t w1_therm_read_bin(struct kobject *, struct bin_attribute *,
+ char *, loff_t, size_t);
static struct bin_attribute w1_therm_bin_attr = {
.attr = {
.name = "w1_slave",
.mode = S_IRUGO,
- .owner = THIS_MODULE,
},
.size = W1_SLAVE_DATA_SIZE,
.read = w1_therm_read_bin,
@@ -159,7 +159,9 @@ static int w1_therm_check_rom(u8 rom[9])
return 0;
}
-static ssize_t w1_therm_read_bin(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t w1_therm_read_bin(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
struct w1_master *dev = sl->master;
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 7d6876dbcc96..8d7ab74170d5 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -105,7 +105,9 @@ static ssize_t w1_slave_read_name(struct device *dev, struct device_attribute *a
return sprintf(buf, "%s\n", sl->name);
}
-static ssize_t w1_slave_read_id(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t w1_slave_read_id(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -128,7 +130,6 @@ static struct bin_attribute w1_slave_attr_bin_id = {
.attr = {
.name = "id",
.mode = S_IRUGO,
- .owner = THIS_MODULE,
},
.size = 8,
.read = w1_slave_read_id,
@@ -136,7 +137,9 @@ static struct bin_attribute w1_slave_attr_bin_id = {
/* Default family */
-static ssize_t w1_default_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t w1_default_write(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -153,7 +156,9 @@ out_up:
return count;
}
-static ssize_t w1_default_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
+static ssize_t w1_default_read(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -167,7 +172,6 @@ static struct bin_attribute w1_default_attr = {
.attr = {
.name = "rw",
.mode = S_IRUGO | S_IWUSR,
- .owner = THIS_MODULE,
},
.size = PAGE_SIZE,
.read = w1_default_read,
@@ -516,7 +520,7 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
int err;
struct w1_netlink_msg msg;
- sl = kmalloc(sizeof(struct w1_slave), GFP_KERNEL);
+ sl = kzalloc(sizeof(struct w1_slave), GFP_KERNEL);
if (!sl) {
dev_err(&dev->dev,
"%s: failed to allocate new slave device.\n",
@@ -524,7 +528,6 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
return -ENOMEM;
}
- memset(sl, 0, sizeof(*sl));
sl->owner = THIS_MODULE;
sl->master = dev;
@@ -801,6 +804,7 @@ static int w1_control(void *data)
struct w1_master *dev, *n;
int have_to_wait = 0;
+ set_freezable();
while (!kthread_should_stop() || have_to_wait) {
have_to_wait = 0;
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 258defdb2efd..2fbd8dd16df5 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -41,7 +41,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
/*
* We are in process context(kernel thread), so can sleep.
*/
- dev = kmalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
printk(KERN_ERR
"Failed to allocate %zd bytes for new w1 device.\n",
@@ -49,7 +49,6 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
return NULL;
}
- memset(dev, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master));
dev->bus_master = (struct w1_bus_master *)(dev + 1);
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
new file mode 100644
index 000000000000..56592f0d6cef
--- /dev/null
+++ b/drivers/xen/Makefile
@@ -0,0 +1,2 @@
+obj-y += grant-table.o
+obj-y += xenbus/
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
new file mode 100644
index 000000000000..ea94dbabf9a9
--- /dev/null
+++ b/drivers/xen/grant-table.c
@@ -0,0 +1,582 @@
+/******************************************************************************
+ * grant_table.c
+ *
+ * Granting foreign access to our memory reservation.
+ *
+ * Copyright (c) 2005-2006, Christopher Clark
+ * Copyright (c) 2004-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+#include <xen/interface/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <asm/pgtable.h>
+#include <asm/sync_bitops.h>
+
+
+/* External tools reserve first few grant table entries. */
+#define NR_RESERVED_ENTRIES 8
+#define GNTTAB_LIST_END 0xffffffff
+#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+
+static grant_ref_t **gnttab_list;
+static unsigned int nr_grant_frames;
+static unsigned int boot_max_nr_grant_frames;
+static int gnttab_free_count;
+static grant_ref_t gnttab_free_head;
+static DEFINE_SPINLOCK(gnttab_list_lock);
+
+static struct grant_entry *shared;
+
+static struct gnttab_free_callback *gnttab_free_callback_list;
+
+static int gnttab_expand(unsigned int req_entries);
+
+#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+
+static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
+{
+ return &gnttab_list[(entry) / RPP][(entry) % RPP];
+}
+/* This can be used as an l-value */
+#define gnttab_entry(entry) (*__gnttab_entry(entry))
+
+static int get_free_entries(unsigned count)
+{
+ unsigned long flags;
+ int ref, rc;
+ grant_ref_t head;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+
+ if ((gnttab_free_count < count) &&
+ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ return rc;
+ }
+
+ ref = head = gnttab_free_head;
+ gnttab_free_count -= count;
+ while (count-- > 1)
+ head = gnttab_entry(head);
+ gnttab_free_head = gnttab_entry(head);
+ gnttab_entry(head) = GNTTAB_LIST_END;
+
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+
+ return ref;
+}
+
+static void do_free_callbacks(void)
+{
+ struct gnttab_free_callback *callback, *next;
+
+ callback = gnttab_free_callback_list;
+ gnttab_free_callback_list = NULL;
+
+ while (callback != NULL) {
+ next = callback->next;
+ if (gnttab_free_count >= callback->count) {
+ callback->next = NULL;
+ callback->fn(callback->arg);
+ } else {
+ callback->next = gnttab_free_callback_list;
+ gnttab_free_callback_list = callback;
+ }
+ callback = next;
+ }
+}
+
+static inline void check_free_callbacks(void)
+{
+ if (unlikely(gnttab_free_callback_list))
+ do_free_callbacks();
+}
+
+static void put_free_entry(grant_ref_t ref)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ gnttab_entry(ref) = gnttab_free_head;
+ gnttab_free_head = ref;
+ gnttab_free_count++;
+ check_free_callbacks();
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void update_grant_entry(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
+{
+ /*
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ */
+ shared[ref].frame = frame;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = flags;
+}
+
+/*
+ * Public grant-issuing interface functions
+ */
+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int readonly)
+{
+ update_grant_entry(ref, domid, frame,
+ GTF_permit_access | (readonly ? GTF_readonly : 0));
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
+
+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+ int readonly)
+{
+ int ref;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+ u16 nflags;
+
+ nflags = shared[ref].flags;
+
+ return (nflags & (GTF_reading|GTF_writing));
+}
+EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ u16 flags, nflags;
+
+ nflags = shared[ref].flags;
+ do {
+ flags = nflags;
+ if (flags & (GTF_reading|GTF_writing)) {
+ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ return 0;
+ }
+ } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+
+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+ unsigned long page)
+{
+ if (gnttab_end_foreign_access_ref(ref, readonly)) {
+ put_free_entry(ref);
+ if (page != 0)
+ free_page(page);
+ } else {
+ /* XXX This needs to be fixed so that the ref and page are
+ placed on a list to be freed up later. */
+ printk(KERN_WARNING
+ "WARNING: leaking g.e. and page still in use!\n");
+ }
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
+
+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
+{
+ int ref;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
+
+void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
+ unsigned long pfn)
+{
+ update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+
+ /*
+ * If the transfer has not even started yet, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = shared[ref].flags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = shared[ref].frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
+
+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
+{
+ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
+ put_free_entry(ref);
+ return frame;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
+
+void gnttab_free_grant_reference(grant_ref_t ref)
+{
+ put_free_entry(ref);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
+
+void gnttab_free_grant_references(grant_ref_t head)
+{
+ grant_ref_t ref;
+ unsigned long flags;
+ int count = 1;
+ if (head == GNTTAB_LIST_END)
+ return;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ ref = head;
+ while (gnttab_entry(ref) != GNTTAB_LIST_END) {
+ ref = gnttab_entry(ref);
+ count++;
+ }
+ gnttab_entry(ref) = gnttab_free_head;
+ gnttab_free_head = head;
+ gnttab_free_count += count;
+ check_free_callbacks();
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
+
+int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
+{
+ int h = get_free_entries(count);
+
+ if (h < 0)
+ return -ENOSPC;
+
+ *head = h;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
+
+int gnttab_empty_grant_references(const grant_ref_t *private_head)
+{
+ return (*private_head == GNTTAB_LIST_END);
+}
+EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
+
+int gnttab_claim_grant_reference(grant_ref_t *private_head)
+{
+ grant_ref_t g = *private_head;
+ if (unlikely(g == GNTTAB_LIST_END))
+ return -ENOSPC;
+ *private_head = gnttab_entry(g);
+ return g;
+}
+EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
+
+void gnttab_release_grant_reference(grant_ref_t *private_head,
+ grant_ref_t release)
+{
+ gnttab_entry(release) = *private_head;
+ *private_head = release;
+}
+EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
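+
+/*
+ * Illustrative sketch, not part of this patch: reserving a batch of
+ * references up front and claiming them one per request, as a ring
+ * frontend might.  "otherend_id" and "mfn" are placeholders.
+ *
+ *	grant_ref_t gref_head;
+ *	int ref;
+ *
+ *	if (gnttab_alloc_grant_references(16, &gref_head) < 0)
+ *		return -ENOSPC;
+ *
+ *	ref = gnttab_claim_grant_reference(&gref_head);
+ *	if (ref >= 0)
+ *		gnttab_grant_foreign_access_ref(ref, otherend_id, mfn, 0);
+ *
+ *	... later, once the peer has unmapped the page ...
+ *	if (gnttab_end_foreign_access_ref(ref, 0))
+ *		gnttab_release_grant_reference(&gref_head, ref);
+ *
+ *	gnttab_free_grant_references(gref_head);
+ */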
+
+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+ void (*fn)(void *), void *arg, u16 count)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ if (callback->next)
+ goto out;
+ callback->fn = fn;
+ callback->arg = arg;
+ callback->count = count;
+ callback->next = gnttab_free_callback_list;
+ gnttab_free_callback_list = callback;
+ check_free_callbacks();
+out:
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
+
+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
+{
+ struct gnttab_free_callback **pcb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
+ if (*pcb == callback) {
+ *pcb = callback->next;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
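+
+/*
+ * Illustrative sketch, not part of this patch: a driver that cannot get
+ * enough grant references can ask to be called back once at least
+ * "count" entries are free again.  "info", RING_SIZE and
+ * restart_queue_callback() are hypothetical names.
+ *
+ *	if (gnttab_alloc_grant_references(RING_SIZE, &info->gref_head) < 0) {
+ *		gnttab_request_free_callback(&info->callback,
+ *					     restart_queue_callback,
+ *					     info, RING_SIZE);
+ *		return 1;	... retry from the callback later ...
+ *	}
+ *
+ * A pending request can be withdrawn with gnttab_cancel_free_callback(),
+ * e.g. when the device is torn down first.
+ */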
+
+static int grow_gnttab_list(unsigned int more_frames)
+{
+ unsigned int new_nr_grant_frames, extra_entries, i;
+
+ new_nr_grant_frames = nr_grant_frames + more_frames;
+ extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
+
+ for (i = nr_grant_frames; i < new_nr_grant_frames; i++) {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
+ if (!gnttab_list[i])
+ goto grow_nomem;
+ }
+
+ for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+ gnttab_entry(i) = i + 1;
+
+ gnttab_entry(i) = gnttab_free_head;
+ gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ gnttab_free_count += extra_entries;
+
+ nr_grant_frames = new_nr_grant_frames;
+
+ check_free_callbacks();
+
+ return 0;
+
+grow_nomem:
+ for ( ; i >= nr_grant_frames; i--)
+ free_page((unsigned long) gnttab_list[i]);
+ return -ENOMEM;
+}
+
+static unsigned int __max_nr_grant_frames(void)
+{
+ struct gnttab_query_size query;
+ int rc;
+
+ query.dom = DOMID_SELF;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
+ if ((rc < 0) || (query.status != GNTST_okay))
+ return 4; /* Legacy max supported number of frames */
+
+ return query.max_nr_frames;
+}
+
+static inline unsigned int max_nr_grant_frames(void)
+{
+ unsigned int xen_max = __max_nr_grant_frames();
+
+ if (xen_max > boot_max_nr_grant_frames)
+ return boot_max_nr_grant_frames;
+ return xen_max;
+}
+
+static int map_pte_fn(pte_t *pte, struct page *pmd_page,
+ unsigned long addr, void *data)
+{
+ unsigned long **frames = (unsigned long **)data;
+
+ set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
+ (*frames)++;
+ return 0;
+}
+
+static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
+ unsigned long addr, void *data)
+{
+ set_pte_at(&init_mm, addr, pte, __pte(0));
+ return 0;
+}
+
+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
+{
+ struct gnttab_setup_table setup;
+ unsigned long *frames;
+ unsigned int nr_gframes = end_idx + 1;
+ int rc;
+
+ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
+ if (!frames)
+ return -ENOMEM;
+
+ setup.dom = DOMID_SELF;
+ setup.nr_frames = nr_gframes;
+ setup.frame_list = frames;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
+ if (rc == -ENOSYS) {
+ kfree(frames);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || setup.status);
+
+ if (shared == NULL) {
+ struct vm_struct *area;
+ area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
+ BUG_ON(area == NULL);
+ shared = area->addr;
+ }
+ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
+ PAGE_SIZE * nr_gframes,
+ map_pte_fn, &frames);
+ BUG_ON(rc);
+ frames -= nr_gframes; /* adjust after map_pte_fn() */
+
+ kfree(frames);
+
+ return 0;
+}
+
+static int gnttab_resume(void)
+{
+ if (max_nr_grant_frames() < nr_grant_frames)
+ return -ENOSYS;
+ return gnttab_map(0, nr_grant_frames - 1);
+}
+
+static int gnttab_suspend(void)
+{
+ apply_to_page_range(&init_mm, (unsigned long)shared,
+ PAGE_SIZE * nr_grant_frames,
+ unmap_pte_fn, NULL);
+
+ return 0;
+}
+
+static int gnttab_expand(unsigned int req_entries)
+{
+ int rc;
+ unsigned int cur, extra;
+
+ cur = nr_grant_frames;
+ extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
+ GREFS_PER_GRANT_FRAME);
+ if (cur + extra > max_nr_grant_frames())
+ return -ENOSPC;
+
+ rc = gnttab_map(cur, cur + extra - 1);
+ if (rc == 0)
+ rc = grow_gnttab_list(extra);
+
+ return rc;
+}
+
+static int __devinit gnttab_init(void)
+{
+ int i;
+ unsigned int max_nr_glist_frames;
+ unsigned int nr_init_grefs;
+
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ nr_grant_frames = 1;
+ boot_max_nr_grant_frames = __max_nr_grant_frames();
+
+ /* Determine the maximum number of frames required for the
+ * grant reference free list on the current hypervisor.
+ */
+ max_nr_glist_frames = (boot_max_nr_grant_frames *
+ GREFS_PER_GRANT_FRAME /
+ (PAGE_SIZE / sizeof(grant_ref_t)));
+
+ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
+ GFP_KERNEL);
+ if (gnttab_list == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_grant_frames; i++) {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
+ if (gnttab_list[i] == NULL)
+ goto ini_nomem;
+ }
+
+ if (gnttab_resume() < 0)
+ return -ENODEV;
+
+ nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+
+ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
+ gnttab_entry(i) = i + 1;
+
+ gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
+ gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
+ gnttab_free_head = NR_RESERVED_ENTRIES;
+
+ printk("Grant table initialized\n");
+ return 0;
+
+ ini_nomem:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)gnttab_list[i]);
+ kfree(gnttab_list);
+ return -ENOMEM;
+}
+
+core_initcall(gnttab_init);
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
new file mode 100644
index 000000000000..5571f5b84223
--- /dev/null
+++ b/drivers/xen/xenbus/Makefile
@@ -0,0 +1,7 @@
+obj-y += xenbus.o
+
+xenbus-objs =
+xenbus-objs += xenbus_client.o
+xenbus-objs += xenbus_comms.o
+xenbus-objs += xenbus_xs.o
+xenbus-objs += xenbus_probe.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
new file mode 100644
index 000000000000..9fd2f70ab46d
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -0,0 +1,569 @@
+/******************************************************************************
+ * Client-facing interface for the Xenbus driver. In other words, the
+ * interface between the Xenbus and the device-specific code, be it the
+ * frontend or the backend of that driver.
+ *
+ * Copyright (C) 2005 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+
+const char *xenbus_strstate(enum xenbus_state state)
+{
+ static const char *const name[] = {
+ [ XenbusStateUnknown ] = "Unknown",
+ [ XenbusStateInitialising ] = "Initialising",
+ [ XenbusStateInitWait ] = "InitWait",
+ [ XenbusStateInitialised ] = "Initialised",
+ [ XenbusStateConnected ] = "Connected",
+ [ XenbusStateClosing ] = "Closing",
+ [ XenbusStateClosed ] = "Closed",
+ };
+ return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
+}
+EXPORT_SYMBOL_GPL(xenbus_strstate);
+
+/**
+ * xenbus_watch_path - register a watch
+ * @dev: xenbus device
+ * @path: path to watch
+ * @watch: watch to register
+ * @callback: callback to register
+ *
+ * Register a @watch on the given path, using the given xenbus_watch structure
+ * for storage, and the given @callback function as the callback. Return 0 on
+ * success, or -errno on error. On success, the given @path will be saved as
+ * @watch->node, and remains the caller's to free. On error, @watch->node will
+ * be NULL, the device will switch to %XenbusStateClosing, and the error will
+ * be saved in the store.
+ */
+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
+ struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int))
+{
+ int err;
+
+ watch->node = path;
+ watch->callback = callback;
+
+ err = register_xenbus_watch(watch);
+
+ if (err) {
+ watch->node = NULL;
+ watch->callback = NULL;
+ xenbus_dev_fatal(dev, err, "adding watch on %s", path);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_watch_path);
+
+
+/**
+ * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
+ * @dev: xenbus device
+ * @watch: watch to register
+ * @callback: callback to register
+ * @pathfmt: format of path to watch
+ *
+ * Register a watch on the path built from the given @pathfmt and its
+ * arguments, using the given xenbus_watch structure for storage, and the
+ * given @callback function as the callback.  Return 0 on success, or -errno
+ * on error.  On success, the generated path is saved as @watch->node and
+ * becomes the caller's to kfree().  On error, @watch->node will be NULL, so
+ * the caller has nothing to free, the device will switch to
+ * %XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_watch_pathfmt(struct xenbus_device *dev,
+ struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int),
+ const char *pathfmt, ...)
+{
+ int err;
+ va_list ap;
+ char *path;
+
+ va_start(ap, pathfmt);
+ path = kvasprintf(GFP_KERNEL, pathfmt, ap);
+ va_end(ap);
+
+ if (!path) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
+ return -ENOMEM;
+ }
+ err = xenbus_watch_path(dev, path, watch, callback);
+
+ if (err)
+ kfree(path);
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
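+
+/*
+ * Illustrative sketch, not part of this patch: a frontend watching a node
+ * in its backend's store directory.  "info->watch", the "hotplug-status"
+ * node and backend_changed() are hypothetical; dev->otherend is filled in
+ * by the probe code later in this series.
+ *
+ *	err = xenbus_watch_pathfmt(dev, &info->watch, backend_changed,
+ *				   "%s/hotplug-status", dev->otherend);
+ *	if (err)
+ *		return err;
+ */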
+
+
+/**
+ * xenbus_switch_state
+ * @dev: xenbus device
+ * @state: new state
+ *
+ * Advertise in the store a change of the given driver to the given @state.
+ * Return 0 on success, or -errno on error. On error, the device will switch
+ * to XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
+{
+ /* We check whether the state is currently set to the given value, and
+ if not, then the state is set. We don't want to unconditionally
+ write the given state, because we don't want to fire watches
+ unnecessarily. Furthermore, if the node has gone, we don't write
+ to it, as the device will be tearing down, and we don't want to
+ resurrect that directory.
+
+ Note that, because of this cached value of our state, this function
+ will not work inside a Xenstore transaction (something it
+ previously attempted to do) because dev->state would not get reset if
+ the transaction was aborted.
+
+ */
+
+ int current_state;
+ int err;
+
+ if (state == dev->state)
+ return 0;
+
+ err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
+ &current_state);
+ if (err != 1)
+ return 0;
+
+ err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
+ if (err) {
+ if (state != XenbusStateClosing) /* Avoid looping */
+ xenbus_dev_fatal(dev, err, "writing new state");
+ return err;
+ }
+
+ dev->state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_switch_state);
+
+int xenbus_frontend_closed(struct xenbus_device *dev)
+{
+ xenbus_switch_state(dev, XenbusStateClosed);
+ complete(&dev->down);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
+
+/**
+ * Return the path to the error node for the given device, or NULL on failure.
+ * If the value returned is non-NULL, then it is the caller's to kfree.
+ */
+static char *error_path(struct xenbus_device *dev)
+{
+ return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
+}
+
+
+static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
+ const char *fmt, va_list ap)
+{
+ int ret;
+ unsigned int len;
+ char *printf_buffer = NULL;
+ char *path_buffer = NULL;
+
+#define PRINTF_BUFFER_SIZE 4096
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ if (printf_buffer == NULL)
+ goto fail;
+
+ len = sprintf(printf_buffer, "%i ", -err);
+ ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
+
+ BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
+
+ dev_err(&dev->dev, "%s\n", printf_buffer);
+
+ path_buffer = error_path(dev);
+
+ if (path_buffer == NULL) {
+ dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+ dev->nodename, printf_buffer);
+ goto fail;
+ }
+
+ if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
+ dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+ dev->nodename, printf_buffer);
+ goto fail;
+ }
+
+fail:
+ kfree(printf_buffer);
+ kfree(path_buffer);
+}
+
+
+/**
+ * xenbus_dev_error
+ * @dev: xenbus device
+ * @err: error to report
+ * @fmt: error message format
+ *
+ * Report the given negative errno into the store, along with the given
+ * formatted message.
+ */
+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xenbus_va_dev_error(dev, err, fmt, ap);
+ va_end(ap);
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_error);
+
+/**
+ * xenbus_dev_fatal
+ * @dev: xenbus device
+ * @err: error to report
+ * @fmt: error message format
+ *
+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
+ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
+ * closedown of this driver and its peer.
+ */
+
+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xenbus_va_dev_error(dev, err, fmt, ap);
+ va_end(ap);
+
+ xenbus_switch_state(dev, XenbusStateClosing);
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
+
+/**
+ * xenbus_grant_ring
+ * @dev: xenbus device
+ * @ring_mfn: mfn of ring to grant
+ *
+ * Grant access to the given @ring_mfn to the peer of the given device. Return
+ * 0 on success, or -errno on error. On error, the device will switch to
+ * XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+{
+ int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
+ if (err < 0)
+ xenbus_dev_fatal(dev, err, "granting access to ring page");
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+
+
+/**
+ * Allocate an event channel for the given xenbus_device, assigning the newly
+ * created local port to *port. Return 0 on success, or -errno on error. On
+ * error, the device will switch to XenbusStateClosing, and the error will be
+ * saved in the store.
+ */
+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+{
+ struct evtchn_alloc_unbound alloc_unbound;
+ int err;
+
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = dev->otherend_id;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (err)
+ xenbus_dev_fatal(dev, err, "allocating event channel");
+ else
+ *port = alloc_unbound.port;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
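+
+/*
+ * Illustrative sketch, not part of this patch: typical frontend ring setup
+ * built from the two helpers above.  virt_to_mfn() is assumed from the Xen
+ * page helpers, xenbus_printf() from xenbus_xs.c in this series, and the
+ * "ring-ref"/"event-channel" keys follow the usual frontend convention;
+ * error handling is minimal.
+ *
+ *	void *ring = (void *)__get_free_page(GFP_KERNEL);
+ *	int err, ref, evtchn;
+ *
+ *	if (!ring)
+ *		return -ENOMEM;
+ *	ref = xenbus_grant_ring(dev, virt_to_mfn(ring));
+ *	if (ref < 0)
+ *		return ref;
+ *	err = xenbus_alloc_evtchn(dev, &evtchn);
+ *	if (err)
+ *		return err;
+ *	xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%d", ref);
+ *	xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%d", evtchn);
+ *	xenbus_switch_state(dev, XenbusStateInitialised);
+ */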
+
+
+/**
+ * Bind to an existing interdomain event channel in another domain. Returns 0
+ * on success and stores the local port in *port. On error, returns -errno,
+ * switches the device to XenbusStateClosing, and saves the error in XenStore.
+ */
+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
+{
+ struct evtchn_bind_interdomain bind_interdomain;
+ int err;
+
+ bind_interdomain.remote_dom = dev->otherend_id;
+ bind_interdomain.remote_port = remote_port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
+ if (err)
+ xenbus_dev_fatal(dev, err,
+ "binding to event channel %d from domain %d",
+ remote_port, dev->otherend_id);
+ else
+ *port = bind_interdomain.local_port;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
+
+
+/**
+ * Free an existing event channel. Returns 0 on success or -errno on error.
+ */
+int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+{
+ struct evtchn_close close;
+ int err;
+
+ close.port = port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+ if (err)
+ xenbus_dev_error(dev, err, "freeing event channel %d", port);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
+
+
+/**
+ * xenbus_map_ring_valloc
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @vaddr: pointer to address to be filled out by mapping
+ *
+ * Based on Rusty Russell's skeleton driver's map_page.
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
+ * page to that address, and sets *vaddr to that address.
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
+ * or -ENOMEM on error. If an error is returned, the device will switch to
+ * XenbusStateClosing and the error message will be saved in XenStore.
+ */
+int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+ struct gnttab_map_grant_ref op = {
+ .flags = GNTMAP_host_map,
+ .ref = gnt_ref,
+ .dom = dev->otherend_id,
+ };
+ struct vm_struct *area;
+
+ *vaddr = NULL;
+
+ area = alloc_vm_area(PAGE_SIZE);
+ if (!area)
+ return -ENOMEM;
+
+ op.host_addr = (unsigned long)area->addr;
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status != GNTST_okay) {
+ free_vm_area(area);
+ xenbus_dev_fatal(dev, op.status,
+ "mapping in shared page %d from domain %d",
+ gnt_ref, dev->otherend_id);
+ return op.status;
+ }
+
+ /* Stuff the handle in an unused field */
+ area->phys_addr = (unsigned long)op.handle;
+
+ *vaddr = area->addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
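+
+/*
+ * Illustrative sketch, not part of this patch: how a backend might map the
+ * ring page whose grant reference the frontend advertised.  "ring_ref" is
+ * assumed to have been read from the frontend's store directory.
+ *
+ *	void *ring;
+ *	int err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
+ *
+ *	if (err)
+ *		return err;
+ *	... use the shared page at "ring" ...
+ *	xenbus_unmap_ring_vfree(dev, ring);
+ */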
+
+
+/**
+ * xenbus_map_ring
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @handle: pointer to grant handle to be filled
+ * @vaddr: address to be mapped to
+ *
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring does not allocate the virtual address space (you must do
+ * this yourself!). It only maps in the page to the specified address.
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
+ * or -ENOMEM on error. If an error is returned, the device will switch to
+ * XenbusStateClosing and the error message will be saved in XenStore.
+ */
+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
+ grant_handle_t *handle, void *vaddr)
+{
+ struct gnttab_map_grant_ref op = {
+ .host_addr = (unsigned long)vaddr,
+ .flags = GNTMAP_host_map,
+ .ref = gnt_ref,
+ .dom = dev->otherend_id,
+ };
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status != GNTST_okay) {
+ xenbus_dev_fatal(dev, op.status,
+ "mapping in shared page %d from domain %d",
+ gnt_ref, dev->otherend_id);
+ } else
+ *handle = op.handle;
+
+ return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring);
+
+
+/**
+ * xenbus_unmap_ring_vfree
+ * @dev: xenbus device
+ * @vaddr: addr to unmap
+ *
+ * Based on Rusty Russell's skeleton driver's unmap_page.
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
+ * xenbus_map_ring_valloc (it will free the virtual address space).
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
+{
+ struct vm_struct *area;
+ struct gnttab_unmap_grant_ref op = {
+ .host_addr = (unsigned long)vaddr,
+ };
+
+ /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
+ * method so that we don't have to muck with vmalloc internals here.
+ * We could force the user to hang on to their struct vm_struct from
+ * xenbus_map_ring_valloc, but these 6 lines considerably simplify
+ * this API.
+ */
+ read_lock(&vmlist_lock);
+ for (area = vmlist; area != NULL; area = area->next) {
+ if (area->addr == vaddr)
+ break;
+ }
+ read_unlock(&vmlist_lock);
+
+ if (!area) {
+ xenbus_dev_error(dev, -ENOENT,
+ "can't find mapped virtual address %p", vaddr);
+ return GNTST_bad_virt_addr;
+ }
+
+ op.handle = (grant_handle_t)area->phys_addr;
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status == GNTST_okay)
+ free_vm_area(area);
+ else
+ xenbus_dev_error(dev, op.status,
+ "unmapping page at handle %d error %d",
+ (int16_t)area->phys_addr, op.status);
+
+ return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+
+/**
+ * xenbus_unmap_ring
+ * @dev: xenbus device
+ * @handle: grant handle
+ * @vaddr: addr to unmap
+ *
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring(struct xenbus_device *dev,
+ grant_handle_t handle, void *vaddr)
+{
+ struct gnttab_unmap_grant_ref op = {
+ .host_addr = (unsigned long)vaddr,
+ .handle = handle,
+ };
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status != GNTST_okay)
+ xenbus_dev_error(dev, op.status,
+ "unmapping page at handle %d error %d",
+ handle, op.status);
+
+ return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
+
+
+/**
+ * xenbus_read_driver_state
+ * @path: path for driver
+ *
+ * Return the state of the driver rooted at the given store path, or
+ * XenbusStateUnknown if no state can be read.
+ */
+enum xenbus_state xenbus_read_driver_state(const char *path)
+{
+ enum xenbus_state result;
+ int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
+ if (err)
+ result = XenbusStateUnknown;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
new file mode 100644
index 000000000000..6efbe3f29ca5
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -0,0 +1,233 @@
+/******************************************************************************
+ * xenbus_comms.c
+ *
+ * Low level code to talk to Xen Store: ringbuffer and event channel.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <xen/xenbus.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include "xenbus_comms.h"
+
+static int xenbus_irq;
+
+static DECLARE_WORK(probe_work, xenbus_probe);
+
+static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+
+static irqreturn_t wake_waiting(int irq, void *unused)
+{
+ if (unlikely(xenstored_ready == 0)) {
+ xenstored_ready = 1;
+ schedule_work(&probe_work);
+ }
+
+ wake_up(&xb_waitq);
+ return IRQ_HANDLED;
+}
+
+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+ return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+ XENSTORE_RING_IDX prod,
+ char *buf, uint32_t *len)
+{
+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+ *len = XENSTORE_RING_SIZE - (prod - cons);
+ return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+ XENSTORE_RING_IDX prod,
+ const char *buf, uint32_t *len)
+{
+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+ if ((prod - cons) < *len)
+ *len = prod - cons;
+ return buf + MASK_XENSTORE_IDX(cons);
+}
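+
+/*
+ * Worked example (illustrative): assuming XENSTORE_RING_SIZE is 1024, with
+ * cons == 1020 and prod == 1030 the producer has wrapped.  get_input_chunk()
+ * first returns 4 bytes starting at offset 1020; once the consumer index has
+ * advanced past the end of the ring, the next call returns the remaining
+ * 6 bytes from offset 0.  xb_read() below simply loops until all requested
+ * bytes have been copied out.
+ */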
+
+/**
+ * xb_write - low level write
+ * @data: buffer to send
+ * @len: length of buffer
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int xb_write(const void *data, unsigned len)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ XENSTORE_RING_IDX cons, prod;
+ int rc;
+
+ while (len != 0) {
+ void *dst;
+ unsigned int avail;
+
+ rc = wait_event_interruptible(
+ xb_waitq,
+ (intf->req_prod - intf->req_cons) !=
+ XENSTORE_RING_SIZE);
+ if (rc < 0)
+ return rc;
+
+ /* Read indexes, then verify. */
+ cons = intf->req_cons;
+ prod = intf->req_prod;
+ if (!check_indexes(cons, prod)) {
+ intf->req_cons = intf->req_prod = 0;
+ return -EIO;
+ }
+
+ dst = get_output_chunk(cons, prod, intf->req, &avail);
+ if (avail == 0)
+ continue;
+ if (avail > len)
+ avail = len;
+
+ /* Must write data /after/ reading the consumer index. */
+ mb();
+
+ memcpy(dst, data, avail);
+ data += avail;
+ len -= avail;
+
+ /* Other side must not see new producer until data is there. */
+ wmb();
+ intf->req_prod += avail;
+
+ /* Implies mb(): other side will see the updated producer. */
+ notify_remote_via_evtchn(xen_store_evtchn);
+ }
+
+ return 0;
+}
+
+int xb_data_to_read(void)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ return (intf->rsp_cons != intf->rsp_prod);
+}
+
+int xb_wait_for_data_to_read(void)
+{
+ return wait_event_interruptible(xb_waitq, xb_data_to_read());
+}
+
+int xb_read(void *data, unsigned len)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ XENSTORE_RING_IDX cons, prod;
+ int rc;
+
+ while (len != 0) {
+ unsigned int avail;
+ const char *src;
+
+ rc = xb_wait_for_data_to_read();
+ if (rc < 0)
+ return rc;
+
+ /* Read indexes, then verify. */
+ cons = intf->rsp_cons;
+ prod = intf->rsp_prod;
+ if (!check_indexes(cons, prod)) {
+ intf->rsp_cons = intf->rsp_prod = 0;
+ return -EIO;
+ }
+
+ src = get_input_chunk(cons, prod, intf->rsp, &avail);
+ if (avail == 0)
+ continue;
+ if (avail > len)
+ avail = len;
+
+ /* Must read data /after/ reading the producer index. */
+ rmb();
+
+ memcpy(data, src, avail);
+ data += avail;
+ len -= avail;
+
+ /* Other side must not see free space until we've copied out */
+ mb();
+ intf->rsp_cons += avail;
+
+ pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
+
+ /* Implies mb(): other side will see the updated consumer. */
+ notify_remote_via_evtchn(xen_store_evtchn);
+ }
+
+ return 0;
+}
+
+/**
+ * xb_init_comms - Set up interrupt handler off store event channel.
+ */
+int xb_init_comms(void)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ int err;
+
+ if (intf->req_prod != intf->req_cons)
+ printk(KERN_ERR "XENBUS request ring is not quiescent "
+ "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
+
+ if (intf->rsp_prod != intf->rsp_cons) {
+ printk(KERN_WARNING "XENBUS response ring is not quiescent "
+ "(%08x:%08x): fixing up\n",
+ intf->rsp_cons, intf->rsp_prod);
+ intf->rsp_cons = intf->rsp_prod;
+ }
+
+ if (xenbus_irq)
+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+
+ err = bind_evtchn_to_irqhandler(
+ xen_store_evtchn, wake_waiting,
+ 0, "xenbus", &xb_waitq);
+ if (err <= 0) {
+ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+ return err;
+ }
+
+ xenbus_irq = err;
+
+ return 0;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
new file mode 100644
index 000000000000..c21db7513736
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -0,0 +1,46 @@
+/*
+ * Private include for xenbus communications.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_COMMS_H
+#define _XENBUS_COMMS_H
+
+int xs_init(void);
+int xb_init_comms(void);
+
+/* Low level routines. */
+int xb_write(const void *data, unsigned len);
+int xb_read(void *data, unsigned len);
+int xb_data_to_read(void);
+int xb_wait_for_data_to_read(void);
+int xs_input_avail(void);
+extern struct xenstore_domain_interface *xen_store_interface;
+extern int xen_store_evtchn;
+
+#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
new file mode 100644
index 000000000000..0b769f7c4a48
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -0,0 +1,935 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...) \
+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
+ __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+int xen_store_evtchn;
+struct xenstore_domain_interface *xen_store_interface;
+static unsigned long xen_store_mfn;
+
+static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
+
+static void wait_for_devices(struct xenbus_driver *xendrv);
+
+static int xenbus_probe_frontend(const char *type, const char *name);
+
+static void xenbus_dev_shutdown(struct device *_dev);
+
+/* If something in array of ids matches this device, return it. */
+static const struct xenbus_device_id *
+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
+{
+ for (; *arr->devicetype != '\0'; arr++) {
+ if (!strcmp(arr->devicetype, dev->devicetype))
+ return arr;
+ }
+ return NULL;
+}
+
+int xenbus_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct xenbus_driver *drv = to_xenbus_driver(_drv);
+
+ if (!drv->ids)
+ return 0;
+
+ return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
+}
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
+{
+ nodename = strchr(nodename, '/');
+ if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
+ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+ return -EINVAL;
+ }
+
+ strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
+ if (!strchr(bus_id, '/')) {
+ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+ return -EINVAL;
+ }
+ *strchr(bus_id, '/') = '-';
+ return 0;
+}
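+
+/*
+ * For example (illustrative), the store node "device/vbd/51712" yields the
+ * bus id "vbd-51712", and "device/vif/0" yields "vif-0".
+ */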
+
+
+static void free_otherend_details(struct xenbus_device *dev)
+{
+ kfree(dev->otherend);
+ dev->otherend = NULL;
+}
+
+
+static void free_otherend_watch(struct xenbus_device *dev)
+{
+ if (dev->otherend_watch.node) {
+ unregister_xenbus_watch(&dev->otherend_watch);
+ kfree(dev->otherend_watch.node);
+ dev->otherend_watch.node = NULL;
+ }
+}
+
+
+int read_otherend_details(struct xenbus_device *xendev,
+ char *id_node, char *path_node)
+{
+ int err = xenbus_gather(XBT_NIL, xendev->nodename,
+ id_node, "%i", &xendev->otherend_id,
+ path_node, NULL, &xendev->otherend,
+ NULL);
+ if (err) {
+ xenbus_dev_fatal(xendev, err,
+ "reading other end details from %s",
+ xendev->nodename);
+ return err;
+ }
+ if (strlen(xendev->otherend) == 0 ||
+ !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
+ xenbus_dev_fatal(xendev, -ENOENT,
+ "unable to read other end from %s. "
+ "missing or inaccessible.",
+ xendev->nodename);
+ free_otherend_details(xendev);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+ return read_otherend_details(xendev, "backend-id", "backend");
+}
+
+
+/* Bus type for frontend drivers. */
+static struct xen_bus_type xenbus_frontend = {
+ .root = "device",
+ .levels = 2, /* device/type/<id> */
+ .get_bus_id = frontend_bus_id,
+ .probe = xenbus_probe_frontend,
+ .bus = {
+ .name = "xen",
+ .match = xenbus_match,
+ .probe = xenbus_dev_probe,
+ .remove = xenbus_dev_remove,
+ .shutdown = xenbus_dev_shutdown,
+ },
+};
+
+static void otherend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ struct xenbus_device *dev =
+ container_of(watch, struct xenbus_device, otherend_watch);
+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+ enum xenbus_state state;
+
+ /* Protect us against watches firing on old details when the otherend
+ details change, say immediately after a resume. */
+ if (!dev->otherend ||
+ strncmp(dev->otherend, vec[XS_WATCH_PATH],
+ strlen(dev->otherend))) {
+ dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]);
+ return;
+ }
+
+ state = xenbus_read_driver_state(dev->otherend);
+
+ dev_dbg(&dev->dev, "state is %d, (%s), %s, %s",
+ state, xenbus_strstate(state), dev->otherend_watch.node,
+ vec[XS_WATCH_PATH]);
+
+ /*
+ * Ignore xenbus transitions during shutdown. This prevents us doing
+ * work that can fail e.g., when the rootfs is gone.
+ */
+ if (system_state > SYSTEM_RUNNING) {
+ struct xen_bus_type *bus;
+
+ bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
+ /* If we're frontend, drive the state machine to Closed. */
+ /* This should cause the backend to release our resources. */
+ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+ xenbus_frontend_closed(dev);
+ return;
+ }
+
+ if (drv->otherend_changed)
+ drv->otherend_changed(dev, state);
+}
+
+
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+ free_otherend_watch(dev);
+ free_otherend_details(dev);
+
+ return drv->read_otherend_details(dev);
+}
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+ return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
+ "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_dev_probe(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
+ const struct xenbus_device_id *id;
+ int err;
+
+ DPRINTK("%s", dev->nodename);
+
+ if (!drv->probe) {
+ err = -ENODEV;
+ goto fail;
+ }
+
+ id = match_device(drv->ids, dev);
+ if (!id) {
+ err = -ENODEV;
+ goto fail;
+ }
+
+ err = talk_to_otherend(dev);
+ if (err) {
+ dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
+ dev->nodename);
+ return err;
+ }
+
+ err = drv->probe(dev, id);
+ if (err)
+ goto fail;
+
+ err = watch_otherend(dev);
+ if (err) {
+ dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
+ dev->nodename);
+ return err;
+ }
+
+ return 0;
+fail:
+ xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
+ xenbus_switch_state(dev, XenbusStateClosed);
+ return -ENODEV;
+}
+
+int xenbus_dev_remove(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
+
+ DPRINTK("%s", dev->nodename);
+
+ free_otherend_watch(dev);
+ free_otherend_details(dev);
+
+ if (drv->remove)
+ drv->remove(dev);
+
+ xenbus_switch_state(dev, XenbusStateClosed);
+ return 0;
+}
+
+static void xenbus_dev_shutdown(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ unsigned long timeout = 5*HZ;
+
+ DPRINTK("%s", dev->nodename);
+
+ get_device(&dev->dev);
+ if (dev->state != XenbusStateConnected) {
+ printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
+ dev->nodename, xenbus_strstate(dev->state));
+ goto out;
+ }
+ xenbus_switch_state(dev, XenbusStateClosing);
+ timeout = wait_for_completion_timeout(&dev->down, timeout);
+ if (!timeout)
+ printk(KERN_INFO "%s: %s timeout closing device\n",
+ __func__, dev->nodename);
+ out:
+ put_device(&dev->dev);
+}
+
+int xenbus_register_driver_common(struct xenbus_driver *drv,
+ struct xen_bus_type *bus,
+ struct module *owner,
+ const char *mod_name)
+{
+ drv->driver.name = drv->name;
+ drv->driver.bus = &bus->bus;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+
+ return driver_register(&drv->driver);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner, const char *mod_name)
+{
+ int ret;
+
+ drv->read_otherend_details = read_backend_details;
+
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+ owner, mod_name);
+ if (ret)
+ return ret;
+
+ /* If this driver is loaded as a module wait for devices to attach. */
+ wait_for_devices(drv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
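+
+/*
+ * Illustrative sketch, not part of this patch: the shape of a minimal
+ * frontend driver using this registration path.  The struct xenbus_driver
+ * field layout and the xenbus_register_frontend() wrapper are assumed from
+ * xen/xenbus.h in this series; examplefront_probe() and friends are
+ * hypothetical.
+ *
+ *	static const struct xenbus_device_id examplefront_ids[] = {
+ *		{ "vexample" },
+ *		{ "" }
+ *	};
+ *
+ *	static struct xenbus_driver examplefront = {
+ *		.name			= "vexample",
+ *		.ids			= examplefront_ids,
+ *		.probe			= examplefront_probe,
+ *		.otherend_changed	= examplefront_backend_changed,
+ *	};
+ *
+ *	static int __init examplefront_init(void)
+ *	{
+ *		return xenbus_register_frontend(&examplefront);
+ *	}
+ *	module_init(examplefront_init);
+ */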
+
+void xenbus_unregister_driver(struct xenbus_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
+
+struct xb_find_info
+{
+ struct xenbus_device *dev;
+ const char *nodename;
+};
+
+static int cmp_dev(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct xb_find_info *info = data;
+
+ if (!strcmp(xendev->nodename, info->nodename)) {
+ info->dev = xendev;
+ get_device(dev);
+ return 1;
+ }
+ return 0;
+}
+
+struct xenbus_device *xenbus_device_find(const char *nodename,
+ struct bus_type *bus)
+{
+ struct xb_find_info info = { .dev = NULL, .nodename = nodename };
+
+ bus_for_each_dev(bus, NULL, &info, cmp_dev);
+ return info.dev;
+}
+
+static int cleanup_dev(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct xb_find_info *info = data;
+ int len = strlen(info->nodename);
+
+ DPRINTK("%s", info->nodename);
+
+ /* Match the info->nodename path, or any subdirectory of that path. */
+ if (strncmp(xendev->nodename, info->nodename, len))
+ return 0;
+
+ /* If the node name is longer, ensure it really is a subdirectory. */
+ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
+ return 0;
+
+ info->dev = xendev;
+ get_device(dev);
+ return 1;
+}
+
+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
+{
+ struct xb_find_info info = { .nodename = path };
+
+ do {
+ info.dev = NULL;
+ bus_for_each_dev(bus, NULL, &info, cleanup_dev);
+ if (info.dev) {
+ device_unregister(&info.dev->dev);
+ put_device(&info.dev->dev);
+ }
+ } while (info.dev);
+}
+
+static void xenbus_dev_release(struct device *dev)
+{
+ if (dev)
+ kfree(to_xenbus_device(dev));
+}
+
+static ssize_t xendev_show_nodename(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
+}
+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
+
+static ssize_t xendev_show_devtype(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
+}
+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
+
+
+int xenbus_probe_node(struct xen_bus_type *bus,
+ const char *type,
+ const char *nodename)
+{
+ int err;
+ struct xenbus_device *xendev;
+ size_t stringlen;
+ char *tmpstring;
+
+ enum xenbus_state state = xenbus_read_driver_state(nodename);
+
+ if (state != XenbusStateInitialising) {
+ /* Device is not new, so ignore it. This can happen if a
+ device is going away after switching to Closed. */
+ return 0;
+ }
+
+ stringlen = strlen(nodename) + 1 + strlen(type) + 1;
+ xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
+ if (!xendev)
+ return -ENOMEM;
+
+ xendev->state = XenbusStateInitialising;
+
+ /* Copy the strings into the extra space. */
+
+ tmpstring = (char *)(xendev + 1);
+ strcpy(tmpstring, nodename);
+ xendev->nodename = tmpstring;
+
+ tmpstring += strlen(tmpstring) + 1;
+ strcpy(tmpstring, type);
+ xendev->devicetype = tmpstring;
+ init_completion(&xendev->down);
+
+ xendev->dev.bus = &bus->bus;
+ xendev->dev.release = xenbus_dev_release;
+
+ err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
+ if (err)
+ goto fail;
+
+ /* Register with generic device framework. */
+ err = device_register(&xendev->dev);
+ if (err)
+ goto fail;
+
+ err = device_create_file(&xendev->dev, &dev_attr_nodename);
+ if (err)
+ goto fail_unregister;
+
+ err = device_create_file(&xendev->dev, &dev_attr_devtype);
+ if (err)
+ goto fail_remove_file;
+
+ return 0;
+fail_remove_file:
+ device_remove_file(&xendev->dev, &dev_attr_nodename);
+fail_unregister:
+ device_unregister(&xendev->dev);
+fail:
+ kfree(xendev);
+ return err;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(const char *type, const char *name)
+{
+ char *nodename;
+ int err;
+
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
+ xenbus_frontend.root, type, name);
+ if (!nodename)
+ return -ENOMEM;
+
+ DPRINTK("%s", nodename);
+
+ err = xenbus_probe_node(&xenbus_frontend, type, nodename);
+ kfree(nodename);
+ return err;
+}
+
+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
+{
+ int err = 0;
+ char **dir;
+ unsigned int dir_n = 0;
+ int i;
+
+ dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ for (i = 0; i < dir_n; i++) {
+ err = bus->probe(type, dir[i]);
+ if (err)
+ break;
+ }
+ kfree(dir);
+ return err;
+}
+
+int xenbus_probe_devices(struct xen_bus_type *bus)
+{
+ int err = 0;
+ char **dir;
+ unsigned int i, dir_n;
+
+ dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ for (i = 0; i < dir_n; i++) {
+ err = xenbus_probe_device_type(bus, dir[i]);
+ if (err)
+ break;
+ }
+ kfree(dir);
+ return err;
+}
+
+static unsigned int char_count(const char *str, char c)
+{
+ unsigned int i, ret = 0;
+
+ for (i = 0; str[i]; i++)
+ if (str[i] == c)
+ ret++;
+ return ret;
+}
+
+static int strsep_len(const char *str, char c, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; str[i]; i++)
+ if (str[i] == c) {
+ if (len == 0)
+ return i;
+ len--;
+ }
+ return (len == 0) ? i : -ERANGE;
+}
+
+void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
+{
+ int exists, rootlen;
+ struct xenbus_device *dev;
+ char type[BUS_ID_SIZE];
+ const char *p, *root;
+
+ if (char_count(node, '/') < 2)
+ return;
+
+ exists = xenbus_exists(XBT_NIL, node, "");
+ if (!exists) {
+ xenbus_cleanup_devices(node, &bus->bus);
+ return;
+ }
+
+ /* backend/<type>/... or device/<type>/... */
+ p = strchr(node, '/') + 1;
+ snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
+ type[BUS_ID_SIZE-1] = '\0';
+
+ rootlen = strsep_len(node, '/', bus->levels);
+ if (rootlen < 0)
+ return;
+ root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
+ if (!root)
+ return;
+
+ dev = xenbus_device_find(root, &bus->bus);
+ if (!dev)
+ xenbus_probe_node(bus, type, root);
+ else
+ put_device(&dev->dev);
+
+ kfree(root);
+}
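+
+/*
+ * Worked example (illustrative): a watch event for "device/vif/0/state" on
+ * the frontend bus (levels == 2) gives type "vif" and root "device/vif/0".
+ * If no device is registered for that root yet it is probed; if the node
+ * has disappeared, the devices under it are cleaned up instead.
+ */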
+
+static void frontend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ DPRINTK("");
+
+ xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+ .node = "device",
+ .callback = frontend_changed,
+};
+
+static int suspend_dev(struct device *dev, void *data)
+{
+ int err = 0;
+ struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ DPRINTK("");
+
+ if (dev->driver == NULL)
+ return 0;
+ drv = to_xenbus_driver(dev->driver);
+ xdev = container_of(dev, struct xenbus_device, dev);
+ if (drv->suspend)
+ err = drv->suspend(xdev);
+ if (err)
+ printk(KERN_WARNING
+ "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
+ return 0;
+}
+
+static int suspend_cancel_dev(struct device *dev, void *data)
+{
+ int err = 0;
+ struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ DPRINTK("");
+
+ if (dev->driver == NULL)
+ return 0;
+ drv = to_xenbus_driver(dev->driver);
+ xdev = container_of(dev, struct xenbus_device, dev);
+ if (drv->suspend_cancel)
+ err = drv->suspend_cancel(xdev);
+ if (err)
+ printk(KERN_WARNING
+ "xenbus: suspend_cancel %s failed: %i\n",
+ dev->bus_id, err);
+ return 0;
+}
+
+static int resume_dev(struct device *dev, void *data)
+{
+ int err;
+ struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ DPRINTK("");
+
+ if (dev->driver == NULL)
+ return 0;
+
+ drv = to_xenbus_driver(dev->driver);
+ xdev = container_of(dev, struct xenbus_device, dev);
+
+ err = talk_to_otherend(xdev);
+ if (err) {
+ printk(KERN_WARNING
+ "xenbus: resume (talk_to_otherend) %s failed: %i\n",
+ dev->bus_id, err);
+ return err;
+ }
+
+ xdev->state = XenbusStateInitialising;
+
+ if (drv->resume) {
+ err = drv->resume(xdev);
+ if (err) {
+ printk(KERN_WARNING
+ "xenbus: resume %s failed: %i\n",
+ dev->bus_id, err);
+ return err;
+ }
+ }
+
+ err = watch_otherend(xdev);
+ if (err) {
+ printk(KERN_WARNING
+ "xenbus_probe: resume (watch_otherend) %s failed: "
+ "%d.\n", dev->bus_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+void xenbus_suspend(void)
+{
+ DPRINTK("");
+
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
+ xenbus_backend_suspend(suspend_dev);
+ xs_suspend();
+}
+EXPORT_SYMBOL_GPL(xenbus_suspend);
+
+void xenbus_resume(void)
+{
+ xb_init_comms();
+ xs_resume();
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
+ xenbus_backend_resume(resume_dev);
+}
+EXPORT_SYMBOL_GPL(xenbus_resume);
+
+void xenbus_suspend_cancel(void)
+{
+ xs_suspend_cancel();
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
+ xenbus_backend_resume(suspend_cancel_dev);
+}
+EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
+
+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
+int xenstored_ready = 0;
+
+
+int register_xenstore_notifier(struct notifier_block *nb)
+{
+ int ret = 0;
+
+ if (xenstored_ready > 0)
+ ret = nb->notifier_call(nb, 0, NULL);
+ else
+ blocking_notifier_chain_register(&xenstore_chain, nb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_xenstore_notifier);
+
+void unregister_xenstore_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&xenstore_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
+
+void xenbus_probe(struct work_struct *unused)
+{
+ BUG_ON((xenstored_ready <= 0));
+
+ /* Enumerate devices in xenstore and watch for changes. */
+ xenbus_probe_devices(&xenbus_frontend);
+ register_xenbus_watch(&fe_watch);
+ xenbus_backend_probe_and_watch();
+
+ /* Notify others that xenstore is up */
+ blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
+}
+
+static int __init xenbus_probe_init(void)
+{
+ int err = 0;
+
+ DPRINTK("");
+
+ err = -ENODEV;
+ if (!is_running_on_xen())
+ goto out_error;
+
+ /* Register ourselves with the kernel bus subsystem */
+ err = bus_register(&xenbus_frontend.bus);
+ if (err)
+ goto out_error;
+
+ err = xenbus_backend_bus_register();
+ if (err)
+ goto out_unreg_front;
+
+ /*
+ * Domain0 doesn't have a store_evtchn or store_mfn yet.
+ */
+ if (is_initial_xendomain()) {
+ /* dom0 not yet supported */
+ } else {
+ xenstored_ready = 1;
+ xen_store_evtchn = xen_start_info->store_evtchn;
+ xen_store_mfn = xen_start_info->store_mfn;
+ }
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
+
+ /* Initialize the interface to xenstore. */
+ err = xs_init();
+ if (err) {
+ printk(KERN_WARNING
+ "XENBUS: Error initializing xenstore comms: %i\n", err);
+ goto out_unreg_back;
+ }
+
+ if (!is_initial_xendomain())
+ xenbus_probe(NULL);
+
+ return 0;
+
+ out_unreg_back:
+ xenbus_backend_bus_unregister();
+
+ out_unreg_front:
+ bus_unregister(&xenbus_frontend.bus);
+
+ out_error:
+ return err;
+}
+
+postcore_initcall(xenbus_probe_init);
+
+MODULE_LICENSE("GPL");
+
+static int is_disconnected_device(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+
+ /*
+ * A device with no driver will never connect. We care only about
+ * devices which should currently be in the process of connecting.
+ */
+ if (!dev->driver)
+ return 0;
+
+ /* Is this search limited to a particular driver? */
+ if (drv && (dev->driver != drv))
+ return 0;
+
+ return (xendev->state != XenbusStateConnected);
+}
+
+static int exists_disconnected_device(struct device_driver *drv)
+{
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ is_disconnected_device);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+
+ /* Is this operation limited to a particular driver? */
+ if (drv && (dev->driver != drv))
+ return 0;
+
+ if (!dev->driver) {
+ /* Information only: is this too noisy? */
+ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+ xendev->nodename);
+ } else if (xendev->state != XenbusStateConnected) {
+ printk(KERN_WARNING "XENBUS: Timeout connecting "
+ "to device: %s (state %d)\n",
+ xendev->nodename, xendev->state);
+ }
+
+ return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * Wait up to 10 seconds for all currently configured devices.  We need to do
+ * this to guarantee that the filesystems and/or network devices needed for
+ * boot are available before we allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+ unsigned long timeout = jiffies + 10*HZ;
+ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+
+ if (!ready_to_wait_for_devices || !is_running_on_xen())
+ return;
+
+ while (exists_disconnected_device(drv)) {
+ if (time_after(jiffies, timeout))
+ break;
+ schedule_timeout_interruptible(HZ/10);
+ }
+
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ print_device_status);
+}
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+ ready_to_wait_for_devices = 1;
+ wait_for_devices(NULL);
+ return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
new file mode 100644
index 000000000000..e09b19415a40
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * xenbus_probe.h
+ *
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 XenSource Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_PROBE_H
+#define _XENBUS_PROBE_H
+
+#ifdef CONFIG_XEN_BACKEND
+extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
+extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
+extern void xenbus_backend_probe_and_watch(void);
+extern int xenbus_backend_bus_register(void);
+extern void xenbus_backend_bus_unregister(void);
+#else
+static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
+static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
+static inline void xenbus_backend_probe_and_watch(void) {}
+static inline int xenbus_backend_bus_register(void) { return 0; }
+static inline void xenbus_backend_bus_unregister(void) {}
+#endif
+
+struct xen_bus_type
+{
+ char *root;
+ unsigned int levels;
+ int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
+ int (*probe)(const char *type, const char *dir);
+ struct bus_type bus;
+};
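+
+/*
+ * Sketch of how a bus instance is expected to be filled in by frontend or
+ * backend code; the values and helper names below are illustrative, not the
+ * in-tree definitions:
+ *
+ *	static struct xen_bus_type example_bus = {
+ *		.root = "device",
+ *		.levels = 2,			// device/<type>/<id>
+ *		.get_bus_id = example_bus_id,	// hypothetical helper
+ *		.probe = example_probe,		// hypothetical helper
+ *	};
+ */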
+
+extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
+extern int xenbus_dev_probe(struct device *_dev);
+extern int xenbus_dev_remove(struct device *_dev);
+extern int xenbus_register_driver_common(struct xenbus_driver *drv,
+ struct xen_bus_type *bus,
+ struct module *owner,
+ const char *mod_name);
+extern int xenbus_probe_node(struct xen_bus_type *bus,
+ const char *type,
+ const char *nodename);
+extern int xenbus_probe_devices(struct xen_bus_type *bus);
+
+extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+
+#endif
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
new file mode 100644
index 000000000000..9e943fbce81b
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -0,0 +1,861 @@
+/******************************************************************************
+ * xenbus_xs.c
+ *
+ * This is the kernel equivalent of the "xs" library. We don't need everything
+ * and we use xenbus_comms for communication.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/unistd.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/kthread.h>
+#include <linux/rwsem.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <xen/xenbus.h>
+#include "xenbus_comms.h"
+
+struct xs_stored_msg {
+ struct list_head list;
+
+ struct xsd_sockmsg hdr;
+
+ union {
+ /* Queued replies. */
+ struct {
+ char *body;
+ } reply;
+
+ /* Queued watch events. */
+ struct {
+ struct xenbus_watch *handle;
+ char **vec;
+ unsigned int vec_size;
+ } watch;
+ } u;
+};
+
+struct xs_handle {
+ /* A list of replies. Currently only one will ever be outstanding. */
+ struct list_head reply_list;
+ spinlock_t reply_lock;
+ wait_queue_head_t reply_waitq;
+
+ /*
+ * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
+ * response_mutex is never taken simultaneously with the other three.
+ */
+
+ /* One request at a time. */
+ struct mutex request_mutex;
+
+ /* Protect xenbus reader thread against save/restore. */
+ struct mutex response_mutex;
+
+ /* Protect transactions against save/restore. */
+ struct rw_semaphore transaction_mutex;
+
+ /* Protect watch (de)register against save/restore. */
+ struct rw_semaphore watch_mutex;
+};
+
+static struct xs_handle xs_state;
+
+/* List of registered watches, and a lock to protect it. */
+static LIST_HEAD(watches);
+static DEFINE_SPINLOCK(watches_lock);
+
+/* List of pending watch callback events, and a lock to protect it. */
+static LIST_HEAD(watch_events);
+static DEFINE_SPINLOCK(watch_events_lock);
+
+/*
+ * Details of the xenwatch callback kernel thread. The thread waits on the
+ * watch_events_waitq for work to do (queued on watch_events list). When it
+ * wakes up it acquires the xenwatch_mutex before reading the list and
+ * carrying out work.
+ */
+static pid_t xenwatch_pid;
+static DEFINE_MUTEX(xenwatch_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
+
+static int get_error(const char *errorstring)
+{
+ unsigned int i;
+
+ for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
+ if (i == ARRAY_SIZE(xsd_errors) - 1) {
+ printk(KERN_WARNING
+ "XENBUS xen store gave: unknown error %s",
+ errorstring);
+ return EINVAL;
+ }
+ }
+ return xsd_errors[i].errnum;
+}
+
+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
+{
+ struct xs_stored_msg *msg;
+ char *body;
+
+ spin_lock(&xs_state.reply_lock);
+
+ while (list_empty(&xs_state.reply_list)) {
+ spin_unlock(&xs_state.reply_lock);
+ /* XXX FIXME: Avoid synchronous wait for response here. */
+ wait_event(xs_state.reply_waitq,
+ !list_empty(&xs_state.reply_list));
+ spin_lock(&xs_state.reply_lock);
+ }
+
+ msg = list_entry(xs_state.reply_list.next,
+ struct xs_stored_msg, list);
+ list_del(&msg->list);
+
+ spin_unlock(&xs_state.reply_lock);
+
+ *type = msg->hdr.type;
+ if (len)
+ *len = msg->hdr.len;
+ body = msg->u.reply.body;
+
+ kfree(msg);
+
+ return body;
+}
+
+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
+{
+ void *ret;
+ struct xsd_sockmsg req_msg = *msg;
+ int err;
+
+ if (req_msg.type == XS_TRANSACTION_START)
+ down_read(&xs_state.transaction_mutex);
+
+ mutex_lock(&xs_state.request_mutex);
+
+ err = xb_write(msg, sizeof(*msg) + msg->len);
+ if (err) {
+ msg->type = XS_ERROR;
+ ret = ERR_PTR(err);
+ } else
+ ret = read_reply(&msg->type, &msg->len);
+
+ mutex_unlock(&xs_state.request_mutex);
+
+ if ((msg->type == XS_TRANSACTION_END) ||
+ ((req_msg.type == XS_TRANSACTION_START) &&
+ (msg->type == XS_ERROR)))
+ up_read(&xs_state.transaction_mutex);
+
+ return ret;
+}
+
+/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
+static void *xs_talkv(struct xenbus_transaction t,
+ enum xsd_sockmsg_type type,
+ const struct kvec *iovec,
+ unsigned int num_vecs,
+ unsigned int *len)
+{
+ struct xsd_sockmsg msg;
+ void *ret = NULL;
+ unsigned int i;
+ int err;
+
+ msg.tx_id = t.id;
+ msg.req_id = 0;
+ msg.type = type;
+ msg.len = 0;
+ for (i = 0; i < num_vecs; i++)
+ msg.len += iovec[i].iov_len;
+
+ mutex_lock(&xs_state.request_mutex);
+
+ err = xb_write(&msg, sizeof(msg));
+ if (err) {
+ mutex_unlock(&xs_state.request_mutex);
+ return ERR_PTR(err);
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
+ if (err) {
+ mutex_unlock(&xs_state.request_mutex);
+ return ERR_PTR(err);
+ }
+ }
+
+ ret = read_reply(&msg.type, len);
+
+ mutex_unlock(&xs_state.request_mutex);
+
+ if (IS_ERR(ret))
+ return ret;
+
+ if (msg.type == XS_ERROR) {
+ err = get_error(ret);
+ kfree(ret);
+ return ERR_PTR(-err);
+ }
+
+ if (msg.type != type) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "XENBUS unexpected type [%d], expected [%d]\n",
+ msg.type, type);
+ kfree(ret);
+ return ERR_PTR(-EINVAL);
+ }
+ return ret;
+}
+
+/* Simplified version of xs_talkv: single message. */
+static void *xs_single(struct xenbus_transaction t,
+ enum xsd_sockmsg_type type,
+ const char *string,
+ unsigned int *len)
+{
+ struct kvec iovec;
+
+ iovec.iov_base = (void *)string;
+ iovec.iov_len = strlen(string) + 1;
+ return xs_talkv(t, type, &iovec, 1, len);
+}
+
+/* Many commands only need an ack; we don't care what the reply says. */
+static int xs_error(char *reply)
+{
+ if (IS_ERR(reply))
+ return PTR_ERR(reply);
+ kfree(reply);
+ return 0;
+}
+
+static unsigned int count_strings(const char *strings, unsigned int len)
+{
+ unsigned int num;
+ const char *p;
+
+ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
+ num++;
+
+ return num;
+}
+
+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
+static char *join(const char *dir, const char *name)
+{
+ char *buffer;
+
+ if (strlen(name) == 0)
+ buffer = kasprintf(GFP_KERNEL, "%s", dir);
+ else
+ buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+ return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
+}
+
+static char **split(char *strings, unsigned int len, unsigned int *num)
+{
+ char *p, **ret;
+
+ /* Count the strings. */
+ *num = count_strings(strings, len);
+
+ /* Transfer to one big alloc for easy freeing. */
+ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
+ if (!ret) {
+ kfree(strings);
+ return ERR_PTR(-ENOMEM);
+ }
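+ /*
+  * Layout of the single allocation:
+  *   ret[0] .. ret[*num - 1]   pointers into the string block below
+  *   (char *)&ret[*num] ...    the strings themselves, copied from the
+  *                             caller's buffer
+  */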
+ memcpy(&ret[*num], strings, len);
+ kfree(strings);
+
+ strings = (char *)&ret[*num];
+ for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
+ ret[(*num)++] = p;
+
+ return ret;
+}
+
+char **xenbus_directory(struct xenbus_transaction t,
+ const char *dir, const char *node, unsigned int *num)
+{
+ char *strings, *path;
+ unsigned int len;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return (char **)path;
+
+ strings = xs_single(t, XS_DIRECTORY, path, &len);
+ kfree(path);
+ if (IS_ERR(strings))
+ return (char **)strings;
+
+ return split(strings, len, num);
+}
+EXPORT_SYMBOL_GPL(xenbus_directory);
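+
+/*
+ * Illustrative usage sketch; the directory path and helper below are
+ * hypothetical:
+ *
+ *	unsigned int i, num;
+ *	char **dir = xenbus_directory(XBT_NIL, "device", "vif", &num);
+ *	if (!IS_ERR(dir)) {
+ *		for (i = 0; i < num; i++)
+ *			use_entry(dir[i]);	// hypothetical
+ *		kfree(dir);	// one allocation holds pointers and strings
+ *	}
+ */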
+
+/* Check if a path exists. Return 1 if it does. */
+int xenbus_exists(struct xenbus_transaction t,
+ const char *dir, const char *node)
+{
+ char **d;
+ int dir_n;
+
+ d = xenbus_directory(t, dir, node, &dir_n);
+ if (IS_ERR(d))
+ return 0;
+ kfree(d);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(xenbus_exists);
+
+/* Get the value of a single file.
+ * Returns a kmalloc'ed value: call kfree() on it after use.
+ * len indicates length in bytes.
+ */
+void *xenbus_read(struct xenbus_transaction t,
+ const char *dir, const char *node, unsigned int *len)
+{
+ char *path;
+ void *ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return (void *)path;
+
+ ret = xs_single(t, XS_READ, path, len);
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_read);
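+
+/*
+ * Illustrative usage; the path is hypothetical. Note the kfree() of the
+ * returned buffer:
+ *
+ *	char *val = xenbus_read(XBT_NIL, "device/vbd/0", "backend", NULL);
+ *	if (!IS_ERR(val)) {
+ *		use_value(val);		// hypothetical
+ *		kfree(val);
+ *	}
+ */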
+
+/* Write the value of a single file.
+ * Returns -err on failure.
+ */
+int xenbus_write(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *string)
+{
+ const char *path;
+ struct kvec iovec[2];
+ int ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ iovec[0].iov_base = (void *)path;
+ iovec[0].iov_len = strlen(path) + 1;
+ iovec[1].iov_base = (void *)string;
+ iovec[1].iov_len = strlen(string);
+
+ ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_write);
+
+/* Create a new directory. */
+int xenbus_mkdir(struct xenbus_transaction t,
+ const char *dir, const char *node)
+{
+ char *path;
+ int ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_mkdir);
+
+/* Destroy a file or directory (directories must be empty). */
+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
+{
+ char *path;
+ int ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ret = xs_error(xs_single(t, XS_RM, path, NULL));
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_rm);
+
+/* Start a transaction: changes by others will not be seen during this
+ * transaction, and our changes will not be visible to others until the
+ * transaction ends.
+ */
+int xenbus_transaction_start(struct xenbus_transaction *t)
+{
+ char *id_str;
+
+ down_read(&xs_state.transaction_mutex);
+
+ id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
+ if (IS_ERR(id_str)) {
+ up_read(&xs_state.transaction_mutex);
+ return PTR_ERR(id_str);
+ }
+
+ t->id = simple_strtoul(id_str, NULL, 0);
+ kfree(id_str);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_transaction_start);
+
+/* End a transaction.
+ * If abort is true, the transaction is discarded instead of committed.
+ */
+int xenbus_transaction_end(struct xenbus_transaction t, int abort)
+{
+ char abortstr[2];
+ int err;
+
+ if (abort)
+ strcpy(abortstr, "F");
+ else
+ strcpy(abortstr, "T");
+
+ err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
+
+ up_read(&xs_state.transaction_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_transaction_end);
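+
+/*
+ * Typical transaction pattern, sketched for illustration; error handling is
+ * abbreviated and the node name is hypothetical:
+ *
+ *	struct xenbus_transaction xbt;
+ *	int err;
+ * again:
+ *	err = xenbus_transaction_start(&xbt);
+ *	if (err)
+ *		return err;
+ *	err = xenbus_printf(xbt, dir, "state", "%d", 1);
+ *	if (err) {
+ *		xenbus_transaction_end(xbt, 1);		// abort
+ *		return err;
+ *	}
+ *	err = xenbus_transaction_end(xbt, 0);		// commit
+ *	if (err == -EAGAIN)
+ *		goto again;	// xenstored reported a conflict: retry
+ */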
+
+/* Single read and scanf: returns -errno or num scanned. */
+int xenbus_scanf(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ char *val;
+
+ val = xenbus_read(t, dir, node, NULL);
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ va_start(ap, fmt);
+ ret = vsscanf(val, fmt, ap);
+ va_end(ap);
+ kfree(val);
+ /* Distinctive errno. */
+ if (ret == 0)
+ return -ERANGE;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_scanf);
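+
+/*
+ * Illustrative usage; the node path and variables are hypothetical. The
+ * return value is the number of items scanned, or -errno:
+ *
+ *	unsigned int evtchn;
+ *	err = xenbus_scanf(XBT_NIL, "device/vif/0", "event-channel",
+ *			   "%u", &evtchn);
+ *	if (err != 1)
+ *		handle_failure(err);	// hypothetical; err < 0 is -errno
+ */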
+
+/* Single printf and write: returns -errno or 0. */
+int xenbus_printf(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+#define PRINTF_BUFFER_SIZE 4096
+ char *printf_buffer;
+
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ if (printf_buffer == NULL)
+ return -ENOMEM;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+ va_end(ap);
+
+ BUG_ON(ret > PRINTF_BUFFER_SIZE - 1);
+ ret = xenbus_write(t, dir, node, printf_buffer);
+
+ kfree(printf_buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_printf);
+
+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
+{
+ va_list ap;
+ const char *name;
+ int ret = 0;
+
+ va_start(ap, dir);
+ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
+ const char *fmt = va_arg(ap, char *);
+ void *result = va_arg(ap, void *);
+ char *p;
+
+ p = xenbus_read(t, dir, name, NULL);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ break;
+ }
+ if (fmt) {
+ if (sscanf(p, fmt, result) == 0)
+ ret = -EINVAL;
+ kfree(p);
+ } else
+ *(char **)result = p;
+ }
+ va_end(ap);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_gather);
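+
+/*
+ * Illustrative usage sketch; node names and variables are hypothetical. A
+ * NULL format pointer stores the raw string, which the caller must kfree():
+ *
+ *	unsigned long ring_ref;
+ *	unsigned int evtchn;
+ *	char *protocol;
+ *	err = xenbus_gather(XBT_NIL, otherend,
+ *			    "ring-ref", "%lu", &ring_ref,
+ *			    "event-channel", "%u", &evtchn,
+ *			    "protocol", NULL, &protocol,
+ *			    NULL);
+ */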
+
+static int xs_watch(const char *path, const char *token)
+{
+ struct kvec iov[2];
+
+ iov[0].iov_base = (void *)path;
+ iov[0].iov_len = strlen(path) + 1;
+ iov[1].iov_base = (void *)token;
+ iov[1].iov_len = strlen(token) + 1;
+
+ return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
+ ARRAY_SIZE(iov), NULL));
+}
+
+static int xs_unwatch(const char *path, const char *token)
+{
+ struct kvec iov[2];
+
+ iov[0].iov_base = (char *)path;
+ iov[0].iov_len = strlen(path) + 1;
+ iov[1].iov_base = (char *)token;
+ iov[1].iov_len = strlen(token) + 1;
+
+ return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
+ ARRAY_SIZE(iov), NULL));
+}
+
+static struct xenbus_watch *find_watch(const char *token)
+{
+ struct xenbus_watch *i, *cmp;
+
+ cmp = (void *)simple_strtoul(token, NULL, 16);
+
+ list_for_each_entry(i, &watches, list)
+ if (i == cmp)
+ return i;
+
+ return NULL;
+}
+
+/* Register callback to watch this node. */
+int register_xenbus_watch(struct xenbus_watch *watch)
+{
+ /* The watch pointer, printed in hex ASCII, is the token. */
+ char token[sizeof(watch) * 2 + 1];
+ int err;
+
+ sprintf(token, "%lX", (long)watch);
+
+ down_read(&xs_state.watch_mutex);
+
+ spin_lock(&watches_lock);
+ BUG_ON(find_watch(token));
+ list_add(&watch->list, &watches);
+ spin_unlock(&watches_lock);
+
+ err = xs_watch(watch->node, token);
+
+ /* Ignore errors due to multiple registration. */
+ if ((err != 0) && (err != -EEXIST)) {
+ spin_lock(&watches_lock);
+ list_del(&watch->list);
+ spin_unlock(&watches_lock);
+ }
+
+ up_read(&xs_state.watch_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(register_xenbus_watch);
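+
+/*
+ * Illustrative watch registration; the callback and node are hypothetical.
+ * The callback runs in the xenwatch kernel thread (see xenwatch_thread()
+ * below):
+ *
+ *	static void my_watch_cb(struct xenbus_watch *watch,
+ *				const char **vec, unsigned int len)
+ *	{
+ *		// vec[XS_WATCH_PATH] is the node that changed
+ *	}
+ *
+ *	static struct xenbus_watch my_watch = {
+ *		.node = "device/vif/0/state",
+ *		.callback = my_watch_cb,
+ *	};
+ *
+ *	register_xenbus_watch(&my_watch);
+ */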
+
+void unregister_xenbus_watch(struct xenbus_watch *watch)
+{
+ struct xs_stored_msg *msg, *tmp;
+ char token[sizeof(watch) * 2 + 1];
+ int err;
+
+ sprintf(token, "%lX", (long)watch);
+
+ down_read(&xs_state.watch_mutex);
+
+ spin_lock(&watches_lock);
+ BUG_ON(!find_watch(token));
+ list_del(&watch->list);
+ spin_unlock(&watches_lock);
+
+ err = xs_unwatch(watch->node, token);
+ if (err)
+ printk(KERN_WARNING
+ "XENBUS Failed to release watch %s: %i\n",
+ watch->node, err);
+
+ up_read(&xs_state.watch_mutex);
+
+ /* Make sure there are no callbacks currently running (unless
+ it's us). */
+ if (current->pid != xenwatch_pid)
+ mutex_lock(&xenwatch_mutex);
+
+ /* Cancel pending watch events. */
+ spin_lock(&watch_events_lock);
+ list_for_each_entry_safe(msg, tmp, &watch_events, list) {
+ if (msg->u.watch.handle != watch)
+ continue;
+ list_del(&msg->list);
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+ }
+ spin_unlock(&watch_events_lock);
+
+ if (current->pid != xenwatch_pid)
+ mutex_unlock(&xenwatch_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
+
+void xs_suspend(void)
+{
+ down_write(&xs_state.transaction_mutex);
+ down_write(&xs_state.watch_mutex);
+ mutex_lock(&xs_state.request_mutex);
+ mutex_lock(&xs_state.response_mutex);
+}
+
+void xs_resume(void)
+{
+ struct xenbus_watch *watch;
+ char token[sizeof(watch) * 2 + 1];
+
+ mutex_unlock(&xs_state.response_mutex);
+ mutex_unlock(&xs_state.request_mutex);
+ up_write(&xs_state.transaction_mutex);
+
+ /* No need for watches_lock: the watch_mutex is sufficient. */
+ list_for_each_entry(watch, &watches, list) {
+ sprintf(token, "%lX", (long)watch);
+ xs_watch(watch->node, token);
+ }
+
+ up_write(&xs_state.watch_mutex);
+}
+
+void xs_suspend_cancel(void)
+{
+ mutex_unlock(&xs_state.response_mutex);
+ mutex_unlock(&xs_state.request_mutex);
+ up_write(&xs_state.watch_mutex);
+ up_write(&xs_state.transaction_mutex);
+}
+
+static int xenwatch_thread(void *unused)
+{
+ struct list_head *ent;
+ struct xs_stored_msg *msg;
+
+ for (;;) {
+ wait_event_interruptible(watch_events_waitq,
+ !list_empty(&watch_events));
+
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&xenwatch_mutex);
+
+ spin_lock(&watch_events_lock);
+ ent = watch_events.next;
+ if (ent != &watch_events)
+ list_del(ent);
+ spin_unlock(&watch_events_lock);
+
+ if (ent != &watch_events) {
+ msg = list_entry(ent, struct xs_stored_msg, list);
+ msg->u.watch.handle->callback(
+ msg->u.watch.handle,
+ (const char **)msg->u.watch.vec,
+ msg->u.watch.vec_size);
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+ }
+
+ mutex_unlock(&xenwatch_mutex);
+ }
+
+ return 0;
+}
+
+static int process_msg(void)
+{
+ struct xs_stored_msg *msg;
+ char *body;
+ int err;
+
+ /*
+ * We must disallow save/restore while reading a xenstore message.
+ * A partial read across s/r leaves us out of sync with xenstored.
+ */
+ for (;;) {
+ err = xb_wait_for_data_to_read();
+ if (err)
+ return err;
+ mutex_lock(&xs_state.response_mutex);
+ if (xb_data_to_read())
+ break;
+ /* We raced with save/restore: pending data 'disappeared'. */
+ mutex_unlock(&xs_state.response_mutex);
+ }
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (msg == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xb_read(&msg->hdr, sizeof(msg->hdr));
+ if (err) {
+ kfree(msg);
+ goto out;
+ }
+
+ body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
+ if (body == NULL) {
+ kfree(msg);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xb_read(body, msg->hdr.len);
+ if (err) {
+ kfree(body);
+ kfree(msg);
+ goto out;
+ }
+ body[msg->hdr.len] = '\0';
+
+ if (msg->hdr.type == XS_WATCH_EVENT) {
+ msg->u.watch.vec = split(body, msg->hdr.len,
+ &msg->u.watch.vec_size);
+ if (IS_ERR(msg->u.watch.vec)) {
+ kfree(msg);
+ err = PTR_ERR(msg->u.watch.vec);
+ goto out;
+ }
+
+ spin_lock(&watches_lock);
+ msg->u.watch.handle = find_watch(
+ msg->u.watch.vec[XS_WATCH_TOKEN]);
+ if (msg->u.watch.handle != NULL) {
+ spin_lock(&watch_events_lock);
+ list_add_tail(&msg->list, &watch_events);
+ wake_up(&watch_events_waitq);
+ spin_unlock(&watch_events_lock);
+ } else {
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+ }
+ spin_unlock(&watches_lock);
+ } else {
+ msg->u.reply.body = body;
+ spin_lock(&xs_state.reply_lock);
+ list_add_tail(&msg->list, &xs_state.reply_list);
+ spin_unlock(&xs_state.reply_lock);
+ wake_up(&xs_state.reply_waitq);
+ }
+
+ out:
+ mutex_unlock(&xs_state.response_mutex);
+ return err;
+}
+
+static int xenbus_thread(void *unused)
+{
+ int err;
+
+ for (;;) {
+ err = process_msg();
+ if (err)
+ printk(KERN_WARNING "XENBUS error %d while reading "
+ "message\n", err);
+ if (kthread_should_stop())
+ break;
+ }
+
+ return 0;
+}
+
+int xs_init(void)
+{
+ int err;
+ struct task_struct *task;
+
+ INIT_LIST_HEAD(&xs_state.reply_list);
+ spin_lock_init(&xs_state.reply_lock);
+ init_waitqueue_head(&xs_state.reply_waitq);
+
+ mutex_init(&xs_state.request_mutex);
+ mutex_init(&xs_state.response_mutex);
+ init_rwsem(&xs_state.transaction_mutex);
+ init_rwsem(&xs_state.watch_mutex);
+
+ /* Initialize the shared memory rings to talk to xenstored */
+ err = xb_init_comms();
+ if (err)
+ return err;
+
+ task = kthread_run(xenwatch_thread, NULL, "xenwatch");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ xenwatch_pid = task->pid;
+
+ task = kthread_run(xenbus_thread, NULL, "xenbus");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ return 0;
+}
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index c3ba0ec334c4..9130f1c12c26 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -49,8 +49,9 @@ static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *
static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
-static ssize_t zorro_read_config(struct kobject *kobj, char *buf, loff_t off,
- size_t count)
+static ssize_t zorro_read_config(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
kobj));
@@ -78,7 +79,6 @@ static struct bin_attribute zorro_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
- .owner = THIS_MODULE
},
.size = sizeof(struct ConfigDev),
.read = zorro_read_config,